diff --git a/go.mod b/go.mod
index 46cc8e5bb..4944eb90c 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/containers/podman-tui
-go 1.24.7
+go 1.25.0
require (
github.com/containers/buildah v1.42.2
@@ -19,7 +19,7 @@ require (
github.com/rivo/tview v0.42.0
github.com/rs/zerolog v1.34.0
github.com/sirupsen/logrus v1.9.3
- github.com/spf13/cobra v1.10.1
+ github.com/spf13/cobra v1.10.2
go.podman.io/common v0.66.1
golang.org/x/crypto v0.46.0
)
@@ -53,7 +53,7 @@ require (
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/gdamore/encoding v1.0.1 // indirect
- github.com/go-jose/go-jose/v4 v4.0.5 // indirect
+ github.com/go-jose/go-jose/v4 v4.1.3 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
@@ -74,11 +74,10 @@ require (
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/kr/fs v0.1.0 // indirect
- github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
github.com/manifoldco/promptui v0.9.0 // indirect
- github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mattn/go-isatty v0.0.19 // indirect
+ github.com/mattn/go-colorable v0.1.14 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mattn/go-sqlite3 v1.14.32 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
@@ -104,42 +103,41 @@ require (
github.com/proglottis/gpgme v0.1.5 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect
- github.com/sigstore/fulcio v1.7.1 // indirect
- github.com/sigstore/protobuf-specs v0.4.1 // indirect
- github.com/sigstore/sigstore v1.9.5 // indirect
+ github.com/sigstore/fulcio v1.8.3 // indirect
+ github.com/sigstore/protobuf-specs v0.5.0 // indirect
+ github.com/sigstore/sigstore v1.10.0 // indirect
github.com/skeema/knownhosts v1.3.2 // indirect
github.com/smallstep/pkcs7 v0.1.1 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
github.com/sylabs/sif/v2 v2.22.0 // indirect
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
- github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.15 // indirect
github.com/vbatts/tar-split v0.12.1 // indirect
github.com/vbauerster/mpb/v8 v8.10.2 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
- go.opentelemetry.io/otel v1.36.0 // indirect
- go.opentelemetry.io/otel/metric v1.36.0 // indirect
- go.opentelemetry.io/otel/trace v1.36.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
+ go.opentelemetry.io/otel v1.38.0 // indirect
+ go.opentelemetry.io/otel/metric v1.38.0 // indirect
+ go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.podman.io/image/v5 v5.38.0 // indirect
go.podman.io/storage v1.61.0 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
- go.yaml.in/yaml/v2 v2.4.2 // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect
+ golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
- golang.org/x/time v0.11.0 // indirect
+ golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.39.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect
- google.golang.org/grpc v1.72.2 // indirect
- google.golang.org/protobuf v1.36.9 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
+ google.golang.org/grpc v1.77.0 // indirect
+ google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
diff --git a/go.sum b/go.sum
index 408719b01..98b615198 100644
--- a/go.sum
+++ b/go.sum
@@ -100,8 +100,8 @@ github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZ
github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
github.com/gkampitakis/go-snaps v0.5.14 h1:3fAqdB6BCPKHDMHAKRwtPUwYexKtGrNuw8HX/T/4neo=
github.com/gkampitakis/go-snaps v0.5.14/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
-github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
-github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
+github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
+github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -109,8 +109,6 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
-github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -135,8 +133,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E=
github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -148,8 +146,6 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
-github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
-github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -166,19 +162,19 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ=
-github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk=
github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
@@ -247,21 +243,21 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/proglottis/gpgme v0.1.5 h1:KCGyOw8sQ+SI96j6G8D8YkOGn+1TwbQTT9/zQXoVlz0=
github.com/proglottis/gpgme v0.1.5/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM=
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
-github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
-github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
+github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rivo/tview v0.42.0 h1:b/ftp+RxtDsHSaynXTbJb+/n/BxDEi+W3UfF5jILK6c=
github.com/rivo/tview v0.42.0/go.mod h1:cSfIYfhpSGCjp3r/ECJb+GKS7cGJnqV8vfjQPwoXyfY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
@@ -274,20 +270,20 @@ github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL
github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
-github.com/sigstore/fulcio v1.7.1 h1:RcoW20Nz49IGeZyu3y9QYhyyV3ZKQ85T+FXPKkvE+aQ=
-github.com/sigstore/fulcio v1.7.1/go.mod h1:7lYY+hsd8Dt+IvKQRC+KEhWpCZ/GlmNvwIa5JhypMS8=
-github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc=
-github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
-github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU=
-github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII=
+github.com/sigstore/fulcio v1.8.3 h1:zkuAkRHbD53hhYGlBHHeAW4NRDrrTiDHumAbcfSyyFw=
+github.com/sigstore/fulcio v1.8.3/go.mod h1:YxP7TTdn9H5Gg+dXOsu61X36LLYxT2ZuvODhWelMNwA=
+github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY=
+github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
+github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI=
+github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skeema/knownhosts v1.3.2 h1:EDL9mgf4NzwMXCTfaxSD/o/a5fxDw/xL9nkU28JjdBg=
github.com/skeema/knownhosts v1.3.2/go.mod h1:bEg3iQAuw+jyiw+484wwFJoKSLwcfd7fqRy+N0QTiow=
github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU=
github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA=
-github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
-github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -313,8 +309,6 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
-github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
-github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY=
github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
@@ -328,26 +322,26 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
-go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
-go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
-go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
-go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
-go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
-go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
-go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
-go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
-go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
-go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
-go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
-go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
+go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
+go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
+go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
+go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.podman.io/common v0.66.1 h1:zDyd4HhVgQAN8LupBHCnhtM3FEOJ9DwmThjulXZq2qA=
go.podman.io/common v0.66.1/go.mod h1:aNd2a0S7pY+fx1X5kpQYuF4hbwLU8ZOccuVrhu7h1Xc=
go.podman.io/image/v5 v5.38.0 h1:aUKrCANkPvze1bnhLJsaubcfz0d9v/bSDLnwsXJm6G4=
@@ -356,8 +350,8 @@ go.podman.io/storage v1.61.0 h1:5hD/oyRYt1f1gxgvect+8syZBQhGhV28dCw2+CZpx0Q=
go.podman.io/storage v1.61.0/go.mod h1:A3UBK0XypjNZ6pghRhuxg62+2NIm5lcUGv/7XyMhMUI=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
-go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
-go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -369,8 +363,8 @@ golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ss
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
-golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588=
-golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
+golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
+golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
@@ -439,8 +433,8 @@ golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
-golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
-golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
+golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
@@ -450,14 +444,16 @@ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxb
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e h1:UdXH7Kzbj+Vzastr5nVfccbmFsmYNygVLSPk1pEfDoY=
-google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e h1:ztQaXfzEXTmCBvbtWYRhJxW+0iJcz2qXfd38/e9l7bA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
-google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
-google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
-google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
-google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
+google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4=
+google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
+google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
+google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
deleted file mode 100644
index 6f717dbd8..000000000
--- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# v4.0.4
-
-## Fixed
-
- - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a
- breaking change. See #136 / #137.
-
-# v4.0.3
-
-## Changed
-
- - Allow unmarshalling JSONWebKeySets with unsupported key types (#130)
- - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129)
- - Dependency updates
-
-# v4.0.2
-
-## Changed
-
- - Improved documentation of Verify() to note that JSONWebKeySet is a supported
- argument type (#104)
- - Defined exported error values for missing x5c header and unsupported elliptic
- curves error cases (#117)
-
-# v4.0.1
-
-## Fixed
-
- - An attacker could send a JWE containing compressed data that used large
- amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`.
- Those functions now return an error if the decompressed data would exceed
- 250kB or 10x the compressed size (whichever is larger). Thanks to
- Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj)
- for reporting.
-
-# v4.0.0
-
-This release makes some breaking changes in order to more thoroughly
-address the vulnerabilities discussed in [Three New Attacks Against JSON Web
-Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot
-token".
-
-## Changed
-
- - Limit JWT encryption types (exclude password or public key types) (#78)
- - Enforce minimum length for HMAC keys (#85)
- - jwt: match any audience in a list, rather than requiring all audiences (#81)
- - jwt: accept only Compact Serialization (#75)
- - jws: Add expected algorithms for signatures (#74)
- - Require specifying expected algorithms for ParseEncrypted,
- ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned,
- jwt.ParseSignedAndEncrypted (#69, #74)
- - Usually there is a small, known set of appropriate algorithms for a program
- to use and it's a mistake to allow unexpected algorithms. For instance the
- "billion hash attack" relies in part on programs accepting the PBES2
- encryption algorithm and doing the necessary work even if they weren't
- specifically configured to allow PBES2.
- - Revert "Strip padding off base64 strings" (#82)
- - The specs require base64url encoding without padding.
- - Minimum supported Go version is now 1.21
-
-## Added
-
- - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON.
- - These allow parsing a specific serialization, as opposed to ParseSigned and
- ParseEncrypted, which try to automatically detect which serialization was
- provided. It's common to require a specific serialization for a specific
- protocol - for instance JWT requires Compact serialization.
-
-[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf
-
-# v3.0.2
-
-## Fixed
-
- - DecryptMulti: handle decompression error (#19)
-
-## Changed
-
- - jwe/CompactSerialize: improve performance (#67)
- - Increase the default number of PBKDF2 iterations to 600k (#48)
- - Return the proper algorithm for ECDSA keys (#45)
-
-## Added
-
- - Add Thumbprint support for opaque signers (#38)
-
-# v3.0.1
-
-## Fixed
-
- - Security issue: an attacker specifying a large "p2c" value can cause
- JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large
- amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the
- disclosure and to Tom Tervoort for originally publishing the category of attack.
- https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf
diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md
index 02b574954..55c550917 100644
--- a/vendor/github.com/go-jose/go-jose/v4/README.md
+++ b/vendor/github.com/go-jose/go-jose/v4/README.md
@@ -3,7 +3,6 @@
[](https://pkg.go.dev/github.com/go-jose/go-jose/v4)
[](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt)
[](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
-[](https://github.com/go-jose/go-jose/actions)
Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. This includes support for JSON Web Encryption,
@@ -29,17 +28,20 @@ libraries in other languages.
### Versions
-[Version 4](https://github.com/go-jose/go-jose)
-([branch](https://github.com/go-jose/go-jose/tree/main),
-[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version:
+The forthcoming Version 5 will be released with several breaking API changes,
+and will require Golang's `encoding/json/v2`, which currently requires
+Go 1.25 built with GOEXPERIMENT=jsonv2.
+
+Version 4 is the current stable version:
import "github.com/go-jose/go-jose/v4"
-The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which
-are still useable but not actively developed anymore.
+It supports at least the current and previous Golang release. Currently it
+requires Golang 1.24.
+
+Version 3 is only receiving critical security updates. Migration to Version 4 is recommended.
-Version 3, in this repo, is still receiving security fixes but not functionality
-updates.
+Versions 1 and 2 are obsolete, but can be found in the old repository, [square/go-jose](https://github.com/square/go-jose).
### Supported algorithms
@@ -47,36 +49,36 @@ See below for a table of supported algorithms. Algorithm identifiers match
the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518)
standard where possible. The Godoc reference has a list of constants.
- Key encryption | Algorithm identifier(s)
- :------------------------- | :------------------------------
- RSA-PKCS#1v1.5 | RSA1_5
- RSA-OAEP | RSA-OAEP, RSA-OAEP-256
- AES key wrap | A128KW, A192KW, A256KW
- AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
- ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
- ECDH-ES (direct) | ECDH-ES1
- Direct encryption | dir1
+| Key encryption | Algorithm identifier(s) |
+|:-----------------------|:-----------------------------------------------|
+| RSA-PKCS#1v1.5 | RSA1_5 |
+| RSA-OAEP | RSA-OAEP, RSA-OAEP-256 |
+| AES key wrap | A128KW, A192KW, A256KW |
+| AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW |
+| ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW |
+| ECDH-ES (direct) | ECDH-ES1 |
+| Direct encryption | dir1 |
1. Not supported in multi-recipient mode
- Signing / MAC | Algorithm identifier(s)
- :------------------------- | :------------------------------
- RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
- RSASSA-PSS | PS256, PS384, PS512
- HMAC | HS256, HS384, HS512
- ECDSA | ES256, ES384, ES512
- Ed25519 | EdDSA2
+| Signing / MAC | Algorithm identifier(s) |
+|:------------------|:------------------------|
+| RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 |
+| RSASSA-PSS | PS256, PS384, PS512 |
+| HMAC | HS256, HS384, HS512 |
+| ECDSA | ES256, ES384, ES512 |
+| Ed25519 | EdDSA2 |
2. Only available in version 2 of the package
- Content encryption | Algorithm identifier(s)
- :------------------------- | :------------------------------
- AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
- AES-GCM | A128GCM, A192GCM, A256GCM
+| Content encryption | Algorithm identifier(s) |
+|:-------------------|:--------------------------------------------|
+| AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 |
+| AES-GCM | A128GCM, A192GCM, A256GCM |
- Compression | Algorithm identifiers(s)
- :------------------------- | -------------------------------
- DEFLATE (RFC 1951) | DEF
+| Compression        | Algorithm identifier(s)  |
+|:-------------------|--------------------------|
+| DEFLATE (RFC 1951) | DEF |
### Supported key types
@@ -85,12 +87,12 @@ library, and can be passed to corresponding functions such as `NewEncrypter` or
`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
allows attaching a key id.
- Algorithm(s) | Corresponding types
- :------------------------- | -------------------------------
- RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey)
- ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey)
- EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey)
- AES, HMAC | []byte
+| Algorithm(s) | Corresponding types |
+|:------------------|--------------------------------------------------------------------------------------------------------------------------------------|
+| RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey) |
+| ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey) |
+| EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey) |
+| AES, HMAC | []byte |
1. Only available in version 2 or later of the package
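Reviewer note: the table above describes which raw key types the library accepts and mentions that any of them can be wrapped in a JWK to carry a key ID. A minimal, hedged sketch of that usage with the v4 API follows; the key ID and payload are invented for illustration and error handling is kept simple.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	// Any key type from the table can be used directly, or wrapped in a
	// JSONWebKey so a key ID travels with it into the JWS header.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	signer, err := jose.NewSigner(jose.SigningKey{
		Algorithm: jose.ES256,
		Key:       &jose.JSONWebKey{Key: priv, KeyID: "example-key-1"}, // KeyID is illustrative
	}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte(`{"hello":"world"}`))
	if err != nil {
		panic(err)
	}

	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(compact)
}
```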
diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go
index d81b03b44..31290fc87 100644
--- a/vendor/github.com/go-jose/go-jose/v4/crypter.go
+++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go
@@ -286,6 +286,10 @@ func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKey
return newSymmetricRecipient(alg, encryptionKey)
case string:
return newSymmetricRecipient(alg, []byte(encryptionKey))
+ case JSONWebKey:
+ recipient, err := makeJWERecipient(alg, encryptionKey.Key)
+ recipient.keyID = encryptionKey.KeyID
+ return recipient, err
case *JSONWebKey:
recipient, err := makeJWERecipient(alg, encryptionKey.Key)
recipient.keyID = encryptionKey.KeyID
@@ -450,13 +454,9 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one")
}
- critical, err := headers.getCritical()
+ err := headers.checkNoCritical()
if err != nil {
- return nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
- }
-
- if len(critical) > 0 {
- return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
+ return nil, err
}
key, err := tryJWKS(decryptionKey, obj.Header)
@@ -523,13 +523,9 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
globalHeaders := obj.mergedHeaders(nil)
- critical, err := globalHeaders.getCritical()
+ err := globalHeaders.checkNoCritical()
if err != nil {
- return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
- }
-
- if len(critical) > 0 {
- return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
+ return -1, Header{}, nil, err
}
key, err := tryJWKS(decryptionKey, obj.Header)
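Reviewer note: the new `case JSONWebKey:` branch in `makeJWERecipient` means an encryption recipient key can now be supplied by value as well as by pointer, with its key ID still propagated into the JWE header. A hedged sketch of what that looks like from the caller's side; the key ID and plaintext are invented for the example.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// A JSONWebKey value (not only *JSONWebKey) is now accepted as the
	// recipient key; its KeyID ends up in the JWE header.
	enc, err := jose.NewEncrypter(jose.A128GCM, jose.Recipient{
		Algorithm: jose.RSA_OAEP_256,
		Key:       jose.JSONWebKey{Key: &rsaKey.PublicKey, KeyID: "enc-key-1"}, // KeyID is illustrative
	}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("example plaintext"))
	if err != nil {
		panic(err)
	}

	token, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(token)
}
```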
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwe.go b/vendor/github.com/go-jose/go-jose/v4/jwe.go
index 9f1322dcc..6102f9100 100644
--- a/vendor/github.com/go-jose/go-jose/v4/jwe.go
+++ b/vendor/github.com/go-jose/go-jose/v4/jwe.go
@@ -274,7 +274,7 @@ func validateAlgEnc(headers rawHeader, keyAlgorithms []KeyAlgorithm, contentEncr
if alg != "" && !containsKeyAlgorithm(keyAlgorithms, alg) {
return fmt.Errorf("unexpected key algorithm %q; expected %q", alg, keyAlgorithms)
}
- if alg != "" && !containsContentEncryption(contentEncryption, enc) {
+ if enc != "" && !containsContentEncryption(contentEncryption, enc) {
return fmt.Errorf("unexpected content encryption algorithm %q; expected %q", enc, contentEncryption)
}
return nil
@@ -288,11 +288,20 @@ func ParseEncryptedCompact(
keyAlgorithms []KeyAlgorithm,
contentEncryption []ContentEncryption,
) (*JSONWebEncryption, error) {
- // Five parts is four separators
- if strings.Count(input, ".") != 4 {
- return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts")
+ var parts [5]string
+ var ok bool
+
+ for i := range 4 {
+ parts[i], input, ok = strings.Cut(input, ".")
+ if !ok {
+ return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts")
+ }
+ }
+ // Validate that the last part does not contain more dots
+ if strings.ContainsRune(input, '.') {
+ return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts")
}
- parts := strings.SplitN(input, ".", 5)
+ parts[4] = input
rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
if err != nil {
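Reviewer note: two behavioral points in this hunk are worth calling out. `validateAlgEnc` now checks the content-encryption allowlist whenever `enc` is present (previously the check was gated on `alg`), and `ParseEncryptedCompact` rejects anything that does not have exactly five dot-separated parts without allocating a split slice. A rough end-to-end sketch of the caller-facing contract, using direct symmetric encryption purely for brevity; the key and plaintext are placeholders.

```go
package main

import (
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	key := []byte("0123456789abcdef") // 16 bytes, matching A128GCM

	enc, err := jose.NewEncrypter(jose.A128GCM,
		jose.Recipient{Algorithm: jose.DIRECT, Key: key}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := enc.Encrypt([]byte("hello"))
	if err != nil {
		panic(err)
	}
	token, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// Both allowlists are enforced at parse time; a malformed token that does
	// not have exactly five dot-separated parts is rejected here as well.
	parsed, err := jose.ParseEncryptedCompact(token,
		[]jose.KeyAlgorithm{jose.DIRECT},
		[]jose.ContentEncryption{jose.A128GCM})
	if err != nil {
		panic(err)
	}

	plaintext, err := parsed.Decrypt(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext))
}
```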
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go
index 9e57e93ba..164d6a161 100644
--- a/vendor/github.com/go-jose/go-jose/v4/jwk.go
+++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go
@@ -175,6 +175,8 @@ func (k JSONWebKey) MarshalJSON() ([]byte, error) {
}
// UnmarshalJSON reads a key from its JSON representation.
+//
+// Returns ErrUnsupportedKeyType for unrecognized or unsupported "kty" header values.
func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
var raw rawJSONWebKey
err = json.Unmarshal(data, &raw)
@@ -228,7 +230,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
}
key, err = raw.symmetricKey()
case "OKP":
- if raw.Crv == "Ed25519" && raw.X != nil {
+ if raw.Crv == "Ed25519" {
if raw.D != nil {
key, err = raw.edPrivateKey()
if err == nil {
@@ -238,17 +240,27 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
key, err = raw.edPublicKey()
keyPub = key
}
- } else {
- return fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv)
}
- default:
- return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)
+ case "":
+ // kty MUST be present
+ err = fmt.Errorf("go-jose/go-jose: missing json web key type")
}
if err != nil {
return
}
+ if key == nil {
+ // RFC 7517:
+ // 5. JWK Set Format
+ // ...
+ // Implementations SHOULD ignore JWKs within a JWK Set that use "kty"
+ // (key type) values that are not understood by them, that are missing
+ // required members, or for which values are out of the supported
+ // ranges.
+ return ErrUnsupportedKeyType
+ }
+
if certPub != nil && keyPub != nil {
if !reflect.DeepEqual(certPub, keyPub) {
return errors.New("go-jose/go-jose: invalid JWK, public keys in key and x5c fields do not match")
@@ -581,10 +593,10 @@ func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) {
func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) {
var missing []string
- switch {
- case key.D == nil:
+ if key.D == nil {
missing = append(missing, "D")
- case key.X == nil:
+ }
+ if key.X == nil {
missing = append(missing, "X")
}
@@ -611,19 +623,21 @@ func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) {
func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
var missing []string
- switch {
- case key.N == nil:
+ if key.N == nil {
missing = append(missing, "N")
- case key.E == nil:
+ }
+ if key.E == nil {
missing = append(missing, "E")
- case key.D == nil:
+ }
+ if key.D == nil {
missing = append(missing, "D")
- case key.P == nil:
+ }
+ if key.P == nil {
missing = append(missing, "P")
- case key.Q == nil:
+ }
+ if key.Q == nil {
missing = append(missing, "Q")
}
-
if len(missing) > 0 {
return nil, fmt.Errorf("go-jose/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
}
@@ -698,8 +712,19 @@ func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv)
}
- if key.X == nil || key.Y == nil || key.D == nil {
- return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, missing x/y/d values")
+ var missing []string
+ if key.X == nil {
+ missing = append(missing, "X")
+ }
+ if key.Y == nil {
+ missing = append(missing, "Y")
+ }
+ if key.D == nil {
+ missing = append(missing, "D")
+ }
+
+ if len(missing) > 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, missing %s value(s)", strings.Join(missing, ", "))
}
// The length of this octet string MUST be the full size of a coordinate for
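Reviewer note: with the old `default:` branch gone, an unrecognized `"kty"` no longer fails inside the switch; it falls through to the new `key == nil` check and surfaces as the existing `ErrUnsupportedKeyType` sentinel, matching the RFC 7517 §5 guidance quoted above. A small sketch of how a caller might detect that case; the `"FOO"` key type is deliberately made up.

```go
package main

import (
	"errors"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	// A JWK whose "kty" this library does not implement.
	raw := []byte(`{"kty":"FOO","kid":"k1"}`)

	var key jose.JSONWebKey
	err := key.UnmarshalJSON(raw)
	if errors.Is(err, jose.ErrUnsupportedKeyType) {
		// RFC 7517 §5: implementations SHOULD ignore JWKs in a set whose
		// key type they do not understand.
		fmt.Println("skipping unsupported key")
	}
}
```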
diff --git a/vendor/github.com/go-jose/go-jose/v4/jws.go b/vendor/github.com/go-jose/go-jose/v4/jws.go
index d09d8ba50..c40bd3ec1 100644
--- a/vendor/github.com/go-jose/go-jose/v4/jws.go
+++ b/vendor/github.com/go-jose/go-jose/v4/jws.go
@@ -75,7 +75,14 @@ type Signature struct {
original *rawSignatureInfo
}
-// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization.
+// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization. Validation fails if
+// the JWS is signed with an algorithm that isn't in the provided list of signature algorithms.
+// Applications should decide for themselves which signature algorithms are acceptable. If you're
+// not sure which signature algorithms your application might receive, consult the documentation of
+// the program which provides them or the protocol that you are implementing. You can also try
+// getting an example JWS and decoding it with a tool like https://jwt.io to see what its "alg"
+// header parameter indicates. The signature on the JWS does not get validated during parsing. Call
+// Verify() after parsing to validate the signature and obtain the payload.
//
// https://datatracker.ietf.org/doc/html/rfc7515#section-7
func ParseSigned(
@@ -90,7 +97,14 @@ func ParseSigned(
return parseSignedCompact(signature, nil, signatureAlgorithms)
}
-// ParseSignedCompact parses a message in JWS Compact Serialization.
+// ParseSignedCompact parses a message in JWS Compact Serialization. Validation fails if the JWS is
+// signed with an algorithm that isn't in the provided list of signature algorithms. Applications
+// should decide for themselves which signature algorithms are acceptable. If you're not sure which
+// signature algorithms your application might receive, consult the documentation of the program
+// which provides them or the protocol that you are implementing. You can also try getting an
+// example JWS and decoding it with a tool like https://jwt.io to see what its "alg" header
+// parameter indicates. The signature on the JWS does not get validated during parsing. Call
+// Verify() after parsing to validate the signature and obtain the payload.
//
// https://datatracker.ietf.org/doc/html/rfc7515#section-7.1
func ParseSignedCompact(
@@ -101,6 +115,15 @@ func ParseSignedCompact(
}
// ParseDetached parses a signed message in compact serialization format with detached payload.
+// Validation fails if the JWS is signed with an algorithm that isn't in the provided list of
+// signature algorithms. Applications should decide for themselves which signature algorithms are
+// acceptable. If you're not sure which signature algorithms your application might receive, consult
+// the documentation of the program which provides them or the protocol that you are implementing.
+// You can also try getting an example JWS and decoding it with a tool like https://jwt.io to see
+// what its "alg" header parameter indicates. The signature on the JWS does not get validated during
+// parsing. Call Verify() after parsing to validate the signature and obtain the payload.
+//
+// https://datatracker.ietf.org/doc/html/rfc7515#appendix-F
func ParseDetached(
signature string,
payload []byte,
@@ -181,6 +204,25 @@ func containsSignatureAlgorithm(haystack []SignatureAlgorithm, needle SignatureA
return false
}
+// ErrUnexpectedSignatureAlgorithm is returned when the signature algorithm in
+// the JWS header does not match one of the expected algorithms.
+type ErrUnexpectedSignatureAlgorithm struct {
+ // Got is the signature algorithm found in the JWS header.
+ Got SignatureAlgorithm
+ expected []SignatureAlgorithm
+}
+
+func (e *ErrUnexpectedSignatureAlgorithm) Error() string {
+ return fmt.Sprintf("unexpected signature algorithm %q; expected %q", e.Got, e.expected)
+}
+
+func newErrUnexpectedSignatureAlgorithm(got SignatureAlgorithm, expected []SignatureAlgorithm) error {
+ return &ErrUnexpectedSignatureAlgorithm{
+ Got: got,
+ expected: expected,
+ }
+}
+
// sanitized produces a cleaned-up JWS object from the raw JSON.
func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgorithm) (*JSONWebSignature, error) {
if len(signatureAlgorithms) == 0 {
@@ -236,8 +278,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo
alg := SignatureAlgorithm(signature.Header.Algorithm)
if !containsSignatureAlgorithm(signatureAlgorithms, alg) {
- return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q",
- alg, signatureAlgorithms)
+ return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms)
}
if signature.header != nil {
@@ -285,8 +326,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo
alg := SignatureAlgorithm(obj.Signatures[i].Header.Algorithm)
if !containsSignatureAlgorithm(signatureAlgorithms, alg) {
- return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q",
- alg, signatureAlgorithms)
+ return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms)
}
if obj.Signatures[i].header != nil {
@@ -321,35 +361,43 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo
return obj, nil
}
+const tokenDelim = "."
+
// parseSignedCompact parses a message in compact format.
func parseSignedCompact(
input string,
payload []byte,
signatureAlgorithms []SignatureAlgorithm,
) (*JSONWebSignature, error) {
- // Three parts is two separators
- if strings.Count(input, ".") != 2 {
+ protected, s, ok := strings.Cut(input, tokenDelim)
+ if !ok { // no period found
+ return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
+ }
+ claims, sig, ok := strings.Cut(s, tokenDelim)
+ if !ok { // only one period found
+ return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
+ }
+ if strings.ContainsRune(sig, '.') { // too many periods found
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
}
- parts := strings.SplitN(input, ".", 3)
- if parts[1] != "" && payload != nil {
+ if claims != "" && payload != nil {
return nil, fmt.Errorf("go-jose/go-jose: payload is not detached")
}
- rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
+ rawProtected, err := base64.RawURLEncoding.DecodeString(protected)
if err != nil {
return nil, err
}
if payload == nil {
- payload, err = base64.RawURLEncoding.DecodeString(parts[1])
+ payload, err = base64.RawURLEncoding.DecodeString(claims)
if err != nil {
return nil, err
}
}
- signature, err := base64.RawURLEncoding.DecodeString(parts[2])
+ signature, err := base64.RawURLEncoding.DecodeString(sig)
if err != nil {
return nil, err
}
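Reviewer note: the expanded doc comments and the new exported `ErrUnexpectedSignatureAlgorithm` type describe a contract that is easiest to see in code: the caller supplies the algorithm allowlist at parse time, a mismatch can be matched structurally with `errors.As` instead of string comparison, and the signature is only checked by `Verify`. A hedged sketch; the payload and key are invented.

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"errors"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.EdDSA, Key: priv}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := signer.Sign([]byte(`{"msg":"hi"}`))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// Parsing with the wrong allowlist now yields a typed error that can be
	// inspected with errors.As instead of string matching.
	_, err = jose.ParseSigned(compact, []jose.SignatureAlgorithm{jose.ES256})
	var algErr *jose.ErrUnexpectedSignatureAlgorithm
	if errors.As(err, &algErr) {
		fmt.Println("rejected algorithm:", algErr.Got)
	}

	// Parsing does not verify the signature; Verify must be called afterwards.
	parsed, err := jose.ParseSigned(compact, []jose.SignatureAlgorithm{jose.EdDSA})
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(pub)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```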
diff --git a/vendor/github.com/go-jose/go-jose/v4/shared.go b/vendor/github.com/go-jose/go-jose/v4/shared.go
index 1ec339612..35130b3aa 100644
--- a/vendor/github.com/go-jose/go-jose/v4/shared.go
+++ b/vendor/github.com/go-jose/go-jose/v4/shared.go
@@ -77,6 +77,9 @@ var (
// ErrUnsupportedEllipticCurve indicates unsupported or unknown elliptic curve has been found.
ErrUnsupportedEllipticCurve = errors.New("go-jose/go-jose: unsupported/unknown elliptic curve")
+
+ // ErrUnsupportedCriticalHeader is returned when a header is marked critical but not supported by go-jose.
+ ErrUnsupportedCriticalHeader = errors.New("go-jose/go-jose: unsupported critical header")
)
// Key management algorithms
@@ -167,8 +170,8 @@ const (
)
// supportedCritical is the set of supported extensions that are understood and processed.
-var supportedCritical = map[string]bool{
- headerB64: true,
+var supportedCritical = map[string]struct{}{
+ headerB64: {},
}
// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing).
@@ -346,6 +349,32 @@ func (parsed rawHeader) getCritical() ([]string, error) {
return q, nil
}
+// checkNoCritical verifies there are no critical headers present.
+func (parsed rawHeader) checkNoCritical() error {
+ if _, ok := parsed[headerCritical]; ok {
+ return ErrUnsupportedCriticalHeader
+ }
+
+ return nil
+}
+
+// checkSupportedCritical verifies there are no unsupported critical headers.
+// Supported headers are passed in as a set: map of names to empty structs
+func (parsed rawHeader) checkSupportedCritical(supported map[string]struct{}) error {
+ crit, err := parsed.getCritical()
+ if err != nil {
+ return err
+ }
+
+ for _, name := range crit {
+ if _, ok := supported[name]; !ok {
+ return ErrUnsupportedCriticalHeader
+ }
+ }
+
+ return nil
+}
+
// getP2C extracts parsed "p2c" from the raw JSON.
func (parsed rawHeader) getP2C() (int, error) {
v := parsed[headerP2C]
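Reviewer note: the helper pair added above splits the old inline logic: `checkNoCritical` rejects any `crit` header outright (used on the JWE path), while `checkSupportedCritical` walks the list against a set expressed as `map[string]struct{}`. A standalone sketch of that second shape, outside the library, just to make the set idiom concrete; the names here are invented, not go-jose API.

```go
package main

import (
	"errors"
	"fmt"
)

var errUnsupportedCritical = errors.New("unsupported critical header")

// checkCritical mirrors the shape of checkSupportedCritical: the supported
// extensions are a set expressed as map[string]struct{}.
func checkCritical(crit []string, supported map[string]struct{}) error {
	for _, name := range crit {
		if _, ok := supported[name]; !ok {
			return errUnsupportedCritical
		}
	}
	return nil
}

func main() {
	supported := map[string]struct{}{"b64": {}}
	fmt.Println(checkCritical([]string{"b64"}, supported))        // <nil>
	fmt.Println(checkCritical([]string{"b64", "exp"}, supported)) // unsupported critical header
}
```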
diff --git a/vendor/github.com/go-jose/go-jose/v4/signing.go b/vendor/github.com/go-jose/go-jose/v4/signing.go
index 3dec0112b..5dbd04c27 100644
--- a/vendor/github.com/go-jose/go-jose/v4/signing.go
+++ b/vendor/github.com/go-jose/go-jose/v4/signing.go
@@ -404,15 +404,23 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter
}
signature := obj.Signatures[0]
- headers := signature.mergedHeaders()
- critical, err := headers.getCritical()
- if err != nil {
- return err
+
+ if signature.header != nil {
+ // Per https://www.rfc-editor.org/rfc/rfc7515.html#section-4.1.11,
+ // 4.1.11. "crit" (Critical) Header Parameter
+ // "When used, this Header Parameter MUST be integrity
+ // protected; therefore, it MUST occur only within the JWS
+ // Protected Header."
+ err = signature.header.checkNoCritical()
+ if err != nil {
+ return err
+ }
}
- for _, name := range critical {
- if !supportedCritical[name] {
- return ErrCryptoFailure
+ if signature.protected != nil {
+ err = signature.protected.checkSupportedCritical(supportedCritical)
+ if err != nil {
+ return err
}
}
@@ -421,6 +429,7 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter
return ErrCryptoFailure
}
+ headers := signature.mergedHeaders()
alg := headers.getSignatureAlgorithm()
err = verifier.verifyPayload(input, signature.Signature, alg)
if err == nil {
@@ -469,14 +478,22 @@ func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey
outer:
for i, signature := range obj.Signatures {
- headers := signature.mergedHeaders()
- critical, err := headers.getCritical()
- if err != nil {
- continue
+ if signature.header != nil {
+ // Per https://www.rfc-editor.org/rfc/rfc7515.html#section-4.1.11,
+ // 4.1.11. "crit" (Critical) Header Parameter
+ // "When used, this Header Parameter MUST be integrity
+ // protected; therefore, it MUST occur only within the JWS
+ // Protected Header."
+ err = signature.header.checkNoCritical()
+ if err != nil {
+ continue outer
+ }
}
- for _, name := range critical {
- if !supportedCritical[name] {
+ if signature.protected != nil {
+ // Check for only supported critical headers
+ err = signature.protected.checkSupportedCritical(supportedCritical)
+ if err != nil {
continue outer
}
}
@@ -486,6 +503,7 @@ outer:
continue
}
+ headers := signature.mergedHeaders()
alg := headers.getSignatureAlgorithm()
err = verifier.verifyPayload(input, signature.Signature, alg)
if err == nil {
diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric.go b/vendor/github.com/go-jose/go-jose/v4/symmetric.go
index a69103b08..09efefb26 100644
--- a/vendor/github.com/go-jose/go-jose/v4/symmetric.go
+++ b/vendor/github.com/go-jose/go-jose/v4/symmetric.go
@@ -21,6 +21,7 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
+ "crypto/pbkdf2"
"crypto/rand"
"crypto/sha256"
"crypto/sha512"
@@ -30,8 +31,6 @@ import (
"hash"
"io"
- "golang.org/x/crypto/pbkdf2"
-
josecipher "github.com/go-jose/go-jose/v4/cipher"
)
@@ -330,7 +329,10 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie
// derive key
keyLen, h := getPbkdf2Params(alg)
- key := pbkdf2.Key(ctx.key, salt, ctx.p2c, keyLen, h)
+ key, err := pbkdf2.Key(h, string(ctx.key), salt, ctx.p2c, keyLen)
+ if err != nil {
+		return recipientInfo{}, err
+ }
// use AES cipher with derived key
block, err := aes.NewCipher(key)
@@ -432,7 +434,10 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien
// derive key
keyLen, h := getPbkdf2Params(alg)
- key := pbkdf2.Key(ctx.key, salt, p2c, keyLen, h)
+ key, err := pbkdf2.Key(h, string(ctx.key), salt, p2c, keyLen)
+ if err != nil {
+ return nil, err
+ }
// use AES cipher with derived key
block, err := aes.NewCipher(key)
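Reviewer note: this hunk swaps `golang.org/x/crypto/pbkdf2` for the `crypto/pbkdf2` package added to the standard library in Go 1.24, which is consistent with the go.mod toolchain bump and explains the new error checks: the stdlib function takes the hash constructor first, the password as a string, and can fail. A minimal sketch of the new call shape; the password, salt, and iteration count are placeholders.

```go
package main

import (
	"crypto/pbkdf2" // standard library since Go 1.24, replacing golang.org/x/crypto/pbkdf2
	"crypto/sha256"
	"fmt"
)

func main() {
	// Hash constructor first, password as a string, and an error return:
	// that is why the vendored code above now checks err after derivation.
	key, err := pbkdf2.Key(sha256.New, "example password", []byte("example salt"), 600_000, 32)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", key)
}
```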
diff --git a/vendor/github.com/letsencrypt/boulder/LICENSE.txt b/vendor/github.com/letsencrypt/boulder/LICENSE.txt
deleted file mode 100644
index fa274d92d..000000000
--- a/vendor/github.com/letsencrypt/boulder/LICENSE.txt
+++ /dev/null
@@ -1,375 +0,0 @@
-Copyright 2016 ISRG. All rights reserved.
-
-Mozilla Public License Version 2.0
-==================================
-
-1. Definitions
---------------
-
-1.1. "Contributor"
- means each individual or legal entity that creates, contributes to
- the creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
- means the combination of the Contributions of others (if any) used
- by a Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
- means Source Code Form to which the initial Contributor has attached
- the notice in Exhibit A, the Executable Form of such Source Code
- Form, and Modifications of such Source Code Form, in each case
- including portions thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- (a) that the initial Contributor has attached the notice described
- in Exhibit B to the Covered Software; or
-
- (b) that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the
- terms of a Secondary License.
-
-1.6. "Executable Form"
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
- means a work that combines Covered Software with other material, in
- a separate file or files, that is not Covered Software.
-
-1.8. "License"
- means this document.
-
-1.9. "Licensable"
- means having the right to grant, to the maximum extent possible,
- whether at the time of the initial grant or subsequently, any and
- all of the rights conveyed by this License.
-
-1.10. "Modifications"
- means any of the following:
-
- (a) any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered
- Software; or
-
- (b) any new file in Source Code Form that contains any Covered
- Software.
-
-1.11. "Patent Claims" of a Contributor
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the
- License, by the making, using, selling, offering for sale, having
- made, import, or transfer of either its Contributions or its
- Contributor Version.
-
-1.12. "Secondary License"
- means either the GNU General Public License, Version 2.0, the GNU
- Lesser General Public License, Version 2.1, the GNU Affero General
- Public License, Version 3.0, or any later versions of those
- licenses.
-
-1.13. "Source Code Form"
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that
- controls, is controlled by, or is under common control with You. For
- purposes of this definition, "control" means (a) the power, direct
- or indirect, to cause the direction or management of such entity,
- whether by contract or otherwise, or (b) ownership of more than
- fifty percent (50%) of the outstanding shares or beneficial
- ownership of such entity.
-
-2. License Grants and Conditions
---------------------------------
-
-2.1. Grants
-
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
-(b) under Patent Claims of such Contributor to make, use, sell, offer
- for sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-The licenses granted in Section 2.1 with respect to any Contribution
-become effective for each Contribution on the date the Contributor first
-distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-The licenses granted in this Section 2 are the only rights granted under
-this License. No additional rights or licenses will be implied from the
-distribution or licensing of Covered Software under this License.
-Notwithstanding Section 2.1(b) above, no patent license is granted by a
-Contributor:
-
-(a) for any code that a Contributor has removed from Covered Software;
- or
-
-(b) for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
-(c) under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
-This License does not grant any rights in the trademarks, service marks,
-or logos of any Contributor (except as may be necessary to comply with
-the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-No Contributor makes additional grants as a result of Your choice to
-distribute the Covered Software under a subsequent version of this
-License (see Section 10.2) or under the terms of a Secondary License (if
-permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-Each Contributor represents that the Contributor believes its
-Contributions are its original creation(s) or it has sufficient rights
-to grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-This License is not intended to limit any rights You have under
-applicable copyright doctrines of fair use, fair dealing, or other
-equivalents.
-
-2.7. Conditions
-
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-in Section 2.1.
-
-3. Responsibilities
--------------------
-
-3.1. Distribution of Source Form
-
-All distribution of Covered Software in Source Code Form, including any
-Modifications that You create or to which You contribute, must be under
-the terms of this License. You must inform recipients that the Source
-Code Form of the Covered Software is governed by the terms of this
-License, and how they can obtain a copy of this License. You may not
-attempt to alter or restrict the recipients' rights in the Source Code
-Form.
-
-3.2. Distribution of Executable Form
-
-If You distribute Covered Software in Executable Form then:
-
-(a) such Covered Software must also be made available in Source Code
- Form, as described in Section 3.1, and You must inform recipients of
- the Executable Form how they can obtain a copy of such Source Code
- Form by reasonable means in a timely manner, at a charge no more
- than the cost of distribution to the recipient; and
-
-(b) You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter
- the recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-You may create and distribute a Larger Work under terms of Your choice,
-provided that You also comply with the requirements of this License for
-the Covered Software. If the Larger Work is a combination of Covered
-Software with a work governed by one or more Secondary Licenses, and the
-Covered Software is not Incompatible With Secondary Licenses, this
-License permits You to additionally distribute such Covered Software
-under the terms of such Secondary License(s), so that the recipient of
-the Larger Work may, at their option, further distribute the Covered
-Software under the terms of either this License or such Secondary
-License(s).
-
-3.4. Notices
-
-You may not remove or alter the substance of any license notices
-(including copyright notices, patent notices, disclaimers of warranty,
-or limitations of liability) contained within the Source Code Form of
-the Covered Software, except that You may alter any license notices to
-the extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-You may choose to offer, and to charge a fee for, warranty, support,
-indemnity or liability obligations to one or more recipients of Covered
-Software. However, You may do so only on Your own behalf, and not on
-behalf of any Contributor. You must make it absolutely clear that any
-such warranty, support, indemnity, or liability obligation is offered by
-You alone, and You hereby agree to indemnify every Contributor for any
-liability incurred by such Contributor as a result of warranty, support,
-indemnity or liability terms You offer. You may include additional
-disclaimers of warranty and limitations of liability specific to any
-jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
----------------------------------------------------
-
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Software due to
-statute, judicial order, or regulation then You must: (a) comply with
-the terms of this License to the maximum extent possible; and (b)
-describe the limitations and the code they affect. Such description must
-be placed in a text file included with all distributions of the Covered
-Software under this License. Except to the extent prohibited by statute
-or regulation, such description must be sufficiently detailed for a
-recipient of ordinary skill to be able to understand it.
-
-5. Termination
---------------
-
-5.1. The rights granted under this License will terminate automatically
-if You fail to comply with any of its terms. However, if You become
-compliant, then the rights granted under this License from a particular
-Contributor are reinstated (a) provisionally, unless and until such
-Contributor explicitly and finally terminates Your grants, and (b) on an
-ongoing basis, if such Contributor fails to notify You of the
-non-compliance by some reasonable means prior to 60 days after You have
-come back into compliance. Moreover, Your grants from a particular
-Contributor are reinstated on an ongoing basis if such Contributor
-notifies You of the non-compliance by some reasonable means, this is the
-first time You have received notice of non-compliance with this License
-from such Contributor, and You become compliant prior to 30 days after
-Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-infringement claim (excluding declaratory judgment actions,
-counter-claims, and cross-claims) alleging that a Contributor Version
-directly or indirectly infringes any patent, then the rights granted to
-You by any and all Contributors for the Covered Software under Section
-2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-end user license agreements (excluding distributors and resellers) which
-have been validly granted by You or Your distributors under this License
-prior to termination shall survive termination.
-
-************************************************************************
-* *
-* 6. Disclaimer of Warranty *
-* ------------------------- *
-* *
-* Covered Software is provided under this License on an "as is" *
-* basis, without warranty of any kind, either expressed, implied, or *
-* statutory, including, without limitation, warranties that the *
-* Covered Software is free of defects, merchantable, fit for a *
-* particular purpose or non-infringing. The entire risk as to the *
-* quality and performance of the Covered Software is with You. *
-* Should any Covered Software prove defective in any respect, You *
-* (not any Contributor) assume the cost of any necessary servicing, *
-* repair, or correction. This disclaimer of warranty constitutes an *
-* essential part of this License. No use of any Covered Software is *
-* authorized under this License except under this disclaimer. *
-* *
-************************************************************************
-
-************************************************************************
-* *
-* 7. Limitation of Liability *
-* -------------------------- *
-* *
-* Under no circumstances and under no legal theory, whether tort *
-* (including negligence), contract, or otherwise, shall any *
-* Contributor, or anyone who distributes Covered Software as *
-* permitted above, be liable to You for any direct, indirect, *
-* special, incidental, or consequential damages of any character *
-* including, without limitation, damages for lost profits, loss of *
-* goodwill, work stoppage, computer failure or malfunction, or any *
-* and all other commercial damages or losses, even if such party *
-* shall have been informed of the possibility of such damages. This *
-* limitation of liability shall not apply to liability for death or *
-* personal injury resulting from such party's negligence to the *
-* extent applicable law prohibits such limitation. Some *
-* jurisdictions do not allow the exclusion or limitation of *
-* incidental or consequential damages, so this exclusion and *
-* limitation may not apply to You. *
-* *
-************************************************************************
-
-8. Litigation
--------------
-
-Any litigation relating to this License may be brought only in the
-courts of a jurisdiction where the defendant maintains its principal
-place of business and such litigation shall be governed by laws of that
-jurisdiction, without reference to its conflict-of-law provisions.
-Nothing in this Section shall prevent a party's ability to bring
-cross-claims or counter-claims.
-
-9. Miscellaneous
-----------------
-
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision of this License is held to be
-unenforceable, such provision shall be reformed only to the extent
-necessary to make it enforceable. Any law or regulation which provides
-that the language of a contract shall be construed against the drafter
-shall not be used to construe this License against a Contributor.
-
-10. Versions of the License
----------------------------
-
-10.1. New Versions
-
-Mozilla Foundation is the license steward. Except as provided in Section
-10.3, no one other than the license steward has the right to modify or
-publish new versions of this License. Each version will be given a
-distinguishing version number.
-
-10.2. Effect of New Versions
-
-You may distribute the Covered Software under the terms of the version
-of the License under which You originally received the Covered Software,
-or under the terms of any subsequent version published by the license
-steward.
-
-10.3. Modified Versions
-
-If you create software not governed by this License, and you want to
-create a new license for such software, you may create and use a
-modified version of this License if you rename the license and remove
-any references to the name of the license steward (except to note that
-such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-Licenses
-
-If You choose to distribute Source Code Form that is Incompatible With
-Secondary Licenses under the terms of this version of the License, the
-notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
--------------------------------------------
-
- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular
-file, then You may include the notice in a location (such as a LICENSE
-file in a relevant directory) where a recipient would be likely to look
-for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
----------------------------------------------------------
-
- This Source Code Form is "Incompatible With Secondary Licenses", as
- defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/letsencrypt/boulder/core/challenges.go b/vendor/github.com/letsencrypt/boulder/core/challenges.go
deleted file mode 100644
index d5e7a8729..000000000
--- a/vendor/github.com/letsencrypt/boulder/core/challenges.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package core
-
-import "fmt"
-
-func newChallenge(challengeType AcmeChallenge, token string) Challenge {
- return Challenge{
- Type: challengeType,
- Status: StatusPending,
- Token: token,
- }
-}
-
-// HTTPChallenge01 constructs a http-01 challenge.
-func HTTPChallenge01(token string) Challenge {
- return newChallenge(ChallengeTypeHTTP01, token)
-}
-
-// DNSChallenge01 constructs a dns-01 challenge.
-func DNSChallenge01(token string) Challenge {
- return newChallenge(ChallengeTypeDNS01, token)
-}
-
-// TLSALPNChallenge01 constructs a tls-alpn-01 challenge.
-func TLSALPNChallenge01(token string) Challenge {
- return newChallenge(ChallengeTypeTLSALPN01, token)
-}
-
-// NewChallenge constructs a challenge of the given kind. It returns an
-// error if the challenge type is unrecognized.
-func NewChallenge(kind AcmeChallenge, token string) (Challenge, error) {
- switch kind {
- case ChallengeTypeHTTP01:
- return HTTPChallenge01(token), nil
- case ChallengeTypeDNS01:
- return DNSChallenge01(token), nil
- case ChallengeTypeTLSALPN01:
- return TLSALPNChallenge01(token), nil
- default:
- return Challenge{}, fmt.Errorf("unrecognized challenge type %q", kind)
- }
-}
diff --git a/vendor/github.com/letsencrypt/boulder/core/interfaces.go b/vendor/github.com/letsencrypt/boulder/core/interfaces.go
deleted file mode 100644
index 59b55a3f4..000000000
--- a/vendor/github.com/letsencrypt/boulder/core/interfaces.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package core
-
-import (
- "github.com/letsencrypt/boulder/identifier"
-)
-
-// PolicyAuthority defines the public interface for the Boulder PA
-// TODO(#5891): Move this interface to a more appropriate location.
-type PolicyAuthority interface {
- WillingToIssue([]string) error
- ChallengesFor(identifier.ACMEIdentifier) ([]Challenge, error)
- ChallengeTypeEnabled(AcmeChallenge) bool
- CheckAuthz(*Authorization) error
-}
diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go
deleted file mode 100644
index c01f551ab..000000000
--- a/vendor/github.com/letsencrypt/boulder/core/objects.go
+++ /dev/null
@@ -1,505 +0,0 @@
-package core
-
-import (
- "crypto"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "hash/fnv"
- "net"
- "strings"
- "time"
-
- "github.com/go-jose/go-jose/v4"
- "golang.org/x/crypto/ocsp"
-
- "github.com/letsencrypt/boulder/identifier"
- "github.com/letsencrypt/boulder/probs"
- "github.com/letsencrypt/boulder/revocation"
-)
-
-// AcmeStatus defines the state of a given authorization
-type AcmeStatus string
-
-// These statuses are the states of authorizations, challenges, and registrations
-const (
- StatusUnknown = AcmeStatus("unknown") // Unknown status; the default
- StatusPending = AcmeStatus("pending") // In process; client has next action
- StatusProcessing = AcmeStatus("processing") // In process; server has next action
- StatusReady = AcmeStatus("ready") // Order is ready for finalization
- StatusValid = AcmeStatus("valid") // Object is valid
- StatusInvalid = AcmeStatus("invalid") // Validation failed
- StatusRevoked = AcmeStatus("revoked") // Object no longer valid
- StatusDeactivated = AcmeStatus("deactivated") // Object has been deactivated
-)
-
-// AcmeResource values identify different types of ACME resources
-type AcmeResource string
-
-// The types of ACME resources
-const (
- ResourceNewReg = AcmeResource("new-reg")
- ResourceNewAuthz = AcmeResource("new-authz")
- ResourceNewCert = AcmeResource("new-cert")
- ResourceRevokeCert = AcmeResource("revoke-cert")
- ResourceRegistration = AcmeResource("reg")
- ResourceChallenge = AcmeResource("challenge")
- ResourceAuthz = AcmeResource("authz")
- ResourceKeyChange = AcmeResource("key-change")
-)
-
-// AcmeChallenge values identify different types of ACME challenges
-type AcmeChallenge string
-
-// These types are the available challenges
-const (
- ChallengeTypeHTTP01 = AcmeChallenge("http-01")
- ChallengeTypeDNS01 = AcmeChallenge("dns-01")
- ChallengeTypeTLSALPN01 = AcmeChallenge("tls-alpn-01")
-)
-
-// IsValid tests whether the challenge is a known challenge
-func (c AcmeChallenge) IsValid() bool {
- switch c {
- case ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01:
- return true
- default:
- return false
- }
-}
-
-// OCSPStatus defines the state of OCSP for a domain
-type OCSPStatus string
-
-// These status are the states of OCSP
-const (
- OCSPStatusGood = OCSPStatus("good")
- OCSPStatusRevoked = OCSPStatus("revoked")
- // Not a real OCSP status. This is a placeholder we write before the
- // actual precertificate is issued, to ensure we never return "good" before
- // issuance succeeds, for BR compliance reasons.
- OCSPStatusNotReady = OCSPStatus("wait")
-)
-
-var OCSPStatusToInt = map[OCSPStatus]int{
- OCSPStatusGood: ocsp.Good,
- OCSPStatusRevoked: ocsp.Revoked,
- OCSPStatusNotReady: -1,
-}
-
-// DNSPrefix is attached to DNS names in DNS challenges
-const DNSPrefix = "_acme-challenge"
-
-type RawCertificateRequest struct {
- CSR JSONBuffer `json:"csr"` // The encoded CSR
-}
-
-// Registration objects represent non-public metadata attached
-// to account keys.
-type Registration struct {
- // Unique identifier
- ID int64 `json:"id,omitempty" db:"id"`
-
- // Account key to which the details are attached
- Key *jose.JSONWebKey `json:"key"`
-
- // Contact URIs
- Contact *[]string `json:"contact,omitempty"`
-
- // Agreement with terms of service
- Agreement string `json:"agreement,omitempty"`
-
- // InitialIP is the IP address from which the registration was created
- InitialIP net.IP `json:"initialIp"`
-
- // CreatedAt is the time the registration was created.
- CreatedAt *time.Time `json:"createdAt,omitempty"`
-
- Status AcmeStatus `json:"status"`
-}
-
-// ValidationRecord represents a validation attempt against a specific URL/hostname
-// and the IP addresses that were resolved and used.
-type ValidationRecord struct {
- // SimpleHTTP only
- URL string `json:"url,omitempty"`
-
- // Shared
- Hostname string `json:"hostname,omitempty"`
- Port string `json:"port,omitempty"`
- AddressesResolved []net.IP `json:"addressesResolved,omitempty"`
- AddressUsed net.IP `json:"addressUsed,omitempty"`
- // AddressesTried contains a list of addresses tried before the `AddressUsed`.
- // Presently this will only ever be one IP from `AddressesResolved` since the
- // only retry is in the case of a v6 failure with one v4 fallback. E.g. if
- // a record with `AddressesResolved: { 127.0.0.1, ::1 }` were processed for
- // a challenge validation with the IPv6 first flag on and the ::1 address
- // failed but the 127.0.0.1 retry succeeded then the record would end up
- // being:
- // {
- // ...
- // AddressesResolved: [ 127.0.0.1, ::1 ],
- // AddressUsed: 127.0.0.1
- // AddressesTried: [ ::1 ],
- // ...
- // }
- AddressesTried []net.IP `json:"addressesTried,omitempty"`
- // ResolverAddrs is the host:port of the DNS resolver(s) that fulfilled the
- // lookup for AddressUsed. During recursive A and AAAA lookups, a record may
- // instead look like A:host:port or AAAA:host:port
- ResolverAddrs []string `json:"resolverAddrs,omitempty"`
- // UsedRSAKEX is a *temporary* addition to the validation record, so we can
- // see how many servers that we reach out to during HTTP-01 and TLS-ALPN-01
- // validation are only willing to negotiate RSA key exchange mechanisms. The
- // field is not included in the serialized json to avoid cluttering the
- // database and log lines.
- // TODO(#7321): Remove this when we have collected sufficient data.
- UsedRSAKEX bool `json:"-"`
-}
-
-// Challenge is an aggregate of all data needed for any challenges.
-//
-// Rather than define individual types for different types of
-// challenge, we just throw all the elements into one bucket,
-// together with the common metadata elements.
-type Challenge struct {
- // Type is the type of challenge encoded in this object.
- Type AcmeChallenge `json:"type"`
-
- // URL is the URL to which a response can be posted. Required for all types.
- URL string `json:"url,omitempty"`
-
- // Status is the status of this challenge. Required for all types.
- Status AcmeStatus `json:"status,omitempty"`
-
- // Validated is the time at which the server validated the challenge. Required
- // if status is valid.
- Validated *time.Time `json:"validated,omitempty"`
-
- // Error contains the error that occurred during challenge validation, if any.
- // If set, the Status must be "invalid".
- Error *probs.ProblemDetails `json:"error,omitempty"`
-
- // Token is a random value that uniquely identifies the challenge. It is used
- // by all current challenges (http-01, tls-alpn-01, and dns-01).
- Token string `json:"token,omitempty"`
-
- // ProvidedKeyAuthorization used to carry the expected key authorization from
- // the RA to the VA. However, since this field is never presented to the user
- // via the ACME API, it should not be on this type.
- //
- // Deprecated: use vapb.PerformValidationRequest.ExpectedKeyAuthorization instead.
- // TODO(#7514): Remove this.
- ProvidedKeyAuthorization string `json:"keyAuthorization,omitempty"`
-
- // Contains information about URLs used or redirected to and IPs resolved and
- // used
- ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"`
-}
-
-// ExpectedKeyAuthorization computes the expected KeyAuthorization value for
-// the challenge.
-func (ch Challenge) ExpectedKeyAuthorization(key *jose.JSONWebKey) (string, error) {
- if key == nil {
- return "", fmt.Errorf("Cannot authorize a nil key")
- }
-
- thumbprint, err := key.Thumbprint(crypto.SHA256)
- if err != nil {
- return "", err
- }
-
- return ch.Token + "." + base64.RawURLEncoding.EncodeToString(thumbprint), nil
-}
-
-// RecordsSane checks the sanity of a ValidationRecord object before sending it
-// back to the RA to be stored.
-func (ch Challenge) RecordsSane() bool {
- if ch.ValidationRecord == nil || len(ch.ValidationRecord) == 0 {
- return false
- }
-
- switch ch.Type {
- case ChallengeTypeHTTP01:
- for _, rec := range ch.ValidationRecord {
- // TODO(#7140): Add a check for ResolverAddress == "" only after the
- // core.proto change has been deployed.
- if rec.URL == "" || rec.Hostname == "" || rec.Port == "" || rec.AddressUsed == nil ||
- len(rec.AddressesResolved) == 0 {
- return false
- }
- }
- case ChallengeTypeTLSALPN01:
- if len(ch.ValidationRecord) > 1 {
- return false
- }
- if ch.ValidationRecord[0].URL != "" {
- return false
- }
- // TODO(#7140): Add a check for ResolverAddress == "" only after the
- // core.proto change has been deployed.
- if ch.ValidationRecord[0].Hostname == "" || ch.ValidationRecord[0].Port == "" ||
- ch.ValidationRecord[0].AddressUsed == nil || len(ch.ValidationRecord[0].AddressesResolved) == 0 {
- return false
- }
- case ChallengeTypeDNS01:
- if len(ch.ValidationRecord) > 1 {
- return false
- }
- // TODO(#7140): Add a check for ResolverAddress == "" only after the
- // core.proto change has been deployed.
- if ch.ValidationRecord[0].Hostname == "" {
- return false
- }
- return true
- default: // Unsupported challenge type
- return false
- }
-
- return true
-}
-
-// CheckPending ensures that a challenge object is pending and has a token.
-// This is used before offering the challenge to the client, and before actually
-// validating a challenge.
-func (ch Challenge) CheckPending() error {
- if ch.Status != StatusPending {
- return fmt.Errorf("challenge is not pending")
- }
-
- if !looksLikeAToken(ch.Token) {
- return fmt.Errorf("token is missing or malformed")
- }
-
- return nil
-}
-
-// StringID is used to generate a ID for challenges associated with new style authorizations.
-// This is necessary as these challenges no longer have a unique non-sequential identifier
-// in the new storage scheme. This identifier is generated by constructing a fnv hash over the
-// challenge token and type and encoding the first 4 bytes of it using the base64 URL encoding.
-func (ch Challenge) StringID() string {
- h := fnv.New128a()
- h.Write([]byte(ch.Token))
- h.Write([]byte(ch.Type))
- return base64.RawURLEncoding.EncodeToString(h.Sum(nil)[0:4])
-}
-
-// Authorization represents the authorization of an account key holder
-// to act on behalf of a domain. This struct is intended to be used both
-// internally and for JSON marshaling on the wire. Any fields that should be
-// suppressed on the wire (e.g., ID, regID) must be made empty before marshaling.
-type Authorization struct {
- // An identifier for this authorization, unique across
- // authorizations and certificates within this instance.
- ID string `json:"id,omitempty" db:"id"`
-
- // The identifier for which authorization is being given
- Identifier identifier.ACMEIdentifier `json:"identifier,omitempty" db:"identifier"`
-
- // The registration ID associated with the authorization
- RegistrationID int64 `json:"regId,omitempty" db:"registrationID"`
-
- // The status of the validation of this authorization
- Status AcmeStatus `json:"status,omitempty" db:"status"`
-
- // The date after which this authorization will be no
- // longer be considered valid. Note: a certificate may be issued even on the
- // last day of an authorization's lifetime. The last day for which someone can
- // hold a valid certificate based on an authorization is authorization
- // lifetime + certificate lifetime.
- Expires *time.Time `json:"expires,omitempty" db:"expires"`
-
- // An array of challenges objects used to validate the
- // applicant's control of the identifier. For authorizations
- // in process, these are challenges to be fulfilled; for
- // final authorizations, they describe the evidence that
- // the server used in support of granting the authorization.
- //
- // There should only ever be one challenge of each type in this
- // slice and the order of these challenges may not be predictable.
- Challenges []Challenge `json:"challenges,omitempty" db:"-"`
-
- // https://datatracker.ietf.org/doc/html/rfc8555#page-29
- //
- // wildcard (optional, boolean): This field MUST be present and true
- // for authorizations created as a result of a newOrder request
- // containing a DNS identifier with a value that was a wildcard
- // domain name. For other authorizations, it MUST be absent.
- // Wildcard domain names are described in Section 7.1.3.
- //
- // This is not represented in the database because we calculate it from
- // the identifier stored in the database. Unlike the identifier returned
- // as part of the authorization, the identifier we store in the database
- // can contain an asterisk.
- Wildcard bool `json:"wildcard,omitempty" db:"-"`
-}
-
-// FindChallengeByStringID will look for a challenge matching the given ID inside
-// this authorization. If found, it will return the index of that challenge within
-// the Authorization's Challenges array. Otherwise it will return -1.
-func (authz *Authorization) FindChallengeByStringID(id string) int {
- for i, c := range authz.Challenges {
- if c.StringID() == id {
- return i
- }
- }
- return -1
-}
-
-// SolvedBy will look through the Authorizations challenges, returning the type
-// of the *first* challenge it finds with Status: valid, or an error if no
-// challenge is valid.
-func (authz *Authorization) SolvedBy() (AcmeChallenge, error) {
- if len(authz.Challenges) == 0 {
- return "", fmt.Errorf("Authorization has no challenges")
- }
- for _, chal := range authz.Challenges {
- if chal.Status == StatusValid {
- return chal.Type, nil
- }
- }
- return "", fmt.Errorf("Authorization not solved by any challenge")
-}
-
-// JSONBuffer fields get encoded and decoded JOSE-style, in base64url encoding
-// with stripped padding.
-type JSONBuffer []byte
-
-// MarshalJSON encodes a JSONBuffer for transmission.
-func (jb JSONBuffer) MarshalJSON() (result []byte, err error) {
- return json.Marshal(base64.RawURLEncoding.EncodeToString(jb))
-}
-
-// UnmarshalJSON decodes a JSONBuffer to an object.
-func (jb *JSONBuffer) UnmarshalJSON(data []byte) (err error) {
- var str string
- err = json.Unmarshal(data, &str)
- if err != nil {
- return err
- }
- *jb, err = base64.RawURLEncoding.DecodeString(strings.TrimRight(str, "="))
- return
-}
-
-// Certificate objects are entirely internal to the server. The only
-// thing exposed on the wire is the certificate itself.
-type Certificate struct {
- ID int64 `db:"id"`
- RegistrationID int64 `db:"registrationID"`
-
- Serial string `db:"serial"`
- Digest string `db:"digest"`
- DER []byte `db:"der"`
- Issued time.Time `db:"issued"`
- Expires time.Time `db:"expires"`
-}
-
-// CertificateStatus structs are internal to the server. They represent the
-// latest data about the status of the certificate, required for generating new
-// OCSP responses and determining if a certificate has been revoked.
-type CertificateStatus struct {
- ID int64 `db:"id"`
-
- Serial string `db:"serial"`
-
- // status: 'good' or 'revoked'. Note that good, expired certificates remain
- // with status 'good' but don't necessarily get fresh OCSP responses.
- Status OCSPStatus `db:"status"`
-
- // ocspLastUpdated: The date and time of the last time we generated an OCSP
- // response. If we have never generated one, this has the zero value of
- // time.Time, i.e. Jan 1 1970.
- OCSPLastUpdated time.Time `db:"ocspLastUpdated"`
-
- // revokedDate: If status is 'revoked', this is the date and time it was
- // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970.
- RevokedDate time.Time `db:"revokedDate"`
-
- // revokedReason: If status is 'revoked', this is the reason code for the
- // revocation. Otherwise it is zero (which happens to be the reason
- // code for 'unspecified').
- RevokedReason revocation.Reason `db:"revokedReason"`
-
- LastExpirationNagSent time.Time `db:"lastExpirationNagSent"`
-
- // NotAfter and IsExpired are convenience columns which allow expensive
- // queries to quickly filter out certificates that we don't need to care about
- // anymore. These are particularly useful for the expiration mailer and CRL
- // updater. See https://github.com/letsencrypt/boulder/issues/1864.
- NotAfter time.Time `db:"notAfter"`
- IsExpired bool `db:"isExpired"`
-
- // Note: this is not an issuance.IssuerNameID because that would create an
- // import cycle between core and issuance.
- // Note2: This field used to be called `issuerID`. We keep the old name in
- // the DB, but update the Go field name to be clear which type of ID this
- // is.
- IssuerNameID int64 `db:"issuerID"`
-}
-
-// FQDNSet contains the SHA256 hash of the lowercased, comma joined dNSNames
-// contained in a certificate.
-type FQDNSet struct {
- ID int64
- SetHash []byte
- Serial string
- Issued time.Time
- Expires time.Time
-}
-
-// SCTDERs is a convenience type
-type SCTDERs [][]byte
-
-// CertDER is a convenience type that helps differentiate what the
-// underlying byte slice contains
-type CertDER []byte
-
-// SuggestedWindow is a type exposed inside the RenewalInfo resource.
-type SuggestedWindow struct {
- Start time.Time `json:"start"`
- End time.Time `json:"end"`
-}
-
-// IsWithin returns true if the given time is within the suggested window,
-// inclusive of the start time and exclusive of the end time.
-func (window SuggestedWindow) IsWithin(now time.Time) bool {
- return !now.Before(window.Start) && now.Before(window.End)
-}
-
-// RenewalInfo is a type which is exposed to clients which query the renewalInfo
-// endpoint specified in draft-aaron-ari.
-type RenewalInfo struct {
- SuggestedWindow SuggestedWindow `json:"suggestedWindow"`
-}
-
-// RenewalInfoSimple constructs a `RenewalInfo` object and suggested window
-// using a very simple renewal calculation: calculate a point 2/3rds of the way
-// through the validity period, then give a 2-day window around that. Both the
-// `issued` and `expires` timestamps are expected to be UTC.
-func RenewalInfoSimple(issued time.Time, expires time.Time) RenewalInfo {
- validity := expires.Add(time.Second).Sub(issued)
- renewalOffset := validity / time.Duration(3)
- idealRenewal := expires.Add(-renewalOffset)
- return RenewalInfo{
- SuggestedWindow: SuggestedWindow{
- Start: idealRenewal.Add(-24 * time.Hour),
- End: idealRenewal.Add(24 * time.Hour),
- },
- }
-}
-
-// RenewalInfoImmediate constructs a `RenewalInfo` object with a suggested
-// window in the past. Per the draft-ietf-acme-ari-01 spec, clients should
-// attempt to renew immediately if the suggested window is in the past. The
-// passed `now` is assumed to be a timestamp representing the current moment in
-// time.
-func RenewalInfoImmediate(now time.Time) RenewalInfo {
- oneHourAgo := now.Add(-1 * time.Hour)
- return RenewalInfo{
- SuggestedWindow: SuggestedWindow{
- Start: oneHourAgo,
- End: oneHourAgo.Add(time.Minute * 30),
- },
- }
-}
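The removed objects.go above includes boulder's ARI helpers; for reference, the suggested-window arithmetic in RenewalInfoSimple can be reproduced by hand: the ideal renewal point sits roughly two-thirds of the way through the validity period, with a 24-hour window on either side. The sketch below uses illustrative dates only and does not call the removed code.

```go
package renewaldemo

import (
	"fmt"
	"time"
)

// SuggestedWindow reproduces the window math from the removed helper for a
// hypothetical 90-day certificate: ideal renewal near day 60, window roughly
// day 59 through day 61.
func SuggestedWindow() {
	issued := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	expires := issued.Add(90 * 24 * time.Hour)

	validity := expires.Add(time.Second).Sub(issued)
	idealRenewal := expires.Add(-validity / 3) // two-thirds through validity

	fmt.Println("window start:", idealRenewal.Add(-24*time.Hour))
	fmt.Println("window end:  ", idealRenewal.Add(24*time.Hour))
}
```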
diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go
deleted file mode 100644
index 641521f16..000000000
--- a/vendor/github.com/letsencrypt/boulder/core/util.go
+++ /dev/null
@@ -1,383 +0,0 @@
-package core
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha256"
- "crypto/x509"
- "encoding/base64"
- "encoding/hex"
- "encoding/pem"
- "errors"
- "expvar"
- "fmt"
- "io"
- "math/big"
- mrand "math/rand"
- "os"
- "path"
- "reflect"
- "regexp"
- "sort"
- "strings"
- "time"
- "unicode"
-
- "github.com/go-jose/go-jose/v4"
- "google.golang.org/protobuf/types/known/durationpb"
- "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const Unspecified = "Unspecified"
-
-// Package Variables Variables
-
-// BuildID is set by the compiler (using -ldflags "-X core.BuildID $(git rev-parse --short HEAD)")
-// and is used by GetBuildID
-var BuildID string
-
-// BuildHost is set by the compiler and is used by GetBuildHost
-var BuildHost string
-
-// BuildTime is set by the compiler and is used by GetBuildTime
-var BuildTime string
-
-func init() {
- expvar.NewString("BuildID").Set(BuildID)
- expvar.NewString("BuildTime").Set(BuildTime)
-}
-
-// Random stuff
-
-type randSource interface {
- Read(p []byte) (n int, err error)
-}
-
-// RandReader is used so that it can be replaced in tests that require
-// deterministic output
-var RandReader randSource = rand.Reader
-
-// RandomString returns a randomly generated string of the requested length.
-func RandomString(byteLength int) string {
- b := make([]byte, byteLength)
- _, err := io.ReadFull(RandReader, b)
- if err != nil {
- panic(fmt.Sprintf("Error reading random bytes: %s", err))
- }
- return base64.RawURLEncoding.EncodeToString(b)
-}
-
-// NewToken produces a random string for Challenges, etc.
-func NewToken() string {
- return RandomString(32)
-}
-
-var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`)
-
-// looksLikeAToken checks whether a string represents a 32-octet value in
-// the URL-safe base64 alphabet.
-func looksLikeAToken(token string) bool {
- return tokenFormat.MatchString(token)
-}
-
-// Fingerprints
-
-// Fingerprint256 produces an unpadded, URL-safe Base64-encoded SHA256 digest
-// of the data.
-func Fingerprint256(data []byte) string {
- d := sha256.New()
- _, _ = d.Write(data) // Never returns an error
- return base64.RawURLEncoding.EncodeToString(d.Sum(nil))
-}
-
-type Sha256Digest [sha256.Size]byte
-
-// KeyDigest produces the SHA256 digest of a provided public key.
-func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) {
- switch t := key.(type) {
- case *jose.JSONWebKey:
- if t == nil {
- return Sha256Digest{}, errors.New("cannot compute digest of nil key")
- }
- return KeyDigest(t.Key)
- case jose.JSONWebKey:
- return KeyDigest(t.Key)
- default:
- keyDER, err := x509.MarshalPKIXPublicKey(key)
- if err != nil {
- return Sha256Digest{}, err
- }
- return sha256.Sum256(keyDER), nil
- }
-}
-
-// KeyDigestB64 produces a padded, standard Base64-encoded SHA256 digest of a
-// provided public key.
-func KeyDigestB64(key crypto.PublicKey) (string, error) {
- digest, err := KeyDigest(key)
- if err != nil {
- return "", err
- }
- return base64.StdEncoding.EncodeToString(digest[:]), nil
-}
-
-// KeyDigestEquals determines whether two public keys have the same digest.
-func KeyDigestEquals(j, k crypto.PublicKey) bool {
- digestJ, errJ := KeyDigestB64(j)
- digestK, errK := KeyDigestB64(k)
- // Keys that don't have a valid digest (due to marshalling problems)
- // are never equal. So, e.g. nil keys are not equal.
- if errJ != nil || errK != nil {
- return false
- }
- return digestJ == digestK
-}
-
-// PublicKeysEqual determines whether two public keys are identical.
-func PublicKeysEqual(a, b crypto.PublicKey) (bool, error) {
- switch ak := a.(type) {
- case *rsa.PublicKey:
- return ak.Equal(b), nil
- case *ecdsa.PublicKey:
- return ak.Equal(b), nil
- default:
- return false, fmt.Errorf("unsupported public key type %T", ak)
- }
-}
-
-// SerialToString converts a certificate serial number (big.Int) to a String
-// consistently.
-func SerialToString(serial *big.Int) string {
- return fmt.Sprintf("%036x", serial)
-}
-
-// StringToSerial converts a string into a certificate serial number (big.Int)
-// consistently.
-func StringToSerial(serial string) (*big.Int, error) {
- var serialNum big.Int
- if !ValidSerial(serial) {
- return &serialNum, fmt.Errorf("invalid serial number %q", serial)
- }
- _, err := fmt.Sscanf(serial, "%036x", &serialNum)
- return &serialNum, err
-}
-
-// ValidSerial tests whether the input string represents a syntactically
-// valid serial number, i.e., that it is a valid hex string between 32
-// and 36 characters long.
-func ValidSerial(serial string) bool {
- // Originally, serial numbers were 32 hex characters long. We later increased
- // them to 36, but we allow the shorter ones because they exist in some
- // production databases.
- if len(serial) != 32 && len(serial) != 36 {
- return false
- }
- _, err := hex.DecodeString(serial)
- return err == nil
-}
-
-// GetBuildID identifies what build is running.
-func GetBuildID() (retID string) {
- retID = BuildID
- if retID == "" {
- retID = Unspecified
- }
- return
-}
-
-// GetBuildTime identifies when this build was made
-func GetBuildTime() (retID string) {
- retID = BuildTime
- if retID == "" {
- retID = Unspecified
- }
- return
-}
-
-// GetBuildHost identifies the building host
-func GetBuildHost() (retID string) {
- retID = BuildHost
- if retID == "" {
- retID = Unspecified
- }
- return
-}
-
-// IsAnyNilOrZero returns whether any of the supplied values are nil, or (if not)
-// if any of them is its type's zero-value. This is useful for validating that
-// all required fields on a proto message are present.
-func IsAnyNilOrZero(vals ...interface{}) bool {
- for _, val := range vals {
- switch v := val.(type) {
- case nil:
- return true
- case bool:
- if !v {
- return true
- }
- case string:
- if v == "" {
- return true
- }
- case []string:
- if len(v) == 0 {
- return true
- }
- case byte:
- // Byte is an alias for uint8 and will cover that case.
- if v == 0 {
- return true
- }
- case []byte:
- if len(v) == 0 {
- return true
- }
- case int:
- if v == 0 {
- return true
- }
- case int8:
- if v == 0 {
- return true
- }
- case int16:
- if v == 0 {
- return true
- }
- case int32:
- if v == 0 {
- return true
- }
- case int64:
- if v == 0 {
- return true
- }
- case uint:
- if v == 0 {
- return true
- }
- case uint16:
- if v == 0 {
- return true
- }
- case uint32:
- if v == 0 {
- return true
- }
- case uint64:
- if v == 0 {
- return true
- }
- case float32:
- if v == 0 {
- return true
- }
- case float64:
- if v == 0 {
- return true
- }
- case time.Time:
- if v.IsZero() {
- return true
- }
- case *timestamppb.Timestamp:
- if v == nil || v.AsTime().IsZero() {
- return true
- }
- case *durationpb.Duration:
- if v == nil || v.AsDuration() == time.Duration(0) {
- return true
- }
- default:
- if reflect.ValueOf(v).IsZero() {
- return true
- }
- }
- }
- return false
-}
-
-// UniqueLowerNames returns the set of all unique names in the input after all
-// of them are lowercased. The returned names will be in their lowercased form
-// and sorted alphabetically.
-func UniqueLowerNames(names []string) (unique []string) {
- nameMap := make(map[string]int, len(names))
- for _, name := range names {
- nameMap[strings.ToLower(name)] = 1
- }
-
- unique = make([]string, 0, len(nameMap))
- for name := range nameMap {
- unique = append(unique, name)
- }
- sort.Strings(unique)
- return
-}
-
-// HashNames returns a hash of the names requested. This is intended for use
-// when interacting with the orderFqdnSets table and rate limiting.
-func HashNames(names []string) []byte {
- names = UniqueLowerNames(names)
- hash := sha256.Sum256([]byte(strings.Join(names, ",")))
- return hash[:]
-}
-
-// LoadCert loads a PEM certificate specified by filename or returns an error
-func LoadCert(filename string) (*x509.Certificate, error) {
- certPEM, err := os.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- block, _ := pem.Decode(certPEM)
- if block == nil {
- return nil, fmt.Errorf("no data in cert PEM file %q", filename)
- }
- cert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return nil, err
- }
- return cert, nil
-}
-
-// retryJitter is used to prevent bunched retried queries from falling into lockstep
-const retryJitter = 0.2
-
-// RetryBackoff calculates a backoff time based on number of retries, will always
-// add jitter so requests that start in unison won't fall into lockstep. Because of
-// this the returned duration can always be larger than the maximum by a factor of
-// retryJitter. Adapted from
-// https://github.com/grpc/grpc-go/blob/v1.11.3/backoff.go#L77-L96
-func RetryBackoff(retries int, base, max time.Duration, factor float64) time.Duration {
- if retries == 0 {
- return 0
- }
- backoff, fMax := float64(base), float64(max)
- for backoff < fMax && retries > 1 {
- backoff *= factor
- retries--
- }
- if backoff > fMax {
- backoff = fMax
- }
- // Randomize backoff delays so that if a cluster of requests start at
- // the same time, they won't operate in lockstep.
- backoff *= (1 - retryJitter) + 2*retryJitter*mrand.Float64()
- return time.Duration(backoff)
-}
-
-// IsASCII determines if every character in a string is encoded in
-// the ASCII character set.
-func IsASCII(str string) bool {
- for _, r := range str {
- if r > unicode.MaxASCII {
- return false
- }
- }
- return true
-}
-
-func Command() string {
- return path.Base(os.Args[0])
-}
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
deleted file mode 100644
index 198c09db4..000000000
--- a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package goodkey
-
-import (
- "crypto"
- "crypto/sha256"
- "encoding/base64"
- "encoding/hex"
- "errors"
- "os"
-
- "github.com/letsencrypt/boulder/core"
- "github.com/letsencrypt/boulder/strictyaml"
-)
-
-// blockedKeys is a type for maintaining a map of SHA256 hashes
-// of SubjectPublicKeyInfo's that should be considered blocked.
-// blockedKeys are created by using loadBlockedKeysList.
-type blockedKeys map[core.Sha256Digest]bool
-
-var ErrWrongDecodedSize = errors.New("not enough bytes decoded for sha256 hash")
-
-// blocked checks if the given public key is considered administratively
-// blocked based on a SHA256 hash of the SubjectPublicKeyInfo.
-// Important: blocked should not be called except on a blockedKeys instance
-// returned from loadBlockedKeysList.
-// function should not be used until after `loadBlockedKeysList` has returned.
-func (b blockedKeys) blocked(key crypto.PublicKey) (bool, error) {
- hash, err := core.KeyDigest(key)
- if err != nil {
- // the bool result should be ignored when err is != nil but to be on the
- // paranoid side return true anyway so that a key we can't compute the
- // digest for will always be blocked even if a caller foolishly discards the
- // err result.
- return true, err
- }
- return b[hash], nil
-}
-
-// loadBlockedKeysList creates a blockedKeys object that can be used to check if
-// a key is blocked. It creates a lookup map from a list of
-// SHA256 hashes of SubjectPublicKeyInfo's in the input YAML file
-// with the expected format:
-//
-// blocked:
-// - cuwGhNNI6nfob5aqY90e7BleU6l7rfxku4X3UTJ3Z7M=
-//
-// - Qebc1V3SkX3izkYRGNJilm9Bcuvf0oox4U2Rn+b4JOE=
-//
-// If no hashes are found in the input YAML an error is returned.
-func loadBlockedKeysList(filename string) (*blockedKeys, error) {
- yamlBytes, err := os.ReadFile(filename)
- if err != nil {
- return nil, err
- }
-
- var list struct {
- BlockedHashes []string `yaml:"blocked"`
- BlockedHashesHex []string `yaml:"blockedHashesHex"`
- }
- err = strictyaml.Unmarshal(yamlBytes, &list)
- if err != nil {
- return nil, err
- }
-
- if len(list.BlockedHashes) == 0 && len(list.BlockedHashesHex) == 0 {
- return nil, errors.New("no blocked hashes in YAML")
- }
-
- blockedKeys := make(blockedKeys, len(list.BlockedHashes)+len(list.BlockedHashesHex))
- for _, b64Hash := range list.BlockedHashes {
- decoded, err := base64.StdEncoding.DecodeString(b64Hash)
- if err != nil {
- return nil, err
- }
- if len(decoded) != sha256.Size {
- return nil, ErrWrongDecodedSize
- }
- var sha256Digest core.Sha256Digest
- copy(sha256Digest[:], decoded[0:sha256.Size])
- blockedKeys[sha256Digest] = true
- }
- for _, hexHash := range list.BlockedHashesHex {
- decoded, err := hex.DecodeString(hexHash)
- if err != nil {
- return nil, err
- }
- if len(decoded) != sha256.Size {
- return nil, ErrWrongDecodedSize
- }
- var sha256Digest core.Sha256Digest
- copy(sha256Digest[:], decoded[0:sha256.Size])
- blockedKeys[sha256Digest] = true
- }
- return &blockedKeys, nil
-}
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
deleted file mode 100644
index 04a075d35..000000000
--- a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
+++ /dev/null
@@ -1,460 +0,0 @@
-package goodkey
-
-import (
- "context"
- "crypto"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rsa"
- "errors"
- "fmt"
- "math/big"
- "sync"
-
- "github.com/letsencrypt/boulder/core"
-
- "github.com/titanous/rocacheck"
-)
-
-// To generate, run: primes 2 752 | tr '\n' ,
-var smallPrimeInts = []int64{
- 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
- 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107,
- 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167,
- 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
- 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
- 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
- 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,
- 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491,
- 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
- 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641,
- 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709,
- 719, 727, 733, 739, 743, 751,
-}
-
-// singleton defines the object of a Singleton pattern
-var (
- smallPrimesSingleton sync.Once
- smallPrimesProduct *big.Int
-)
-
-type Config struct {
- // AllowedKeys enables or disables specific key algorithms and sizes. If
- // nil, defaults to just those keys allowed by the Let's Encrypt CPS.
- AllowedKeys *AllowedKeys
- // WeakKeyFile is the path to a JSON file containing truncated modulus hashes
- // of known weak RSA keys. If this config value is empty, then RSA modulus
- // hash checking will be disabled.
- WeakKeyFile string
- // BlockedKeyFile is the path to a YAML file containing base64-encoded SHA256
- // hashes of PKIX Subject Public Keys that should be blocked. If this config
- // value is empty, then blocked key checking will be disabled.
- BlockedKeyFile string
- // FermatRounds is an integer number of rounds of Fermat's factorization
- // method that should be performed to attempt to detect keys whose modulus can
- // be trivially factored because the two factors are very close to each other.
- // If this config value is empty (0), no factorization will be attempted.
- FermatRounds int
-}
-
-// AllowedKeys is a map of six specific key algorithm and size combinations to
-// booleans indicating whether keys of that type are considered good.
-type AllowedKeys struct {
- // Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple
- // of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes
- // Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which
- // have a known method to easily compute their private key, such as Debian Weak
- // Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at
- // common key sizes, so we restrict all issuance to those common key sizes.
- RSA2048 bool
- RSA3072 bool
- RSA4096 bool
- // Baseline Requirements, Section 6.1.5 requires that ECDSA keys be valid
- // points on the NIST P-256, P-384, or P-521 elliptic curves.
- ECDSAP256 bool
- ECDSAP384 bool
- ECDSAP521 bool
-}
-
-// LetsEncryptCPS encodes the five key algorithms and sizes allowed by the Let's
-// Encrypt CPS CV-SSL Subscriber Certificate Profile: RSA 2048, RSA 3076, RSA
-// 4096, ECDSA 256 and ECDSA P384.
-// https://github.com/letsencrypt/cp-cps/blob/main/CP-CPS.md#dv-ssl-subscriber-certificate
-// If this is ever changed, the CP/CPS MUST be changed first.
-func LetsEncryptCPS() AllowedKeys {
- return AllowedKeys{
- RSA2048: true,
- RSA3072: true,
- RSA4096: true,
- ECDSAP256: true,
- ECDSAP384: true,
- }
-}
-
-// ErrBadKey represents an error with a key. It is distinct from the various
-// ways in which an ACME request can have an erroneous key (BadPublicKeyError,
-// BadCSRError) because this library is used to check both JWS signing keys and
-// keys in CSRs.
-var ErrBadKey = errors.New("")
-
-func badKey(msg string, args ...interface{}) error {
- return fmt.Errorf("%w%s", ErrBadKey, fmt.Errorf(msg, args...))
-}
-
-// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey functionality to KeyPolicy,
-// rather than storing a full sa.SQLStorageAuthority. This allows external
-// users who don’t want to import all of boulder/sa, and makes testing
-// significantly simpler.
-// On success, the function returns a boolean which is true if the key is blocked.
-type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error)
-
-// KeyPolicy determines which types of key may be used with various boulder
-// operations.
-type KeyPolicy struct {
- allowedKeys AllowedKeys
- weakRSAList *WeakRSAKeys
- blockedList *blockedKeys
- fermatRounds int
- blockedCheck BlockedKeyCheckFunc
-}
-
-// NewPolicy returns a key policy based on the given configuration, with sane
-// defaults. If the config's AllowedKeys is nil, the LetsEncryptCPS AllowedKeys
-// is used. If the config's WeakKeyFile or BlockedKeyFile paths are empty, those
-// checks are disabled. If the config's FermatRounds is 0, Fermat Factorization
-// is disabled.
-func NewPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
- if config == nil {
- config = &Config{}
- }
- kp := KeyPolicy{
- blockedCheck: bkc,
- }
- if config.AllowedKeys == nil {
- kp.allowedKeys = LetsEncryptCPS()
- } else {
- kp.allowedKeys = *config.AllowedKeys
- }
- if config.WeakKeyFile != "" {
- keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile)
- if err != nil {
- return KeyPolicy{}, err
- }
- kp.weakRSAList = keyList
- }
- if config.BlockedKeyFile != "" {
- blocked, err := loadBlockedKeysList(config.BlockedKeyFile)
- if err != nil {
- return KeyPolicy{}, err
- }
- kp.blockedList = blocked
- }
- if config.FermatRounds < 0 {
- return KeyPolicy{}, fmt.Errorf("Fermat factorization rounds cannot be negative: %d", config.FermatRounds)
- }
- kp.fermatRounds = config.FermatRounds
- return kp, nil
-}
-
-// GoodKey returns true if the key is acceptable for both TLS use and account
-// key use (our requirements are the same for either one), according to basic
-// strength and algorithm checking. GoodKey only supports pointers: *rsa.PublicKey
-// and *ecdsa.PublicKey. It will reject non-pointer types.
-// TODO: Support JSONWebKeys once go-jose migration is done.
-func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) error {
- // Early rejection of unacceptable key types to guard subsequent checks.
- switch t := key.(type) {
- case *rsa.PublicKey, *ecdsa.PublicKey:
- break
- default:
- return badKey("unsupported key type %T", t)
- }
- // If there is a blocked list configured then check if the public key is one
- // that has been administratively blocked.
- if policy.blockedList != nil {
- if blocked, err := policy.blockedList.blocked(key); err != nil {
- return fmt.Errorf("error checking blocklist for key: %v", key)
- } else if blocked {
- return badKey("public key is forbidden")
- }
- }
- if policy.blockedCheck != nil {
- digest, err := core.KeyDigest(key)
- if err != nil {
- return badKey("%w", err)
- }
- exists, err := policy.blockedCheck(ctx, digest[:])
- if err != nil {
- return err
- } else if exists {
- return badKey("public key is forbidden")
- }
- }
- switch t := key.(type) {
- case *rsa.PublicKey:
- return policy.goodKeyRSA(t)
- case *ecdsa.PublicKey:
- return policy.goodKeyECDSA(t)
- default:
- return badKey("unsupported key type %T", key)
- }
-}
-
-// GoodKeyECDSA determines if an ECDSA pubkey meets our requirements
-func (policy *KeyPolicy) goodKeyECDSA(key *ecdsa.PublicKey) (err error) {
- // Check the curve.
- //
- // The validity of the curve is an assumption for all following tests.
- err = policy.goodCurve(key.Curve)
- if err != nil {
- return err
- }
-
- // Key validation routine adapted from NIST SP800-56A § 5.6.2.3.2.
- //
- //
- // Assuming a prime field since a) we are only allowing such curves and b)
- // crypto/elliptic only supports prime curves. Where this assumption
- // simplifies the code below, it is explicitly stated and explained. If ever
- // adapting this code to support non-prime curves, refer to NIST SP800-56A §
- // 5.6.2.3.2 and adapt this code appropriately.
- params := key.Params()
-
- // SP800-56A § 5.6.2.3.2 Step 1.
- // Partial check of the public key for an invalid range in the EC group:
- // Verify that key is not the point at infinity O.
- // This code assumes that the point at infinity is (0,0), which is the
- // case for all supported curves.
- if isPointAtInfinityNISTP(key.X, key.Y) {
- return badKey("key x, y must not be the point at infinity")
- }
-
- // SP800-56A § 5.6.2.3.2 Step 2.
- // "Verify that x_Q and y_Q are integers in the interval [0,p-1] in the
- // case that q is an odd prime p, or that x_Q and y_Q are bit strings
- // of length m bits in the case that q = 2**m."
- //
- // Prove prime field: ASSUMED.
- // Prove q != 2: ASSUMED. (Curve parameter. No supported curve has q == 2.)
- // Prime field && q != 2 => q is an odd prime p
- // Therefore "verify that x, y are in [0, p-1]" satisfies step 2.
- //
- // Therefore verify that both x and y of the public key point have the unique
- // correct representation of an element in the underlying field by verifying
- // that x and y are integers in [0, p-1].
- if key.X.Sign() < 0 || key.Y.Sign() < 0 {
- return badKey("key x, y must not be negative")
- }
-
- if key.X.Cmp(params.P) >= 0 || key.Y.Cmp(params.P) >= 0 {
- return badKey("key x, y must not exceed P-1")
- }
-
- // SP800-56A § 5.6.2.3.2 Step 3.
- // "If q is an odd prime p, verify that (y_Q)**2 === (x_Q)***3 + a*x_Q + b (mod p).
- // If q = 2**m, verify that (y_Q)**2 + (x_Q)*(y_Q) == (x_Q)**3 + a*(x_Q)*2 + b in
- // the finite field of size 2**m.
- // (Ensures that the public key is on the correct elliptic curve.)"
- //
- // q is an odd prime p: proven/assumed above.
- // a = -3 for all supported curves.
- //
- // Therefore step 3 is satisfied simply by showing that
- // y**2 === x**3 - 3*x + B (mod P).
- //
- // This proves that the public key is on the correct elliptic curve.
- // But in practice, this test is provided by crypto/elliptic, so use that.
- if !key.Curve.IsOnCurve(key.X, key.Y) {
- return badKey("key point is not on the curve")
- }
-
- // SP800-56A § 5.6.2.3.2 Step 4.
- // "Verify that n*Q == Ø.
- // (Ensures that the public key has the correct order. Along with check 1,
- // ensures that the public key is in the correct range in the correct EC
- // subgroup, that is, it is in the correct EC subgroup and is not the
- // identity element.)"
- //
- // Ensure that public key has the correct order:
- // verify that n*Q = Ø.
- //
- // n*Q = Ø iff n*Q is the point at infinity (see step 1).
- ox, oy := key.Curve.ScalarMult(key.X, key.Y, params.N.Bytes())
- if !isPointAtInfinityNISTP(ox, oy) {
- return badKey("public key does not have correct order")
- }
-
- // End of SP800-56A § 5.6.2.3.2 Public Key Validation Routine.
- // Key is valid.
- return nil
-}
-
-// Returns true iff the point (x,y) on NIST P-256, NIST P-384 or NIST P-521 is
-// the point at infinity. These curves all have the same point at infinity
-// (0,0). This function must ONLY be used on points on curves verified to have
-// (0,0) as their point at infinity.
-func isPointAtInfinityNISTP(x, y *big.Int) bool {
- return x.Sign() == 0 && y.Sign() == 0
-}
-
-// GoodCurve determines if an elliptic curve meets our requirements.
-func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) {
- // Simply use a whitelist for now.
- params := c.Params()
- switch {
- case policy.allowedKeys.ECDSAP256 && params == elliptic.P256().Params():
- return nil
- case policy.allowedKeys.ECDSAP384 && params == elliptic.P384().Params():
- return nil
- case policy.allowedKeys.ECDSAP521 && params == elliptic.P521().Params():
- return nil
- default:
- return badKey("ECDSA curve %v not allowed", params.Name)
- }
-}
-
-// GoodKeyRSA determines if a RSA pubkey meets our requirements
-func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) error {
- modulus := key.N
-
- err := policy.goodRSABitLen(key)
- if err != nil {
- return err
- }
-
- if policy.weakRSAList != nil && policy.weakRSAList.Known(key) {
- return badKey("key is on a known weak RSA key list")
- }
-
- // Rather than support arbitrary exponents, which significantly increases
- // the size of the key space we allow, we restrict E to the de facto standard
- // RSA exponent 65537. There is no specific standards document that specifies
- // 65537 as the 'best' exponent, but ITU X.509 Annex C suggests there are
- // notable merits for using it if using a fixed exponent.
- //
- // The CABF Baseline Requirements state:
- // The CA SHALL confirm that the value of the public exponent is an
- // odd number equal to 3 or more. Additionally, the public exponent
- // SHOULD be in the range between 2^16 + 1 and 2^256-1.
- //
- // By only allowing one exponent, which fits these constraints, we satisfy
- // these requirements.
- if key.E != 65537 {
- return badKey("key exponent must be 65537")
- }
-
- // The modulus SHOULD also have the following characteristics: an odd
- // number, not the power of a prime, and have no factors smaller than 752.
- // TODO: We don't yet check for "power of a prime."
- if checkSmallPrimes(modulus) {
- return badKey("key divisible by small prime")
- }
- // Check for weak keys generated by Infineon hardware
- // (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17)
- if rocacheck.IsWeak(key) {
- return badKey("key generated by vulnerable Infineon-based hardware")
- }
- // Check if the key can be easily factored via Fermat's factorization method.
- if policy.fermatRounds > 0 {
- err := checkPrimeFactorsTooClose(modulus, policy.fermatRounds)
- if err != nil {
- return badKey("key generated with factors too close together: %w", err)
- }
- }
-
- return nil
-}
-
-func (policy *KeyPolicy) goodRSABitLen(key *rsa.PublicKey) error {
- // See comment on AllowedKeys above.
- modulusBitLen := key.N.BitLen()
- switch {
- case modulusBitLen == 2048 && policy.allowedKeys.RSA2048:
- return nil
- case modulusBitLen == 3072 && policy.allowedKeys.RSA3072:
- return nil
- case modulusBitLen == 4096 && policy.allowedKeys.RSA4096:
- return nil
- default:
- return badKey("key size not supported: %d", modulusBitLen)
- }
-}
-
-// Returns true iff integer i is divisible by any of the primes in smallPrimes.
-//
-// Short circuits; execution time is dependent on i. Do not use this on secret
-// values.
-//
-// Rather than checking each prime individually (invoking Mod on each),
-// multiply the primes together and let GCD do our work for us: if the
- // GCD between the key and the product of these primes is not one, we know we have
-// a bad key. This is substantially faster than checking each prime
-// individually.
-func checkSmallPrimes(i *big.Int) bool {
- smallPrimesSingleton.Do(func() {
- smallPrimesProduct = big.NewInt(1)
- for _, prime := range smallPrimeInts {
- smallPrimesProduct.Mul(smallPrimesProduct, big.NewInt(prime))
- }
- })
-
- // When the GCD is 1, i and smallPrimesProduct are coprime, meaning they
- // share no common factors. When the GCD is not one, it is the product of
- // all common factors, meaning we've identified at least one small prime
- // which invalidates i as a valid key.
-
- var result big.Int
- result.GCD(nil, nil, i, smallPrimesProduct)
- return result.Cmp(big.NewInt(1)) != 0
-}
-
-// Returns an error if the modulus n is able to be factored into primes p and q
-// via Fermat's factorization method. This method relies on the two primes being
-// very close together, which means that they were almost certainly not picked
-// independently from a uniform random distribution. Basically, if we can factor
-// the key this easily, so can anyone else.
-func checkPrimeFactorsTooClose(n *big.Int, rounds int) error {
- // Pre-allocate some big numbers that we'll use a lot down below.
- one := big.NewInt(1)
- bb := new(big.Int)
-
- // Any odd integer is equal to a difference of squares of integers:
- // n = a^2 - b^2 = (a + b)(a - b)
- // Any RSA public key modulus is equal to a product of two primes:
- // n = pq
- // Here we try to find values for a and b, since doing so also gives us the
- // prime factors p = (a + b) and q = (a - b).
-
- // We start with a close to the square root of the modulus n, to start with
- // two candidate prime factors that are as close together as possible and
- // work our way out from there. Specifically, we set a = ceil(sqrt(n)), the
- // first integer greater than the square root of n. Unfortunately, big.Int's
- // built-in square root function takes the floor, so we have to add one to get
- // the ceil.
- a := new(big.Int)
- a.Sqrt(n).Add(a, one)
-
- // We calculate b2 to see if it is a perfect square (i.e. b^2), and therefore
- // b is an integer. Specifically, b2 = a^2 - n.
- b2 := new(big.Int)
- b2.Mul(a, a).Sub(b2, n)
-
- for range rounds {
- // To see if b2 is a perfect square, we take its square root, square that,
- // and check to see if we got the same result back.
- bb.Sqrt(b2).Mul(bb, bb)
- if b2.Cmp(bb) == 0 {
- // b2 is a perfect square, so we've found integer values of a and b,
- // and can easily compute p and q as their sum and difference.
- bb.Sqrt(bb)
- p := new(big.Int).Add(a, bb)
- q := new(big.Int).Sub(a, bb)
- return fmt.Errorf("public modulus n = pq factored into p: %s; q: %s", p, q)
- }
-
- // Set up the next iteration by incrementing a by one and recalculating b2.
- a.Add(a, one)
- b2.Mul(a, a).Sub(b2, n)
- }
- return nil
-}
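
The deleted `checkPrimeFactorsTooClose` walks Fermat's method: start at a = ceil(sqrt(n)), test whether a² − n is a perfect square, and widen a until the round budget runs out. A minimal standalone sketch of the same idea on a toy modulus (101·103); the helper name is made up for the example and this is not Boulder's implementation:

```go
package main

import (
	"fmt"
	"math/big"
)

// fermatFactor attempts to factor n assuming its two factors are close
// together. It returns the factors and true on success.
func fermatFactor(n *big.Int, rounds int) (*big.Int, *big.Int, bool) {
	one := big.NewInt(1)

	// a = ceil(sqrt(n)); big.Int.Sqrt floors, so add one.
	a := new(big.Int).Sqrt(n)
	a.Add(a, one)

	b2 := new(big.Int)
	b := new(big.Int)
	for i := 0; i < rounds; i++ {
		// b2 = a^2 - n. If b2 is a perfect square b^2, then n = (a+b)(a-b).
		b2.Mul(a, a).Sub(b2, n)
		b.Sqrt(b2)
		if new(big.Int).Mul(b, b).Cmp(b2) == 0 {
			return new(big.Int).Add(a, b), new(big.Int).Sub(a, b), true
		}
		a.Add(a, one)
	}
	return nil, nil, false
}

func main() {
	n := big.NewInt(101 * 103) // two close primes: factored on the first round
	p, q, ok := fermatFactor(n, 100)
	fmt.Println(ok, p, q) // true 103 101
}
```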
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/weak.go b/vendor/github.com/letsencrypt/boulder/goodkey/weak.go
deleted file mode 100644
index dd7afd5e4..000000000
--- a/vendor/github.com/letsencrypt/boulder/goodkey/weak.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package goodkey
-
-// This file defines a basic method for testing if a given RSA public key is on one of
-// the Debian weak key lists and is therefore considered compromised. Instead of
-// directly loading the hash suffixes from the individual lists we flatten them all
-// into a single JSON list using cmd/weak-key-flatten for ease of use.
-
-import (
- "crypto/rsa"
- "crypto/sha1"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "os"
-)
-
-type truncatedHash [10]byte
-
-type WeakRSAKeys struct {
- suffixes map[truncatedHash]struct{}
-}
-
-func LoadWeakRSASuffixes(path string) (*WeakRSAKeys, error) {
- f, err := os.ReadFile(path)
- if err != nil {
- return nil, err
- }
-
- var suffixList []string
- err = json.Unmarshal(f, &suffixList)
- if err != nil {
- return nil, err
- }
-
- wk := &WeakRSAKeys{suffixes: make(map[truncatedHash]struct{})}
- for _, suffix := range suffixList {
- err := wk.addSuffix(suffix)
- if err != nil {
- return nil, err
- }
- }
- return wk, nil
-}
-
-func (wk *WeakRSAKeys) addSuffix(str string) error {
- var suffix truncatedHash
- decoded, err := hex.DecodeString(str)
- if err != nil {
- return err
- }
- if len(decoded) != 10 {
- return fmt.Errorf("unexpected suffix length of %d", len(decoded))
- }
- copy(suffix[:], decoded)
- wk.suffixes[suffix] = struct{}{}
- return nil
-}
-
-func (wk *WeakRSAKeys) Known(key *rsa.PublicKey) bool {
- // Hash input is in the format "Modulus={upper-case hex of modulus}\n"
- hash := sha1.Sum([]byte(fmt.Sprintf("Modulus=%X\n", key.N.Bytes())))
- var suffix truncatedHash
- copy(suffix[:], hash[10:])
- _, present := wk.suffixes[suffix]
- return present
-}
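
The deleted `Known` check hashes the string `Modulus={upper-case hex}\n` with SHA-1 and looks up the last ten bytes of the digest in the flattened Debian weak-key list. A small standalone sketch of how that lookup suffix is derived, generating a throwaway key purely for illustration:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

func main() {
	// Throwaway key, only to have a modulus to hash.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Same hash input format as the deleted WeakRSAKeys.Known.
	sum := sha1.Sum([]byte(fmt.Sprintf("Modulus=%X\n", key.N.Bytes())))

	// The flattened list stores the trailing 10 bytes of the 20-byte digest.
	fmt.Println("lookup suffix:", hex.EncodeToString(sum[10:]))
}
```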
diff --git a/vendor/github.com/letsencrypt/boulder/identifier/identifier.go b/vendor/github.com/letsencrypt/boulder/identifier/identifier.go
deleted file mode 100644
index cbf228f86..000000000
--- a/vendor/github.com/letsencrypt/boulder/identifier/identifier.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// The identifier package defines types for RFC 8555 ACME identifiers.
-package identifier
-
-// IdentifierType is a named string type for registered ACME identifier types.
-// See https://tools.ietf.org/html/rfc8555#section-9.7.7
-type IdentifierType string
-
-const (
- // DNS is specified in RFC 8555 for DNS type identifiers.
- DNS = IdentifierType("dns")
-)
-
-// ACMEIdentifier is a struct encoding an identifier that can be validated. The
-// protocol allows for different types of identifier to be supported (DNS
-// names, IP addresses, etc.), but currently we only support RFC 8555 DNS type
-// identifiers for domain names.
-type ACMEIdentifier struct {
- // Type is the registered IdentifierType of the identifier.
- Type IdentifierType `json:"type"`
- // Value is the value of the identifier. For a DNS type identifier it is
- // a domain name.
- Value string `json:"value"`
-}
-
-// DNSIdentifier is a convenience function for creating an ACMEIdentifier with
-// Type DNS for a given domain name.
-func DNSIdentifier(domain string) ACMEIdentifier {
- return ACMEIdentifier{
- Type: DNS,
- Value: domain,
- }
-}
diff --git a/vendor/github.com/letsencrypt/boulder/probs/probs.go b/vendor/github.com/letsencrypt/boulder/probs/probs.go
deleted file mode 100644
index ec6c272ae..000000000
--- a/vendor/github.com/letsencrypt/boulder/probs/probs.go
+++ /dev/null
@@ -1,343 +0,0 @@
-package probs
-
-import (
- "fmt"
- "net/http"
-
- "github.com/letsencrypt/boulder/identifier"
-)
-
-const (
- // Error types that can be used in ACME payloads. These are sorted in the
- // same order as they are defined in RFC8555 Section 6.7. We do not implement
- // the `compound`, `externalAccountRequired`, or `userActionRequired` errors,
- // because we have no path that would return them.
- AccountDoesNotExistProblem = ProblemType("accountDoesNotExist")
- AlreadyRevokedProblem = ProblemType("alreadyRevoked")
- BadCSRProblem = ProblemType("badCSR")
- BadNonceProblem = ProblemType("badNonce")
- BadPublicKeyProblem = ProblemType("badPublicKey")
- BadRevocationReasonProblem = ProblemType("badRevocationReason")
- BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm")
- CAAProblem = ProblemType("caa")
- // ConflictProblem is a problem type that is not defined in RFC8555.
- ConflictProblem = ProblemType("conflict")
- ConnectionProblem = ProblemType("connection")
- DNSProblem = ProblemType("dns")
- InvalidContactProblem = ProblemType("invalidContact")
- MalformedProblem = ProblemType("malformed")
- OrderNotReadyProblem = ProblemType("orderNotReady")
- RateLimitedProblem = ProblemType("rateLimited")
- RejectedIdentifierProblem = ProblemType("rejectedIdentifier")
- ServerInternalProblem = ProblemType("serverInternal")
- TLSProblem = ProblemType("tls")
- UnauthorizedProblem = ProblemType("unauthorized")
- UnsupportedContactProblem = ProblemType("unsupportedContact")
- UnsupportedIdentifierProblem = ProblemType("unsupportedIdentifier")
-
- ErrorNS = "urn:ietf:params:acme:error:"
-)
-
-// ProblemType defines the error types in the ACME protocol
-type ProblemType string
-
-// ProblemDetails objects represent problem documents
-// https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00
-type ProblemDetails struct {
- Type ProblemType `json:"type,omitempty"`
- Detail string `json:"detail,omitempty"`
- // HTTPStatus is the HTTP status code the ProblemDetails should probably be sent
- // as.
- HTTPStatus int `json:"status,omitempty"`
- // SubProblems are optional additional per-identifier problems. See
- // RFC 8555 Section 6.7.1: https://tools.ietf.org/html/rfc8555#section-6.7.1
- SubProblems []SubProblemDetails `json:"subproblems,omitempty"`
-}
-
-// SubProblemDetails represents sub-problems specific to an identifier that are
-// related to a top-level ProblemDetails.
-// See RFC 8555 Section 6.7.1: https://tools.ietf.org/html/rfc8555#section-6.7.1
-type SubProblemDetails struct {
- ProblemDetails
- Identifier identifier.ACMEIdentifier `json:"identifier"`
-}
-
-func (pd *ProblemDetails) Error() string {
- return fmt.Sprintf("%s :: %s", pd.Type, pd.Detail)
-}
-
-// WithSubProblems returns a new ProblemDetails instance created by adding the
-// provided subProbs to the existing ProblemDetails.
-func (pd *ProblemDetails) WithSubProblems(subProbs []SubProblemDetails) *ProblemDetails {
- return &ProblemDetails{
- Type: pd.Type,
- Detail: pd.Detail,
- HTTPStatus: pd.HTTPStatus,
- SubProblems: append(pd.SubProblems, subProbs...),
- }
-}
-
-// Helper functions which construct the basic RFC8555 Problem Documents, with
-// the Type already set and the Details supplied by the caller.
-
-// AccountDoesNotExist returns a ProblemDetails representing an
-// AccountDoesNotExistProblem error
-func AccountDoesNotExist(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: AccountDoesNotExistProblem,
- Detail: detail,
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad
-// Request status code.
-func AlreadyRevoked(detail string, a ...any) *ProblemDetails {
- return &ProblemDetails{
- Type: AlreadyRevokedProblem,
- Detail: fmt.Sprintf(detail, a...),
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// BadCSR returns a ProblemDetails representing a BadCSRProblem.
-func BadCSR(detail string, a ...any) *ProblemDetails {
- return &ProblemDetails{
- Type: BadCSRProblem,
- Detail: fmt.Sprintf(detail, a...),
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad
-// Request status code.
-func BadNonce(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: BadNonceProblem,
- Detail: detail,
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad
-// Request status code.
-func BadPublicKey(detail string, a ...any) *ProblemDetails {
- return &ProblemDetails{
- Type: BadPublicKeyProblem,
- Detail: fmt.Sprintf(detail, a...),
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// BadRevocationReason returns a ProblemDetails representing
-// a BadRevocationReasonProblem
-func BadRevocationReason(detail string, a ...any) *ProblemDetails {
- return &ProblemDetails{
- Type: BadRevocationReasonProblem,
- Detail: fmt.Sprintf(detail, a...),
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem
-// and a 400 Bad Request status code.
-func BadSignatureAlgorithm(detail string, a ...any) *ProblemDetails {
- return &ProblemDetails{
- Type: BadSignatureAlgorithmProblem,
- Detail: fmt.Sprintf(detail, a...),
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// CAA returns a ProblemDetails representing a CAAProblem
-func CAA(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: CAAProblem,
- Detail: detail,
- HTTPStatus: http.StatusForbidden,
- }
-}
-
-// Connection returns a ProblemDetails representing a ConnectionProblem
-// error
-func Connection(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: ConnectionProblem,
- Detail: detail,
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// DNS returns a ProblemDetails representing a DNSProblem
-func DNS(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: DNSProblem,
- Detail: detail,
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// InvalidContact returns a ProblemDetails representing an InvalidContactProblem.
-func InvalidContact(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: InvalidContactProblem,
- Detail: detail,
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad
-// Request status code.
-func Malformed(detail string, a ...any) *ProblemDetails {
- if len(a) > 0 {
- detail = fmt.Sprintf(detail, a...)
- }
- return &ProblemDetails{
- Type: MalformedProblem,
- Detail: detail,
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem
-func OrderNotReady(detail string, a ...any) *ProblemDetails {
- return &ProblemDetails{
- Type: OrderNotReadyProblem,
- Detail: fmt.Sprintf(detail, a...),
- HTTPStatus: http.StatusForbidden,
- }
-}
-
-// RateLimited returns a ProblemDetails representing a RateLimitedProblem error
-func RateLimited(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: RateLimitedProblem,
- Detail: detail,
- HTTPStatus: http.StatusTooManyRequests,
- }
-}
-
-// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad
-// Request status code.
-func RejectedIdentifier(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: RejectedIdentifierProblem,
- Detail: detail,
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a
-// 500 Internal Server Failure status code.
-func ServerInternal(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: ServerInternalProblem,
- Detail: detail,
- HTTPStatus: http.StatusInternalServerError,
- }
-}
-
-// TLS returns a ProblemDetails representing a TLSProblem error
-func TLS(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: TLSProblem,
- Detail: detail,
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403
-// Forbidden status code.
-func Unauthorized(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: UnauthorizedProblem,
- Detail: detail,
- HTTPStatus: http.StatusForbidden,
- }
-}
-
-// UnsupportedContact returns a ProblemDetails representing an
-// UnsupportedContactProblem
-func UnsupportedContact(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: UnsupportedContactProblem,
- Detail: detail,
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// UnsupportedIdentifier returns a ProblemDetails representing an
-// UnsupportedIdentifierProblem
-func UnsupportedIdentifier(detail string, a ...any) *ProblemDetails {
- return &ProblemDetails{
- Type: UnsupportedIdentifierProblem,
- Detail: fmt.Sprintf(detail, a...),
- HTTPStatus: http.StatusBadRequest,
- }
-}
-
-// Additional helper functions that return variations on MalformedProblem with
-// different HTTP status codes set.
-
-// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request
-// Timeout status code.
-func Canceled(detail string, a ...any) *ProblemDetails {
- if len(a) > 0 {
- detail = fmt.Sprintf(detail, a...)
- }
- return &ProblemDetails{
- Type: MalformedProblem,
- Detail: detail,
- HTTPStatus: http.StatusRequestTimeout,
- }
-}
-
-// Conflict returns a ProblemDetails with a ConflictProblem and a 409 Conflict
-// status code.
-func Conflict(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: ConflictProblem,
- Detail: detail,
- HTTPStatus: http.StatusConflict,
- }
-}
-
-// ContentLengthRequired returns a ProblemDetails representing a missing
-// Content-Length header error
-func ContentLengthRequired() *ProblemDetails {
- return &ProblemDetails{
- Type: MalformedProblem,
- Detail: "missing Content-Length header",
- HTTPStatus: http.StatusLengthRequired,
- }
-}
-
-// InvalidContentType returns a ProblemDetails suitable for a missing
-// ContentType header, or an incorrect ContentType header
-func InvalidContentType(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: MalformedProblem,
- Detail: detail,
- HTTPStatus: http.StatusUnsupportedMediaType,
- }
-}
-
-// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP
-// method error.
-func MethodNotAllowed() *ProblemDetails {
- return &ProblemDetails{
- Type: MalformedProblem,
- Detail: "Method not allowed",
- HTTPStatus: http.StatusMethodNotAllowed,
- }
-}
-
-// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found
-// status code.
-func NotFound(detail string) *ProblemDetails {
- return &ProblemDetails{
- Type: MalformedProblem,
- Detail: detail,
- HTTPStatus: http.StatusNotFound,
- }
-}
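
All of the helpers above fill the same few fields of an RFC 8555 problem document, which serializes to plain JSON. A standalone sketch of the resulting wire shape for a `rateLimited` problem with one sub-problem; the local types here are for illustration only, not the deleted package:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Local copies of the problem-document shape, just to show the JSON output.
type subProblem struct {
	Type       string            `json:"type,omitempty"`
	Detail     string            `json:"detail,omitempty"`
	Identifier map[string]string `json:"identifier"`
}

type problemDetails struct {
	Type        string       `json:"type,omitempty"`
	Detail      string       `json:"detail,omitempty"`
	HTTPStatus  int          `json:"status,omitempty"`
	SubProblems []subProblem `json:"subproblems,omitempty"`
}

func main() {
	p := problemDetails{
		Type:       "urn:ietf:params:acme:error:rateLimited",
		Detail:     "too many new orders recently",
		HTTPStatus: http.StatusTooManyRequests,
		SubProblems: []subProblem{{
			Type:       "urn:ietf:params:acme:error:rateLimited",
			Detail:     "limit reached for example.com",
			Identifier: map[string]string{"type": "dns", "value": "example.com"},
		}},
	}
	out, _ := json.MarshalIndent(p, "", "  ")
	fmt.Println(string(out))
}
```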
diff --git a/vendor/github.com/letsencrypt/boulder/revocation/reasons.go b/vendor/github.com/letsencrypt/boulder/revocation/reasons.go
deleted file mode 100644
index 50f556be0..000000000
--- a/vendor/github.com/letsencrypt/boulder/revocation/reasons.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package revocation
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "golang.org/x/crypto/ocsp"
-)
-
-// Reason is used to specify a certificate revocation reason
-type Reason int
-
-// ReasonToString provides a map from reason code to string
-var ReasonToString = map[Reason]string{
- ocsp.Unspecified: "unspecified",
- ocsp.KeyCompromise: "keyCompromise",
- ocsp.CACompromise: "cACompromise",
- ocsp.AffiliationChanged: "affiliationChanged",
- ocsp.Superseded: "superseded",
- ocsp.CessationOfOperation: "cessationOfOperation",
- ocsp.CertificateHold: "certificateHold",
- // 7 is unused
- ocsp.RemoveFromCRL: "removeFromCRL",
- ocsp.PrivilegeWithdrawn: "privilegeWithdrawn",
- ocsp.AACompromise: "aAcompromise",
-}
-
-// UserAllowedReasons contains the subset of Reasons which users are
-// allowed to use
-var UserAllowedReasons = map[Reason]struct{}{
- ocsp.Unspecified: {},
- ocsp.KeyCompromise: {},
- ocsp.Superseded: {},
- ocsp.CessationOfOperation: {},
-}
-
-// AdminAllowedReasons contains the subset of Reasons which admins are allowed
-// to use. Reasons not found here will soon be forbidden from appearing in CRLs
-// or OCSP responses by root programs.
-var AdminAllowedReasons = map[Reason]struct{}{
- ocsp.Unspecified: {},
- ocsp.KeyCompromise: {},
- ocsp.Superseded: {},
- ocsp.CessationOfOperation: {},
- ocsp.PrivilegeWithdrawn: {},
-}
-
-// UserAllowedReasonsMessage contains a string describing a list of user allowed
-// revocation reasons. This is useful when a revocation is rejected because it
-// is not a valid user supplied reason and the allowed values must be
-// communicated. This variable is populated during package initialization.
-var UserAllowedReasonsMessage = ""
-
-func init() {
- // Build a slice of ints from the allowed reason codes.
- // We want a slice because iterating `UserAllowedReasons` will change order
- // and make the message unpredictable and cumbersome for unit testing.
- // We use []ints instead of []Reason to use `sort.Ints` without fuss.
- var allowed []int
- for reason := range UserAllowedReasons {
- allowed = append(allowed, int(reason))
- }
- sort.Ints(allowed)
-
- var reasonStrings []string
- for _, reason := range allowed {
- reasonStrings = append(reasonStrings, fmt.Sprintf("%s (%d)",
- ReasonToString[Reason(reason)], reason))
- }
- UserAllowedReasonsMessage = strings.Join(reasonStrings, ", ")
-}
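
The `init` above sorts the reason codes before joining so the rendered message is deterministic; with the x/crypto/ocsp constants used (0, 1, 4, 5) it comes out as `unspecified (0), keyCompromise (1), superseded (4), cessationOfOperation (5)`. A minimal standalone sketch of the same sort-then-join pattern:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	reasonToString := map[int]string{
		0: "unspecified", 1: "keyCompromise", 4: "superseded", 5: "cessationOfOperation",
	}

	// Map iteration order is random, so collect and sort the codes first
	// to keep the rendered message stable.
	var codes []int
	for code := range reasonToString {
		codes = append(codes, code)
	}
	sort.Ints(codes)

	var parts []string
	for _, code := range codes {
		parts = append(parts, fmt.Sprintf("%s (%d)", reasonToString[code], code))
	}
	fmt.Println(strings.Join(parts, ", "))
}
```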
diff --git a/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go b/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go
deleted file mode 100644
index 8e3bae996..000000000
--- a/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Package strictyaml provides a strict YAML unmarshaller based on `go-yaml/yaml`
-package strictyaml
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-
- "gopkg.in/yaml.v3"
-)
-
-// Unmarshal takes a byte array and an interface passed by reference. The
-// d.Decode will read the next YAML-encoded value from its input and store it in
-// the value pointed to by yamlObj. Any config keys from the incoming YAML
-// document which do not correspond to expected keys in the config struct will
-// result in errors.
-//
-// TODO(https://github.com/go-yaml/yaml/issues/639): Replace this function with
-// yaml.Unmarshal once a more ergonomic way to set unmarshal options is added
-// upstream.
-func Unmarshal(b []byte, yamlObj interface{}) error {
- r := bytes.NewReader(b)
-
- d := yaml.NewDecoder(r)
- d.KnownFields(true)
-
- // d.Decode will mutate yamlObj
- err := d.Decode(yamlObj)
-
- if err != nil {
- // io.EOF is returned when the YAML document is empty.
- if errors.Is(err, io.EOF) {
- return fmt.Errorf("unmarshalling YAML, bytes cannot be nil: %w", err)
- }
- return fmt.Errorf("unmarshalling YAML: %w", err)
- }
-
- // As bytes are read by the decoder, the length of the byte buffer should
- // decrease. If it doesn't, there's a problem.
- if r.Len() != 0 {
- return fmt.Errorf("yaml object of size %d bytes had %d bytes of unexpected unconsumed trailers", r.Size(), r.Len())
- }
-
- return nil
-}
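
The decoder above gets its strictness from yaml.v3's `KnownFields(true)`, which turns unknown document keys into decode errors. A minimal sketch of the same pattern, assuming `gopkg.in/yaml.v3` is available; the config type is invented for the example:

```go
package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v3"
)

type config struct {
	Name    string `yaml:"name"`
	Retries int    `yaml:"retries"`
}

func main() {
	// "retrys" is not a field of config, so strict decoding fails.
	doc := []byte("name: demo\nretrys: 3\n")

	d := yaml.NewDecoder(bytes.NewReader(doc))
	d.KnownFields(true)

	var c config
	if err := d.Decode(&c); err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Printf("accepted: %+v\n", c)
}
```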
diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
deleted file mode 100644
index 416d1bbbf..000000000
--- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go
+++ /dev/null
@@ -1,38 +0,0 @@
-//go:build appengine
-// +build appengine
-
-package colorable
-
-import (
- "io"
- "os"
-
- _ "github.com/mattn/go-isatty"
-)
-
-// NewColorable returns new instance of Writer which handles escape sequence.
-func NewColorable(file *os.File) io.Writer {
- if file == nil {
- panic("nil passed instead of *os.File to NewColorable()")
- }
-
- return file
-}
-
-// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
-func NewColorableStdout() io.Writer {
- return os.Stdout
-}
-
-// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
-func NewColorableStderr() io.Writer {
- return os.Stderr
-}
-
-// EnableColorsStdout enable colors if possible.
-func EnableColorsStdout(enabled *bool) func() {
- if enabled != nil {
- *enabled = true
- }
- return func() {}
-}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go
index 766d94603..c1a78aa94 100644
--- a/vendor/github.com/mattn/go-colorable/colorable_others.go
+++ b/vendor/github.com/mattn/go-colorable/colorable_others.go
@@ -1,5 +1,5 @@
-//go:build !windows && !appengine
-// +build !windows,!appengine
+//go:build !windows || appengine
+// +build !windows appengine
package colorable
diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go
index 1846ad5ab..2df7b8598 100644
--- a/vendor/github.com/mattn/go-colorable/colorable_windows.go
+++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go
@@ -11,7 +11,7 @@ import (
"strconv"
"strings"
"sync"
- "syscall"
+ syscall "golang.org/x/sys/windows"
"unsafe"
"github.com/mattn/go-isatty"
@@ -73,7 +73,7 @@ type consoleCursorInfo struct {
}
var (
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ kernel32 = syscall.NewLazySystemDLL("kernel32.dll")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
@@ -87,8 +87,8 @@ var (
procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer")
)
-// Writer provides colorable Writer to the console
-type Writer struct {
+// writer provides colorable Writer to the console
+type writer struct {
out io.Writer
handle syscall.Handle
althandle syscall.Handle
@@ -98,7 +98,7 @@ type Writer struct {
mutex sync.Mutex
}
-// NewColorable returns new instance of Writer which handles escape sequence from File.
+// NewColorable returns new instance of writer which handles escape sequence from File.
func NewColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
@@ -112,17 +112,17 @@ func NewColorable(file *os.File) io.Writer {
var csbi consoleScreenBufferInfo
handle := syscall.Handle(file.Fd())
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
- return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
+ return &writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
}
return file
}
-// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
+// NewColorableStdout returns new instance of writer which handles escape sequence for stdout.
func NewColorableStdout() io.Writer {
return NewColorable(os.Stdout)
}
-// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
+// NewColorableStderr returns new instance of writer which handles escape sequence for stderr.
func NewColorableStderr() io.Writer {
return NewColorable(os.Stderr)
}
@@ -434,7 +434,7 @@ func atoiWithDefault(s string, def int) (int, error) {
}
// Write writes data on console
-func (w *Writer) Write(data []byte) (n int, err error) {
+func (w *writer) Write(data []byte) (n int, err error) {
w.mutex.Lock()
defer w.mutex.Unlock()
var csbi consoleScreenBufferInfo
@@ -560,7 +560,7 @@ loop:
}
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'E':
- n, err = strconv.Atoi(buf.String())
+ n, err = atoiWithDefault(buf.String(), 1)
if err != nil {
continue
}
@@ -569,7 +569,7 @@ loop:
csbi.cursorPosition.y += short(n)
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'F':
- n, err = strconv.Atoi(buf.String())
+ n, err = atoiWithDefault(buf.String(), 1)
if err != nil {
continue
}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
index d569c0c94..d0ea68f40 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -1,6 +1,7 @@
-//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine
+//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo
// +build darwin freebsd openbsd netbsd dragonfly hurd
// +build !appengine
+// +build !tinygo
package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
index 31503226f..7402e0618 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_others.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -1,5 +1,6 @@
-//go:build appengine || js || nacl || wasm
-// +build appengine js nacl wasm
+//go:build (appengine || js || nacl || tinygo || wasm) && !windows
+// +build appengine js nacl tinygo wasm
+// +build !windows
package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
index 67787657f..0337d8cf6 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
@@ -1,6 +1,7 @@
-//go:build (linux || aix || zos) && !appengine
+//go:build (linux || aix || zos) && !appengine && !tinygo
// +build linux aix zos
// +build !appengine
+// +build !tinygo
package isatty
diff --git a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
index 584aac971..4ed41e31a 100644
--- a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
+++ b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
@@ -53,6 +53,7 @@ var (
OIDBuildTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20}
OIDRunInvocationURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21}
OIDSourceRepositoryVisibilityAtSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 22}
+ OIDDeploymentEnvironment = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 23}
)
// Extensions contains all custom x509 extensions defined by Fulcio
@@ -132,6 +133,9 @@ type Extensions struct {
// Source repository visibility at the time of signing the certificate.
SourceRepositoryVisibilityAtSigning string `json:"SourceRepositoryVisibilityAtSigning,omitempty" yaml:"source-repository-visibility-at-signing,omitempty"` // 1.3.6.1.4.1.57264.1.22
+
+ // Deployment target for a workflow or job
+ DeploymentEnvironment string `json:"DeploymentEnvironment,omitempty" yaml:"deployment-environment,omitempty"` // 1.3.6.1.4.1.57264.1.23
}
func (e Extensions) Render() ([]pkix.Extension, error) {
@@ -334,6 +338,16 @@ func (e Extensions) Render() ([]pkix.Extension, error) {
Value: val,
})
}
+ if e.DeploymentEnvironment != "" {
+ val, err := asn1.MarshalWithParams(e.DeploymentEnvironment, "utf8")
+ if err != nil {
+ return nil, err
+ }
+ exts = append(exts, pkix.Extension{
+ Id: OIDDeploymentEnvironment,
+ Value: val,
+ })
+ }
return exts, nil
}
@@ -417,6 +431,10 @@ func ParseExtensions(ext []pkix.Extension) (Extensions, error) {
if err := ParseDERString(e.Value, &out.SourceRepositoryVisibilityAtSigning); err != nil {
return Extensions{}, err
}
+ case e.Id.Equal(OIDDeploymentEnvironment):
+ if err := ParseDERString(e.Value, &out.DeploymentEnvironment); err != nil {
+ return Extensions{}, err
+ }
}
}
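
The new OID is handled like the other Fulcio extensions: the value is DER-encoded as a UTF8String with `asn1.MarshalWithParams` and decoded again on parse. A standalone sketch of that round trip, not the Fulcio helpers themselves:

```go
package main

import (
	"crypto/x509/pkix"
	"encoding/asn1"
	"fmt"
)

var oidDeploymentEnvironment = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 23}

func main() {
	// Encode the extension value as a DER UTF8String.
	val, err := asn1.MarshalWithParams("production", "utf8")
	if err != nil {
		panic(err)
	}
	ext := pkix.Extension{Id: oidDeploymentEnvironment, Value: val}

	// Decode it again, as a certificate parser would.
	var decoded string
	if _, err := asn1.UnmarshalWithParams(ext.Value, &decoded, "utf8"); err != nil {
		panic(err)
	}
	fmt.Println(ext.Id, decoded)
}
```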
diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go
index 40725bd79..5f339b2d7 100644
--- a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go
+++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
-// protoc v5.29.4
+// protoc v6.30.2
// source: sigstore_common.proto
package v1
@@ -112,7 +112,8 @@ func (HashAlgorithm) EnumDescriptor() ([]byte, []int) {
// opinionated options instead of allowing every possible permutation.
//
// Any changes to this enum MUST be reflected in the algorithm registry.
-// See: docs/algorithm-registry.md
+//
+// See:
//
// To avoid the possibility of contradicting formats such as PKCS1 with
// ED25519 the valid permutations are listed as a linear set instead of a
@@ -159,8 +160,9 @@ const (
PublicKeyDetails_PKIX_ECDSA_P521_SHA_256 PublicKeyDetails = 20
// LMS and LM-OTS
//
- // These keys and signatures may be used by private Sigstore
- // deployments, but are not currently supported by the public
+ // These algorithms are deprecated and should not be used.
+ // Keys and signatures MAY be used by private Sigstore
+ // deployments, but will not be supported by the public
// good instance.
//
// USER WARNING: LMS and LM-OTS are both stateful signature schemes.
@@ -170,8 +172,26 @@ const (
// MUST NOT be used for more than one signature per LM-OTS key.
// If you cannot maintain these invariants, you MUST NOT use these
// schemes.
- PublicKeyDetails_LMS_SHA256 PublicKeyDetails = 14
+ //
+ // Deprecated: Marked as deprecated in sigstore_common.proto.
+ PublicKeyDetails_LMS_SHA256 PublicKeyDetails = 14
+ // Deprecated: Marked as deprecated in sigstore_common.proto.
PublicKeyDetails_LMOTS_SHA256 PublicKeyDetails = 15
+ // ML-DSA
+ //
+ // These ML_DSA_65 and ML-DSA_87 algorithms are the pure variants that
+ // take data to sign rather than the prehash variants (HashML-DSA), which
+ // take digests. While considered quantum-resistant, their usage
+ // involves tradeoffs in that signatures and keys are much larger, and
+ // this makes deployments more costly.
+ //
+ // USER WARNING: ML_DSA_65 and ML_DSA_87 are experimental algorithms.
+ // In the future they MAY be used by private Sigstore deployments, but
+ // they are not yet fully functional. This warning will be removed when
+ // these algorithms are widely supported by Sigstore clients and servers,
+ // but care should still be taken for production environments.
+ PublicKeyDetails_ML_DSA_65 PublicKeyDetails = 21 // See NIST FIPS 204
+ PublicKeyDetails_ML_DSA_87 PublicKeyDetails = 22
)
// Enum value maps for PublicKeyDetails.
@@ -198,6 +218,8 @@ var (
20: "PKIX_ECDSA_P521_SHA_256",
14: "LMS_SHA256",
15: "LMOTS_SHA256",
+ 21: "ML_DSA_65",
+ 22: "ML_DSA_87",
}
PublicKeyDetails_value = map[string]int32{
"PUBLIC_KEY_DETAILS_UNSPECIFIED": 0,
@@ -221,6 +243,8 @@ var (
"PKIX_ECDSA_P521_SHA_256": 20,
"LMS_SHA256": 14,
"LMOTS_SHA256": 15,
+ "ML_DSA_65": 21,
+ "ML_DSA_87": 22,
}
)
@@ -1134,7 +1158,7 @@ var file_sigstore_common_proto_rawDesc = string([]byte{
0x48, 0x41, 0x32, 0x5f, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41,
0x32, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f,
0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, 0x33, 0x38,
- 0x34, 0x10, 0x05, 0x2a, 0xe9, 0x04, 0x0a, 0x10, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65,
+ 0x34, 0x10, 0x05, 0x2a, 0x8f, 0x05, 0x0a, 0x10, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65,
0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x55, 0x42, 0x4c,
0x49, 0x43, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x53, 0x5f, 0x55,
0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x11,
@@ -1170,25 +1194,27 @@ var file_sigstore_common_proto_rawDesc = string([]byte{
0x44, 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, 0x36,
0x10, 0x13, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1f, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45,
0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35,
- 0x36, 0x10, 0x14, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x4c, 0x4d, 0x53, 0x5f, 0x53,
- 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0e, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x4d, 0x4f, 0x54, 0x53,
- 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0f, 0x22, 0x04, 0x08, 0x15, 0x10, 0x32, 0x2a,
- 0x6f, 0x0a, 0x1a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e,
- 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a,
- 0x29, 0x53, 0x55, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41,
- 0x54, 0x49, 0x56, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
- 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05,
- 0x45, 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x02,
- 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x03,
- 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31,
- 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
- 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73,
- 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70,
- 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f,
- 0x72, 0x65, 0x3a, 0x3a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x36, 0x10, 0x14, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x12, 0x0a, 0x0a, 0x4c, 0x4d, 0x53, 0x5f, 0x53,
+ 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0e, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x14, 0x0a, 0x0c, 0x4c,
+ 0x4d, 0x4f, 0x54, 0x53, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0f, 0x1a, 0x02, 0x08,
+ 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x4c, 0x5f, 0x44, 0x53, 0x41, 0x5f, 0x36, 0x35, 0x10, 0x15,
+ 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x4c, 0x5f, 0x44, 0x53, 0x41, 0x5f, 0x38, 0x37, 0x10, 0x16, 0x22,
+ 0x04, 0x08, 0x17, 0x10, 0x32, 0x2a, 0x6f, 0x0a, 0x1a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x29, 0x53, 0x55, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41,
+ 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f,
+ 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a,
+ 0x03, 0x55, 0x52, 0x49, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f,
+ 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69,
+ 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62,
+ 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14,
+ 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
index 1e2fa031b..63e789a8d 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
@@ -16,7 +16,6 @@
package cryptoutils
import (
- "context"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
@@ -29,8 +28,6 @@ import (
"encoding/pem"
"errors"
"fmt"
-
- "github.com/letsencrypt/boulder/goodkey"
)
const (
@@ -133,54 +130,3 @@ func genErrMsg(first, second crypto.PublicKey, keyType string) string {
}
return fmt.Sprintf("%s (%s, %s)", msg, hex.EncodeToString(firstSKID), hex.EncodeToString(secondSKID))
}
-
-// ValidatePubKey validates the parameters of an RSA, ECDSA, or ED25519 public key.
-func ValidatePubKey(pub crypto.PublicKey) error {
- // goodkey policy enforces:
- // * RSA
- // * Size of key: 2048 <= size <= 4096, size % 8 = 0
- // * Exponent E = 65537 (Default exponent for OpenSSL and Golang)
- // * Small primes check for modulus
- // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17)
- // * Key is easily factored with Fermat's factorization method
- // * EC
- // * Public key Q is not the identity element (Ø)
- // * Public key Q's x and y are within [0, p-1]
- // * Public key Q is on the curve
- // * Public key Q's order matches the subgroups (nQ = Ø)
- allowedKeys := &goodkey.AllowedKeys{
- RSA2048: true,
- RSA3072: true,
- RSA4096: true,
- ECDSAP256: true,
- ECDSAP384: true,
- ECDSAP521: true,
- }
- cfg := &goodkey.Config{
- FermatRounds: 100,
- AllowedKeys: allowedKeys,
- }
- p, err := goodkey.NewPolicy(cfg, nil)
- if err != nil {
- // Should not occur, only chances to return errors are if fermat rounds
- // are <0 or when loading blocked/weak keys from disk (not used here)
- return errors.New("unable to initialize key policy")
- }
-
- switch pk := pub.(type) {
- case *rsa.PublicKey:
- // ctx is unused
- return p.GoodKey(context.Background(), pub)
- case *ecdsa.PublicKey:
- // ctx is unused
- return p.GoodKey(context.Background(), pub)
- case ed25519.PublicKey:
- return validateEd25519Key(pk)
- }
- return errors.New("unsupported public key type")
-}
-
-// No validations currently, ED25519 supports only one key size.
-func validateEd25519Key(_ ed25519.PublicKey) error {
- return nil
-}
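
With the boulder/goodkey dependency dropped, `ValidatePubKey` and its ed25519 helper are removed above. As a rough illustration of the kinds of checks it delegated to goodkey (supported key type, RSA size and exponent, allowed ECDSA curves), here is a standard-library-only sketch; the function name is invented and this is not the replacement sigstore actually uses:

```go
package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

// validatePub mirrors a subset of the removed checks: key type, RSA
// size/exponent, and ECDSA curve membership.
func validatePub(pub crypto.PublicKey) error {
	switch k := pub.(type) {
	case *rsa.PublicKey:
		bits := k.N.BitLen()
		if bits < 2048 || bits > 4096 || bits%8 != 0 {
			return fmt.Errorf("unsupported RSA key size: %d", bits)
		}
		if k.E != 65537 {
			return fmt.Errorf("unsupported RSA exponent: %d", k.E)
		}
		return nil
	case *ecdsa.PublicKey:
		switch k.Curve {
		case elliptic.P256(), elliptic.P384(), elliptic.P521():
			return nil
		}
		return fmt.Errorf("unsupported ECDSA curve")
	case ed25519.PublicKey:
		return nil // only one key size exists
	default:
		return fmt.Errorf("unsupported key type %T", pub)
	}
}

func main() {
	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	fmt.Println(validatePub(&priv.PublicKey)) // <nil>
}
```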
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/safestring.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/safestring.go
new file mode 100644
index 000000000..cdc69aec3
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/safestring.go
@@ -0,0 +1,34 @@
+//
+// Copyright 2025 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cryptoutils
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+)
+
+// GenerateRandomURLSafeString generates a cryptographically secure random
+// URL-safe string with the specified number of bits of entropy.
+func GenerateRandomURLSafeString(entropyLength uint) string {
+ if entropyLength == 0 {
+ return ""
+ }
+ // Round up to the nearest byte to ensure minimum entropy is met
+ entropyBytes := (entropyLength + 7) / 8
+ b := make([]byte, entropyBytes)
+ _, _ = rand.Read(b)
+ return base64.RawURLEncoding.EncodeToString(b)
+}
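
The rounding above means 128 bits of requested entropy become 16 random bytes, which encode to a 22-character unpadded base64url string. A tiny standalone equivalent of that calculation:

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	// Same rounding as GenerateRandomURLSafeString: 128 bits -> 16 bytes.
	const entropyBits = 128
	b := make([]byte, (entropyBits+7)/8)
	_, _ = rand.Read(b)
	s := base64.RawURLEncoding.EncodeToString(b)
	fmt.Println(len(s), s) // 22 characters, URL-safe, no padding
}
```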
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go
index d237ef58e..abcea306a 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go
@@ -132,6 +132,9 @@ func UnmarshalOtherNameSAN(exts []pkix.Extension) (string, error) {
// and OtherName SANs
func GetSubjectAlternateNames(cert *x509.Certificate) []string {
sans := []string{}
+ if cert == nil {
+ return sans
+ }
sans = append(sans, cert.DNSNames...)
sans = append(sans, cert.EmailAddresses...)
for _, ip := range cert.IPAddresses {
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/message.go b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go
index 6f8449eea..44771ff3d 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/message.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go
@@ -55,10 +55,10 @@ func ComputeDigestForSigning(rawMessage io.Reader, defaultHashFunc crypto.Hash,
if hashedWith != crypto.Hash(0) && len(digest) != hashedWith.Size() {
err = errors.New("unexpected length of digest for hash function specified")
}
- return
+ return digest, hashedWith, err
}
digest, err = hashMessage(rawMessage, hashedWith)
- return
+ return digest, hashedWith, err
}
// ComputeDigestForVerifying calculates the digest value for the specified message using a hash function selected by the following process:
@@ -81,10 +81,10 @@ func ComputeDigestForVerifying(rawMessage io.Reader, defaultHashFunc crypto.Hash
if hashedWith != crypto.Hash(0) && len(digest) != hashedWith.Size() {
err = errors.New("unexpected length of digest for hash function specified")
}
- return
+ return digest, hashedWith, err
}
digest, err = hashMessage(rawMessage, hashedWith)
- return
+ return digest, hashedWith, err
}
func hashMessage(rawMessage io.Reader, hashFunc crypto.Hash) ([]byte, error) {
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
index 1122989ff..50f432798 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
@@ -31,9 +31,6 @@ import (
"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature/options"
-
- // these ensure we have the implementations loaded
- _ "golang.org/x/crypto/sha3"
)
// Signer creates digital signatures over a message using a specified key pair
diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml
index 6acf8ab1e..104dc2440 100644
--- a/vendor/github.com/spf13/cobra/.golangci.yml
+++ b/vendor/github.com/spf13/cobra/.golangci.yml
@@ -57,3 +57,10 @@ linters:
- common-false-positives
- legacy
- std-error-handling
+ settings:
+ govet:
+ # Disable buildtag check to allow dual build tag syntax (both //go:build and // +build).
+ # This is necessary for Go 1.15 compatibility since //go:build was introduced in Go 1.17.
+ # This can be removed once Cobra requires Go 1.17 or higher.
+ disable:
+ - buildtag
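
The disabled `buildtag` check trips on files that carry both constraint syntaxes side by side, which is what Go 1.15 compatibility requires: the `//go:build` line for modern toolchains plus the legacy `// +build` line. A typical pair looks like this (hypothetical file, shown only for the syntax):

```go
//go:build !windows && !appengine
// +build !windows,!appengine

// Package demo is a placeholder showing the dual build-constraint header.
package demo
```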
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index 78088db69..c05fed45a 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -557,7 +557,7 @@ func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
}
}
-var minUsagePadding = 25
+const minUsagePadding = 25
// UsagePadding return padding for the usage.
func (c *Command) UsagePadding() int {
@@ -567,7 +567,7 @@ func (c *Command) UsagePadding() int {
return c.parent.commandsMaxUseLen
}
-var minCommandPathPadding = 11
+const minCommandPathPadding = 11
// CommandPathPadding return padding for the command path.
func (c *Command) CommandPathPadding() int {
@@ -577,7 +577,7 @@ func (c *Command) CommandPathPadding() int {
return c.parent.commandsMaxCommandPathLen
}
-var minNamePadding = 11
+const minNamePadding = 11
// NamePadding returns padding for the name.
func (c *Command) NamePadding() int {
@@ -1939,7 +1939,7 @@ type tmplFunc struct {
fn func(io.Writer, interface{}) error
}
-var defaultUsageTemplate = `Usage:{{if .Runnable}}
+const defaultUsageTemplate = `Usage:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
@@ -2039,7 +2039,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error {
return nil
}
-var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+const defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
@@ -2061,7 +2061,7 @@ func defaultHelpFunc(w io.Writer, in interface{}) error {
return nil
}
-var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+const defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
`
// defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync.
diff --git a/vendor/github.com/titanous/rocacheck/LICENSE b/vendor/github.com/titanous/rocacheck/LICENSE
deleted file mode 100644
index 7bdce481f..000000000
--- a/vendor/github.com/titanous/rocacheck/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-MIT License
-
-Copyright (c) 2017, Jonathan Rudenberg
-Copyright (c) 2017, CRoCS, EnigmaBridge Ltd.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/titanous/rocacheck/README.md b/vendor/github.com/titanous/rocacheck/README.md
deleted file mode 100644
index b8e765ea9..000000000
--- a/vendor/github.com/titanous/rocacheck/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# rocacheck [GoDoc](https://godoc.org/github.com/titanous/rocacheck)
-
-Package rocacheck is a Go implementation of the [key fingerprint
-algorithm](https://github.com/crocs-muni/roca) that checks if an RSA key was
-generated by broken Infineon code and is vulnerable to factorization via the
-[Return of Coppersmith's Attack
-(ROCA)](https://crocs.fi.muni.cz/public/papers/rsa_ccs17) / CVE-2017-15361.
diff --git a/vendor/github.com/titanous/rocacheck/rocacheck.go b/vendor/github.com/titanous/rocacheck/rocacheck.go
deleted file mode 100644
index e813579bb..000000000
--- a/vendor/github.com/titanous/rocacheck/rocacheck.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Package rocacheck checks if a key was generated by broken Infineon code and
-// is vulnerable to factorization via the Return of Coppersmith's Attack (ROCA)
-// / CVE-2017-15361.
-package rocacheck
-
-import (
- "crypto/rsa"
- "math/big"
-)
-
-type test struct {
- Prime *big.Int
- Fingerprints map[int64]struct{}
-}
-
-var tests = make([]test, 17)
-
-func init() {
- bigOne := big.NewInt(1)
- n := &big.Int{}
- // relations table from https://github.com/crocs-muni/roca/pull/40
- for i, r := range [][2]int64{
- {2, 11}, {6, 13}, {8, 17}, {9, 19}, {3, 37}, {26, 53}, {20, 61},
- {35, 71}, {24, 73}, {13, 79}, {6, 97}, {51, 103}, {53, 107},
- {54, 109}, {42, 127}, {50, 151}, {78, 157},
- } {
- fps := make(map[int64]struct{})
- bp := big.NewInt(r[1])
- br := big.NewInt(r[0])
- for j := int64(0); j < r[1]; j++ {
- if n.Exp(big.NewInt(j), br, bp).Cmp(bigOne) == 0 {
- fps[j] = struct{}{}
- }
- }
- tests[i] = test{
- Prime: big.NewInt(r[1]),
- Fingerprints: fps,
- }
- }
-}
-
-// IsWeak returns true if a RSA public key is vulnerable to Return of
-// Coppersmith's Attack (ROCA).
-func IsWeak(k *rsa.PublicKey) bool {
- tmp := &big.Int{}
- for _, t := range tests {
- if _, ok := t.Fingerprints[tmp.Mod(k.N, t.Prime).Int64()]; !ok {
- return false
- }
- }
- return true
-}
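For context on what is being dropped here: rocacheck exposed a single function, IsWeak, which matches an RSA modulus against the precomputed residue fingerprints for the 17 small primes built in the init above. A minimal, purely illustrative caller (hypothetical, since the vendored copy is removed along with the letsencrypt/boulder dependency) would have looked like this:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/titanous/rocacheck"
)

func main() {
	// Generate a throwaway key; Go's generator is not affected by ROCA,
	// so IsWeak should report false here.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// IsWeak reports whether the modulus carries the CVE-2017-15361
	// fingerprint of broken Infineon key generation.
	fmt.Println(rocacheck.IsWeak(&key.PublicKey))
}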
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
index e854d7e84..2950fdb42 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
@@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) {
}
// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
-func unmarshalJSON(dst []byte, src []byte) error {
+func unmarshalJSON(dst, src []byte) error {
if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
src = src[1 : l-1]
}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
index 29e629d66..5bb3b16c7 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
@@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error {
// strings or integers.
type protoUint64 uint64
-// Int64 returns the protoUint64 as a uint64.
+// Uint64 returns the protoUint64 as a uint64.
func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
// UnmarshalJSON decodes both strings and integers.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
index a13a6b733..67f80b6aa 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"io"
+ "math"
"time"
)
@@ -151,8 +152,8 @@ func (s Span) MarshalJSON() ([]byte, error) {
}{
Alias: Alias(s),
ParentSpanID: parentSpanId,
- StartTime: uint64(startT),
- EndTime: uint64(endT),
+ StartTime: uint64(startT), // nolint:gosec // >0 checked above.
+ EndTime: uint64(endT), // nolint:gosec // >0 checked above.
})
}
@@ -201,11 +202,13 @@ func (s *Span) UnmarshalJSON(data []byte) error {
case "startTimeUnixNano", "start_time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
- s.StartTime = time.Unix(0, int64(val.Uint64()))
+ v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
+ s.StartTime = time.Unix(0, v)
case "endTimeUnixNano", "end_time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
- s.EndTime = time.Unix(0, int64(val.Uint64()))
+ v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
+ s.EndTime = time.Unix(0, v)
case "attributes":
err = decoder.Decode(&s.Attrs)
case "droppedAttributesCount", "dropped_attributes_count":
@@ -248,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error {
type SpanFlags int32
const (
+ // SpanFlagsTraceFlagsMask is a mask for trace-flags.
+ //
// Bits 0-7 are used for trace flags.
SpanFlagsTraceFlagsMask SpanFlags = 255
- // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
- // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
- // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+ // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status.
+ //
+ // Bits 8 and 9 are used to indicate that the parent span or link span is
+ // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
SpanFlagsContextHasIsRemoteMask SpanFlags = 256
- // SpanFlagsContextHasIsRemoteMask indicates the Span is remote.
+ // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status.
+ //
+ // Bits 8 and 9 are used to indicate that the parent span or link span is
+ // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is
+ // remote.
SpanFlagsContextIsRemoteMask SpanFlags = 512
)
@@ -263,26 +273,30 @@ const (
type SpanKind int32
const (
- // Indicates that the span represents an internal operation within an application,
- // as opposed to an operation happening at the boundaries. Default value.
+ // SpanKindInternal indicates that the span represents an internal
+ // operation within an application, as opposed to an operation happening at
+ // the boundaries.
SpanKindInternal SpanKind = 1
- // Indicates that the span covers server-side handling of an RPC or other
- // remote network request.
+ // SpanKindServer indicates that the span covers server-side handling of an
+ // RPC or other remote network request.
SpanKindServer SpanKind = 2
- // Indicates that the span describes a request to some remote service.
+ // SpanKindClient indicates that the span describes a request to some
+ // remote service.
SpanKindClient SpanKind = 3
- // Indicates that the span describes a producer sending a message to a broker.
- // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
- // between producer and consumer spans. A PRODUCER span ends when the message was accepted
- // by the broker while the logical processing of the message might span a much longer time.
+ // SpanKindProducer indicates that the span describes a producer sending a
+ // message to a broker. Unlike SpanKindClient and SpanKindServer, there is
+ // often no direct critical path latency relationship between producer and
+ // consumer spans. A SpanKindProducer span ends when the message was
+ // accepted by the broker while the logical processing of the message might
+ // span a much longer time.
SpanKindProducer SpanKind = 4
- // Indicates that the span describes consumer receiving a message from a broker.
- // Like the PRODUCER kind, there is often no direct critical path latency relationship
- // between producer and consumer spans.
+ // SpanKindConsumer indicates that the span describes a consumer receiving
+ // a message from a broker. Like SpanKindProducer, there is often no direct
+ // critical path latency relationship between producer and consumer spans.
SpanKindConsumer SpanKind = 5
)
-// Event is a time-stamped annotation of the span, consisting of user-supplied
+// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied
// text description and key-value pairs.
type SpanEvent struct {
// time_unix_nano is the time the event occurred.
@@ -312,7 +326,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) {
Time uint64 `json:"timeUnixNano,omitempty"`
}{
Alias: Alias(e),
- Time: uint64(t),
+ Time: uint64(t), //nolint:gosec // >0 checked above
})
}
@@ -347,7 +361,8 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error {
case "timeUnixNano", "time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
- se.Time = time.Unix(0, int64(val.Uint64()))
+ v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
+ se.Time = time.Unix(0, v)
case "name":
err = decoder.Decode(&se.Name)
case "attributes":
@@ -365,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error {
return nil
}
-// A pointer from the current span to another span in the same trace or in a
-// different trace. For example, this can be used in batching operations,
-// where a single batch handler processes multiple requests from different
-// traces or when the handler receives a request from a different project.
+// SpanLink is a reference from the current span to another span in the same
+// trace or in a different trace. For example, this can be used in batching
+// operations, where a single batch handler processes multiple requests from
+// different traces or when the handler receives a request from a different
+// project.
type SpanLink struct {
// A unique identifier of a trace that this linked span is part of. The ID is a
// 16-byte array.
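The recurring pattern in this hunk, clamping a decoded protobuf uint64 with min before converting to int64, is what the //nolint:gosec annotations refer to. A standalone sketch of the same idea (names are illustrative, not the vendored ones):

package main

import (
	"fmt"
	"math"
	"time"
)

// unixFromUint64Nanos bounds a uint64 nanosecond timestamp to math.MaxInt64
// before the signed conversion, so the int64 cast can never overflow.
func unixFromUint64Nanos(ns uint64) time.Time {
	v := int64(min(ns, math.MaxInt64)) // overflow checked, mirrors the hunk above
	return time.Unix(0, v)
}

func main() {
	fmt.Println(unixFromUint64Nanos(math.MaxUint64)) // clamped to the int64 limit, not wrapped
}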
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
index 1217776ea..a2802764f 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
@@ -3,17 +3,19 @@
package telemetry
+// StatusCode is the status of a Span.
+//
// For the semantics of status codes see
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
type StatusCode int32
const (
- // The default status.
+ // StatusCodeUnset is the default status.
StatusCodeUnset StatusCode = 0
- // The Span has been validated by an Application developer or Operator to
- // have completed successfully.
+ // StatusCodeOK is used when the Span has been validated by an Application
+ // developer or Operator to have completed successfully.
StatusCodeOK StatusCode = 1
- // The Span contains an error.
+ // StatusCodeError is used when the Span contains an error.
StatusCodeError StatusCode = 2
)
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
index 69a348f0f..44197b808 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
@@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error {
return nil
}
-// A collection of ScopeSpans from a Resource.
+// ResourceSpans is a collection of ScopeSpans from a Resource.
type ResourceSpans struct {
// The resource for the spans in this message.
// If this field is not set then no resource info is known.
@@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
return nil
}
-// A collection of Spans produced by an InstrumentationScope.
+// ScopeSpans is a collection of Spans produced by an InstrumentationScope.
type ScopeSpans struct {
// The instrumentation scope information for the spans in this message.
// Semantically when InstrumentationScope isn't set, it is equivalent with
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
index 0dd01b063..022768bb5 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
@@ -1,8 +1,6 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-//go:generate stringer -type=ValueKind -trimprefix=ValueKind
-
package telemetry
import (
@@ -23,7 +21,7 @@ import (
// A zero value is valid and represents an empty value.
type Value struct {
// Ensure forward compatibility by explicitly making this not comparable.
- noCmp [0]func() //nolint: unused // This is indeed used.
+ noCmp [0]func() //nolint:unused // This is indeed used.
// num holds the value for Int64, Float64, and Bool. It holds the length
// for String, Bytes, Slice, Map.
@@ -92,7 +90,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) }
// Int64Value returns a [Value] for an int64.
func Int64Value(v int64) Value {
- return Value{num: uint64(v), any: ValueKindInt64}
+ return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv.
}
// Float64Value returns a [Value] for a float64.
@@ -164,7 +162,7 @@ func (v Value) AsInt64() int64 {
// this will return garbage.
func (v Value) asInt64() int64 {
// Assumes v.num was a valid int64 (overflow not checked).
- return int64(v.num) // nolint: gosec
+ return int64(v.num) //nolint:gosec // Bounded.
}
// AsBool returns the value held by v as a bool.
@@ -309,13 +307,13 @@ func (v Value) String() string {
return v.asString()
case ValueKindInt64:
// Assumes v.num was a valid int64 (overflow not checked).
- return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+ return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded.
case ValueKindFloat64:
return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
case ValueKindBool:
return strconv.FormatBool(v.asBool())
case ValueKindBytes:
- return fmt.Sprint(v.asBytes())
+ return string(v.asBytes())
case ValueKindMap:
return fmt.Sprint(v.asMap())
case ValueKindSlice:
@@ -343,7 +341,7 @@ func (v *Value) MarshalJSON() ([]byte, error) {
case ValueKindInt64:
return json.Marshal(struct {
Value string `json:"intValue"`
- }{strconv.FormatInt(int64(v.num), 10)})
+ }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv.
case ValueKindFloat64:
return json.Marshal(struct {
Value float64 `json:"doubleValue"`
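The noCmp field touched in this hunk is the standard zero-width trick for keeping a struct non-comparable; a minimal sketch with a hypothetical type (not the vendored Value):

package main

// sample embeds a zero-length array of func values. Arrays of a
// non-comparable element type are themselves non-comparable, so == on
// sample is a compile-time error, yet the field occupies no memory.
type sample struct {
	noCmp [0]func()
	num   uint64
}

func main() {
	a, b := sample{num: 1}, sample{num: 1}
	_, _ = a, b
	// fmt.Println(a == b) // does not compile: sample cannot be compared
}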
diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go
index 6ebea12a9..815d271ff 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/span.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/span.go
@@ -6,6 +6,7 @@ package sdk
import (
"encoding/json"
"fmt"
+ "math"
"reflect"
"runtime"
"strings"
@@ -16,7 +17,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop"
@@ -85,7 +86,12 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
limit := maxSpan.Attrs
if limit == 0 {
// No attributes allowed.
- s.span.DroppedAttrs += uint32(len(attrs))
+ n := int64(len(attrs))
+ if n > 0 {
+ s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked.
+ min(n, math.MaxUint32),
+ )
+ }
return
}
@@ -121,8 +127,13 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
// number of dropped attributes is also returned.
func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
+ n := len(attrs)
if limit == 0 {
- return nil, uint32(len(attrs))
+ var out uint32
+ if n > 0 {
+ out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked.
+ }
+ return nil, out
}
if limit < 0 {
@@ -130,8 +141,12 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u
return convAttrs(attrs), 0
}
- limit = min(len(attrs), limit)
- return convAttrs(attrs[:limit]), uint32(len(attrs) - limit)
+ if n < 0 {
+ n = 0
+ }
+
+ limit = min(n, limit)
+ return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked.
}
func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
index cbcfabde3..e09acf022 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/tracer.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
@@ -5,6 +5,7 @@ package sdk
import (
"context"
+ "math"
"time"
"go.opentelemetry.io/otel/trace"
@@ -21,15 +22,20 @@ type tracer struct {
var _ trace.Tracer = tracer{}
-func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
- var psc trace.SpanContext
+func (t tracer) Start(
+ ctx context.Context,
+ name string,
+ opts ...trace.SpanStartOption,
+) (context.Context, trace.Span) {
+ var psc, sc trace.SpanContext
sampled := true
span := new(span)
// Ask eBPF for sampling decision and span context info.
- t.start(ctx, span, &psc, &sampled, &span.spanContext)
+ t.start(ctx, span, &psc, &sampled, &sc)
span.sampled.Store(sampled)
+ span.spanContext = sc
ctx = trace.ContextWithSpan(ctx, span)
@@ -58,7 +64,13 @@ func (t *tracer) start(
// start is used for testing.
var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {}
-func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) {
+var intToUint32Bound = min(math.MaxInt, math.MaxUint32)
+
+func (t tracer) traces(
+ name string,
+ cfg trace.SpanConfig,
+ sc, psc trace.SpanContext,
+) (*telemetry.Traces, *telemetry.Span) {
span := &telemetry.Span{
TraceID: telemetry.TraceID(sc.TraceID()),
SpanID: telemetry.SpanID(sc.SpanID()),
@@ -73,11 +85,16 @@ func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanCont
links := cfg.Links()
if limit := maxSpan.Links; limit == 0 {
- span.DroppedLinks = uint32(len(links))
+ n := len(links)
+ if n > 0 {
+ bounded := max(min(n, intToUint32Bound), 0)
+ span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked.
+ }
} else {
if limit > 0 {
n := max(len(links)-limit, 0)
- span.DroppedLinks = uint32(n)
+ bounded := min(n, intToUint32Bound)
+ span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked.
links = links[n:]
}
span.Links = convLinks(links)
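The intToUint32Bound value introduced above is a portable cap for int-to-uint32 conversion: on 32-bit builds math.MaxInt is the smaller of the two limits, on 64-bit builds math.MaxUint32 is. A hedged sketch of the same pattern with hypothetical names:

package main

import (
	"fmt"
	"math"
)

// Whichever of the two limits is smaller is always safe for both int and uint32.
var intToUint32Bound = min(math.MaxInt, math.MaxUint32)

// droppedCount mirrors the DroppedLinks accounting above: anything beyond
// the limit is counted, bounded, and only then converted.
func droppedCount(total, limit int) uint32 {
	n := max(total-limit, 0)
	return uint32(min(n, intToUint32Bound)) // bounds checked, no overflow possible
}

func main() {
	fmt.Println(droppedCount(10, 3)) // 7
	fmt.Println(droppedCount(2, 5))  // 0
}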
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
index 261eeb9e9..f1aee0f11 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
index b25641c55..521daa25d 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
@@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
// Get is a convenient replacement for http.Get that adds a span around the request.
func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, http.NoBody)
if err != nil {
return nil, err
}
@@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error)
// Head is a convenient replacement for http.Head that adds a span around the request.
func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, http.NoBody)
if err != nil {
return nil, err
}
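The nil-to-http.NoBody swap in this file is about explicitness: http.NoBody is a non-nil io.ReadCloser whose Read immediately returns io.EOF, so the request still carries no payload but the body field is never nil. A small illustration (the URL is a placeholder):

package main

import (
	"context"
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequestWithContext(
		context.Background(), http.MethodGet, "https://example.com", http.NoBody,
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Body != nil) // true, yet nothing is sent: reads hit io.EOF at once
}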
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
index 6bd50d4c9..38fb79c03 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -8,9 +8,8 @@ import (
"net/http"
"net/http/httptrace"
- "go.opentelemetry.io/otel/attribute"
-
"go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
index 937f9b4e7..fef83b42f 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -8,13 +8,13 @@ import (
"time"
"github.com/felixge/httpsnoop"
-
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
)
// middleware is an http middleware which wraps the next handler in a span.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
index 7cb9693d9..821b80ec4 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -10,13 +10,13 @@ import (
"context"
"fmt"
"net/http"
- "os"
"strings"
"sync"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)
// OTelSemConvStabilityOptIn is an environment variable.
@@ -32,17 +32,9 @@ type ResponseTelemetry struct {
}
type HTTPServer struct {
- duplicate bool
-
- // Old metrics
- requestBytesCounter metric.Int64Counter
- responseBytesCounter metric.Int64Counter
- serverLatencyMeasure metric.Float64Histogram
-
- // New metrics
- requestBodySizeHistogram metric.Int64Histogram
- responseBodySizeHistogram metric.Int64Histogram
- requestDurationHistogram metric.Float64Histogram
+ requestBodySizeHistogram httpconv.ServerRequestBodySize
+ responseBodySizeHistogram httpconv.ServerResponseBodySize
+ requestDurationHistogram httpconv.ServerRequestDuration
}
// RequestTraceAttrs returns trace attributes for an HTTP request received by a
@@ -62,20 +54,10 @@ type HTTPServer struct {
// If the primary server name is not known, server should be an empty string.
// The req Host will be used to determine the server instead.
func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue {
- attrs := CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts)
- if s.duplicate {
- return OldHTTPServer{}.RequestTraceAttrs(server, req, attrs)
- }
- return attrs
+ return CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts)
}
func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue {
- if s.duplicate {
- return []attribute.KeyValue{
- OldHTTPServer{}.NetworkTransportAttr(network),
- CurrentHTTPServer{}.NetworkTransportAttr(network),
- }
- }
return []attribute.KeyValue{
CurrentHTTPServer{}.NetworkTransportAttr(network),
}
@@ -85,11 +67,7 @@ func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue {
//
// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
- attrs := CurrentHTTPServer{}.ResponseTraceAttrs(resp)
- if s.duplicate {
- return OldHTTPServer{}.ResponseTraceAttrs(resp, attrs)
- }
- return attrs
+ return CurrentHTTPServer{}.ResponseTraceAttrs(resp)
}
// Route returns the attribute for the route.
@@ -133,42 +111,28 @@ type MetricData struct {
var (
metricAddOptionPool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &[]metric.AddOption{}
},
}
metricRecordOptionPool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &[]metric.RecordOption{}
},
}
)
func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
- if s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil {
- attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
- o := metric.WithAttributeSet(attribute.NewSet(attributes...))
- recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption)
- *recordOpts = append(*recordOpts, o)
- s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...)
- s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...)
- s.requestDurationHistogram.Record(ctx, md.ElapsedTime/1000.0, o)
- *recordOpts = (*recordOpts)[:0]
- metricRecordOptionPool.Put(recordOpts)
- }
-
- if s.duplicate && s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil {
- attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
- o := metric.WithAttributeSet(attribute.NewSet(attributes...))
- addOpts := metricAddOptionPool.Get().(*[]metric.AddOption)
- *addOpts = append(*addOpts, o)
- s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...)
- s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...)
- s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
- *addOpts = (*addOpts)[:0]
- metricAddOptionPool.Put(addOpts)
- }
+ attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+ o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption)
+ *recordOpts = append(*recordOpts, o)
+ s.requestBodySizeHistogram.Inst().Record(ctx, md.RequestSize, *recordOpts...)
+ s.responseBodySizeHistogram.Inst().Record(ctx, md.ResponseSize, *recordOpts...)
+ s.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o)
+ *recordOpts = (*recordOpts)[:0]
+ metricRecordOptionPool.Put(recordOpts)
}
// hasOptIn returns true if the comma-separated version string contains the
@@ -183,61 +147,55 @@ func hasOptIn(version, optIn string) bool {
}
func NewHTTPServer(meter metric.Meter) HTTPServer {
- env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
- duplicate := hasOptIn(env, "http/dup")
- server := HTTPServer{
- duplicate: duplicate,
- }
- server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter)
- if duplicate {
- server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter)
- }
+ server := HTTPServer{}
+
+ var err error
+ server.requestBodySizeHistogram, err = httpconv.NewServerRequestBodySize(meter)
+ handleErr(err)
+
+ server.responseBodySizeHistogram, err = httpconv.NewServerResponseBodySize(meter)
+ handleErr(err)
+
+ server.requestDurationHistogram, err = httpconv.NewServerRequestDuration(
+ meter,
+ metric.WithExplicitBucketBoundaries(
+ 0.005, 0.01, 0.025, 0.05, 0.075, 0.1,
+ 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10,
+ ),
+ )
+ handleErr(err)
return server
}
type HTTPClient struct {
- duplicate bool
-
- // old metrics
- requestBytesCounter metric.Int64Counter
- responseBytesCounter metric.Int64Counter
- latencyMeasure metric.Float64Histogram
-
- // new metrics
- requestBodySize metric.Int64Histogram
- requestDuration metric.Float64Histogram
+ requestBodySize httpconv.ClientRequestBodySize
+ requestDuration httpconv.ClientRequestDuration
}
func NewHTTPClient(meter metric.Meter) HTTPClient {
- env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
- duplicate := hasOptIn(env, "http/dup")
- client := HTTPClient{
- duplicate: duplicate,
- }
- client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter)
- if duplicate {
- client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter)
- }
+ client := HTTPClient{}
+
+ var err error
+ client.requestBodySize, err = httpconv.NewClientRequestBodySize(meter)
+ handleErr(err)
+
+ client.requestDuration, err = httpconv.NewClientRequestDuration(
+ meter,
+ metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
+ )
+ handleErr(err)
return client
}
// RequestTraceAttrs returns attributes for an HTTP request made by a client.
func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.RequestTraceAttrs(req)
- if c.duplicate {
- return OldHTTPClient{}.RequestTraceAttrs(req, attrs)
- }
- return attrs
+ return CurrentHTTPClient{}.RequestTraceAttrs(req)
}
// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client.
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.ResponseTraceAttrs(resp)
- if c.duplicate {
- return OldHTTPClient{}.ResponseTraceAttrs(resp, attrs)
- }
- return attrs
+ return CurrentHTTPClient{}.ResponseTraceAttrs(resp)
}
func (c HTTPClient) Status(code int) (codes.Code, string) {
@@ -277,47 +235,14 @@ func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts {
addOptions: set,
}
- if c.duplicate {
- attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
- set := metric.WithAttributeSet(attribute.NewSet(attributes...))
- opts["old"] = MetricOpts{
- measurement: set,
- addOptions: set,
- }
- }
-
return opts
}
func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) {
- if s.requestBodySize == nil || s.requestDuration == nil {
- // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
- return
- }
-
- s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption())
- s.requestDuration.Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption())
-
- if s.duplicate {
- s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions())
- s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption())
- }
-}
-
-func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) {
- if s.responseBytesCounter == nil {
- // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
- return
- }
-
- s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions())
+ s.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption())
+ s.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption())
}
func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.TraceAttributes(host)
- if s.duplicate {
- return OldHTTPClient{}.TraceAttributes(host, attrs)
- }
-
- return attrs
+ return CurrentHTTPClient{}.TraceAttributes(host)
}
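The refactor above replaces the hand-rolled createMeasures helpers with the typed constructors from semconv/v1.37.0/httpconv; each wrapper exposes its underlying instrument through Inst(). A hedged sketch of that wiring outside the vendored package, using only calls that appear in this diff (meter name and attribute are placeholders):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)

func main() {
	meter := otel.Meter("example/otelhttp-sketch")

	// Same constructor and bucket boundaries as NewHTTPServer above.
	dur, err := httpconv.NewServerRequestDuration(
		meter,
		metric.WithExplicitBucketBoundaries(
			0.005, 0.01, 0.025, 0.05, 0.075, 0.1,
			0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10,
		),
	)
	if err != nil {
		otel.Handle(err)
	}

	// Record one request duration in seconds with a shared attribute set,
	// mirroring RecordMetrics above (attribute chosen for illustration).
	set := metric.WithAttributeSet(attribute.NewSet(attribute.String("http.request.method", "GET")))
	dur.Inst().Record(context.Background(), 0.042, set)
}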
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
index f2cf8a152..1bb207b80 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
@@ -5,10 +5,11 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/
// Generate semconv package:
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/common_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=common_test.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconvtest_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconvtest_test.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
index 53976b0d5..28c51a3b3 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
@@ -17,9 +17,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/noop"
- semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type RequestTraceAttrsOpts struct {
@@ -196,7 +194,7 @@ func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.
return semconvNew.HTTPRequestMethodGet, orig
}
-func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { //nolint:revive // ignore linter
if https {
return semconvNew.URLScheme("https")
}
@@ -247,36 +245,6 @@ func (n CurrentHTTPServer) Route(route string) attribute.KeyValue {
return semconvNew.HTTPRoute(route)
}
-func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{}
- }
-
- var err error
- requestBodySizeHistogram, err := meter.Int64Histogram(
- semconvNew.HTTPServerRequestBodySizeName,
- metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription),
- )
- handleErr(err)
-
- responseBodySizeHistogram, err := meter.Int64Histogram(
- semconvNew.HTTPServerResponseBodySizeName,
- metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription),
- )
- handleErr(err)
- requestDurationHistogram, err := meter.Float64Histogram(
- semconvNew.HTTPServerRequestDurationName,
- metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit),
- metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription),
- metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
- )
- handleErr(err)
-
- return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram
-}
-
func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
num := len(additionalAttributes) + 3
var host string
@@ -472,30 +440,6 @@ func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.
return semconvNew.HTTPRequestMethodGet, orig
}
-func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Histogram{}, noop.Float64Histogram{}
- }
-
- var err error
- requestBodySize, err := meter.Int64Histogram(
- semconvNew.HTTPClientRequestBodySizeName,
- metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription),
- )
- handleErr(err)
-
- requestDuration, err := meter.Float64Histogram(
- semconvNew.HTTPClientRequestDurationName,
- metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit),
- metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription),
- metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
- )
- handleErr(err)
-
- return requestBodySize, requestDuration
-}
-
func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
num := len(additionalAttributes) + 2
var h string
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
index bc1f7751d..96422ad1e 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
@@ -14,7 +14,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0"
)
// SplitHostPort splits a network address hostport of the form "host",
@@ -53,10 +53,10 @@ func SplitHostPort(hostport string) (host string, port int) {
if err != nil {
return
}
- return host, int(p) // nolint: gosec // Byte size checked 16 above.
+ return host, int(p) //nolint:gosec // Byte size checked 16 above.
}
-func requiredHTTPPort(https bool, port int) int { // nolint:revive
+func requiredHTTPPort(https bool, port int) int { //nolint:revive // ignore linter
if https {
if port > 0 && port != 443 {
return port
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
deleted file mode 100644
index ba7fccf1e..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconv/v120.0.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
-
-import (
- "errors"
- "io"
- "net/http"
- "slices"
-
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/noop"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-type OldHTTPServer struct{}
-
-// RequestTraceAttrs returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPServerRequest(server, req, semconvutil.HTTPServerRequestOptions{}, attrs)
-}
-
-func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue {
- return semconvutil.NetTransport(network)
-}
-
-// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
-//
-// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
-func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry, attributes []attribute.KeyValue) []attribute.KeyValue {
- if resp.ReadBytes > 0 {
- attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes)))
- }
- if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) {
- // This is not in the semantic conventions, but is historically provided
- attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error()))
- }
- if resp.WriteBytes > 0 {
- attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes)))
- }
- if resp.StatusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode))
- }
- if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) {
- // This is not in the semantic conventions, but is historically provided
- attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error()))
- }
-
- return attributes
-}
-
-// Route returns the attribute for the route.
-func (o OldHTTPServer) Route(route string) attribute.KeyValue {
- return semconv.HTTPRoute(route)
-}
-
-// HTTPStatusCode returns the attribute for the HTTP status code.
-// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added.
-func HTTPStatusCode(status int) attribute.KeyValue {
- return semconv.HTTPStatusCode(status)
-}
-
-// Server HTTP metrics.
-const (
- serverRequestSize = "http.server.request.size" // Incoming request bytes total
- serverResponseSize = "http.server.response.size" // Incoming response bytes total
- serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
-)
-
-func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
- }
- var err error
- requestBytesCounter, err := meter.Int64Counter(
- serverRequestSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request messages."),
- )
- handleErr(err)
-
- responseBytesCounter, err := meter.Int64Counter(
- serverResponseSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response messages."),
- )
- handleErr(err)
-
- serverLatencyMeasure, err := meter.Float64Histogram(
- serverDuration,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of inbound HTTP requests."),
- )
- handleErr(err)
-
- return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
-}
-
-func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
- n := len(additionalAttributes) + 3
- var host string
- var p int
- if server == "" {
- host, p = SplitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = SplitHostPort(server)
- if p < 0 {
- _, p = SplitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- if statusCode > 0 {
- n++
- }
-
- attributes := slices.Grow(additionalAttributes, n)
- attributes = append(attributes,
- semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
- o.scheme(req.TLS != nil),
- semconv.NetHostName(host))
-
- if hostPort > 0 {
- attributes = append(attributes, semconv.NetHostPort(hostPort))
- }
- if protoName != "" {
- attributes = append(attributes, semconv.NetProtocolName(protoName))
- }
- if protoVersion != "" {
- attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
- }
-
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
- }
- return attributes
-}
-
-func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return semconv.HTTPSchemeHTTPS
- }
- return semconv.HTTPSchemeHTTP
-}
-
-type OldHTTPClient struct{}
-
-func (o OldHTTPClient) RequestTraceAttrs(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPClientRequest(req, attrs)
-}
-
-func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPClientResponse(resp, attrs)
-}
-
-func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- http.status_code int
- net.peer.name string
- net.peer.port int
- */
-
- n := 2 // method, peer name.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- var requestHost string
- var requestPort int
- for _, hostport := range []string{h, req.Header.Get("Host")} {
- requestHost, requestPort = SplitHostPort(hostport)
- if requestHost != "" || requestPort > 0 {
- break
- }
- }
-
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
- if port > 0 {
- n++
- }
-
- if statusCode > 0 {
- n++
- }
-
- attributes := slices.Grow(additionalAttributes, n)
- attributes = append(attributes,
- semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
- semconv.NetPeerName(requestHost),
- )
-
- if port > 0 {
- attributes = append(attributes, semconv.NetPeerPort(port))
- }
-
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
- }
- return attributes
-}
-
-// Client HTTP metrics.
-const (
- clientRequestSize = "http.client.request.size" // Incoming request bytes total
- clientResponseSize = "http.client.response.size" // Incoming response bytes total
- clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds
-)
-
-func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
- }
- requestBytesCounter, err := meter.Int64Counter(
- clientRequestSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request messages."),
- )
- handleErr(err)
-
- responseBytesCounter, err := meter.Int64Counter(
- clientResponseSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response messages."),
- )
- handleErr(err)
-
- latencyMeasure, err := meter.Float64Histogram(
- clientDuration,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of outbound HTTP requests."),
- )
- handleErr(err)
-
- return requestBytesCounter, responseBytesCounter, latencyMeasure
-}
-
-// TraceAttributes returns attributes for httptrace.
-func (c OldHTTPClient) TraceAttributes(host string, attrs []attribute.KeyValue) []attribute.KeyValue {
- return append(attrs, semconv.NetHostName(host))
-}
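For readers tracking the migration, the deleted OldHTTPServer/OldHTTPClient code emitted the v1.20.0 attribute constructors while the surviving CurrentHTTP* code uses their v1.37.0 counterparts. A small sketch of the correspondence, with the old names kept in comments since only the new package remains imported here:

package main

import (
	"fmt"

	semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0"
)

func main() {
	// Old v1.20.0 constructors (deleted above)   ->  v1.37.0 replacements used by CurrentHTTP*:
	//   semconv.HTTPMethod("GET")  (http.method) ->  HTTPRequestMethodGet (http.request.method)
	//   semconv.HTTPSchemeHTTPS    (http.scheme) ->  URLScheme("https")   (url.scheme)
	//   semconv.HTTPRoute("/x")    (http.route)  ->  HTTPRoute("/x")      (http.route, unchanged)
	fmt.Println(
		semconvNew.HTTPRequestMethodGet.Key,
		semconvNew.URLScheme("https").Key,
		semconvNew.HTTPRoute("/x").Key,
	)
}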
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
deleted file mode 100644
index 7aa5f99e8..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-// Generate semconvutil package:
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
deleted file mode 100644
index b99735479..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
+++ /dev/null
@@ -1,594 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconvutil/httpconv.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconvutil provides OpenTelemetry semantic convention utilities.
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-import (
- "fmt"
- "net/http"
- "slices"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-type HTTPServerRequestOptions struct {
- // If set, this is used as value for the "http.client_ip" attribute.
- HTTPClientIP string
-}
-
-// HTTPClientResponse returns trace attributes for an HTTP response received by a
-// client from a server. It will return the following attributes if the related
-// values are defined in resp: "http.status.code",
-// "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event,
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. If a complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-// HTTPClientResponse(resp, ClientRequest(resp.Request)))
-func HTTPClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ClientResponse(resp, attrs)
-}
-
-// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
-// The following attributes are always returned: "http.url", "http.method",
-// "net.peer.name". The following attributes are returned if the related values
-// are defined in req: "net.peer.port", "user_agent.original",
-// "http.request_content_length".
-func HTTPClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ClientRequest(req, attrs)
-}
-
-// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client.
-// The following attributes are always returned: "http.method", "net.peer.name".
-// The following attributes are returned if the
-// related values are defined in req: "net.peer.port".
-func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue {
- return hc.ClientRequestMetrics(req)
-}
-
-// HTTPClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func HTTPClientStatus(code int) (codes.Code, string) {
- return hc.ClientStatus(code)
-}
-
-// HTTPServerRequest returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.target", "net.host.name". The following attributes are returned if
-// they related values are defined in req: "net.host.port", "net.sock.peer.addr",
-// "net.sock.peer.port", "user_agent.original", "http.client_ip".
-func HTTPServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ServerRequest(server, req, opts, attrs)
-}
-
-// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "net.host.name". The following attributes are returned if they related
-// values are defined in req: "net.host.port".
-func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
- return hc.ServerRequestMetrics(server, req)
-}
-
-// HTTPServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func HTTPServerStatus(code int) (codes.Code, string) {
- return hc.ServerStatus(code)
-}
-
-// httpConv are the HTTP semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type httpConv struct {
- NetConv *netConv
-
- HTTPClientIPKey attribute.Key
- HTTPMethodKey attribute.Key
- HTTPRequestContentLengthKey attribute.Key
- HTTPResponseContentLengthKey attribute.Key
- HTTPRouteKey attribute.Key
- HTTPSchemeHTTP attribute.KeyValue
- HTTPSchemeHTTPS attribute.KeyValue
- HTTPStatusCodeKey attribute.Key
- HTTPTargetKey attribute.Key
- HTTPURLKey attribute.Key
- UserAgentOriginalKey attribute.Key
-}
-
-var hc = &httpConv{
- NetConv: nc,
-
- HTTPClientIPKey: semconv.HTTPClientIPKey,
- HTTPMethodKey: semconv.HTTPMethodKey,
- HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
- HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
- HTTPRouteKey: semconv.HTTPRouteKey,
- HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
- HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
- HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
- HTTPTargetKey: semconv.HTTPTargetKey,
- HTTPURLKey: semconv.HTTPURLKey,
- UserAgentOriginalKey: semconv.UserAgentOriginalKey,
-}
-
-// ClientResponse returns attributes for an HTTP response received by a client
-// from a server. The following attributes are returned if the related values
-// are defined in resp: "http.status.code", "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event;
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. A complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-// ClientResponse(resp, ClientRequest(resp.Request))
-func (c *httpConv) ClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.status_code int
- http.response_content_length int
- */
- var n int
- if resp.StatusCode > 0 {
- n++
- }
- if resp.ContentLength > 0 {
- n++
- }
- if n == 0 {
- return attrs
- }
-
- attrs = slices.Grow(attrs, n)
- if resp.StatusCode > 0 {
- attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
- }
- if resp.ContentLength > 0 {
- attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
- }
- return attrs
-}
-
-// ClientRequest returns attributes for an HTTP request made by a client. The
-// following attributes are always returned: "http.url", "http.method",
-// "net.peer.name". The following attributes are returned if the related values
-// are defined in req: "net.peer.port", "user_agent.original",
-// "http.request_content_length", "user_agent.original".
-func (c *httpConv) ClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- user_agent.original string
- http.url string
- net.peer.name string
- net.peer.port int
- http.request_content_length int
- */
-
- /* The following semantic conventions are not returned:
- http.status_code This requires the response. See ClientResponse.
- http.response_content_length This requires the response. See ClientResponse.
- net.sock.family This requires the socket used.
- net.sock.peer.addr This requires the socket used.
- net.sock.peer.name This requires the socket used.
- net.sock.peer.port This requires the socket used.
- http.resend_count This is something outside of a single request.
-	net.protocol.name The value in the Request is ignored, and the Go client will always use "http".
-	net.protocol.version The value in the Request is ignored, and the Go client will always use 1.1 or 2.0.
- */
-	n := 3 // URL, peer name, and method.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- peer, p := firstHostPort(h, req.Header.Get("Host"))
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
- if port > 0 {
- n++
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
- if req.ContentLength > 0 {
- n++
- }
-
- attrs = slices.Grow(attrs, n)
- attrs = append(attrs, c.method(req.Method))
-
- var u string
- if req.URL != nil {
- // Remove any username/password info that may be in the URL.
- userinfo := req.URL.User
- req.URL.User = nil
- u = req.URL.String()
- // Restore any username/password info that was removed.
- req.URL.User = userinfo
- }
- attrs = append(attrs, c.HTTPURLKey.String(u))
-
- attrs = append(attrs, c.NetConv.PeerName(peer))
- if port > 0 {
- attrs = append(attrs, c.NetConv.PeerPort(port))
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if l := req.ContentLength; l > 0 {
- attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
- }
-
- return attrs
-}
-
-// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The
-// following attributes are always returned: "http.method", "net.peer.name".
-// The following attributes are returned if the related values
-// are defined in req: "net.peer.port".
-func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- net.peer.name string
- net.peer.port int
- */
-
- n := 2 // method, peer name.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- peer, p := firstHostPort(h, req.Header.Get("Host"))
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
- if port > 0 {
- n++
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer))
-
- if port > 0 {
- attrs = append(attrs, c.NetConv.PeerPort(port))
- }
-
- return attrs
-}
-
-// ServerRequest returns attributes for an HTTP request received by a server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and, if a port is used to route to the
-// server, that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.target", "net.host.name". The following attributes are returned if they
-// related values are defined in req: "net.host.port", "net.sock.peer.addr",
-// "net.sock.peer.port", "user_agent.original", "http.client_ip",
-// "net.protocol.name", "net.protocol.version".
-func (c *httpConv) ServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- http.scheme string
- net.host.name string
- net.host.port int
- net.sock.peer.addr string
- net.sock.peer.port int
- user_agent.original string
- http.client_ip string
- net.protocol.name string Note: not set if the value is "http".
- net.protocol.version string
- http.target string Note: doesn't include the query parameter.
- */
-
- /* The following semantic conventions are not returned:
- http.status_code This requires the response.
- http.request_content_length This requires the len() of body, which can mutate it.
- http.response_content_length This requires the response.
- http.route This is not available.
- net.sock.peer.name This would require a DNS lookup.
- net.sock.host.addr The request doesn't have access to the underlying socket.
- net.sock.host.port The request doesn't have access to the underlying socket.
-
- */
- n := 4 // Method, scheme, proto, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- peer, peerPort := splitHostPort(req.RemoteAddr)
- if peer != "" {
- n++
- if peerPort > 0 {
- n++
- }
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
-
- // For client IP, use, in order:
- // 1. The value passed in the options
- // 2. The value in the X-Forwarded-For header
- // 3. The peer address
- clientIP := opts.HTTPClientIP
- if clientIP == "" {
- clientIP = serverClientIP(req.Header.Get("X-Forwarded-For"))
- if clientIP == "" {
- clientIP = peer
- }
- }
- if clientIP != "" {
- n++
- }
-
- var target string
- if req.URL != nil {
- target = req.URL.Path
- if target != "" {
- n++
- }
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" && protoName != "http" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- attrs = slices.Grow(attrs, n)
-
- attrs = append(attrs, c.method(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
-
- if peer != "" {
- // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
- // file-path that would be interpreted with a sock family.
- attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
- if peerPort > 0 {
- attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
- }
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if clientIP != "" {
- attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
- }
-
- if target != "" {
- attrs = append(attrs, c.HTTPTargetKey.String(target))
- }
-
- if protoName != "" && protoName != "http" {
- attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
- }
- if protoVersion != "" {
- attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
- }
-
- return attrs
-}
-
-// ServerRequestMetrics returns metric attributes for an HTTP request received
-// by a server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and, if a port is used to route to the
-// server, that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "net.host.name". The following attributes are returned if they related
-// values are defined in req: "net.host.port".
-func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
-	http.scheme string
-	http.method string
-	net.host.name string
-	net.host.port int
-	net.protocol.name string
-	net.protocol.version string
- */
-
- n := 3 // Method, scheme, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
-
- attrs = append(attrs, c.methodMetric(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
- if protoName != "" {
- attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
- }
- if protoVersion != "" {
- attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
- }
-
- return attrs
-}
-
-func (c *httpConv) method(method string) attribute.KeyValue {
- if method == "" {
- return c.HTTPMethodKey.String(http.MethodGet)
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *httpConv) methodMetric(method string) attribute.KeyValue {
- method = strings.ToUpper(method)
- switch method {
- case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
- default:
- method = "_OTHER"
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return c.HTTPSchemeHTTPS
- }
- return c.HTTPSchemeHTTP
-}
-
-func serverClientIP(xForwardedFor string) string {
- if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
- xForwardedFor = xForwardedFor[:idx]
- }
- return xForwardedFor
-}
-
-func requiredHTTPPort(https bool, port int) int { // nolint:revive
- if https {
- if port > 0 && port != 443 {
- return port
- }
- } else {
- if port > 0 && port != 80 {
- return port
- }
- }
- return -1
-}
-
-// Return the request host and port from the first non-empty source.
-func firstHostPort(source ...string) (host string, port int) {
- for _, hostport := range source {
- host, port = splitHostPort(hostport)
- if host != "" || port > 0 {
- break
- }
- }
- return
-}
-
-// ClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func (c *httpConv) ClientStatus(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 400 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 500 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
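For readers tracking what the removed `semconvutil` helpers did: the client and server status mappings differ only in how 4xx codes are treated. The standalone sketch below reproduces that convention for illustration only; it mirrors the deleted helpers, not the replacement `internal/semconv` package.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

// clientStatus mirrors the removed ClientStatus: 4xx and 5xx are errors for a client span.
func clientStatus(code int) (codes.Code, string) {
	if code < 100 || code >= 600 {
		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
	}
	if code >= 400 {
		return codes.Error, ""
	}
	return codes.Unset, ""
}

// serverStatus mirrors the removed ServerStatus: only 5xx marks a server span as an error.
func serverStatus(code int) (codes.Code, string) {
	if code < 100 || code >= 600 {
		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
	}
	if code >= 500 {
		return codes.Error, ""
	}
	return codes.Unset, ""
}

func main() {
	c, _ := clientStatus(404)
	s, _ := serverStatus(404)
	fmt.Println(c, s) // A 404 is an error for the client span but not for the server span.
}
```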
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
deleted file mode 100644
index df97255e4..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconvutil/netconv.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-import (
- "net"
- "strconv"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-// NetTransport returns a trace attribute describing the transport protocol of the
-// passed network. See the net.Dial for information about acceptable network
-// values.
-func NetTransport(network string) attribute.KeyValue {
- return nc.Transport(network)
-}
-
-// netConv are the network semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type netConv struct {
- NetHostNameKey attribute.Key
- NetHostPortKey attribute.Key
- NetPeerNameKey attribute.Key
- NetPeerPortKey attribute.Key
- NetProtocolName attribute.Key
- NetProtocolVersion attribute.Key
- NetSockFamilyKey attribute.Key
- NetSockPeerAddrKey attribute.Key
- NetSockPeerPortKey attribute.Key
- NetSockHostAddrKey attribute.Key
- NetSockHostPortKey attribute.Key
- NetTransportOther attribute.KeyValue
- NetTransportTCP attribute.KeyValue
- NetTransportUDP attribute.KeyValue
- NetTransportInProc attribute.KeyValue
-}
-
-var nc = &netConv{
- NetHostNameKey: semconv.NetHostNameKey,
- NetHostPortKey: semconv.NetHostPortKey,
- NetPeerNameKey: semconv.NetPeerNameKey,
- NetPeerPortKey: semconv.NetPeerPortKey,
- NetProtocolName: semconv.NetProtocolNameKey,
- NetProtocolVersion: semconv.NetProtocolVersionKey,
- NetSockFamilyKey: semconv.NetSockFamilyKey,
- NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
- NetSockPeerPortKey: semconv.NetSockPeerPortKey,
- NetSockHostAddrKey: semconv.NetSockHostAddrKey,
- NetSockHostPortKey: semconv.NetSockHostPortKey,
- NetTransportOther: semconv.NetTransportOther,
- NetTransportTCP: semconv.NetTransportTCP,
- NetTransportUDP: semconv.NetTransportUDP,
- NetTransportInProc: semconv.NetTransportInProc,
-}
-
-func (c *netConv) Transport(network string) attribute.KeyValue {
- switch network {
- case "tcp", "tcp4", "tcp6":
- return c.NetTransportTCP
- case "udp", "udp4", "udp6":
- return c.NetTransportUDP
- case "unix", "unixgram", "unixpacket":
- return c.NetTransportInProc
- default:
- // "ip:*", "ip4:*", and "ip6:*" all are considered other.
- return c.NetTransportOther
- }
-}
-
-// Host returns attributes for a network host address.
-func (c *netConv) Host(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.HostName(h))
- if p > 0 {
- attrs = append(attrs, c.HostPort(p))
- }
- return attrs
-}
-
-func (c *netConv) HostName(name string) attribute.KeyValue {
- return c.NetHostNameKey.String(name)
-}
-
-func (c *netConv) HostPort(port int) attribute.KeyValue {
- return c.NetHostPortKey.Int(port)
-}
-
-func family(network, address string) string {
- switch network {
- case "unix", "unixgram", "unixpacket":
- return "unix"
- default:
- if ip := net.ParseIP(address); ip != nil {
- if ip.To4() == nil {
- return "inet6"
- }
- return "inet"
- }
- }
- return ""
-}
-
-// Peer returns attributes for a network peer address.
-func (c *netConv) Peer(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.PeerName(h))
- if p > 0 {
- attrs = append(attrs, c.PeerPort(p))
- }
- return attrs
-}
-
-func (c *netConv) PeerName(name string) attribute.KeyValue {
- return c.NetPeerNameKey.String(name)
-}
-
-func (c *netConv) PeerPort(port int) attribute.KeyValue {
- return c.NetPeerPortKey.Int(port)
-}
-
-func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
- return c.NetSockPeerAddrKey.String(addr)
-}
-
-func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
- return c.NetSockPeerPortKey.Int(port)
-}
-
-// splitHostPort splits a network address hostport of the form "host",
-// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
-// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
-// port.
-//
-// An empty host is returned if it is not provided or unparsable. A negative
-// port is returned if it is not provided or unparsable.
-func splitHostPort(hostport string) (host string, port int) {
- port = -1
-
- if strings.HasPrefix(hostport, "[") {
- addrEnd := strings.LastIndex(hostport, "]")
- if addrEnd < 0 {
- // Invalid hostport.
- return
- }
- if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
- host = hostport[1:addrEnd]
- return
- }
- } else {
- if i := strings.LastIndex(hostport, ":"); i < 0 {
- host = hostport
- return
- }
- }
-
- host, pStr, err := net.SplitHostPort(hostport)
- if err != nil {
- return
- }
-
- p, err := strconv.ParseUint(pStr, 10, 16)
- if err != nil {
- return
- }
- return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
-}
-
-func netProtocol(proto string) (name string, version string) {
- name, version, _ = strings.Cut(proto, "/")
- switch name {
- case "HTTP":
- name = "http"
- case "QUIC":
- name = "quic"
- case "SPDY":
- name = "spdy"
- default:
- name = strings.ToLower(name)
- }
- return name, version
-}
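The removed `netconv.go` helpers are thin wrappers over the standard library. The sketch below is a simplified approximation of `splitHostPort` and `netProtocol` (it skips the bracketed-IPv6 and zone handling of the deleted code) to show how the host/port pair and the protocol name/version were derived.

```go
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// splitHostPort approximates the removed helper: a missing or invalid port is
// reported as -1 and the input is treated as a bare host.
func splitHostPort(hostport string) (string, int) {
	host, portStr, err := net.SplitHostPort(hostport)
	if err != nil {
		return strings.Trim(hostport, "[]"), -1
	}
	p, err := strconv.ParseUint(portStr, 10, 16)
	if err != nil {
		return host, -1
	}
	return host, int(p)
}

// netProtocol approximates the removed helper: "HTTP/1.1" -> ("http", "1.1").
func netProtocol(proto string) (string, string) {
	name, version, _ := strings.Cut(proto, "/")
	return strings.ToLower(name), version
}

func main() {
	host, port := splitHostPort("example.com:8080")
	name, version := netProtocol("HTTP/2.0")
	fmt.Println(host, port, name, version) // example.com 8080 http 2.0
}
```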
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
index 44b86ad86..514ae6753 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -11,14 +11,14 @@ import (
"sync/atomic"
"time"
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/propagation"
-
"go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
)
// Transport implements the http.RoundTripper interface and wraps
@@ -129,6 +129,37 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
res, err := t.rt.RoundTrip(r)
+
+ // Defer metrics recording function to record the metrics on error or no error.
+	// Defer the metrics recording function so metrics are recorded whether or not the round trip returned an error.
+ metricAttributes := semconv.MetricAttributes{
+ Req: r,
+ AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...),
+ }
+
+ if err == nil {
+ metricAttributes.StatusCode = res.StatusCode
+ }
+
+ metricOpts := t.semconv.MetricOptions(metricAttributes)
+
+ metricData := semconv.MetricData{
+ RequestSize: bw.BytesRead(),
+ }
+
+ if err == nil {
+ readRecordFunc := func(int64) {}
+ res.Body = newWrappedBody(span, readRecordFunc, res.Body)
+ }
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+ metricData.ElapsedTime = elapsedTime
+
+ t.semconv.RecordMetrics(ctx, metricData, metricOpts)
+ }()
+
if err != nil {
// set error type attribute if the error is part of the predefined
// error types.
@@ -141,35 +172,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
span.SetStatus(codes.Error, err.Error())
span.End()
- return res, err
- }
-
- // metrics
- metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{
- Req: r,
- StatusCode: res.StatusCode,
- AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...),
- })
- // For handling response bytes we leverage a callback when the client reads the http response
- readRecordFunc := func(n int64) {
- t.semconv.RecordResponseSize(ctx, n, metricOpts)
+ return res, err
}
// traces
span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...)
span.SetStatus(t.semconv.Status(res.StatusCode))
- res.Body = newWrappedBody(span, readRecordFunc, res.Body)
-
- // Use floating point division here for higher precision (instead of Millisecond method).
- elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
-
- t.semconv.RecordMetrics(ctx, semconv.MetricData{
- RequestSize: bw.BytesRead(),
- ElapsedTime: elapsedTime,
- }, metricOpts)
-
return res, nil
}
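The `transport.go` change above moves metric recording into a deferred closure so the error path and the success path share a single recording site, while elapsed time is still computed with floating-point division. A generic sketch of that pattern follows; `recordMetrics` and `roundTrip` are hypothetical placeholders, not the otelhttp internals.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

// recordMetrics stands in for the exporter-specific recording done by the
// instrumentation; it is a placeholder for this sketch.
func recordMetrics(ctx context.Context, status int, elapsedMs float64) {
	fmt.Printf("status=%d elapsed=%.3fms\n", status, elapsedMs)
}

// roundTrip records metrics in a deferred closure so both the error and the
// success path are covered by one recording site.
func roundTrip(rt http.RoundTripper, r *http.Request) (res *http.Response, err error) {
	start := time.Now()
	defer func() {
		status := 0
		if err == nil {
			status = res.StatusCode
		}
		// Floating-point division keeps sub-millisecond precision.
		elapsed := float64(time.Since(start)) / float64(time.Millisecond)
		recordMetrics(r.Context(), status, elapsed)
	}()
	return rt.RoundTrip(r)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	}))
	defer srv.Close()

	req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
	if res, err := roundTrip(http.DefaultTransport, req); err == nil {
		res.Body.Close()
	}
}
```

This keeps metrics consistent even when `RoundTrip` fails, which is the intent of the vendored change.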
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
index 6be4c1fde..dfb53cf1f 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -5,6 +5,6 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http
// Version is the current release version of the otelhttp instrumentation.
func Version() string {
- return "0.61.0"
+ return "0.63.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/vendor/go.opentelemetry.io/otel/.clomonitor.yml b/vendor/go.opentelemetry.io/otel/.clomonitor.yml
new file mode 100644
index 000000000..128d61a22
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.clomonitor.yml
@@ -0,0 +1,3 @@
+exemptions:
+ - check: artifacthub_badge
+ reason: "Artifact Hub doesn't support Go packages"
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
index 6bf3abc41..2b53a25e1 100644
--- a/vendor/go.opentelemetry.io/otel/.codespellignore
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -7,3 +7,4 @@ ans
nam
valu
thirdparty
+addOpt
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index 888e5da80..b01762ffc 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -10,6 +10,7 @@ linters:
- depguard
- errcheck
- errorlint
+ - gocritic
- godot
- gosec
- govet
@@ -66,8 +67,6 @@ linters:
desc: Do not use cross-module internal packages.
- pkg: go.opentelemetry.io/otel/internal/internaltest
desc: Do not use cross-module internal packages.
- - pkg: go.opentelemetry.io/otel/internal/matchers
- desc: Do not use cross-module internal packages.
otlp-internal:
files:
- '!**/exporters/otlp/internal/**/*.go'
@@ -88,6 +87,18 @@ linters:
deny:
- pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal
desc: Do not use cross-module internal packages.
+ gocritic:
+ disabled-checks:
+ - appendAssign
+ - commentedOutCode
+ - dupArg
+ - hugeParam
+ - importShadow
+ - preferDecodeRune
+ - rangeValCopy
+ - unnamedResult
+ - whyNoLint
+ enable-all: true
godot:
exclude:
# Exclude links.
@@ -169,7 +180,10 @@ linters:
- fmt.Print
- fmt.Printf
- fmt.Println
+ - name: unused-parameter
+ - name: unused-receiver
- name: unnecessary-stmt
+ - name: use-any
- name: useless-break
- name: var-declaration
- name: var-naming
@@ -190,6 +204,10 @@ linters:
- legacy
- std-error-handling
rules:
+ - linters:
+ - revive
+ path: schema/v.*/types/.*
+ text: avoid meaningless package names
# TODO: Having appropriate comments for exported objects helps development,
# even for objects in internal packages. Appropriate comments for all
# exported objects should be added and this exclusion removed.
@@ -222,10 +240,6 @@ linters:
- linters:
- gosec
text: 'G402: TLS MinVersion too low.'
- paths:
- - third_party$
- - builtin$
- - examples$
issues:
max-issues-per-linter: 0
max-same-issues: 0
@@ -235,14 +249,12 @@ formatters:
- goimports
- golines
settings:
+ gofumpt:
+ extra-rules: true
goimports:
local-prefixes:
- - go.opentelemetry.io
+ - go.opentelemetry.io/otel
golines:
max-len: 120
exclusions:
generated: lax
- paths:
- - third_party$
- - builtin$
- - examples$
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
index 40d62fa2e..532850588 100644
--- a/vendor/go.opentelemetry.io/otel/.lycheeignore
+++ b/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -2,5 +2,8 @@ http://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/
https://github.com/open-telemetry/opentelemetry-go/projects
+# Weaver model URL for semantic-conventions repository.
+https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+]
file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
+http://4.3.2.1:78/user/123
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 648e4abab..f3abcfdc2 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -11,6 +11,148 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
+## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29
+
+This release is the last to support [Go 1.23].
+The next release will require at least [Go 1.24].
+
+### Added
+
+- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772)
+- Add template attribute functions to the `go.opentelemetry.io/otel/semconv/v1.34.0` package. (#6939)
+ - `ContainerLabel`
+ - `DBOperationParameter`
+ - `DBSystemParameter`
+ - `HTTPRequestHeader`
+ - `HTTPResponseHeader`
+ - `K8SCronJobAnnotation`
+ - `K8SCronJobLabel`
+ - `K8SDaemonSetAnnotation`
+ - `K8SDaemonSetLabel`
+ - `K8SDeploymentAnnotation`
+ - `K8SDeploymentLabel`
+ - `K8SJobAnnotation`
+ - `K8SJobLabel`
+ - `K8SNamespaceAnnotation`
+ - `K8SNamespaceLabel`
+ - `K8SNodeAnnotation`
+ - `K8SNodeLabel`
+ - `K8SPodAnnotation`
+ - `K8SPodLabel`
+ - `K8SReplicaSetAnnotation`
+ - `K8SReplicaSetLabel`
+ - `K8SStatefulSetAnnotation`
+ - `K8SStatefulSetLabel`
+ - `ProcessEnvironmentVariable`
+ - `RPCConnectRPCRequestMetadata`
+ - `RPCConnectRPCResponseMetadata`
+ - `RPCGRPCRequestMetadata`
+ - `RPCGRPCResponseMetadata`
+- Add `ErrorType` attribute helper function to the `go.opentelemetry.io/otel/semconv/v1.34.0` package. (#6962)
+- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. (#6968)
+- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179)
+- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. (#7001)
+- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`.
+ Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209)
+- The `go.opentelemetry.io/otel/semconv/v1.36.0` package.
+ The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0`. (#7032, #7041)
+- Add support for configuring Prometheus name translation using `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111)
+- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`.
+ Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121)
+- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`.
+ Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133)
+- Support testing of [Go 1.25]. (#7187)
+- The `go.opentelemetry.io/otel/semconv/v1.37.0` package.
+ The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7254)
+
+### Changed
+
+- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791)
+- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908)
+- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. (#7094)
+
+### Fixed
+
+- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002)
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it's already present in metric name. (#7088)
+- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195)
+- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199)
+
+### Deprecated
+
+- Deprecate `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111)
+- Deprecate support for `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use `WithCardinalityLimit` option instead. (#7166)
+
+## [0.59.1] 2025-07-21
+
+### Changed
+
+- Retract `v0.59.0` release of `go.opentelemetry.io/otel/exporters/prometheus` module which appends incorrect unit suffixes. (#7046)
+- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF8 is enabled.
+ It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. (#7044)
+
+### Fixed
+
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets.
+ E.g. `{spans}`. (#7044)
+
+## [1.37.0/0.59.0/0.13.0] 2025-06-25
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.33.0` package.
+ The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0`. (#6799)
+- The `go.opentelemetry.io/otel/semconv/v1.34.0` package.
+ The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812)
+- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
+- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
+- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825)
+- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825)
+- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839)
+
+### Changed
+
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836)
+- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864)
+- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898)
+
+### Fixed
+
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710)
+- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822)
+- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914)
+
+### Removed
+
+- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770)
+
+## [0.12.2] 2025-05-22
+
+### Fixed
+
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804)
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804)
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. (#6804)
+
+## [0.12.1] 2025-05-21
+
+### Fixed
+
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800)
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800)
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800)
+
## [1.36.0/0.58.0/0.12.0] 2025-05-20
### Added
@@ -3288,7 +3430,11 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD
+[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0
+[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
+[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
+[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0
[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0
[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
@@ -3381,6 +3527,7 @@ It contains api and sdk for trace and meter.
+[Go 1.25]: https://go.dev/doc/go1.25
[Go 1.24]: https://go.dev/doc/go1.24
[Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index 945a07d2b..26a03aed1 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners
#
-* @MrAlias @XSAM @dashpole @pellared @dmathieu
+* @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125
CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 1902dac05..0b3ae855c 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -109,10 +109,9 @@ A PR is considered **ready to merge** when:
This is not enforced through automation, but needs to be validated by the
maintainer merging.
- * The qualified approvals need to be from [Approver]s/[Maintainer]s
- affiliated with different companies. Two qualified approvals from
- [Approver]s or [Maintainer]s affiliated with the same company counts as a
- single qualified approval.
+  * At least one of the qualified approvals needs to be from an
+ [Approver]/[Maintainer] affiliated with a different company than the author
+ of the PR.
* PRs introducing changes that have already been discussed and consensus
reached only need one qualified approval. The discussion and resolution
needs to be linked to the PR.
@@ -193,6 +192,35 @@ should have `go test -bench` output in their description.
should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
output in their description.
+## Dependencies
+
+This project uses [Go Modules] for dependency management. All modules will use
+`go.mod` to explicitly list all direct and indirect dependencies, ensuring a
+clear dependency graph. The `go.sum` file for each module will be committed to
+the repository and used to verify the integrity of downloaded modules,
+preventing malicious tampering.
+
+This project uses automated dependency update tools (i.e. dependabot,
+renovatebot) to manage updates to dependencies. This ensures that dependencies
+are kept up-to-date with the latest security patches and features and are
+reviewed before being merged. If you would like to propose a change to a
+dependency it should be done through a pull request that updates the `go.mod`
+file and includes a description of the change.
+
+See the [versioning and compatibility](./VERSIONING.md) policy for more details
+about dependency compatibility.
+
+[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more
+
+### Environment Dependencies
+
+This project does not partition dependencies based on the environment (i.e.
+`development`, `staging`, `production`).
+
+Only the dependencies explicitly included in the released modules have been
+tested and verified to work with the released code. No other guarantee is made
+about the compatibility of other dependencies.
+
## Documentation
Each (non-internal, non-test) package must be documented using
@@ -234,6 +262,10 @@ For a non-comprehensive but foundational overview of these best practices
the [Effective Go](https://golang.org/doc/effective_go.html) documentation
is an excellent starting place.
+We also recommend following the
+[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) page,
+which collects common comments made during reviews of Go code.
+
As a convenience for developers building this project the `make precommit`
will format, lint, validate, and in some cases fix the changes you plan to
submit. This check will need to pass for your changes to be able to be
@@ -587,6 +619,10 @@ See also:
### Testing
+We allow using [`testify`](https://github.com/stretchr/testify) even though
+it is seen as non-idiomatic according to
+the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page.
+
The tests should never leak goroutines.
Use the term `ConcurrentSafe` in the test name when it aims to verify the
@@ -641,20 +677,28 @@ should be canceled.
## Approvers and Maintainers
-### Triagers
+### Maintainers
-- [Alex Kats](https://github.com/akats7), Capital One
-- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
+- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832))
+- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70))
+- [Robert PajÄ…k](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2))
+- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA))
+- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A))
+
+For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
### Approvers
-### Maintainers
+- [Flc](https://github.com/flc1125), Independent
+
+For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
-- [Damien Mathieu](https://github.com/dmathieu), Elastic
-- [David Ashpole](https://github.com/dashpole), Google
-- [Robert PajÄ…k](https://github.com/pellared), Splunk
-- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
-- [Tyler Yahn](https://github.com/MrAlias), Splunk
+### Triagers
+
+- [Alex Kats](https://github.com/akats7), Capital One
+- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
+
+For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager).
### Emeritus
@@ -666,6 +710,8 @@ should be canceled.
- [Josh MacDonald](https://github.com/jmacd)
- [Liz Fong-Jones](https://github.com/lizthegrey)
+For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
+
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE
index 261eeb9e9..f1aee0f11 100644
--- a/vendor/go.opentelemetry.io/otel/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index 62a56f4d3..bc0f1f92d 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -34,9 +34,6 @@ $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS)
MULTIMOD = $(TOOLS)/multimod
$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
-SEMCONVGEN = $(TOOLS)/semconvgen
-$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
-
CROSSLINK = $(TOOLS)/crosslink
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
@@ -71,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools
-tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker
@@ -284,7 +281,7 @@ semconv-generate: $(SEMCONVKIT)
docker run --rm \
-u $(DOCKER_USER) \
--env HOME=/tmp/weaver \
- --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \
+ --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \
--mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \
--mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \
$(WEAVER_IMAGE) registry generate \
@@ -293,7 +290,7 @@ semconv-generate: $(SEMCONVKIT)
--param tag=$(TAG) \
go \
/home/weaver/target
- $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
+ $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)"
.PHONY: gorelease
gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index b60078812..6b7ab5f21 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -7,6 +7,7 @@
[](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go)
[](https://www.bestpractices.dev/projects/9996)
[](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go)
+[](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license)
[](https://cloud-native.slack.com/archives/C01NPAXACKT)
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
@@ -52,18 +53,25 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
|----------|------------|--------------|
+| Ubuntu | 1.25 | amd64 |
| Ubuntu | 1.24 | amd64 |
| Ubuntu | 1.23 | amd64 |
+| Ubuntu | 1.25 | 386 |
| Ubuntu | 1.24 | 386 |
| Ubuntu | 1.23 | 386 |
+| Ubuntu | 1.25 | arm64 |
| Ubuntu | 1.24 | arm64 |
| Ubuntu | 1.23 | arm64 |
+| macOS 13 | 1.25 | amd64 |
| macOS 13 | 1.24 | amd64 |
| macOS 13 | 1.23 | amd64 |
+| macOS | 1.25 | arm64 |
| macOS | 1.24 | arm64 |
| macOS | 1.23 | arm64 |
+| Windows | 1.25 | amd64 |
| Windows | 1.24 | amd64 |
| Windows | 1.23 | amd64 |
+| Windows | 1.25 | 386 |
| Windows | 1.24 | 386 |
| Windows | 1.23 | 386 |
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 7c1a9119d..1ddcdef03 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -112,6 +112,29 @@ It is critical you make sure the version you push upstream is correct.
Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release.
+### Sign the Release Artifact
+
+To ensure we comply with CNCF best practices, we need to sign the release artifact.
+The tarball attached to the GitHub release needs to be signed with your GPG key.
+
+Follow [these steps] to sign the release artifact and upload it to GitHub.
+You can use [this script] to verify the contents of the tarball before signing it.
+
+Be sure to use the correct GPG key when signing the release artifact.
+
+```terminal
+gpg --local-user <key-id> --armor --detach-sign opentelemetry-go-<version>.tar.gz
+```
+
+You can verify the signature with:
+
+```terminal
+gpg --verify opentelemetry-go-<version>.tar.gz.asc opentelemetry-go-<version>.tar.gz
+```
+
+[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases
+[this script]: https://github.com/MrAlias/attest-sh
+
## Post-Release
### Contrib Repository
diff --git a/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml
new file mode 100644
index 000000000..8041fc62e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml
@@ -0,0 +1,203 @@
+header:
+ schema-version: "1.0.0"
+ expiration-date: "2026-08-04T00:00:00.000Z"
+ last-updated: "2025-08-04"
+ last-reviewed: "2025-08-04"
+ commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8
+ project-url: https://github.com/open-telemetry/opentelemetry-go
+ project-release: "v1.37.0"
+ changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md
+ license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE
+
+project-lifecycle:
+ status: active
+ bug-fixes-only: false
+ core-maintainers:
+ - https://github.com/dmathieu
+ - https://github.com/dashpole
+ - https://github.com/pellared
+ - https://github.com/XSAM
+ - https://github.com/MrAlias
+ release-process: |
+ See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md
+
+contribution-policy:
+ accepts-pull-requests: true
+ accepts-automated-pull-requests: true
+ automated-tools-list:
+ - automated-tool: dependabot
+ action: allowed
+ comment: Automated dependency updates are accepted.
+ - automated-tool: renovatebot
+ action: allowed
+ comment: Automated dependency updates are accepted.
+ - automated-tool: opentelemetrybot
+ action: allowed
+ comment: Automated OpenTelemetry actions are accepted.
+ contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
+ code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md
+
+documentation:
+ - https://pkg.go.dev/go.opentelemetry.io/otel
+ - https://opentelemetry.io/docs/instrumentation/go/
+
+distribution-points:
+ - pkg:golang/go.opentelemetry.io/otel
+ - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus
+ - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test
+ - pkg:golang/go.opentelemetry.io/otel/bridge/opentracing
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+ - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
+ - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace
+ - pkg:golang/go.opentelemetry.io/otel/exporters/zipkin
+ - pkg:golang/go.opentelemetry.io/otel/metric
+ - pkg:golang/go.opentelemetry.io/otel/sdk
+ - pkg:golang/go.opentelemetry.io/otel/sdk/metric
+ - pkg:golang/go.opentelemetry.io/otel/trace
+ - pkg:golang/go.opentelemetry.io/otel/exporters/prometheus
+ - pkg:golang/go.opentelemetry.io/otel/log
+ - pkg:golang/go.opentelemetry.io/otel/log/logtest
+ - pkg:golang/go.opentelemetry.io/otel/sdk/log
+ - pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
+ - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog
+ - pkg:golang/go.opentelemetry.io/otel/schema
+
+security-artifacts:
+ threat-model:
+ threat-model-created: false
+ comment: |
+ No formal threat model created yet.
+ self-assessment:
+ self-assessment-created: false
+ comment: |
+ No formal self-assessment yet.
+
+security-testing:
+ - tool-type: sca
+ tool-name: Dependabot
+ tool-version: latest
+ tool-url: https://github.com/dependabot
+ tool-rulesets:
+ - built-in
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ Automated dependency updates.
+ - tool-type: sast
+ tool-name: golangci-lint
+ tool-version: latest
+ tool-url: https://github.com/golangci/golangci-lint
+ tool-rulesets:
+ - built-in
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ Static analysis in CI.
+ - tool-type: fuzzing
+ tool-name: OSS-Fuzz
+ tool-version: latest
+ tool-url: https://github.com/google/oss-fuzz
+ tool-rulesets:
+ - default
+ integration:
+ ad-hoc: false
+ ci: false
+ before-release: false
+ comment: |
+ OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details.
+ - tool-type: sast
+ tool-name: CodeQL
+ tool-version: latest
+ tool-url: https://github.com/github/codeql
+ tool-rulesets:
+ - default
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details.
+ - tool-type: sca
+ tool-name: govulncheck
+ tool-version: latest
+ tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck
+ tool-rulesets:
+ - default
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration.
+
+security-assessments:
+ - auditor-name: 7ASecurity
+ auditor-url: https://7asecurity.com
+ auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf
+ report-year: 2023
+ comment: |
+ This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository.
+
+security-contacts:
+ - type: email
+ value: cncf-opentelemetry-security@lists.cncf.io
+ primary: true
+ - type: website
+ value: https://github.com/open-telemetry/opentelemetry-go/security/policy
+ primary: false
+
+vulnerability-reporting:
+ accepts-vulnerability-reports: true
+ email-contact: cncf-opentelemetry-security@lists.cncf.io
+ security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy
+ comment: |
+ Security issues should be reported via email or GitHub security policy page.
+
+dependencies:
+ third-party-packages: true
+ dependencies-lists:
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod
+ dependencies-lifecycle:
+ policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
+ comment: |
+ Dependency lifecycle managed via go.mod and renovatebot.
+ env-dependencies-policy:
+ policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
+ comment: |
+ See contributing policy for environment usage.
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
index 318e42fca..6333d34b3 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -78,7 +78,7 @@ func DefaultEncoder() Encoder {
defaultEncoderOnce.Do(func() {
defaultEncoderInstance = &defaultAttrEncoder{
pool: sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &bytes.Buffer{}
},
},
@@ -96,11 +96,11 @@ func (d *defaultAttrEncoder) Encode(iter Iterator) string {
for iter.Next() {
i, keyValue := iter.IndexedAttribute()
if i > 0 {
- _, _ = buf.WriteRune(',')
+ _ = buf.WriteByte(',')
}
copyAndEscape(buf, string(keyValue.Key))
- _, _ = buf.WriteRune('=')
+ _ = buf.WriteByte('=')
if keyValue.Value.Type() == STRING {
copyAndEscape(buf, keyValue.Value.AsString())
@@ -122,14 +122,14 @@ func copyAndEscape(buf *bytes.Buffer, val string) {
for _, ch := range val {
switch ch {
case '=', ',', escapeChar:
- _, _ = buf.WriteRune(escapeChar)
+ _ = buf.WriteByte(escapeChar)
}
_, _ = buf.WriteRune(ch)
}
}
-// Valid returns true if this encoder ID was allocated by
-// `NewEncoderID`. Invalid encoder IDs will not be cached.
+// Valid reports whether this encoder ID was allocated by
+// [NewEncoderID]. Invalid encoder IDs will not be cached.
func (id EncoderID) Valid() bool {
return id.value != 0
}
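
For context on the encoder touched here, a minimal usage sketch going through the public attribute API (attribute.NewSet, Set.Iter, DefaultEncoder().Encode); values containing '=' or ',' are escaped by copyAndEscape above:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("query", "a=1,b=2"), // '=' and ',' get escaped
		attribute.Bool("cached", true),
	)
	// Keys are iterated in sorted order and joined as key=value pairs.
	fmt.Println(attribute.DefaultEncoder().Encode(set.Iter()))
}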
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
index 3eeaa5d44..624ebbe38 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/filter.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -15,8 +15,8 @@ type Filter func(KeyValue) bool
//
// If keys is empty a deny-all filter is returned.
func NewAllowKeysFilter(keys ...Key) Filter {
- if len(keys) <= 0 {
- return func(kv KeyValue) bool { return false }
+ if len(keys) == 0 {
+ return func(KeyValue) bool { return false }
}
allowed := make(map[Key]struct{}, len(keys))
@@ -34,8 +34,8 @@ func NewAllowKeysFilter(keys ...Key) Filter {
//
// If keys is empty an allow-all filter is returned.
func NewDenyKeysFilter(keys ...Key) Filter {
- if len(keys) <= 0 {
- return func(kv KeyValue) bool { return true }
+ if len(keys) == 0 {
+ return func(KeyValue) bool { return true }
}
forbid := make(map[Key]struct{}, len(keys))
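
A small usage sketch for the allow/deny filters changed above; with no keys, NewAllowKeysFilter denies everything and NewDenyKeysFilter allows everything:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Keep only the listed keys; anything else is filtered out.
	allow := attribute.NewAllowKeysFilter("http.method", "http.route")

	for _, kv := range []attribute.KeyValue{
		attribute.String("http.method", "GET"),
		attribute.String("http.target", "/users/42"),
	} {
		fmt.Println(kv.Key, allow(kv)) // http.method true, http.target false
	}
}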
diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
index b76d2bbfd..087550430 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
@@ -12,7 +12,7 @@ import (
)
// BoolSliceValue converts a bool slice into an array with same elements as slice.
-func BoolSliceValue(v []bool) interface{} {
+func BoolSliceValue(v []bool) any {
var zero bool
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -20,7 +20,7 @@ func BoolSliceValue(v []bool) interface{} {
}
// Int64SliceValue converts an int64 slice into an array with same elements as slice.
-func Int64SliceValue(v []int64) interface{} {
+func Int64SliceValue(v []int64) any {
var zero int64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -28,7 +28,7 @@ func Int64SliceValue(v []int64) interface{} {
}
// Float64SliceValue converts a float64 slice into an array with same elements as slice.
-func Float64SliceValue(v []float64) interface{} {
+func Float64SliceValue(v []float64) any {
var zero float64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -36,7 +36,7 @@ func Float64SliceValue(v []float64) interface{} {
}
// StringSliceValue converts a string slice into an array with same elements as slice.
-func StringSliceValue(v []string) interface{} {
+func StringSliceValue(v []string) any {
var zero string
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -44,7 +44,7 @@ func StringSliceValue(v []string) interface{} {
}
// AsBoolSlice converts a bool array into a slice into with same elements as array.
-func AsBoolSlice(v interface{}) []bool {
+func AsBoolSlice(v any) []bool {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -57,7 +57,7 @@ func AsBoolSlice(v interface{}) []bool {
}
// AsInt64Slice converts an int64 array into a slice into with same elements as array.
-func AsInt64Slice(v interface{}) []int64 {
+func AsInt64Slice(v any) []int64 {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -70,7 +70,7 @@ func AsInt64Slice(v interface{}) []int64 {
}
// AsFloat64Slice converts a float64 array into a slice into with same elements as array.
-func AsFloat64Slice(v interface{}) []float64 {
+func AsFloat64Slice(v any) []float64 {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -83,7 +83,7 @@ func AsFloat64Slice(v interface{}) []float64 {
}
// AsStringSlice converts a string array into a slice into with same elements as array.
-func AsStringSlice(v interface{}) []string {
+func AsStringSlice(v any) []string {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
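
The helpers above convert slices to fixed-size arrays so attribute values stay comparable. A standalone sketch of the same reflect technique (this package is internal, so the example reimplements the pattern rather than importing it):

package main

import (
	"fmt"
	"reflect"
)

// toArray copies a slice into an array value of matching length, mirroring
// Int64SliceValue above: arrays are comparable, slices are not.
func toArray(v []int64) any {
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64(0)))).Elem()
	reflect.Copy(cp, reflect.ValueOf(v))
	return cp.Interface()
}

func main() {
	a, b := toArray([]int64{1, 2, 3}), toArray([]int64{1, 2, 3})
	fmt.Println(a == b) // true: equal arrays compare equal as interface values
}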
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
index f2ba89ce4..8df6249f0 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
@@ -25,8 +25,8 @@ type oneIterator struct {
attr KeyValue
}
-// Next moves the iterator to the next position. Returns false if there are no
-// more attributes.
+// Next moves the iterator to the next position.
+// Next reports whether there are more attributes.
func (i *Iterator) Next() bool {
i.idx++
return i.idx < i.Len()
@@ -106,7 +106,8 @@ func (oi *oneIterator) advance() {
}
}
-// Next returns true if there is another attribute available.
+// Next moves the iterator to the next position.
+// Next reports whether there is another attribute available.
func (m *MergeIterator) Next() bool {
if m.one.done && m.two.done {
return false
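
The reworded Next contract in practice, as a short sketch using the public iterator:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(attribute.Int("a", 1), attribute.Int("b", 2))
	iter := set.Iter()
	for iter.Next() { // Next reports whether another attribute is available
		kv := iter.Attribute()
		fmt.Println(kv.Key, kv.Value.Emit())
	}
}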
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
index d9a22c650..80a9e5643 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/key.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/key.go
@@ -117,7 +117,7 @@ func (k Key) StringSlice(v []string) KeyValue {
}
}
-// Defined returns true for non-empty keys.
+// Defined reports whether the key is not empty.
func (k Key) Defined() bool {
return len(k) != 0
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
index 3028f9a40..8c6928ca7 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/kv.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -13,7 +13,7 @@ type KeyValue struct {
Value Value
}
-// Valid returns if kv is a valid OpenTelemetry attribute.
+// Valid reports whether kv is a valid OpenTelemetry attribute.
func (kv KeyValue) Valid() bool {
return kv.Key.Defined() && kv.Value.Type() != INVALID
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 6cbefcead..64735d382 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -31,11 +31,11 @@ type (
// Distinct is a unique identifier of a Set.
//
- // Distinct is designed to be ensures equivalence stability: comparisons
- // will return the save value across versions. For this reason, Distinct
- // should always be used as a map key instead of a Set.
+ // Distinct is designed to ensure equivalence stability: comparisons will
+ // return the same value across versions. For this reason, Distinct should
+ // always be used as a map key instead of a Set.
Distinct struct {
- iface interface{}
+ iface any
}
// Sortable implements sort.Interface, used for sorting KeyValue.
@@ -70,7 +70,7 @@ func (d Distinct) reflectValue() reflect.Value {
return reflect.ValueOf(d.iface)
}
-// Valid returns true if this value refers to a valid Set.
+// Valid reports whether this value refers to a valid Set.
func (d Distinct) Valid() bool {
return d.iface != nil
}
@@ -120,7 +120,7 @@ func (l *Set) Value(k Key) (Value, bool) {
return Value{}, false
}
-// HasValue tests whether a key is defined in this set.
+// HasValue reports whether a key is defined in this set.
func (l *Set) HasValue(k Key) bool {
if l == nil {
return false
@@ -155,7 +155,7 @@ func (l *Set) Equivalent() Distinct {
return l.equivalent
}
-// Equals returns true if the argument set is equivalent to this set.
+// Equals reports whether the argument set is equivalent to this set.
func (l *Set) Equals(o *Set) bool {
return l.Equivalent() == o.Equivalent()
}
@@ -344,7 +344,7 @@ func computeDistinct(kvs []KeyValue) Distinct {
// computeDistinctFixed computes a Distinct for small slices. It returns nil
// if the input is too large for this code path.
-func computeDistinctFixed(kvs []KeyValue) interface{} {
+func computeDistinctFixed(kvs []KeyValue) any {
switch len(kvs) {
case 1:
return [1]KeyValue(kvs)
@@ -373,7 +373,7 @@ func computeDistinctFixed(kvs []KeyValue) interface{} {
// computeDistinctReflect computes a Distinct using reflection, works for any
// size input.
-func computeDistinctReflect(kvs []KeyValue) interface{} {
+func computeDistinctReflect(kvs []KeyValue) any {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs {
*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
@@ -387,7 +387,7 @@ func (l *Set) MarshalJSON() ([]byte, error) {
}
// MarshalLog is the marshaling function used by the logging system to represent this Set.
-func (l Set) MarshalLog() interface{} {
+func (l Set) MarshalLog() any {
kvs := make(map[string]string)
for _, kv := range l.ToSlice() {
kvs[string(kv.Key)] = kv.Value.Emit()
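
The corrected Distinct doc comment is the substantive change here; a sketch of the recommended pattern of keying maps by Set.Equivalent() rather than by Set:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	counts := map[attribute.Distinct]int{}

	s1 := attribute.NewSet(attribute.String("host", "a"), attribute.Int("port", 80))
	s2 := attribute.NewSet(attribute.Int("port", 80), attribute.String("host", "a"))

	counts[s1.Equivalent()]++
	counts[s2.Equivalent()]++ // same identity regardless of declaration order

	fmt.Println(len(counts)) // 1
}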
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
index 817eecacf..653c33a86 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -22,7 +22,7 @@ type Value struct {
vtype Type
numeric uint64
stringly string
- slice interface{}
+ slice any
}
const (
@@ -199,8 +199,8 @@ func (v Value) asStringSlice() []string {
type unknownValueType struct{}
-// AsInterface returns Value's data as interface{}.
-func (v Value) AsInterface() interface{} {
+// AsInterface returns Value's data as any.
+func (v Value) AsInterface() any {
switch v.Type() {
case BOOL:
return v.AsBool()
@@ -262,7 +262,7 @@ func (v Value) Emit() string {
func (v Value) MarshalJSON() ([]byte, error) {
var jsonVal struct {
Type string
- Value interface{}
+ Value any
}
jsonVal.Type = v.Type().String()
jsonVal.Value = v.AsInterface()
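
A brief sketch of the Value accessors touched above, including the JSON shape produced by MarshalJSON:

package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	v := attribute.Int64Value(42)
	fmt.Println(v.Type(), v.Emit(), v.AsInterface())

	b, _ := json.Marshal(v) // e.g. {"Type":"INT64","Value":42}
	fmt.Println(string(b))
}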
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 0e1fe2422..f83a448ec 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -812,7 +812,7 @@ var safeKeyCharset = [utf8.RuneSelf]bool{
// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
// Baggage name is a valid, non-empty UTF-8 string.
func validateBaggageName(s string) bool {
- if len(s) == 0 {
+ if s == "" {
return false
}
@@ -828,7 +828,7 @@ func validateBaggageValue(s string) bool {
// validateKey checks if the string is a valid W3C Baggage key.
func validateKey(s string) bool {
- if len(s) == 0 {
+ if s == "" {
return false
}
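
The validation helpers above back the public constructors; a minimal round-trip sketch with the baggage package:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewMember and New reject names/values that fail the validators above.
	m, err := baggage.NewMember("user_id", "42")
	if err != nil {
		panic(err)
	}
	bag, err := baggage.New(m)
	if err != nil {
		panic(err)
	}
	ctx := baggage.ContextWithBaggage(context.Background(), bag)
	fmt.Println(baggage.FromContext(ctx).String()) // user_id=42
}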
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index 49a35b122..d48847ed8 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -67,7 +67,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return errors.New("nil receiver passed to UnmarshalJSON")
}
- var x interface{}
+ var x any
if err := json.Unmarshal(b, &x); err != nil {
return err
}
@@ -102,5 +102,5 @@ func (c *Code) MarshalJSON() ([]byte, error) {
if !ok {
return nil, fmt.Errorf("invalid code: %d", *c)
}
- return []byte(fmt.Sprintf("%q", str)), nil
+ return fmt.Appendf(nil, "%q", str), nil
}
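
A quick sketch of the JSON round-trip these methods implement (the change swaps fmt.Sprintf for fmt.Appendf, skipping the intermediate string):

package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

func main() {
	c := codes.Error
	b, _ := json.Marshal(&c) // pointer receiver: "Error"
	fmt.Println(string(b))

	var decoded codes.Code
	_ = json.Unmarshal([]byte(`"Ok"`), &decoded)
	fmt.Println(decoded) // Ok
}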
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
index 51fb76b30..a311fbb48 100644
--- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
+++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -1,4 +1,4 @@
# This is a renovate-friendly source of Docker images.
-FROM python:3.13.3-slim-bullseye@sha256:9e3f9243e06fd68eb9519074b49878eda20ad39a855fac51aaffb741de20726e AS python
-FROM otel/weaver:v0.15.0@sha256:1cf1c72eaed57dad813c2e359133b8a15bd4facf305aae5b13bdca6d3eccff56 AS weaver
+FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python
+FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
index adbca7d34..86d7f4ba0 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -41,22 +41,22 @@ func GetLogger() logr.Logger {
// Info prints messages about the general state of the API or SDK.
// This should usually be less than 5 messages a minute.
-func Info(msg string, keysAndValues ...interface{}) {
+func Info(msg string, keysAndValues ...any) {
GetLogger().V(4).Info(msg, keysAndValues...)
}
// Error prints messages about exceptional states of the API or SDK.
-func Error(err error, msg string, keysAndValues ...interface{}) {
+func Error(err error, msg string, keysAndValues ...any) {
GetLogger().Error(err, msg, keysAndValues...)
}
// Debug prints messages about all internal changes in the API or SDK.
-func Debug(msg string, keysAndValues ...interface{}) {
+func Debug(msg string, keysAndValues ...any) {
GetLogger().V(8).Info(msg, keysAndValues...)
}
// Warn prints messages about warnings in the API or SDK.
// Not an error but is likely more important than an informational event.
-func Warn(msg string, keysAndValues ...interface{}) {
+func Warn(msg string, keysAndValues ...any) {
GetLogger().V(1).Info(msg, keysAndValues...)
}
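
These helpers write to whatever logr.Logger is registered globally; a sketch of wiring that up from application code, assuming the go-logr/stdr bridge is available:

package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	stdr.SetVerbosity(8) // let Debug-level (V(8)) messages through
	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
	// From here on, Info/Warn/Debug/Error above end up on os.Stderr.
}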
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
index 49e4ac4fa..bf5cf3119 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -26,6 +26,7 @@ import (
"sync/atomic"
"go.opentelemetry.io/auto/sdk"
+
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
index 261eeb9e9..f1aee0f11 100644
--- a/vendor/go.opentelemetry.io/otel/metric/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
index ebda5026d..051882602 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -20,7 +20,7 @@ type Baggage struct{}
var _ TextMapPropagator = Baggage{}
// Inject sets baggage key-values from ctx into the carrier.
-func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+func (Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
bStr := baggage.FromContext(ctx).String()
if bStr != "" {
carrier.Set(baggageHeader, bStr)
@@ -30,7 +30,7 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
// Extract returns a copy of parent with the baggage from the carrier added.
// If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked
// for multiple values extraction. Otherwise, Get is called.
-func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+func (Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
if multiCarrier, ok := carrier.(ValuesGetter); ok {
return extractMultiBaggage(parent, multiCarrier)
}
@@ -38,7 +38,7 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context
}
// Fields returns the keys who's values are set with Inject.
-func (b Baggage) Fields() []string {
+func (Baggage) Fields() []string {
return []string{baggageHeader}
}
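
A short inject/extract sketch for the Baggage propagator; HeaderCarrier implements ValuesGetter, so Extract takes the multi-value path mentioned in the doc comment:

package main

import (
	"context"
	"fmt"
	"net/http"

	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	m, _ := baggage.NewMember("tenant", "acme")
	bag, _ := baggage.New(m)
	ctx := baggage.ContextWithBaggage(context.Background(), bag)

	carrier := propagation.HeaderCarrier(http.Header{})
	propagation.Baggage{}.Inject(ctx, carrier)
	fmt.Println(carrier.Get("baggage")) // tenant=acme

	out := propagation.Baggage{}.Extract(context.Background(), carrier)
	fmt.Println(baggage.FromContext(out).String())
}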
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
index 5c8c26ea2..0a32c59aa 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -20,7 +20,7 @@ type TextMapCarrier interface {
// must never be done outside of a new major release.
// Set stores the key-value pair.
- Set(key string, value string)
+ Set(key, value string)
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
@@ -88,7 +88,7 @@ func (hc HeaderCarrier) Values(key string) []string {
}
// Set stores the key-value pair.
-func (hc HeaderCarrier) Set(key string, value string) {
+func (hc HeaderCarrier) Set(key, value string) {
http.Header(hc).Set(key, value)
}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 6870e316d..6692d2665 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -36,7 +36,7 @@ var (
)
// Inject injects the trace context from ctx into carrier.
-func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
sc := trace.SpanContextFromContext(ctx)
if !sc.IsValid() {
return
@@ -77,7 +77,7 @@ func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) cont
return trace.ContextWithRemoteSpanContext(ctx, sc)
}
-func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
h := carrier.Get(traceparentHeader)
if h == "" {
return trace.SpanContext{}
@@ -151,6 +151,6 @@ func extractPart(dst []byte, h *string, n int) bool {
}
// Fields returns the keys who's values are set with Inject.
-func (tc TraceContext) Fields() []string {
+func (TraceContext) Fields() []string {
return []string{traceparentHeader, tracestateHeader}
}
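
And the equivalent sketch for the W3C TraceContext propagator, extracting a span context from a traceparent header (the header value below is the standard W3C example):

package main

import (
	"context"
	"fmt"
	"net/http"

	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tc := propagation.TraceContext{}
	fmt.Println(tc.Fields()) // [traceparent tracestate]

	h := http.Header{}
	h.Set("traceparent", "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01")

	ctx := tc.Extract(context.Background(), propagation.HeaderCarrier(h))
	sc := trace.SpanContextFromContext(ctx)
	fmt.Println(sc.TraceID(), sc.SpanID(), sc.IsRemote())
}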
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
deleted file mode 100644
index 82e1f46b4..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.20.0
-
-[](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
deleted file mode 100644
index 6685c392b..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
+++ /dev/null
@@ -1,1198 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Describes HTTP attributes.
-const (
- // HTTPMethodKey is the attribute Key conforming to the "http.method"
- // semantic conventions. It represents the hTTP request method.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- HTTPMethodKey = attribute.Key("http.method")
-
- // HTTPStatusCodeKey is the attribute Key conforming to the
- // "http.status_code" semantic conventions. It represents the [HTTP
- // response status code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If and only if one was
- // received/sent.)
- // Stability: stable
- // Examples: 200
- HTTPStatusCodeKey = attribute.Key("http.status_code")
-)
-
-// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
-// semantic conventions. It represents the hTTP request method.
-func HTTPMethod(val string) attribute.KeyValue {
- return HTTPMethodKey.String(val)
-}
-
-// HTTPStatusCode returns an attribute KeyValue conforming to the
-// "http.status_code" semantic conventions. It represents the [HTTP response
-// status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPStatusCode(val int) attribute.KeyValue {
- return HTTPStatusCodeKey.Int(val)
-}
-
-// HTTP Server spans attributes
-const (
- // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
- // semantic conventions. It represents the URI scheme identifying the used
- // protocol.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'http', 'https'
- HTTPSchemeKey = attribute.Key("http.scheme")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route (path template in
- // the format used by the respective server framework). See note below
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If and only if it's available)
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
- // if there is one.
- HTTPRouteKey = attribute.Key("http.route")
-)
-
-// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
-// semantic conventions. It represents the URI scheme identifying the used
-// protocol.
-func HTTPScheme(val string) attribute.KeyValue {
- return HTTPSchemeKey.String(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route (path template in the
-// format used by the respective server framework). See note below
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
-// Attributes for Events represented using Log Records.
-const (
- // EventNameKey is the attribute Key conforming to the "event.name"
- // semantic conventions. It represents the name identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'click', 'exception'
- EventNameKey = attribute.Key("event.name")
-
- // EventDomainKey is the attribute Key conforming to the "event.domain"
- // semantic conventions. It represents the domain identifies the business
- // context for the events.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: Events across different domains may have same `event.name`, yet be
- // unrelated events.
- EventDomainKey = attribute.Key("event.domain")
-)
-
-var (
- // Events from browser apps
- EventDomainBrowser = EventDomainKey.String("browser")
- // Events from mobile apps
- EventDomainDevice = EventDomainKey.String("device")
- // Events from Kubernetes
- EventDomainK8S = EventDomainKey.String("k8s")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It represents the name identifies the event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetTransportKey is the attribute Key conforming to the "net.transport"
- // semantic conventions. It represents the transport protocol used. See
- // note below.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- NetTransportKey = attribute.Key("net.transport")
-
- // NetProtocolNameKey is the attribute Key conforming to the
- // "net.protocol.name" semantic conventions. It represents the application
- // layer protocol used. The value SHOULD be normalized to lowercase.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'amqp', 'http', 'mqtt'
- NetProtocolNameKey = attribute.Key("net.protocol.name")
-
- // NetProtocolVersionKey is the attribute Key conforming to the
- // "net.protocol.version" semantic conventions. It represents the version
- // of the application layer protocol used. See note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '3.1.1'
- // Note: `net.protocol.version` refers to the version of the protocol used
- // and might be different from the protocol client's version. If the HTTP
- // client used has a version of `0.27.2`, but sends HTTP version `1.1`,
- // this attribute should be set to `1.1`.
- NetProtocolVersionKey = attribute.Key("net.protocol.version")
-
- // NetSockPeerNameKey is the attribute Key conforming to the
- // "net.sock.peer.name" semantic conventions. It represents the remote
- // socket peer name.
- //
- // Type: string
- // RequirementLevel: Recommended (If available and different from
- // `net.peer.name` and if `net.sock.peer.addr` is set.)
- // Stability: stable
- // Examples: 'proxy.example.com'
- NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
-
- // NetSockPeerAddrKey is the attribute Key conforming to the
- // "net.sock.peer.addr" semantic conventions. It represents the remote
- // socket peer address: IPv4 or IPv6 for internet protocols, path for local
- // communication,
- // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '127.0.0.1', '/tmp/mysql.sock'
- NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
-
- // NetSockPeerPortKey is the attribute Key conforming to the
- // "net.sock.peer.port" semantic conventions. It represents the remote
- // socket peer port.
- //
- // Type: int
- // RequirementLevel: Recommended (If defined for the address family and if
- // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
- // Stability: stable
- // Examples: 16456
- NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
-
- // NetSockFamilyKey is the attribute Key conforming to the
- // "net.sock.family" semantic conventions. It represents the protocol
- // [address
- // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
- // which is used for communication.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (If different than `inet` and if
- // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
- // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
- // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
- // instrumentations that follow previous versions of this document.)
- // Stability: stable
- // Examples: 'inet6', 'bluetooth'
- NetSockFamilyKey = attribute.Key("net.sock.family")
-
- // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
- // semantic conventions. It represents the logical remote hostname, see
- // note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com'
- // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
- // extra DNS lookup.
- NetPeerNameKey = attribute.Key("net.peer.name")
-
- // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
- // semantic conventions. It represents the logical remote port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- NetPeerPortKey = attribute.Key("net.peer.port")
-
- // NetHostNameKey is the attribute Key conforming to the "net.host.name"
- // semantic conventions. It represents the logical local hostname or
- // similar, see note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'localhost'
- NetHostNameKey = attribute.Key("net.host.name")
-
- // NetHostPortKey is the attribute Key conforming to the "net.host.port"
- // semantic conventions. It represents the logical local port number,
- // preferably the one that the peer used to connect
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 8080
- NetHostPortKey = attribute.Key("net.host.port")
-
- // NetSockHostAddrKey is the attribute Key conforming to the
- // "net.sock.host.addr" semantic conventions. It represents the local
- // socket address. Useful in case of a multi-IP host.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '192.168.0.1'
- NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
-
- // NetSockHostPortKey is the attribute Key conforming to the
- // "net.sock.host.port" semantic conventions. It represents the local
- // socket port number.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If defined for the address
- // family and if different than `net.host.port` and if `net.sock.host.addr`
- // is set. In other cases, it is still recommended to set this.)
- // Stability: stable
- // Examples: 35555
- NetSockHostPortKey = attribute.Key("net.sock.host.port")
-)
-
-var (
- // ip_tcp
- NetTransportTCP = NetTransportKey.String("ip_tcp")
- // ip_udp
- NetTransportUDP = NetTransportKey.String("ip_udp")
- // Named or anonymous pipe. See note below
- NetTransportPipe = NetTransportKey.String("pipe")
- // In-process communication
- NetTransportInProc = NetTransportKey.String("inproc")
- // Something else (non IP-based)
- NetTransportOther = NetTransportKey.String("other")
-)
-
-var (
- // IPv4 address
- NetSockFamilyInet = NetSockFamilyKey.String("inet")
- // IPv6 address
- NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
- // Unix domain socket path
- NetSockFamilyUnix = NetSockFamilyKey.String("unix")
-)
-
-// NetProtocolName returns an attribute KeyValue conforming to the
-// "net.protocol.name" semantic conventions. It represents the application
-// layer protocol used. The value SHOULD be normalized to lowercase.
-func NetProtocolName(val string) attribute.KeyValue {
- return NetProtocolNameKey.String(val)
-}
-
-// NetProtocolVersion returns an attribute KeyValue conforming to the
-// "net.protocol.version" semantic conventions. It represents the version of
-// the application layer protocol used. See note below.
-func NetProtocolVersion(val string) attribute.KeyValue {
- return NetProtocolVersionKey.String(val)
-}
-
-// NetSockPeerName returns an attribute KeyValue conforming to the
-// "net.sock.peer.name" semantic conventions. It represents the remote socket
-// peer name.
-func NetSockPeerName(val string) attribute.KeyValue {
- return NetSockPeerNameKey.String(val)
-}
-
-// NetSockPeerAddr returns an attribute KeyValue conforming to the
-// "net.sock.peer.addr" semantic conventions. It represents the remote socket
-// peer address: IPv4 or IPv6 for internet protocols, path for local
-// communication,
-// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
-func NetSockPeerAddr(val string) attribute.KeyValue {
- return NetSockPeerAddrKey.String(val)
-}
-
-// NetSockPeerPort returns an attribute KeyValue conforming to the
-// "net.sock.peer.port" semantic conventions. It represents the remote socket
-// peer port.
-func NetSockPeerPort(val int) attribute.KeyValue {
- return NetSockPeerPortKey.Int(val)
-}
-
-// NetPeerName returns an attribute KeyValue conforming to the
-// "net.peer.name" semantic conventions. It represents the logical remote
-// hostname, see note below.
-func NetPeerName(val string) attribute.KeyValue {
- return NetPeerNameKey.String(val)
-}
-
-// NetPeerPort returns an attribute KeyValue conforming to the
-// "net.peer.port" semantic conventions. It represents the logical remote port
-// number
-func NetPeerPort(val int) attribute.KeyValue {
- return NetPeerPortKey.Int(val)
-}
-
-// NetHostName returns an attribute KeyValue conforming to the
-// "net.host.name" semantic conventions. It represents the logical local
-// hostname or similar, see note below.
-func NetHostName(val string) attribute.KeyValue {
- return NetHostNameKey.String(val)
-}
-
-// NetHostPort returns an attribute KeyValue conforming to the
-// "net.host.port" semantic conventions. It represents the logical local port
-// number, preferably the one that the peer used to connect
-func NetHostPort(val int) attribute.KeyValue {
- return NetHostPortKey.Int(val)
-}
-
-// NetSockHostAddr returns an attribute KeyValue conforming to the
-// "net.sock.host.addr" semantic conventions. It represents the local socket
-// address. Useful in case of a multi-IP host.
-func NetSockHostAddr(val string) attribute.KeyValue {
- return NetSockHostAddrKey.String(val)
-}
-
-// NetSockHostPort returns an attribute KeyValue conforming to the
-// "net.sock.host.port" semantic conventions. It represents the local socket
-// port number.
-func NetSockHostPort(val int) attribute.KeyValue {
- return NetSockHostPortKey.Int(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetHostConnectionTypeKey is the attribute Key conforming to the
- // "net.host.connection.type" semantic conventions. It represents the
- // internet connection type currently being used by the host.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'wifi'
- NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
-
- // NetHostConnectionSubtypeKey is the attribute Key conforming to the
- // "net.host.connection.subtype" semantic conventions. It represents the
- // this describes more details regarding the connection.type. It may be the
- // type of cell technology connection, but it could be used for describing
- // details about a wifi connection.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'LTE'
- NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
-
- // NetHostCarrierNameKey is the attribute Key conforming to the
- // "net.host.carrier.name" semantic conventions. It represents the name of
- // the mobile carrier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'sprint'
- NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
-
- // NetHostCarrierMccKey is the attribute Key conforming to the
- // "net.host.carrier.mcc" semantic conventions. It represents the mobile
- // carrier country code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '310'
- NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
-
- // NetHostCarrierMncKey is the attribute Key conforming to the
- // "net.host.carrier.mnc" semantic conventions. It represents the mobile
- // carrier network code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '001'
- NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
-
- // NetHostCarrierIccKey is the attribute Key conforming to the
- // "net.host.carrier.icc" semantic conventions. It represents the ISO
- // 3166-1 alpha-2 2-character country code associated with the mobile
- // carrier network.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'DE'
- NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
-)
-
-var (
- // wifi
- NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
- // wired
- NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
- // cell
- NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
- // unavailable
- NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
- // unknown
- NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
-)
-
-var (
- // GPRS
- NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
- // EDGE
- NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
- // UMTS
- NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
- // CDMA
- NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
- // IDEN
- NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
- // EHRPD
- NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
- // GSM
- NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
-)
-
-// NetHostCarrierName returns an attribute KeyValue conforming to the
-// "net.host.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetHostCarrierName(val string) attribute.KeyValue {
- return NetHostCarrierNameKey.String(val)
-}
-
-// NetHostCarrierMcc returns an attribute KeyValue conforming to the
-// "net.host.carrier.mcc" semantic conventions. It represents the mobile
-// carrier country code.
-func NetHostCarrierMcc(val string) attribute.KeyValue {
- return NetHostCarrierMccKey.String(val)
-}
-
-// NetHostCarrierMnc returns an attribute KeyValue conforming to the
-// "net.host.carrier.mnc" semantic conventions. It represents the mobile
-// carrier network code.
-func NetHostCarrierMnc(val string) attribute.KeyValue {
- return NetHostCarrierMncKey.String(val)
-}
-
-// NetHostCarrierIcc returns an attribute KeyValue conforming to the
-// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetHostCarrierIcc(val string) attribute.KeyValue {
- return NetHostCarrierIccKey.String(val)
-}
-
-// Semantic conventions for HTTP client and server Spans.
-const (
- // HTTPRequestContentLengthKey is the attribute Key conforming to the
- // "http.request_content_length" semantic conventions. It represents the
- // size of the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3495
- HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
-
- // HTTPResponseContentLengthKey is the attribute Key conforming to the
- // "http.response_content_length" semantic conventions. It represents the
- // size of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3495
- HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
-)
-
-// HTTPRequestContentLength returns an attribute KeyValue conforming to the
-// "http.request_content_length" semantic conventions. It represents the size
-// of the request payload body in bytes. This is the number of bytes
-// transferred excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestContentLength(val int) attribute.KeyValue {
- return HTTPRequestContentLengthKey.Int(val)
-}
-
-// HTTPResponseContentLength returns an attribute KeyValue conforming to the
-// "http.response_content_length" semantic conventions. It represents the size
-// of the response payload body in bytes. This is the number of bytes
-// transferred excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseContentLength(val int) attribute.KeyValue {
- return HTTPResponseContentLengthKey.Int(val)
-}
-
-// Semantic convention describing per-message attributes populated on messaging
-// spans or links.
-const (
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the [conversation ID](#conversations) identifying the conversation to
- // which the message belongs, represented as a string. Sometimes called
- // "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
- // the "messaging.message.payload_size_bytes" semantic conventions. It
- // represents the (uncompressed) size of the message payload in bytes. Also
- // use this attribute if it is unknown whether the compressed or
- // uncompressed payload size is reported.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2738
- MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
-
- // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
- // conforming to the "messaging.message.payload_compressed_size_bytes"
- // semantic conventions. It represents the compressed size of the message
- // payload in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2048
- MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
-)
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the [conversation ID](#conversations) identifying the
-// conversation to which the message belongs, represented as a string.
-// Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
- return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
-// to the "messaging.message.payload_size_bytes" semantic conventions. It
-// represents the (uncompressed) size of the message payload in bytes. Also use
-// this attribute if it is unknown whether the compressed or uncompressed
-// payload size is reported.
-func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
- return MessagingMessagePayloadSizeBytesKey.Int(val)
-}
-
-// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
-// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
-// conventions. It represents the compressed size of the message payload in
-// bytes.
-func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
- return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
-}
-
-// Semantic convention for attributes that describe messaging destination on
-// broker
-const (
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
- // the broker does not have such notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-)
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// Semantic convention for attributes that describe messaging source on broker
-const (
- // MessagingSourceNameKey is the attribute Key conforming to the
- // "messaging.source.name" semantic conventions. It represents the message
- // source name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Source name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
- // the broker does not have such notion, the source name SHOULD uniquely
- // identify the broker.
- MessagingSourceNameKey = attribute.Key("messaging.source.name")
-
- // MessagingSourceTemplateKey is the attribute Key conforming to the
- // "messaging.source.template" semantic conventions. It represents the low
- // cardinality representation of the messaging source name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/customers/{customerID}'
- // Note: Source names could be constructed from templates. An example would
- // be a source name involving a user name or product id. Although the
- // source name in this case is of high cardinality, the underlying template
- // is of low cardinality and can be effectively used for grouping and
- // aggregation.
- MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
-
- // MessagingSourceTemporaryKey is the attribute Key conforming to the
- // "messaging.source.temporary" semantic conventions. It represents a
- // boolean that is true if the message source is temporary and might not
- // exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
-
- // MessagingSourceAnonymousKey is the attribute Key conforming to the
- // "messaging.source.anonymous" semantic conventions. It represents a
- // boolean that is true if the message source is anonymous (could be
-	// unnamed or have an auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
-)
-
-// MessagingSourceName returns an attribute KeyValue conforming to the
-// "messaging.source.name" semantic conventions. It represents the message
-// source name
-func MessagingSourceName(val string) attribute.KeyValue {
- return MessagingSourceNameKey.String(val)
-}
-
-// MessagingSourceTemplate returns an attribute KeyValue conforming to the
-// "messaging.source.template" semantic conventions. It represents the low
-// cardinality representation of the messaging source name
-func MessagingSourceTemplate(val string) attribute.KeyValue {
- return MessagingSourceTemplateKey.String(val)
-}
-
-// MessagingSourceTemporary returns an attribute KeyValue conforming to the
-// "messaging.source.temporary" semantic conventions. It represents a boolean
-// that is true if the message source is temporary and might not exist anymore
-// after messages are processed.
-func MessagingSourceTemporary(val bool) attribute.KeyValue {
- return MessagingSourceTemporaryKey.Bool(val)
-}
-
-// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
-// "messaging.source.anonymous" semantic conventions. It represents a boolean
-// that is true if the message source is anonymous (could be unnamed or have
-// an auto-generated name).
-func MessagingSourceAnonymous(val bool) attribute.KeyValue {
- return MessagingSourceAnonymousKey.Bool(val)
-}
-
-// Attributes for RabbitMQ
-const (
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the rabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If not empty.)
- // Stability: stable
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the rabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// Attributes for Apache Kafka
-const (
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
-	// message key, which Kafka uses to group alike messages so they are
-	// processed on the same partition. It differs from `messaging.message.id`
-	// in that it is not unique. If the key is `null`, the attribute MUST NOT
-	// be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myKey'
-	// Note: If the key type is not string, its string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaClientIDKey is the attribute Key conforming to the
- // "messaging.kafka.client_id" semantic conventions. It represents the
- // client ID for the Consumer or Producer that is handling the message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'client-5'
- MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
-
- // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
- // the "messaging.kafka.destination.partition" semantic conventions. It
- // represents the partition the message is sent to.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2
- MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
-
- // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
- // "messaging.kafka.source.partition" semantic conventions. It represents
- // the partition the message is received from.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2
- MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: ConditionallyRequired (If value is `true`. When
- // missing, the value is assumed to be `false`.)
- // Stability: stable
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-)
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message key, which Kafka uses to group alike messages so they are processed
-// on the same partition. It differs from `messaging.message.id` in that it is
-// not unique. If the key is `null`, the attribute MUST NOT be set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaClientID returns an attribute KeyValue conforming to the
-// "messaging.kafka.client_id" semantic conventions. It represents the client
-// ID for the Consumer or Producer that is handling the message.
-func MessagingKafkaClientID(val string) attribute.KeyValue {
- return MessagingKafkaClientIDKey.String(val)
-}
-
-// MessagingKafkaDestinationPartition returns an attribute KeyValue
-// conforming to the "messaging.kafka.destination.partition" semantic
-// conventions. It represents the partition the message is sent to.
-func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
- return MessagingKafkaDestinationPartitionKey.Int(val)
-}
-
-// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
-// the "messaging.kafka.source.partition" semantic conventions. It represents
-// the partition the message is received from.
-func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
- return MessagingKafkaSourcePartitionKey.Int(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
-// Attributes for Apache RocketMQ
-const (
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
-	// namespace of RocketMQ resources; resources in different namespaces are
-	// isolated from each other.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqClientIDKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_id" semantic conventions. It represents the
- // unique identifier for each client.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myhost@8742@s8083jm'
- MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds that
-	// the delay message is expected to be delivered to the consumer.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the message type is delay
- // and delay time level is not specified.)
- // Stability: stable
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-	// conventions. It represents the delay time level for a delay message, which
- // determines the message delay time.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the message type is delay
- // and delivery timestamp is not specified.)
- // Stability: stable
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
-	// the message group, which is essential for FIFO messages. Messages that
-	// belong to the same message group are always processed one by one within
-	// the same consumer group.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
- // Stability: stable
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
-	// secondary classifier of the message besides the topic.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
-	// the key(s) of the message, another way to mark a message besides the message id.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources; resources in different namespaces are
-// isolated from each other.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
- return MessagingRocketmqNamespaceKey.String(val)
-}
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
- return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
-// "messaging.rocketmq.client_id" semantic conventions. It represents the
-// unique identifier for each client.
-func MessagingRocketmqClientID(val string) attribute.KeyValue {
- return MessagingRocketmqClientIDKey.String(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds that the delay
-// message is expected to be delivered to the consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for delay message, which
-// determines the message delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
-// the message group, which is essential for FIFO messages. Messages that
-// belong to the same message group are always processed one by one within the
-// same consumer group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
- return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of the message besides the topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
- return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of the message, another way to mark a message besides the message id.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
- return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
-// Describes user-agent attributes.
-const (
- // UserAgentOriginalKey is the attribute Key conforming to the
- // "user_agent.original" semantic conventions. It represents the value of
- // the [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
- UserAgentOriginalKey = attribute.Key("user_agent.original")
-)
-
-// UserAgentOriginal returns an attribute KeyValue conforming to the
-// "user_agent.original" semantic conventions. It represents the value of the
-// [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func UserAgentOriginal(val string) attribute.KeyValue {
- return UserAgentOriginalKey.String(val)
-}
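
The messaging attribute helpers deleted above (for example MessagingKafkaMessageKey, MessagingKafkaMessageOffset, and MessagingKafkaConsumerGroup) are plain constructors returning attribute.KeyValue, normally attached to a span through the generic OpenTelemetry trace API. A minimal sketch under that assumption; the import path is taken from the deleted file's header, while the package, function, tracer, span, and consumer-group names below are purely illustrative:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // import path taken from the deleted file header
)

// recordKafkaConsume is an illustrative helper (not part of this repository):
// the removed constructors return attribute.KeyValue pairs that are simply
// set on the active span.
func recordKafkaConsume(ctx context.Context, key string, offset int) {
	_, span := otel.Tracer("example/kafka-consumer").Start(ctx, "orders process")
	defer span.End()

	span.SetAttributes(
		semconv.MessagingKafkaMessageKey(key),       // "messaging.kafka.message.key"
		semconv.MessagingKafkaMessageOffset(offset), // "messaging.kafka.message.offset"
		semconv.MessagingKafkaConsumerGroup("my-group"),
	)
}
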
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
deleted file mode 100644
index 0d1f55a8f..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.20.0 version of the OpenTelemetry specification.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
deleted file mode 100644
index 637763932..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// This semantic convention defines the attributes used to represent a feature
-// flag evaluation as an event.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
-	// "feature_flag.variant" semantic conventions. It represents the variant
-	// of the evaluated flag value. It SHOULD be a semantic identifier for the
-	// value; if one is unavailable, a stringified version of the value can be
-	// used.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
-	// For example, the variant `red` may be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It represents the variant of
-// the evaluated flag value. It SHOULD be a semantic identifier for the value;
-// if one is unavailable, a stringified version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
-// RPC received/sent message.
-const (
- // MessageTypeKey is the attribute Key conforming to the "message.type"
-	// semantic conventions. It represents whether this is a received or sent
-	// message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessageTypeKey = attribute.Key("message.type")
-
- // MessageIDKey is the attribute Key conforming to the "message.id"
-	// semantic conventions. It represents the message ID, which MUST be
-	// calculated as two different counters starting from `1`, one for sent
-	// messages and one for received messages.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- MessageIDKey = attribute.Key("message.id")
-
- // MessageCompressedSizeKey is the attribute Key conforming to the
- // "message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- MessageCompressedSizeKey = attribute.Key("message.compressed_size")
-
- // MessageUncompressedSizeKey is the attribute Key conforming to the
- // "message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-var (
- // sent
- MessageTypeSent = MessageTypeKey.String("SENT")
- // received
- MessageTypeReceived = MessageTypeKey.String("RECEIVED")
-)
-
-// MessageID returns an attribute KeyValue conforming to the "message.id"
-// semantic conventions. It represents the message ID, which MUST be calculated
-// as two different counters starting from `1`, one for sent messages and one
-// for received messages.
-func MessageID(val int) attribute.KeyValue {
- return MessageIDKey.Int(val)
-}
-
-// MessageCompressedSize returns an attribute KeyValue conforming to the
-// "message.compressed_size" semantic conventions. It represents the compressed
-// size of the message in bytes.
-func MessageCompressedSize(val int) attribute.KeyValue {
- return MessageCompressedSizeKey.Int(val)
-}
-
-// MessageUncompressedSize returns an attribute KeyValue conforming to the
-// "message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func MessageUncompressedSize(val int) attribute.KeyValue {
- return MessageUncompressedSizeKey.Int(val)
-}
-
-// The attributes used to report a single exception associated with a span.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
-	// "exception.escaped" semantic conventions. It SHOULD be set to true if
-	// the exception event is recorded at a point where it is known that the
-	// exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
-	// The exception may actually still be "in flight" in some languages (e.g.
-	// if it is passed to a Context manager's `__exit__` method in Python), but
-	// it will usually be caught at the point of recording the exception in
-	// most languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example above](#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It SHOULD be set to true if the
-// exception event is recorded at a point where it is known that the exception
-// is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
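
The ExceptionEscaped helper removed above annotates the exception event rather than the span itself, so it is normally passed to RecordError just before the span ends, while the error is still propagating. A minimal sketch under that assumption; the import path is the one from the deleted file header, and the package, function, tracer, and span names are illustrative:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // import path from the deleted file header
	"go.opentelemetry.io/otel/trace"
)

// doWork is an illustrative wrapper (not part of this repository). If the
// wrapped call fails, the error is recorded as an exception event carrying
// exception.escaped=true, because it is still "in flight" when the span ends.
func doWork(ctx context.Context, work func(context.Context) error) (err error) {
	ctx, span := otel.Tracer("example").Start(ctx, "do-work")
	defer func() {
		if err != nil {
			span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
			span.SetStatus(codes.Error, err.Error())
		}
		span.End()
	}()
	return work(ctx)
}
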
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
deleted file mode 100644
index 9c1840631..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-// HTTP scheme attributes.
-var (
- HTTPSchemeHTTP = HTTPSchemeKey.String("http")
- HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
deleted file mode 100644
index 3d44dae27..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
+++ /dev/null
@@ -1,2060 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The web browser in which the application represented by the resource is
-// running. The `browser.*` attributes MUST be used only for resources that
-// represent applications running in a web browser (regardless of whether
-// running on a mobile or desktop device).
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.brands`).
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserPlatformKey is the attribute Key conforming to the
- // "browser.platform" semantic conventions. It represents the platform on
- // which the browser is running
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute
- // SHOULD be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client
- // Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in
- // the [`os.type` and `os.name` attributes](./os.md). However, for
- // consistency, the values in the `browser.platform` attribute should
- // capture the exact value that the user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the
- // browser is running on a mobile device
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.mobile`). If unavailable, this attribute
- // SHOULD be left unset.
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserLanguageKey is the attribute Key conforming to the
- // "browser.language" semantic conventions. It represents the preferred
- // language of the user using the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'en', 'en-US', 'fr', 'fr-FR'
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
-// A cloud environment (e.g. GCP, Azure, AWS)
-const (
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudAccountIDKey is the attribute Key conforming to the
- // "cloud.account.id" semantic conventions. It represents the cloud account
- // ID the resource is assigned to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region"
- // semantic conventions. It represents the geographical region the resource
-	// is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for
- // example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure
- // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
- // [Google Cloud regions](https://cloud.google.com/about/locations), or
- // [Tencent Cloud
- // regions](https://www.tencentcloud.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudResourceIDKey is the attribute Key conforming to the
- // "cloud.resource_id" semantic conventions. It represents the cloud
- // provider-specific native identifier of the monitored cloud resource
- // (e.g. an
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // on AWS, a [fully qualified resource
- // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
- // on Azure, a [full resource
- // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
- // on GCP)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
- // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
- // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
- // Note: On some cloud providers, it may not be possible to determine the
- // full ID at startup,
- // so it may be necessary to set `cloud.resource_id` as a span attribute
- // instead.
- //
- // The exact value to use for `cloud.resource_id` depends on the cloud
- // provider.
- // The following well-known definitions MUST be used if you set this
- // attribute and they apply:
- //
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias
- // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
- // with the resolved function version, as the same runtime instance may
- // be invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the
- // resource](https://cloud.google.com/iam/docs/full-resource-names)
- // * **Azure:** The [Fully Qualified Resource
- // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
- // of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider.
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
-	// "cloud.availability_zone" semantic conventions. It represents the zone
-	// in which the resource is running. Cloud regions often have multiple,
-	// isolated locations known as zones to increase availability.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Heroku Platform as a Service
- CloudProviderHeroku = CloudProviderKey.String("heroku")
- // IBM Cloud
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
- // Red Hat OpenShift on IBM Cloud
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running in.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-// on AWS, a [fully qualified resource
-// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
-// on Azure, a [full resource
-// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-// on GCP)
-func CloudResourceID(val string) attribute.KeyValue {
- return CloudResourceIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the zone in
-// which the resource is running. Cloud regions often have multiple, isolated
-// locations known as zones to increase availability.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
-
-// Resources used by AWS Elastic Container Service (ECS).
-const (
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container
- // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS
- // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch
- // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the
- // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
- // [ECS task
- // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the task
- // definition family this task definition is a member of.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision
- // for this task definition.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
-// task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the task
-// definition family this task definition is a member of.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// this task definition.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
-
-// Resources used by AWS Elastic Kubernetes Service (EKS).
-const (
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
- // EKS cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// Resources specific to Amazon Web Services.
-const (
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of
- // the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like
- // multi-container applications, where a single application has sidecar
-	// containers, and each writes to its own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon
- // Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s)
- // of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
- // the AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- // One log group can contain several log streams, so these ARNs necessarily
- // identify both a log group and a log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-)
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// Heroku dyno metadata
-const (
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions. It represents the unique identifier for the
- // application
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-)
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// A container instance.
-const (
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
-
- // ContainerIDKey is the attribute Key conforming to the "container.id"
- // semantic conventions. It represents the container ID. Usually a UUID, as
- // for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-identification).
- // The UUID might be abbreviated.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container
- // runtime managing this container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageTagKey is the attribute Key conforming to the
- // "container.image.tag" semantic conventions. It represents the container
- // image tag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0.1'
- ContainerImageTagKey = attribute.Key("container.image.tag")
-)
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
- return ContainerNameKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
- return ContainerIDKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
- return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageTag returns an attribute KeyValue conforming to the
-// "container.image.tag" semantic conventions. It represents the container
-// image tag.
-func ContainerImageTag(val string) attribute.KeyValue {
- return ContainerImageTagKey.String(val)
-}
-
-// The software deployment.
-const (
- // DeploymentEnvironmentKey is the attribute Key conforming to the
- // "deployment.environment" semantic conventions. It represents the name of
- // the [deployment
- // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'staging', 'production'
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment
-// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
-// deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
- return DeploymentEnvironmentKey.String(val)
-}
-
-// The device on which the process represented by this resource is running.
-const (
- // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
- // conventions. It represents a unique identifier representing the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values
- // outlined below. This value is not an advertising identifier and MUST NOT
- // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
- // to the [vendor
- // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
- // On Android (Java or Kotlin), this value MUST be equal to the Firebase
- // Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on
- // best practices and exact implementation details. Caution should be taken
- // when storing personal data or anything which can identify a user. GDPR
- // and data protection laws may apply, ensure you do your own due
- // diligence.
- DeviceIDKey = attribute.Key("device.id")
-
- // DeviceModelIdentifierKey is the attribute Key conforming to the
- // "device.model.identifier" semantic conventions. It represents the model
- // identifier for the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine readable version
- // of the model identifier rather than the market or consumer-friendly name
- // of the device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
- // DeviceModelNameKey is the attribute Key conforming to the
- // "device.model.name" semantic conventions. It represents the marketing
- // name for the device model
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human readable version of
- // the device model rather than a machine readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-
- // DeviceManufacturerKey is the attribute Key conforming to the
- // "device.manufacturer" semantic conventions. It represents the name of
- // the device manufacturer
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
- return DeviceIDKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
- return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
- return DeviceModelNameKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
- return DeviceManufacturerKey.String(val)
-}
-
-// A serverless instance.
-const (
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run:** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string that will potentially be reused for other invocations to the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-)
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string that will potentially be reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// A host is defined as a general computing instance.
-const (
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostNameKey is the attribute Key conforming to the "host.name" semantic
- // conventions. It represents the name of the host. On Unix systems, it may
- // contain what the hostname command returns, or the fully qualified
- // hostname, or another name specified by the user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
-
- // HostTypeKey is the attribute Key conforming to the "host.type" semantic
- // conventions. It represents the type of host. For Cloud, this must be the
- // machine type.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
-
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- HostArchKey = attribute.Key("host.arch")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the VM image ID. For Cloud, this
- // value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image as defined in [Version
- // Attributes](README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the vM image ID. For
-// Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image as defined in [Version
-// Attributes](README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// A Kubernetes Cluster.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
- return K8SClusterNameKey.String(val)
-}
-
-// A Kubernetes Node object.
-const (
- // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
- // semantic conventions. It represents the name of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
-
- // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
- // semantic conventions. It represents the UID of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-)
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
- return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
- return K8SNodeUIDKey.String(val)
-}
-
-// A Kubernetes Namespace.
-const (
- // K8SNamespaceNameKey is the attribute Key conforming to the
- // "k8s.namespace.name" semantic conventions. It represents the name of the
- // namespace that the pod is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-)
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
- return K8SNamespaceNameKey.String(val)
-}
-
-// A Kubernetes Pod object.
-const (
- // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
- // semantic conventions. It represents the UID of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
- // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
- // semantic conventions. It represents the name of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-)
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
- return K8SPodUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
- return K8SPodNameKey.String(val)
-}
-
-// A container in a
-// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
-const (
- // K8SContainerNameKey is the attribute Key conforming to the
- // "k8s.container.name" semantic conventions. It represents the name of the
- // Container from Pod specification, must be unique within a Pod. Container
- // runtime usually uses different globally unique name (`container.name`).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-
- // K8SContainerRestartCountKey is the attribute Key conforming to the
- // "k8s.container.restart_count" semantic conventions. It represents the
- // number of times the container was restarted. This attribute can be used
- // to identify a particular container (running or stopped) within a
- // container spec.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-)
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from Pod specification, must be unique within a Pod. Container
-// runtime usually uses different globally unique name (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
- return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
- return K8SContainerRestartCountKey.Int(val)
-}
-
-// A Kubernetes ReplicaSet object.
-const (
- // K8SReplicaSetUIDKey is the attribute Key conforming to the
- // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
- // ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
- // K8SReplicaSetNameKey is the attribute Key conforming to the
- // "k8s.replicaset.name" semantic conventions. It represents the name of
- // the ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-)
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
- return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
- return K8SReplicaSetNameKey.String(val)
-}
-
-// A Kubernetes Deployment object.
-const (
- // K8SDeploymentUIDKey is the attribute Key conforming to the
- // "k8s.deployment.uid" semantic conventions. It represents the UID of the
- // Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
- // K8SDeploymentNameKey is the attribute Key conforming to the
- // "k8s.deployment.name" semantic conventions. It represents the name of
- // the Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-)
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
- return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
- return K8SDeploymentNameKey.String(val)
-}
-
-// A Kubernetes StatefulSet object.
-const (
- // K8SStatefulSetUIDKey is the attribute Key conforming to the
- // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
- // StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-
- // K8SStatefulSetNameKey is the attribute Key conforming to the
- // "k8s.statefulset.name" semantic conventions. It represents the name of
- // the StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-)
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
- return K8SStatefulSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
- return K8SStatefulSetNameKey.String(val)
-}
-
-// A Kubernetes DaemonSet object.
-const (
- // K8SDaemonSetUIDKey is the attribute Key conforming to the
- // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
- // K8SDaemonSetNameKey is the attribute Key conforming to the
- // "k8s.daemonset.name" semantic conventions. It represents the name of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-)
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
- return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
- return K8SDaemonSetNameKey.String(val)
-}
-
-// A Kubernetes Job object.
-const (
- // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
- // semantic conventions. It represents the UID of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
- // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
- // semantic conventions. It represents the name of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-)
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
- return K8SJobUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
- return K8SJobNameKey.String(val)
-}
-
-// A Kubernetes CronJob object.
-const (
- // K8SCronJobUIDKey is the attribute Key conforming to the
- // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
- // K8SCronJobNameKey is the attribute Key conforming to the
- // "k8s.cronjob.name" semantic conventions. It represents the name of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-)
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
- return K8SCronJobUIDKey.String(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
- return K8SCronJobNameKey.String(val)
-}
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
- // OSTypeKey is the attribute Key conforming to the "os.type" semantic
- // conventions. It represents the operating system type.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- OSTypeKey = attribute.Key("os.type")
-
- // OSDescriptionKey is the attribute Key conforming to the "os.description"
- // semantic conventions. It represents the human readable (not intended to
- // be parsed) OS version information, as reported by e.g. the `ver` or
- // `lsb_release -a` commands.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
- // LTS'
- OSDescriptionKey = attribute.Key("os.description")
-
- // OSNameKey is the attribute Key conforming to the "os.name" semantic
- // conventions. It represents the human readable operating system name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
-
- // OSVersionKey is the attribute Key conforming to the "os.version"
- // semantic conventions. It represents the version string of the operating
- // system as defined in [Version
- // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, as reported by e.g. the `ver`
-// or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
- return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
- return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
- return OSVersionKey.String(val)
-}
-
-// An operating system process.
-const (
- // ProcessPIDKey is the attribute Key conforming to the "process.pid"
- // semantic conventions. It represents the process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
-
- // ProcessParentPIDKey is the attribute Key conforming to the
- // "process.parent_pid" semantic conventions. It represents the parent
- // Process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 111
- ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
- // ProcessExecutableNameKey is the attribute Key conforming to the
- // "process.executable.name" semantic conventions. It represents the name
- // of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
- // of `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
- // ProcessExecutablePathKey is the attribute Key conforming to the
- // "process.executable.path" semantic conventions. It represents the full
- // path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
- // command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
- // "process.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otecol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-)
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// The single (language) runtime instance which is monitored.
-const (
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-)
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
- return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
- return ProcessRuntimeVersionKey.String(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
- return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// A service instance.
-const (
- // ServiceNameKey is the attribute Key conforming to the "service.name"
- // semantic conventions. It represents the logical name of the service.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled
- // services. If the value was not specified, SDKs MUST fallback to
- // `unknown_service:` concatenated with
- // [`process.executable.name`](process.md#process), e.g.
- // `unknown_service:bash`. If `process.executable.name` is not available,
- // the value MUST be set to `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
-)
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
- return ServiceNameKey.String(val)
-}
-
-// A service instance.
-const (
- // ServiceNamespaceKey is the attribute Key conforming to the
- // "service.namespace" semantic conventions. It represents a namespace for
- // `service.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group
- // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name`
- // is expected to be unique for all services that have no explicit
- // namespace defined (so the empty/unspecified namespace is simply one more
- // valid namespace). Zero-length namespace string is assumed equal to
- // unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
-
- // ServiceInstanceIDKey is the attribute Key conforming to the
- // "service.instance.id" semantic conventions. It represents the string ID
- // of the service instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'my-k8s-pod-deployment-1',
- // '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be
- // globally unique). The ID helps to distinguish instances of the same
- // service that exist at the same time (e.g. instances of a horizontally
- // scaled service). It is preferable for the ID to be persistent and stay
- // the same for the lifetime of the service instance, however it is
- // acceptable that the ID is ephemeral and changes during important
- // lifetime events for the service (e.g. service restarts). If the service
- // has no inherent unique ID that can be used as the value of this
- // attribute it is recommended to generate a random Version 1 or Version 4
- // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
- // Version 5, see RFC 4122 for more recommendations).
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
- // ServiceVersionKey is the attribute Key conforming to the
- // "service.version" semantic conventions. It represents the version string
- // of the service API or implementation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2.0.0'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
- return ServiceNamespaceKey.String(val)
-}
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
- return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation.
-func ServiceVersion(val string) attribute.KeyValue {
- return ServiceVersionKey.String(val)
-}
-
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
- // TelemetrySDKNameKey is the attribute Key conforming to the
- // "telemetry.sdk.name" semantic conventions. It represents the name of the
- // telemetry SDK as defined above.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'opentelemetry'
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
- // TelemetrySDKLanguageKey is the attribute Key conforming to the
- // "telemetry.sdk.language" semantic conventions. It represents the
- // language of the telemetry SDK.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
- // TelemetrySDKVersionKey is the attribute Key conforming to the
- // "telemetry.sdk.version" semantic conventions. It represents the version
- // string of the telemetry SDK.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
- return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
- return TelemetrySDKVersionKey.String(val)
-}
-
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
- // TelemetryAutoVersionKey is the attribute Key conforming to the
- // "telemetry.auto.version" semantic conventions. It represents the version
- // string of the auto instrumentation agent, if used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.2.3'
- TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
-)
-
-// TelemetryAutoVersion returns an attribute KeyValue conforming to the
-// "telemetry.auto.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent, if used.
-func TelemetryAutoVersion(val string) attribute.KeyValue {
- return TelemetryAutoVersionKey.String(val)
-}
-
-// Resource describing the packaged software running the application code. Web
-// engines are typically executed using process.runtime.
-const (
- // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
- // semantic conventions. It represents the name of the web engine.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
-
- // WebEngineVersionKey is the attribute Key conforming to the
- // "webengine.version" semantic conventions. It represents the version of
- // the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
-
- // WebEngineDescriptionKey is the attribute Key conforming to the
- // "webengine.description" semantic conventions. It represents the
- // additional description of the web engine (e.g. detailed version and
- // edition information).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-)
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry
-// Scope's concepts.
-const (
- // OTelLibraryNameKey is the attribute Key conforming to the
- // "otel.library.name" semantic conventions. It represents the deprecated,
- // use the `otel.scope.name` attribute.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelLibraryNameKey = attribute.Key("otel.library.name")
-
- // OTelLibraryVersionKey is the attribute Key conforming to the
- // "otel.library.version" semantic conventions. It represents the
- // deprecated, use the `otel.scope.version` attribute.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '1.0.0'
- OTelLibraryVersionKey = attribute.Key("otel.library.version")
-)
-
-// OTelLibraryName returns an attribute KeyValue conforming to the
-// "otel.library.name" semantic conventions. It represents the deprecated, use
-// the `otel.scope.name` attribute.
-func OTelLibraryName(val string) attribute.KeyValue {
- return OTelLibraryNameKey.String(val)
-}
-
-// OTelLibraryVersion returns an attribute KeyValue conforming to the
-// "otel.library.version" semantic conventions. It represents the deprecated,
-// use the `otel.scope.version` attribute.
-func OTelLibraryVersion(val string) attribute.KeyValue {
- return OTelLibraryVersionKey.String(val)
-}
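The file deleted above is generated from the OpenTelemetry semantic-convention spec: every exported constant is an `attribute.Key`, and every exported function wraps that key into a typed `attribute.KeyValue`. As a rough, illustrative sketch only (it is not part of this diff, and not necessarily how podman-tui or its dependencies wire telemetry), the snippet below shows how such constructors are typically fed, together with the package's `SchemaURL`, into an SDK resource. The attribute values are arbitrary placeholders, and after this removal the equivalent helpers would come from whichever newer `semconv` version the updated otel modules vendor instead.

```go
// Illustrative only: consuming the semconv resource helpers shown above.
// All attribute values here are arbitrary example placeholders.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func main() {
	// Each helper returns an attribute.KeyValue; NewWithAttributes ties the
	// attributes to the package's SchemaURL so consumers know which version
	// of the semantic conventions the attribute names follow.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("podman-tui"),       // service.name
		semconv.ServiceVersion("0.0.0-example"), // service.version (placeholder)
		semconv.HostName("example-host"),        // host.name (placeholder)
		semconv.ContainerRuntime("podman"),      // container.runtime
	)
	fmt.Println(res.Attributes())
}
```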
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
deleted file mode 100644
index 95d0210e3..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/
-const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
deleted file mode 100644
index 90b1b0452..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
+++ /dev/null
@@ -1,2599 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-)
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// The attributes described in this section are rather generic. They may be
-// used in any Log Record they apply to.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means, that two
- // distinguishable log records MUST have different values.
- // The id MAY be an [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
-// Span attributes used by AWS Lambda (in addition to general `faas`
-// attributes).
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for CloudEvents. CloudEvents is a specification on how to define
-// event data in a standard way. These attributes can be attached to spans when
-// performing operations with CloudEvents, regardless of the protocol being
-// used.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
- // uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
- // identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
- // contains a value describing the type of event related to the originating
- // occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-// identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-// contains a value describing the type of event related to the originating
-// occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// Semantic conventions for the OpenTracing Shim
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span does not depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// The attributes used to perform database client calls.
-const (
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents an identifier for the database management
- // system (DBMS) product being used. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- DBSystemKey = attribute.Key("db.system")
-
- // DBConnectionStringKey is the attribute Key conforming to the
- // "db.connection_string" semantic conventions. It represents the
- // connection string used to connect to the database. It is recommended to
- // remove embedded credentials.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
- DBConnectionStringKey = attribute.Key("db.connection_string")
-
- // DBUserKey is the attribute Key conforming to the "db.user" semantic
- // conventions. It represents the username for accessing the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
-
- // DBJDBCDriverClassnameKey is the attribute Key conforming to the
- // "db.jdbc.driver_classname" semantic conventions. It represents the
- // fully-qualified class name of the [Java Database Connectivity
- // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
- // driver used to connect.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'org.postgresql.Driver',
- // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
- DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
-
- // DBNameKey is the attribute Key conforming to the "db.name" semantic
- // conventions. It represents the this attribute is used to report the name
- // of the database being accessed. For commands that switch the database,
- // this should be set to the target database (even if the command fails).
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If applicable.)
- // Stability: stable
- // Examples: 'customers', 'main'
- // Note: In some SQL databases, the database name to be used is called
- // "schema name". In case there are multiple layers that could be
- // considered for database name (e.g. Oracle instance name and schema
- // name), the database name to be used is the more specific layer (e.g.
- // Oracle schema name).
- DBNameKey = attribute.Key("db.name")
-
- // DBStatementKey is the attribute Key conforming to the "db.statement"
- // semantic conventions. It represents the database statement being
- // executed.
- //
- // Type: string
- // RequirementLevel: Recommended (Should be collected by default only if
- // there is sanitization that excludes sensitive information.)
- // Stability: stable
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- DBStatementKey = attribute.Key("db.statement")
-
- // DBOperationKey is the attribute Key conforming to the "db.operation"
- // semantic conventions. It represents the name of the operation being
- // executed, e.g. the [MongoDB command
- // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
- // such as `findAndModify`, or the SQL keyword.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If `db.statement` is not
- // applicable.)
- // Stability: stable
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: When setting this to an SQL keyword, it is not recommended to
- // attempt any client-side parsing of `db.statement` just to get this
- // property, but it should be set if the operation name is provided by the
- // library being instrumented. If the SQL statement has an ambiguous
- // operation, or performs more than one operation, this value may be
- // omitted.
- DBOperationKey = attribute.Key("db.operation")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // Microsoft SQL Server Compact
- DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
- // Cloud Spanner
- DBSystemSpanner = DBSystemKey.String("spanner")
- // Trino
- DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBConnectionString returns an attribute KeyValue conforming to the
-// "db.connection_string" semantic conventions. It represents the connection
-// string used to connect to the database. It is recommended to remove embedded
-// credentials.
-func DBConnectionString(val string) attribute.KeyValue {
- return DBConnectionStringKey.String(val)
-}
-
-// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
-// conventions. It represents the username for accessing the database.
-func DBUser(val string) attribute.KeyValue {
- return DBUserKey.String(val)
-}
-
-// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
-// "db.jdbc.driver_classname" semantic conventions. It represents the
-// fully-qualified class name of the [Java Database Connectivity
-// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
-// used to connect.
-func DBJDBCDriverClassname(val string) attribute.KeyValue {
- return DBJDBCDriverClassnameKey.String(val)
-}
-
-// DBName returns an attribute KeyValue conforming to the "db.name" semantic
-// conventions. It represents the this attribute is used to report the name of
-// the database being accessed. For commands that switch the database, this
-// should be set to the target database (even if the command fails).
-func DBName(val string) attribute.KeyValue {
- return DBNameKey.String(val)
-}
-
-// DBStatement returns an attribute KeyValue conforming to the
-// "db.statement" semantic conventions. It represents the database statement
-// being executed.
-func DBStatement(val string) attribute.KeyValue {
- return DBStatementKey.String(val)
-}
-
-// DBOperation returns an attribute KeyValue conforming to the
-// "db.operation" semantic conventions. It represents the name of the operation
-// being executed, e.g. the [MongoDB command
-// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
-// such as `findAndModify`, or the SQL keyword.
-func DBOperation(val string) attribute.KeyValue {
- return DBOperationKey.String(val)
-}
-
-// Connection-level attributes for Microsoft SQL Server
-const (
- // DBMSSQLInstanceNameKey is the attribute Key conforming to the
- // "db.mssql.instance_name" semantic conventions. It represents the
- // Microsoft SQL Server [instance
- // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
- // connecting to. This name is used to determine the port of a named
- // instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MSSQLSERVER'
- // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
- // longer required (but still recommended if non-standard).
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-)
-
-// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
-// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
-// SQL Server [instance
-// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-// connecting to. This name is used to determine the port of a named instance.
-func DBMSSQLInstanceName(val string) attribute.KeyValue {
- return DBMSSQLInstanceNameKey.String(val)
-}
-
-// Call-level attributes for Cassandra
-const (
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraTableKey is the attribute Key conforming to the
- // "db.cassandra.table" semantic conventions. It represents the name of the
- // primary table that the operation is acting upon, including the keyspace
- // name (if applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra
- // rather than sql. It is not recommended to attempt any client-side
- // parsing of `db.statement` just to get this property, but it should be
- // set if it is provided by the library being instrumented. If the
- // operation is acting upon an anonymous table, or more than one table,
- // this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents the
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraTable returns an attribute KeyValue conforming to the
-// "db.cassandra.table" semantic conventions. It represents the name of the
-// primary table that the operation is acting upon, including the keyspace name
-// (if applicable).
-func DBCassandraTable(val string) attribute.KeyValue {
- return DBCassandraTableKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// Call-level attributes for Redis
-const (
- // DBRedisDBIndexKey is the attribute Key conforming to the
- // "db.redis.database_index" semantic conventions. It represents the index
- // of the database being accessed as used in the [`SELECT`
- // command](https://redis.io/commands/select), provided as an integer. To
- // be used instead of the generic `db.name` attribute.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If other than the default
- // database (`0`).)
- // Stability: stable
- // Examples: 0, 1, 15
- DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
-)
-
-// DBRedisDBIndex returns an attribute KeyValue conforming to the
-// "db.redis.database_index" semantic conventions. It represents the index of
-// the database being accessed as used in the [`SELECT`
-// command](https://redis.io/commands/select), provided as an integer. To be
-// used instead of the generic `db.name` attribute.
-func DBRedisDBIndex(val int) attribute.KeyValue {
- return DBRedisDBIndexKey.Int(val)
-}
-
-// Call-level attributes for MongoDB
-const (
- // DBMongoDBCollectionKey is the attribute Key conforming to the
- // "db.mongodb.collection" semantic conventions. It represents the
- // collection being accessed within the database stated in `db.name`.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'customers', 'products'
- DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
-)
-
-// DBMongoDBCollection returns an attribute KeyValue conforming to the
-// "db.mongodb.collection" semantic conventions. It represents the collection
-// being accessed within the database stated in `db.name`.
-func DBMongoDBCollection(val string) attribute.KeyValue {
- return DBMongoDBCollectionKey.String(val)
-}
-
-// Call-level attributes for SQL databases
-const (
- // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
- // semantic conventions. It represents the name of the primary table that
- // the operation is acting upon, including the database name (if
- // applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'public.users', 'customers'
- // Note: It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting
- // upon an anonymous table, or more than one table, this value MUST NOT be
- // set.
- DBSQLTableKey = attribute.Key("db.sql.table")
-)
-
-// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
-// semantic conventions. It represents the name of the primary table that the
-// operation is acting upon, including the database name (if applicable).
-func DBSQLTable(val string) attribute.KeyValue {
- return DBSQLTableKey.String(val)
-}
-
-// Call-level attributes for Cosmos DB.
-const (
- // DBCosmosDBClientIDKey is the attribute Key conforming to the
- // "db.cosmosdb.client_id" semantic conventions. It represents the unique
- // Cosmos client instance id.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
- DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
- // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
- // "db.cosmosdb.operation_type" semantic conventions. It represents the
- // cosmosDB Operation Type.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (when performing one of the
- // operations in this list)
- // Stability: stable
- DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
- // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
- // "db.cosmosdb.connection_mode" semantic conventions. It represents the
- // cosmos client connection mode.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as
- // default))
- // Stability: stable
- DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
- // DBCosmosDBContainerKey is the attribute Key conforming to the
- // "db.cosmosdb.container" semantic conventions. It represents the cosmos
- // DB container name.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if available)
- // Stability: stable
- // Examples: 'anystring'
- DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
-
- // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
- // "db.cosmosdb.request_content_length" semantic conventions. It represents
- // the request payload size in bytes
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
- // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
- // DB status code.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (if response was received)
- // Stability: stable
- // Examples: 200, 201
- DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
- // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
- // cosmos DB sub status code.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (when response was received and
- // contained sub-code.)
- // Stability: stable
- // Examples: 1000, 1002
- DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
-
- // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
- // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
- // consumed for that operation
- //
- // Type: double
- // RequirementLevel: ConditionallyRequired (when available)
- // Stability: stable
- // Examples: 46.18, 1.0
- DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBContainer returns an attribute KeyValue conforming to the
-// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
-// container name.
-func DBCosmosDBContainer(val string) attribute.KeyValue {
- return DBCosmosDBContainerKey.String(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
-// concepts.
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
-
-// This semantic convention describes an instance of a function that runs
-// without provisioning or managing of servers (also known as serverless
-// functions or Function as a Service (FaaS)) with spans.
-const (
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: For the server/consumer span on the incoming side,
- // `faas.trigger` MUST be set.
- //
- // Clients invoking FaaS instances usually cannot set `faas.trigger`,
- // since they would typically need to look in the payload to determine
- // the event type. If clients set it, it should be the same as the
- // trigger that corresponding incoming would have (i.e., this has
- // nothing to do with the underlying transport used to make the API
- // call to invoke the lambda, which is often HTTP).
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// Semantic Convention for FaaS triggered as a response to some data source
-// operation such as a database or filesystem read/write.
-const (
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It represents the
- // describes the type of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
-// database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-)
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
-// Contains additional attributes for incoming FaaS spans.
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// Contains additional attributes for outgoing FaaS spans.
-const (
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (For some cloud providers, like
- // AWS or GCP, the region in which a function is hosted is essential to
- // uniquely identify the function and also part of its endpoint. Since it's
- // part of the endpoint being called, the region is always known to
- // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
- // If the region is unknown to the client or not required for identifying
- // the invoked function, setting `faas.invoked_region` is optional.)
- // Stability: stable
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](../../resource/semantic_conventions/README.md#service)
- // of the remote service. SHOULD be equal to the actual `service.name`
- // resource attribute of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](../../resource/semantic_conventions/README.md#service) of
-// the remote service. SHOULD be equal to the actual `service.name` resource
-// attribute of the remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
-
-// These attributes may be used for any operation with an authenticated and/or
-// authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
- // semantic conventions. It represents the actual/assumed role the client
- // is making the request under extracted from token or application security
- // context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
- // semantic conventions. It represents the scopes or granted authorities
- // the client currently possesses extracted from token or application
- // security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under extracted from token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses extracted from token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// These attributes allow to report this unit of code and therefore to provide
-// more context about the span.
-const (
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-)
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
-
-// Semantic Convention for HTTP Client
-const (
- // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
- // conventions. It represents the full HTTP request URL in the form
- // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
- // not transmitted over HTTP, but if it is known, it should be included
- // nevertheless.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
- // Note: `http.url` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case the
- // attribute's value should be `https://www.example.com/`.
- HTTPURLKey = attribute.Key("http.url")
-
- // HTTPResendCountKey is the attribute Key conforming to the
- // "http.resend_count" semantic conventions. It represents the ordinal
- // number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Recommended (if and only if request was retried.)
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Server Unavailable,
- // network issues, or any other).
- HTTPResendCountKey = attribute.Key("http.resend_count")
-)
-
-// HTTPURL returns an attribute KeyValue conforming to the "http.url"
-// semantic conventions. It represents the full HTTP request URL in the form
-// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
-// transmitted over HTTP, but if it is known, it should be included
-// nevertheless.
-func HTTPURL(val string) attribute.KeyValue {
- return HTTPURLKey.String(val)
-}
-
-// HTTPResendCount returns an attribute KeyValue conforming to the
-// "http.resend_count" semantic conventions. It represents the ordinal number
-// of request resending attempt (for any reason, including redirects).
-func HTTPResendCount(val int) attribute.KeyValue {
- return HTTPResendCountKey.Int(val)
-}
-
-// Semantic Convention for HTTP Server
-const (
- // HTTPTargetKey is the attribute Key conforming to the "http.target"
- // semantic conventions. It represents the full request target as passed in
- // a HTTP request line or equivalent.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '/users/12314/?q=ddds'
- HTTPTargetKey = attribute.Key("http.target")
-
- // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
- // semantic conventions. It represents the IP address of the original
- // client behind all proxies, if known (e.g. from
- // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '83.164.160.102'
- // Note: This is not necessarily the same as `net.sock.peer.addr`, which
- // would
- // identify the network-level peer, which may be a proxy.
- //
- // This attribute should be set when a source of information different
- // from the one used for `net.sock.peer.addr`, is available even if that
- // other
- // source just confirms the same value as `net.sock.peer.addr`.
- // Rationale: For `net.sock.peer.addr`, one typically does not know if it
- // comes from a proxy, reverse proxy, or the actual client. Setting
- // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
- // one is at least somewhat confident that the address is not that of
- // the closest proxy.
- HTTPClientIPKey = attribute.Key("http.client_ip")
-)
-
-// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
-// semantic conventions. It represents the full request target as passed in a
-// HTTP request line or equivalent.
-func HTTPTarget(val string) attribute.KeyValue {
- return HTTPTargetKey.String(val)
-}
-
-// HTTPClientIP returns an attribute KeyValue conforming to the
-// "http.client_ip" semantic conventions. It represents the IP address of the
-// original client behind all proxies, if known (e.g. from
-// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
-func HTTPClientIP(val string) attribute.KeyValue {
- return HTTPClientIPKey.String(val)
-}
-
-// The `aws` conventions apply to operations using the AWS SDK. They map
-// request or response parameters in AWS SDK API calls to attributes on a Span.
-// The conventions have been collected over time based on feedback from AWS
-// users of tracing and will continue to evolve as new interesting conventions
-// are found.
-// Some descriptions are also provided for populating general OpenTelemetry
-// semantic conventions based on these APIs.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
- AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// Attributes that exist for multiple DynamoDB request types.
-const (
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-)
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
-// DynamoDB.CreateTable
-const (
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// DynamoDB.ListTables
-const (
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the the
-// number of items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// DynamoDB.Query
-const (
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-)
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// DynamoDB.Scan
-const (
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-)
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// DynamoDB.UpdateTable
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in the
- // the `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// Attributes that exist for S3 request types.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // -
- // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- // -
- // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- // -
- // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- // -
- // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- // -
- // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source
- // object (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'someFile.yml'
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the
- // "aws.s3.upload_id" semantic conventions. It represents the upload ID
- // that identifies the multipart upload.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
- // Note: The `upload_id` attribute applies to S3 multipart-upload
- // operations and corresponds to the `--upload-id` parameter
- // of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // multipart operations.
- // This applies in particular to the following operations:
- //
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
- // Note: The `delete` attribute is only applicable to the
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
-
-// Semantic conventions to apply when instrumenting the GraphQL implementation.
-// They map GraphQL operations to attributes on a Span.
-const (
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// General attributes used in messaging systems.
-const (
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents a string
- // identifying the messaging system.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
- MessagingSystemKey = attribute.Key("messaging.system")
-
- // MessagingOperationKey is the attribute Key conforming to the
- // "messaging.operation" semantic conventions. It represents a string
- // identifying the kind of messaging operation as defined in the [Operation
- // names](#operation-names) section above.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationKey = attribute.Key("messaging.operation")
-
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the span describes an
- // operation on a batch of messages.)
- // Stability: stable
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-)
-
-var (
- // publish
- MessagingOperationPublish = MessagingOperationKey.String("publish")
- // receive
- MessagingOperationReceive = MessagingOperationKey.String("receive")
- // process
- MessagingOperationProcess = MessagingOperationKey.String("process")
-)
-
-// MessagingSystem returns an attribute KeyValue conforming to the
-// "messaging.system" semantic conventions. It represents a string identifying
-// the messaging system.
-func MessagingSystem(val string) attribute.KeyValue {
- return MessagingSystemKey.String(val)
-}
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
-
-// Semantic convention for a consumer of messages received from a messaging
-// system
-const (
- // MessagingConsumerIDKey is the attribute Key conforming to the
- // "messaging.consumer.id" semantic conventions. It represents the
- // identifier for the consumer receiving a message. For Kafka, set it to
- // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
- // both are present, or only `messaging.kafka.consumer.group`. For brokers,
- // such as RabbitMQ and Artemis, set it to the `client_id` of the client
- // consuming the message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'mygroup - client-6'
- MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
-)
-
-// MessagingConsumerID returns an attribute KeyValue conforming to the
-// "messaging.consumer.id" semantic conventions. It represents the identifier
-// for the consumer receiving a message. For Kafka, set it to
-// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
-// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
-// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
-// message.
-func MessagingConsumerID(val string) attribute.KeyValue {
- return MessagingConsumerIDKey.String(val)
-}
-
-// Semantic conventions for remote procedure calls.
-const (
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- RPCSystemKey = attribute.Key("rpc.system")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called, must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
- // Connect RPC
- RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called, must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
-
-// Tech-specific attributes for gRPC.
-const (
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
-const (
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
- // does not specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If other than the default
- // version (`1.0`))
- // Stability: stable
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of request or response. Since protocol allows id to be int,
- // string, `null` or missing (for notifications), value is expected to be
- // cast to string for simplicity. Use empty string in case of `null` value.
- // Omit entirely if this is a notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If response is not successful.)
- // Stability: stable
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-)
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// does not specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since protocol allows id to be int, string,
-// `null` or missing (for notifications), value is expected to be cast to
-// string for simplicity. Use empty string in case of `null` value. Omit
-// entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// Tech-specific attributes for Connect RPC.
-const (
- // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
- // "rpc.connect_rpc.error_code" semantic conventions. It represents the
- // [error codes](https://connect.build/docs/protocol/#error-codes) of the
- // Connect request. Error codes are always string values.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (If response is not successful
- // and if error code available.)
- // Stability: stable
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-)
-
-var (
- // cancelled
- RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
- // unknown
- RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
- // invalid_argument
- RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
- // deadline_exceeded
- RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
- // not_found
- RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
- // already_exists
- RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
- // permission_denied
- RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
- // resource_exhausted
- RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
- // failed_precondition
- RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
- // aborted
- RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
- // out_of_range
- RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
- // unimplemented
- RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
- // internal
- RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
- // unavailable
- RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
- // data_loss
- RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
- // unauthenticated
- RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
deleted file mode 100644
index 2de1fc3c6..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.26.0
-
-[](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
deleted file mode 100644
index d8dc822b2..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
+++ /dev/null
@@ -1,8996 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The Android platform on which the Android application is running.
-const (
- // AndroidOSAPILevelKey is the attribute Key conforming to the
- // "android.os.api_level" semantic conventions. It represents the uniquely
- // identifies the framework API revision offered by a version
- // (`os.version`) of the android operating system. More information can be
- // found
- // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '33', '32'
- AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
-)
-
-// AndroidOSAPILevel returns an attribute KeyValue conforming to the
-// "android.os.api_level" semantic conventions. It represents the uniquely
-// identifies the framework API revision offered by a version (`os.version`) of
-// the android operating system. More information can be found
-// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
-func AndroidOSAPILevel(val string) attribute.KeyValue {
- return AndroidOSAPILevelKey.String(val)
-}
-
-// ASP.NET Core attributes
-const (
- // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.result" semantic conventions. It represents
- // the rate-limiting result, shows whether the lease was acquired or
- // contains a rejection reason
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'acquired', 'request_canceled'
- AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
-
- // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
- // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
- // represents the full type name of the
- // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
- // implementation that handled the exception.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if and only if the exception
- // was handled by this handler.)
- // Stability: stable
- // Examples: 'Contoso.MyHandler'
- AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
-
- // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
- // to the "aspnetcore.diagnostics.exception.result" semantic conventions.
- // It represents the aSP.NET Core exception middleware handling result
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'handled', 'unhandled'
- AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
-
- // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
- // the rate limiting policy name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'fixed', 'sliding', 'token'
- AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
-
- // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
- // "aspnetcore.request.is_unhandled" semantic conventions. It represents
- // the flag indicating if request was handled by the application pipeline.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: True
- AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
-
- // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
- // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
- // value that indicates whether the matched route is a fallback route.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: True
- AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
-
- // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
- // "aspnetcore.routing.match_status" semantic conventions. It represents
- // the match result - success or failure
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'success', 'failure'
- AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status")
-)
-
-var (
- // Lease was acquired
- AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
- // Lease request was rejected by the endpoint limiter
- AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
- // Lease request was rejected by the global limiter
- AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
- // Lease request was canceled
- AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
-)
-
-var (
- // Exception was handled by the exception handling middleware
- AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled")
- // Exception was not handled by the exception handling middleware
- AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled")
- // Exception handling was skipped because the response had started
- AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped")
- // Exception handling didn't run because the request was aborted
- AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted")
-)
-
-var (
- // Match succeeded
- AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success")
- // Match failed
- AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure")
-)
-
-// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
-// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
-// represents the full type name of the
-// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
-// implementation that handled the exception.
-func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
- return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
-}
-
-// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
-// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
-// the rate limiting policy name.
-func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
- return AspnetcoreRateLimitingPolicyKey.String(val)
-}
-
-// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
-// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
-// the flag indicating if request was handled by the application pipeline.
-func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
- return AspnetcoreRequestIsUnhandledKey.Bool(val)
-}
-
-// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
-// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
-// value that indicates whether the matched route is a fallback route.
-func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
- return AspnetcoreRoutingIsFallbackKey.Bool(val)
-}
-
-// Generic attributes for AWS services.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
- AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// Attributes for AWS DynamoDB.
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in the
- // `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the number of
-// items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// Attributes for AWS Elastic Container Service (ECS).
-const (
- // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
- // semantic conventions. It represents the ID of a running ECS task. The ID
- // MUST be extracted from `task.arn`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is
- // populated.)
- // Stability: experimental
- // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b',
- // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
- AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
-
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS
- // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container
- // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch
- // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the
- // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a
- // running [ECS
- // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b',
- // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the family
- // name of the [ECS task
- // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
- // used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision
- // for the task definition used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSTaskID returns an attribute KeyValue conforming to the
-// "aws.ecs.task.id" semantic conventions. It represents the ID of a running
-// ECS task. The ID MUST be extracted from `task.arn`.
-func AWSECSTaskID(val string) attribute.KeyValue {
- return AWSECSTaskIDKey.String(val)
-}
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
-// [ECS
-// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the family name of
-// the [ECS task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
-// used to create the ECS task.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// the task definition used to create the ECS task.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
-
-// Attributes for AWS Elastic Kubernetes Service (EKS).
-const (
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
- // EKS cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// Attributes for AWS Logs.
-const (
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon
- // Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of
- // the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like
- // multi-container applications, where a single application has sidecar
- // containers, and each write to their own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
- // the AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- // One log group can contain several log streams, so these ARNs necessarily
- // identify both a log group and a log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s)
- // of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-)
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
-// Attributes for AWS Lambda.
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for AWS S3.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source
- // object (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
- // Note: The `delete` attribute is only applicable to the
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // -
- // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- // -
- // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- // -
- // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- // -
- // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- // -
- // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the
- // "aws.s3.upload_id" semantic conventions. It represents the upload ID
- // that identifies the multipart upload.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
- // Note: The `upload_id` attribute applies to S3 multipart-upload
- // operations and corresponds to the `--upload-id` parameter
- // of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // multipart operations.
- // This applies in particular to the following operations:
- //
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
-
-// The web browser attributes
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.brands`).
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserLanguageKey is the attribute Key conforming to the
- // "browser.language" semantic conventions. It represents the preferred
- // language of the user using the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'en', 'en-US', 'fr', 'fr-FR'
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the
- // browser is running on a mobile device
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.mobile`). If unavailable, this attribute
- // SHOULD be left unset.
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserPlatformKey is the attribute Key conforming to the
- // "browser.platform" semantic conventions. It represents the platform on
- // which the browser is running
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute
- // SHOULD be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client
- // Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in
- // the [`os.type` and `os.name` attributes](./os.md). However, for
- // consistency, the values in the `browser.platform` attribute should
- // capture the exact value that the user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
-// These attributes may be used to describe the client in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ClientAddressKey is the attribute Key conforming to the "client.address"
- // semantic conventions. It represents the client address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.address` SHOULD represent the client address
- // behind any intermediaries, for example proxies, if it's available.
- ClientAddressKey = attribute.Key("client.address")
-
- // ClientPortKey is the attribute Key conforming to the "client.port"
- // semantic conventions. It represents the client port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.port` SHOULD represent the client port behind
- // any intermediaries, for example proxies, if it's available.
- ClientPortKey = attribute.Key("client.port")
-)
-
-// ClientAddress returns an attribute KeyValue conforming to the
-// "client.address" semantic conventions. It represents the client address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func ClientAddress(val string) attribute.KeyValue {
- return ClientAddressKey.String(val)
-}
-
-// ClientPort returns an attribute KeyValue conforming to the "client.port"
-// semantic conventions. It represents the client port number.
-func ClientPort(val int) attribute.KeyValue {
- return ClientPortKey.Int(val)
-}
-
-// A cloud environment (e.g. GCP, Azure, AWS).
-const (
- // CloudAccountIDKey is the attribute Key conforming to the
- // "cloud.account.id" semantic conventions. It represents the cloud account
- // ID the resource is assigned to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
- // "cloud.availability_zone" semantic conventions. It represents the cloud
- // regions often have multiple, isolated locations known as zones to
- // increase availability. Availability zone represents the zone where the
- // resource is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region"
- // semantic conventions. It represents the geographical region the resource
- // is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for
- // example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure
- // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
- // [Google Cloud regions](https://cloud.google.com/about/locations), or
- // [Tencent Cloud
- // regions](https://www.tencentcloud.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudResourceIDKey is the attribute Key conforming to the
- // "cloud.resource_id" semantic conventions. It represents the cloud
- // provider-specific native identifier of the monitored cloud resource
- // (e.g. an
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // on AWS, a [fully qualified resource
- // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
- // on Azure, a [full resource
- // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
- // on GCP)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
- // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
- // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
- // Note: On some cloud providers, it may not be possible to determine the
- // full ID at startup,
- // so it may be necessary to set `cloud.resource_id` as a span attribute
- // instead.
- //
- // The exact value to use for `cloud.resource_id` depends on the cloud
- // provider.
- // The following well-known definitions MUST be used if you set this
- // attribute and they apply:
- //
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias
- // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
- // with the resolved function version, as the same runtime instance may
- // be invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the
- // resource](https://cloud.google.com/iam/docs/full-resource-names)
- // * **Azure:** The [Fully Qualified Resource
- // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
- // of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider.
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Apps
- CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Bare Metal Solution (BMS)
- CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
- // Red Hat OpenShift on IBM Cloud
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Heroku Platform as a Service
- CloudProviderHeroku = CloudProviderKey.String("heroku")
- // IBM Cloud
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the cloud
-// regions often have multiple, isolated locations known as zones to increase
-// availability. Availability zone represents the zone where the resource is
-// running.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-// on AWS, a [fully qualified resource
-// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
-// Azure, a [full resource
-// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-// on GCP)
-func CloudResourceID(val string) attribute.KeyValue {
- return CloudResourceIDKey.String(val)
-}
-
-// Attributes for CloudEvents.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
- // uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
- // which identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
- // which contains a value describing the type of event related to the originating
- // occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// which uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-// which identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-// which contains a value describing the type of event related to the originating
-// occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
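-
-// Usage sketch (not from the upstream file; it assumes an active trace.Span
-// named span and this package imported as semconv). Consumers typically record
-// the CloudEvents context on the span that processes the event; the values are
-// placeholders drawn from the attribute examples above.
-//
-//    span.SetAttributes(
-//        semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
-//        semconv.CloudeventsEventSource("https://github.com/cloudevents"),
-//        semconv.CloudeventsEventSpecVersion("1.0"),
-//        semconv.CloudeventsEventType("com.github.pull_request.opened"),
-//    )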
-
-// These attributes allow to report this unit of code and therefore to provide
-// more context about the span.
-const (
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeStacktraceKey is the attribute Key conforming to the
- // "code.stacktrace" semantic conventions. It represents a stacktrace as a
- // string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'at
- // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- CodeStacktraceKey = attribute.Key("code.stacktrace")
-)
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeStacktrace returns an attribute KeyValue conforming to the
-// "code.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func CodeStacktrace(val string) attribute.KeyValue {
- return CodeStacktraceKey.String(val)
-}
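-
-// Usage sketch (not from the upstream file; it assumes an active trace.Span
-// named span and this package imported as semconv). One way to fill the code.*
-// attributes is Go's runtime package; note that FuncForPC returns a fully
-// qualified name, so splitting it into code.namespace and code.function is
-// left to the caller.
-//
-//    if pc, file, line, ok := runtime.Caller(0); ok {
-//        span.SetAttributes(
-//            semconv.CodeFilepath(file),
-//            semconv.CodeLineNumber(line),
-//            semconv.CodeFunction(runtime.FuncForPC(pc).Name()),
-//        )
-//    }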
-
-// A container instance.
-const (
- // ContainerCommandKey is the attribute Key conforming to the
- // "container.command" semantic conventions. It represents the command used
- // to run the container (i.e. the command name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol'
- // Note: If using embedded credentials or sensitive data, it is recommended
- // to remove them to prevent potential leakage.
- ContainerCommandKey = attribute.Key("container.command")
-
- // ContainerCommandArgsKey is the attribute Key conforming to the
- // "container.command_args" semantic conventions. It represents all the
- // command arguments (including the command/executable itself) run by the
- // container. [2]
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol, --config, config.yaml'
- ContainerCommandArgsKey = attribute.Key("container.command_args")
-
- // ContainerCommandLineKey is the attribute Key conforming to the
- // "container.command_line" semantic conventions. It represents the full
- // command run by the container as a single string representing the full
- // command. [2]
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol --config config.yaml'
- ContainerCommandLineKey = attribute.Key("container.command_line")
-
- // ContainerCPUStateKey is the attribute Key conforming to the
- // "container.cpu.state" semantic conventions. It represents the CPU state
- // for this data point.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'user', 'kernel'
- ContainerCPUStateKey = attribute.Key("container.cpu.state")
-
- // ContainerIDKey is the attribute Key conforming to the "container.id"
- // semantic conventions. It represents the container ID. Usually a UUID, as
- // for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-identification).
- // The UUID might be abbreviated.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerImageIDKey is the attribute Key conforming to the
- // "container.image.id" semantic conventions. It represents the runtime
- // specific image identifier. Usually a hash algorithm followed by a UUID.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
- // Note: Docker defines a sha256 of the image id; `container.image.id`
- // corresponds to the `Image` field from the Docker container inspect
- // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
- // endpoint.
- // K8S defines a link to the container registry repository with digest
- // `"imageID": "registry.azurecr.io
- // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
- // The ID is assigned by the container runtime and can vary in different
- // environments. Consider using `oci.manifest.digest` if it is important to
- // identify the same image in different environments/runtimes.
- ContainerImageIDKey = attribute.Key("container.image.id")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageRepoDigestsKey is the attribute Key conforming to the
- // "container.image.repo_digests" semantic conventions. It represents the
- // repo digests of the container image as provided by the container
- // runtime.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
- // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
- // Note:
- // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
- // and
- // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
- // report those under the `RepoDigests` field.
- ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
-
- // ContainerImageTagsKey is the attribute Key conforming to the
- // "container.image.tags" semantic conventions. It represents the container
- // image tags. An example can be found in [Docker Image
- // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
- // Should be only the `<tag>` section of the full name for example from
- // `registry.example.com/my-org/my-image:<tag>`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'v1.27.1', '3.5.7-0'
- ContainerImageTagsKey = attribute.Key("container.image.tags")
-
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
-
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container
- // runtime managing this container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
-)
-
-var (
- // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
- ContainerCPUStateUser = ContainerCPUStateKey.String("user")
- // When CPU is used by the system (host OS)
- ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
- // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
- ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
-)
-
-// ContainerCommand returns an attribute KeyValue conforming to the
-// "container.command" semantic conventions. It represents the command used to
-// run the container (i.e. the command name).
-func ContainerCommand(val string) attribute.KeyValue {
- return ContainerCommandKey.String(val)
-}
-
-// ContainerCommandArgs returns an attribute KeyValue conforming to the
-// "container.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) run by the
-// container. [2]
-func ContainerCommandArgs(val ...string) attribute.KeyValue {
- return ContainerCommandArgsKey.StringSlice(val)
-}
-
-// ContainerCommandLine returns an attribute KeyValue conforming to the
-// "container.command_line" semantic conventions. It represents the full
-// command run by the container as a single string representing the full
-// command. [2]
-func ContainerCommandLine(val string) attribute.KeyValue {
- return ContainerCommandLineKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
- return ContainerIDKey.String(val)
-}
-
-// ContainerImageID returns an attribute KeyValue conforming to the
-// "container.image.id" semantic conventions. It represents the runtime
-// specific image identifier. Usually a hash algorithm followed by a UUID.
-func ContainerImageID(val string) attribute.KeyValue {
- return ContainerImageIDKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
- return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
-// "container.image.repo_digests" semantic conventions. It represents the repo
-// digests of the container image as provided by the container runtime.
-func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
- return ContainerImageRepoDigestsKey.StringSlice(val)
-}
-
-// ContainerImageTags returns an attribute KeyValue conforming to the
-// "container.image.tags" semantic conventions. It represents the container
-// image tags. An example can be found in [Docker Image
-// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
-// Should be only the `<tag>` section of the full name for example from
-// `registry.example.com/my-org/my-image:<tag>`.
-func ContainerImageTags(val ...string) attribute.KeyValue {
- return ContainerImageTagsKey.StringSlice(val)
-}
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
- return ContainerNameKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
-}
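-
-// Usage sketch (not from the upstream file; it assumes this package is
-// imported as semconv and the SDK's resource package is available). A
-// collector or agent describing a container would typically emit these as
-// resource attributes; the values are placeholders based on the examples
-// above.
-//
-//    res := resource.NewWithAttributes(
-//        semconv.SchemaURL,
-//        semconv.ContainerName("opentelemetry-autoconf"),
-//        semconv.ContainerID("a3bf90e006b2"),
-//        semconv.ContainerRuntime("podman"),
-//        semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
-//        semconv.ContainerImageTags("v1.27.1"),
-//    )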
-
-// This group defines the attributes used to describe telemetry in the context
-// of databases.
-const (
- // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
- // "db.client.connections.pool.name" semantic conventions. It represents
- // the name of the connection pool; unique within the instrumented
- // application. In case the connection pool implementation doesn't provide
- // a name, instrumentation should use a combination of `server.address` and
- // `server.port` attributes formatted as `server.address:server.port`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myDataSource'
- DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
-
- // DBClientConnectionsStateKey is the attribute Key conforming to the
- // "db.client.connections.state" semantic conventions. It represents the
- // state of a connection in the pool
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'idle'
- DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
-
- // DBCollectionNameKey is the attribute Key conforming to the
- // "db.collection.name" semantic conventions. It represents the name of a
- // collection (table, container) within the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'public.users', 'customers'
- // Note: If the collection name is parsed from the query, it SHOULD match
- // the value provided in the query and may be qualified with the schema and
- // database name.
- // It is RECOMMENDED to capture the value as provided by the application
- // without attempting to do any case normalization.
- DBCollectionNameKey = attribute.Key("db.collection.name")
-
- // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
- // semantic conventions. It represents the name of the database, fully
- // qualified within the server address and port.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'customers', 'test.users'
- // Note: If a database system has multiple namespace components, they
- // SHOULD be concatenated (potentially using database system specific
- // conventions) from most general to most specific namespace component, and
- // more specific namespaces SHOULD NOT be captured without the more general
- // namespaces, to ensure that "startswith" queries for the more general
- // namespaces will be valid.
- // Semantic conventions for individual database systems SHOULD document
- // what `db.namespace` means in the context of that system.
- // It is RECOMMENDED to capture the value as provided by the application
- // without attempting to do any case normalization.
- DBNamespaceKey = attribute.Key("db.namespace")
-
- // DBOperationNameKey is the attribute Key conforming to the
- // "db.operation.name" semantic conventions. It represents the name of the
- // operation or command being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: It is RECOMMENDED to capture the value as provided by the
- // application without attempting to do any case normalization.
- DBOperationNameKey = attribute.Key("db.operation.name")
-
- // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
- // semantic conventions. It represents the database query being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey
- // "WuValue"'
- DBQueryTextKey = attribute.Key("db.query.text")
-
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents the database management system (DBMS) product
- // as identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The actual DBMS may differ from the one identified by the client.
- // For example, when using PostgreSQL client libraries to connect to a
- // CockroachDB, the `db.system` is set to `postgresql` based on the
- // instrumentation's best knowledge.
- DBSystemKey = attribute.Key("db.system")
-)
-
-var (
- // idle
- DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle")
- // used
- DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // Microsoft SQL Server Compact
- DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
- // Cloud Spanner
- DBSystemSpanner = DBSystemKey.String("spanner")
- // Trino
- DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBClientConnectionsPoolName returns an attribute KeyValue conforming to
-// the "db.client.connections.pool.name" semantic conventions. It represents
-// the name of the connection pool; unique within the instrumented application.
-// In case the connection pool implementation doesn't provide a name,
-// instrumentation should use a combination of `server.address` and
-// `server.port` attributes formatted as `server.address:server.port`.
-func DBClientConnectionsPoolName(val string) attribute.KeyValue {
- return DBClientConnectionsPoolNameKey.String(val)
-}
-
-// DBCollectionName returns an attribute KeyValue conforming to the
-// "db.collection.name" semantic conventions. It represents the name of a
-// collection (table, container) within the database.
-func DBCollectionName(val string) attribute.KeyValue {
- return DBCollectionNameKey.String(val)
-}
-
-// DBNamespace returns an attribute KeyValue conforming to the
-// "db.namespace" semantic conventions. It represents the name of the database,
-// fully qualified within the server address and port.
-func DBNamespace(val string) attribute.KeyValue {
- return DBNamespaceKey.String(val)
-}
-
-// DBOperationName returns an attribute KeyValue conforming to the
-// "db.operation.name" semantic conventions. It represents the name of the
-// operation or command being executed.
-func DBOperationName(val string) attribute.KeyValue {
- return DBOperationNameKey.String(val)
-}
-
-// DBQueryText returns an attribute KeyValue conforming to the
-// "db.query.text" semantic conventions. It represents the database query being
-// executed.
-func DBQueryText(val string) attribute.KeyValue {
- return DBQueryTextKey.String(val)
-}
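-
-// Usage sketch (not from the upstream file; it assumes an active trace.Span
-// named span and this package imported as semconv). A database client
-// instrumentation would normally combine the enum member for the DBMS with
-// the general db.* attributes; the values are placeholders taken from the
-// examples above.
-//
-//    span.SetAttributes(
-//        semconv.DBSystemPostgreSQL,
-//        semconv.DBNamespace("customers"),
-//        semconv.DBCollectionName("public.users"),
-//        semconv.DBOperationName("SELECT"),
-//        semconv.DBQueryText("SELECT * FROM wuser_table where username = ?"),
-//    )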
-
-// This group defines attributes for Cassandra.
-const (
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// This group defines attributes for Azure Cosmos DB.
-const (
- // DBCosmosDBClientIDKey is the attribute Key conforming to the
- // "db.cosmosdb.client_id" semantic conventions. It represents the unique
- // Cosmos client instance id.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
- DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
- // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
- // "db.cosmosdb.connection_mode" semantic conventions. It represents the
- // cosmos client connection mode.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
- // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
- // "db.cosmosdb.operation_type" semantic conventions. It represents the
- // Cosmos DB operation type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
- // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
- // "db.cosmosdb.request_charge" semantic conventions. It represents the RU
- // (request units) consumed for that operation.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 46.18, 1.0
- DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-
- // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
- // "db.cosmosdb.request_content_length" semantic conventions. It represents
- // the request payload size in bytes
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
- // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
- // DB status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 200, 201
- DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
- // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
- // cosmos DB sub status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1000, 1002
- DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// This group defines attributes for Elasticsearch.
-const (
- // DBElasticsearchClusterNameKey is the attribute Key conforming to the
- // "db.elasticsearch.cluster.name" semantic conventions. It represents the
- // identifier of an Elasticsearch cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
- DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
-
- // DBElasticsearchNodeNameKey is the attribute Key conforming to the
- // "db.elasticsearch.node.name" semantic conventions. It represents the
- // human-readable identifier of the node/instance to which a
- // request was routed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-0000000001'
- DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
-)
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
- return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// human-readable identifier of the node/instance to which a
-// request was routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
- return DBElasticsearchNodeNameKey.String(val)
-}
-
-// Attributes for software deployments.
-const (
- // DeploymentEnvironmentKey is the attribute Key conforming to the
- // "deployment.environment" semantic conventions. It represents the name of
- // the [deployment
- // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'staging', 'production'
- // Note: `deployment.environment` does not affect the uniqueness
- // constraints defined through
- // the `service.namespace`, `service.name` and `service.instance.id`
- // resource attributes.
- // This implies that resources carrying the following attribute
- // combinations MUST be
- // considered to be identifying the same service:
- //
- // * `service.name=frontend`, `deployment.environment=production`
- // * `service.name=frontend`, `deployment.environment=staging`.
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
-// (aka deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
- return DeploymentEnvironmentKey.String(val)
-}
-
-// Attributes that represents an occurrence of a lifecycle transition on the
-// Android platform.
-const (
- // AndroidStateKey is the attribute Key conforming to the "android.state"
- // semantic conventions. This attribute is deprecated; use the
- // `device.app.lifecycle` event definition, including `android.state` as a
- // payload field, instead.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The Android lifecycle states are defined in [Activity lifecycle
- // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
- // from which the `OS identifiers` are derived.
- AndroidStateKey = attribute.Key("android.state")
-)
-
-var (
- // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
- AndroidStateCreated = AndroidStateKey.String("created")
- // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
- AndroidStateBackground = AndroidStateKey.String("background")
- // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
- AndroidStateForeground = AndroidStateKey.String("foreground")
-)
-
-// These attributes may be used to describe the receiver of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // DestinationAddressKey is the attribute Key conforming to the
- // "destination.address" semantic conventions. It represents the
- // destination address - domain name if available without reverse DNS
- // lookup; otherwise, IP address or Unix domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the source side, and when communicating through
- // an intermediary, `destination.address` SHOULD represent the destination
- // address behind any intermediaries, for example proxies, if it's
- // available.
- DestinationAddressKey = attribute.Key("destination.address")
-
- // DestinationPortKey is the attribute Key conforming to the
- // "destination.port" semantic conventions. It represents the destination
- // port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- DestinationPortKey = attribute.Key("destination.port")
-)
-
-// DestinationAddress returns an attribute KeyValue conforming to the
-// "destination.address" semantic conventions. It represents the destination
-// address - domain name if available without reverse DNS lookup; otherwise, IP
-// address or Unix domain socket name.
-func DestinationAddress(val string) attribute.KeyValue {
- return DestinationAddressKey.String(val)
-}
-
-// DestinationPort returns an attribute KeyValue conforming to the
-// "destination.port" semantic conventions. It represents the destination port
-// number
-func DestinationPort(val int) attribute.KeyValue {
- return DestinationPortKey.Int(val)
-}
-
-// Describes device attributes.
-const (
- // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
- // conventions. It represents a unique identifier representing the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values
- // outlined below. This value is not an advertising identifier and MUST NOT
- // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
- // to the [vendor
- // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
- // On Android (Java or Kotlin), this value MUST be equal to the Firebase
- // Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on
- // best practices and exact implementation details. Caution should be taken
- // when storing personal data or anything which can identify a user. GDPR
- // and data protection laws may apply, ensure you do your own due
- // diligence.
- DeviceIDKey = attribute.Key("device.id")
-
- // DeviceManufacturerKey is the attribute Key conforming to the
- // "device.manufacturer" semantic conventions. It represents the name of
- // the device manufacturer
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-
- // DeviceModelIdentifierKey is the attribute Key conforming to the
- // "device.model.identifier" semantic conventions. It represents the model
- // identifier for the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine-readable version
- // of the model identifier rather than the market or consumer-friendly name
- // of the device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
- // DeviceModelNameKey is the attribute Key conforming to the
- // "device.model.name" semantic conventions. It represents the marketing
- // name for the device model
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human-readable version of
- // the device model rather than a machine-readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
- return DeviceIDKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
- return DeviceManufacturerKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
- return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
- return DeviceModelNameKey.String(val)
-}
-
-// These attributes may be used for any disk related operation.
-const (
- // DiskIoDirectionKey is the attribute Key conforming to the
- // "disk.io.direction" semantic conventions. It represents the disk IO
- // operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read'
- DiskIoDirectionKey = attribute.Key("disk.io.direction")
-)
-
-var (
- // read
- DiskIoDirectionRead = DiskIoDirectionKey.String("read")
- // write
- DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
-)
-
-// The shared attributes used to report a DNS query.
-const (
- // DNSQuestionNameKey is the attribute Key conforming to the
- // "dns.question.name" semantic conventions. It represents the name being
- // queried.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'www.example.com', 'opentelemetry.io'
- // Note: If the name field contains non-printable characters (below 32 or
- // above 126), those characters should be represented as escaped base 10
- // integers (\DDD). Back slashes and quotes should be escaped. Tabs,
- // carriage returns, and line feeds should be converted to \t, \r, and \n
- // respectively.
- DNSQuestionNameKey = attribute.Key("dns.question.name")
-)
-
-// DNSQuestionName returns an attribute KeyValue conforming to the
-// "dns.question.name" semantic conventions. It represents the name being
-// queried.
-func DNSQuestionName(val string) attribute.KeyValue {
- return DNSQuestionNameKey.String(val)
-}
-
-// Attributes for operations with an authenticated and/or authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
- // semantic conventions. It represents the actual/assumed role the client
- // is making the request under, extracted from the token or application
- // security context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
- // semantic conventions. It represents the scopes or granted authorities
- // the client currently possesses, extracted from the token or application
- // security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under, extracted from the token or
-// application security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses, extracted from the token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// The shared attributes used to report an error.
-const (
- // ErrorTypeKey is the attribute Key conforming to the "error.type"
- // semantic conventions. It describes a class of error the
- // operation ended with.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'timeout', 'java.net.UnknownHostException',
- // 'server_certificate_invalid', '500'
- // Note: The `error.type` SHOULD be predictable, and SHOULD have low
- // cardinality.
- //
- // When `error.type` is set to a type (e.g., an exception type), its
- // canonical class name identifying the type within the artifact SHOULD be
- // used.
- //
- // Instrumentations SHOULD document the list of errors they report.
- //
- // The cardinality of `error.type` within one instrumentation library
- // SHOULD be low.
- // Telemetry consumers that aggregate data from multiple instrumentation
- // libraries and applications
- // should be prepared for `error.type` to have high cardinality at query
- // time when no
- // additional filters are applied.
- //
- // If the operation has completed successfully, instrumentations SHOULD NOT
- // set `error.type`.
- //
- // If a specific domain defines its own set of error identifiers (such as
- // HTTP or gRPC status codes),
- // it's RECOMMENDED to:
- //
- // * Use a domain-specific attribute
- // * Set `error.type` to capture all errors, regardless of whether they are
- // defined within the domain-specific set or not.
- ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
- // A fallback error value to be used when the instrumentation doesn't define a custom value
- ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
-
-// Attributes for Events represented using Log Records.
-const (
- // EventNameKey is the attribute Key conforming to the "event.name"
- // semantic conventions. It identifies the class / type of event.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'browser.mouse.click', 'device.app.lifecycle'
- // Note: Event names are subject to the same rules as [attribute
- // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
- // Notably, event names are namespaced to avoid collisions and provide a
- // clean separation of semantics for events in separate domains like
- // browser, mobile, and kubernetes.
- EventNameKey = attribute.Key("event.name")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It identifies the class / type of event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
- // "exception.escaped" semantic conventions. It SHOULD be
- // set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
- // This may be actually "in flight" in some languages (e.g. if the
- // exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most
- // languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example for recording span
- // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It SHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// FaaS attributes
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It represents the
- // type of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string, that will be potentially reused for other invocations to the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `/`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run (Services):** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
-// database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string, that will be potentially reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
-
-// Attributes for Feature Flags.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
- // "feature_flag.variant" semantic conventions. It SHOULD be
- // a semantic identifier for a value. If one is unavailable, a stringified
- // version of the value can be used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
- // For example, the variant `red` may be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It SHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
-// Describes file attributes.
-const (
- // FileDirectoryKey is the attribute Key conforming to the "file.directory"
- // semantic conventions. It represents the directory where the file is
- // located. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/user', 'C:\\Program Files\\MyApp'
- FileDirectoryKey = attribute.Key("file.directory")
-
- // FileExtensionKey is the attribute Key conforming to the "file.extension"
- // semantic conventions. It represents the file extension, excluding the
- // leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: When the file name has multiple extensions (example.tar.gz), only
- // the last one should be captured ("gz", not "tar.gz").
- FileExtensionKey = attribute.Key("file.extension")
-
- // FileNameKey is the attribute Key conforming to the "file.name" semantic
- // conventions. It represents the name of the file including the extension,
- // without the directory.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.png'
- FileNameKey = attribute.Key("file.name")
-
- // FilePathKey is the attribute Key conforming to the "file.path" semantic
- // conventions. It represents the full path to the file, including the file
- // name. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/alice/example.png', 'C:\\Program
- // Files\\MyApp\\myapp.exe'
- FilePathKey = attribute.Key("file.path")
-
- // FileSizeKey is the attribute Key conforming to the "file.size" semantic
- // conventions. It represents the file size in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- FileSizeKey = attribute.Key("file.size")
-)
-
-// FileDirectory returns an attribute KeyValue conforming to the
-// "file.directory" semantic conventions. It represents the directory where the
-// file is located. It should include the drive letter, when appropriate.
-func FileDirectory(val string) attribute.KeyValue {
- return FileDirectoryKey.String(val)
-}
-
-// FileExtension returns an attribute KeyValue conforming to the
-// "file.extension" semantic conventions. It represents the file extension,
-// excluding the leading dot.
-func FileExtension(val string) attribute.KeyValue {
- return FileExtensionKey.String(val)
-}
-
-// FileName returns an attribute KeyValue conforming to the "file.name"
-// semantic conventions. It represents the name of the file including the
-// extension, without the directory.
-func FileName(val string) attribute.KeyValue {
- return FileNameKey.String(val)
-}
-
-// FilePath returns an attribute KeyValue conforming to the "file.path"
-// semantic conventions. It represents the full path to the file, including the
-// file name. It should include the drive letter, when appropriate.
-func FilePath(val string) attribute.KeyValue {
- return FilePathKey.String(val)
-}
-
-// FileSize returns an attribute KeyValue conforming to the "file.size"
-// semantic conventions. It represents the file size in bytes.
-func FileSize(val int) attribute.KeyValue {
- return FileSizeKey.Int(val)
-}
-
-// Attributes for Google Cloud Run.
-const (
- // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.execution" semantic conventions. It represents the
- // name of the Cloud Run
- // [execution](https://cloud.google.com/run/docs/managing/job-executions)
- // being run for the Job, as set by the
- // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'job-name-xxxx', 'sample-job-mdw84'
- GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
-
- // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
- // index for a task within an execution as provided by the
- // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1
- GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
-)
-
-// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
-// of the Cloud Run
-// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
-// run for the Job, as set by the
-// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobExecution(val string) attribute.KeyValue {
- return GCPCloudRunJobExecutionKey.String(val)
-}
-
-// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
-// for a task within an execution as provided by the
-// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
- return GCPCloudRunJobTaskIndexKey.Int(val)
-}
-
-// Attributes for Google Compute Engine (GCE).
-const (
- // GCPGceInstanceHostnameKey is the attribute Key conforming to the
- // "gcp.gce.instance.hostname" semantic conventions. It represents the
- // hostname of a GCE instance. This is the full value of the default or
- // [custom
- // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-host1234.example.com',
- // 'sample-vm.us-west1-b.c.my-project.internal'
- GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
-
- // GCPGceInstanceNameKey is the attribute Key conforming to the
- // "gcp.gce.instance.name" semantic conventions. It represents the instance
- // name of a GCE instance. This is the value provided by `host.name`, the
- // visible name of the instance in the Cloud Console UI, and the prefix for
- // the default hostname of the instance as defined by the [default internal
- // DNS
- // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-1', 'my-vm-name'
- GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
-)
-
-// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
-// of a GCE instance. This is the full value of the default or [custom
-// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
-func GCPGceInstanceHostname(val string) attribute.KeyValue {
- return GCPGceInstanceHostnameKey.String(val)
-}
-
-// GCPGceInstanceName returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.name" semantic conventions. It represents the instance
-// name of a GCE instance. This is the value provided by `host.name`, the
-// visible name of the instance in the Cloud Console UI, and the prefix for the
-// default hostname of the instance as defined by the [default internal DNS
-// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
-func GCPGceInstanceName(val string) attribute.KeyValue {
- return GCPGceInstanceNameKey.String(val)
-}
-
-// The attributes used to describe telemetry in the context of LLM (Large
-// Language Models) requests and responses.
-const (
- // GenAiCompletionKey is the attribute Key conforming to the
- // "gen_ai.completion" semantic conventions. It represents the full
- // response received from the LLM.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: "[{'role': 'assistant', 'content': 'The capital of France is
- // Paris.'}]"
- // Note: It's RECOMMENDED to format completions as JSON string matching
- // [OpenAI messages
- // format](https://platform.openai.com/docs/guides/text-generation)
- GenAiCompletionKey = attribute.Key("gen_ai.completion")
-
- // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt"
- // semantic conventions. It represents the full prompt sent to an LLM.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: "[{'role': 'user', 'content': 'What is the capital of
- // France?'}]"
- // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI
- // messages
- // format](https://platform.openai.com/docs/guides/text-generation)
- GenAiPromptKey = attribute.Key("gen_ai.prompt")
-
- // GenAiRequestMaxTokensKey is the attribute Key conforming to the
- // "gen_ai.request.max_tokens" semantic conventions. It represents the
- // maximum number of tokens the LLM generates for a request.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
-
- // GenAiRequestModelKey is the attribute Key conforming to the
- // "gen_ai.request.model" semantic conventions. It represents the name of
- // the LLM a request is being made to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gpt-4'
- GenAiRequestModelKey = attribute.Key("gen_ai.request.model")
-
- // GenAiRequestTemperatureKey is the attribute Key conforming to the
- // "gen_ai.request.temperature" semantic conventions. It represents the
- // temperature setting for the LLM request.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0.0
- GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
-
- // GenAiRequestTopPKey is the attribute Key conforming to the
- // "gen_ai.request.top_p" semantic conventions. It represents the top_p
- // sampling setting for the LLM request.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0
- GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p")
-
- // GenAiResponseFinishReasonsKey is the attribute Key conforming to the
- // "gen_ai.response.finish_reasons" semantic conventions. It represents the
- // array of reasons the model stopped generating tokens, corresponding to
- // each generation received.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'stop'
- GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
-
- // GenAiResponseIDKey is the attribute Key conforming to the
- // "gen_ai.response.id" semantic conventions. It represents the unique
- // identifier for the completion.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'chatcmpl-123'
- GenAiResponseIDKey = attribute.Key("gen_ai.response.id")
-
- // GenAiResponseModelKey is the attribute Key conforming to the
- // "gen_ai.response.model" semantic conventions. It represents the name of
- // the LLM a response was generated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gpt-4-0613'
- GenAiResponseModelKey = attribute.Key("gen_ai.response.model")
-
- // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system"
- // semantic conventions. It represents the Generative AI product as
- // identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'openai'
- // Note: The actual GenAI product may differ from the one identified by the
- // client. For example, when using OpenAI client libraries to communicate
- // with Mistral, the `gen_ai.system` is set to `openai` based on the
- // instrumentation's best knowledge.
- GenAiSystemKey = attribute.Key("gen_ai.system")
-
- // GenAiUsageCompletionTokensKey is the attribute Key conforming to the
- // "gen_ai.usage.completion_tokens" semantic conventions. It represents the
- // number of tokens used in the LLM response (completion).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 180
- GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens")
-
- // GenAiUsagePromptTokensKey is the attribute Key conforming to the
- // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the
- // number of tokens used in the LLM prompt.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens")
-)
-
-var (
- // OpenAI
- GenAiSystemOpenai = GenAiSystemKey.String("openai")
-)
-
-// GenAiCompletion returns an attribute KeyValue conforming to the
-// "gen_ai.completion" semantic conventions. It represents the full response
-// received from the LLM.
-func GenAiCompletion(val string) attribute.KeyValue {
- return GenAiCompletionKey.String(val)
-}
-
-// GenAiPrompt returns an attribute KeyValue conforming to the
-// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to
-// an LLM.
-func GenAiPrompt(val string) attribute.KeyValue {
- return GenAiPromptKey.String(val)
-}
-
-// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the
-// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
-// number of tokens the LLM generates for a request.
-func GenAiRequestMaxTokens(val int) attribute.KeyValue {
- return GenAiRequestMaxTokensKey.Int(val)
-}
-
-// GenAiRequestModel returns an attribute KeyValue conforming to the
-// "gen_ai.request.model" semantic conventions. It represents the name of the
-// LLM a request is being made to.
-func GenAiRequestModel(val string) attribute.KeyValue {
- return GenAiRequestModelKey.String(val)
-}
-
-// GenAiRequestTemperature returns an attribute KeyValue conforming to the
-// "gen_ai.request.temperature" semantic conventions. It represents the
-// temperature setting for the LLM request.
-func GenAiRequestTemperature(val float64) attribute.KeyValue {
- return GenAiRequestTemperatureKey.Float64(val)
-}
-
-// GenAiRequestTopP returns an attribute KeyValue conforming to the
-// "gen_ai.request.top_p" semantic conventions. It represents the top_p
-// sampling setting for the LLM request.
-func GenAiRequestTopP(val float64) attribute.KeyValue {
- return GenAiRequestTopPKey.Float64(val)
-}
-
-// GenAiResponseFinishReasons returns an attribute KeyValue conforming to
-// the "gen_ai.response.finish_reasons" semantic conventions. It represents the
-// array of reasons the model stopped generating tokens, corresponding to each
-// generation received.
-func GenAiResponseFinishReasons(val ...string) attribute.KeyValue {
- return GenAiResponseFinishReasonsKey.StringSlice(val)
-}
-
-// GenAiResponseID returns an attribute KeyValue conforming to the
-// "gen_ai.response.id" semantic conventions. It represents the unique
-// identifier for the completion.
-func GenAiResponseID(val string) attribute.KeyValue {
- return GenAiResponseIDKey.String(val)
-}
-
-// GenAiResponseModel returns an attribute KeyValue conforming to the
-// "gen_ai.response.model" semantic conventions. It represents the name of the
-// LLM a response was generated from.
-func GenAiResponseModel(val string) attribute.KeyValue {
- return GenAiResponseModelKey.String(val)
-}
-
-// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
-// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
-// number of tokens used in the LLM response (completion).
-func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
- return GenAiUsageCompletionTokensKey.Int(val)
-}
-
-// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
-// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
-// of tokens used in the LLM prompt.
-func GenAiUsagePromptTokens(val int) attribute.KeyValue {
- return GenAiUsagePromptTokensKey.Int(val)
-}
-
-// Attributes for GraphQL.
-const (
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
-
-// Attributes for the Heroku platform on which the application is running.
-const (
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions. It represents the unique identifier for the
- // application
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-)
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// A host is defined as a computing instance. For example, physical servers,
-// virtual machines, switches, or disk arrays.
-const (
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- HostArchKey = attribute.Key("host.arch")
-
- // HostCPUCacheL2SizeKey is the attribute Key conforming to the
- // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
- // of level 2 memory cache available to the processor (in Bytes).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12288000
- HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
-
- // HostCPUFamilyKey is the attribute Key conforming to the
- // "host.cpu.family" semantic conventions. It represents the family or
- // generation of the CPU.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', 'PA-RISC 1.1e'
- HostCPUFamilyKey = attribute.Key("host.cpu.family")
-
- // HostCPUModelIDKey is the attribute Key conforming to the
- // "host.cpu.model.id" semantic conventions. It represents the model
- // identifier. It provides more granular information about the CPU,
- // distinguishing it from other CPUs within the same family.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', '9000/778/B180L'
- HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
-
- // HostCPUModelNameKey is the attribute Key conforming to the
- // "host.cpu.model.name" semantic conventions. It represents the model
- // designation of the processor.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
- HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
-
- // HostCPUSteppingKey is the attribute Key conforming to the
- // "host.cpu.stepping" semantic conventions. It represents the stepping or
- // core revisions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1', 'r1p1'
- HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
- // HostCPUVendorIDKey is the attribute Key conforming to the
- // "host.cpu.vendor.id" semantic conventions. It represents the processor
- // manufacturer identifier. A maximum 12-character string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'GenuineIntel'
- // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
- // ID string in EBX, EDX and ECX registers. Writing these to memory in this
- // order results in a 12-character string.
- HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the VM image ID or host OS image ID.
- // For Cloud, this value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image or host OS as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-
- // HostIPKey is the attribute Key conforming to the "host.ip" semantic
- // conventions. It represents the available IP addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
- // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
- // addresses MUST be specified in the [RFC
- // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
- HostIPKey = attribute.Key("host.ip")
-
- // HostMacKey is the attribute Key conforming to the "host.mac" semantic
- // conventions. It represents the available MAC addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
- // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
- // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
- // as hyphen-separated octets in uppercase hexadecimal form from most to
- // least significant.
- HostMacKey = attribute.Key("host.mac")
-
- // HostNameKey is the attribute Key conforming to the "host.name" semantic
- // conventions. It represents the name of the host. On Unix systems, it may
- // contain what the hostname command returns, or the fully qualified
- // hostname, or another name specified by the user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
-
- // HostTypeKey is the attribute Key conforming to the "host.type" semantic
- // conventions. It represents the type of host. For Cloud, this must be the
- // machine type.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
-// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
-// level 2 memory cache available to the processor (in Bytes).
-func HostCPUCacheL2Size(val int) attribute.KeyValue {
- return HostCPUCacheL2SizeKey.Int(val)
-}
-
-// HostCPUFamily returns an attribute KeyValue conforming to the
-// "host.cpu.family" semantic conventions. It represents the family or
-// generation of the CPU.
-func HostCPUFamily(val string) attribute.KeyValue {
- return HostCPUFamilyKey.String(val)
-}
-
-// HostCPUModelID returns an attribute KeyValue conforming to the
-// "host.cpu.model.id" semantic conventions. It represents the model
-// identifier. It provides more granular information about the CPU,
-// distinguishing it from other CPUs within the same family.
-func HostCPUModelID(val string) attribute.KeyValue {
- return HostCPUModelIDKey.String(val)
-}
-
-// HostCPUModelName returns an attribute KeyValue conforming to the
-// "host.cpu.model.name" semantic conventions. It represents the model
-// designation of the processor.
-func HostCPUModelName(val string) attribute.KeyValue {
- return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val string) attribute.KeyValue {
- return HostCPUSteppingKey.String(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
- return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the VM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
- return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
- return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
-// Semantic convention attributes in the HTTP namespace.
-const (
- // HTTPConnectionStateKey is the attribute Key conforming to the
- // "http.connection.state" semantic conventions. It represents the state of
- // the HTTP connection in the HTTP connection pool.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'active', 'idle'
- HTTPConnectionStateKey = attribute.Key("http.connection.state")
-
- // HTTPRequestBodySizeKey is the attribute Key conforming to the
- // "http.request.body.size" semantic conventions. It represents the size of
- // the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
- // HTTPRequestMethodKey is the attribute Key conforming to the
- // "http.request.method" semantic conventions. It represents the HTTP
- // request method.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- // Note: HTTP request method value SHOULD be "known" to the
- // instrumentation.
- // By default, this convention defines "known" methods as the ones listed
- // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
- // and the PATCH method defined in
- // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
- //
- // If the HTTP request method is not known to instrumentation, it MUST set
- // the `http.request.method` attribute to `_OTHER`.
- //
- // If the HTTP instrumentation could end up converting valid HTTP request
- // methods to `_OTHER`, then it MUST provide a way to override
- // the list of known HTTP methods. If this override is done via environment
- // variable, then the environment variable MUST be named
- // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
- // list of case-sensitive known HTTP methods
-	// (this list MUST be a full override of the default known methods; it is
-	// not a list of known methods in addition to the defaults).
- //
- // HTTP method names are case-sensitive and `http.request.method` attribute
- // value MUST match a known HTTP method name exactly.
- // Instrumentations for specific web frameworks that consider HTTP methods
- // to be case insensitive, SHOULD populate a canonical equivalent.
- // Tracing instrumentations that do so, MUST also set
- // `http.request.method_original` to the original value.
- HTTPRequestMethodKey = attribute.Key("http.request.method")
-
- // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
- // "http.request.method_original" semantic conventions. It represents the
- // original HTTP method sent by the client in the request line.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GeT', 'ACL', 'foo'
- HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
- // HTTPRequestResendCountKey is the attribute Key conforming to the
- // "http.request.resend_count" semantic conventions. It represents the
- // ordinal number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Server Unavailable,
- // network issues, or any other).
- HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
- // HTTPRequestSizeKey is the attribute Key conforming to the
- // "http.request.size" semantic conventions. It represents the total size
- // of the request in bytes. This should be the total number of bytes sent
- // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
- // and HTTP/3), headers, and request body if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPRequestSizeKey = attribute.Key("http.request.size")
-
- // HTTPResponseBodySizeKey is the attribute Key conforming to the
- // "http.response.body.size" semantic conventions. It represents the size
- // of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
- // HTTPResponseSizeKey is the attribute Key conforming to the
- // "http.response.size" semantic conventions. It represents the total size
- // of the response in bytes. This should be the total number of bytes sent
- // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
- // HTTP/3), headers, and response body and trailers if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPResponseSizeKey = attribute.Key("http.response.size")
-
- // HTTPResponseStatusCodeKey is the attribute Key conforming to the
- // "http.response.status_code" semantic conventions. It represents the
- // [HTTP response status
- // code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 200
- HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route, that is, the path
- // template in the format used by the respective server framework.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
- HTTPRouteKey = attribute.Key("http.route")
-)
-
-var (
- // active state
- HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
- // idle state
- HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
-)
-
-var (
- // CONNECT method
- HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
- // DELETE method
- HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
- // GET method
- HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
- // HEAD method
- HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
- // OPTIONS method
- HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
- // PATCH method
- HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
- // POST method
- HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
- // PUT method
- HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
- // TRACE method
- HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
- // Any HTTP method that the instrumentation has no prior knowledge of
- HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
-)
-
-// HTTPRequestBodySize returns an attribute KeyValue conforming to the
-// "http.request.body.size" semantic conventions. It represents the size of the
-// request payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestBodySize(val int) attribute.KeyValue {
- return HTTPRequestBodySizeKey.Int(val)
-}
-
-// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
-// "http.request.method_original" semantic conventions. It represents the
-// original HTTP method sent by the client in the request line.
-func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
- return HTTPRequestMethodOriginalKey.String(val)
-}
-
-// HTTPRequestResendCount returns an attribute KeyValue conforming to the
-// "http.request.resend_count" semantic conventions. It represents the ordinal
-// number of request resending attempt (for any reason, including redirects).
-func HTTPRequestResendCount(val int) attribute.KeyValue {
- return HTTPRequestResendCountKey.Int(val)
-}
-
-// HTTPRequestSize returns an attribute KeyValue conforming to the
-// "http.request.size" semantic conventions. It represents the total size of
-// the request in bytes. This should be the total number of bytes sent over the
-// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and request body if any.
-func HTTPRequestSize(val int) attribute.KeyValue {
- return HTTPRequestSizeKey.Int(val)
-}
-
-// HTTPResponseBodySize returns an attribute KeyValue conforming to the
-// "http.response.body.size" semantic conventions. It represents the size of
-// the response payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseBodySize(val int) attribute.KeyValue {
- return HTTPResponseBodySizeKey.Int(val)
-}
-
-// HTTPResponseSize returns an attribute KeyValue conforming to the
-// "http.response.size" semantic conventions. It represents the total size of
-// the response in bytes. This should be the total number of bytes sent over
-// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and response body and trailers if any.
-func HTTPResponseSize(val int) attribute.KeyValue {
- return HTTPResponseSizeKey.Int(val)
-}
-
-// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
-// "http.response.status_code" semantic conventions. It represents the [HTTP
-// response status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPResponseStatusCode(val int) attribute.KeyValue {
- return HTTPResponseStatusCodeKey.Int(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route, that is, the path
-// template in the format used by the respective server framework.
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
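A hedged sketch of the HTTP helpers above in a server handler; the tracer name and route template are invented, and the semconv import path is again an assumption.

package example

import (
	"net/http"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version/path
)

func handleUsers(w http.ResponseWriter, r *http.Request) {
	// Record the stable HTTP attributes defined in this file on the server span.
	_, span := otel.Tracer("example/http").Start(r.Context(), "GET /users/{id}")
	defer span.End()

	span.SetAttributes(
		semconv.HTTPRequestMethodGet,     // enum member; unknown methods map to _OTHER
		semconv.HTTPRoute("/users/{id}"), // low-cardinality route template
		semconv.HTTPResponseStatusCode(http.StatusOK),
	)
	w.WriteHeader(http.StatusOK)
}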
-// Java Virtual machine related attributes.
-const (
- // JvmBufferPoolNameKey is the attribute Key conforming to the
- // "jvm.buffer.pool.name" semantic conventions. It represents the name of
- // the buffer pool.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mapped', 'direct'
- // Note: Pool names are generally obtained via
- // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
- JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
-
- // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
- // semantic conventions. It represents the name of the garbage collector
- // action.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'end of minor GC', 'end of major GC'
- // Note: Garbage collector action is generally obtained via
- // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
- JvmGcActionKey = attribute.Key("jvm.gc.action")
-
- // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
- // semantic conventions. It represents the name of the garbage collector.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'G1 Young Generation', 'G1 Old Generation'
- // Note: Garbage collector name is generally obtained via
- // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
- JvmGcNameKey = attribute.Key("jvm.gc.name")
-
- // JvmMemoryPoolNameKey is the attribute Key conforming to the
- // "jvm.memory.pool.name" semantic conventions. It represents the name of
- // the memory pool.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
- // Note: Pool names are generally obtained via
- // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
- JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
-
- // JvmMemoryTypeKey is the attribute Key conforming to the
- // "jvm.memory.type" semantic conventions. It represents the type of
- // memory.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'heap', 'non_heap'
- JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
-
- // JvmThreadDaemonKey is the attribute Key conforming to the
-	// "jvm.thread.daemon" semantic conventions. It represents whether the
-	// thread is a daemon or not.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
-
- // JvmThreadStateKey is the attribute Key conforming to the
- // "jvm.thread.state" semantic conventions. It represents the state of the
- // thread.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'runnable', 'blocked'
- JvmThreadStateKey = attribute.Key("jvm.thread.state")
-)
-
-var (
- // Heap memory
- JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
- // Non-heap memory
- JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
-)
-
-var (
- // A thread that has not yet started is in this state
- JvmThreadStateNew = JvmThreadStateKey.String("new")
- // A thread executing in the Java virtual machine is in this state
- JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
- // A thread that is blocked waiting for a monitor lock is in this state
- JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
- // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
- JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
- // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
- JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
- // A thread that has exited is in this state
- JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
-)
-
-// JvmBufferPoolName returns an attribute KeyValue conforming to the
-// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
-// buffer pool.
-func JvmBufferPoolName(val string) attribute.KeyValue {
- return JvmBufferPoolNameKey.String(val)
-}
-
-// JvmGcAction returns an attribute KeyValue conforming to the
-// "jvm.gc.action" semantic conventions. It represents the name of the garbage
-// collector action.
-func JvmGcAction(val string) attribute.KeyValue {
- return JvmGcActionKey.String(val)
-}
-
-// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
-// semantic conventions. It represents the name of the garbage collector.
-func JvmGcName(val string) attribute.KeyValue {
- return JvmGcNameKey.String(val)
-}
-
-// JvmMemoryPoolName returns an attribute KeyValue conforming to the
-// "jvm.memory.pool.name" semantic conventions. It represents the name of the
-// memory pool.
-func JvmMemoryPoolName(val string) attribute.KeyValue {
- return JvmMemoryPoolNameKey.String(val)
-}
-
-// JvmThreadDaemon returns an attribute KeyValue conforming to the
-// "jvm.thread.daemon" semantic conventions. It represents whether the
-// thread is a daemon or not.
-func JvmThreadDaemon(val bool) attribute.KeyValue {
- return JvmThreadDaemonKey.Bool(val)
-}
-
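The JVM keys are aimed at metric instruments rather than spans. A sketch under the same import-path assumption; the meter name, instrument name, and pool name are illustrative only.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version/path
)

// recordHeapUsage tags a memory measurement with the JVM attributes above.
func recordHeapUsage(ctx context.Context, usedBytes int64) error {
	counter, err := otel.Meter("example/jvm").Int64UpDownCounter(
		"jvm.memory.used", metric.WithUnit("By"))
	if err != nil {
		return err
	}
	counter.Add(ctx, usedBytes, metric.WithAttributes(
		semconv.JvmMemoryTypeHeap,
		semconv.JvmMemoryPoolName("G1 Eden Space"),
	))
	return nil
}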
-// Kubernetes resource attributes.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-
- // K8SClusterUIDKey is the attribute Key conforming to the
- // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
- // the cluster, set to the UID of the `kube-system` namespace.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
- // Note: K8S doesn't have support for obtaining a cluster ID. If this is
- // ever
- // added, we will recommend collecting the `k8s.cluster.uid` through the
- // official APIs. In the meantime, we are able to use the `uid` of the
- // `kube-system` namespace as a proxy for cluster ID. Read on for the
- // rationale.
- //
- // Every object created in a K8S cluster is assigned a distinct UID. The
- // `kube-system` namespace is used by Kubernetes itself and will exist
- // for the lifetime of the cluster. Using the `uid` of the `kube-system`
- // namespace is a reasonable proxy for the K8S ClusterID as it will only
- // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
- // UUIDs as standardized by
- // [ISO/IEC 9834-8 and ITU-T
- // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
- // Which states:
- //
- // > If generated according to one of the mechanisms defined in Rec.
- // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
- // different from all other UUIDs generated before 3603 A.D., or is
- // extremely likely to be different (depending on the mechanism chosen).
- //
- // Therefore, UIDs between clusters should be extremely unlikely to
- // conflict.
- K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
-
- // K8SContainerNameKey is the attribute Key conforming to the
- // "k8s.container.name" semantic conventions. It represents the name of the
- // Container from Pod specification, must be unique within a Pod. Container
- // runtime usually uses different globally unique name (`container.name`).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-
- // K8SContainerRestartCountKey is the attribute Key conforming to the
- // "k8s.container.restart_count" semantic conventions. It represents the
- // number of times the container was restarted. This attribute can be used
- // to identify a particular container (running or stopped) within a
- // container spec.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-
- // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
- // conforming to the "k8s.container.status.last_terminated_reason" semantic
- // conventions. It represents the last terminated reason of the Container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Evicted', 'Error'
- K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
-
- // K8SCronJobNameKey is the attribute Key conforming to the
- // "k8s.cronjob.name" semantic conventions. It represents the name of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-
- // K8SCronJobUIDKey is the attribute Key conforming to the
- // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
- // K8SDaemonSetNameKey is the attribute Key conforming to the
- // "k8s.daemonset.name" semantic conventions. It represents the name of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-
- // K8SDaemonSetUIDKey is the attribute Key conforming to the
- // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
- // K8SDeploymentNameKey is the attribute Key conforming to the
- // "k8s.deployment.name" semantic conventions. It represents the name of
- // the Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-
- // K8SDeploymentUIDKey is the attribute Key conforming to the
- // "k8s.deployment.uid" semantic conventions. It represents the UID of the
- // Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
- // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
- // semantic conventions. It represents the name of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-
- // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
- // semantic conventions. It represents the UID of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
- // K8SNamespaceNameKey is the attribute Key conforming to the
- // "k8s.namespace.name" semantic conventions. It represents the name of the
- // namespace that the pod is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-
- // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
- // semantic conventions. It represents the name of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
-
- // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
- // semantic conventions. It represents the UID of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-
- // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
- // semantic conventions. It represents the name of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-
- // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
- // semantic conventions. It represents the UID of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
- // K8SReplicaSetNameKey is the attribute Key conforming to the
- // "k8s.replicaset.name" semantic conventions. It represents the name of
- // the ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-
- // K8SReplicaSetUIDKey is the attribute Key conforming to the
- // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
- // ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
- // K8SStatefulSetNameKey is the attribute Key conforming to the
- // "k8s.statefulset.name" semantic conventions. It represents the name of
- // the StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-
- // K8SStatefulSetUIDKey is the attribute Key conforming to the
- // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
- // StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
- return K8SClusterNameKey.String(val)
-}
-
-// K8SClusterUID returns an attribute KeyValue conforming to the
-// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
-// cluster, set to the UID of the `kube-system` namespace.
-func K8SClusterUID(val string) attribute.KeyValue {
- return K8SClusterUIDKey.String(val)
-}
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from Pod specification, must be unique within a Pod. Container
-// runtime usually uses different globally unique name (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
- return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
- return K8SContainerRestartCountKey.Int(val)
-}
-
-// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
-// conforming to the "k8s.container.status.last_terminated_reason" semantic
-// conventions. It represents the last terminated reason of the Container.
-func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
- return K8SContainerStatusLastTerminatedReasonKey.String(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
- return K8SCronJobNameKey.String(val)
-}
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
- return K8SCronJobUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
- return K8SDaemonSetNameKey.String(val)
-}
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
- return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
- return K8SDeploymentNameKey.String(val)
-}
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
- return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
- return K8SJobNameKey.String(val)
-}
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
- return K8SJobUIDKey.String(val)
-}
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
- return K8SNamespaceNameKey.String(val)
-}
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
- return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
- return K8SNodeUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
- return K8SPodNameKey.String(val)
-}
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
- return K8SPodUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
- return K8SReplicaSetNameKey.String(val)
-}
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
- return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
- return K8SStatefulSetNameKey.String(val)
-}
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
- return K8SStatefulSetUIDKey.String(val)
-}
-
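The Kubernetes keys are likewise resource-level. A sketch that reuses the example values from the comments above as placeholders; in practice they would come from the downward API or environment variables.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version/path
)

// newPodResource describes the pod the process runs in, using the example
// values documented above as placeholders.
func newPodResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SNamespaceName("default"),
		semconv.K8SNodeName("node-1"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
	)
}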
-// Log attributes
-const (
- // LogIostreamKey is the attribute Key conforming to the "log.iostream"
- // semantic conventions. It represents the stream associated with the log.
- // See below for a list of well-known values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- LogIostreamKey = attribute.Key("log.iostream")
-)
-
-var (
- // Logs from stdout stream
- LogIostreamStdout = LogIostreamKey.String("stdout")
- // Events from stderr stream
- LogIostreamStderr = LogIostreamKey.String("stderr")
-)
-
-// Attributes for a file to which log was emitted.
-const (
- // LogFileNameKey is the attribute Key conforming to the "log.file.name"
- // semantic conventions. It represents the basename of the file.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'audit.log'
- LogFileNameKey = attribute.Key("log.file.name")
-
- // LogFileNameResolvedKey is the attribute Key conforming to the
- // "log.file.name_resolved" semantic conventions. It represents the
- // basename of the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'uuid.log'
- LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
-
- // LogFilePathKey is the attribute Key conforming to the "log.file.path"
- // semantic conventions. It represents the full path to the file.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/log/mysql/audit.log'
- LogFilePathKey = attribute.Key("log.file.path")
-
- // LogFilePathResolvedKey is the attribute Key conforming to the
- // "log.file.path_resolved" semantic conventions. It represents the full
- // path to the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/lib/docker/uuid.log'
- LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
-)
-
-// LogFileName returns an attribute KeyValue conforming to the
-// "log.file.name" semantic conventions. It represents the basename of the
-// file.
-func LogFileName(val string) attribute.KeyValue {
- return LogFileNameKey.String(val)
-}
-
-// LogFileNameResolved returns an attribute KeyValue conforming to the
-// "log.file.name_resolved" semantic conventions. It represents the basename of
-// the file, with symlinks resolved.
-func LogFileNameResolved(val string) attribute.KeyValue {
- return LogFileNameResolvedKey.String(val)
-}
-
-// LogFilePath returns an attribute KeyValue conforming to the
-// "log.file.path" semantic conventions. It represents the full path to the
-// file.
-func LogFilePath(val string) attribute.KeyValue {
- return LogFilePathKey.String(val)
-}
-
-// LogFilePathResolved returns an attribute KeyValue conforming to the
-// "log.file.path_resolved" semantic conventions. It represents the full path
-// to the file, with symlinks resolved.
-func LogFilePathResolved(val string) attribute.KeyValue {
- return LogFilePathResolvedKey.String(val)
-}
-
-// The generic attributes that may be used in any Log Record.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means, that two
- // distinguishable log records MUST have different values.
- // The id MAY be an [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
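The log.* constructors are ordinary attribute.KeyValue builders, so they can be collected into a set and handed to whatever log pipeline is in use. The values below simply reuse the examples from the comments.

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version/path
)

// logAttributes gathers the file- and record-level log attributes defined above.
func logAttributes() attribute.Set {
	return attribute.NewSet(
		semconv.LogIostreamStdout,
		semconv.LogFileName("audit.log"),
		semconv.LogFilePath("/var/log/mysql/audit.log"),
		semconv.LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV"),
	)
}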
-// Attributes describing telemetry around messaging systems and messaging
-// activities.
-const (
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-
- // MessagingClientIDKey is the attribute Key conforming to the
- // "messaging.client.id" semantic conventions. It represents a unique
- // identifier for the client that consumes or produces a message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'client-5', 'myhost@8742@s8083jm'
- MessagingClientIDKey = attribute.Key("messaging.client.id")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
- // the broker doesn't have such notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationPartitionIDKey is the attribute Key conforming to
- // the "messaging.destination.partition.id" semantic conventions. It
- // represents the identifier of the partition messages are sent to or
- // received from, unique within the `messaging.destination.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1'
- MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
- // to the "messaging.destination_publish.anonymous" semantic conventions.
- // It represents a boolean that is true if the publish message destination
- // is anonymous (could be unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
-
- // MessagingDestinationPublishNameKey is the attribute Key conforming to
- // the "messaging.destination_publish.name" semantic conventions. It
- // represents the name of the original destination the message was
- // published to
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: The name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
- // the broker doesn't have such notion, the original destination name
- // SHOULD uniquely identify the broker.
- MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
-
- // MessagingMessageBodySizeKey is the attribute Key conforming to the
- // "messaging.message.body.size" semantic conventions. It represents the
- // size of the message body in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1439
- // Note: This can refer to both the compressed or uncompressed body size.
- // If both sizes are known, the uncompressed
- // body size should be used.
- MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the conversation ID identifying the conversation to which the message
- // belongs, represented as a string. Sometimes called "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
- // "messaging.message.envelope.size" semantic conventions. It represents
- // the size of the message body and metadata in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2738
- // Note: This can refer to both the compressed or uncompressed size. If
- // both sizes are known, the uncompressed
- // size should be used.
- MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
-
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingOperationNameKey is the attribute Key conforming to the
- // "messaging.operation.name" semantic conventions. It represents the
- // system-specific name of the messaging operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ack', 'nack', 'send'
- MessagingOperationNameKey = attribute.Key("messaging.operation.name")
-
- // MessagingOperationTypeKey is the attribute Key conforming to the
- // "messaging.operation.type" semantic conventions. It represents a string
- // identifying the type of the messaging operation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
-
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents the messaging
- // system as identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The actual messaging system may differ from the one known by the
- // client. For example, when using Kafka client libraries to communicate
- // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on
- // the instrumentation's best knowledge.
- MessagingSystemKey = attribute.Key("messaging.system")
-)
-
-var (
- // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
- MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
- // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
- MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
- // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
- MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
- // One or more messages are delivered to or processed by a consumer
- MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process")
- // One or more messages are settled
- MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
-)
-
-var (
- // Apache ActiveMQ
- MessagingSystemActivemq = MessagingSystemKey.String("activemq")
- // Amazon Simple Queue Service (SQS)
- MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
- // Azure Event Grid
- MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid")
- // Azure Event Hubs
- MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs")
- // Azure Service Bus
- MessagingSystemServicebus = MessagingSystemKey.String("servicebus")
- // Google Cloud Pub/Sub
- MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
- // Java Message Service
- MessagingSystemJms = MessagingSystemKey.String("jms")
- // Apache Kafka
- MessagingSystemKafka = MessagingSystemKey.String("kafka")
- // RabbitMQ
- MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
- // Apache RocketMQ
- MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
-)
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
-
-// MessagingClientID returns an attribute KeyValue conforming to the
-// "messaging.client.id" semantic conventions. It represents a unique
-// identifier for the client that consumes or produces a message.
-func MessagingClientID(val string) attribute.KeyValue {
- return MessagingClientIDKey.String(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationPartitionID returns an attribute KeyValue conforming
-// to the "messaging.destination.partition.id" semantic conventions. It
-// represents the identifier of the partition messages are sent to or received
-// from, unique within the `messaging.destination.name`.
-func MessagingDestinationPartitionID(val string) attribute.KeyValue {
- return MessagingDestinationPartitionIDKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationPublishAnonymous returns an attribute KeyValue
-// conforming to the "messaging.destination_publish.anonymous" semantic
-// conventions. It represents a boolean that is true if the publish message
-// destination is anonymous (could be unnamed or have auto-generated name).
-func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationPublishAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationPublishName returns an attribute KeyValue conforming
-// to the "messaging.destination_publish.name" semantic conventions. It
-// represents the name of the original destination the message was published to
-func MessagingDestinationPublishName(val string) attribute.KeyValue {
- return MessagingDestinationPublishNameKey.String(val)
-}
-
-// MessagingMessageBodySize returns an attribute KeyValue conforming to the
-// "messaging.message.body.size" semantic conventions. It represents the size
-// of the message body in bytes.
-func MessagingMessageBodySize(val int) attribute.KeyValue {
- return MessagingMessageBodySizeKey.Int(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the conversation ID identifying the conversation to which the
-// message belongs, represented as a string. Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
- return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
-// the "messaging.message.envelope.size" semantic conventions. It represents
-// the size of the message body and metadata in bytes.
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
- return MessagingMessageEnvelopeSizeKey.Int(val)
-}
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingOperationName returns an attribute KeyValue conforming to the
-// "messaging.operation.name" semantic conventions. It represents the
-// system-specific name of the messaging operation.
-func MessagingOperationName(val string) attribute.KeyValue {
- return MessagingOperationNameKey.String(val)
-}
-
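A producer-side sketch combining the generic messaging attributes above; the span name, tracer name, and body size are placeholders, and the import path is assumed as before.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version/path
	"go.opentelemetry.io/otel/trace"
)

// startPublishSpan opens a producer span carrying the generic messaging attributes.
func startPublishSpan(ctx context.Context) (context.Context, trace.Span) {
	ctx, span := otel.Tracer("example/messaging").Start(ctx, "publish MyTopic",
		trace.WithSpanKind(trace.SpanKindProducer))
	span.SetAttributes(
		semconv.MessagingSystemKafka,
		semconv.MessagingOperationTypePublish,
		semconv.MessagingOperationName("send"),
		semconv.MessagingDestinationName("MyTopic"),
		semconv.MessagingMessageBodySize(1439),
	)
	return ctx, span
}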
-// This group describes attributes specific to Apache Kafka.
-const (
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
-	// "messaging.kafka.message.key" semantic conventions. It represents the
-	// message keys in Kafka, which are used for grouping alike messages to
-	// ensure they're processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
-	// Note: If the key type is not string, its string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-)
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message keys in Kafka, which are used for grouping alike messages to ensure
-// they're processed on the same partition. They differ from `messaging.message.id` in
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
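The Kafka-specific helpers layer on top of the generic messaging attributes from the previous sketch; the group name is a placeholder, and per the note above the key attribute is skipped when the key is null.

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version/path
)

// kafkaConsumeAttrs builds the Kafka-specific attributes for one consumed record.
func kafkaConsumeAttrs(key string, offset int, tombstone bool) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.MessagingKafkaConsumerGroup("my-group"),
		semconv.MessagingKafkaMessageOffset(offset),
		semconv.MessagingKafkaMessageTombstone(tombstone),
	}
	if key != "" { // the convention says: no attribute for a null key
		attrs = append(attrs, semconv.MessagingKafkaMessageKey(key))
	}
	return attrs
}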
-// This group describes attributes specific to RabbitMQ.
-const (
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-	// conventions. It represents the RabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-
- // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
- // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
-	// It represents the RabbitMQ message delivery tag.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 123
- MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the RabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
-// conventions. It represents the RabbitMQ message delivery tag.
-func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
- return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
-}
-
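And the RabbitMQ pair, added to an already-started span (for instance the producer span from the earlier sketch); the values echo the examples in the comments.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version/path
	"go.opentelemetry.io/otel/trace"
)

// annotateRabbitMQ records the RabbitMQ-specific attributes on an existing span.
func annotateRabbitMQ(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingRabbitmqDestinationRoutingKey("myKey"),
		semconv.MessagingRabbitmqMessageDeliveryTag(123),
	)
}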
-// This group describes attributes specific to RocketMQ.
-const (
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
- // conventions. It represents the delay time level for delay message, which
- // determines the message delay time.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds that
- // the delay message is expected to be delivered to consumer.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
-	// the message group name; it is essential for FIFO messages. Messages
-	// that belong to the same message group are always processed one by one
-	// within the same consumer group.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
-	// the key(s) of the message, another way to mark a message besides its ID.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
- // secondary classifier of message besides topic.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
-	// namespace of RocketMQ resources; resources in different namespaces are
-	// independent of each other.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
- return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for delay message, which
-// determines the message delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds that the delay
-// message is expected to be delivered to consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
-// the message group name; it is essential for FIFO messages. Messages that
-// belong to the same message group are always processed one by one within
-// the same consumer group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
- return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of the message, another way to mark a message besides its ID.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
- return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of message besides topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
- return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources; resources in different namespaces are
-// independent of each other.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
- return MessagingRocketmqNamespaceKey.String(val)
-}
-
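// Editorial note, not part of the vendored file: a minimal sketch of how the
// RocketMQ helpers above are typically combined into a set of span
// attributes. The function name and literal values are hypothetical examples.
func exampleRocketmqAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		MessagingRocketmqClientGroup("myConsumerGroup"),
		MessagingRocketmqNamespace("myNamespace"),
		MessagingRocketmqMessageTag("tagA"),
		MessagingRocketmqMessageKeys("keyA", "keyB"),
		MessagingRocketmqConsumptionModelClustering,
	}
}
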
-// This group describes attributes specific to GCP Pub/Sub.
-const (
- // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
- // It represents the ack deadline in seconds set for the modify ack
- // deadline request.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
-
- // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
- // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
- // represents the ack id for a given message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ack_id'
- MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
-
- // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
- // conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
- // semantic conventions. It represents the delivery attempt for a given
- // message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
-
- // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
- // It represents the ordering key for a given message. If the attribute is
- // not present, the message does not have an ordering key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ordering_key'
- MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
-)
-
-// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
-// conventions. It represents the ack deadline in seconds set for the modify
-// ack deadline request.
-func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
- return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
-}
-
-// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
-// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
-// represents the ack id for a given message.
-func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageAckIDKey.String(val)
-}
-
-// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
-// conventions. It represents the delivery attempt for a given message.
-func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
- return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
-}
-
-// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
-// conventions. It represents the ordering key for a given message. If the
-// attribute is not present, the message does not have an ordering key.
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
-}
-
-// This group describes attributes specific to Azure Service Bus.
-const (
- // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
- // conforming to the "messaging.servicebus.destination.subscription_name"
- // semantic conventions. It represents the name of the subscription in the
- // topic messages are received from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mySubscription'
- MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
-
- // MessagingServicebusDispositionStatusKey is the attribute Key conforming
- // to the "messaging.servicebus.disposition_status" semantic conventions.
-	// It describes the [settlement
- // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
-
- // MessagingServicebusMessageDeliveryCountKey is the attribute Key
- // conforming to the "messaging.servicebus.message.delivery_count" semantic
- // conventions. It represents the number of deliveries that have been
- // attempted for this message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
-
- // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
- // conforming to the "messaging.servicebus.message.enqueued_time" semantic
- // conventions. It represents the UTC epoch seconds at which the message
- // has been accepted and stored in the entity.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1701393730
- MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
-)
-
-var (
- // Message is completed
- MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
- // Message is abandoned
- MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
- // Message is sent to dead letter queue
- MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
- // Message is deferred
- MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
-)
-
-// MessagingServicebusDestinationSubscriptionName returns an attribute
-// KeyValue conforming to the
-// "messaging.servicebus.destination.subscription_name" semantic conventions.
-// It represents the name of the subscription in the topic messages are
-// received from.
-func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
- return MessagingServicebusDestinationSubscriptionNameKey.String(val)
-}
-
-// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.delivery_count" semantic
-// conventions. It represents the number of deliveries that have been attempted
-// for this message.
-func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
- return MessagingServicebusMessageDeliveryCountKey.Int(val)
-}
-
-// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
- return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
-}
-
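// Editorial note, not part of the vendored file: an illustrative sketch of
// Azure Service Bus attributes built from the helpers and enum values above.
// The function name and literal values are hypothetical examples.
func exampleServicebusAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		MessagingServicebusDestinationSubscriptionName("mySubscription"),
		MessagingServicebusMessageDeliveryCount(2),
		MessagingServicebusMessageEnqueuedTime(1701393730),
		MessagingServicebusDispositionStatusComplete,
	}
}
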
-// This group describes attributes specific to Azure Event Hubs.
-const (
- // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
- // the "messaging.eventhubs.consumer.group" semantic conventions. It
- // represents the name of the consumer group the event consumer is
- // associated with.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'indexer'
- MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
-
- // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
- // to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
- // It represents the UTC epoch seconds at which the message has been
- // accepted and stored in the entity.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1701393730
- MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
-)
-
-// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
-// to the "messaging.eventhubs.consumer.group" semantic conventions. It
-// represents the name of the consumer group the event consumer is associated
-// with.
-func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
- return MessagingEventhubsConsumerGroupKey.String(val)
-}
-
-// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
- return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetworkCarrierIccKey is the attribute Key conforming to the
- // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
- // alpha-2 2-character country code associated with the mobile carrier
- // network.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'DE'
- NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
-
- // NetworkCarrierMccKey is the attribute Key conforming to the
- // "network.carrier.mcc" semantic conventions. It represents the mobile
- // carrier country code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '310'
- NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
-
- // NetworkCarrierMncKey is the attribute Key conforming to the
- // "network.carrier.mnc" semantic conventions. It represents the mobile
- // carrier network code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '001'
- NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
-
- // NetworkCarrierNameKey is the attribute Key conforming to the
- // "network.carrier.name" semantic conventions. It represents the name of
- // the mobile carrier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'sprint'
- NetworkCarrierNameKey = attribute.Key("network.carrier.name")
-
- // NetworkConnectionSubtypeKey is the attribute Key conforming to the
-	// "network.connection.subtype" semantic conventions. It describes more
-	// details regarding `network.connection.type`. It may be the type of cell
-	// technology connection, but it could also be used to describe details
-	// about a Wi-Fi connection.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'LTE'
- NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
-
- // NetworkConnectionTypeKey is the attribute Key conforming to the
- // "network.connection.type" semantic conventions. It represents the
- // internet connection type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'wifi'
- NetworkConnectionTypeKey = attribute.Key("network.connection.type")
-
- // NetworkIoDirectionKey is the attribute Key conforming to the
- // "network.io.direction" semantic conventions. It represents the network
- // IO operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'transmit'
- NetworkIoDirectionKey = attribute.Key("network.io.direction")
-
- // NetworkLocalAddressKey is the attribute Key conforming to the
- // "network.local.address" semantic conventions. It represents the local
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkLocalAddressKey = attribute.Key("network.local.address")
-
- // NetworkLocalPortKey is the attribute Key conforming to the
- // "network.local.port" semantic conventions. It represents the local port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkLocalPortKey = attribute.Key("network.local.port")
-
- // NetworkPeerAddressKey is the attribute Key conforming to the
- // "network.peer.address" semantic conventions. It represents the peer
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkPeerAddressKey = attribute.Key("network.peer.address")
-
- // NetworkPeerPortKey is the attribute Key conforming to the
- // "network.peer.port" semantic conventions. It represents the peer port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkPeerPortKey = attribute.Key("network.peer.port")
-
- // NetworkProtocolNameKey is the attribute Key conforming to the
- // "network.protocol.name" semantic conventions. It represents the [OSI
- // application layer](https://osi-model.com/application-layer/) or non-OSI
- // equivalent.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'amqp', 'http', 'mqtt'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkProtocolNameKey = attribute.Key("network.protocol.name")
-
- // NetworkProtocolVersionKey is the attribute Key conforming to the
- // "network.protocol.version" semantic conventions. It represents the
- // actual version of the protocol used for network communication.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.1', '2'
- // Note: If protocol version is subject to negotiation (for example using
- // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute
- // SHOULD be set to the negotiated version. If the actual protocol version
- // is not known, this attribute SHOULD NOT be set.
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
-
- // NetworkTransportKey is the attribute Key conforming to the
- // "network.transport" semantic conventions. It represents the [OSI
- // transport layer](https://osi-model.com/transport-layer/) or
- // [inter-process communication
- // method](https://wikipedia.org/wiki/Inter-process_communication).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tcp', 'udp'
- // Note: The value SHOULD be normalized to lowercase.
- //
- // Consider always setting the transport when setting a port number, since
- // a port number is ambiguous without knowing the transport. For example
- // different processes could be listening on TCP port 12345 and UDP port
- // 12345.
- NetworkTransportKey = attribute.Key("network.transport")
-
- // NetworkTypeKey is the attribute Key conforming to the "network.type"
- // semantic conventions. It represents the [OSI network
- // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ipv4', 'ipv6'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkTypeKey = attribute.Key("network.type")
-)
-
-var (
- // GPRS
- NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
- // EDGE
- NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
- // UMTS
- NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
- // CDMA
- NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
- // IDEN
- NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
- // EHRPD
- NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
- // GSM
- NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
-)
-
-var (
- // wifi
- NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
- // wired
- NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
- // cell
- NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
- // unavailable
- NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
- // unknown
- NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
-)
-
-var (
- // transmit
- NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
- // receive
- NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
-)
-
-var (
- // TCP
- NetworkTransportTCP = NetworkTransportKey.String("tcp")
- // UDP
- NetworkTransportUDP = NetworkTransportKey.String("udp")
- // Named or anonymous pipe
- NetworkTransportPipe = NetworkTransportKey.String("pipe")
- // Unix domain socket
- NetworkTransportUnix = NetworkTransportKey.String("unix")
-)
-
-var (
- // IPv4
- NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
- // IPv6
- NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
-)
-
-// NetworkCarrierIcc returns an attribute KeyValue conforming to the
-// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetworkCarrierIcc(val string) attribute.KeyValue {
- return NetworkCarrierIccKey.String(val)
-}
-
-// NetworkCarrierMcc returns an attribute KeyValue conforming to the
-// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
-// country code.
-func NetworkCarrierMcc(val string) attribute.KeyValue {
- return NetworkCarrierMccKey.String(val)
-}
-
-// NetworkCarrierMnc returns an attribute KeyValue conforming to the
-// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
-// network code.
-func NetworkCarrierMnc(val string) attribute.KeyValue {
- return NetworkCarrierMncKey.String(val)
-}
-
-// NetworkCarrierName returns an attribute KeyValue conforming to the
-// "network.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetworkCarrierName(val string) attribute.KeyValue {
- return NetworkCarrierNameKey.String(val)
-}
-
-// NetworkLocalAddress returns an attribute KeyValue conforming to the
-// "network.local.address" semantic conventions. It represents the local
-// address of the network connection - IP address or Unix domain socket name.
-func NetworkLocalAddress(val string) attribute.KeyValue {
- return NetworkLocalAddressKey.String(val)
-}
-
-// NetworkLocalPort returns an attribute KeyValue conforming to the
-// "network.local.port" semantic conventions. It represents the local port
-// number of the network connection.
-func NetworkLocalPort(val int) attribute.KeyValue {
- return NetworkLocalPortKey.Int(val)
-}
-
-// NetworkPeerAddress returns an attribute KeyValue conforming to the
-// "network.peer.address" semantic conventions. It represents the peer address
-// of the network connection - IP address or Unix domain socket name.
-func NetworkPeerAddress(val string) attribute.KeyValue {
- return NetworkPeerAddressKey.String(val)
-}
-
-// NetworkPeerPort returns an attribute KeyValue conforming to the
-// "network.peer.port" semantic conventions. It represents the peer port number
-// of the network connection.
-func NetworkPeerPort(val int) attribute.KeyValue {
- return NetworkPeerPortKey.Int(val)
-}
-
-// NetworkProtocolName returns an attribute KeyValue conforming to the
-// "network.protocol.name" semantic conventions. It represents the [OSI
-// application layer](https://osi-model.com/application-layer/) or non-OSI
-// equivalent.
-func NetworkProtocolName(val string) attribute.KeyValue {
- return NetworkProtocolNameKey.String(val)
-}
-
-// NetworkProtocolVersion returns an attribute KeyValue conforming to the
-// "network.protocol.version" semantic conventions. It represents the actual
-// version of the protocol used for network communication.
-func NetworkProtocolVersion(val string) attribute.KeyValue {
- return NetworkProtocolVersionKey.String(val)
-}
-
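// Editorial note, not part of the vendored file: a short sketch showing how
// the network attribute helpers and enum values above describe a single
// connection. The function name and literal values are hypothetical examples.
func exampleNetworkAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		NetworkTransportTCP,
		NetworkTypeIpv4,
		NetworkPeerAddress("10.1.2.80"),
		NetworkPeerPort(65123),
		NetworkProtocolName("http"),
		NetworkProtocolVersion("1.1"),
	}
}
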
-// An OCI image manifest.
-const (
- // OciManifestDigestKey is the attribute Key conforming to the
- // "oci.manifest.digest" semantic conventions. It represents the digest of
-	// the OCI image manifest. For container images specifically, it is the
-	// digest by which the container image is known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
- // Note: Follows [OCI Image Manifest
- // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
- // and specifically the [Digest
- // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
- // An example can be found in [Example Image
- // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
- OciManifestDigestKey = attribute.Key("oci.manifest.digest")
-)
-
-// OciManifestDigest returns an attribute KeyValue conforming to the
-// "oci.manifest.digest" semantic conventions. It represents the digest of the
-// OCI image manifest. For container images specifically, it is the digest by
-// which the container image is known.
-func OciManifestDigest(val string) attribute.KeyValue {
- return OciManifestDigestKey.String(val)
-}
-
-// Attributes used by the OpenTracing Shim layer.
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span doesn't depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
- // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
- // semantic conventions. It represents the unique identifier for a
- // particular build or compilation of the operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
- OSBuildIDKey = attribute.Key("os.build_id")
-
- // OSDescriptionKey is the attribute Key conforming to the "os.description"
- // semantic conventions. It represents the human readable (not intended to
-	// be parsed) OS version information, as reported by, for example, the
-	// `ver` or `lsb_release -a` commands.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
- // LTS'
- OSDescriptionKey = attribute.Key("os.description")
-
- // OSNameKey is the attribute Key conforming to the "os.name" semantic
- // conventions. It represents the human readable operating system name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
-
- // OSTypeKey is the attribute Key conforming to the "os.type" semantic
- // conventions. It represents the operating system type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- OSTypeKey = attribute.Key("os.type")
-
- // OSVersionKey is the attribute Key conforming to the "os.version"
- // semantic conventions. It represents the version string of the operating
- // system as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
-// semantic conventions. It represents the unique identifier for a particular
-// build or compilation of the operating system.
-func OSBuildID(val string) attribute.KeyValue {
- return OSBuildIDKey.String(val)
-}
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, as reported by, for example,
-// the `ver` or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
- return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
- return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
- return OSVersionKey.String(val)
-}
-
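// Editorial note, not part of the vendored file: a sketch of the OS attributes
// above as they might appear on a resource. The function name and literal
// values are hypothetical examples.
func exampleOSAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		OSTypeLinux,
		OSName("Ubuntu"),
		OSVersion("18.04.1"),
		OSDescription("Ubuntu 18.04.1 LTS"),
		OSBuildID("22621"),
	}
}
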
-// Attributes reserved for OpenTelemetry
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
-
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](/docs/resource/README.md#service) of the remote
- // service. SHOULD be equal to the actual `service.name` resource attribute
- // of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](/docs/resource/README.md#service) of the remote service.
-// SHOULD be equal to the actual `service.name` resource attribute of the
-// remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
-
-// An operating system process.
-const (
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
-	// "process.command_args" semantic conventions. It represents all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
-	// Examples: 'cmd/otelcol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
-	// command used to launch the process as a single string. On Windows, can
-	// be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
-	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessContextSwitchTypeKey is the attribute Key conforming to the
-	// "process.context_switch_type" semantic conventions. It specifies whether
-	// the context switches for this data point were voluntary or involuntary.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
-
- // ProcessCreationTimeKey is the attribute Key conforming to the
- // "process.creation.time" semantic conventions. It represents the date and
- // time the process was created, in ISO 8601 format.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2023-11-21T09:25:34.853Z'
- ProcessCreationTimeKey = attribute.Key("process.creation.time")
-
- // ProcessExecutableNameKey is the attribute Key conforming to the
- // "process.executable.name" semantic conventions. It represents the name
- // of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
- // of `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
- // ProcessExecutablePathKey is the attribute Key conforming to the
- // "process.executable.path" semantic conventions. It represents the full
- // path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessExitCodeKey is the attribute Key conforming to the
- // "process.exit.code" semantic conventions. It represents the exit code of
- // the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 127
- ProcessExitCodeKey = attribute.Key("process.exit.code")
-
- // ProcessExitTimeKey is the attribute Key conforming to the
- // "process.exit.time" semantic conventions. It represents the date and
- // time the process exited, in ISO 8601 format.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2023-11-21T09:26:12.315Z'
- ProcessExitTimeKey = attribute.Key("process.exit.time")
-
- // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
- // "process.group_leader.pid" semantic conventions. It represents the PID
- // of the process's group leader. This is also the process group ID (PGID)
- // of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 23
- ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
-
- // ProcessInteractiveKey is the attribute Key conforming to the
-	// "process.interactive" semantic conventions. It represents whether the
-	// process is connected to an interactive shell.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessInteractiveKey = attribute.Key("process.interactive")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-
- // ProcessPagingFaultTypeKey is the attribute Key conforming to the
- // "process.paging.fault_type" semantic conventions. It represents the type
- // of page fault for this data point. Type `major` is for major/hard page
- // faults, and `minor` is for minor/soft page faults.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
-
- // ProcessParentPIDKey is the attribute Key conforming to the
- // "process.parent_pid" semantic conventions. It represents the parent
- // Process identifier (PPID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 111
- ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
- // ProcessPIDKey is the attribute Key conforming to the "process.pid"
- // semantic conventions. It represents the process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
-
- // ProcessRealUserIDKey is the attribute Key conforming to the
- // "process.real_user.id" semantic conventions. It represents the real user
- // ID (RUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1000
- ProcessRealUserIDKey = attribute.Key("process.real_user.id")
-
- // ProcessRealUserNameKey is the attribute Key conforming to the
- // "process.real_user.name" semantic conventions. It represents the
- // username of the real user of the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'operator'
- ProcessRealUserNameKey = attribute.Key("process.real_user.name")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessSavedUserIDKey is the attribute Key conforming to the
- // "process.saved_user.id" semantic conventions. It represents the saved
- // user ID (SUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1002
- ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
-
- // ProcessSavedUserNameKey is the attribute Key conforming to the
- // "process.saved_user.name" semantic conventions. It represents the
- // username of the saved user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'operator'
- ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
-
- // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
- // "process.session_leader.pid" semantic conventions. It represents the PID
- // of the process's session leader. This is also the session ID (SID) of
- // the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 14
- ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
-
- // ProcessUserIDKey is the attribute Key conforming to the
- // "process.user.id" semantic conventions. It represents the effective user
- // ID (EUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1001
- ProcessUserIDKey = attribute.Key("process.user.id")
-
- // ProcessUserNameKey is the attribute Key conforming to the
- // "process.user.name" semantic conventions. It represents the username of
- // the effective user of the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'root'
- ProcessUserNameKey = attribute.Key("process.user.name")
-
- // ProcessVpidKey is the attribute Key conforming to the "process.vpid"
- // semantic conventions. It represents the virtual process identifier.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12
- // Note: The process ID within a PID namespace. This is not necessarily
- // unique across all processes on the host but it is unique within the
- // process namespace that the process exists within.
- ProcessVpidKey = attribute.Key("process.vpid")
-)
-
-var (
- // voluntary
- ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
- // involuntary
- ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
-)
-
-var (
- // major
- ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
- // minor
- ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
-)
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCreationTime returns an attribute KeyValue conforming to the
-// "process.creation.time" semantic conventions. It represents the date and
-// time the process was created, in ISO 8601 format.
-func ProcessCreationTime(val string) attribute.KeyValue {
- return ProcessCreationTimeKey.String(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessExitCode returns an attribute KeyValue conforming to the
-// "process.exit.code" semantic conventions. It represents the exit code of the
-// process.
-func ProcessExitCode(val int) attribute.KeyValue {
- return ProcessExitCodeKey.Int(val)
-}
-
-// ProcessExitTime returns an attribute KeyValue conforming to the
-// "process.exit.time" semantic conventions. It represents the date and time
-// the process exited, in ISO 8601 format.
-func ProcessExitTime(val string) attribute.KeyValue {
- return ProcessExitTimeKey.String(val)
-}
-
-// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
-// "process.group_leader.pid" semantic conventions. It represents the PID of
-// the process's group leader. This is also the process group ID (PGID) of the
-// process.
-func ProcessGroupLeaderPID(val int) attribute.KeyValue {
- return ProcessGroupLeaderPIDKey.Int(val)
-}
-
-// ProcessInteractive returns an attribute KeyValue conforming to the
-// "process.interactive" semantic conventions. It represents the whether the
-// process is connected to an interactive shell.
-func ProcessInteractive(val bool) attribute.KeyValue {
- return ProcessInteractiveKey.Bool(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PPID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessRealUserID returns an attribute KeyValue conforming to the
-// "process.real_user.id" semantic conventions. It represents the real user ID
-// (RUID) of the process.
-func ProcessRealUserID(val int) attribute.KeyValue {
- return ProcessRealUserIDKey.Int(val)
-}
-
-// ProcessRealUserName returns an attribute KeyValue conforming to the
-// "process.real_user.name" semantic conventions. It represents the username of
-// the real user of the process.
-func ProcessRealUserName(val string) attribute.KeyValue {
- return ProcessRealUserNameKey.String(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
- return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
- return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
- return ProcessRuntimeVersionKey.String(val)
-}
-
-// ProcessSavedUserID returns an attribute KeyValue conforming to the
-// "process.saved_user.id" semantic conventions. It represents the saved user
-// ID (SUID) of the process.
-func ProcessSavedUserID(val int) attribute.KeyValue {
- return ProcessSavedUserIDKey.Int(val)
-}
-
-// ProcessSavedUserName returns an attribute KeyValue conforming to the
-// "process.saved_user.name" semantic conventions. It represents the username
-// of the saved user.
-func ProcessSavedUserName(val string) attribute.KeyValue {
- return ProcessSavedUserNameKey.String(val)
-}
-
-// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
-// "process.session_leader.pid" semantic conventions. It represents the PID of
-// the process's session leader. This is also the session ID (SID) of the
-// process.
-func ProcessSessionLeaderPID(val int) attribute.KeyValue {
- return ProcessSessionLeaderPIDKey.Int(val)
-}
-
-// ProcessUserID returns an attribute KeyValue conforming to the
-// "process.user.id" semantic conventions. It represents the effective user ID
-// (EUID) of the process.
-func ProcessUserID(val int) attribute.KeyValue {
- return ProcessUserIDKey.Int(val)
-}
-
-// ProcessUserName returns an attribute KeyValue conforming to the
-// "process.user.name" semantic conventions. It represents the username of the
-// effective user of the process.
-func ProcessUserName(val string) attribute.KeyValue {
- return ProcessUserNameKey.String(val)
-}
-
-// ProcessVpid returns an attribute KeyValue conforming to the
-// "process.vpid" semantic conventions. It represents the virtual process
-// identifier.
-func ProcessVpid(val int) attribute.KeyValue {
- return ProcessVpidKey.Int(val)
-}
-
-// Attributes for process CPU
-const (
- // ProcessCPUStateKey is the attribute Key conforming to the
- // "process.cpu.state" semantic conventions. It represents the CPU state of
- // the process.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessCPUStateKey = attribute.Key("process.cpu.state")
-)
-
-var (
- // system
- ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
- // user
- ProcessCPUStateUser = ProcessCPUStateKey.String("user")
- // wait
- ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
-)
-
-// Attributes for remote procedure calls.
-const (
- // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
- // "rpc.connect_rpc.error_code" semantic conventions. It represents the
- // [error codes](https://connect.build/docs/protocol/#error-codes) of the
- // Connect request. Error codes are always string values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of request or response. Since protocol allows id to be int,
- // string, `null` or missing (for notifications), value is expected to be
- // cast to string for simplicity. Use empty string in case of `null` value.
- // Omit entirely if this is a notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
- // doesn't specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCMessageCompressedSizeKey is the attribute Key conforming to the
- // "rpc.message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
-
- // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
- // semantic conventions. It MUST be calculated as two different counters
- // starting from `1`, one for sent messages and one for received messages.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- RPCMessageIDKey = attribute.Key("rpc.message.id")
-
- // RPCMessageTypeKey is the attribute Key conforming to the
- // "rpc.message.type" semantic conventions. It represents the whether this
- // is a received or sent message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageTypeKey = attribute.Key("rpc.message.type")
-
- // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
- // "rpc.message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called, must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCSystemKey = attribute.Key("rpc.system")
-)
-
-var (
- // cancelled
- RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
- // unknown
- RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
- // invalid_argument
- RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
- // deadline_exceeded
- RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
- // not_found
- RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
- // already_exists
- RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
- // permission_denied
- RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
- // resource_exhausted
- RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
- // failed_precondition
- RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
- // aborted
- RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
- // out_of_range
- RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
- // unimplemented
- RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
- // internal
- RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
- // unavailable
- RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
- // data_loss
- RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
- // unauthenticated
- RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-var (
- // sent
- RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
- // received
- RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
- // Connect RPC
- RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since protocol allows id to be int, string,
-// `null` or missing (for notifications), value is expected to be cast to
-// string for simplicity. Use empty string in case of `null` value. Omit
-// entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// doesn't specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
-// "rpc.message.compressed_size" semantic conventions. It represents the
-// compressed size of the message in bytes.
-func RPCMessageCompressedSize(val int) attribute.KeyValue {
- return RPCMessageCompressedSizeKey.Int(val)
-}
-
-// RPCMessageID returns an attribute KeyValue conforming to the
-// "rpc.message.id" semantic conventions. It represents the mUST be calculated
-// as two different counters starting from `1` one for sent messages and one
-// for received message.
-func RPCMessageID(val int) attribute.KeyValue {
- return RPCMessageIDKey.Int(val)
-}
-
-// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
-// the "rpc.message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func RPCMessageUncompressedSize(val int) attribute.KeyValue {
- return RPCMessageUncompressedSizeKey.Int(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called, must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
-
-// These attributes may be used to describe the server in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ServerAddressKey is the attribute Key conforming to the "server.address"
- // semantic conventions. It represents the server domain name if available
- // without reverse DNS lookup; otherwise, IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.address` SHOULD represent the server address
- // behind any intermediaries, for example proxies, if it's available.
- ServerAddressKey = attribute.Key("server.address")
-
- // ServerPortKey is the attribute Key conforming to the "server.port"
- // semantic conventions. It represents the server port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.port` SHOULD represent the server port behind
- // any intermediaries, for example proxies, if it's available.
- ServerPortKey = attribute.Key("server.port")
-)
-
-// ServerAddress returns an attribute KeyValue conforming to the
-// "server.address" semantic conventions. It represents the server domain name
-// if available without reverse DNS lookup; otherwise, IP address or Unix
-// domain socket name.
-func ServerAddress(val string) attribute.KeyValue {
- return ServerAddressKey.String(val)
-}
-
-// ServerPort returns an attribute KeyValue conforming to the "server.port"
-// semantic conventions. It represents the server port number.
-func ServerPort(val int) attribute.KeyValue {
- return ServerPortKey.Int(val)
-}
-
-// A service instance.
-const (
- // ServiceInstanceIDKey is the attribute Key conforming to the
- // "service.instance.id" semantic conventions. It represents the string ID
- // of the service instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be
- // globally unique). The ID helps to
- // distinguish instances of the same service that exist at the same time
- // (e.g. instances of a horizontally scaled
- // service).
- //
- // Implementations, such as SDKs, are recommended to generate a random
- // Version 1 or Version 4 [RFC
- // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an
- // inherent unique ID as the source of
- // this value if stability is desirable. In that case, the ID SHOULD be
- // used as source of a UUID Version 5 and
- // SHOULD use the following UUID as the namespace:
- // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
- //
- // UUIDs are typically recommended, as only an opaque value for the
- // purposes of identifying a service instance is
- // needed. Similar to what can be seen in the man page for the
- // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
- // file, the underlying
- // data, such as pod name and namespace should be treated as confidential,
- // being the user's choice to expose it
- // or not via another resource attribute.
- //
- // For applications running behind an application server (like unicorn), we
- // do not recommend using one identifier
- // for all processes participating in the application. Instead, it's
- // recommended that each division (e.g. a worker
- // thread in unicorn) have its own instance.id.
- //
- // It's not recommended for a Collector to set `service.instance.id` if it
- // can't unambiguously determine the
- // service instance that is generating that telemetry. For instance,
- // creating an UUID based on `pod.name` will
- // likely be wrong, as the Collector might not know from which container
- // within that pod the telemetry originated.
- // However, Collectors can set the `service.instance.id` if they can
- // unambiguously determine the service instance
- // for that telemetry. This is typically the case for scraping receivers,
- // as they know the target address and
- // port.
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
- // ServiceNameKey is the attribute Key conforming to the "service.name"
- // semantic conventions. It represents the logical name of the service.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled
- // services. If the value was not specified, SDKs MUST fallback to
- // `unknown_service:` concatenated with
- // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
- // `process.executable.name` is not available, the value MUST be set to
- // `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
-
- // ServiceNamespaceKey is the attribute Key conforming to the
- // "service.namespace" semantic conventions. It represents a namespace for
- // `service.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group
- // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name`
- // is expected to be unique for all services that have no explicit
- // namespace defined (so the empty/unspecified namespace is simply one more
- // valid namespace). Zero-length namespace string is assumed equal to
- // unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
-
- // ServiceVersionKey is the attribute Key conforming to the
- // "service.version" semantic conventions. It represents the version string
- // of the service API or implementation. The format is not defined by these
- // conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2.0.0', 'a01dbef8a'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
- return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
- return ServiceNameKey.String(val)
-}
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
- return ServiceNamespaceKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation. The format is not defined by these
-// conventions.
-func ServiceVersion(val string) attribute.KeyValue {
- return ServiceVersionKey.String(val)
-}
-
-// Session is defined as the period of time encompassing all activities
-// performed by the application and the actions executed by the end user.
-// Consequently, a Session is represented as a collection of Logs, Events, and
-// Spans emitted by the Client Application throughout the Session's duration.
-// Each Session is assigned a unique identifier, which is included as an
-// attribute in the Logs, Events, and Spans generated during the Session's
-// lifecycle.
-// When a session reaches end of life, typically due to user inactivity or
-// session timeout, a new session identifier will be assigned. The previous
-// session identifier may be provided by the instrumentation so that telemetry
-// backends can link the two sessions.
-const (
- // SessionIDKey is the attribute Key conforming to the "session.id"
- // semantic conventions. It represents a unique id to identify a session.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionIDKey = attribute.Key("session.id")
-
- // SessionPreviousIDKey is the attribute Key conforming to the
- // "session.previous_id" semantic conventions. It represents the previous
- // `session.id` for this user, when known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionPreviousIDKey = attribute.Key("session.previous_id")
-)
-
-// SessionID returns an attribute KeyValue conforming to the "session.id"
-// semantic conventions. It represents a unique id to identify a session.
-func SessionID(val string) attribute.KeyValue {
- return SessionIDKey.String(val)
-}
-
-// SessionPreviousID returns an attribute KeyValue conforming to the
-// "session.previous_id" semantic conventions. It represents the previous
-// `session.id` for this user, when known.
-func SessionPreviousID(val string) attribute.KeyValue {
- return SessionPreviousIDKey.String(val)
-}
-
-// SignalR attributes
-const (
- // SignalrConnectionStatusKey is the attribute Key conforming to the
- // "signalr.connection.status" semantic conventions. It represents the
- // signalR HTTP connection closure status.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'app_shutdown', 'timeout'
- SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
-
- // SignalrTransportKey is the attribute Key conforming to the
- // "signalr.transport" semantic conventions. It represents the [SignalR
- // transport
- // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'web_sockets', 'long_polling'
- SignalrTransportKey = attribute.Key("signalr.transport")
-)
-
-var (
- // The connection was closed normally
- SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
- // The connection was closed due to a timeout
- SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
- // The connection was closed because the app is shutting down
- SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
-)
-
-var (
- // ServerSentEvents protocol
- SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
- // LongPolling protocol
- SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
- // WebSockets protocol
- SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
-)
-
-// These attributes may be used to describe the sender of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // SourceAddressKey is the attribute Key conforming to the "source.address"
- // semantic conventions. It represents the source address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the destination side, and when communicating
- // through an intermediary, `source.address` SHOULD represent the source
- // address behind any intermediaries, for example proxies, if it's
- // available.
- SourceAddressKey = attribute.Key("source.address")
-
- // SourcePortKey is the attribute Key conforming to the "source.port"
- // semantic conventions. It represents the source port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- SourcePortKey = attribute.Key("source.port")
-)
-
-// SourceAddress returns an attribute KeyValue conforming to the
-// "source.address" semantic conventions. It represents the source address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func SourceAddress(val string) attribute.KeyValue {
- return SourceAddressKey.String(val)
-}
-
-// SourcePort returns an attribute KeyValue conforming to the "source.port"
-// semantic conventions. It represents the source port number
-func SourcePort(val int) attribute.KeyValue {
- return SourcePortKey.Int(val)
-}
-
-// Describes System attributes
-const (
- // SystemDeviceKey is the attribute Key conforming to the "system.device"
- // semantic conventions. It represents the device identifier
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '(identifier)'
- SystemDeviceKey = attribute.Key("system.device")
-)
-
-// SystemDevice returns an attribute KeyValue conforming to the
-// "system.device" semantic conventions. It represents the device identifier
-func SystemDevice(val string) attribute.KeyValue {
- return SystemDeviceKey.String(val)
-}
-
-// Describes System CPU attributes
-const (
- // SystemCPULogicalNumberKey is the attribute Key conforming to the
- // "system.cpu.logical_number" semantic conventions. It represents the
- // logical CPU number [0..n-1]
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1
- SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
-
- // SystemCPUStateKey is the attribute Key conforming to the
- // "system.cpu.state" semantic conventions. It represents the state of the
- // CPU
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'idle', 'interrupt'
- SystemCPUStateKey = attribute.Key("system.cpu.state")
-)
-
-var (
- // user
- SystemCPUStateUser = SystemCPUStateKey.String("user")
- // system
- SystemCPUStateSystem = SystemCPUStateKey.String("system")
- // nice
- SystemCPUStateNice = SystemCPUStateKey.String("nice")
- // idle
- SystemCPUStateIdle = SystemCPUStateKey.String("idle")
- // iowait
- SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
- // interrupt
- SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
- // steal
- SystemCPUStateSteal = SystemCPUStateKey.String("steal")
-)
-
-// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
-// "system.cpu.logical_number" semantic conventions. It represents the logical
-// CPU number [0..n-1]
-func SystemCPULogicalNumber(val int) attribute.KeyValue {
- return SystemCPULogicalNumberKey.Int(val)
-}
-
-// Describes System Memory attributes
-const (
- // SystemMemoryStateKey is the attribute Key conforming to the
- // "system.memory.state" semantic conventions. It represents the memory
- // state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free', 'cached'
- SystemMemoryStateKey = attribute.Key("system.memory.state")
-)
-
-var (
- // used
- SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
- // free
- SystemMemoryStateFree = SystemMemoryStateKey.String("free")
- // shared
- SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
- // buffers
- SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
- // cached
- SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
-)
-
-// Describes System Memory Paging attributes
-const (
- // SystemPagingDirectionKey is the attribute Key conforming to the
- // "system.paging.direction" semantic conventions. It represents the paging
- // access direction
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'in'
- SystemPagingDirectionKey = attribute.Key("system.paging.direction")
-
- // SystemPagingStateKey is the attribute Key conforming to the
- // "system.paging.state" semantic conventions. It represents the memory
- // paging state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free'
- SystemPagingStateKey = attribute.Key("system.paging.state")
-
- // SystemPagingTypeKey is the attribute Key conforming to the
- // "system.paging.type" semantic conventions. It represents the memory
- // paging type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'minor'
- SystemPagingTypeKey = attribute.Key("system.paging.type")
-)
-
-var (
- // in
- SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
- // out
- SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
-)
-
-var (
- // used
- SystemPagingStateUsed = SystemPagingStateKey.String("used")
- // free
- SystemPagingStateFree = SystemPagingStateKey.String("free")
-)
-
-var (
- // major
- SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
- // minor
- SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
-)
-
-// Describes Filesystem attributes
-const (
- // SystemFilesystemModeKey is the attribute Key conforming to the
- // "system.filesystem.mode" semantic conventions. It represents the
- // filesystem mode
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'rw, ro'
- SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
-
- // SystemFilesystemMountpointKey is the attribute Key conforming to the
- // "system.filesystem.mountpoint" semantic conventions. It represents the
- // filesystem mount path
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/mnt/data'
- SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
-
- // SystemFilesystemStateKey is the attribute Key conforming to the
- // "system.filesystem.state" semantic conventions. It represents the
- // filesystem state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'used'
- SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
-
- // SystemFilesystemTypeKey is the attribute Key conforming to the
- // "system.filesystem.type" semantic conventions. It represents the
- // filesystem type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ext4'
- SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
-)
-
-var (
- // used
- SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
- // free
- SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
- // reserved
- SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
-)
-
-var (
- // fat32
- SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
- // exfat
- SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
- // ntfs
- SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
- // refs
- SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
- // hfsplus
- SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
- // ext4
- SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
-)
-
-// SystemFilesystemMode returns an attribute KeyValue conforming to the
-// "system.filesystem.mode" semantic conventions. It represents the filesystem
-// mode
-func SystemFilesystemMode(val string) attribute.KeyValue {
- return SystemFilesystemModeKey.String(val)
-}
-
-// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
-// the "system.filesystem.mountpoint" semantic conventions. It represents the
-// filesystem mount path
-func SystemFilesystemMountpoint(val string) attribute.KeyValue {
- return SystemFilesystemMountpointKey.String(val)
-}
-
-// Describes Network attributes
-const (
- // SystemNetworkStateKey is the attribute Key conforming to the
- // "system.network.state" semantic conventions. It represents a stateless
- // protocol MUST NOT set this attribute
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'close_wait'
- SystemNetworkStateKey = attribute.Key("system.network.state")
-)
-
-var (
- // close
- SystemNetworkStateClose = SystemNetworkStateKey.String("close")
- // close_wait
- SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
- // closing
- SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
- // delete
- SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
- // established
- SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
- // fin_wait_1
- SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
- // fin_wait_2
- SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
- // last_ack
- SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
- // listen
- SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
- // syn_recv
- SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
- // syn_sent
- SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
- // time_wait
- SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
-)
-
-// Describes System Process attributes
-const (
- // SystemProcessStatusKey is the attribute Key conforming to the
- // "system.process.status" semantic conventions. It represents the process
- // state, e.g., [Linux Process State
- // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'running'
- SystemProcessStatusKey = attribute.Key("system.process.status")
-)
-
-var (
- // running
- SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
- // sleeping
- SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
- // stopped
- SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
- // defunct
- SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
-)
-
-// Attributes for telemetry SDK.
-const (
- // TelemetrySDKLanguageKey is the attribute Key conforming to the
- // "telemetry.sdk.language" semantic conventions. It represents the
- // language of the telemetry SDK.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
- // TelemetrySDKNameKey is the attribute Key conforming to the
- // "telemetry.sdk.name" semantic conventions. It represents the name of the
- // telemetry SDK as defined above.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'opentelemetry'
- // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
- // to `opentelemetry`.
- // If another SDK, like a fork or a vendor-provided implementation, is
- // used, this SDK MUST set the
- // `telemetry.sdk.name` attribute to the fully-qualified class or module
- // name of this SDK's main entry point
- // or another suitable identifier depending on the language.
- // The identifier `opentelemetry` is reserved and MUST NOT be used in this
- // case.
- // All custom identifiers SHOULD be stable across different versions of an
- // implementation.
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
- // TelemetrySDKVersionKey is the attribute Key conforming to the
- // "telemetry.sdk.version" semantic conventions. It represents the version
- // string of the telemetry SDK.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-
- // TelemetryDistroNameKey is the attribute Key conforming to the
- // "telemetry.distro.name" semantic conventions. It represents the name of
- // the auto instrumentation agent or distribution, if used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'parts-unlimited-java'
- // Note: Official auto instrumentation agents and distributions SHOULD set
- // the `telemetry.distro.name` attribute to
- // a string starting with `opentelemetry-`, e.g.
- // `opentelemetry-java-instrumentation`.
- TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
-
- // TelemetryDistroVersionKey is the attribute Key conforming to the
- // "telemetry.distro.version" semantic conventions. It represents the
- // version string of the auto instrumentation agent or distribution, if
- // used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2.3'
- TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // rust
- TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
- return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
- return TelemetrySDKVersionKey.String(val)
-}
-
-// TelemetryDistroName returns an attribute KeyValue conforming to the
-// "telemetry.distro.name" semantic conventions. It represents the name of the
-// auto instrumentation agent or distribution, if used.
-func TelemetryDistroName(val string) attribute.KeyValue {
- return TelemetryDistroNameKey.String(val)
-}
-
-// TelemetryDistroVersion returns an attribute KeyValue conforming to the
-// "telemetry.distro.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent or distribution, if used.
-func TelemetryDistroVersion(val string) attribute.KeyValue {
- return TelemetryDistroVersionKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// Semantic convention attributes in the TLS namespace.
-const (
- // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
- // semantic conventions. It represents the string indicating the
- // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
- // used during the current connection.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
- // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
- // Note: The values allowed for `tls.cipher` MUST be one of the
- // `Descriptions` of the [registered TLS Cipher
- // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
- TLSCipherKey = attribute.Key("tls.cipher")
-
- // TLSClientCertificateKey is the attribute Key conforming to the
- // "tls.client.certificate" semantic conventions. It represents the
- // PEM-encoded stand-alone certificate offered by the client. This is
- // usually mutually-exclusive of `client.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSClientCertificateKey = attribute.Key("tls.client.certificate")
-
- // TLSClientCertificateChainKey is the attribute Key conforming to the
- // "tls.client.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the client. This is usually mutually-exclusive of
- // `client.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
-
- // TLSClientHashMd5Key is the attribute Key conforming to the
- // "tls.client.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of DER-encoded version of
- // certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
-
- // TLSClientHashSha1Key is the attribute Key conforming to the
- // "tls.client.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of DER-encoded version of
- // certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
-
- // TLSClientHashSha256Key is the attribute Key conforming to the
- // "tls.client.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of DER-encoded version
- // of certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
-
- // TLSClientIssuerKey is the attribute Key conforming to the
- // "tls.client.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSClientIssuerKey = attribute.Key("tls.client.issuer")
-
- // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
- // semantic conventions. It represents a hash that identifies clients based
- // on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSClientJa3Key = attribute.Key("tls.client.ja3")
-
- // TLSClientNotAfterKey is the attribute Key conforming to the
- // "tls.client.not_after" semantic conventions. It represents the date/Time
- // indicating when client certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
-
- // TLSClientNotBeforeKey is the attribute Key conforming to the
- // "tls.client.not_before" semantic conventions. It represents the
- // date/Time indicating when client certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
-
- // TLSClientServerNameKey is the attribute Key conforming to the
- // "tls.client.server_name" semantic conventions. It represents the also
- // called an SNI, this tells the server which hostname to which the client
- // is attempting to connect to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry.io'
- TLSClientServerNameKey = attribute.Key("tls.client.server_name")
-
- // TLSClientSubjectKey is the attribute Key conforming to the
- // "tls.client.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
- TLSClientSubjectKey = attribute.Key("tls.client.subject")
-
- // TLSClientSupportedCiphersKey is the attribute Key conforming to the
- // "tls.client.supported_ciphers" semantic conventions. It represents the
- // array of ciphers offered by the client during the client hello.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
- TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
-
- // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
- // conventions. It represents the string indicating the curve used for the
- // given cipher, when applicable
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'secp256r1'
- TLSCurveKey = attribute.Key("tls.curve")
-
- // TLSEstablishedKey is the attribute Key conforming to the
- // "tls.established" semantic conventions. It represents the boolean flag
- // indicating if the TLS negotiation was successful and transitioned to an
- // encrypted tunnel.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSEstablishedKey = attribute.Key("tls.established")
-
- // TLSNextProtocolKey is the attribute Key conforming to the
- // "tls.next_protocol" semantic conventions. It represents the string
- // indicating the protocol being tunneled. Per the values in the [IANA
- // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
- // this string should be lower case.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'http/1.1'
- TLSNextProtocolKey = attribute.Key("tls.next_protocol")
-
- // TLSProtocolNameKey is the attribute Key conforming to the
- // "tls.protocol.name" semantic conventions. It represents the normalized
- // lowercase protocol name parsed from original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- TLSProtocolNameKey = attribute.Key("tls.protocol.name")
-
- // TLSProtocolVersionKey is the attribute Key conforming to the
- // "tls.protocol.version" semantic conventions. It represents the numeric
- // part of the version parsed from the original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2', '3'
- TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
-
- // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
- // semantic conventions. It represents the boolean flag indicating if this
- // TLS connection was resumed from an existing TLS negotiation.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSResumedKey = attribute.Key("tls.resumed")
-
- // TLSServerCertificateKey is the attribute Key conforming to the
- // "tls.server.certificate" semantic conventions. It represents the
- // PEM-encoded stand-alone certificate offered by the server. This is
- // usually mutually-exclusive of `server.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSServerCertificateKey = attribute.Key("tls.server.certificate")
-
- // TLSServerCertificateChainKey is the attribute Key conforming to the
- // "tls.server.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the server. This is usually mutually-exclusive of
- // `server.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
-
- // TLSServerHashMd5Key is the attribute Key conforming to the
- // "tls.server.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of DER-encoded version of
- // certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
-
- // TLSServerHashSha1Key is the attribute Key conforming to the
- // "tls.server.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of DER-encoded version of
- // certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
-
- // TLSServerHashSha256Key is the attribute Key conforming to the
- // "tls.server.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of DER-encoded version
- // of certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
-
- // TLSServerIssuerKey is the attribute Key conforming to the
- // "tls.server.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the server.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSServerIssuerKey = attribute.Key("tls.server.issuer")
-
- // TLSServerJa3sKey is the attribute Key conforming to the
- // "tls.server.ja3s" semantic conventions. It represents a hash that
- // identifies servers based on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
-
- // TLSServerNotAfterKey is the attribute Key conforming to the
- // "tls.server.not_after" semantic conventions. It represents the date/Time
- // indicating when server certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
-
- // TLSServerNotBeforeKey is the attribute Key conforming to the
- // "tls.server.not_before" semantic conventions. It represents the
- // date/Time indicating when server certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
-
- // TLSServerSubjectKey is the attribute Key conforming to the
- // "tls.server.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // server.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
- TLSServerSubjectKey = attribute.Key("tls.server.subject")
-)
-
-var (
- // ssl
- TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
- // tls
- TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
-)
-
-// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
-// semantic conventions. It represents the string indicating the
-// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
-// during the current connection.
-func TLSCipher(val string) attribute.KeyValue {
- return TLSCipherKey.String(val)
-}
-
-// TLSClientCertificate returns an attribute KeyValue conforming to the
-// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the client. This is usually
-// mutually-exclusive of `client.certificate_chain` since this value also
-// exists in that list.
-func TLSClientCertificate(val string) attribute.KeyValue {
- return TLSClientCertificateKey.String(val)
-}
-
-// TLSClientCertificateChain returns an attribute KeyValue conforming to the
-// "tls.client.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the client. This is usually mutually-exclusive of `client.certificate` since
-// that value should be the first certificate in the chain.
-func TLSClientCertificateChain(val ...string) attribute.KeyValue {
- return TLSClientCertificateChainKey.StringSlice(val)
-}
-
-// TLSClientHashMd5 returns an attribute KeyValue conforming to the
-// "tls.client.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashMd5(val string) attribute.KeyValue {
- return TLSClientHashMd5Key.String(val)
-}
-
-// TLSClientHashSha1 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha1(val string) attribute.KeyValue {
- return TLSClientHashSha1Key.String(val)
-}
-
-// TLSClientHashSha256 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha256(val string) attribute.KeyValue {
- return TLSClientHashSha256Key.String(val)
-}
-
-// TLSClientIssuer returns an attribute KeyValue conforming to the
-// "tls.client.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSClientIssuer(val string) attribute.KeyValue {
- return TLSClientIssuerKey.String(val)
-}
-
-// TLSClientJa3 returns an attribute KeyValue conforming to the
-// "tls.client.ja3" semantic conventions. It represents a hash that identifies
-// clients based on how they perform an SSL/TLS handshake.
-func TLSClientJa3(val string) attribute.KeyValue {
- return TLSClientJa3Key.String(val)
-}
-
-// TLSClientNotAfter returns an attribute KeyValue conforming to the
-// "tls.client.not_after" semantic conventions. It represents the date/Time
-// indicating when client certificate is no longer considered valid.
-func TLSClientNotAfter(val string) attribute.KeyValue {
- return TLSClientNotAfterKey.String(val)
-}
-
-// TLSClientNotBefore returns an attribute KeyValue conforming to the
-// "tls.client.not_before" semantic conventions. It represents the date/Time
-// indicating when client certificate is first considered valid.
-func TLSClientNotBefore(val string) attribute.KeyValue {
- return TLSClientNotBeforeKey.String(val)
-}
-
-// TLSClientServerName returns an attribute KeyValue conforming to the
-// "tls.client.server_name" semantic conventions. It represents the also called
-// an SNI, this tells the server which hostname to which the client is
-// attempting to connect to.
-func TLSClientServerName(val string) attribute.KeyValue {
- return TLSClientServerNameKey.String(val)
-}
-
-// TLSClientSubject returns an attribute KeyValue conforming to the
-// "tls.client.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the client.
-func TLSClientSubject(val string) attribute.KeyValue {
- return TLSClientSubjectKey.String(val)
-}
-
-// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
-// "tls.client.supported_ciphers" semantic conventions. It represents the array
-// of ciphers offered by the client during the client hello.
-func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
- return TLSClientSupportedCiphersKey.StringSlice(val)
-}
-
-// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
-// semantic conventions. It represents the string indicating the curve used for
-// the given cipher, when applicable
-func TLSCurve(val string) attribute.KeyValue {
- return TLSCurveKey.String(val)
-}
-
-// TLSEstablished returns an attribute KeyValue conforming to the
-// "tls.established" semantic conventions. It represents the boolean flag
-// indicating if the TLS negotiation was successful and transitioned to an
-// encrypted tunnel.
-func TLSEstablished(val bool) attribute.KeyValue {
- return TLSEstablishedKey.Bool(val)
-}
-
-// TLSNextProtocol returns an attribute KeyValue conforming to the
-// "tls.next_protocol" semantic conventions. It represents the string
-// indicating the protocol being tunneled. Per the values in the [IANA
-// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
-// this string should be lower case.
-func TLSNextProtocol(val string) attribute.KeyValue {
- return TLSNextProtocolKey.String(val)
-}
-
-// TLSProtocolVersion returns an attribute KeyValue conforming to the
-// "tls.protocol.version" semantic conventions. It represents the numeric part
-// of the version parsed from the original string of the negotiated [SSL/TLS
-// protocol
-// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-func TLSProtocolVersion(val string) attribute.KeyValue {
- return TLSProtocolVersionKey.String(val)
-}
-
-// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
-// semantic conventions. It represents the boolean flag indicating if this TLS
-// connection was resumed from an existing TLS negotiation.
-func TLSResumed(val bool) attribute.KeyValue {
- return TLSResumedKey.Bool(val)
-}
-
-// TLSServerCertificate returns an attribute KeyValue conforming to the
-// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the server. This is usually
-// mutually-exclusive of `server.certificate_chain` since this value also
-// exists in that list.
-func TLSServerCertificate(val string) attribute.KeyValue {
- return TLSServerCertificateKey.String(val)
-}
-
-// TLSServerCertificateChain returns an attribute KeyValue conforming to the
-// "tls.server.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the server. This is usually mutually-exclusive of `server.certificate` since
-// that value should be the first certificate in the chain.
-func TLSServerCertificateChain(val ...string) attribute.KeyValue {
- return TLSServerCertificateChainKey.StringSlice(val)
-}
-
-// TLSServerHashMd5 returns an attribute KeyValue conforming to the
-// "tls.server.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashMd5(val string) attribute.KeyValue {
- return TLSServerHashMd5Key.String(val)
-}
-
-// TLSServerHashSha1 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha1(val string) attribute.KeyValue {
- return TLSServerHashSha1Key.String(val)
-}
-
-// TLSServerHashSha256 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha256(val string) attribute.KeyValue {
- return TLSServerHashSha256Key.String(val)
-}
-
-// TLSServerIssuer returns an attribute KeyValue conforming to the
-// "tls.server.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSServerIssuer(val string) attribute.KeyValue {
- return TLSServerIssuerKey.String(val)
-}
-
-// TLSServerJa3s returns an attribute KeyValue conforming to the
-// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
-// servers based on how they perform an SSL/TLS handshake.
-func TLSServerJa3s(val string) attribute.KeyValue {
- return TLSServerJa3sKey.String(val)
-}
-
-// TLSServerNotAfter returns an attribute KeyValue conforming to the
-// "tls.server.not_after" semantic conventions. It represents the date/Time
-// indicating when server certificate is no longer considered valid.
-func TLSServerNotAfter(val string) attribute.KeyValue {
- return TLSServerNotAfterKey.String(val)
-}
-
-// TLSServerNotBefore returns an attribute KeyValue conforming to the
-// "tls.server.not_before" semantic conventions. It represents the date/Time
-// indicating when server certificate is first considered valid.
-func TLSServerNotBefore(val string) attribute.KeyValue {
- return TLSServerNotBeforeKey.String(val)
-}
-
-// TLSServerSubject returns an attribute KeyValue conforming to the
-// "tls.server.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the server.
-func TLSServerSubject(val string) attribute.KeyValue {
- return TLSServerSubjectKey.String(val)
-}
-
-// Attributes describing URL.
-const (
- // URLDomainKey is the attribute Key conforming to the "url.domain"
- // semantic conventions. It represents the domain extracted from the
- // `url.full`, such as "opentelemetry.io".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
- // '[1080:0:0:0:8:800:200C:417A]'
- // Note: In some cases a URL may refer to an IP and/or port directly,
- // without a domain name. In this case, the IP address would go to the
- // domain field. If the URL contains a [literal IPv6
- // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
- // `[` and `]`, the `[` and `]` characters should also be captured in the
- // domain field.
- URLDomainKey = attribute.Key("url.domain")
-
- // URLExtensionKey is the attribute Key conforming to the "url.extension"
- // semantic conventions. It represents the file extension extracted from
- // the `url.full`, excluding the leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: The file extension is only set if it exists, as not every url has
- // a file extension. When the file name has multiple extensions
- // `example.tar.gz`, only the last one should be captured `gz`, not
- // `tar.gz`.
- URLExtensionKey = attribute.Key("url.extension")
-
- // URLFragmentKey is the attribute Key conforming to the "url.fragment"
- // semantic conventions. It represents the [URI
- // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'SemConv'
- URLFragmentKey = attribute.Key("url.fragment")
-
- // URLFullKey is the attribute Key conforming to the "url.full" semantic
- // conventions. It represents the absolute URL describing a network
- // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // '//localhost'
- // Note: For network calls, URL usually has
- // `scheme://host[:port][path][?query][#fragment]` format, where the
- // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
- // included nevertheless.
- // `url.full` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case username and
- // password SHOULD be redacted and attribute's value SHOULD be
- // `https://REDACTED:REDACTED@www.example.com/`.
- // `url.full` SHOULD capture the absolute URL when it is available (or can
- // be reconstructed). Sensitive content provided in `url.full` SHOULD be
- // scrubbed when instrumentations can identify it.
- URLFullKey = attribute.Key("url.full")
-
- // URLOriginalKey is the attribute Key conforming to the "url.original"
- // semantic conventions. It represents the unmodified original URL as seen
- // in the event source.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // 'search?q=OpenTelemetry'
- // Note: In network monitoring, the observed URL may be a full URL, whereas
- // in access logs, the URL is often just represented as a path. This field
- // is meant to represent the URL as it was observed, complete or not.
- // `url.original` might contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case password and
- // username SHOULD NOT be redacted and attribute's value SHOULD remain the
- // same.
- URLOriginalKey = attribute.Key("url.original")
-
- // URLPathKey is the attribute Key conforming to the "url.path" semantic
- // conventions. It represents the [URI
- // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/search'
- // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLPathKey = attribute.Key("url.path")
-
- // URLPortKey is the attribute Key conforming to the "url.port" semantic
- // conventions. It represents the port extracted from the `url.full`
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 443
- URLPortKey = attribute.Key("url.port")
-
- // URLQueryKey is the attribute Key conforming to the "url.query" semantic
- // conventions. It represents the [URI
- // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'q=OpenTelemetry'
- // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLQueryKey = attribute.Key("url.query")
-
- // URLRegisteredDomainKey is the attribute Key conforming to the
- // "url.registered_domain" semantic conventions. It represents the highest
- // registered url domain, stripped of the subdomain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.com', 'foo.co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org). For example, the registered domain for
- // `foo.example.com` is `example.com`. Trying to approximate this by simply
- // taking the last two labels will not work well for TLDs such as `co.uk`.
- URLRegisteredDomainKey = attribute.Key("url.registered_domain")
-
- // URLSchemeKey is the attribute Key conforming to the "url.scheme"
- // semantic conventions. It represents the [URI
- // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
- // identifying the used protocol.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https', 'ftp', 'telnet'
- URLSchemeKey = attribute.Key("url.scheme")
-
- // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
- // semantic conventions. It represents the subdomain portion of a fully
- // qualified domain name includes all of the names except the host name
- // under the registered_domain. In a partially qualified domain, or if the
- // qualification level of the full name cannot be determined, subdomain
- // contains all of the names below the registered domain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'east', 'sub2.sub1'
- // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
- // the domain has multiple levels of subdomain, such as
- // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
- // with no trailing period.
- URLSubdomainKey = attribute.Key("url.subdomain")
-
- // URLTemplateKey is the attribute Key conforming to the "url.template"
- // semantic conventions. It represents the low-cardinality template of an
- // [absolute path
- // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
- URLTemplateKey = attribute.Key("url.template")
-
- // URLTopLevelDomainKey is the attribute Key conforming to the
- // "url.top_level_domain" semantic conventions. It represents the effective
- // top level domain (eTLD), also known as the domain suffix, is the last
- // part of the domain name. For example, the top level domain for
- // example.com is `com`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com', 'co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org).
- URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
-)
-
-// URLDomain returns an attribute KeyValue conforming to the "url.domain"
-// semantic conventions. It represents the domain extracted from the
-// `url.full`, such as "opentelemetry.io".
-func URLDomain(val string) attribute.KeyValue {
- return URLDomainKey.String(val)
-}
-
-// URLExtension returns an attribute KeyValue conforming to the
-// "url.extension" semantic conventions. It represents the file extension
-// extracted from the `url.full`, excluding the leading dot.
-func URLExtension(val string) attribute.KeyValue {
- return URLExtensionKey.String(val)
-}
-
-// URLFragment returns an attribute KeyValue conforming to the
-// "url.fragment" semantic conventions. It represents the [URI
-// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
-func URLFragment(val string) attribute.KeyValue {
- return URLFragmentKey.String(val)
-}
-
-// URLFull returns an attribute KeyValue conforming to the "url.full"
-// semantic conventions. It represents the absolute URL describing a network
-// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
-func URLFull(val string) attribute.KeyValue {
- return URLFullKey.String(val)
-}
-
-// URLOriginal returns an attribute KeyValue conforming to the
-// "url.original" semantic conventions. It represents the unmodified original
-// URL as seen in the event source.
-func URLOriginal(val string) attribute.KeyValue {
- return URLOriginalKey.String(val)
-}
-
-// URLPath returns an attribute KeyValue conforming to the "url.path"
-// semantic conventions. It represents the [URI
-// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
-func URLPath(val string) attribute.KeyValue {
- return URLPathKey.String(val)
-}
-
-// URLPort returns an attribute KeyValue conforming to the "url.port"
-// semantic conventions. It represents the port extracted from the `url.full`
-func URLPort(val int) attribute.KeyValue {
- return URLPortKey.Int(val)
-}
-
-// URLQuery returns an attribute KeyValue conforming to the "url.query"
-// semantic conventions. It represents the [URI
-// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
-func URLQuery(val string) attribute.KeyValue {
- return URLQueryKey.String(val)
-}
-
-// URLRegisteredDomain returns an attribute KeyValue conforming to the
-// "url.registered_domain" semantic conventions. It represents the highest
-// registered url domain, stripped of the subdomain.
-func URLRegisteredDomain(val string) attribute.KeyValue {
- return URLRegisteredDomainKey.String(val)
-}
-
-// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
-// semantic conventions. It represents the [URI
-// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
-// identifying the used protocol.
-func URLScheme(val string) attribute.KeyValue {
- return URLSchemeKey.String(val)
-}
-
-// URLSubdomain returns an attribute KeyValue conforming to the
-// "url.subdomain" semantic conventions. It represents the subdomain portion of
-// a fully qualified domain name includes all of the names except the host name
-// under the registered_domain. In a partially qualified domain, or if the
-// qualification level of the full name cannot be determined, subdomain
-// contains all of the names below the registered domain.
-func URLSubdomain(val string) attribute.KeyValue {
- return URLSubdomainKey.String(val)
-}
-
-// URLTemplate returns an attribute KeyValue conforming to the
-// "url.template" semantic conventions. It represents the low-cardinality
-// template of an [absolute path
-// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
-func URLTemplate(val string) attribute.KeyValue {
- return URLTemplateKey.String(val)
-}
-
-// URLTopLevelDomain returns an attribute KeyValue conforming to the
-// "url.top_level_domain" semantic conventions. It represents the effective top
-// level domain (eTLD), also known as the domain suffix, is the last part of
-// the domain name. For example, the top level domain for example.com is `com`.
-func URLTopLevelDomain(val string) attribute.KeyValue {
- return URLTopLevelDomainKey.String(val)
-}
-
-// Describes user-agent attributes.
-const (
- // UserAgentNameKey is the attribute Key conforming to the
- // "user_agent.name" semantic conventions. It represents the name of the
- // user-agent extracted from original. Usually refers to the browser's
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Safari', 'YourApp'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
- // from original string. In the case of using a user-agent for non-browser
- // products, such as microservices with multiple names/versions inside the
- // `user_agent.original`, the most significant name SHOULD be selected. In
- // such a scenario it should align with `user_agent.version`
- UserAgentNameKey = attribute.Key("user_agent.name")
-
- // UserAgentOriginalKey is the attribute Key conforming to the
- // "user_agent.original" semantic conventions. It represents the value of
- // the [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
- // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
- // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
- // grpc-java-okhttp/1.27.2'
- UserAgentOriginalKey = attribute.Key("user_agent.original")
-
- // UserAgentVersionKey is the attribute Key conforming to the
- // "user_agent.version" semantic conventions. It represents the version of
- // the user-agent extracted from original. Usually refers to the browser's
- // version
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.1.2', '1.0.0'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's
- // version from original string. In the case of using a user-agent for
- // non-browser products, such as microservices with multiple names/versions
- // inside the `user_agent.original`, the most significant version SHOULD be
- // selected. In such a scenario it should align with `user_agent.name`
- UserAgentVersionKey = attribute.Key("user_agent.version")
-)
-
-// UserAgentName returns an attribute KeyValue conforming to the
-// "user_agent.name" semantic conventions. It represents the name of the
-// user-agent extracted from original. Usually refers to the browser's name.
-func UserAgentName(val string) attribute.KeyValue {
- return UserAgentNameKey.String(val)
-}
-
-// UserAgentOriginal returns an attribute KeyValue conforming to the
-// "user_agent.original" semantic conventions. It represents the value of the
-// [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func UserAgentOriginal(val string) attribute.KeyValue {
- return UserAgentOriginalKey.String(val)
-}
-
-// UserAgentVersion returns an attribute KeyValue conforming to the
-// "user_agent.version" semantic conventions. It represents the version of the
-// user-agent extracted from original. Usually refers to the browser's version
-func UserAgentVersion(val string) attribute.KeyValue {
- return UserAgentVersionKey.String(val)
-}
-
-// The attributes used to describe the packaged software running the
-// application code.
-const (
- // WebEngineDescriptionKey is the attribute Key conforming to the
- // "webengine.description" semantic conventions. It represents the
- // additional description of the web engine (e.g. detailed version and
- // edition information).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-
- // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
- // semantic conventions. It represents the name of the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
-
- // WebEngineVersionKey is the attribute Key conforming to the
- // "webengine.version" semantic conventions. It represents the version of
- // the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
-)
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
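A minimal, illustrative sketch (not part of this change) of how the v1.26.0 attribute helpers removed above are typically consumed: each helper just wraps an attribute.Key into an attribute.KeyValue that callers attach to a span. The tracer name, span name, and attribute values below are placeholder assumptions.

package tracingexample

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// annotateRequest attaches URL, user-agent, and TLS semantic-convention
// attributes to a span using helper functions from the removed file.
func annotateRequest(ctx context.Context) {
	// Tracer and span names are illustrative placeholders.
	_, span := otel.Tracer("podman-tui/example").Start(ctx, "http.request")
	defer span.End()

	span.SetAttributes(
		semconv.URLFull("https://www.example.com/search?q=OpenTelemetry"),
		semconv.URLScheme("https"),
		semconv.URLPath("/search"),
		semconv.UserAgentOriginal("YourApp/1.0.0 grpc-java-okhttp/1.27.2"),
		semconv.TLSProtocolVersion("1.3"),
		semconv.TLSEstablished(true),
	)
}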
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
deleted file mode 100644
index bfaee0d56..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-const (
- // ExceptionEventName is the name of the Span event representing an exception.
- ExceptionEventName = "exception"
-)
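The removed exception.go defines only the name of the span event used when exceptions are recorded. A minimal sketch of emitting such an event by hand, assuming the exception attribute keys (ExceptionTypeKey, ExceptionMessageKey) from the same semconv package; this roughly mirrors what trace.Span.RecordError produces.

package tracingexample

import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// recordException adds an "exception" span event carrying the error type and
// message, using the event name constant defined in the removed file.
func recordException(span trace.Span, err error) {
	span.AddEvent(semconv.ExceptionEventName, trace.WithAttributes(
		semconv.ExceptionTypeKey.String(fmt.Sprintf("%T", err)),
		semconv.ExceptionMessageKey.String(err.Error()),
	))
}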
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
deleted file mode 100644
index fcdb9f485..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
+++ /dev/null
@@ -1,1307 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-const (
-
- // ContainerCPUTime is the metric conforming to the "container.cpu.time"
- // semantic conventions. It represents the total CPU time consumed.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ContainerCPUTimeName = "container.cpu.time"
- ContainerCPUTimeUnit = "s"
- ContainerCPUTimeDescription = "Total CPU time consumed"
-
- // ContainerMemoryUsage is the metric conforming to the
- // "container.memory.usage" semantic conventions. It represents the memory
- // usage of the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerMemoryUsageName = "container.memory.usage"
- ContainerMemoryUsageUnit = "By"
- ContainerMemoryUsageDescription = "Memory usage of the container."
-
- // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic
- // conventions. It represents the disk bytes for the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerDiskIoName = "container.disk.io"
- ContainerDiskIoUnit = "By"
- ContainerDiskIoDescription = "Disk bytes for the container."
-
- // ContainerNetworkIo is the metric conforming to the "container.network.io"
- // semantic conventions. It represents the network bytes for the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerNetworkIoName = "container.network.io"
- ContainerNetworkIoUnit = "By"
- ContainerNetworkIoDescription = "Network bytes for the container."
-
- // DBClientOperationDuration is the metric conforming to the
- // "db.client.operation.duration" semantic conventions. It represents the
- // duration of database client operations.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientOperationDurationName = "db.client.operation.duration"
- DBClientOperationDurationUnit = "s"
- DBClientOperationDurationDescription = "Duration of database client operations."
-
- // DBClientConnectionCount is the metric conforming to the
- // "db.client.connection.count" semantic conventions. It represents the number
- // of connections that are currently in state described by the `state`
- // attribute.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionCountName = "db.client.connection.count"
- DBClientConnectionCountUnit = "{connection}"
- DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute"
-
- // DBClientConnectionIdleMax is the metric conforming to the
- // "db.client.connection.idle.max" semantic conventions. It represents the
- // maximum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionIdleMaxName = "db.client.connection.idle.max"
- DBClientConnectionIdleMaxUnit = "{connection}"
- DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed"
-
- // DBClientConnectionIdleMin is the metric conforming to the
- // "db.client.connection.idle.min" semantic conventions. It represents the
- // minimum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionIdleMinName = "db.client.connection.idle.min"
- DBClientConnectionIdleMinUnit = "{connection}"
- DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed"
-
- // DBClientConnectionMax is the metric conforming to the
- // "db.client.connection.max" semantic conventions. It represents the maximum
- // number of open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionMaxName = "db.client.connection.max"
- DBClientConnectionMaxUnit = "{connection}"
- DBClientConnectionMaxDescription = "The maximum number of open connections allowed"
-
- // DBClientConnectionPendingRequests is the metric conforming to the
- // "db.client.connection.pending_requests" semantic conventions. It represents
- // the number of pending requests for an open connection, cumulative for the
- // entire pool.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests"
- DBClientConnectionPendingRequestsUnit = "{request}"
- DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
-
- // DBClientConnectionTimeouts is the metric conforming to the
- // "db.client.connection.timeouts" semantic conventions. It represents the
- // number of connection timeouts that have occurred trying to obtain a
- // connection from the pool.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
- DBClientConnectionTimeoutsUnit = "{timeout}"
- DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
-
- // DBClientConnectionCreateTime is the metric conforming to the
- // "db.client.connection.create_time" semantic conventions. It represents the
- // time it took to create a new connection.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionCreateTimeName = "db.client.connection.create_time"
- DBClientConnectionCreateTimeUnit = "s"
- DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
-
- // DBClientConnectionWaitTime is the metric conforming to the
- // "db.client.connection.wait_time" semantic conventions. It represents the
- // time it took to obtain an open connection from the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
- DBClientConnectionWaitTimeUnit = "s"
- DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
-
- // DBClientConnectionUseTime is the metric conforming to the
- // "db.client.connection.use_time" semantic conventions. It represents the time
- // between borrowing a connection and returning it to the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionUseTimeName = "db.client.connection.use_time"
- DBClientConnectionUseTimeUnit = "s"
- DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
-
- // DBClientConnectionsUsage is the metric conforming to the
- // "db.client.connections.usage" semantic conventions. It represents the
- // deprecated, use `db.client.connection.count` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsUsageName = "db.client.connections.usage"
- DBClientConnectionsUsageUnit = "{connection}"
- DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
-
- // DBClientConnectionsIdleMax is the metric conforming to the
- // "db.client.connections.idle.max" semantic conventions. It represents the
- // deprecated, use `db.client.connection.idle.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
- DBClientConnectionsIdleMaxUnit = "{connection}"
- DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
-
- // DBClientConnectionsIdleMin is the metric conforming to the
- // "db.client.connections.idle.min" semantic conventions. It represents the
- // deprecated, use `db.client.connection.idle.min` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
- DBClientConnectionsIdleMinUnit = "{connection}"
- DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
-
- // DBClientConnectionsMax is the metric conforming to the
- // "db.client.connections.max" semantic conventions. It represents the
- // deprecated, use `db.client.connection.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsMaxName = "db.client.connections.max"
- DBClientConnectionsMaxUnit = "{connection}"
- DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
-
- // DBClientConnectionsPendingRequests is the metric conforming to the
- // "db.client.connections.pending_requests" semantic conventions. It represents
- // the deprecated, use `db.client.connection.pending_requests` instead.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
- DBClientConnectionsPendingRequestsUnit = "{request}"
- DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
-
- // DBClientConnectionsTimeouts is the metric conforming to the
- // "db.client.connections.timeouts" semantic conventions. It represents the
- // deprecated, use `db.client.connection.timeouts` instead.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
- DBClientConnectionsTimeoutsUnit = "{timeout}"
- DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
-
- // DBClientConnectionsCreateTime is the metric conforming to the
- // "db.client.connections.create_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.create_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
- DBClientConnectionsCreateTimeUnit = "ms"
- DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsWaitTime is the metric conforming to the
- // "db.client.connections.wait_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.wait_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
- DBClientConnectionsWaitTimeUnit = "ms"
- DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsUseTime is the metric conforming to the
- // "db.client.connections.use_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.use_time` instead. Note: the unit also
- // changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsUseTimeName = "db.client.connections.use_time"
- DBClientConnectionsUseTimeUnit = "ms"
- DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
- // semantic conventions. It represents the measures the time taken to perform a
- // DNS lookup.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DNSLookupDurationName = "dns.lookup.duration"
- DNSLookupDurationUnit = "s"
- DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
-
- // AspnetcoreRoutingMatchAttempts is the metric conforming to the
- // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
- // number of requests that were attempted to be matched to an endpoint.
- // Instrument: counter
- // Unit: {match_attempt}
- // Stability: Stable
- AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
- AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
- AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
-
- // AspnetcoreDiagnosticsExceptions is the metric conforming to the
- // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
- // number of exceptions caught by exception handling middleware.
- // Instrument: counter
- // Unit: {exception}
- // Stability: Stable
- AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
- AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
- AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
-
- // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
- // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
- // represents the number of requests that are currently active on the server
- // that hold a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
- AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
- AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
-
- // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
- // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
- // represents the duration of rate limiting lease held by requests on the
- // server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
- AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
- AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
-
- // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
- // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
- // represents the time the request spent in a queue waiting to acquire a rate
- // limiting lease.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
- AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
- AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
- // represents the number of requests that are currently queued, waiting to
- // acquire a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
- AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
- AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
- // number of requests that tried to acquire a rate limiting lease.
- // Instrument: counter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
- AspnetcoreRateLimitingRequestsUnit = "{request}"
- AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
-
- // KestrelActiveConnections is the metric conforming to the
- // "kestrel.active_connections" semantic conventions. It represents the number
- // of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelActiveConnectionsName = "kestrel.active_connections"
- KestrelActiveConnectionsUnit = "{connection}"
- KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // KestrelConnectionDuration is the metric conforming to the
- // "kestrel.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelConnectionDurationName = "kestrel.connection.duration"
- KestrelConnectionDurationUnit = "s"
- KestrelConnectionDurationDescription = "The duration of connections on the server."
-
- // KestrelRejectedConnections is the metric conforming to the
- // "kestrel.rejected_connections" semantic conventions. It represents the
- // number of connections rejected by the server.
- // Instrument: counter
- // Unit: {connection}
- // Stability: Stable
- KestrelRejectedConnectionsName = "kestrel.rejected_connections"
- KestrelRejectedConnectionsUnit = "{connection}"
- KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
-
- // KestrelQueuedConnections is the metric conforming to the
- // "kestrel.queued_connections" semantic conventions. It represents the number
- // of connections that are currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelQueuedConnectionsName = "kestrel.queued_connections"
- KestrelQueuedConnectionsUnit = "{connection}"
- KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
-
- // KestrelQueuedRequests is the metric conforming to the
- // "kestrel.queued_requests" semantic conventions. It represents the number of
- // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
- // currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- KestrelQueuedRequestsName = "kestrel.queued_requests"
- KestrelQueuedRequestsUnit = "{request}"
- KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
-
- // KestrelUpgradedConnections is the metric conforming to the
- // "kestrel.upgraded_connections" semantic conventions. It represents the
- // number of connections that are currently upgraded (WebSockets). .
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
- KestrelUpgradedConnectionsUnit = "{connection}"
- KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ."
-
- // KestrelTLSHandshakeDuration is the metric conforming to the
- // "kestrel.tls_handshake.duration" semantic conventions. It represents the
- // duration of TLS handshakes on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
- KestrelTLSHandshakeDurationUnit = "s"
- KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
-
- // KestrelActiveTLSHandshakes is the metric conforming to the
- // "kestrel.active_tls_handshakes" semantic conventions. It represents the
- // number of TLS handshakes that are currently in progress on the server.
- // Instrument: updowncounter
- // Unit: {handshake}
- // Stability: Stable
- KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
- KestrelActiveTLSHandshakesUnit = "{handshake}"
- KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
-
- // SignalrServerConnectionDuration is the metric conforming to the
- // "signalr.server.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- SignalrServerConnectionDurationName = "signalr.server.connection.duration"
- SignalrServerConnectionDurationUnit = "s"
- SignalrServerConnectionDurationDescription = "The duration of connections on the server."
-
- // SignalrServerActiveConnections is the metric conforming to the
- // "signalr.server.active_connections" semantic conventions. It represents the
- // number of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- SignalrServerActiveConnectionsName = "signalr.server.active_connections"
- SignalrServerActiveConnectionsUnit = "{connection}"
- SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
- // semantic conventions. It represents the measures the duration of the
- // function's logic execution.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInvokeDurationName = "faas.invoke_duration"
- FaaSInvokeDurationUnit = "s"
- FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
-
- // FaaSInitDuration is the metric conforming to the "faas.init_duration"
- // semantic conventions. It represents the measures the duration of the
- // function's initialization, such as a cold start.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInitDurationName = "faas.init_duration"
- FaaSInitDurationUnit = "s"
- FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
-
- // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
- // conventions. It represents the number of invocation cold starts.
- // Instrument: counter
- // Unit: {coldstart}
- // Stability: Experimental
- FaaSColdstartsName = "faas.coldstarts"
- FaaSColdstartsUnit = "{coldstart}"
- FaaSColdstartsDescription = "Number of invocation cold starts"
-
- // FaaSErrors is the metric conforming to the "faas.errors" semantic
- // conventions. It represents the number of invocation errors.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- FaaSErrorsName = "faas.errors"
- FaaSErrorsUnit = "{error}"
- FaaSErrorsDescription = "Number of invocation errors"
-
- // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
- // conventions. It represents the number of successful invocations.
- // Instrument: counter
- // Unit: {invocation}
- // Stability: Experimental
- FaaSInvocationsName = "faas.invocations"
- FaaSInvocationsUnit = "{invocation}"
- FaaSInvocationsDescription = "Number of successful invocations"
-
- // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
- // conventions. It represents the number of invocation timeouts.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- FaaSTimeoutsName = "faas.timeouts"
- FaaSTimeoutsUnit = "{timeout}"
- FaaSTimeoutsDescription = "Number of invocation timeouts"
-
- // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
- // conventions. It represents the distribution of max memory usage per
- // invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSMemUsageName = "faas.mem_usage"
- FaaSMemUsageUnit = "By"
- FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
-
- // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
- // conventions. It represents the distribution of CPU usage per invocation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSCPUUsageName = "faas.cpu_usage"
- FaaSCPUUsageUnit = "s"
- FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
-
- // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
- // conventions. It represents the distribution of net I/O usage per invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSNetIoName = "faas.net_io"
- FaaSNetIoUnit = "By"
- FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
-
- // HTTPServerRequestDuration is the metric conforming to the
- // "http.server.request.duration" semantic conventions. It represents the
- // duration of HTTP server requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPServerRequestDurationName = "http.server.request.duration"
- HTTPServerRequestDurationUnit = "s"
- HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
-
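Every metric in this removed file follows the same Name/Unit/Description constant-triplet pattern. A minimal sketch of wiring one triplet (the http.server.request.duration constants just above) into the OTel metric API; the meter name and the recorded value are placeholder assumptions, not part of this change.

package metricsexample

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// newRequestDurationHistogram builds the http.server.request.duration
// histogram from the generated Name/Unit/Description constants.
func newRequestDurationHistogram() (metric.Float64Histogram, error) {
	meter := otel.Meter("podman-tui/example") // meter name is illustrative
	return meter.Float64Histogram(
		semconv.HTTPServerRequestDurationName,
		metric.WithUnit(semconv.HTTPServerRequestDurationUnit),
		metric.WithDescription(semconv.HTTPServerRequestDurationDescription),
	)
}

// recordRequestDuration records a single observation in seconds.
func recordRequestDuration(ctx context.Context, h metric.Float64Histogram, seconds float64) {
	h.Record(ctx, seconds)
}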
- // HTTPServerActiveRequests is the metric conforming to the
- // "http.server.active_requests" semantic conventions. It represents the number
- // of active HTTP server requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPServerActiveRequestsName = "http.server.active_requests"
- HTTPServerActiveRequestsUnit = "{request}"
- HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
-
- // HTTPServerRequestBodySize is the metric conforming to the
- // "http.server.request.body.size" semantic conventions. It represents the size
- // of HTTP server request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerRequestBodySizeName = "http.server.request.body.size"
- HTTPServerRequestBodySizeUnit = "By"
- HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
-
- // HTTPServerResponseBodySize is the metric conforming to the
- // "http.server.response.body.size" semantic conventions. It represents the
- // size of HTTP server response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerResponseBodySizeName = "http.server.response.body.size"
- HTTPServerResponseBodySizeUnit = "By"
- HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
-
- // HTTPClientRequestDuration is the metric conforming to the
- // "http.client.request.duration" semantic conventions. It represents the
- // duration of HTTP client requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPClientRequestDurationName = "http.client.request.duration"
- HTTPClientRequestDurationUnit = "s"
- HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
-
- // HTTPClientRequestBodySize is the metric conforming to the
- // "http.client.request.body.size" semantic conventions. It represents the size
- // of HTTP client request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientRequestBodySizeName = "http.client.request.body.size"
- HTTPClientRequestBodySizeUnit = "By"
- HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
-
- // HTTPClientResponseBodySize is the metric conforming to the
- // "http.client.response.body.size" semantic conventions. It represents the
- // size of HTTP client response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientResponseBodySizeName = "http.client.response.body.size"
- HTTPClientResponseBodySizeUnit = "By"
- HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
-
- // HTTPClientOpenConnections is the metric conforming to the
- // "http.client.open_connections" semantic conventions. It represents the
- // number of outbound HTTP connections that are currently active or idle on the
- // client.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- HTTPClientOpenConnectionsName = "http.client.open_connections"
- HTTPClientOpenConnectionsUnit = "{connection}"
- HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
-
- // HTTPClientConnectionDuration is the metric conforming to the
- // "http.client.connection.duration" semantic conventions. It represents the
- // duration of the successfully established outbound HTTP connections.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- HTTPClientConnectionDurationName = "http.client.connection.duration"
- HTTPClientConnectionDurationUnit = "s"
- HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
-
- // HTTPClientActiveRequests is the metric conforming to the
- // "http.client.active_requests" semantic conventions. It represents the number
- // of active HTTP requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPClientActiveRequestsName = "http.client.active_requests"
- HTTPClientActiveRequestsUnit = "{request}"
- HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
-
- // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
- // conventions. It represents the measure of initial memory requested.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmMemoryInitName = "jvm.memory.init"
- JvmMemoryInitUnit = "By"
- JvmMemoryInitDescription = "Measure of initial memory requested."
-
- // JvmSystemCPUUtilization is the metric conforming to the
- // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
- // CPU utilization for the whole system as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
- JvmSystemCPUUtilizationUnit = "1"
- JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
-
- // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
- // semantic conventions. It represents the average CPU load of the whole system
- // for the last minute as reported by the JVM.
- // Instrument: gauge
- // Unit: {run_queue_item}
- // Stability: Experimental
- JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
- JvmSystemCPULoad1mUnit = "{run_queue_item}"
- JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
-
- // JvmBufferMemoryUsage is the metric conforming to the
- // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
- // memory used by buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
- JvmBufferMemoryUsageUnit = "By"
- JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
-
- // JvmBufferMemoryLimit is the metric conforming to the
- // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
- // total memory capacity of buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
- JvmBufferMemoryLimitUnit = "By"
- JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
-
- // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
- // conventions. It represents the number of buffers in the pool.
- // Instrument: updowncounter
- // Unit: {buffer}
- // Stability: Experimental
- JvmBufferCountName = "jvm.buffer.count"
- JvmBufferCountUnit = "{buffer}"
- JvmBufferCountDescription = "Number of buffers in the pool."
-
- // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
- // conventions. It represents the measure of memory used.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedName = "jvm.memory.used"
- JvmMemoryUsedUnit = "By"
- JvmMemoryUsedDescription = "Measure of memory used."
-
- // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
- // semantic conventions. It represents the measure of memory committed.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryCommittedName = "jvm.memory.committed"
- JvmMemoryCommittedUnit = "By"
- JvmMemoryCommittedDescription = "Measure of memory committed."
-
- // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
- // conventions. It represents the measure of max obtainable memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryLimitName = "jvm.memory.limit"
- JvmMemoryLimitUnit = "By"
- JvmMemoryLimitDescription = "Measure of max obtainable memory."
-
- // JvmMemoryUsedAfterLastGc is the metric conforming to the
- // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
- // measure of memory used, as measured after the most recent garbage collection
- // event on this pool.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
- JvmMemoryUsedAfterLastGcUnit = "By"
- JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
-
- // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
- // conventions. It represents the duration of JVM garbage collection actions.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- JvmGcDurationName = "jvm.gc.duration"
- JvmGcDurationUnit = "s"
- JvmGcDurationDescription = "Duration of JVM garbage collection actions."
-
- // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
- // conventions. It represents the number of executing platform threads.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Stable
- JvmThreadCountName = "jvm.thread.count"
- JvmThreadCountUnit = "{thread}"
- JvmThreadCountDescription = "Number of executing platform threads."
-
- // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
- // conventions. It represents the number of classes loaded since JVM start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassLoadedName = "jvm.class.loaded"
- JvmClassLoadedUnit = "{class}"
- JvmClassLoadedDescription = "Number of classes loaded since JVM start."
-
- // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
- // semantic conventions. It represents the number of classes unloaded since JVM
- // start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassUnloadedName = "jvm.class.unloaded"
- JvmClassUnloadedUnit = "{class}"
- JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
-
- // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
- // conventions. It represents the number of classes currently loaded.
- // Instrument: updowncounter
- // Unit: {class}
- // Stability: Stable
- JvmClassCountName = "jvm.class.count"
- JvmClassCountUnit = "{class}"
- JvmClassCountDescription = "Number of classes currently loaded."
-
- // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
- // conventions. It represents the number of processors available to the Java
- // virtual machine.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Stable
- JvmCPUCountName = "jvm.cpu.count"
- JvmCPUCountUnit = "{cpu}"
- JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
-
- // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
- // conventions. It represents the cPU time used by the process as reported by
- // the JVM.
- // Instrument: counter
- // Unit: s
- // Stability: Stable
- JvmCPUTimeName = "jvm.cpu.time"
- JvmCPUTimeUnit = "s"
- JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
-
- // JvmCPURecentUtilization is the metric conforming to the
- // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
- // CPU utilization for the process as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Stable
- JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
- JvmCPURecentUtilizationUnit = "1"
- JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
-
- // MessagingPublishDuration is the metric conforming to the
- // "messaging.publish.duration" semantic conventions. It represents the
- // measures the duration of publish operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingPublishDurationName = "messaging.publish.duration"
- MessagingPublishDurationUnit = "s"
- MessagingPublishDurationDescription = "Measures the duration of publish operation."
-
- // MessagingReceiveDuration is the metric conforming to the
- // "messaging.receive.duration" semantic conventions. It represents the
- // measures the duration of receive operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingReceiveDurationName = "messaging.receive.duration"
- MessagingReceiveDurationUnit = "s"
- MessagingReceiveDurationDescription = "Measures the duration of receive operation."
-
- // MessagingProcessDuration is the metric conforming to the
- // "messaging.process.duration" semantic conventions. It represents the
- // measures the duration of process operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingProcessDurationName = "messaging.process.duration"
- MessagingProcessDurationUnit = "s"
- MessagingProcessDurationDescription = "Measures the duration of process operation."
-
- // MessagingPublishMessages is the metric conforming to the
- // "messaging.publish.messages" semantic conventions. It represents the
- // measures the number of published messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingPublishMessagesName = "messaging.publish.messages"
- MessagingPublishMessagesUnit = "{message}"
- MessagingPublishMessagesDescription = "Measures the number of published messages."
-
- // MessagingReceiveMessages is the metric conforming to the
- // "messaging.receive.messages" semantic conventions. It represents the
- // measures the number of received messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingReceiveMessagesName = "messaging.receive.messages"
- MessagingReceiveMessagesUnit = "{message}"
- MessagingReceiveMessagesDescription = "Measures the number of received messages."
-
- // MessagingProcessMessages is the metric conforming to the
- // "messaging.process.messages" semantic conventions. It represents the
- // measures the number of processed messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingProcessMessagesName = "messaging.process.messages"
- MessagingProcessMessagesUnit = "{message}"
- MessagingProcessMessagesDescription = "Measures the number of processed messages."
-
- // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
- // conventions. It represents the total CPU seconds broken down by different
- // states.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ProcessCPUTimeName = "process.cpu.time"
- ProcessCPUTimeUnit = "s"
- ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
-
- // ProcessCPUUtilization is the metric conforming to the
- // "process.cpu.utilization" semantic conventions. It represents the difference
- // in process.cpu.time since the last measurement, divided by the elapsed time
- // and number of CPUs available to the process.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- ProcessCPUUtilizationName = "process.cpu.utilization"
- ProcessCPUUtilizationUnit = "1"
- ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
-
- // ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
- // semantic conventions. It represents the amount of physical memory in use.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryUsageName = "process.memory.usage"
- ProcessMemoryUsageUnit = "By"
- ProcessMemoryUsageDescription = "The amount of physical memory in use."
-
- // ProcessMemoryVirtual is the metric conforming to the
- // "process.memory.virtual" semantic conventions. It represents the amount of
- // committed virtual memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryVirtualName = "process.memory.virtual"
- ProcessMemoryVirtualUnit = "By"
- ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
-
- // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
- // conventions. It represents the disk bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessDiskIoName = "process.disk.io"
- ProcessDiskIoUnit = "By"
- ProcessDiskIoDescription = "Disk bytes transferred."
-
- // ProcessNetworkIo is the metric conforming to the "process.network.io"
- // semantic conventions. It represents the network bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessNetworkIoName = "process.network.io"
- ProcessNetworkIoUnit = "By"
- ProcessNetworkIoDescription = "Network bytes transferred."
-
- // ProcessThreadCount is the metric conforming to the "process.thread.count"
- // semantic conventions. It represents the process threads count.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Experimental
- ProcessThreadCountName = "process.thread.count"
- ProcessThreadCountUnit = "{thread}"
- ProcessThreadCountDescription = "Process threads count."
-
- // ProcessOpenFileDescriptorCount is the metric conforming to the
- // "process.open_file_descriptor.count" semantic conventions. It represents the
- // number of file descriptors in use by the process.
- // Instrument: updowncounter
- // Unit: {count}
- // Stability: Experimental
- ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count"
- ProcessOpenFileDescriptorCountUnit = "{count}"
- ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
-
- // ProcessContextSwitches is the metric conforming to the
- // "process.context_switches" semantic conventions. It represents the number of
- // times the process has been context switched.
- // Instrument: counter
- // Unit: {count}
- // Stability: Experimental
- ProcessContextSwitchesName = "process.context_switches"
- ProcessContextSwitchesUnit = "{count}"
- ProcessContextSwitchesDescription = "Number of times the process has been context switched."
-
- // ProcessPagingFaults is the metric conforming to the "process.paging.faults"
- // semantic conventions. It represents the number of page faults the process
- // has made.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- ProcessPagingFaultsName = "process.paging.faults"
- ProcessPagingFaultsUnit = "{fault}"
- ProcessPagingFaultsDescription = "Number of page faults the process has made."
-
- // RPCServerDuration is the metric conforming to the "rpc.server.duration"
- // semantic conventions. It represents the measures the duration of inbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCServerDurationName = "rpc.server.duration"
- RPCServerDurationUnit = "ms"
- RPCServerDurationDescription = "Measures the duration of inbound RPC."
-
- // RPCServerRequestSize is the metric conforming to the
- // "rpc.server.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerRequestSizeName = "rpc.server.request.size"
- RPCServerRequestSizeUnit = "By"
- RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCServerResponseSize is the metric conforming to the
- // "rpc.server.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerResponseSizeName = "rpc.server.response.size"
- RPCServerResponseSizeUnit = "By"
- RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCServerRequestsPerRPC is the metric conforming to the
- // "rpc.server.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
- RPCServerRequestsPerRPCUnit = "{count}"
- RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCServerResponsesPerRPC is the metric conforming to the
- // "rpc.server.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
- RPCServerResponsesPerRPCUnit = "{count}"
- RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // RPCClientDuration is the metric conforming to the "rpc.client.duration"
- // semantic conventions. It represents the measures the duration of outbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCClientDurationName = "rpc.client.duration"
- RPCClientDurationUnit = "ms"
- RPCClientDurationDescription = "Measures the duration of outbound RPC."
-
- // RPCClientRequestSize is the metric conforming to the
- // "rpc.client.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientRequestSizeName = "rpc.client.request.size"
- RPCClientRequestSizeUnit = "By"
- RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCClientResponseSize is the metric conforming to the
- // "rpc.client.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientResponseSizeName = "rpc.client.response.size"
- RPCClientResponseSizeUnit = "By"
- RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCClientRequestsPerRPC is the metric conforming to the
- // "rpc.client.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
- RPCClientRequestsPerRPCUnit = "{count}"
- RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCClientResponsesPerRPC is the metric conforming to the
- // "rpc.client.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
- RPCClientResponsesPerRPCUnit = "{count}"
- RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
- // conventions. It represents the seconds each logical CPU spent on each mode.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemCPUTimeName = "system.cpu.time"
- SystemCPUTimeUnit = "s"
- SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
-
- // SystemCPUUtilization is the metric conforming to the
- // "system.cpu.utilization" semantic conventions. It represents the difference
- // in system.cpu.time since the last measurement, divided by the elapsed time
- // and number of logical CPUs.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- SystemCPUUtilizationName = "system.cpu.utilization"
- SystemCPUUtilizationUnit = "1"
- SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
-
- // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
- // semantic conventions. It represents the reports the current frequency of the
- // CPU in Hz.
- // Instrument: gauge
- // Unit: {Hz}
- // Stability: Experimental
- SystemCPUFrequencyName = "system.cpu.frequency"
- SystemCPUFrequencyUnit = "{Hz}"
- SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
-
- // SystemCPUPhysicalCount is the metric conforming to the
- // "system.cpu.physical.count" semantic conventions. It represents the reports
- // the number of actual physical processor cores on the hardware.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPUPhysicalCountName = "system.cpu.physical.count"
- SystemCPUPhysicalCountUnit = "{cpu}"
- SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
-
- // SystemCPULogicalCount is the metric conforming to the
- // "system.cpu.logical.count" semantic conventions. It represents the reports
- // the number of logical (virtual) processor cores created by the operating
- // system to manage multitasking.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPULogicalCountName = "system.cpu.logical.count"
- SystemCPULogicalCountUnit = "{cpu}"
- SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
-
- // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
- // semantic conventions. It represents the reports memory in use by state.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryUsageName = "system.memory.usage"
- SystemMemoryUsageUnit = "By"
- SystemMemoryUsageDescription = "Reports memory in use by state."
-
- // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
- // semantic conventions. It represents the total memory available in the
- // system.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryLimitName = "system.memory.limit"
- SystemMemoryLimitUnit = "By"
- SystemMemoryLimitDescription = "Total memory available in the system."
-
- // SystemMemoryShared is the metric conforming to the "system.memory.shared"
- // semantic conventions. It represents the shared memory used (mostly by
- // tmpfs).
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemorySharedName = "system.memory.shared"
- SystemMemorySharedUnit = "By"
- SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
-
- // SystemMemoryUtilization is the metric conforming to the
- // "system.memory.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemMemoryUtilizationName = "system.memory.utilization"
- SystemMemoryUtilizationUnit = "1"
-
- // SystemPagingUsage is the metric conforming to the "system.paging.usage"
- // semantic conventions. It represents the unix swap or windows pagefile usage.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemPagingUsageName = "system.paging.usage"
- SystemPagingUsageUnit = "By"
- SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
-
- // SystemPagingUtilization is the metric conforming to the
- // "system.paging.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingUtilizationName = "system.paging.utilization"
- SystemPagingUtilizationUnit = "1"
-
- // SystemPagingFaults is the metric conforming to the "system.paging.faults"
- // semantic conventions.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingFaultsName = "system.paging.faults"
- SystemPagingFaultsUnit = "{fault}"
-
- // SystemPagingOperations is the metric conforming to the
- // "system.paging.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingOperationsName = "system.paging.operations"
- SystemPagingOperationsUnit = "{operation}"
-
- // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskIoName = "system.disk.io"
- SystemDiskIoUnit = "By"
-
- // SystemDiskOperations is the metric conforming to the
- // "system.disk.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskOperationsName = "system.disk.operations"
- SystemDiskOperationsUnit = "{operation}"
-
- // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
- // semantic conventions. It represents the time disk spent activated.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskIoTimeName = "system.disk.io_time"
- SystemDiskIoTimeUnit = "s"
- SystemDiskIoTimeDescription = "Time disk spent activated"
-
- // SystemDiskOperationTime is the metric conforming to the
- // "system.disk.operation_time" semantic conventions. It represents the sum of
- // the time each operation took to complete.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskOperationTimeName = "system.disk.operation_time"
- SystemDiskOperationTimeUnit = "s"
- SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
-
- // SystemDiskMerged is the metric conforming to the "system.disk.merged"
- // semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskMergedName = "system.disk.merged"
- SystemDiskMergedUnit = "{operation}"
-
- // SystemFilesystemUsage is the metric conforming to the
- // "system.filesystem.usage" semantic conventions.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUsageName = "system.filesystem.usage"
- SystemFilesystemUsageUnit = "By"
-
- // SystemFilesystemUtilization is the metric conforming to the
- // "system.filesystem.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUtilizationName = "system.filesystem.utilization"
- SystemFilesystemUtilizationUnit = "1"
-
- // SystemNetworkDropped is the metric conforming to the
- // "system.network.dropped" semantic conventions. It represents the count of
- // packets that are dropped or discarded even though there was no error.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- SystemNetworkDroppedName = "system.network.dropped"
- SystemNetworkDroppedUnit = "{packet}"
- SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
-
- // SystemNetworkPackets is the metric conforming to the
- // "system.network.packets" semantic conventions.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkPacketsName = "system.network.packets"
- SystemNetworkPacketsUnit = "{packet}"
-
- // SystemNetworkErrors is the metric conforming to the "system.network.errors"
- // semantic conventions. It represents the count of network errors detected.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- SystemNetworkErrorsName = "system.network.errors"
- SystemNetworkErrorsUnit = "{error}"
- SystemNetworkErrorsDescription = "Count of network errors detected"
-
- // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkIoName = "system.network.io"
- SystemNetworkIoUnit = "By"
-
- // SystemNetworkConnections is the metric conforming to the
- // "system.network.connections" semantic conventions.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkConnectionsName = "system.network.connections"
- SystemNetworkConnectionsUnit = "{connection}"
-
- // SystemProcessCount is the metric conforming to the "system.process.count"
- // semantic conventions. It represents the total number of processes in each
- // state.
- // Instrument: updowncounter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCountName = "system.process.count"
- SystemProcessCountUnit = "{process}"
- SystemProcessCountDescription = "Total number of processes in each state"
-
- // SystemProcessCreated is the metric conforming to the
- // "system.process.created" semantic conventions. It represents the total
- // number of processes created over uptime of the host.
- // Instrument: counter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCreatedName = "system.process.created"
- SystemProcessCreatedUnit = "{process}"
- SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
-
- // SystemLinuxMemoryAvailable is the metric conforming to the
- // "system.linux.memory.available" semantic conventions. It represents an
- // estimate of how much memory is available for starting new applications,
- // without causing swapping.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemLinuxMemoryAvailableName = "system.linux.memory.available"
- SystemLinuxMemoryAvailableUnit = "By"
- SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
-)
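For context on the constants being removed above: each metric entry pairs a name, a unit, and a description that are meant to be passed verbatim when creating an instrument. A minimal sketch of that usage, assuming only the public `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/metric` APIs and using local copies of the values shown above rather than the removed package:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// Local copies of the generated values shown above; in the removed package
// they were exported as HTTPServerRequestDurationName/Unit/Description.
const (
	httpServerRequestDurationName        = "http.server.request.duration"
	httpServerRequestDurationUnit        = "s"
	httpServerRequestDurationDescription = "Duration of HTTP server requests."
)

func main() {
	meter := otel.Meter("example/http")

	// The Name/Unit/Description triplet maps directly onto instrument creation.
	hist, err := meter.Float64Histogram(
		httpServerRequestDurationName,
		metric.WithUnit(httpServerRequestDurationUnit),
		metric.WithDescription(httpServerRequestDurationDescription),
	)
	if err != nil {
		panic(err)
	}

	hist.Record(context.Background(), 0.042) // seconds, per the declared unit
}
```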
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md
new file mode 100644
index 000000000..248054789
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md
@@ -0,0 +1,41 @@
+
+# Migration from v1.36.0 to v1.37.0
+
+The `go.opentelemetry.io/otel/semconv/v1.37.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.36.0` with the following exceptions.
+
+## Removed
+
+The following declarations have been removed.
+Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions.
+
+If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use.
+If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case.
+
+- `ContainerRuntime`
+- `ContainerRuntimeKey`
+- `GenAIOpenAIRequestServiceTierAuto`
+- `GenAIOpenAIRequestServiceTierDefault`
+- `GenAIOpenAIRequestServiceTierKey`
+- `GenAIOpenAIResponseServiceTier`
+- `GenAIOpenAIResponseServiceTierKey`
+- `GenAIOpenAIResponseSystemFingerprint`
+- `GenAIOpenAIResponseSystemFingerprintKey`
+- `GenAISystemAWSBedrock`
+- `GenAISystemAnthropic`
+- `GenAISystemAzureAIInference`
+- `GenAISystemAzureAIOpenAI`
+- `GenAISystemCohere`
+- `GenAISystemDeepseek`
+- `GenAISystemGCPGemini`
+- `GenAISystemGCPGenAI`
+- `GenAISystemGCPVertexAI`
+- `GenAISystemGroq`
+- `GenAISystemIBMWatsonxAI`
+- `GenAISystemKey`
+- `GenAISystemMistralAI`
+- `GenAISystemOpenAI`
+- `GenAISystemPerplexity`
+- `GenAISystemXai`
+
+[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions
+[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue
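A minimal sketch of the drop-in upgrade described above, assuming an identifier that exists in both versions (here `AppBuildID`, defined later in this package) so only the import path changes:

```go
package main

import (
	"fmt"

	// Previously: semconv "go.opentelemetry.io/otel/semconv/v1.36.0"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

func main() {
	// Declarations present in both versions keep working after the import swap.
	kv := semconv.AppBuildID("my-app-1.0.0-code-123")
	fmt.Println(kv.Key, kv.Value.AsString())

	// Removed declarations such as GenAISystemOpenAI no longer compile and
	// must be replaced per the deprecation guidance linked above.
}
```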
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
new file mode 100644
index 000000000..d795247f3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.37.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.37.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.37.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
new file mode 100644
index 000000000..b6b27498f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
@@ -0,0 +1,15193 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Namespace: android
+const (
+ // AndroidAppStateKey is the attribute Key conforming to the "android.app.state"
+	// semantic conventions. It represents the state of the
+	// application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "created"
+ // Note: The Android lifecycle states are defined in
+ // [Activity lifecycle callbacks], and from which the `OS identifiers` are
+ // derived.
+ //
+ // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc
+ AndroidAppStateKey = attribute.Key("android.app.state")
+
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+	// "android.os.api_level" semantic conventions. It uniquely identifies the
+	// framework API revision offered by a version (`os.version`) of the Android
+	// operating system. More information can be found in the
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "33", "32"
+ //
+ // [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found in the
+// [Android API levels documentation].
+//
+// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// Enum values for android.app.state
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called in the app for the first time.
+ //
+ // Stability: development
+ AndroidAppStateCreated = AndroidAppStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity,
+ // Context.stopService() has been called when the app was in the foreground
+ // state.
+ //
+ // Stability: development
+ AndroidAppStateBackground = AndroidAppStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called when the app was in either the created
+ // or background states.
+ //
+ // Stability: development
+ AndroidAppStateForeground = AndroidAppStateKey.String("foreground")
+)
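A short, hedged sketch of how the generated `android.*` helpers above might be attached to a span; the tracer and span names are illustrative, and only the public `go.opentelemetry.io/otel` API is assumed:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

func main() {
	// Both the helper function and the enum value yield attribute.KeyValue,
	// so they can be mixed freely in SetAttributes.
	_, span := otel.Tracer("example/android").Start(context.Background(), "app.start")
	defer span.End()

	span.SetAttributes(
		semconv.AndroidOSAPILevel("33"),   // android.os.api_level
		semconv.AndroidAppStateForeground, // android.app.state=foreground
	)
}
```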
+
+// Namespace: app
+const (
+ // AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic
+ // conventions. It represents the unique identifier for a particular build or
+ // compilation of the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0",
+ // "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123"
+ AppBuildIDKey = attribute.Key("app.build_id")
+
+ // AppInstallationIDKey is the attribute Key conforming to the
+ // "app.installation.id" semantic conventions. It represents a unique identifier
+ // representing the installation of an application on a specific device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092"
+ // Note: Its value SHOULD persist across launches of the same application
+ // installation, including through application upgrades.
+ // It SHOULD change if the application is uninstalled or if all applications of
+ // the vendor are uninstalled.
+ // Additionally, users might be able to reset this value (e.g. by clearing
+ // application data).
+ // If an app is installed multiple times on the same device (e.g. in different
+ // accounts on Android), each `app.installation.id` SHOULD have a different
+ // value.
+ // If multiple OpenTelemetry SDKs are used within the same application, they
+ // SHOULD use the same value for `app.installation.id`.
+ // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the
+ // `app.installation.id`.
+ //
+ // For iOS, this value SHOULD be equal to the [vendor identifier].
+ //
+ // For Android, examples of `app.installation.id` implementations include:
+ //
+ // - [Firebase Installation ID].
+ // - A globally unique UUID which is persisted across sessions in your
+ // application.
+ // - [App set ID].
+ // - [`Settings.getString(Settings.Secure.ANDROID_ID)`].
+ //
+ // More information about Android identifier best practices can be found in the
+ // [Android user data IDs guide].
+ //
+ // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor
+ // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations
+ // [App set ID]: https://developer.android.com/identity/app-set-id
+ // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID
+ // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids
+ AppInstallationIDKey = attribute.Key("app.installation.id")
+
+ // AppJankFrameCountKey is the attribute Key conforming to the
+ // "app.jank.frame_count" semantic conventions. It represents a number of frame
+ // renders that experienced jank.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 9, 42
+	// Note: Depending on platform limitations, the value provided MAY be an
+	// approximation.
+ AppJankFrameCountKey = attribute.Key("app.jank.frame_count")
+
+ // AppJankPeriodKey is the attribute Key conforming to the "app.jank.period"
+ // semantic conventions. It represents the time period, in seconds, for which
+ // this jank is being reported.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 5.0, 10.24
+ AppJankPeriodKey = attribute.Key("app.jank.period")
+
+ // AppJankThresholdKey is the attribute Key conforming to the
+ // "app.jank.threshold" semantic conventions. It represents the minimum
+ // rendering threshold for this jank, in seconds.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.016, 0.7, 1.024
+ AppJankThresholdKey = attribute.Key("app.jank.threshold")
+
+ // AppScreenCoordinateXKey is the attribute Key conforming to the
+ // "app.screen.coordinate.x" semantic conventions. It represents the x
+ // (horizontal) coordinate of a screen coordinate, in screen pixels.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 131
+ AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x")
+
+ // AppScreenCoordinateYKey is the attribute Key conforming to the
+ // "app.screen.coordinate.y" semantic conventions. It represents the y
+ // (vertical) component of a screen coordinate, in screen pixels.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12, 99
+ AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y")
+
+ // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id"
+ // semantic conventions. It represents an identifier that uniquely
+ // differentiates this widget from other widgets in the same application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829"
+ // Note: A widget is an application component, typically an on-screen visual GUI
+ // element.
+ AppWidgetIDKey = attribute.Key("app.widget.id")
+
+ // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name"
+ // semantic conventions. It represents the name of an application widget.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "submit", "attack", "Clear Cart"
+ // Note: A widget is an application component, typically an on-screen visual GUI
+ // element.
+ AppWidgetNameKey = attribute.Key("app.widget.name")
+)
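A sketch of the `app.installation.id` guidance in the note above, assuming this package's `SchemaURL` constant and the OTel SDK `resource` package; the literal UUID is only a placeholder for a value the application persists itself across launches:

```go
package appresource

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

// newAppResource builds a Resource carrying the installation identifier.
// Per the note above, the value should be generated once and persisted by
// the application, never derived from hardware IDs.
func newAppResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL, // assumed: schema URL constant exported by this package
		semconv.AppInstallationID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
	)
}
```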
+
+// AppBuildID returns an attribute KeyValue conforming to the "app.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the application.
+func AppBuildID(val string) attribute.KeyValue {
+ return AppBuildIDKey.String(val)
+}
+
+// AppInstallationID returns an attribute KeyValue conforming to the
+// "app.installation.id" semantic conventions. It represents a unique identifier
+// representing the installation of an application on a specific device.
+func AppInstallationID(val string) attribute.KeyValue {
+ return AppInstallationIDKey.String(val)
+}
+
+// AppJankFrameCount returns an attribute KeyValue conforming to the
+// "app.jank.frame_count" semantic conventions. It represents a number of frame
+// renders that experienced jank.
+func AppJankFrameCount(val int) attribute.KeyValue {
+ return AppJankFrameCountKey.Int(val)
+}
+
+// AppJankPeriod returns an attribute KeyValue conforming to the
+// "app.jank.period" semantic conventions. It represents the time period, in
+// seconds, for which this jank is being reported.
+func AppJankPeriod(val float64) attribute.KeyValue {
+ return AppJankPeriodKey.Float64(val)
+}
+
+// AppJankThreshold returns an attribute KeyValue conforming to the
+// "app.jank.threshold" semantic conventions. It represents the minimum rendering
+// threshold for this jank, in seconds.
+func AppJankThreshold(val float64) attribute.KeyValue {
+ return AppJankThresholdKey.Float64(val)
+}
+
+// AppScreenCoordinateX returns an attribute KeyValue conforming to the
+// "app.screen.coordinate.x" semantic conventions. It represents the x
+// (horizontal) coordinate of a screen coordinate, in screen pixels.
+func AppScreenCoordinateX(val int) attribute.KeyValue {
+ return AppScreenCoordinateXKey.Int(val)
+}
+
+// AppScreenCoordinateY returns an attribute KeyValue conforming to the
+// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical)
+// component of a screen coordinate, in screen pixels.
+func AppScreenCoordinateY(val int) attribute.KeyValue {
+ return AppScreenCoordinateYKey.Int(val)
+}
+
+// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id"
+// semantic conventions. It represents an identifier that uniquely differentiates
+// this widget from other widgets in the same application.
+func AppWidgetID(val string) attribute.KeyValue {
+ return AppWidgetIDKey.String(val)
+}
+
+// AppWidgetName returns an attribute KeyValue conforming to the
+// "app.widget.name" semantic conventions. It represents the name of an
+// application widget.
+func AppWidgetName(val string) attribute.KeyValue {
+ return AppWidgetNameKey.String(val)
+}
+
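The `app.jank.*` attributes above take a frame count plus two durations in seconds; a hedged sketch of recording them on a span (the helper name `reportJank` and the tracer/span names are invented for illustration):

```go
package uievents

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

// reportJank is an illustrative helper that records a jank observation on a
// span: how many frames janked, over which reporting period (seconds), and
// the render threshold (seconds) that classified them as jank.
func reportJank(ctx context.Context, frames int, period, threshold float64) {
	_, span := otel.Tracer("ui").Start(ctx, "app.jank")
	defer span.End()

	span.SetAttributes(
		semconv.AppJankFrameCount(frames),
		semconv.AppJankPeriod(period),
		semconv.AppJankThreshold(threshold),
	)
}
```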
+// Namespace: artifact
+const (
+ // ArtifactAttestationFilenameKey is the attribute Key conforming to the
+ // "artifact.attestation.filename" semantic conventions. It represents the
+ // provenance filename of the built attestation which directly relates to the
+ // build artifact filename. This filename SHOULD accompany the artifact at
+ // publish time. See the [SLSA Relationship] specification for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "golang-binary-amd64-v0.1.0.attestation",
+ // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation",
+ // "file-name-package.tar.gz.intoto.json1"
+ //
+ // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
+ ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename")
+
+ // ArtifactAttestationHashKey is the attribute Key conforming to the
+ // "artifact.attestation.hash" semantic conventions. It represents the full
+ // [hash value (see glossary)], of the built attestation. Some envelopes in the
+ // [software attestation space] also refer to this as the **digest**.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408"
+ //
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
+ ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash")
+
+ // ArtifactAttestationIDKey is the attribute Key conforming to the
+ // "artifact.attestation.id" semantic conventions. It represents the id of the
+ // build [software attestation].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123"
+ //
+ // [software attestation]: https://slsa.dev/attestation-model
+ ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id")
+
+ // ArtifactFilenameKey is the attribute Key conforming to the
+ // "artifact.filename" semantic conventions. It represents the human readable
+ // file name of the artifact, typically generated during build and release
+ // processes. Often includes the package name and version in the file name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0",
+ // "release-1.tar.gz", "file-name-package.tar.gz"
+ // Note: This file name can also act as the [Package Name]
+ // in cases where the package ecosystem maps accordingly.
+ // Additionally, the artifact [can be published]
+ // for others, but that is not a guarantee.
+ //
+ // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model
+ // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain
+ ArtifactFilenameKey = attribute.Key("artifact.filename")
+
+ // ArtifactHashKey is the attribute Key conforming to the "artifact.hash"
+ // semantic conventions. It represents the full [hash value (see glossary)],
+ // often found in checksum.txt on a release of the artifact and used to verify
+ // package integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"
+ // Note: The specific algorithm used to create the cryptographic hash value is
+ // not defined. In situations where an artifact has multiple
+ // cryptographic hashes, it is up to the implementer to choose which
+ // hash value to set here; this should be the most secure hash algorithm
+ // that is suitable for the situation and consistent with the
+ // corresponding attestation. The implementer can then provide the other
+ // hash values through an additional set of attribute extensions as they
+ // deem necessary.
+ //
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ ArtifactHashKey = attribute.Key("artifact.hash")
+
+ // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl"
+	// semantic conventions. It represents the [Package URL] of the
+	// [package artifact], which provides a standard way to identify and locate
+	// the packaged artifact.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pkg:github/package-url/purl-spec@1209109710924",
+ // "pkg:npm/foo@12.12.3"
+ //
+ // [Package URL]: https://github.com/package-url/purl-spec
+ // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+ ArtifactPurlKey = attribute.Key("artifact.purl")
+
+ // ArtifactVersionKey is the attribute Key conforming to the "artifact.version"
+ // semantic conventions. It represents the version of the artifact.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v0.1.0", "1.2.1", "122691-build"
+ ArtifactVersionKey = attribute.Key("artifact.version")
+)
+
+// ArtifactAttestationFilename returns an attribute KeyValue conforming to the
+// "artifact.attestation.filename" semantic conventions. It represents the
+// provenance filename of the built attestation which directly relates to the
+// build artifact filename. This filename SHOULD accompany the artifact at
+// publish time. See the [SLSA Relationship] specification for more information.
+//
+// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
+func ArtifactAttestationFilename(val string) attribute.KeyValue {
+ return ArtifactAttestationFilenameKey.String(val)
+}
+
+// ArtifactAttestationHash returns an attribute KeyValue conforming to the
+// "artifact.attestation.hash" semantic conventions. It represents the full
+// [hash value (see glossary)], of the built attestation. Some envelopes in the
+// [software attestation space] also refer to this as the **digest**.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
+func ArtifactAttestationHash(val string) attribute.KeyValue {
+ return ArtifactAttestationHashKey.String(val)
+}
+
+// ArtifactAttestationID returns an attribute KeyValue conforming to the
+// "artifact.attestation.id" semantic conventions. It represents the id of the
+// build [software attestation].
+//
+// [software attestation]: https://slsa.dev/attestation-model
+func ArtifactAttestationID(val string) attribute.KeyValue {
+ return ArtifactAttestationIDKey.String(val)
+}
+
+// ArtifactFilename returns an attribute KeyValue conforming to the
+// "artifact.filename" semantic conventions. It represents the human readable
+// file name of the artifact, typically generated during build and release
+// processes. Often includes the package name and version in the file name.
+func ArtifactFilename(val string) attribute.KeyValue {
+ return ArtifactFilenameKey.String(val)
+}
+
+// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash"
+// semantic conventions. It represents the full [hash value (see glossary)],
+// often found in checksum.txt on a release of the artifact and used to verify
+// package integrity.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+func ArtifactHash(val string) attribute.KeyValue {
+ return ArtifactHashKey.String(val)
+}
+
+// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl"
+// semantic conventions. It represents the [Package URL] of the
+// [package artifact], which provides a standard way to identify and locate the
+// packaged artifact.
+//
+// [Package URL]: https://github.com/package-url/purl-spec
+// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+func ArtifactPurl(val string) attribute.KeyValue {
+ return ArtifactPurlKey.String(val)
+}
+
+// ArtifactVersion returns an attribute KeyValue conforming to the
+// "artifact.version" semantic conventions. It represents the version of the
+// artifact.
+func ArtifactVersion(val string) attribute.KeyValue {
+ return ArtifactVersionKey.String(val)
+}
+
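As a usage sketch for the `artifact.*` helpers above, attaching them to a hypothetical release-pipeline span; every value below is copied from the Examples lines in the attribute documentation:

```go
package releaseattrs

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

// describeArtifact tags a span with artifact metadata for a published build;
// the tracer and span names are illustrative only.
func describeArtifact(ctx context.Context) {
	_, span := otel.Tracer("release").Start(ctx, "artifact.publish")
	defer span.End()

	span.SetAttributes(
		semconv.ArtifactFilename("golang-binary-amd64-v0.1.0"),
		semconv.ArtifactVersion("v0.1.0"),
		semconv.ArtifactHash("9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"),
		semconv.ArtifactAttestationFilename("golang-binary-amd64-v0.1.0.attestation"),
	)
}
```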
+// Namespace: aws
+const (
+ // AWSBedrockGuardrailIDKey is the attribute Key conforming to the
+ // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+ // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+ // prevent unwanted behavior from model responses or user messages.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "sgi5gkybzqak"
+ //
+ // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+ AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id")
+
+ // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the
+ // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the
+ // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a
+ // bank of information that can be queried by models to generate more relevant
+ // responses and augment prompts.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "XFWUPB9PAW"
+ //
+ // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+ AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id")
+
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the
+ // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `AttributeDefinitions` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "AttributeName": "string", "AttributeType": "string" }"
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lives", "id"
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+ // of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
+ // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
+ // "string", "WriteCapacityUnits": number }"
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of the
+ // `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
+ // value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Users", "CatsTable"
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }"
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the
+ // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents
+ // the JSON-serialized value of each item of the `GlobalSecondaryIndexes`
+ // request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }"
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value of
+ // the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "name_to_group"
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the
+ // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
+ // the JSON-serialized value of the `ItemCollectionMetrics` response field.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
+ // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
+ // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
+ // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }"
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of the
+ // `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the
+ // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
+ // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes":
+ // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" } }"
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value of
+ // the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems,
+ // ProductReviews"
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents
+ // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents
+ // the value of the `ProvisionedThroughput.WriteCapacityUnits` request
+ // parameter.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+ // the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
+ // the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of the
+ // `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of the
+ // `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ALL_ATTRIBUTES", "COUNT"
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the number of
+ // items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+ // the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Users", "Cats"
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the value
+ // of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS cluster].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
+ //
+ // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container instance].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9"
+ //
+ // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch type]
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn"
+ // semantic conventions. It represents the ARN of a running [ECS task].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ //
+ // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the family name of
+ // the [ECS task definition] used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-family"
+ //
+ // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
+ // semantic conventions. It represents the ID of a running ECS task. The ID MUST
+ // be extracted from `task.arn`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision for
+ // the task definition used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "8", "26"
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+
+ // AWSExtendedRequestIDKey is the attribute Key conforming to the
+ // "aws.extended_request_id" semantic conventions. It represents the AWS
+ // extended request ID as returned in the response header `x-amz-id-2`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ="
+ AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id")
+
+ // AWSKinesisStreamNameKey is the attribute Key conforming to the
+ // "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+ // AWS Kinesis [stream] the request refers to. Corresponds to the
+ // `--stream-name` parameter of the Kinesis [describe-stream] operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-stream-name"
+ //
+ // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+ // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+ AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name")
+
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+ // ARN as provided on the `Context` passed to the function (the
+ // `Lambda-Runtime-Invoked-Function-Arn` header on the
+ // `/runtime/invocation/next` response, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"
+ // Note: This may be different from `cloud.resource_id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+
+ // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the
+ // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+ // of the [AWS Lambda EventSource Mapping]. An event source is mapped to a
+ // Lambda function. Its contents are read by Lambda and used to trigger a
+ // function. This isn't available in the Lambda execution context or the Lambda
+ // runtime environment. It is populated by the AWS SDK for each language when
+ // that UUID is present. Some of these operations are
+ // Create/Delete/Get/List/Update EventSourceMapping.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7"
+ //
+ // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+ AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+ // Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"
+ // Note: See the [log group ARN format documentation].
+ //
+ // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of the
+ // AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/aws/lambda/my-function", "opentelemetry-service"
+ // Note: Multiple log groups must be supported for cases like multi-container
+ // applications, where a single application has sidecar containers, and each
+ // writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+ // AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+ // Note: See the [log stream ARN format documentation]. One log group can
+ // contain several log streams, so these ARNs necessarily identify both a log
+ // group and a log stream.
+ //
+ // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+ // AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in the
+ // response headers `x-amzn-requestid`, `x-amzn-request-id` or
+ // `x-amz-request-id`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ"
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request refers to.
+ // Corresponds to the `--bucket` parameter of the [S3 API] operations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-bucket-name"
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source object
+ // (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "someFile.yml"
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3 API].
+ // This applies in particular to the following operations:
+ //
+ // - [copy-object]
+ // - [upload-part-copy]
+ //
+ //
+ // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean"
+ // Note: The `delete` attribute is only applicable to the [delete-object]
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3 API].
+ //
+ // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
+ // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3 API] operations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "someFile.yml"
+ // Note: The `key` attribute is applicable to all object-related S3 operations,
+ // i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // - [copy-object]
+ // - [delete-object]
+ // - [get-object]
+ // - [head-object]
+ // - [put-object]
+ // - [restore-object]
+ // - [select-object-content]
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [create-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
+ // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html
+ // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html
+ // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html
+ // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html
+ // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number of
+ // the part being uploaded in a multipart-upload operation. This is a positive
+ // integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the [upload-part]
+ // and [upload-part-copy] operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter of
+ // the
+ // [upload-part operation within the S3 API].
+ //
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id"
+ // semantic conventions. It represents the upload ID that identifies the
+ // multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"
+ // Note: The `upload_id` attribute applies to S3 multipart-upload operations and
+ // corresponds to the `--upload-id` parameter
+ // of the [S3 API] multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the
+ // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN
+ // of the secret stored in AWS Secrets Manager.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters"
+ AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn")
+
+ // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn"
+ // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon
+ // SNS [topic] is a logical access point that acts as a communication channel.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE"
+ //
+ // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+ AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn")
+
+ // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url"
+ // semantic conventions. It represents the URL of the AWS SQS Queue. It's a
+ // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is
+ // used to access the queue and perform actions on it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue"
+ AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url")
+
+ // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the
+ // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+ // of the AWS Step Functions Activity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting"
+ AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn")
+
+ // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the
+ // "aws.step_functions.state_machine.arn" semantic conventions. It represents
+ // the ARN of the AWS Step Functions State Machine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1"
+ AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn")
+)
+
+// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the
+// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+// prevent unwanted behavior from model responses or user messages.
+//
+// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+func AWSBedrockGuardrailID(val string) attribute.KeyValue {
+ return AWSBedrockGuardrailIDKey.String(val)
+}
+
+// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the
+// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of
+// information that can be queried by models to generate more relevant responses
+// and augment prompts.
+//
+// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue {
+ return AWSBedrockKnowledgeBaseIDKey.String(val)
+}
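+
+// Illustrative sketch (not part of the generated conventions): the Bedrock
+// helpers above can be combined on a single span. `span` is assumed to be a
+// trace.Span created elsewhere; the values are the documented example values.
+//
+//	span.SetAttributes(
+//		AWSBedrockGuardrailID("sgi5gkybzqak"),
+//		AWSBedrockKnowledgeBaseID("XFWUPB9PAW"),
+//	)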
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents
+// the JSON-serialized value of each item in the `AttributeDefinitions` request
+// field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the
+// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value
+// of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the
+// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
+// value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to
+// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of the
+// `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to
+// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
+// the JSON-serialized value of the `ItemCollectionMetrics` response field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to
+// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
+// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+// field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of the
+// `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request
+// parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming
+// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It
+// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request
+// parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
+// the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the
+// `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value of
+// the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
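+
+// Illustrative sketch (not part of the generated conventions): a DynamoDB
+// query span might carry several of the helpers above. `span` is assumed to be
+// a trace.Span created elsewhere; the values mirror the documented examples.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBLimit(10),
+//		AWSDynamoDBProjection("Title, Price, Color"),
+//	)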
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+// [ECS cluster].
+//
+// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container instance].
+//
+// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
+// [ECS task].
+//
+// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the family name of
+// the [ECS task definition] used to create the ECS task.
+//
+// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id"
+// semantic conventions. It represents the ID of a running ECS task. The ID MUST
+// be extracted from `task.arn`.
+func AWSECSTaskID(val string) attribute.KeyValue {
+ return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// AWSExtendedRequestID returns an attribute KeyValue conforming to the
+// "aws.extended_request_id" semantic conventions. It represents the AWS extended
+// request ID as returned in the response header `x-amz-id-2`.
+func AWSExtendedRequestID(val string) attribute.KeyValue {
+ return AWSExtendedRequestIDKey.String(val)
+}
+
+// AWSKinesisStreamName returns an attribute KeyValue conforming to the
+// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+// parameter of the Kinesis [describe-stream] operation.
+//
+// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+ return AWSKinesisStreamNameKey.String(val)
+}
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+// ARN as provided on the `Context` passed to the function (the
+// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
+// response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a
+// Lambda function. Its contents are read by Lambda and used to trigger a
+// function. This isn't available in the Lambda execution context or the Lambda
+// runtime environment. It is populated by the AWS SDK for each language when
+// that UUID is present. Some of these operations are
+// Create/Delete/Get/List/Update EventSourceMapping.
+//
+// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+ return AWSLambdaResourceMappingIDKey.String(val)
+}
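+
+// Illustrative sketch (not part of the generated conventions): Lambda
+// invocation attributes on a hypothetical span; the values are the documented
+// examples.
+//
+//	span.SetAttributes(
+//		AWSLambdaInvokedARN("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"),
+//		AWSLambdaResourceMappingID("587ad24b-03b9-4413-8202-bbd56b36e5b7"),
+//	)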
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+// AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id"
+// semantic conventions. It represents the AWS request ID as returned in the
+// response headers `x-amzn-requestid`, `x-amzn-request-id` or
+// `x-amz-request-id`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket"
+// semantic conventions. It represents the S3 bucket name the request refers to.
+// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object (in
+// the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
+// semantic conventions. It represents the delete request container that
+// specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
+// conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
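+
+// Illustrative sketch (not part of the generated conventions): an S3
+// upload-part span might combine the helpers above. `span` is assumed to be a
+// trace.Span created elsewhere; the values mirror the documented examples.
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//		AWSS3PartNumber(3456),
+//	)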
+
+// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the
+// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of
+// the secret stored in AWS Secrets Manager.
+func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue {
+ return AWSSecretsmanagerSecretARNKey.String(val)
+}
+
+// AWSSNSTopicARN returns an attribute KeyValue conforming to the
+// "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS
+// Topic. An Amazon SNS [topic] is a logical access point that acts as a
+// communication channel.
+//
+// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+func AWSSNSTopicARN(val string) attribute.KeyValue {
+ return AWSSNSTopicARNKey.String(val)
+}
+
+// AWSSQSQueueURL returns an attribute KeyValue conforming to the
+// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS
+// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service
+// (SQS) and is used to access the queue and perform actions on it.
+func AWSSQSQueueURL(val string) attribute.KeyValue {
+ return AWSSQSQueueURLKey.String(val)
+}
+
+// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the
+// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+// of the AWS Step Functions Activity.
+func AWSStepFunctionsActivityARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsActivityARNKey.String(val)
+}
+
+// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to
+// the "aws.step_functions.state_machine.arn" semantic conventions. It represents
+// the ARN of the AWS Step Functions State Machine.
+func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsStateMachineARNKey.String(val)
+}
+
+// Enum values for aws.ecs.launchtype
+var (
+ // Amazon EC2
+ // Stability: development
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // Amazon Fargate
+ // Stability: development
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
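+
+// Illustrative sketch (not part of the generated conventions): ECS resource
+// attributes, including one of the launch type enum values above, on a
+// hypothetical span; the values mirror the documented examples.
+//
+//	span.SetAttributes(
+//		AWSECSLaunchtypeFargate,
+//		AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//		AWSECSTaskID("10838bed-421f-43ef-870a-f43feacbbb5b"),
+//	)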
+
+// Namespace: azure
+const (
+ // AzureClientIDKey is the attribute Key conforming to the "azure.client.id"
+ // semantic conventions. It represents the unique identifier of the client
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1"
+ AzureClientIDKey = attribute.Key("azure.client.id")
+
+ // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.connection.mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode")
+
+ // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the
+ // "azure.cosmosdb.consistency.level" semantic conventions. It represents the
+ // account or request [consistency level].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong",
+ // "Session"
+ //
+ // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels
+ AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level")
+
+ // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to
+ // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It
+ // represents the list of regions contacted during operation in the order that
+ // they were contacted. If there is more than one region listed, it indicates
+ // that the operation was performed on multiple regions, i.e. a cross-regional
+ // call.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "North Central US", "Australia East", "Australia Southeast"
+ // Note: Region name matches the format of `displayName` in [Azure Location API]
+ //
+ // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location
+ AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions")
+
+ // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents
+ // the number of request units consumed by the operation.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 46.18, 1.0
+ AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge")
+
+ // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.request.body.size" semantic conventions. It represents the
+ // request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size")
+
+ // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents
+ // the cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1000, 1002
+ AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code")
+
+ // AzureResourceProviderNamespaceKey is the attribute Key conforming to the
+ // "azure.resource_provider.namespace" semantic conventions. It represents the
+ // [Azure Resource Provider Namespace] as recognized by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus"
+ //
+ // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+ AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace")
+
+ // AzureServiceRequestIDKey is the attribute Key conforming to the
+ // "azure.service.request.id" semantic conventions. It represents the unique
+ // identifier of the service request. It's generated by the Azure service and
+ // returned with the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00000000-0000-0000-0000-000000000000"
+ AzureServiceRequestIDKey = attribute.Key("azure.service.request.id")
+)
+
+// AzureClientID returns an attribute KeyValue conforming to the
+// "azure.client.id" semantic conventions. It represents the unique identifier of
+// the client instance.
+func AzureClientID(val string) attribute.KeyValue {
+ return AzureClientIDKey.String(val)
+}
+
+// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue
+// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic
+// conventions. It represents the list of regions contacted during operation in
+// the order that they were contacted. If there is more than one region listed,
+// it indicates that the operation was performed on multiple regions, i.e. a
+// cross-regional call.
+func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue {
+ return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val)
+}
+
+// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming
+// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It
+// represents the number of request units consumed by the operation.
+func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue {
+ return AzureCosmosDBOperationRequestChargeKey.Float64(val)
+}
+
+// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the
+// "azure.cosmosdb.request.body.size" semantic conventions. It represents the
+// request payload size in bytes.
+func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue {
+ return AzureCosmosDBRequestBodySizeKey.Int(val)
+}
+
+// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to
+// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It
+// represents the cosmos DB sub status code.
+func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue {
+ return AzureCosmosDBResponseSubStatusCodeKey.Int(val)
+}
+
+// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the
+// "azure.resource_provider.namespace" semantic conventions. It represents the
+// [Azure Resource Provider Namespace] as recognized by the client.
+//
+// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+func AzureResourceProviderNamespace(val string) attribute.KeyValue {
+ return AzureResourceProviderNamespaceKey.String(val)
+}
+
+// AzureServiceRequestID returns an attribute KeyValue conforming to the
+// "azure.service.request.id" semantic conventions. It represents the unique
+// identifier of the service request. It's generated by the Azure service and
+// returned with the response.
+func AzureServiceRequestID(val string) attribute.KeyValue {
+ return AzureServiceRequestIDKey.String(val)
+}
+
+// Enum values for azure.cosmosdb.connection.mode
+var (
+ // Gateway (HTTP) connection.
+ // Stability: development
+ AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection.
+ // Stability: development
+ AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct")
+)
+
+// Enum values for azure.cosmosdb.consistency.level
+var (
+ // Strong
+ // Stability: development
+ AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong")
+ // Bounded Staleness
+ // Stability: development
+ AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness")
+ // Session
+ // Stability: development
+ AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session")
+ // Eventual
+ // Stability: development
+ AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual")
+ // Consistent Prefix
+ // Stability: development
+ AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix")
+)
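+
+// Illustrative sketch (not part of the generated conventions): Cosmos DB
+// client instrumentation might combine the enum values and helpers above on a
+// hypothetical span; the request charge mirrors the documented example.
+//
+//	span.SetAttributes(
+//		AzureCosmosDBConnectionModeGateway,
+//		AzureCosmosDBConsistencyLevelSession,
+//		AzureCosmosDBOperationRequestCharge(46.18),
+//	)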
+
+// Namespace: browser
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99"
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.brands`).
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the "browser.language"
+ // semantic conventions. It represents the preferred language of the user using
+ // the browser.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "en", "en-US", "fr", "fr-FR"
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the browser is
+ // running on a mobile device.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be
+ // left unset.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the "browser.platform"
+ // semantic conventions. It represents the platform on which the browser is
+ // running.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Windows", "macOS", "Android"
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD
+ // be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the
+ // [W3C User-Agent Client Hints specification]. Note that some (but not all) of
+ // these values can overlap with values in the
+ // [`os.type` and `os.name` attributes]. However, for consistency, the values in
+ // the `browser.platform` attribute should capture the exact value that the user
+ // agent provides.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform
+ // [`os.type` and `os.name` attributes]: ./os.md
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands"
+// semantic conventions. It represents the array of brand name and version
+// separated by a space.
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred language
+// of the user using the browser.
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile"
+// semantic conventions. It represents a boolean that is true if the browser is
+// running on a mobile device.
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running.
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
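+
+// Illustrative only (editorial sketch): browser.* attributes are typically
+// recorded once per session, for example at span start (assuming a
+// trace.Tracer named tracer from go.opentelemetry.io/otel/trace, a
+// context.Context named ctx, and a hypothetical span name):
+//
+//	ctx, span := tracer.Start(ctx, "page.load", trace.WithAttributes(
+//		BrowserBrands("Chromium 99", "Chrome 99"),
+//		BrowserLanguage("en-US"),
+//		BrowserMobile(false),
+//		BrowserPlatform("macOS"),
+//	))
+//	defer span.End()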
+
+// Namespace: cassandra
+const (
+ // CassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "cassandra.consistency.level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from [CQL].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
+ CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
+
+ // CassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "cassandra.coordinator.dc" semantic conventions. It represents the data
+ // center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: us-west-2
+ CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
+
+ // CassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+ // coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
+ CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
+
+ // CassandraPageSizeKey is the attribute Key conforming to the
+ // "cassandra.page.size" semantic conventions. It represents the fetch size used
+ // for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 5000
+ CassandraPageSizeKey = attribute.Key("cassandra.page.size")
+
+ // CassandraQueryIdempotentKey is the attribute Key conforming to the
+ // "cassandra.query.idempotent" semantic conventions. It represents the whether
+ // or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
+
+ // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
+ // "cassandra.speculative_execution.count" semantic conventions. It represents
+ // the number of times a query was speculatively executed. Not set or `0` if the
+ // query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 2
+ CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
+)
+
+// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.dc" semantic conventions. It represents the data center
+// of the coordinating node for a query.
+func CassandraCoordinatorDC(val string) attribute.KeyValue {
+ return CassandraCoordinatorDCKey.String(val)
+}
+
+// CassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+// coordinating node for a query.
+func CassandraCoordinatorID(val string) attribute.KeyValue {
+ return CassandraCoordinatorIDKey.String(val)
+}
+
+// CassandraPageSize returns an attribute KeyValue conforming to the
+// "cassandra.page.size" semantic conventions. It represents the fetch size used
+// for paging, i.e. how many rows will be returned at once.
+func CassandraPageSize(val int) attribute.KeyValue {
+ return CassandraPageSizeKey.Int(val)
+}
+
+// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
+// "cassandra.query.idempotent" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func CassandraQueryIdempotent(val bool) attribute.KeyValue {
+ return CassandraQueryIdempotentKey.Bool(val)
+}
+
+// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
+// the "cassandra.speculative_execution.count" semantic conventions. It
+// represents the number of times a query was speculatively executed. Not set or
+// `0` if the query was not executed speculatively.
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return CassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// Enum values for cassandra.consistency.level
+var (
+ // All
+ // Stability: development
+ CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
+ // Each Quorum
+ // Stability: development
+ CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
+ // Quorum
+ // Stability: development
+ CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
+ // Local Quorum
+ // Stability: development
+ CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
+ // One
+ // Stability: development
+ CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
+ // Two
+ // Stability: development
+ CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
+ // Three
+ // Stability: development
+ CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
+ // Local One
+ // Stability: development
+ CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
+ // Any
+ // Stability: development
+ CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
+ // Serial
+ // Stability: development
+ CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
+ // Local Serial
+ // Stability: development
+ CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
+)
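+
+// Illustrative only (editorial sketch): a plain attribute slice built from the
+// constructors and consistency-level values above, relying only on the
+// attribute package this file already imports:
+//
+//	attrs := []attribute.KeyValue{
+//		CassandraConsistencyLevelLocalQuorum,
+//		CassandraCoordinatorDC("us-west-2"),
+//		CassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
+//		CassandraPageSize(5000),
+//		CassandraQueryIdempotent(true),
+//	}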
+
+// Namespace: cicd
+const (
+ // CICDPipelineActionNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.action.name" semantic conventions. It represents the kind of
+ // action a pipeline run is performing.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BUILD", "RUN", "SYNC"
+ CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+ // CICDPipelineNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.name" semantic conventions. It represents the human readable
+ // name of the pipeline within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Build and Test", "Lint", "Deploy Go Project",
+ // "deploy_to_environment"
+ CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+ // CICDPipelineResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.result" semantic conventions. It represents the result of a
+ // pipeline run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
+
+ // CICDPipelineRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.id" semantic conventions. It represents the unique
+ // identifier of a pipeline run within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "120912"
+ CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
+
+ // CICDPipelineRunStateKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline
+ // run goes through these states during its lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pending", "executing", "finalizing"
+ CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
+
+ // CICDPipelineRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+ // the pipeline run, providing the complete address in order to locate and
+ // identify the pipeline run.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full")
+
+ // CICDPipelineTaskNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.name" semantic conventions. It represents the human
+ // readable name of a task within a pipeline. Task here most closely aligns with
+ // a [computing process] in a pipeline. Other terms for tasks include commands,
+ // steps, and procedures.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary"
+ //
+ // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+ CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name")
+
+ // CICDPipelineTaskRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+ // identifier of a task run within a pipeline.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "12097"
+ CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id")
+
+ // CICDPipelineTaskRunResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.result" semantic conventions. It represents the
+ // result of a task run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result")
+
+ // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+ // [URL] of the pipeline task run, providing the complete address in order to
+ // locate and identify the pipeline task run.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full")
+
+ // CICDPipelineTaskTypeKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.type" semantic conventions. It represents the type of the
+ // task within a pipeline.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "build", "test", "deploy"
+ CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type")
+
+ // CICDSystemComponentKey is the attribute Key conforming to the
+ // "cicd.system.component" semantic conventions. It represents the name of a
+ // component of the CICD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "controller", "scheduler", "agent"
+ CICDSystemComponentKey = attribute.Key("cicd.system.component")
+
+ // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id"
+ // semantic conventions. It represents the unique identifier of a worker within
+ // a CICD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "abc123", "10.0.1.2", "controller"
+ CICDWorkerIDKey = attribute.Key("cicd.worker.id")
+
+ // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name"
+ // semantic conventions. It represents the name of a worker within a CICD
+ // system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "agent-abc", "controller", "Ubuntu LTS"
+ CICDWorkerNameKey = attribute.Key("cicd.worker.name")
+
+ // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state"
+ // semantic conventions. It represents the state of a CICD worker / agent.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "idle", "busy", "down"
+ CICDWorkerStateKey = attribute.Key("cicd.worker.state")
+
+ // CICDWorkerURLFullKey is the attribute Key conforming to the
+ // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+ // worker, providing the complete address in order to locate and identify the
+ // worker.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://cicd.example.org/worker/abc123"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full")
+)
+
+// CICDPipelineName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.name" semantic conventions. It represents the human readable
+// name of the pipeline within a CI/CD system.
+func CICDPipelineName(val string) attribute.KeyValue {
+ return CICDPipelineNameKey.String(val)
+}
+
+// CICDPipelineRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.id" semantic conventions. It represents the unique
+// identifier of a pipeline run within a CI/CD system.
+func CICDPipelineRunID(val string) attribute.KeyValue {
+ return CICDPipelineRunIDKey.String(val)
+}
+
+// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+// the pipeline run, providing the complete address in order to locate and
+// identify the pipeline run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineRunURLFullKey.String(val)
+}
+
+// CICDPipelineTaskName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.name" semantic conventions. It represents the human
+// readable name of a task within a pipeline. Task here most closely aligns with
+// a [computing process] in a pipeline. Other terms for tasks include commands,
+// steps, and procedures.
+//
+// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+func CICDPipelineTaskName(val string) attribute.KeyValue {
+ return CICDPipelineTaskNameKey.String(val)
+}
+
+// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+// identifier of a task run within a pipeline.
+func CICDPipelineTaskRunID(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunIDKey.String(val)
+}
+
+// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+// [URL] of the pipeline task run, providing the complete address in order to
+// locate and identify the pipeline task run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunURLFullKey.String(val)
+}
+
+// CICDSystemComponent returns an attribute KeyValue conforming to the
+// "cicd.system.component" semantic conventions. It represents the name of a
+// component of the CICD system.
+func CICDSystemComponent(val string) attribute.KeyValue {
+ return CICDSystemComponentKey.String(val)
+}
+
+// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id"
+// semantic conventions. It represents the unique identifier of a worker within a
+// CICD system.
+func CICDWorkerID(val string) attribute.KeyValue {
+ return CICDWorkerIDKey.String(val)
+}
+
+// CICDWorkerName returns an attribute KeyValue conforming to the
+// "cicd.worker.name" semantic conventions. It represents the name of a worker
+// within a CICD system.
+func CICDWorkerName(val string) attribute.KeyValue {
+ return CICDWorkerNameKey.String(val)
+}
+
+// CICDWorkerURLFull returns an attribute KeyValue conforming to the
+// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+// worker, providing the complete address in order to locate and identify the
+// worker.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDWorkerURLFull(val string) attribute.KeyValue {
+ return CICDWorkerURLFullKey.String(val)
+}
+
+// Enum values for cicd.pipeline.action.name
+var (
+ // The pipeline run is executing a build.
+ // Stability: development
+ CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD")
+ // The pipeline run is executing.
+ // Stability: development
+ CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN")
+ // The pipeline run is executing a sync.
+ // Stability: development
+ CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC")
+)
+
+// Enum values for cicd.pipeline.result
+var (
+ // The pipeline run finished successfully.
+ // Stability: development
+ CICDPipelineResultSuccess = CICDPipelineResultKey.String("success")
+ // The pipeline run did not finish successfully, eg. due to a compile error or a
+ // failing test. Such failures are usually detected by non-zero exit codes of
+ // the tools executed in the pipeline run.
+ // Stability: development
+ CICDPipelineResultFailure = CICDPipelineResultKey.String("failure")
+ // The pipeline run failed due to an error in the CICD system, eg. due to the
+ // worker being killed.
+ // Stability: development
+ CICDPipelineResultError = CICDPipelineResultKey.String("error")
+ // A timeout caused the pipeline run to be interrupted.
+ // Stability: development
+ CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout")
+ // The pipeline run was cancelled, eg. by a user manually cancelling the
+ // pipeline run.
+ // Stability: development
+ CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation")
+ // The pipeline run was skipped, eg. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineResultSkip = CICDPipelineResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.run.state
+var (
+ // The run pending state spans from the event triggering the pipeline run until
+ // the execution of the run starts (eg. time spent in a queue, provisioning
+ // agents, creating run resources).
+ //
+ // Stability: development
+ CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending")
+ // The executing state spans the execution of any run tasks (eg. build, test).
+ // Stability: development
+ CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing")
+ // The finalizing state spans from when the run has finished executing (eg.
+ // cleanup of run resources).
+ // Stability: development
+ CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing")
+)
+
+// Enum values for cicd.pipeline.task.run.result
+var (
+ // The task run finished successfully.
+ // Stability: development
+ CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success")
+ // The task run did not finish successfully, eg. due to a compile error or a
+ // failing test. Such failures are usually detected by non-zero exit codes of
+ // the tools executed in the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure")
+ // The task run failed due to an error in the CICD system, eg. due to the worker
+ // being killed.
+ // Stability: development
+ CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error")
+ // A timeout caused the task run to be interrupted.
+ // Stability: development
+ CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout")
+ // The task run was cancelled, eg. by a user manually cancelling the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation")
+ // The task run was skipped, eg. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.task.type
+var (
+ // build
+ // Stability: development
+ CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build")
+ // test
+ // Stability: development
+ CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test")
+ // deploy
+ // Stability: development
+ CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy")
+)
+
+// Enum values for cicd.worker.state
+var (
+ // The worker is not performing work for the CICD system. It is available to the
+ // CICD system to perform work on (online / idle).
+ // Stability: development
+ CICDWorkerStateAvailable = CICDWorkerStateKey.String("available")
+ // The worker is performing work for the CICD system.
+ // Stability: development
+ CICDWorkerStateBusy = CICDWorkerStateKey.String("busy")
+ // The worker is not available to the CICD system (disconnected / down).
+ // Stability: development
+ CICDWorkerStateOffline = CICDWorkerStateKey.String("offline")
+)
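+
+// Illustrative only (editorial sketch): describing a pipeline run with the
+// constructors and enum values above, for example on a span (assuming a
+// trace.Span named span):
+//
+//	span.SetAttributes(
+//		CICDPipelineName("Build and Test"),
+//		CICDPipelineRunID("120912"),
+//		CICDPipelineRunStateExecuting,
+//		CICDWorkerStateBusy,
+//	)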
+
+// Namespace: client
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.address` SHOULD represent the client address behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port" semantic
+ // conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.port` SHOULD represent the client port behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the "client.address"
+// semantic conventions. It represents the client address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
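+
+// Illustrative only (editorial sketch): recording the peer on a server-side
+// span (assuming a trace.Span named span):
+//
+//	span.SetAttributes(
+//		ClientAddress("10.1.2.80"),
+//		ClientPort(65123),
+//	)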
+
+// Namespace: cloud
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id"
+ // semantic conventions. It represents the cloud account ID the resource is
+ // assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "111111111111", "opentelemetry"
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to increase
+ // availability. Availability zone represents the zone where the resource is
+ // running.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-east-1c"
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic
+ // conventions. It represents the geographical region within a cloud provider.
+ // When associated with a resource, this attribute specifies the region where
+ // the resource operates. When calling services or APIs deployed on a cloud,
+ // this attribute identifies the region where the called destination is
+ // deployed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1", "us-east-1"
+ // Note: Refer to your provider's docs to see the available regions, for example
+ // [Alibaba Cloud regions], [AWS regions], [Azure regions],
+ // [Google Cloud regions], or [Tencent Cloud regions].
+ //
+ // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm
+ // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/
+ // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/
+ // [Google Cloud regions]: https://cloud.google.com/about/locations
+ // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id"
+ // semantic conventions. It represents the cloud provider-specific native
+ // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a
+ // [fully qualified resource ID] on Azure, a [full resource name] on GCP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function",
+ // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID",
+ // "/subscriptions//resourceGroups/
+ // /providers/Microsoft.Web/sites//functions/"
+ // Note: On some cloud providers, it may not be possible to determine the full
+ // ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud provider.
+ // The following well-known definitions MUST be used if you set this attribute
+ // and they apply:
+ //
+ // - **AWS Lambda:** The function [ARN].
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias suffix]
+ // with the resolved function version, as the same runtime instance may be
+ // invocable with
+ // multiple different aliases.
+ // - **GCP:** The [URI of the resource]
+ // - **Azure:** The [Fully Qualified Resource ID] of the invoked function,
+ // *not* the function app, having the form
+ //
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function app
+ // can host multiple functions that would usually share
+ // a TracerProvider.
+ //
+ //
+ // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ // [full resource name]: https://google.aip.dev/122#full-resource-names
+ // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
+ // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
+ // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the "cloud.region"
+// semantic conventions. It represents the geographical region within a cloud
+// provider. When associated with a resource, this attribute specifies the region
+// where the resource operates. When calling services or APIs deployed on a
+// cloud, this attribute identifies the region where the called destination is
+// deployed.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name]
+// on GCP).
+//
+// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+// [full resource name]: https://google.aip.dev/122#full-resource-names
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// Enum values for cloud.platform
+var (
+ // Alibaba Cloud Elastic Compute Service
+ // Stability: development
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ // Stability: development
+ CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ // Stability: development
+ CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ // Stability: development
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ // Stability: development
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ // Stability: development
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ // Stability: development
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ // Stability: development
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ // Stability: development
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ // Stability: development
+ CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ // Stability: development
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm")
+ // Azure Container Apps
+ // Stability: development
+ CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps")
+ // Azure Container Instances
+ // Stability: development
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances")
+ // Azure Kubernetes Service
+ // Stability: development
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks")
+ // Azure Functions
+ // Stability: development
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions")
+ // Azure App Service
+ // Stability: development
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service")
+ // Azure Red Hat OpenShift
+ // Stability: development
+ CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift")
+ // Google Bare Metal Solution (BMS)
+ // Stability: development
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ // Stability: development
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ // Stability: development
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ // Stability: development
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ // Stability: development
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ // Stability: development
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ // Stability: development
+ CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ // Stability: development
+ CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Compute on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute")
+ // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ // Stability: development
+ CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ // Stability: development
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ // Stability: development
+ CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// Enum values for cloud.provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ // Stability: development
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ // Stability: development
+ CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud")
+ // Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud")
+ // Tencent Cloud
+ // Stability: development
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
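+
+// Illustrative only (editorial sketch): cloud.* attributes usually describe the
+// environment the telemetry comes from, so they are commonly set on a resource
+// (assuming the go.opentelemetry.io/otel/sdk/resource package and a
+// context.Context named ctx):
+//
+//	res, err := resource.New(ctx, resource.WithAttributes(
+//		CloudProviderAWS,
+//		CloudPlatformAWSLambda,
+//		CloudRegion("us-east-1"),
+//		CloudAvailabilityZone("us-east-1c"),
+//		CloudAccountID("111111111111"),
+//	))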
+
+// Namespace: cloudevents
+const (
+ // CloudEventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the [event_id]
+ // uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001"
+ //
+ // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+ CloudEventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudEventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the [source]
+ // identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123",
+ // "my-service"
+ //
+ // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+ CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudEventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents specification] which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ //
+ // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+ CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudEventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the [subject]
+ // of the event in the context of the event producer (identified by source).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: mynewfile.jpg
+ //
+ // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+ CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudEventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the [event_type]
+ // contains a value describing the type of event related to the originating
+ // occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2"
+ //
+ // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+ CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudEventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the [event_id]
+// uniquely identifies the event.
+//
+// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+func CloudEventsEventID(val string) attribute.KeyValue {
+ return CloudEventsEventIDKey.String(val)
+}
+
+// CloudEventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the [source]
+// identifies the context in which an event happened.
+//
+// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+func CloudEventsEventSource(val string) attribute.KeyValue {
+ return CloudEventsEventSourceKey.String(val)
+}
+
+// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the
+// "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents specification] which the event uses.
+//
+// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+func CloudEventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudEventsEventSpecVersionKey.String(val)
+}
+
+// CloudEventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the [subject]
+// of the event in the context of the event producer (identified by source).
+//
+// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+func CloudEventsEventSubject(val string) attribute.KeyValue {
+ return CloudEventsEventSubjectKey.String(val)
+}
+
+// CloudEventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the [event_type]
+// contains a value describing the type of event related to the originating
+// occurrence.
+//
+// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+func CloudEventsEventType(val string) attribute.KeyValue {
+ return CloudEventsEventTypeKey.String(val)
+}
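+
+// Illustrative only (editorial sketch): annotating a messaging or processing
+// span with CloudEvents context (assuming a trace.Span named span):
+//
+//	span.SetAttributes(
+//		CloudEventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudEventsEventSource("my-service"),
+//		CloudEventsEventSpecVersion("1.0"),
+//		CloudEventsEventType("com.example.object.deleted.v2"),
+//	)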
+
+// Namespace: cloudfoundry
+const (
+ // CloudFoundryAppIDKey is the attribute Key conforming to the
+ // "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.application_id`. This is the same value as
+ // reported by `cf app --guid`.
+ CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id")
+
+ // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+ // of the application instance. 0 when just one instance is active.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0", "1"
+ // Note: CloudFoundry defines the `instance_id` in the
+ // [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the application instance index for applications
+ // deployed on the runtime.
+ //
+ // Application instrumentation should use the value from environment
+ // variable `CF_INSTANCE_INDEX`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id")
+
+ // CloudFoundryAppNameKey is the attribute Key conforming to the
+ // "cloudfoundry.app.name" semantic conventions. It represents the name of the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-app-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.application_name`. This is the same value
+ // as reported by `cf apps`.
+ CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name")
+
+ // CloudFoundryOrgIDKey is the attribute Key conforming to the
+ // "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+ // CloudFoundry org the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.org_id`. This is the same value as
+ // reported by `cf org --guid`.
+ CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id")
+
+ // CloudFoundryOrgNameKey is the attribute Key conforming to the
+ // "cloudfoundry.org.name" semantic conventions. It represents the name of the
+ // CloudFoundry organization the app is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-org-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.org_name`. This is the same value as
+ // reported by `cf orgs`.
+ CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name")
+
+ // CloudFoundryProcessIDKey is the attribute Key conforming to the
+ // "cloudfoundry.process.id" semantic conventions. It represents the UID
+ // identifying the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to
+ // `VCAP_APPLICATION.app_id` for applications deployed to the runtime.
+ // For system components, this could be the actual PID.
+ CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id")
+
+ // CloudFoundryProcessTypeKey is the attribute Key conforming to the
+ // "cloudfoundry.process.type" semantic conventions. It represents the type of
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "web"
+ // Note: CloudFoundry applications can consist of multiple jobs. Usually the
+ // main process will be of type `web`. There can be additional background
+ // tasks or side-cars with different process types.
+ CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type")
+
+ // CloudFoundrySpaceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+ // CloudFoundry space the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.space_id`. This is the same value as
+ // reported by `cf space --guid`.
+ CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id")
+
+ // CloudFoundrySpaceNameKey is the attribute Key conforming to the
+ // "cloudfoundry.space.name" semantic conventions. It represents the name of the
+ // CloudFoundry space the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-space-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.space_name`. This is the same value as
+ // reported by `cf spaces`.
+ CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name")
+
+ // CloudFoundrySystemIDKey is the attribute Key conforming to the
+ // "cloudfoundry.system.id" semantic conventions. It represents a guid or
+ // another name describing the event source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cf/gorouter"
+ // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the component name, e.g. "gorouter", for
+ // CloudFoundry components.
+ //
+ // When system components are instrumented, values from the
+ // [Bosh spec]
+ // should be used. The `system.id` should be set to
+ // `spec.deployment/spec.name`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+ CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id")
+
+ // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+ // describing the concrete instance of the event source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: CloudFoundry defines the `instance_id` in the
+ // [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the vm id for CloudFoundry components.
+ //
+ // When system components are instrumented, values from the
+ // [Bosh spec]
+ // should be used. The `system.instance.id` should be set to `spec.id`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+ CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id")
+)
+
+// CloudFoundryAppID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+// application.
+func CloudFoundryAppID(val string) attribute.KeyValue {
+ return CloudFoundryAppIDKey.String(val)
+}
+
+// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+// of the application instance. 0 when just one instance is active.
+func CloudFoundryAppInstanceID(val string) attribute.KeyValue {
+ return CloudFoundryAppInstanceIDKey.String(val)
+}
+
+// CloudFoundryAppName returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.name" semantic conventions. It represents the name of the
+// application.
+func CloudFoundryAppName(val string) attribute.KeyValue {
+ return CloudFoundryAppNameKey.String(val)
+}
+
+// CloudFoundryOrgID returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+// CloudFoundry org the application is running in.
+func CloudFoundryOrgID(val string) attribute.KeyValue {
+ return CloudFoundryOrgIDKey.String(val)
+}
+
+// CloudFoundryOrgName returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.name" semantic conventions. It represents the name of the
+// CloudFoundry organization the app is running in.
+func CloudFoundryOrgName(val string) attribute.KeyValue {
+ return CloudFoundryOrgNameKey.String(val)
+}
+
+// CloudFoundryProcessID returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.id" semantic conventions. It represents the UID
+// identifying the process.
+func CloudFoundryProcessID(val string) attribute.KeyValue {
+ return CloudFoundryProcessIDKey.String(val)
+}
+
+// CloudFoundryProcessType returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.type" semantic conventions. It represents the type of
+// process.
+func CloudFoundryProcessType(val string) attribute.KeyValue {
+ return CloudFoundryProcessTypeKey.String(val)
+}
+
+// CloudFoundrySpaceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceID(val string) attribute.KeyValue {
+ return CloudFoundrySpaceIDKey.String(val)
+}
+
+// CloudFoundrySpaceName returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.name" semantic conventions. It represents the name of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceName(val string) attribute.KeyValue {
+ return CloudFoundrySpaceNameKey.String(val)
+}
+
+// CloudFoundrySystemID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.id" semantic conventions. It represents a guid or another
+// name describing the event source.
+func CloudFoundrySystemID(val string) attribute.KeyValue {
+ return CloudFoundrySystemIDKey.String(val)
+}
+
+// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+// describing the concrete instance of the event source.
+func CloudFoundrySystemInstanceID(val string) attribute.KeyValue {
+ return CloudFoundrySystemInstanceIDKey.String(val)
+}
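+
+// Illustrative only (editorial sketch): cloudfoundry.* attributes are resource
+// attributes sourced from VCAP_APPLICATION, so they are typically attached to a
+// resource (assuming the go.opentelemetry.io/otel/sdk/resource package and a
+// context.Context named ctx):
+//
+//	res, err := resource.New(ctx, resource.WithAttributes(
+//		CloudFoundryAppID("218fc5a9-a5f1-4b54-aa05-46717d0ab26d"),
+//		CloudFoundryAppName("my-app-name"),
+//		CloudFoundryOrgName("my-org-name"),
+//		CloudFoundrySpaceName("my-space-name"),
+//	))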
+
+// Namespace: code
+const (
+ // CodeColumnNumberKey is the attribute Key conforming to the
+ // "code.column.number" semantic conventions. It represents the column number in
+ // `code.file.path` best representing the operation. It SHOULD point within the
+ // code unit named in `code.function.name`. This attribute MUST NOT be used on
+ // the Profile signal since the data is already captured in 'message Line'. This
+ // constraint is imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ CodeColumnNumberKey = attribute.Key("code.column.number")
+
+ // CodeFilePathKey is the attribute Key conforming to the "code.file.path"
+ // semantic conventions. It represents the source code file name that identifies
+ // the code unit as uniquely as possible (preferably an absolute file path).
+ // This attribute MUST NOT be used on the Profile signal since the data is
+ // already captured in 'message Function'. This constraint is imposed to prevent
+ // redundancy and maintain data integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: /usr/local/MyApplication/content_root/app/index.php
+ CodeFilePathKey = attribute.Key("code.file.path")
+
+ // CodeFunctionNameKey is the attribute Key conforming to the
+ // "code.function.name" semantic conventions. It represents the method or
+ // function fully-qualified name without arguments. The value should fit the
+ // natural representation of the language runtime, which is also likely the same
+ // used within `code.stacktrace` attribute value. This attribute MUST NOT be
+ // used on the Profile signal since the data is already captured in 'message
+ // Function'. This constraint is imposed to prevent redundancy and maintain data
+ // integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "com.example.MyHttpService.serveRequest",
+ // "GuzzleHttp\Client::transfer", "fopen"
+ // Note: Values and format depends on each language runtime, thus it is
+ // impossible to provide an exhaustive list of examples.
+ // The values are usually the same (or prefixes of) the ones found in native
+ // stack trace representation stored in
+ // `code.stacktrace` without information on arguments.
+ //
+ // Examples:
+ //
+ // - Java method: `com.example.MyHttpService.serveRequest`
+ // - Java anonymous class method: `com.mycompany.Main$1.myMethod`
+ // - Java lambda method:
+ // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod`
+ // - PHP function: `GuzzleHttp\Client::transfer`
+ // - Go function: `github.com/my/repo/pkg.foo.func5`
+ // - Elixir: `OpenTelemetry.Ctx.new`
+ // - Erlang: `opentelemetry_ctx:new`
+ // - Rust: `playground::my_module::my_cool_func`
+ // - C function: `fopen`
+ CodeFunctionNameKey = attribute.Key("code.function.name")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.line.number"
+ // semantic conventions. It represents the line number in `code.file.path` best
+ // representing the operation. It SHOULD point within the code unit named in
+ // `code.function.name`. This attribute MUST NOT be used on the Profile signal
+ // since the data is already captured in 'message Line'. This constraint is
+ // imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ CodeLineNumberKey = attribute.Key("code.line.number")
+
+ // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace"
+ // semantic conventions. It represents a stacktrace as a string in the natural
+ // representation for the language runtime. The representation is identical to
+ // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile
+ // signal since the data is already captured in 'message Location'. This
+ // constraint is imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+ // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+ // com.example.GenerateTrace.main(GenerateTrace.java:5)
+ //
+ // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumnNumber returns an attribute KeyValue conforming to the
+// "code.column.number" semantic conventions. It represents the column number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeColumnNumber(val int) attribute.KeyValue {
+ return CodeColumnNumberKey.Int(val)
+}
+
+// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path"
+// semantic conventions. It represents the source code file name that identifies
+// the code unit as uniquely as possible (preferably an absolute file path). This
+// attribute MUST NOT be used on the Profile signal since the data is already
+// captured in 'message Function'. This constraint is imposed to prevent
+// redundancy and maintain data integrity.
+func CodeFilePath(val string) attribute.KeyValue {
+ return CodeFilePathKey.String(val)
+}
+
+// CodeFunctionName returns an attribute KeyValue conforming to the
+// "code.function.name" semantic conventions. It represents the method or
+// function fully-qualified name without arguments. The value should fit the
+// natural representation of the language runtime, which is also likely the same
+// used within `code.stacktrace` attribute value. This attribute MUST NOT be used
+// on the Profile signal since the data is already captured in 'message
+// Function'. This constraint is imposed to prevent redundancy and maintain data
+// integrity.
+func CodeFunctionName(val string) attribute.KeyValue {
+ return CodeFunctionNameKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the
+// "code.line.number" semantic conventions. It represents the line number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a string
+// in the natural representation for the language runtime. The representation is
+// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the
+// Profile signal since the data is already captured in 'message Location'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+//
+// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
+
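+// Illustrative usage sketch, not part of the generated conventions: it shows
+// how the code.* helpers above compose into a []attribute.KeyValue that could
+// be attached to a span or log record. The function and its literal values are
+// hypothetical.
+func exampleCodeAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		// Fully-qualified function name in the runtime's natural representation.
+		CodeFunctionName("github.com/my/repo/pkg.foo.func5"),
+		// Absolute source file path plus the line and column of the operation.
+		CodeFilePath("/srv/app/pkg/foo.go"),
+		CodeLineNumber(42),
+		CodeColumnNumber(17),
+	}
+}
+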
+// Namespace: container
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used to
+ // run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol"
+ // Note: If using embedded credentials or sensitive data, it is recommended to
+ // remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) run by the
+ // container.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol", "--config", "config.yaml"
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full command
+ // run by the container as a single string representing the full command.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol --config config.yaml"
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCSIPluginNameKey is the attribute Key conforming to the
+ // "container.csi.plugin.name" semantic conventions. It represents the name of
+ // the CSI ([Container Storage Interface]) plugin used by the volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pd.csi.storage.gke.io"
+ // Note: This can sometimes be referred to as a "driver" in CSI implementations.
+ // This should represent the `name` field of the GetPluginInfo RPC.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name")
+
+ // ContainerCSIVolumeIDKey is the attribute Key conforming to the
+ // "container.csi.volume.id" semantic conventions. It represents the unique
+ // volume ID returned by the CSI ([Container Storage Interface]) plugin.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk"
+ // Note: This can sometimes be referred to as a "volume handle" in CSI
+ // implementations. This should represent the `Volume.volume_id` field in CSI
+ // spec.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id" semantic
+ // conventions. It represents the container ID. Usually a UUID, as for example
+ // used to [identify Docker containers]. The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a3bf90e006b2"
+ //
+ // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime specific
+ // image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f"
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect [API]
+ // endpoint.
+ // K8s defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`
+ // .
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ //
+ // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of the
+ // image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gcr.io/opentelemetry/operator"
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the repo
+ // digests of the container image as provided by the container runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+ // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ // Note: [Docker] and [CRI] report those under the `RepoDigests` field.
+ //
+ // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image Inspect]. Should be only
+ // the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v1.27.1", "3.5.7-0"
+ //
+ // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-autoconf"
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeDescriptionKey is the attribute Key conforming to the
+ // "container.runtime.description" semantic conventions. It represents a
+ // description about the runtime which could include, for example, details about
+ // the CRI/API version being used or other customisations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker://19.3.1 - CRI: 1.22.0"
+ ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description")
+
+ // ContainerRuntimeNameKey is the attribute Key conforming to the
+ // "container.runtime.name" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker", "containerd", "rkt"
+ ContainerRuntimeNameKey = attribute.Key("container.runtime.name")
+
+ // ContainerRuntimeVersionKey is the attribute Key conforming to the
+ // "container.runtime.version" semantic conventions. It represents the version
+ // of the runtime of this process, as returned by the runtime without
+ // modification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0.0
+ ContainerRuntimeVersionKey = attribute.Key("container.runtime.version")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string representing the full command.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCSIPluginName returns an attribute KeyValue conforming to the
+// "container.csi.plugin.name" semantic conventions. It represents the name of
+// the CSI ([Container Storage Interface]) plugin used by the volume.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+ return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+ return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime specific
+// image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerLabel returns an attribute KeyValue conforming to the
+// "container.label" semantic conventions. It represents the container labels,
+// `<key>` being the label name, the value being the label value.
+func ContainerLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("container.label."+key, val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntimeDescription returns an attribute KeyValue conforming to the
+// "container.runtime.description" semantic conventions. It represents a
+// description about the runtime which could include, for example, details about
+// the CRI/API version being used or other customisations.
+func ContainerRuntimeDescription(val string) attribute.KeyValue {
+ return ContainerRuntimeDescriptionKey.String(val)
+}
+
+// ContainerRuntimeName returns an attribute KeyValue conforming to the
+// "container.runtime.name" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntimeName(val string) attribute.KeyValue {
+ return ContainerRuntimeNameKey.String(val)
+}
+
+// ContainerRuntimeVersion returns an attribute KeyValue conforming to the
+// "container.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ContainerRuntimeVersion(val string) attribute.KeyValue {
+ return ContainerRuntimeVersionKey.String(val)
+}
+
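+// Illustrative usage sketch, not part of the generated conventions: fixed
+// container.* helpers combined with the templated ContainerLabel helper, whose
+// key suffix is supplied by the caller. The label name "env" and all values are
+// hypothetical.
+func exampleContainerAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ContainerName("opentelemetry-autoconf"),
+		ContainerImageName("gcr.io/opentelemetry/operator"),
+		// Variadic helper producing a single string-slice attribute.
+		ContainerImageTags("v1.27.1", "3.5.7-0"),
+		// Templated attribute: emits "container.label.env" = "prod".
+		ContainerLabel("env", "prod"),
+	}
+}
+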
+// Namespace: cpu
+const (
+ // CPULogicalNumberKey is the attribute Key conforming to the
+ // "cpu.logical_number" semantic conventions. It represents the logical CPU
+ // number [0..n-1].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ CPULogicalNumberKey = attribute.Key("cpu.logical_number")
+
+ // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic
+ // conventions. It represents the mode of the CPU.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "user", "system"
+ CPUModeKey = attribute.Key("cpu.mode")
+)
+
+// CPULogicalNumber returns an attribute KeyValue conforming to the
+// "cpu.logical_number" semantic conventions. It represents the logical CPU
+// number [0..n-1].
+func CPULogicalNumber(val int) attribute.KeyValue {
+ return CPULogicalNumberKey.Int(val)
+}
+
+// Enum values for cpu.mode
+var (
+ // User
+ // Stability: development
+ CPUModeUser = CPUModeKey.String("user")
+ // System
+ // Stability: development
+ CPUModeSystem = CPUModeKey.String("system")
+ // Nice
+ // Stability: development
+ CPUModeNice = CPUModeKey.String("nice")
+ // Idle
+ // Stability: development
+ CPUModeIdle = CPUModeKey.String("idle")
+ // IO Wait
+ // Stability: development
+ CPUModeIOWait = CPUModeKey.String("iowait")
+ // Interrupt
+ // Stability: development
+ CPUModeInterrupt = CPUModeKey.String("interrupt")
+ // Steal
+ // Stability: development
+ CPUModeSteal = CPUModeKey.String("steal")
+ // Kernel
+ // Stability: development
+ CPUModeKernel = CPUModeKey.String("kernel")
+)
+
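+// Illustrative usage sketch, not part of the generated conventions: enum
+// members such as CPUModeUser are ready-made attribute.KeyValue values and mix
+// freely with the plain helpers. The function is hypothetical.
+func exampleCPUAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CPULogicalNumber(1),
+		CPUModeUser, // equivalent to CPUModeKey.String("user")
+	}
+}
+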
+// Namespace: db
+const (
+ // DBClientConnectionPoolNameKey is the attribute Key conforming to the
+ // "db.client.connection.pool.name" semantic conventions. It represents the name
+ // of the connection pool; unique within the instrumented application. In case
+ // the connection pool implementation doesn't provide a name, instrumentation
+ // SHOULD use a combination of parameters that would make the name unique, for
+ // example, combining attributes `server.address`, `server.port`, and
+ // `db.namespace`, formatted as `server.address:server.port/db.namespace`.
+ // Instrumentations that generate connection pool name following different
+ // patterns SHOULD document it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myDataSource"
+ DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name")
+
+ // DBClientConnectionStateKey is the attribute Key conforming to the
+ // "db.client.connection.state" semantic conventions. It represents the state of
+ // a connection in the pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "idle"
+ DBClientConnectionStateKey = attribute.Key("db.client.connection.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "public.users", "customers"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // The collection name SHOULD NOT be extracted from `db.query.text`,
+ // when the database system supports query text with multiple collections
+ // in non-batch operations.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // collection name then that collection name SHOULD be used.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic
+ // conventions. It represents the name of the database, fully qualified within
+ // the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "customers", "test.users"
+ // Note: If a database system has multiple namespace components, they SHOULD be
+ // concatenated from the most general to the most specific namespace component,
+ // using `|` as a separator between the components. Any missing components (and
+ // their associated separators) SHOULD be omitted.
+ // Semantic conventions for individual database systems SHOULD document what
+ // `db.namespace` means in the context of that system.
+ // It is RECOMMENDED to capture the value as provided by the application without
+ // attempting to do any case normalization.
+ DBNamespaceKey = attribute.Key("db.namespace")
+
+ // DBOperationBatchSizeKey is the attribute Key conforming to the
+ // "db.operation.batch.size" semantic conventions. It represents the number of
+ // queries included in a batch operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 2, 3, 4
+ // Note: Operations are only considered batches when they contain two or more
+ // operations, and so `db.operation.batch.size` SHOULD never be `1`.
+ DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size")
+
+ // DBOperationNameKey is the attribute Key conforming to the "db.operation.name"
+ // semantic conventions. It represents the name of the operation or command
+ // being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "findAndModify", "HMSET", "SELECT"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // The operation name SHOULD NOT be extracted from `db.query.text`,
+ // when the database system supports query text with multiple operations
+ // in non-batch operations.
+ //
+ // If spaces can occur in the operation name, multiple consecutive spaces
+ // SHOULD be normalized to a single space.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // operation name
+ // then that operation name SHOULD be used prepended by `BATCH `,
+ // otherwise `db.operation.name` SHOULD be `BATCH` or some other database
+ // system specific term if more applicable.
+ DBOperationNameKey = attribute.Key("db.operation.name")
+
+ // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary"
+ // semantic conventions. It represents the low cardinality summary of a database
+ // query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get
+ // user by id"
+ // Note: The query summary describes a class of database queries and is useful
+ // as a grouping key, especially when analyzing telemetry for database
+ // calls involving complex queries.
+ //
+ // Summary may be available to the instrumentation through
+ // instrumentation hooks or other means. If it is not available,
+ // instrumentations
+ // that support query parsing SHOULD generate a summary following
+ // [Generating query summary]
+ // section.
+ //
+ // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query
+ DBQuerySummaryKey = attribute.Key("db.query.summary")
+
+ // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
+ // semantic conventions. It represents the database query being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?"
+ // Note: For sanitization see [Sanitization of `db.query.text`].
+ // For batch operations, if the individual operations are known to have the same
+ // query text then that query text SHOULD be used, otherwise all of the
+ // individual query texts SHOULD be concatenated with separator `; ` or some
+ // other database system specific separator if more applicable.
+ // Parameterized query text SHOULD NOT be sanitized. Even though parameterized
+ // query text can potentially have sensitive data, by using a parameterized
+ // query the user is giving a strong signal that any sensitive data will be
+ // passed as parameter values, and the benefit to observability of capturing the
+ // static part of the query text by default outweighs the risk.
+ //
+ // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext
+ DBQueryTextKey = attribute.Key("db.query.text")
+
+ // DBResponseReturnedRowsKey is the attribute Key conforming to the
+ // "db.response.returned_rows" semantic conventions. It represents the number of
+ // rows returned by the operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10, 30, 1000
+ DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows")
+
+ // DBResponseStatusCodeKey is the attribute Key conforming to the
+ // "db.response.status_code" semantic conventions. It represents the database
+ // response status code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "102", "ORA-17002", "08P01", "404"
+ // Note: The status code returned by the database. Usually it represents an
+ // error code, but may also represent partial success, warning, or differentiate
+ // between various types of successful outcomes.
+ // Semantic conventions for individual database systems SHOULD document what
+ // `db.response.status_code` means in the context of that system.
+ DBResponseStatusCodeKey = attribute.Key("db.response.status_code")
+
+ // DBStoredProcedureNameKey is the attribute Key conforming to the
+ // "db.stored_procedure.name" semantic conventions. It represents the name of a
+ // stored procedure within the database.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GetCustomer"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // stored procedure name then that stored procedure name SHOULD be used.
+ DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name")
+
+ // DBSystemNameKey is the attribute Key conforming to the "db.system.name"
+ // semantic conventions. It represents the database management system (DBMS)
+ // product as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ // Note: The actual DBMS may differ from the one identified by the client. For
+ // example, when using PostgreSQL client libraries to connect to a CockroachDB,
+ // the `db.system.name` is set to `postgresql` based on the instrumentation's
+ // best knowledge.
+ DBSystemNameKey = attribute.Key("db.system.name")
+)
+
+// DBClientConnectionPoolName returns an attribute KeyValue conforming to the
+// "db.client.connection.pool.name" semantic conventions. It represents the name
+// of the connection pool; unique within the instrumented application. In case
+// the connection pool implementation doesn't provide a name, instrumentation
+// SHOULD use a combination of parameters that would make the name unique, for
+// example, combining attributes `server.address`, `server.port`, and
+// `db.namespace`, formatted as `server.address:server.port/db.namespace`.
+// Instrumentations that generate connection pool name following different
+// patterns SHOULD document it.
+func DBClientConnectionPoolName(val string) attribute.KeyValue {
+ return DBClientConnectionPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+ return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the "db.namespace"
+// semantic conventions. It represents the name of the database, fully qualified
+// within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
+// DBOperationBatchSize returns an attribute KeyValue conforming to the
+// "db.operation.batch.size" semantic conventions. It represents the number of
+// queries included in a batch operation.
+func DBOperationBatchSize(val int) attribute.KeyValue {
+ return DBOperationBatchSizeKey.Int(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
+// DBOperationParameter returns an attribute KeyValue conforming to the
+// "db.operation.parameter" semantic conventions. It represents a database
+// operation parameter, with `<key>` being the parameter name, and the attribute
+// value being a string representation of the parameter value.
+func DBOperationParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.operation.parameter."+key, val)
+}
+
+// DBQueryParameter returns an attribute KeyValue conforming to the
+// "db.query.parameter" semantic conventions. It represents a database query
+// parameter, with `<key>` being the parameter name, and the attribute value
+// being a string representation of the parameter value.
+func DBQueryParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.query.parameter."+key, val)
+}
+
+// DBQuerySummary returns an attribute KeyValue conforming to the
+// "db.query.summary" semantic conventions. It represents the low cardinality
+// summary of a database query.
+func DBQuerySummary(val string) attribute.KeyValue {
+ return DBQuerySummaryKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the "db.query.text"
+// semantic conventions. It represents the database query being executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
+
+// DBResponseReturnedRows returns an attribute KeyValue conforming to the
+// "db.response.returned_rows" semantic conventions. It represents the number of
+// rows returned by the operation.
+func DBResponseReturnedRows(val int) attribute.KeyValue {
+ return DBResponseReturnedRowsKey.Int(val)
+}
+
+// DBResponseStatusCode returns an attribute KeyValue conforming to the
+// "db.response.status_code" semantic conventions. It represents the database
+// response status code.
+func DBResponseStatusCode(val string) attribute.KeyValue {
+ return DBResponseStatusCodeKey.String(val)
+}
+
+// DBStoredProcedureName returns an attribute KeyValue conforming to the
+// "db.stored_procedure.name" semantic conventions. It represents the name of a
+// stored procedure within the database.
+func DBStoredProcedureName(val string) attribute.KeyValue {
+ return DBStoredProcedureNameKey.String(val)
+}
+
+// Enum values for db.client.connection.state
+var (
+ // idle
+ // Stability: development
+ DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle")
+ // used
+ // Stability: development
+ DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used")
+)
+
+// Enum values for db.system.name
+var (
+ // Some other SQL database. Fallback only.
+ // Stability: development
+ DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql")
+ // [Adabas (Adaptable Database System)]
+ // Stability: development
+ //
+ // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas
+ DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas")
+ // [Actian Ingres]
+ // Stability: development
+ //
+ // [Actian Ingres]: https://www.actian.com/databases/ingres/
+ DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres")
+ // [Amazon DynamoDB]
+ // Stability: development
+ //
+ // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/
+ DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb")
+ // [Amazon Redshift]
+ // Stability: development
+ //
+ // [Amazon Redshift]: https://aws.amazon.com/redshift/
+ DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift")
+ // [Azure Cosmos DB]
+ // Stability: development
+ //
+ // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db
+ DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb")
+ // [InterSystems Caché]
+ // Stability: development
+ //
+ // [InterSystems Caché]: https://www.intersystems.com/products/cache/
+ DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache")
+ // [Apache Cassandra]
+ // Stability: development
+ //
+ // [Apache Cassandra]: https://cassandra.apache.org/
+ DBSystemNameCassandra = DBSystemNameKey.String("cassandra")
+ // [ClickHouse]
+ // Stability: development
+ //
+ // [ClickHouse]: https://clickhouse.com/
+ DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse")
+ // [CockroachDB]
+ // Stability: development
+ //
+ // [CockroachDB]: https://www.cockroachlabs.com/
+ DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb")
+ // [Couchbase]
+ // Stability: development
+ //
+ // [Couchbase]: https://www.couchbase.com/
+ DBSystemNameCouchbase = DBSystemNameKey.String("couchbase")
+ // [Apache CouchDB]
+ // Stability: development
+ //
+ // [Apache CouchDB]: https://couchdb.apache.org/
+ DBSystemNameCouchDB = DBSystemNameKey.String("couchdb")
+ // [Apache Derby]
+ // Stability: development
+ //
+ // [Apache Derby]: https://db.apache.org/derby/
+ DBSystemNameDerby = DBSystemNameKey.String("derby")
+ // [Elasticsearch]
+ // Stability: development
+ //
+ // [Elasticsearch]: https://www.elastic.co/elasticsearch
+ DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch")
+ // [Firebird]
+ // Stability: development
+ //
+ // [Firebird]: https://www.firebirdsql.org/
+ DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql")
+ // [Google Cloud Spanner]
+ // Stability: development
+ //
+ // [Google Cloud Spanner]: https://cloud.google.com/spanner
+ DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner")
+ // [Apache Geode]
+ // Stability: development
+ //
+ // [Apache Geode]: https://geode.apache.org/
+ DBSystemNameGeode = DBSystemNameKey.String("geode")
+ // [H2 Database]
+ // Stability: development
+ //
+ // [H2 Database]: https://h2database.com/
+ DBSystemNameH2database = DBSystemNameKey.String("h2database")
+ // [Apache HBase]
+ // Stability: development
+ //
+ // [Apache HBase]: https://hbase.apache.org/
+ DBSystemNameHBase = DBSystemNameKey.String("hbase")
+ // [Apache Hive]
+ // Stability: development
+ //
+ // [Apache Hive]: https://hive.apache.org/
+ DBSystemNameHive = DBSystemNameKey.String("hive")
+ // [HyperSQL Database]
+ // Stability: development
+ //
+ // [HyperSQL Database]: https://hsqldb.org/
+ DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb")
+ // [IBM Db2]
+ // Stability: development
+ //
+ // [IBM Db2]: https://www.ibm.com/db2
+ DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2")
+ // [IBM Informix]
+ // Stability: development
+ //
+ // [IBM Informix]: https://www.ibm.com/products/informix
+ DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix")
+ // [IBM Netezza]
+ // Stability: development
+ //
+ // [IBM Netezza]: https://www.ibm.com/products/netezza
+ DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza")
+ // [InfluxDB]
+ // Stability: development
+ //
+ // [InfluxDB]: https://www.influxdata.com/
+ DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb")
+ // [Instant]
+ // Stability: development
+ //
+ // [Instant]: https://www.instantdb.com/
+ DBSystemNameInstantDB = DBSystemNameKey.String("instantdb")
+ // [MariaDB]
+ // Stability: stable
+ //
+ // [MariaDB]: https://mariadb.org/
+ DBSystemNameMariaDB = DBSystemNameKey.String("mariadb")
+ // [Memcached]
+ // Stability: development
+ //
+ // [Memcached]: https://memcached.org/
+ DBSystemNameMemcached = DBSystemNameKey.String("memcached")
+ // [MongoDB]
+ // Stability: development
+ //
+ // [MongoDB]: https://www.mongodb.com/
+ DBSystemNameMongoDB = DBSystemNameKey.String("mongodb")
+ // [Microsoft SQL Server]
+ // Stability: stable
+ //
+ // [Microsoft SQL Server]: https://www.microsoft.com/sql-server
+ DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server")
+ // [MySQL]
+ // Stability: stable
+ //
+ // [MySQL]: https://www.mysql.com/
+ DBSystemNameMySQL = DBSystemNameKey.String("mysql")
+ // [Neo4j]
+ // Stability: development
+ //
+ // [Neo4j]: https://neo4j.com/
+ DBSystemNameNeo4j = DBSystemNameKey.String("neo4j")
+ // [OpenSearch]
+ // Stability: development
+ //
+ // [OpenSearch]: https://opensearch.org/
+ DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch")
+ // [Oracle Database]
+ // Stability: development
+ //
+ // [Oracle Database]: https://www.oracle.com/database/
+ DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db")
+ // [PostgreSQL]
+ // Stability: stable
+ //
+ // [PostgreSQL]: https://www.postgresql.org/
+ DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql")
+ // [Redis]
+ // Stability: development
+ //
+ // [Redis]: https://redis.io/
+ DBSystemNameRedis = DBSystemNameKey.String("redis")
+ // [SAP HANA]
+ // Stability: development
+ //
+ // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html
+ DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana")
+ // [SAP MaxDB]
+ // Stability: development
+ //
+ // [SAP MaxDB]: https://maxdb.sap.com/
+ DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb")
+ // [SQLite]
+ // Stability: development
+ //
+ // [SQLite]: https://www.sqlite.org/
+ DBSystemNameSQLite = DBSystemNameKey.String("sqlite")
+ // [Teradata]
+ // Stability: development
+ //
+ // [Teradata]: https://www.teradata.com/
+ DBSystemNameTeradata = DBSystemNameKey.String("teradata")
+ // [Trino]
+ // Stability: development
+ //
+ // [Trino]: https://trino.io/
+ DBSystemNameTrino = DBSystemNameKey.String("trino")
+)
+
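+// Illustrative usage sketch, not part of the generated conventions: describing
+// a database client call with the db.* helpers and enum values above. The
+// parameter key "0" and all literal values are hypothetical.
+func exampleDBClientAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DBSystemNamePostgreSQL,
+		DBNamespace("customers"),
+		DBOperationName("SELECT"),
+		DBCollectionName("public.users"),
+		DBQueryText("SELECT * FROM public.users WHERE username = $1"),
+		// Templated attribute: emits "db.query.parameter.0".
+		DBQueryParameter("0", "wuser"),
+	}
+}
+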
+// Namespace: deployment
+const (
+ // DeploymentEnvironmentNameKey is the attribute Key conforming to the
+ // "deployment.environment.name" semantic conventions. It represents the name of
+ // the [deployment environment] (aka deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "staging", "production"
+ // Note: `deployment.environment.name` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id` resource
+ // attributes.
+ // This implies that resources carrying the following attribute combinations
+ // MUST be
+ // considered to be identifying the same service:
+ //
+ // - `service.name=frontend`, `deployment.environment.name=production`
+ // - `service.name=frontend`, `deployment.environment.name=staging`.
+ //
+ //
+ // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment
+ DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name")
+
+ // DeploymentIDKey is the attribute Key conforming to the "deployment.id"
+ // semantic conventions. It represents the id of the deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1208"
+ DeploymentIDKey = attribute.Key("deployment.id")
+
+ // DeploymentNameKey is the attribute Key conforming to the "deployment.name"
+ // semantic conventions. It represents the name of the deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "deploy my app", "deploy-frontend"
+ DeploymentNameKey = attribute.Key("deployment.name")
+
+ // DeploymentStatusKey is the attribute Key conforming to the
+ // "deployment.status" semantic conventions. It represents the status of the
+ // deployment.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ DeploymentStatusKey = attribute.Key("deployment.status")
+)
+
+// DeploymentEnvironmentName returns an attribute KeyValue conforming to the
+// "deployment.environment.name" semantic conventions. It represents the name of
+// the [deployment environment] (aka deployment tier).
+//
+// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment
+func DeploymentEnvironmentName(val string) attribute.KeyValue {
+ return DeploymentEnvironmentNameKey.String(val)
+}
+
+// DeploymentID returns an attribute KeyValue conforming to the "deployment.id"
+// semantic conventions. It represents the id of the deployment.
+func DeploymentID(val string) attribute.KeyValue {
+ return DeploymentIDKey.String(val)
+}
+
+// DeploymentName returns an attribute KeyValue conforming to the
+// "deployment.name" semantic conventions. It represents the name of the
+// deployment.
+func DeploymentName(val string) attribute.KeyValue {
+ return DeploymentNameKey.String(val)
+}
+
+// Enum values for deployment.status
+var (
+ // failed
+ // Stability: development
+ DeploymentStatusFailed = DeploymentStatusKey.String("failed")
+ // succeeded
+ // Stability: development
+ DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded")
+)
+
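+// Illustrative usage sketch, not part of the generated conventions: a
+// deployment described with the deployment.* helpers and status enum above.
+// The function and its values are hypothetical.
+func exampleDeploymentAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DeploymentEnvironmentName("staging"),
+		DeploymentID("1208"),
+		DeploymentName("deploy-frontend"),
+		DeploymentStatusSucceeded,
+	}
+}
+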
+// Namespace: destination
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the destination
+ // address - domain name if available without reverse DNS lookup; otherwise, IP
+ // address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the source side, and when communicating through an
+ // intermediary, `destination.address` SHOULD represent the destination address
+ // behind any intermediaries, for example proxies, if it's available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the "destination.port"
+ // semantic conventions. It represents the destination port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
+
+// Namespace: device
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123456789012345", "01:23:45:67:89:AB"
+ // Note: Its value SHOULD be identical for all apps on a device and it SHOULD
+ // NOT change if an app is uninstalled and re-installed.
+ // However, it might be resettable by the user for all apps on a device.
+ // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be
+ // used as values.
+ //
+ // More information about Android identifier best practices can be found in the
+ // [Android user data IDs guide].
+ //
+ // > [!WARNING]
+ // > This attribute may contain sensitive (PII) information. Caution should be
+ // > taken when storing personal data or anything which can identify a user.
+ // > GDPR and data protection laws may apply; ensure you do your own due
+ // > diligence.
+ // >
+ // > Due to these reasons, this identifier is not recommended for consumer
+ // > applications and will likely result in rejection from both Google Play and
+ // > App Store. However, it may be appropriate for specific enterprise
+ // > scenarios, such as kiosk devices or enterprise-managed devices, with
+ // > appropriate compliance clearance.
+ // >
+ // > Any instrumentation providing this identifier MUST implement it as an
+ // > opt-in feature.
+ // >
+ // > See [`app.installation.id`] for a more privacy-preserving alternative.
+ //
+ // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids
+ // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of the
+ // device manufacturer.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apple", "Samsung"
+ // Note: The Android OS provides this field via [Build]. iOS apps SHOULD
+ // hardcode the value `Apple`.
+ //
+ // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone3,4", "SM-G920F"
+ // Note: It's recommended this value represents a machine-readable version of
+ // the model identifier rather than the market or consumer-friendly name of the
+ // device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the "device.model.name"
+ // semantic conventions. It represents the marketing name for the device model.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone 6s Plus", "Samsung Galaxy S6"
+ // Note: It's recommended this value represents a human-readable version of the
+ // device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic
+// conventions. It represents a unique identifier representing the device.
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name for
+// the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// Namespace: disk
+const (
+ // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction"
+ // semantic conventions. It represents the disk IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "read"
+ DiskIODirectionKey = attribute.Key("disk.io.direction")
+)
+
+// Enum values for disk.io.direction
+var (
+ // read
+ // Stability: development
+ DiskIODirectionRead = DiskIODirectionKey.String("read")
+ // write
+ // Stability: development
+ DiskIODirectionWrite = DiskIODirectionKey.String("write")
+)
+
+// Namespace: dns
+const (
+ // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic
+ // conventions. It represents the list of IPv4 or IPv6 addresses resolved during
+ // DNS lookup.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ DNSAnswersKey = attribute.Key("dns.answers")
+
+ // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
+ // semantic conventions. It represents the name being queried.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.example.com", "opentelemetry.io"
+ // Note: If the name field contains non-printable characters (below 32 or above
+ // 126), those characters should be represented as escaped base 10 integers
+ // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns,
+ // and line feeds should be converted to \t, \r, and \n respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers"
+// semantic conventions. It represents the list of IPv4 or IPv6 addresses
+// resolved during DNS lookup.
+func DNSAnswers(val ...string) attribute.KeyValue {
+ return DNSAnswersKey.StringSlice(val)
+}
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
+
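+// Illustrative usage sketch, not part of the generated conventions: recording
+// a DNS lookup. DNSAnswers is variadic and yields one string-slice attribute;
+// the addresses are hypothetical.
+func exampleDNSAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DNSQuestionName("www.example.com"),
+		DNSAnswers("10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"),
+	}
+}
+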
+// Namespace: elasticsearch
+const (
+ // ElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "elasticsearch.node.name" semantic conventions. It represents the represents
+ // the human-readable identifier of the node/instance to which a request was
+ // routed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-0000000001"
+ ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name")
+)
+
+// ElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "elasticsearch.node.name" semantic conventions. It represents the represents
+// the human-readable identifier of the node/instance to which a request was
+// routed.
+func ElasticsearchNodeName(val string) attribute.KeyValue {
+ return ElasticsearchNodeNameKey.String(val)
+}
+
+// Namespace: enduser
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic
+ // conventions. It represents the unique identifier of an end user in the
+ // system. It may be a username, email address, or other identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "username"
+ // Note: Unique identifier of an end user in the system.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (PII) information.
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id"
+ // semantic conventions. It represents the pseudonymous identifier of an end
+ // user. This identifier should be a random value that is not directly linked or
+ // associated with the end user's actual identity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "QdH5CAWJgqVT4rOr0qtumf"
+ // Note: Pseudonymous identifier of an end user.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (linkable PII) information.
+ EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the unique identifier of an end user in
+// the system. It may be a username, email address, or other identifier.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserPseudoID returns an attribute KeyValue conforming to the
+// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous
+// identifier of an end user. This identifier should be a random value that is
+// not directly linked or associated with the end user's actual identity.
+func EnduserPseudoID(val string) attribute.KeyValue {
+ return EnduserPseudoIDKey.String(val)
+}
+
+// Namespace: error
+const (
+ // ErrorMessageKey is the attribute Key conforming to the "error.message"
+ // semantic conventions. It represents a message providing more detail about an
+ // error in human-readable form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Unexpected input type: string", "The user has exceeded their
+ // storage quota"
+ // Note: `error.message` should provide additional context and detail about an
+ // error.
+ // It is NOT RECOMMENDED to duplicate the value of `error.type` in
+ // `error.message`.
+ // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in
+ // `error.message`.
+ //
+ // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded
+ // cardinality and overlap with span status.
+ ErrorMessageKey = attribute.Key("error.message")
+
+ // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic
+ // conventions. It represents the class of error the operation ended with.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "timeout", "java.net.UnknownHostException",
+ // "server_certificate_invalid", "500"
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library SHOULD be
+ // low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query time
+ // when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT set
+ // `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as HTTP
+ // or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // - Use a domain-specific attribute
+ // - Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+// ErrorMessage returns an attribute KeyValue conforming to the "error.message"
+// semantic conventions. It represents a message providing more detail about an
+// error in human-readable form.
+func ErrorMessage(val string) attribute.KeyValue {
+ return ErrorMessageKey.String(val)
+}
+
+// Enum values for error.type
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a
+ // custom value.
+ //
+ // Stability: stable
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
+
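+// errorExampleAttrs is an editorial sketch, not part of the generated
+// conventions: when no domain-specific error class applies, the fallback enum
+// value ErrorTypeOther can be paired with a human-readable message. The err
+// value is assumed to be supplied by the caller.
+func errorExampleAttrs(err error) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  // Low-cardinality class of the failure; "_OTHER" is the defined fallback.
+  ErrorTypeOther,
+  // Free-form detail; avoid duplicating error.type or exception.message here.
+  ErrorMessage(err.Error()),
+ }
+}
+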
+// Namespace: exception
+const (
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "Division by zero", "Can't convert 'int' object to str implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+ // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+ // com.example.GenerateTrace.main(GenerateTrace.java:5)
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the exception
+ // should be preferred over the static type in languages that support it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "java.net.ConnectException", "OSError"
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the "exception.type"
+// semantic conventions. It represents the type of the exception (its
+// fully-qualified class name, if applicable). The dynamic type of the exception
+// should be preferred over the static type in languages that support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
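+// exceptionExampleAttrs is an editorial sketch, not part of the generated
+// conventions: it records a caught error with the exception helpers above.
+// The typeName, message, and stack values are assumed caller input (for
+// example a fully-qualified type name and the output of debug.Stack()).
+func exceptionExampleAttrs(typeName, message, stack string) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ExceptionType(typeName),
+  ExceptionMessage(message),
+  ExceptionStacktrace(stack),
+ }
+}
+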
+// Namespace: faas
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the serverless
+ // function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron Expression].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0/5 * * * ? *
+ //
+ // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name of
+ // the source on which the triggering operation was performed. For example, in
+ // Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to
+ // the database name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myBucketName", "myDbName"
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or S3
+ // this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myFile.txt", "myTableName"
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the describes
+ // the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string containing
+ // the time when the data was accessed in the [ISO 8601] format expressed in
+ // [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a string,
+ // that will be potentially reused for other invocations to the same
+ // function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"
+ // Note: - **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation ID of
+ // the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name"
+ // semantic conventions. It represents the name of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: my-function
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+ // function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud region of
+ // the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: eu-central-1
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+ // function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory"
+ // semantic conventions. It represents the amount of memory available to the
+ // serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: It's recommended to set this attribute since e.g. too little memory can
+ // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+ // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+ // information (which must be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this runtime
+ // instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-function", "myazurefunctionapp/some-function-name"
+ // Note: This is the name of the function as configured/deployed on the FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function.name`]
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud providers/products:
+ //
+ // - **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ //
+ //
+ // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation time
+ // in the [ISO 8601] format expressed in [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic
+ // conventions. It represents the type of the trigger which caused this function
+ // invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic
+ // conventions. It represents the immutable version of the function being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "26", "pinkfroid-00002"
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // - **AWS Lambda:** The [function version]
+ // (an integer represented as a decimal string).
+ // - **Google Cloud Run (Services):** The [revision]
+ // (i.e., the function name plus the revision suffix).
+ // - **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment variable].
+ // - **Azure Functions:** Not applicable. Do not set this attribute.
+ //
+ //
+ // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html
+ // [revision]: https://cloud.google.com/run/docs/managing/revisions
+ // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart"
+// semantic conventions. It represents a boolean that is true if the serverless
+// function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic
+// conventions. It represents a string containing the schedule period as
+// [Cron Expression].
+//
+// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of the
+// source on which the triggering operation was performed. For example, in Cloud
+// Storage or S3 this corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO 8601] format expressed in
+// [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance"
+// semantic conventions. It represents the execution environment ID as a string,
+// that will be potentially reused for other invocations to the same
+// function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID of
+// the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region of
+// the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic
+// conventions. It represents the name of the single function that this runtime
+// instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic
+// conventions. It represents a string containing the function invocation time in
+// the [ISO 8601] format expressed in [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the "faas.version"
+// semantic conventions. It represents the immutable version of the function
+// being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// Enum values for faas.document.operation
+var (
+ // When a new object is created.
+ // Stability: development
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified.
+ // Stability: development
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted.
+ // Stability: development
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Enum values for faas.invoked_provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ // Stability: development
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// Enum values for faas.trigger
+var (
+ // A response to some data source operation such as a database or filesystem
+ // read/write
+ // Stability: development
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ // Stability: development
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ // Stability: development
+ FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ // Stability: development
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ // Stability: development
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
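+// faasExampleAttrs is an editorial sketch, not part of the generated
+// conventions: it describes an HTTP-triggered invocation of another
+// serverless function. The concrete values mirror the examples documented
+// above and are assumptions.
+func faasExampleAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  FaaSTriggerHTTP, // enum value for faas.trigger
+  FaaSInvokedName("my-function"),
+  FaaSInvokedProviderAWS, // enum value for faas.invoked_provider
+  FaaSInvokedRegion("eu-central-1"),
+  FaaSColdstart(false),
+ }
+}
+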
+// Namespace: feature_flag
+const (
+ // FeatureFlagContextIDKey is the attribute Key conforming to the
+ // "feature_flag.context.id" semantic conventions. It represents the unique
+ // identifier for the flag evaluation context. For example, the targeting key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
+ FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
+
+ // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key"
+ // semantic conventions. It represents the lookup key of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "logo-color"
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider.name" semantic conventions. It represents the
+ // identifies the feature flag provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "Flag Manager"
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
+
+ // FeatureFlagResultReasonKey is the attribute Key conforming to the
+ // "feature_flag.result.reason" semantic conventions. It represents the reason
+ // code which shows how a feature flag value was determined.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "static", "targeting_match", "error", "default"
+ FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
+
+ // FeatureFlagResultValueKey is the attribute Key conforming to the
+ // "feature_flag.result.value" semantic conventions. It represents the evaluated
+ // value of the feature flag.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "#ff0000", true, 3
+ // Note: With some feature flag providers, feature flag results can be quite
+ // large or contain private or sensitive details.
+ // Because of this, `feature_flag.result.variant` is often the preferred
+ // attribute if it is available.
+ //
+ // It may be desirable to redact or otherwise limit the size and scope of
+ // `feature_flag.result.value` if possible.
+ // Because the evaluated flag value is unstructured and may be any type, it is
+ // left to the instrumentation author to determine how best to achieve this.
+ FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value")
+
+ // FeatureFlagResultVariantKey is the attribute Key conforming to the
+ // "feature_flag.result.variant" semantic conventions. It represents a semantic
+ // identifier for an evaluated flag value.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "red", "true", "on"
+ // Note: A semantic identifier, commonly referred to as a variant, provides a
+ // means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant")
+
+ // FeatureFlagSetIDKey is the attribute Key conforming to the
+ // "feature_flag.set.id" semantic conventions. It represents the identifier of
+ // the [flag set] to which the feature flag belongs.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "proj-1", "ab98sgs", "service1/dev"
+ //
+ // [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+ FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id")
+
+ // FeatureFlagVersionKey is the attribute Key conforming to the
+ // "feature_flag.version" semantic conventions. It represents the version of the
+ // ruleset used during the evaluation. This may be any stable value which
+ // uniquely identifies the ruleset.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "1", "01ABCDEF"
+ FeatureFlagVersionKey = attribute.Key("feature_flag.version")
+)
+
+// FeatureFlagContextID returns an attribute KeyValue conforming to the
+// "feature_flag.context.id" semantic conventions. It represents the unique
+// identifier for the flag evaluation context. For example, the targeting key.
+func FeatureFlagContextID(val string) attribute.KeyValue {
+ return FeatureFlagContextIDKey.String(val)
+}
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the lookup key of the
+// feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider.name" semantic conventions. It represents the
+// identifies the feature flag provider.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagResultVariant returns an attribute KeyValue conforming to the
+// "feature_flag.result.variant" semantic conventions. It represents a semantic
+// identifier for an evaluated flag value.
+func FeatureFlagResultVariant(val string) attribute.KeyValue {
+ return FeatureFlagResultVariantKey.String(val)
+}
+
+// FeatureFlagSetID returns an attribute KeyValue conforming to the
+// "feature_flag.set.id" semantic conventions. It represents the identifier of
+// the [flag set] to which the feature flag belongs.
+//
+// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+func FeatureFlagSetID(val string) attribute.KeyValue {
+ return FeatureFlagSetIDKey.String(val)
+}
+
+// FeatureFlagVersion returns an attribute KeyValue conforming to the
+// "feature_flag.version" semantic conventions. It represents the version of the
+// ruleset used during the evaluation. This may be any stable value which
+// uniquely identifies the ruleset.
+func FeatureFlagVersion(val string) attribute.KeyValue {
+ return FeatureFlagVersionKey.String(val)
+}
+
+// Enum values for feature_flag.result.reason
+var (
+ // The resolved value is static (no dynamic evaluation).
+ // Stability: release_candidate
+ FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
+ // The resolved value fell back to a pre-configured value (no dynamic evaluation
+ // occurred or dynamic evaluation yielded no result).
+ // Stability: release_candidate
+ FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
+ // The resolved value was the result of a dynamic evaluation, such as a rule or
+ // specific user-targeting.
+ // Stability: release_candidate
+ FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
+ // The resolved value was the result of pseudorandom assignment.
+ // Stability: release_candidate
+ FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
+ // The resolved value was retrieved from cache.
+ // Stability: release_candidate
+ FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
+ // The resolved value was the result of the flag being disabled in the
+ // management system.
+ // Stability: release_candidate
+ FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
+ // The reason for the resolved value could not be determined.
+ // Stability: release_candidate
+ FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
+ // The resolved value is non-authoritative or possibly out of date
+ // Stability: release_candidate
+ FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
+ // The resolved value was the result of an error.
+ // Stability: release_candidate
+ FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
+)
+
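+// featureFlagExampleAttrs is an editorial sketch, not part of the generated
+// conventions: it describes a single flag evaluation. The flag key, provider
+// name, and variant follow the documented examples and are assumptions.
+func featureFlagExampleAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  FeatureFlagKey("logo-color"),
+  FeatureFlagProviderName("Flag Manager"),
+  // Prefer the variant over feature_flag.result.value when one is available.
+  FeatureFlagResultVariant("red"),
+  FeatureFlagResultReasonTargetingMatch, // enum value for feature_flag.result.reason
+ }
+}
+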
+// Namespace: file
+const (
+ // FileAccessedKey is the attribute Key conforming to the "file.accessed"
+ // semantic conventions. It represents the time when the file was last accessed,
+ // in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileAccessedKey = attribute.Key("file.accessed")
+
+ // FileAttributesKey is the attribute Key conforming to the "file.attributes"
+ // semantic conventions. It represents the array of file attributes.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "readonly", "hidden"
+ // Note: Attribute names depend on the OS or file system. Here’s a
+ // non-exhaustive list of values expected for this attribute: `archive`,
+ // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`,
+ // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`,
+ // `write`.
+ FileAttributesKey = attribute.Key("file.attributes")
+
+ // FileChangedKey is the attribute Key conforming to the "file.changed" semantic
+ // conventions. It represents the time when the file attributes or metadata was
+ // last changed, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: `file.changed` captures the time when any of the file's properties or
+ // attributes (including the content) are changed, while `file.modified`
+ // captures the timestamp when the file content is modified.
+ FileChangedKey = attribute.Key("file.changed")
+
+ // FileCreatedKey is the attribute Key conforming to the "file.created" semantic
+ // conventions. It represents the time when the file was created, in ISO 8601
+ // format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileCreatedKey = attribute.Key("file.created")
+
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is located.
+ // It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/user", "C:\Program Files\MyApp"
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the leading
+ // dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+ // Note: When the file name has multiple extensions (example.tar.gz), only the
+ // last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileForkNameKey is the attribute Key conforming to the "file.fork_name"
+ // semantic conventions. It represents the name of the fork. A fork is
+ // additional data associated with a filesystem object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Zone.Identifier"
+ // Note: On Linux, a resource fork is used to store additional data with a
+ // filesystem object. A file always has at least one fork for the data portion,
+ // and additional forks may exist.
+ // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default
+ // data stream for a file is just called $DATA. Zone.Identifier is commonly used
+ // by Windows to track contents downloaded from the Internet. An ADS is
+ // typically of the form: C:\path\to\filename.extension:some_fork_name, and
+ // some_fork_name is the value that should populate `fork_name`.
+ // `filename.extension` should populate `file.name`, and `extension` should
+ // populate `file.extension`. The full path, `file.path`, will include the fork
+ // name.
+ FileForkNameKey = attribute.Key("file.fork_name")
+
+ // FileGroupIDKey is the attribute Key conforming to the "file.group.id"
+ // semantic conventions. It represents the primary Group ID (GID) of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileGroupIDKey = attribute.Key("file.group.id")
+
+ // FileGroupNameKey is the attribute Key conforming to the "file.group.name"
+ // semantic conventions. It represents the primary group name of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "users"
+ FileGroupNameKey = attribute.Key("file.group.name")
+
+ // FileInodeKey is the attribute Key conforming to the "file.inode" semantic
+ // conventions. It represents the inode representing the file in the filesystem.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "256383"
+ FileInodeKey = attribute.Key("file.inode")
+
+ // FileModeKey is the attribute Key conforming to the "file.mode" semantic
+ // conventions. It represents the mode of the file in octal representation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0640"
+ FileModeKey = attribute.Key("file.mode")
+
+ // FileModifiedKey is the attribute Key conforming to the "file.modified"
+ // semantic conventions. It represents the time when the file content was last
+ // modified, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ FileModifiedKey = attribute.Key("file.modified")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.png"
+ FileNameKey = attribute.Key("file.name")
+
+ // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id"
+ // semantic conventions. It represents the user ID (UID) or security identifier
+ // (SID) of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileOwnerIDKey = attribute.Key("file.owner.id")
+
+ // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name"
+ // semantic conventions. It represents the username of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ FileOwnerNameKey = attribute.Key("file.owner.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe"
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FileSizeKey = attribute.Key("file.size")
+
+ // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the
+ // "file.symbolic_link.target_path" semantic conventions. It represents the path
+ // to the target of a symbolic link.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/python3"
+ // Note: This attribute is only applicable to symbolic links.
+ FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path")
+)
+
+// FileAccessed returns an attribute KeyValue conforming to the "file.accessed"
+// semantic conventions. It represents the time when the file was last accessed,
+// in ISO 8601 format.
+func FileAccessed(val string) attribute.KeyValue {
+ return FileAccessedKey.String(val)
+}
+
+// FileAttributes returns an attribute KeyValue conforming to the
+// "file.attributes" semantic conventions. It represents the array of file
+// attributes.
+func FileAttributes(val ...string) attribute.KeyValue {
+ return FileAttributesKey.StringSlice(val)
+}
+
+// FileChanged returns an attribute KeyValue conforming to the "file.changed"
+// semantic conventions. It represents the time when the file attributes or
+// metadata was last changed, in ISO 8601 format.
+func FileChanged(val string) attribute.KeyValue {
+ return FileChangedKey.String(val)
+}
+
+// FileCreated returns an attribute KeyValue conforming to the "file.created"
+// semantic conventions. It represents the time when the file was created, in ISO
+// 8601 format.
+func FileCreated(val string) attribute.KeyValue {
+ return FileCreatedKey.String(val)
+}
+
+// FileDirectory returns an attribute KeyValue conforming to the "file.directory"
+// semantic conventions. It represents the directory where the file is located.
+// It should include the drive letter, when appropriate.
+func FileDirectory(val string) attribute.KeyValue {
+ return FileDirectoryKey.String(val)
+}
+
+// FileExtension returns an attribute KeyValue conforming to the "file.extension"
+// semantic conventions. It represents the file extension, excluding the leading
+// dot.
+func FileExtension(val string) attribute.KeyValue {
+ return FileExtensionKey.String(val)
+}
+
+// FileForkName returns an attribute KeyValue conforming to the "file.fork_name"
+// semantic conventions. It represents the name of the fork. A fork is additional
+// data associated with a filesystem object.
+func FileForkName(val string) attribute.KeyValue {
+ return FileForkNameKey.String(val)
+}
+
+// FileGroupID returns an attribute KeyValue conforming to the "file.group.id"
+// semantic conventions. It represents the primary Group ID (GID) of the file.
+func FileGroupID(val string) attribute.KeyValue {
+ return FileGroupIDKey.String(val)
+}
+
+// FileGroupName returns an attribute KeyValue conforming to the
+// "file.group.name" semantic conventions. It represents the primary group name
+// of the file.
+func FileGroupName(val string) attribute.KeyValue {
+ return FileGroupNameKey.String(val)
+}
+
+// FileInode returns an attribute KeyValue conforming to the "file.inode"
+// semantic conventions. It represents the inode representing the file in the
+// filesystem.
+func FileInode(val string) attribute.KeyValue {
+ return FileInodeKey.String(val)
+}
+
+// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic
+// conventions. It represents the mode of the file in octal representation.
+func FileMode(val string) attribute.KeyValue {
+ return FileModeKey.String(val)
+}
+
+// FileModified returns an attribute KeyValue conforming to the "file.modified"
+// semantic conventions. It represents the time when the file content was last
+// modified, in ISO 8601 format.
+func FileModified(val string) attribute.KeyValue {
+ return FileModifiedKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name" semantic
+// conventions. It represents the name of the file including the extension,
+// without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id"
+// semantic conventions. It represents the user ID (UID) or security identifier
+// (SID) of the file owner.
+func FileOwnerID(val string) attribute.KeyValue {
+ return FileOwnerIDKey.String(val)
+}
+
+// FileOwnerName returns an attribute KeyValue conforming to the
+// "file.owner.name" semantic conventions. It represents the username of the file
+// owner.
+func FileOwnerName(val string) attribute.KeyValue {
+ return FileOwnerNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path" semantic
+// conventions. It represents the full path to the file, including the file name.
+// It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size" semantic
+// conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
+
+// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the
+// "file.symbolic_link.target_path" semantic conventions. It represents the path
+// to the target of a symbolic link.
+func FileSymbolicLinkTargetPath(val string) attribute.KeyValue {
+ return FileSymbolicLinkTargetPathKey.String(val)
+}
+
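+// fileExampleAttrs is an editorial sketch, not part of the generated
+// conventions: it splits one file reference across the dedicated attributes
+// described above. The path components are assumptions; sizeBytes is assumed
+// caller input.
+func fileExampleAttrs(sizeBytes int) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  FilePath("/home/alice/example.png"),
+  FileDirectory("/home/alice"),
+  FileName("example.png"),
+  FileExtension("png"), // leading dot excluded
+  FileSize(sizeBytes),
+ }
+}
+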
+// Namespace: gcp
+const (
+ // GCPAppHubApplicationContainerKey is the attribute Key conforming to the
+ // "gcp.apphub.application.container" semantic conventions. It represents the
+ // container within GCP where the AppHub application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-container-project"
+ GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container")
+
+ // GCPAppHubApplicationIDKey is the attribute Key conforming to the
+ // "gcp.apphub.application.id" semantic conventions. It represents the name of
+ // the application as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-application"
+ GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id")
+
+ // GCPAppHubApplicationLocationKey is the attribute Key conforming to the
+ // "gcp.apphub.application.location" semantic conventions. It represents the GCP
+ // zone or region where the application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1"
+ GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location")
+
+ // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.criticality_type" semantic conventions. It represents the
+ // criticality of a service, which indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type")
+
+ // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.environment_type" semantic conventions. It represents the
+ // environment of a service, which is the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type")
+
+ // GCPAppHubServiceIDKey is the attribute Key conforming to the
+ // "gcp.apphub.service.id" semantic conventions. It represents the name of the
+ // service as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-service"
+ GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id")
+
+ // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.criticality_type" semantic conventions. It represents
+ // the criticality of a workload, which indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type")
+
+ // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.environment_type" semantic conventions. It represents
+ // the environment of a workload, which is the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type")
+
+ // GCPAppHubWorkloadIDKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+ // workload as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-workload"
+ GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id")
+
+ // GCPClientServiceKey is the attribute Key conforming to the
+ // "gcp.client.service" semantic conventions. It represents the identifies the
+ // Google Cloud service for which the official client library is intended.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "appengine", "run", "firestore", "alloydb", "spanner"
+ // Note: Intended to be a stable identifier for Google Cloud client libraries
+ // that is uniform across implementation languages. The value should be derived
+ // from the canonical service domain for the service; for example,
+ // 'foo.googleapis.com' should result in a value of 'foo'.
+ GCPClientServiceKey = attribute.Key("gcp.client.service")
+
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+ // the Cloud Run [execution] being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`] environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "job-name-xxxx", "sample-job-mdw84"
+ //
+ // [execution]: https://cloud.google.com/run/docs/managing/job-executions
+ // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+ // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1
+ //
+ // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+
+ // GCPGCEInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+ // of a GCE instance. This is the full value of the default or
+ // [custom hostname].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-host1234.example.com",
+ // "sample-vm.us-west1-b.c.my-project.internal"
+ //
+ // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+ GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGCEInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance name
+ // of a GCE instance. This is the value provided by `host.name`, the visible
+ // name of the instance in the Cloud Console UI, and the prefix for the default
+ // hostname of the instance as defined by the [default internal DNS name].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-1", "my-vm-name"
+ //
+ // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+ GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.container" semantic conventions. It represents the
+// container within GCP where the AppHub application is defined.
+func GCPAppHubApplicationContainer(val string) attribute.KeyValue {
+ return GCPAppHubApplicationContainerKey.String(val)
+}
+
+// GCPAppHubApplicationID returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.id" semantic conventions. It represents the name of
+// the application as configured in AppHub.
+func GCPAppHubApplicationID(val string) attribute.KeyValue {
+ return GCPAppHubApplicationIDKey.String(val)
+}
+
+// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+// zone or region where the application is defined.
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue {
+ return GCPAppHubApplicationLocationKey.String(val)
+}
+
+// GCPAppHubServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+// service as configured in AppHub.
+func GCPAppHubServiceID(val string) attribute.KeyValue {
+ return GCPAppHubServiceIDKey.String(val)
+}
+
+// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+// workload as configured in AppHub.
+func GCPAppHubWorkloadID(val string) attribute.KeyValue {
+ return GCPAppHubWorkloadIDKey.String(val)
+}
+
+// GCPClientService returns an attribute KeyValue conforming to the
+// "gcp.client.service" semantic conventions. It represents the identifies the
+// Google Cloud service for which the official client library is intended.
+func GCPClientService(val string) attribute.KeyValue {
+ return GCPClientServiceKey.String(val)
+}
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+// the Cloud Run [execution] being run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`] environment variable.
+//
+// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+// environment variable.
+//
+// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or
+// [custom hostname].
+//
+// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+func GCPGCEInstanceHostname(val string) attribute.KeyValue {
+ return GCPGCEInstanceHostnameKey.String(val)
+}
+
+// GCPGCEInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+// of a GCE instance. This is the value provided by `host.name`, the visible name
+// of the instance in the Cloud Console UI, and the prefix for the default
+// hostname of the instance as defined by the [default internal DNS name].
+//
+// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+func GCPGCEInstanceName(val string) attribute.KeyValue {
+ return GCPGCEInstanceNameKey.String(val)
+}
+
+// Enum values for gcp.apphub.service.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.service.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
+// Enum values for gcp.apphub.workload.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.workload.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
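+// gcpCloudRunJobExampleAttrs is an editorial sketch, not part of the
+// generated conventions: it builds the Cloud Run job attributes defined
+// above, assuming execution and taskIndex were read by the caller from the
+// CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX environment variables.
+func gcpCloudRunJobExampleAttrs(execution string, taskIndex int) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  GCPCloudRunJobExecution(execution),
+  GCPCloudRunJobTaskIndex(taskIndex),
+ }
+}
+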
+// Namespace: gen_ai
+const (
+ // GenAIAgentDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.agent.description" semantic conventions. It represents the free-form
+ // description of the GenAI agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Helps with math problems", "Generates fiction stories"
+ GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description")
+
+ // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id"
+ // semantic conventions. It represents the unique identifier of the GenAI agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIAgentIDKey = attribute.Key("gen_ai.agent.id")
+
+ // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name"
+ // semantic conventions. It represents the human-readable name of the GenAI
+ // agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Math Tutor", "Fiction Writer"
+ GenAIAgentNameKey = attribute.Key("gen_ai.agent.name")
+
+ // GenAIConversationIDKey is the attribute Key conforming to the
+ // "gen_ai.conversation.id" semantic conventions. It represents the unique
+ // identifier for a conversation (session, thread), used to store and correlate
+ // messages within this conversation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id")
+
+ // GenAIDataSourceIDKey is the attribute Key conforming to the
+ // "gen_ai.data_source.id" semantic conventions. It represents the data source
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "H7STPQYOND"
+ // Note: Data sources are used by AI agents and RAG applications to store
+ // grounding data. A data source may be an external database, object store,
+ // document collection, website, or any other storage system used by the GenAI
+ // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier
+ // used by the GenAI system rather than a name specific to the external storage,
+ // such as a database or object store. Semantic conventions referencing
+ // `gen_ai.data_source.id` MAY also leverage additional attributes, such as
+ // `db.*`, to further identify and describe the data source.
+ GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
+
+ // GenAIInputMessagesKey is the attribute Key conforming to the
+ // "gen_ai.input.messages" semantic conventions. It represents the chat history
+ // provided to the model as an input.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n
+ // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n
+ // "parts": [\n {\n "type": "tool_call",\n "id":
+ // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n
+ // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n
+ // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n
+ // "result": "rainy, 57°F"\n }\n ]\n }\n]\n"
+ // Note: Instrumentations MUST follow [Input messages JSON schema].
+ // When the attribute is recorded on events, it MUST be recorded in structured
+ // form. When recorded on spans, it MAY be recorded as a JSON string if
+ // structured format is not supported and SHOULD be recorded in structured
+ // form otherwise.
+ //
+ // Messages MUST be provided in the order they were sent to the model.
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // input messages.
+ //
+ // > [!Warning]
+ // > This attribute is likely to contain sensitive information including
+ // > user/PII data.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages")
+
+ // GenAIOperationNameKey is the attribute Key conforming to the
+ // "gen_ai.operation.name" semantic conventions. It represents the name of the
+ // operation being performed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If one of the predefined values applies but the specific system uses a
+ // different name, it's RECOMMENDED to document it in the semantic conventions
+ // for that GenAI system and use the system-specific name in the
+ // instrumentation. If a different name is not documented, instrumentation
+ // libraries SHOULD use the applicable predefined value.
+ GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
+
+ // GenAIOutputMessagesKey is the attribute Key conforming to the
+ // "gen_ai.output.messages" semantic conventions. It represents the messages
+ // returned by the model where each message represents a specific model response
+ // (choice, candidate).
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n
+ // "content": "The weather in Paris is currently rainy with a temperature of
+ // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n"
+ // Note: Instrumentations MUST follow [Output messages JSON schema]
+ //
+ // Each message represents a single output choice/candidate generated by
+ // the model. Each message corresponds to exactly one generation
+ // (choice/candidate) and vice versa - one choice cannot be split across
+ // multiple messages or one message cannot contain parts from multiple choices.
+ //
+ // When the attribute is recorded on events, it MUST be recorded in structured
+ // form. When recorded on spans, it MAY be recorded as a JSON string if
+ // structured format is not supported and SHOULD be recorded in structured
+ // form otherwise.
+ //
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // output messages.
+ //
+ // > [!Warning]
+ // > This attribute is likely to contain sensitive information including
+ // > user/PII data.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages")
+
+ // GenAIOutputTypeKey is the attribute Key conforming to the
+ // "gen_ai.output.type" semantic conventions. It represents the represents the
+ // content type requested by the client.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute SHOULD be used when the client requests output of a
+ // specific type. The model may return zero or more outputs of this type.
+ // This attribute specifies the output modality and not the actual output
+ // format. For example, if an image is requested, the actual output could be a
+ // URL pointing to an image file.
+ // Additional output format details may be recorded in the future in the
+ // `gen_ai.output.{type}.*` attributes.
+ GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
+
+ // GenAIProviderNameKey is the attribute Key conforming to the
+ // "gen_ai.provider.name" semantic conventions. It represents the Generative AI
+ // provider as identified by the client or server instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The attribute SHOULD be set based on the instrumentation's best
+ // knowledge and may differ from the actual model provider.
+ //
+ // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms
+ // are accessible using the OpenAI REST API and corresponding client libraries,
+ // but may proxy or host models from different providers.
+ //
+ // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address`
+ // attributes may help identify the actual system in use.
+ //
+ // The `gen_ai.provider.name` attribute acts as a discriminator that
+ // identifies the GenAI telemetry format flavor specific to that provider
+ // within GenAI semantic conventions.
+ // It SHOULD be set consistently with provider-specific attributes and signals.
+ // For example, GenAI spans, metrics, and events related to AWS Bedrock
+ // should have the `gen_ai.provider.name` set to `aws.bedrock` and include
+ // applicable `aws.bedrock.*` attributes and are not expected to include
+ // `openai.*` attributes.
+ GenAIProviderNameKey = attribute.Key("gen_ai.provider.name")
+
+ // GenAIRequestChoiceCountKey is the attribute Key conforming to the
+ // "gen_ai.request.choice.count" semantic conventions. It represents the target
+ // number of candidate completions to return.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3
+ GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+ // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+ // "gen_ai.request.encoding_formats" semantic conventions. It represents the
+ // encoding formats requested in an embeddings operation, if specified.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "base64"], ["float", "binary"
+ // Note: In some GenAI systems the encoding formats are called embedding types.
+ // Also, some GenAI systems only accept a single format per request.
+ GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+ // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+ // frequency penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+ // GenAIRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+ // number of tokens the model generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAIRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of the
+ // GenAI model a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: gpt-4
+ GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.presence_penalty" semantic conventions. It represents the
+ // presence penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
+
+ // GenAIRequestSeedKey is the attribute Key conforming to the
+ // "gen_ai.request.seed" semantic conventions. It represents the requests with
+ // same seed value more likely to return same result.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
+
+ // GenAIRequestStopSequencesKey is the attribute Key conforming to the
+ // "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+ // of sequences that the model will use to stop generating further tokens.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "forest", "lived"
+ GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
+
+ // GenAIRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.0
+ GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAIRequestTopKKey is the attribute Key conforming to the
+ // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+ // GenAIRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to each
+ // generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "stop"], ["stop", "length"
+ GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAIResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "chatcmpl-123"
+ GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAIResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of the
+ // model that generated the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4-0613"
+ GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAISystemInstructionsKey is the attribute Key conforming to the
+ // "gen_ai.system_instructions" semantic conventions. It represents the system
+ // message or instructions provided to the GenAI model separately from the chat
+ // history.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet
+ // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type":
+ // "text",\n "content": "You are a language translator."\n },\n {\n "type":
+ // "text",\n "content": "Your mission is to translate text in English to
+ // French."\n }\n]\n"
+ // Note: This attribute SHOULD be used when the corresponding provider or API
+ // allows system instructions or messages to be provided separately from the
+ // chat history.
+ //
+ // Instructions that are part of the chat history SHOULD be recorded in
+ // `gen_ai.input.messages` attribute instead.
+ //
+ // Instrumentations MUST follow [System instructions JSON schema].
+ //
+ // When recorded on spans, it MAY be recorded as a JSON string if structured
+ // format is not supported and SHOULD be recorded in structured form otherwise.
+ //
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // system instructions.
+ //
+ // > [!Warning]
+ // > This attribute may contain sensitive information.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions")
+
+ // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
+ // semantic conventions. It represents the type of token being counted.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "input", "output"
+ GenAITokenTypeKey = attribute.Key("gen_ai.token.type")
+
+ // GenAIToolCallIDKey is the attribute Key conforming to the
+ // "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4"
+ GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id")
+
+ // GenAIToolDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.tool.description" semantic conventions. It represents the tool
+ // description.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Multiply two numbers"
+ GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description")
+
+ // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name"
+ // semantic conventions. It represents the name of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flights"
+ GenAIToolNameKey = attribute.Key("gen_ai.tool.name")
+
+ // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type"
+ // semantic conventions. It represents the type of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "function", "extension", "datastore"
+ // Note: Extension: A tool executed on the agent-side to directly call external
+ // APIs, bridging the gap between the agent and real-world systems.
+ // Agent-side operations involve actions that are performed by the agent on the
+ // server or within the agent's controlled environment.
+ // Function: A tool executed on the client-side, where the agent generates
+ // parameters for a predefined function, and the client executes the logic.
+ // Client-side operations are actions taken on the user's end or within the
+ // client application.
+ // Datastore: A tool used by the agent to access and query structured or
+ // unstructured external data for retrieval-augmented tasks or knowledge
+ // updates.
+ GenAIToolTypeKey = attribute.Key("gen_ai.tool.type")
+
+ // GenAIUsageInputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of
+ // tokens used in the GenAI input (prompt).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens")
+
+ // GenAIUsageOutputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.output_tokens" semantic conventions. It represents the number
+ // of tokens used in the GenAI response (completion).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 180
+ GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens")
+)
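+
+// Usage sketch (illustrative only, not generated from the conventions): the
+// keys above, together with the helpers and enum values defined later in this
+// file, can be combined to annotate a GenAI client span. The tracer name, span
+// name, attribute values, and the abbreviated chat-history JSON below are
+// assumptions; `ctx` and `otel` (go.opentelemetry.io/otel) are assumed to be
+// available in the caller.
+//
+//	ctx, span := otel.Tracer("example").Start(ctx, "chat gpt-4")
+//	defer span.End()
+//	span.SetAttributes(
+//		GenAIOperationNameChat,  // enum value declared later in this file
+//		GenAIProviderNameOpenAI, // enum value declared later in this file
+//		GenAIRequestModel("gpt-4"),
+//		GenAIRequestTemperature(0.2),
+//		GenAIUsageInputTokens(100),
+//		GenAIUsageOutputTokens(180),
+//		// Structured form is preferred; a JSON string is the fallback
+//		// permitted on spans by the gen_ai.input.messages note above.
+//		GenAIInputMessagesKey.String(`[{"role":"user","parts":[{"type":"text","content":"Weather in Paris?"}]}]`),
+//	)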
+
+// GenAIAgentDescription returns an attribute KeyValue conforming to the
+// "gen_ai.agent.description" semantic conventions. It represents the free-form
+// description of the GenAI agent provided by the application.
+func GenAIAgentDescription(val string) attribute.KeyValue {
+ return GenAIAgentDescriptionKey.String(val)
+}
+
+// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id"
+// semantic conventions. It represents the unique identifier of the GenAI agent.
+func GenAIAgentID(val string) attribute.KeyValue {
+ return GenAIAgentIDKey.String(val)
+}
+
+// GenAIAgentName returns an attribute KeyValue conforming to the
+// "gen_ai.agent.name" semantic conventions. It represents the human-readable
+// name of the GenAI agent provided by the application.
+func GenAIAgentName(val string) attribute.KeyValue {
+ return GenAIAgentNameKey.String(val)
+}
+
+// GenAIConversationID returns an attribute KeyValue conforming to the
+// "gen_ai.conversation.id" semantic conventions. It represents the unique
+// identifier for a conversation (session, thread), used to store and correlate
+// messages within this conversation.
+func GenAIConversationID(val string) attribute.KeyValue {
+ return GenAIConversationIDKey.String(val)
+}
+
+// GenAIDataSourceID returns an attribute KeyValue conforming to the
+// "gen_ai.data_source.id" semantic conventions. It represents the data source
+// identifier.
+func GenAIDataSourceID(val string) attribute.KeyValue {
+ return GenAIDataSourceIDKey.String(val)
+}
+
+// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the
+// "gen_ai.request.choice.count" semantic conventions. It represents the target
+// number of candidate completions to return.
+func GenAIRequestChoiceCount(val int) attribute.KeyValue {
+ return GenAIRequestChoiceCountKey.Int(val)
+}
+
+// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the
+// "gen_ai.request.encoding_formats" semantic conventions. It represents the
+// encoding formats requested in an embeddings operation, if specified.
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue {
+ return GenAIRequestEncodingFormatsKey.StringSlice(val)
+}
+
+// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+// frequency penalty setting for the GenAI request.
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue {
+ return GenAIRequestFrequencyPenaltyKey.Float64(val)
+}
+
+// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the model generates for a request.
+func GenAIRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAIRequestMaxTokensKey.Int(val)
+}
+
+// GenAIRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// GenAI model a request is being made to.
+func GenAIRequestModel(val string) attribute.KeyValue {
+ return GenAIRequestModelKey.String(val)
+}
+
+// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+// presence penalty setting for the GenAI request.
+func GenAIRequestPresencePenalty(val float64) attribute.KeyValue {
+ return GenAIRequestPresencePenaltyKey.Float64(val)
+}
+
+// GenAIRequestSeed returns an attribute KeyValue conforming to the
+// "gen_ai.request.seed" semantic conventions. It represents the requests with
+// same seed value more likely to return same result.
+func GenAIRequestSeed(val int) attribute.KeyValue {
+ return GenAIRequestSeedKey.Int(val)
+}
+
+// GenAIRequestStopSequences returns an attribute KeyValue conforming to the
+// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+// of sequences that the model will use to stop generating further tokens.
+func GenAIRequestStopSequences(val ...string) attribute.KeyValue {
+ return GenAIRequestStopSequencesKey.StringSlice(val)
+}
+
+// GenAIRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the GenAI request.
+func GenAIRequestTemperature(val float64) attribute.KeyValue {
+ return GenAIRequestTemperatureKey.Float64(val)
+}
+
+// GenAIRequestTopK returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+// setting for the GenAI request.
+func GenAIRequestTopK(val float64) attribute.KeyValue {
+ return GenAIRequestTopKKey.Float64(val)
+}
+
+// GenAIRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+// setting for the GenAI request.
+func GenAIRequestTopP(val float64) attribute.KeyValue {
+ return GenAIRequestTopPKey.Float64(val)
+}
+
+// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the
+// "gen_ai.response.finish_reasons" semantic conventions. It represents the array
+// of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAIResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAIResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAIResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique identifier
+// for the completion.
+func GenAIResponseID(val string) attribute.KeyValue {
+ return GenAIResponseIDKey.String(val)
+}
+
+// GenAIResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// model that generated the response.
+func GenAIResponseModel(val string) attribute.KeyValue {
+ return GenAIResponseModelKey.String(val)
+}
+
+// GenAIToolCallID returns an attribute KeyValue conforming to the
+// "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+// identifier.
+func GenAIToolCallID(val string) attribute.KeyValue {
+ return GenAIToolCallIDKey.String(val)
+}
+
+// GenAIToolDescription returns an attribute KeyValue conforming to the
+// "gen_ai.tool.description" semantic conventions. It represents the tool
+// description.
+func GenAIToolDescription(val string) attribute.KeyValue {
+ return GenAIToolDescriptionKey.String(val)
+}
+
+// GenAIToolName returns an attribute KeyValue conforming to the
+// "gen_ai.tool.name" semantic conventions. It represents the name of the tool
+// utilized by the agent.
+func GenAIToolName(val string) attribute.KeyValue {
+ return GenAIToolNameKey.String(val)
+}
+
+// GenAIToolType returns an attribute KeyValue conforming to the
+// "gen_ai.tool.type" semantic conventions. It represents the type of the tool
+// utilized by the agent.
+func GenAIToolType(val string) attribute.KeyValue {
+ return GenAIToolTypeKey.String(val)
+}
+
+// GenAIUsageInputTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of
+// tokens used in the GenAI input (prompt).
+func GenAIUsageInputTokens(val int) attribute.KeyValue {
+ return GenAIUsageInputTokensKey.Int(val)
+}
+
+// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of
+// tokens used in the GenAI response (completion).
+func GenAIUsageOutputTokens(val int) attribute.KeyValue {
+ return GenAIUsageOutputTokensKey.Int(val)
+}
+
+// Enum values for gen_ai.operation.name
+var (
+ // Chat completion operation such as [OpenAI Chat API]
+ // Stability: development
+ //
+ // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat
+ GenAIOperationNameChat = GenAIOperationNameKey.String("chat")
+ // Multimodal content generation operation such as [Gemini Generate Content]
+ // Stability: development
+ //
+ // [Gemini Generate Content]: https://ai.google.dev/api/generate-content
+ GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content")
+ // Text completions operation such as [OpenAI Completions API (Legacy)]
+ // Stability: development
+ //
+ // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions
+ GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion")
+ // Embeddings operation such as [OpenAI Create embeddings API]
+ // Stability: development
+ //
+ // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create
+ GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings")
+ // Create GenAI agent
+ // Stability: development
+ GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent")
+ // Invoke GenAI agent
+ // Stability: development
+ GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent")
+ // Execute a tool
+ // Stability: development
+ GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool")
+)
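+
+// Usage sketch (illustrative only): an operation enum value typically pairs
+// with the matching request or tool attributes on the same span. The span name
+// and tool values below are assumptions; `ctx` and the tracer setup are as in
+// the sketch above.
+//
+//	ctx, span := otel.Tracer("example").Start(ctx, "execute_tool get_weather")
+//	defer span.End()
+//	span.SetAttributes(
+//		GenAIOperationNameExecuteTool,
+//		GenAIToolName("get_weather"),
+//		GenAIToolType("function"),
+//		GenAIToolCallID("call_mszuSIzqtI65i1wAUOE8w5H4"),
+//	)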
+
+// Enum values for gen_ai.output.type
+var (
+ // Plain text
+ // Stability: development
+ GenAIOutputTypeText = GenAIOutputTypeKey.String("text")
+ // JSON object with known or unknown schema
+ // Stability: development
+ GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json")
+ // Image
+ // Stability: development
+ GenAIOutputTypeImage = GenAIOutputTypeKey.String("image")
+ // Speech
+ // Stability: development
+ GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech")
+)
+
+// Enum values for gen_ai.provider.name
+var (
+ // [OpenAI]
+ // Stability: development
+ //
+ // [OpenAI]: https://openai.com/
+ GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai")
+ // Any Google generative AI endpoint
+ // Stability: development
+ GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai")
+ // [Vertex AI]
+ // Stability: development
+ //
+ // [Vertex AI]: https://cloud.google.com/vertex-ai
+ GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai")
+ // [Gemini]
+ // Stability: development
+ //
+ // [Gemini]: https://cloud.google.com/products/gemini
+ GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini")
+ // [Anthropic]
+ // Stability: development
+ //
+ // [Anthropic]: https://www.anthropic.com/
+ GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic")
+ // [Cohere]
+ // Stability: development
+ //
+ // [Cohere]: https://cohere.com/
+ GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere")
+ // Azure AI Inference
+ // Stability: development
+ GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference")
+ // [Azure OpenAI]
+ // Stability: development
+ //
+ // [Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/
+ GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai")
+ // [IBM Watsonx AI]
+ // Stability: development
+ //
+ // [IBM Watsonx AI]: https://www.ibm.com/products/watsonx-ai
+ GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai")
+ // [AWS Bedrock]
+ // Stability: development
+ //
+ // [AWS Bedrock]: https://aws.amazon.com/bedrock
+ GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock")
+ // [Perplexity]
+ // Stability: development
+ //
+ // [Perplexity]: https://www.perplexity.ai/
+ GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity")
+ // [xAI]
+ // Stability: development
+ //
+ // [xAI]: https://x.ai/
+ GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai")
+ // [DeepSeek]
+ // Stability: development
+ //
+ // [DeepSeek]: https://www.deepseek.com/
+ GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek")
+ // [Groq]
+ // Stability: development
+ //
+ // [Groq]: https://groq.com/
+ GenAIProviderNameGroq = GenAIProviderNameKey.String("groq")
+ // [Mistral AI]
+ // Stability: development
+ //
+ // [Mistral AI]: https://mistral.ai/
+ GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai")
+)
+
+// Enum values for gen_ai.token.type
+var (
+ // Input tokens (prompt, input, etc.)
+ // Stability: development
+ GenAITokenTypeInput = GenAITokenTypeKey.String("input")
+ // Output tokens (completion, response, etc.)
+ // Stability: development
+ GenAITokenTypeOutput = GenAITokenTypeKey.String("output")
+)
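+
+// Usage sketch (illustrative only): the token type enum values are meant to be
+// recorded alongside token counts, for example as metric attributes. The
+// instrument name below mirrors the GenAI client metrics conventions but is
+// shown purely for illustration; `ctx`, go.opentelemetry.io/otel, and
+// go.opentelemetry.io/otel/metric are assumed.
+//
+//	meter := otel.Meter("example")
+//	tokenUsage, _ := meter.Int64Histogram("gen_ai.client.token.usage") // error handling omitted
+//	tokenUsage.Record(ctx, 100,
+//		metric.WithAttributes(GenAITokenTypeInput, GenAIRequestModel("gpt-4")))
+//	tokenUsage.Record(ctx, 180,
+//		metric.WithAttributes(GenAITokenTypeOutput, GenAIRequestModel("gpt-4")))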
+
+// Namespace: geo
+const (
+ // GeoContinentCodeKey is the attribute Key conforming to the
+ // "geo.continent.code" semantic conventions. It represents the two-letter code
+ // representing the continent’s name.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ GeoContinentCodeKey = attribute.Key("geo.continent.code")
+
+ // GeoCountryISOCodeKey is the attribute Key conforming to the
+ // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+ // Country Code ([ISO 3166-1 alpha2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA"
+ //
+ // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+ GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code")
+
+ // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name"
+ // semantic conventions. It represents the locality name. Represents the name of
+ // a city, town, village, or similar populated place.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Montreal", "Berlin"
+ GeoLocalityNameKey = attribute.Key("geo.locality.name")
+
+ // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat"
+ // semantic conventions. It represents the latitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 45.505918
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLatKey = attribute.Key("geo.location.lat")
+
+ // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon"
+ // semantic conventions. It represents the longitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -73.61483
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLonKey = attribute.Key("geo.location.lon")
+
+ // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code"
+ // semantic conventions. It represents the postal code associated with the
+ // location. Values appropriate for this field may also be known as a postcode
+ // or ZIP code and will vary widely from country to country.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "94040"
+ GeoPostalCodeKey = attribute.Key("geo.postal_code")
+
+ // GeoRegionISOCodeKey is the attribute Key conforming to the
+ // "geo.region.iso_code" semantic conventions. It represents the region ISO code
+ // ([ISO 3166-2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA-QC"
+ //
+ // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2
+ GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code")
+)
+
+// GeoCountryISOCode returns an attribute KeyValue conforming to the
+// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+// Country Code ([ISO 3166-1 alpha2]).
+//
+// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+func GeoCountryISOCode(val string) attribute.KeyValue {
+ return GeoCountryISOCodeKey.String(val)
+}
+
+// GeoLocalityName returns an attribute KeyValue conforming to the
+// "geo.locality.name" semantic conventions. It represents the locality name.
+// Represents the name of a city, town, village, or similar populated place.
+func GeoLocalityName(val string) attribute.KeyValue {
+ return GeoLocalityNameKey.String(val)
+}
+
+// GeoLocationLat returns an attribute KeyValue conforming to the
+// "geo.location.lat" semantic conventions. It represents the latitude of the geo
+// location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLat(val float64) attribute.KeyValue {
+ return GeoLocationLatKey.Float64(val)
+}
+
+// GeoLocationLon returns an attribute KeyValue conforming to the
+// "geo.location.lon" semantic conventions. It represents the longitude of the
+// geo location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLon(val float64) attribute.KeyValue {
+ return GeoLocationLonKey.Float64(val)
+}
+
+// GeoPostalCode returns an attribute KeyValue conforming to the
+// "geo.postal_code" semantic conventions. It represents the postal code
+// associated with the location. Values appropriate for this field may also be
+// known as a postcode or ZIP code and will vary widely from country to country.
+func GeoPostalCode(val string) attribute.KeyValue {
+ return GeoPostalCodeKey.String(val)
+}
+
+// GeoRegionISOCode returns an attribute KeyValue conforming to the
+// "geo.region.iso_code" semantic conventions. It represents the region ISO code
+// ([ISO 3166-2]).
+//
+// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2
+func GeoRegionISOCode(val string) attribute.KeyValue {
+ return GeoRegionISOCodeKey.String(val)
+}
+
+// Enum values for geo.continent.code
+var (
+ // Africa
+ // Stability: development
+ GeoContinentCodeAf = GeoContinentCodeKey.String("AF")
+ // Antarctica
+ // Stability: development
+ GeoContinentCodeAn = GeoContinentCodeKey.String("AN")
+ // Asia
+ // Stability: development
+ GeoContinentCodeAs = GeoContinentCodeKey.String("AS")
+ // Europe
+ // Stability: development
+ GeoContinentCodeEu = GeoContinentCodeKey.String("EU")
+ // North America
+ // Stability: development
+ GeoContinentCodeNa = GeoContinentCodeKey.String("NA")
+ // Oceania
+ // Stability: development
+ GeoContinentCodeOc = GeoContinentCodeKey.String("OC")
+ // South America
+ // Stability: development
+ GeoContinentCodeSa = GeoContinentCodeKey.String("SA")
+)
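+
+// Usage sketch (illustrative only): the geo attributes are typically attached
+// together, for example on a span or log record describing a resolved
+// location. The values below are taken from the examples above; `span` is
+// assumed to be an active trace.Span.
+//
+//	span.SetAttributes(
+//		GeoContinentCodeNa,
+//		GeoCountryISOCode("CA"),
+//		GeoRegionISOCode("CA-QC"),
+//		GeoLocalityName("Montreal"),
+//		GeoLocationLat(45.505918),
+//		GeoLocationLon(-73.61483),
+//	)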
+
+// Namespace: go
+const (
+ // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type"
+ // semantic conventions. It represents the type of memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "other", "stack"
+ GoMemoryTypeKey = attribute.Key("go.memory.type")
+)
+
+// Enum values for go.memory.type
+var (
+ // Memory allocated from the heap that is reserved for stack space, whether or
+ // not it is currently in-use.
+ // Stability: development
+ GoMemoryTypeStack = GoMemoryTypeKey.String("stack")
+ // Memory used by the Go runtime, excluding other categories of memory usage
+ // described in this enumeration.
+ // Stability: development
+ GoMemoryTypeOther = GoMemoryTypeKey.String("other")
+)
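+
+// Usage sketch (illustrative only): go.memory.type is intended as an attribute
+// on Go runtime memory metrics. The instrument name below mirrors the Go
+// runtime metrics conventions but is an assumption here; `ctx`, otel, and the
+// metric package are assumed as in earlier sketches.
+//
+//	memUsed, _ := otel.Meter("example").Int64UpDownCounter("go.memory.used") // error handling omitted
+//	memUsed.Add(ctx, 4096, metric.WithAttributes(GoMemoryTypeStack))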
+
+// Namespace: graphql
+const (
+ // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document"
+ // semantic conventions. It represents the GraphQL document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: query findBookById { bookById(id: ?) { name } }
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphQLDocumentKey = attribute.Key("graphql.document")
+
+ // GraphQLOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of the
+ // operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: findBookById
+ GraphQLOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphQLOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of the
+ // operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "query", "mutation", "subscription"
+ GraphQLOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+// GraphQLDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphQLDocument(val string) attribute.KeyValue {
+ return GraphQLDocumentKey.String(val)
+}
+
+// GraphQLOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphQLOperationName(val string) attribute.KeyValue {
+ return GraphQLOperationNameKey.String(val)
+}
+
+// Enum values for graphql.operation.type
+var (
+ // GraphQL query
+ // Stability: development
+ GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query")
+ // GraphQL mutation
+ // Stability: development
+ GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ // Stability: development
+ GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription")
+)
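+
+// Usage sketch (illustrative only): a GraphQL server instrumentation would
+// usually set the operation type, name, and (possibly sanitized) document on
+// the same span. Values are taken from the examples above; `span` is assumed
+// to be an active trace.Span.
+//
+//	span.SetAttributes(
+//		GraphQLOperationTypeQuery,
+//		GraphQLOperationName("findBookById"),
+//		GraphQLDocument("query findBookById { bookById(id: ?) { name } }"),
+//	)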
+
+// Namespace: heroku
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da"
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit hash
+ // for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "e6134959463efd8966b20e75b913cafe3f5ec"
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents the
+ // time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2022-10-23T18:00:42Z"
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id"
+// semantic conventions. It represents the unique identifier for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the
+// "heroku.release.creation_timestamp" semantic conventions. It represents the
+// time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
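+
+// Usage sketch (illustrative only): the heroku attributes are resource-level,
+// so they would normally be attached to an SDK resource. This assumes the
+// go.opentelemetry.io/otel/sdk/resource package and a package-level SchemaURL
+// constant declared alongside these conventions; values are from the examples
+// above.
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL, // assumed to be declared elsewhere in this package
+//		HerokuAppID("2daa2797-e42b-4624-9322-ec3f968df4da"),
+//		HerokuReleaseCommit("e6134959463efd8966b20e75b913cafe3f5ec"),
+//		HerokuReleaseCreationTimestamp("2022-10-23T18:00:42Z"),
+//	)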
+
+// Namespace: host
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is running
+ // on.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+ // level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family"
+ // semantic conventions. It represents the family or generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6", "PA-RISC 1.1e"
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id"
+ // semantic conventions. It represents the model identifier. It provides more
+ // granular information about the CPU, distinguishing it from other CPUs within
+ // the same family.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6", "9000/778/B180L"
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping"
+ // semantic conventions. It represents the stepping or core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1", "r1p1"
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "GenuineIntel"
+ // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX
+ // registers. Writing these to memory in this order results in a 12-character
+ // string.
+ //
+ // [CPUID]: https://wiki.osdev.org/CPUID
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be the
+ // instance_id assigned by the cloud provider. For non-containerized systems,
+ // this should be the `machine-id`. See the table below for the sources to use
+ // to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fdbf79e8af94cb7f9e8df36789187052"
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID. For
+ // Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ami-07b06b442921831e5"
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the "host.image.name"
+ // semantic conventions. It represents the name of the VM image or OS install
+ // the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905"
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version string
+ // of the VM image or host OS as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host, excluding
+ // loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e"
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC 5952] format.
+ //
+ // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host, excluding
+ // loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F"
+ // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as
+ // hyphen-separated octets in uppercase hexadecimal form from most to least
+ // significant.
+ //
+ // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified hostname,
+ // or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-test"
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "n1-standard-1"
+ HostTypeKey = attribute.Key("host.type")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or generation
+// of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model identifier.
+// It provides more granular information about the CPU, distinguishing it from
+// other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use to
+// determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the "host.image.id"
+// semantic conventions. It represents the VM image ID or host OS image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM image
+// or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string of
+// the VM image or host OS as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic
+// conventions. It represents the available MAC addresses of the host, excluding
+// loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name" semantic
+// conventions. It represents the name of the host. On Unix systems, it may
+// contain what the hostname command returns, or the fully qualified hostname, or
+// another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type" semantic
+// conventions. It represents the type of host. For Cloud, this must be the
+// machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Enum values for host.arch
+var (
+ // AMD64
+ // Stability: development
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ // Stability: development
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ // Stability: development
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ // Stability: development
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ // Stability: development
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ // Stability: development
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ // Stability: development
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ // Stability: development
+ HostArchX86 = HostArchKey.String("x86")
+)
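+
+// Usage sketch (illustrative only): host attributes are resource-level and can
+// be combined with the host.arch enum values above. As in the heroku sketch,
+// go.opentelemetry.io/otel/sdk/resource and a package-level SchemaURL constant
+// are assumed; values are from the examples above.
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		HostName("opentelemetry-test"),
+//		HostArchAMD64,
+//		HostType("n1-standard-1"),
+//		HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"),
+//	)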
+
+// Namespace: http
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of the
+ // HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "idle"
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of the
+ // request payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP request
+ // method.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GET", "POST", "HEAD"
+ // Note: HTTP request method value SHOULD be "known" to the instrumentation.
+ // By default, this convention defines "known" methods as the ones listed in
+ // [RFC9110]
+ // and the PATCH method defined in [RFC5789].
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set the
+ // `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of
+ // case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known method, it is not a
+ // list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods to
+ // be case-insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
+ //
+ // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods
+ // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GeT", "ACL", "foo"
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network issues,
+ // or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size"
+ // semantic conventions. It represents the total size of the request in bytes.
+ // This should be the total number of bytes sent over the wire, including the
+ // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request
+ // body if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size of the
+ // response payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size of
+ // the response in bytes. This should be the total number of bytes sent over the
+ // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+ // headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status code].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 200
+ //
+ // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic
+ // conventions. It represents the matched route, that is, the path template in
+ // the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/users/:userID?", "{controller}/{action}/{id?}"
+ // Note: MUST NOT be populated when this is not supported by the HTTP server
+ // framework as the route attribute should have low-cardinality and the URI path
+ // can NOT substitute it.
+ // SHOULD include the [application root] if there is one.
+ //
+ // [application root]: /docs/http/http-spans.md#http-server-definitions
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestHeader returns an attribute KeyValue conforming to the
+// "http.request.header" semantic conventions. It represents the HTTP request
+// headers, `<key>` being the normalized HTTP header name (lowercase), the
+// value being the header values.
+func HTTPRequestHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.request.header."+key, val)
+}
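+
+// For illustration only (not part of the generated conventions), a call like
+//
+//	HTTPRequestHeader("content-type", "application/json")
+//
+// produces an attribute with key "http.request.header.content-type" and the
+// string-slice value ["application/json"].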
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of the
+// request in bytes. This should be the total number of bytes sent over the wire,
+// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers,
+// and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of the
+// response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseHeader returns an attribute KeyValue conforming to the
+// "http.response.header" semantic conventions. It represents the HTTP response
+// headers, `<key>` being the normalized HTTP header name (lowercase), the
+// value being the header values.
+func HTTPResponseHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.response.header."+key, val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of the
+// response in bytes. This should be the total number of bytes sent over the
+// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
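+
+// Illustrative sketch (not generated code): how a server instrumentation
+// might attach the stable HTTP span attributes defined above, assuming this
+// package is imported as `semconv`, `span` is a trace.Span, and `r` is a
+// *net/http.Request (these names are assumptions for the example):
+//
+//	span.SetAttributes(
+//		semconv.HTTPRequestMethodGet,                      // "http.request.method"
+//		semconv.HTTPRoute("/users/{id}"),                  // matched route template
+//		semconv.HTTPResponseStatusCode(200),               // "http.response.status_code"
+//		semconv.HTTPRequestBodySize(int(r.ContentLength)), // if the size is known
+//	)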
+
+// Enum values for http.connection.state
+var (
+ // active state.
+ // Stability: development
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state.
+ // Stability: development
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+// Enum values for http.request.method
+var (
+ // CONNECT method.
+ // Stability: stable
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method.
+ // Stability: stable
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method.
+ // Stability: stable
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method.
+ // Stability: stable
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method.
+ // Stability: stable
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method.
+ // Stability: stable
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method.
+ // Stability: stable
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method.
+ // Stability: stable
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method.
+ // Stability: stable
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of.
+ // Stability: stable
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
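+
+// A minimal sketch (not generated code) of the normalization described in the
+// http.request.method note: report a known method directly, otherwise report
+// `_OTHER` together with the original value. The helper name is illustrative.
+//
+//	func requestMethodAttrs(method string) []attribute.KeyValue {
+//		known := map[string]attribute.KeyValue{
+//			"CONNECT": HTTPRequestMethodConnect, "DELETE": HTTPRequestMethodDelete,
+//			"GET": HTTPRequestMethodGet, "HEAD": HTTPRequestMethodHead,
+//			"OPTIONS": HTTPRequestMethodOptions, "PATCH": HTTPRequestMethodPatch,
+//			"POST": HTTPRequestMethodPost, "PUT": HTTPRequestMethodPut,
+//			"TRACE": HTTPRequestMethodTrace,
+//		}
+//		if kv, ok := known[method]; ok {
+//			return []attribute.KeyValue{kv}
+//		}
+//		// Unknown method: fall back to _OTHER and keep the original value.
+//		return []attribute.KeyValue{HTTPRequestMethodOther, HTTPRequestMethodOriginal(method)}
+//	}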
+
+// Namespace: hw
+const (
+ // HwBatteryCapacityKey is the attribute Key conforming to the
+ // "hw.battery.capacity" semantic conventions. It represents the design capacity
+ // in Watt-hours or Ampere-hours.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9.3Ah", "50Wh"
+ HwBatteryCapacityKey = attribute.Key("hw.battery.capacity")
+
+ // HwBatteryChemistryKey is the attribute Key conforming to the
+ // "hw.battery.chemistry" semantic conventions. It represents the battery
+ // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Li-ion", "NiMH"
+ //
+ // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html
+ HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry")
+
+ // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state"
+ // semantic conventions. It represents the current state of the battery.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwBatteryStateKey = attribute.Key("hw.battery.state")
+
+ // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version"
+ // semantic conventions. It represents the BIOS version of the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2.3"
+ HwBiosVersionKey = attribute.Key("hw.bios_version")
+
+ // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version"
+ // semantic conventions. It represents the driver version for the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10.2.1-3"
+ HwDriverVersionKey = attribute.Key("hw.driver_version")
+
+ // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type"
+ // semantic conventions. It represents the type of the enclosure (useful for
+ // modular systems).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Computer", "Storage", "Switch"
+ HwEnclosureTypeKey = attribute.Key("hw.enclosure.type")
+
+ // HwFirmwareVersionKey is the attribute Key conforming to the
+ // "hw.firmware_version" semantic conventions. It represents the firmware
+ // version of the hardware component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0.1"
+ HwFirmwareVersionKey = attribute.Key("hw.firmware_version")
+
+ // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic
+ // conventions. It represents the type of task the GPU is performing.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwGpuTaskKey = attribute.Key("hw.gpu.task")
+
+ // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions.
+ // It represents an identifier for the hardware component, unique within the
+ // monitored host.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "win32battery_battery_testsysa33_1"
+ HwIDKey = attribute.Key("hw.id")
+
+ // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type"
+ // semantic conventions. It represents the type of limit for hardware
+ // components.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwLimitTypeKey = attribute.Key("hw.limit_type")
+
+ // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the
+ // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID
+ // Level of the logical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "RAID0+1", "RAID5", "RAID10"
+ HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level")
+
+ // HwLogicalDiskStateKey is the attribute Key conforming to the
+ // "hw.logical_disk.state" semantic conventions. It represents the state of the
+ // logical disk space usage.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state")
+
+ // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type"
+ // semantic conventions. It represents the type of the memory module.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "DDR4", "DDR5", "LPDDR5"
+ HwMemoryTypeKey = attribute.Key("hw.memory.type")
+
+ // HwModelKey is the attribute Key conforming to the "hw.model" semantic
+ // conventions. It represents the descriptive model name of the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery"
+ HwModelKey = attribute.Key("hw.model")
+
+ // HwNameKey is the attribute Key conforming to the "hw.name" semantic
+ // conventions. It represents an easily-recognizable name for the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "eth0"
+ HwNameKey = attribute.Key("hw.name")
+
+ // HwNetworkLogicalAddressesKey is the attribute Key conforming to the
+ // "hw.network.logical_addresses" semantic conventions. It represents the
+ // logical addresses of the adapter (e.g. IP address, or WWPN).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "172.16.8.21", "57.11.193.42"
+ HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses")
+
+ // HwNetworkPhysicalAddressKey is the attribute Key conforming to the
+ // "hw.network.physical_address" semantic conventions. It represents the
+ // physical address of the adapter (e.g. MAC address, or WWNN).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00-90-F5-E9-7B-36"
+ HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address")
+
+ // HwParentKey is the attribute Key conforming to the "hw.parent" semantic
+ // conventions. It represents the unique identifier of the parent component
+ // (typically the `hw.id` attribute of the enclosure, or disk controller).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "dellStorage_perc_0"
+ HwParentKey = attribute.Key("hw.parent")
+
+ // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the
+ // "hw.physical_disk.smart_attribute" semantic conventions. It represents the
+ // [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute
+ // of the physical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate"
+ //
+ // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T.
+ HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute")
+
+ // HwPhysicalDiskStateKey is the attribute Key conforming to the
+ // "hw.physical_disk.state" semantic conventions. It represents the state of the
+ // physical disk endurance utilization.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state")
+
+ // HwPhysicalDiskTypeKey is the attribute Key conforming to the
+ // "hw.physical_disk.type" semantic conventions. It represents the type of the
+ // physical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "HDD", "SSD", "10K"
+ HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type")
+
+ // HwSensorLocationKey is the attribute Key conforming to the
+ // "hw.sensor_location" semantic conventions. It represents the location of the
+ // sensor.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0
+ // V3_3", "MAIN_12V", "CPU_VCORE"
+ HwSensorLocationKey = attribute.Key("hw.sensor_location")
+
+ // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number"
+ // semantic conventions. It represents the serial number of the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CNFCP0123456789"
+ HwSerialNumberKey = attribute.Key("hw.serial_number")
+
+ // HwStateKey is the attribute Key conforming to the "hw.state" semantic
+ // conventions. It represents the current state of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwStateKey = attribute.Key("hw.state")
+
+ // HwTapeDriveOperationTypeKey is the attribute Key conforming to the
+ // "hw.tape_drive.operation_type" semantic conventions. It represents the type
+ // of tape drive operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type")
+
+ // HwTypeKey is the attribute Key conforming to the "hw.type" semantic
+ // conventions. It represents the type of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: Describes the category of the hardware component for which `hw.state`
+ // is being reported. For example, `hw.type=temperature` along with
+ // `hw.state=degraded` would indicate that the temperature of the hardware
+ // component has been reported as `degraded`.
+ HwTypeKey = attribute.Key("hw.type")
+
+ // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic
+ // conventions. It represents the vendor name of the hardware component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo"
+ HwVendorKey = attribute.Key("hw.vendor")
+)
+
+// HwBatteryCapacity returns an attribute KeyValue conforming to the
+// "hw.battery.capacity" semantic conventions. It represents the design capacity
+// in Watt-hours or Ampere-hours.
+func HwBatteryCapacity(val string) attribute.KeyValue {
+ return HwBatteryCapacityKey.String(val)
+}
+
+// HwBatteryChemistry returns an attribute KeyValue conforming to the
+// "hw.battery.chemistry" semantic conventions. It represents the battery
+// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc.
+//
+// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html
+func HwBatteryChemistry(val string) attribute.KeyValue {
+ return HwBatteryChemistryKey.String(val)
+}
+
+// HwBiosVersion returns an attribute KeyValue conforming to the
+// "hw.bios_version" semantic conventions. It represents the BIOS version of the
+// hardware component.
+func HwBiosVersion(val string) attribute.KeyValue {
+ return HwBiosVersionKey.String(val)
+}
+
+// HwDriverVersion returns an attribute KeyValue conforming to the
+// "hw.driver_version" semantic conventions. It represents the driver version for
+// the hardware component.
+func HwDriverVersion(val string) attribute.KeyValue {
+ return HwDriverVersionKey.String(val)
+}
+
+// HwEnclosureType returns an attribute KeyValue conforming to the
+// "hw.enclosure.type" semantic conventions. It represents the type of the
+// enclosure (useful for modular systems).
+func HwEnclosureType(val string) attribute.KeyValue {
+ return HwEnclosureTypeKey.String(val)
+}
+
+// HwFirmwareVersion returns an attribute KeyValue conforming to the
+// "hw.firmware_version" semantic conventions. It represents the firmware version
+// of the hardware component.
+func HwFirmwareVersion(val string) attribute.KeyValue {
+ return HwFirmwareVersionKey.String(val)
+}
+
+// HwID returns an attribute KeyValue conforming to the "hw.id" semantic
+// conventions. It represents an identifier for the hardware component, unique
+// within the monitored host.
+func HwID(val string) attribute.KeyValue {
+ return HwIDKey.String(val)
+}
+
+// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the
+// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID
+// Level of the logical disk.
+func HwLogicalDiskRaidLevel(val string) attribute.KeyValue {
+ return HwLogicalDiskRaidLevelKey.String(val)
+}
+
+// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type"
+// semantic conventions. It represents the type of the memory module.
+func HwMemoryType(val string) attribute.KeyValue {
+ return HwMemoryTypeKey.String(val)
+}
+
+// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic
+// conventions. It represents the descriptive model name of the hardware
+// component.
+func HwModel(val string) attribute.KeyValue {
+ return HwModelKey.String(val)
+}
+
+// HwName returns an attribute KeyValue conforming to the "hw.name" semantic
+// conventions. It represents an easily-recognizable name for the hardware
+// component.
+func HwName(val string) attribute.KeyValue {
+ return HwNameKey.String(val)
+}
+
+// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the
+// "hw.network.logical_addresses" semantic conventions. It represents the logical
+// addresses of the adapter (e.g. IP address, or WWPN).
+func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue {
+ return HwNetworkLogicalAddressesKey.StringSlice(val)
+}
+
+// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the
+// "hw.network.physical_address" semantic conventions. It represents the physical
+// address of the adapter (e.g. MAC address, or WWNN).
+func HwNetworkPhysicalAddress(val string) attribute.KeyValue {
+ return HwNetworkPhysicalAddressKey.String(val)
+}
+
+// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic
+// conventions. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func HwParent(val string) attribute.KeyValue {
+ return HwParentKey.String(val)
+}
+
+// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the
+// "hw.physical_disk.smart_attribute" semantic conventions. It represents the
+// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute
+// of the physical disk.
+//
+// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T.
+func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue {
+ return HwPhysicalDiskSmartAttributeKey.String(val)
+}
+
+// HwPhysicalDiskType returns an attribute KeyValue conforming to the
+// "hw.physical_disk.type" semantic conventions. It represents the type of the
+// physical disk.
+func HwPhysicalDiskType(val string) attribute.KeyValue {
+ return HwPhysicalDiskTypeKey.String(val)
+}
+
+// HwSensorLocation returns an attribute KeyValue conforming to the
+// "hw.sensor_location" semantic conventions. It represents the location of the
+// sensor.
+func HwSensorLocation(val string) attribute.KeyValue {
+ return HwSensorLocationKey.String(val)
+}
+
+// HwSerialNumber returns an attribute KeyValue conforming to the
+// "hw.serial_number" semantic conventions. It represents the serial number of
+// the hardware component.
+func HwSerialNumber(val string) attribute.KeyValue {
+ return HwSerialNumberKey.String(val)
+}
+
+// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic
+// conventions. It represents the vendor name of the hardware component.
+func HwVendor(val string) attribute.KeyValue {
+ return HwVendorKey.String(val)
+}
+
+// Enum values for hw.battery.state
+var (
+ // Charging
+ // Stability: development
+ HwBatteryStateCharging = HwBatteryStateKey.String("charging")
+ // Discharging
+ // Stability: development
+ HwBatteryStateDischarging = HwBatteryStateKey.String("discharging")
+)
+
+// Enum values for hw.gpu.task
+var (
+ // Decoder
+ // Stability: development
+ HwGpuTaskDecoder = HwGpuTaskKey.String("decoder")
+ // Encoder
+ // Stability: development
+ HwGpuTaskEncoder = HwGpuTaskKey.String("encoder")
+ // General
+ // Stability: development
+ HwGpuTaskGeneral = HwGpuTaskKey.String("general")
+)
+
+// Enum values for hw.limit_type
+var (
+ // Critical
+ // Stability: development
+ HwLimitTypeCritical = HwLimitTypeKey.String("critical")
+ // Degraded
+ // Stability: development
+ HwLimitTypeDegraded = HwLimitTypeKey.String("degraded")
+ // High Critical
+ // Stability: development
+ HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical")
+ // High Degraded
+ // Stability: development
+ HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded")
+ // Low Critical
+ // Stability: development
+ HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical")
+ // Low Degraded
+ // Stability: development
+ HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded")
+ // Maximum
+ // Stability: development
+ HwLimitTypeMax = HwLimitTypeKey.String("max")
+ // Throttled
+ // Stability: development
+ HwLimitTypeThrottled = HwLimitTypeKey.String("throttled")
+ // Turbo
+ // Stability: development
+ HwLimitTypeTurbo = HwLimitTypeKey.String("turbo")
+)
+
+// Enum values for hw.logical_disk.state
+var (
+ // Used
+ // Stability: development
+ HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used")
+ // Free
+ // Stability: development
+ HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free")
+)
+
+// Enum values for hw.physical_disk.state
+var (
+ // Remaining
+ // Stability: development
+ HwPhysicalDiskStateRemaining = HwPhysicalDiskStateKey.String("remaining")
+)
+
+// Enum values for hw.state
+var (
+ // Degraded
+ // Stability: development
+ HwStateDegraded = HwStateKey.String("degraded")
+ // Failed
+ // Stability: development
+ HwStateFailed = HwStateKey.String("failed")
+ // Needs Cleaning
+ // Stability: development
+ HwStateNeedsCleaning = HwStateKey.String("needs_cleaning")
+ // OK
+ // Stability: development
+ HwStateOk = HwStateKey.String("ok")
+ // Predicted Failure
+ // Stability: development
+ HwStatePredictedFailure = HwStateKey.String("predicted_failure")
+)
+
+// Enum values for hw.tape_drive.operation_type
+var (
+ // Mount
+ // Stability: development
+ HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount")
+ // Unmount
+ // Stability: development
+ HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount")
+ // Clean
+ // Stability: development
+ HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean")
+)
+
+// Enum values for hw.type
+var (
+ // Battery
+ // Stability: development
+ HwTypeBattery = HwTypeKey.String("battery")
+ // CPU
+ // Stability: development
+ HwTypeCPU = HwTypeKey.String("cpu")
+ // Disk controller
+ // Stability: development
+ HwTypeDiskController = HwTypeKey.String("disk_controller")
+ // Enclosure
+ // Stability: development
+ HwTypeEnclosure = HwTypeKey.String("enclosure")
+ // Fan
+ // Stability: development
+ HwTypeFan = HwTypeKey.String("fan")
+ // GPU
+ // Stability: development
+ HwTypeGpu = HwTypeKey.String("gpu")
+ // Logical disk
+ // Stability: development
+ HwTypeLogicalDisk = HwTypeKey.String("logical_disk")
+ // Memory
+ // Stability: development
+ HwTypeMemory = HwTypeKey.String("memory")
+ // Network
+ // Stability: development
+ HwTypeNetwork = HwTypeKey.String("network")
+ // Physical disk
+ // Stability: development
+ HwTypePhysicalDisk = HwTypeKey.String("physical_disk")
+ // Power supply
+ // Stability: development
+ HwTypePowerSupply = HwTypeKey.String("power_supply")
+ // Tape drive
+ // Stability: development
+ HwTypeTapeDrive = HwTypeKey.String("tape_drive")
+ // Temperature
+ // Stability: development
+ HwTypeTemperature = HwTypeKey.String("temperature")
+ // Voltage
+ // Stability: development
+ HwTypeVoltage = HwTypeKey.String("voltage")
+)
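+
+// A short illustrative sketch (not generated code) of the hw.type / hw.state
+// pairing described in the hw.type note above: reporting a degraded
+// temperature sensor, with example values for hw.id and hw.name.
+//
+//	attrs := []attribute.KeyValue{
+//		HwTypeTemperature, // hw.type=temperature
+//		HwStateDegraded,   // hw.state=degraded
+//		HwID("CPU0_DIE"),  // component identifier (example value)
+//		HwName("cpu0"),    // easily-recognizable name (example value)
+//	}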
+
+// Namespace: ios
+const (
+ // IOSAppStateKey is the attribute Key conforming to the "ios.app.state"
+ // semantic conventions. It represents the state of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The iOS lifecycle states are defined in the
+ // [UIApplicationDelegate documentation], from which the `OS terminology`
+ // column values are derived.
+ //
+ // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate
+ IOSAppStateKey = attribute.Key("ios.app.state")
+)
+
+// Enum values for ios.app.state
+var (
+ // The app has become `active`. Associated with UIKit notification
+ // `applicationDidBecomeActive`.
+ //
+ // Stability: development
+ IOSAppStateActive = IOSAppStateKey.String("active")
+ // The app is now `inactive`. Associated with UIKit notification
+ // `applicationWillResignActive`.
+ //
+ // Stability: development
+ IOSAppStateInactive = IOSAppStateKey.String("inactive")
+ // The app is now in the background. This value is associated with UIKit
+ // notification `applicationDidEnterBackground`.
+ //
+ // Stability: development
+ IOSAppStateBackground = IOSAppStateKey.String("background")
+ // The app is now in the foreground. This value is associated with UIKit
+ // notification `applicationWillEnterForeground`.
+ //
+ // Stability: development
+ IOSAppStateForeground = IOSAppStateKey.String("foreground")
+ // The app is about to terminate. Associated with UIKit notification
+ // `applicationWillTerminate`.
+ //
+ // Stability: development
+ IOSAppStateTerminate = IOSAppStateKey.String("terminate")
+)
+
+// Namespace: k8s
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name"
+ // semantic conventions. It represents the name of the cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-cluster"
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid"
+ // semantic conventions. It represents a pseudo-ID for the cluster, set to the
+ // UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8s cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8s ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T X.667].
+ // Which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // > different from all other UUIDs generated before 3603 A.D., or is
+ // > extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ //
+ // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "redis"
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the number
+ // of times the container was restarted. This attribute can be used to identify
+ // a particular container (running or stopped) within a container spec.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to
+ // the "k8s.container.status.last_terminated_reason" semantic conventions. It
+ // represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Evicted", "Error"
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SContainerStatusReasonKey is the attribute Key conforming to the
+ // "k8s.container.status.reason" semantic conventions. It represents the reason
+ // for the container state. Corresponds to the `reason` field of the
+ // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ContainerCreating", "CrashLoopBackOff",
+ // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff",
+ // "OOMKilled", "Completed", "Error", "ContainerCannotRun"
+ //
+ // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core
+ // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core
+ K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason")
+
+ // K8SContainerStatusStateKey is the attribute Key conforming to the
+ // "k8s.container.status.state" semantic conventions. It represents the state of
+ // the container. [K8s ContainerState].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "terminated", "running", "waiting"
+ //
+ // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core
+ K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name"
+ // semantic conventions. It represents the name of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid"
+ // semantic conventions. It represents the UID of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid"
+ // semantic conventions. It represents the UID of the DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SHPAMetricTypeKey is the attribute Key conforming to the
+ // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric
+ // source for the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Resource", "ContainerResource"
+ // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA.
+ K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type")
+
+ // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic
+ // conventions. It represents the name of the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SHPANameKey = attribute.Key("k8s.hpa.name")
+
+ // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the
+ // API version of the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "apps/v1", "autoscaling/v2"
+ // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA
+ // spec.
+ K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version")
+
+ // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of
+ // the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Deployment", "StatefulSet"
+ // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec.
+ K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind")
+
+ // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of
+ // the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-deployment", "my-statefulset"
+ // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec.
+ K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name")
+
+ // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic
+ // conventions. It represents the UID of the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SHPAUIDKey = attribute.Key("k8s.hpa.uid")
+
+ // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size"
+ // semantic conventions. It represents the size (identifier) of the K8s huge
+ // page.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2Mi"
+ K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic
+ // conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic
+ // conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "default"
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNamespacePhaseKey is the attribute Key conforming to the
+ // "k8s.namespace.phase" semantic conventions. It represents the phase of the
+ // K8s namespace.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "terminating"
+ // Note: This attribute aligns with the `phase` field of the
+ // [K8s NamespaceStatus]
+ //
+ // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core
+ K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase")
+
+ // K8SNodeConditionStatusKey is the attribute Key conforming to the
+ // "k8s.node.condition.status" semantic conventions. It represents the status of
+ // the condition, one of True, False, Unknown.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "true", "false", "unknown"
+ // Note: This attribute aligns with the `status` field of the
+ // [NodeCondition]
+ //
+ // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core
+ K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status")
+
+ // K8SNodeConditionTypeKey is the attribute Key conforming to the
+ // "k8s.node.condition.type" semantic conventions. It represents the condition
+ // type of a K8s Node.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Ready", "DiskPressure"
+ // Note: K8s Node conditions as described
+ // by [K8s documentation].
+ //
+ // This attribute aligns with the `type` field of the
+ // [NodeCondition]
+ //
+ // The set of possible values is not limited to those listed here. Managed
+ // Kubernetes environments or custom controllers MAY introduce additional
+ // node condition types.
+ // When this occurs, the exact value as reported by the Kubernetes API SHOULD be
+ // used.
+ //
+ // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition
+ // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core
+ K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "node-1"
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic
+ // conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2"
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic
+ // conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-pod-autoconf"
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic
+ // conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicationControllerNameKey is the attribute Key conforming to the
+ // "k8s.replicationcontroller.name" semantic conventions. It represents the name
+ // of the replication controller.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name")
+
+ // K8SReplicationControllerUIDKey is the attribute Key conforming to the
+ // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID
+ // of the replication controller.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid")
+
+ // K8SResourceQuotaNameKey is the attribute Key conforming to the
+ // "k8s.resourcequota.name" semantic conventions. It represents the name of the
+ // resource quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name")
+
+ // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the
+ // "k8s.resourcequota.resource_name" semantic conventions. It represents the
+ // name of the K8s resource a resource quota defines.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "count/replicationcontrollers"
+ // Note: The value for this attribute can be either the full
+ // `count/<resource>[.<group>]` string (e.g., count/deployments.apps,
+ // count/pods), or, for certain core Kubernetes resources, just the resource
+ // name (e.g., pods, services, configmaps). Both forms are supported by
+ // Kubernetes for object count quotas. See
+ // [Kubernetes Resource Quotas documentation] for more details.
+ //
+ // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota
+ K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name")
+
+ // K8SResourceQuotaUIDKey is the attribute Key conforming to the
+ // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+ // resource quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStorageclassNameKey is the attribute Key conforming to the
+ // "k8s.storageclass.name" semantic conventions. It represents the name of K8s
+ // [StorageClass] object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gold.storageclass.storage.k8s.io"
+ //
+ // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+ K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name")
+
+ // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name"
+ // semantic conventions. It represents the name of the K8s volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "volume0"
+ K8SVolumeNameKey = attribute.Key("k8s.volume.name")
+
+ // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type"
+ // semantic conventions. It represents the type of the K8s volume.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "emptyDir", "persistentVolumeClaim"
+ K8SVolumeTypeKey = attribute.Key("k8s.volume.type")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
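+
+// A hedged sketch of the kube-system proxy described in the k8s.cluster.uid
+// note: look up the UID of the `kube-system` namespace and report it as the
+// cluster pseudo-ID. It assumes the Kubernetes client-go API, which is not a
+// dependency of this package, and an illustrative helper name.
+//
+//	// import (
+//	//	"context"
+//	//	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+//	//	"k8s.io/client-go/kubernetes"
+//	// )
+//	func clusterUIDAttr(ctx context.Context, cs kubernetes.Interface) (attribute.KeyValue, error) {
+//		ns, err := cs.CoreV1().Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
+//		if err != nil {
+//			return attribute.KeyValue{}, err
+//		}
+//		// The kube-system namespace UID only changes if the cluster is rebuilt.
+//		return K8SClusterUID(string(ns.UID)), nil
+//	}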
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify a
+// particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob
+// annotation placed on the CronJob, the `<key>` being the annotation name, the
+// value being the annotation value.
+func K8SCronJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.annotation."+key, val)
+}
+
+// K8SCronJobLabel returns an attribute KeyValue conforming to the
+// "k8s.cronjob.label" semantic conventions. It represents the label placed on
+// the CronJob, the `<key>` being the label name, the value being the label
+// value.
+func K8SCronJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.label."+key, val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.daemonset.annotation" semantic conventions. It represents the annotation
+// placed on the DaemonSet, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.annotation."+key, val)
+}
+
+// K8SDaemonSetLabel returns an attribute KeyValue conforming to the
+// "k8s.daemonset.label" semantic conventions. It represents the label placed on
+// the DaemonSet, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDaemonSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.label."+key, val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the
+// "k8s.deployment.annotation" semantic conventions. It represents the annotation
+// placed on the Deployment, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.annotation."+key, val)
+}
+
+// K8SDeploymentLabel returns an attribute KeyValue conforming to the
+// "k8s.deployment.label" semantic conventions. It represents the label placed on
+// the Deployment, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDeploymentLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.label."+key, val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SHPAMetricType returns an attribute KeyValue conforming to the
+// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric
+// source for the horizontal pod autoscaler.
+func K8SHPAMetricType(val string) attribute.KeyValue {
+ return K8SHPAMetricTypeKey.String(val)
+}
+
+// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name"
+// semantic conventions. It represents the name of the horizontal pod autoscaler.
+func K8SHPAName(val string) attribute.KeyValue {
+ return K8SHPANameKey.String(val)
+}
+
+// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the
+// API version of the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefAPIVersionKey.String(val)
+}
+
+// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of
+// the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefKind(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefKindKey.String(val)
+}
+
+// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of
+// the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefName(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefNameKey.String(val)
+}
+
+// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid"
+// semantic conventions. It represents the UID of the horizontal pod autoscaler.
+func K8SHPAUID(val string) attribute.KeyValue {
+ return K8SHPAUIDKey.String(val)
+}
+
+// K8SHugepageSize returns an attribute KeyValue conforming to the
+// "k8s.hugepage.size" semantic conventions. It represents the size (identifier)
+// of the K8s huge page.
+func K8SHugepageSize(val string) attribute.KeyValue {
+ return K8SHugepageSizeKey.String(val)
+}
+
+// K8SJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.job.annotation" semantic conventions. It represents the annotation placed
+// on the Job, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.annotation."+key, val)
+}
+
+// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label"
+// semantic conventions. It represents the label placed on the Job, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.label."+key, val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the
+// "k8s.namespace.annotation" semantic conventions. It represents the annotation
+// placed on the Namespace, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.annotation."+key, val)
+}
+
+// K8SNamespaceLabel returns an attribute KeyValue conforming to the
+// "k8s.namespace.label" semantic conventions. It represents the label placed on
+// the Namespace, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SNamespaceLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.label."+key, val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeAnnotation returns an attribute KeyValue conforming to the
+// "k8s.node.annotation" semantic conventions. It represents the annotation
+// placed on the Node, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SNodeAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.annotation."+key, val)
+}
+
+// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label"
+// semantic conventions. It represents the label placed on the Node, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SNodeLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.label."+key, val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name"
+// semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodAnnotation returns an attribute KeyValue conforming to the
+// "k8s.pod.annotation" semantic conventions. It represents the annotation placed
+// on the Pod, the `<key>` being the annotation name, the value being the
+// annotation value.
+func K8SPodAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.annotation."+key, val)
+}
+
+// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label"
+// semantic conventions. It represents the label placed on the Pod, the `<key>`
+// being the label name, the value being the label value.
+func K8SPodLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.label."+key, val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.replicaset.annotation" semantic conventions. It represents the annotation
+// placed on the ReplicaSet, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.annotation."+key, val)
+}
+
+// K8SReplicaSetLabel returns an attribute KeyValue conforming to the
+// "k8s.replicaset.label" semantic conventions. It represents the label placed on
+// the ReplicaSet, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SReplicaSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.label."+key, val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicationControllerName returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.name" semantic conventions. It represents the name
+// of the replication controller.
+func K8SReplicationControllerName(val string) attribute.KeyValue {
+ return K8SReplicationControllerNameKey.String(val)
+}
+
+// K8SReplicationControllerUID returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of
+// the replication controller.
+func K8SReplicationControllerUID(val string) attribute.KeyValue {
+ return K8SReplicationControllerUIDKey.String(val)
+}
+
+// K8SResourceQuotaName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.name" semantic conventions. It represents the name of the
+// resource quota.
+func K8SResourceQuotaName(val string) attribute.KeyValue {
+ return K8SResourceQuotaNameKey.String(val)
+}
+
+// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.resource_name" semantic conventions. It represents the name
+// of the K8s resource a resource quota defines.
+func K8SResourceQuotaResourceName(val string) attribute.KeyValue {
+ return K8SResourceQuotaResourceNameKey.String(val)
+}
+
+// K8SResourceQuotaUID returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+// resource quota.
+func K8SResourceQuotaUID(val string) attribute.KeyValue {
+ return K8SResourceQuotaUIDKey.String(val)
+}
+
+// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.statefulset.annotation" semantic conventions. It represents the
+// annotation placed on the StatefulSet, the `<key>` being the annotation name,
+// the value being the annotation value, even if the value is empty.
+func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.statefulset.annotation."+key, val)
+}
+
+// K8SStatefulSetLabel returns an attribute KeyValue conforming to the
+// "k8s.statefulset.label" semantic conventions. It represents the label placed
+// on the StatefulSet, the `<key>` being the label name, the value being the
+// label value, even if the value is empty.
+func K8SStatefulSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.statefulset.label."+key, val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStorageclassName returns an attribute KeyValue conforming to the
+// "k8s.storageclass.name" semantic conventions. It represents the name of K8s
+// [StorageClass] object.
+//
+// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+func K8SStorageclassName(val string) attribute.KeyValue {
+ return K8SStorageclassNameKey.String(val)
+}
+
+// K8SVolumeName returns an attribute KeyValue conforming to the
+// "k8s.volume.name" semantic conventions. It represents the name of the K8s
+// volume.
+func K8SVolumeName(val string) attribute.KeyValue {
+ return K8SVolumeNameKey.String(val)
+}
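+
+// exampleK8SWorkloadAttributes is an illustrative sketch, not part of the
+// generated conventions: it shows how the K8s constructors above compose into
+// a single attribute list describing a workload. All names below are made-up
+// placeholder values, and the function relies on the file's existing
+// "attribute" import.
+func exampleK8SWorkloadAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		K8SNamespaceName("payments"),
+		K8SDeploymentName("checkout"),
+		K8SPodName("checkout-7d5c6bdb9b-x2x4q"),
+		K8SPodLabel("app", "checkout"),
+		K8SNodeName("worker-1"),
+	}
+}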
+
+// Enum values for k8s.container.status.reason
+var (
+ // The container is being created.
+ // Stability: development
+ K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating")
+ // The container is in a crash loop back off state.
+ // Stability: development
+ K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff")
+ // There was an error creating the container configuration.
+ // Stability: development
+ K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError")
+ // There was an error pulling the container image.
+ // Stability: development
+ K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull")
+ // The container image pull is in back off state.
+ // Stability: development
+ K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff")
+ // The container was killed due to out of memory.
+ // Stability: development
+ K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled")
+ // The container has completed execution.
+ // Stability: development
+ K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed")
+ // There was an error with the container.
+ // Stability: development
+ K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error")
+ // The container cannot run.
+ // Stability: development
+ K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun")
+)
+
+// Enum values for k8s.container.status.state
+var (
+ // The container has terminated.
+ // Stability: development
+ K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated")
+ // The container is running.
+ // Stability: development
+ K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running")
+ // The container is waiting.
+ // Stability: development
+ K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting")
+)
+
+// Enum values for k8s.namespace.phase
+var (
+ // Active namespace phase as described by [K8s API]
+ // Stability: development
+ //
+ // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
+ K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active")
+ // Terminating namespace phase as described by [K8s API]
+ // Stability: development
+ //
+ // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
+ K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating")
+)
+
+// Enum values for k8s.node.condition.status
+var (
+ // condition_true
+ // Stability: development
+ K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true")
+ // condition_false
+ // Stability: development
+ K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false")
+ // condition_unknown
+ // Stability: development
+ K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown")
+)
+
+// Enum values for k8s.node.condition.type
+var (
+ // The node is healthy and ready to accept pods
+ // Stability: development
+ K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready")
+ // Pressure exists on the disk size—that is, if the disk capacity is low
+ // Stability: development
+ K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure")
+ // Pressure exists on the node memory—that is, if the node memory is low
+ // Stability: development
+ K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure")
+ // Pressure exists on the processes—that is, if there are too many processes
+ // on the node
+ // Stability: development
+ K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure")
+ // The network for the node is not correctly configured
+ // Stability: development
+ K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable")
+)
+
+// Enum values for k8s.volume.type
+var (
+ // A [persistentVolumeClaim] volume
+ // Stability: development
+ //
+ // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim
+ K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim")
+ // A [configMap] volume
+ // Stability: development
+ //
+ // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap
+ K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap")
+ // A [downwardAPI] volume
+ // Stability: development
+ //
+ // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi
+ K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI")
+ // An [emptyDir] volume
+ // Stability: development
+ //
+ // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir
+ K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir")
+ // A [secret] volume
+ // Stability: development
+ //
+ // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret
+ K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret")
+ // A [local] volume
+ // Stability: development
+ //
+ // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local
+ K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local")
+)
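+
+// exampleK8SVolumeAttributes is an illustrative sketch, not part of the
+// generated conventions: the enum variables above are ready-made
+// attribute.KeyValue instances, so they can be mixed directly with the
+// constructor functions. The volume name is a placeholder value.
+func exampleK8SVolumeAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		K8SVolumeName("data"),
+		K8SVolumeTypePersistentVolumeClaim,
+		K8SContainerStatusStateRunning,
+	}
+}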
+
+// Namespace: linux
+const (
+ // LinuxMemorySlabStateKey is the attribute Key conforming to the
+ // "linux.memory.slab.state" semantic conventions. It represents the Linux Slab
+ // memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "reclaimable", "unreclaimable"
+ LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state")
+)
+
+// Enum values for linux.memory.slab.state
+var (
+ // reclaimable
+ // Stability: development
+ LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable")
+ // unreclaimable
+ // Stability: development
+ LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable")
+)
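+
+// exampleLinuxSlabAttributes is an illustrative sketch, not part of the
+// generated conventions: a typical use is to record slab memory per state by
+// attaching one of the enum values above to each metric data point.
+func exampleLinuxSlabAttributes() attribute.Set {
+	return attribute.NewSet(LinuxMemorySlabStateReclaimable)
+}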
+
+// Namespace: log
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "audit.log"
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the basename of
+ // the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "uuid.log"
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/var/log/mysql/audit.log"
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full path to
+ // the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/var/lib/docker/uuid.log"
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic
+ // conventions. It represents the stream associated with the log. See below for
+ // a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ LogIostreamKey = attribute.Key("log.iostream")
+
+ // LogRecordOriginalKey is the attribute Key conforming to the
+ // "log.record.original" semantic conventions. It represents the complete
+ // original Log Record.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - -
+ // Something happened", "[INFO] 8/3/24 12:34:56 Something happened"
+ // Note: This value MAY be added when processing a Log Record which was
+ // originally transmitted as a string or equivalent data type AND the Body field
+ // of the Log Record does not contain the same value. (e.g. a syslog or a log
+ // record read from a file.)
+ LogRecordOriginalKey = attribute.Key("log.record.original")
+
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log Record.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV"
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be an
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other
+ // identifiers (e.g. UUID) may be used as needed.
+ //
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the "log.file.name"
+// semantic conventions. It represents the basename of the file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the "log.file.path"
+// semantic conventions. It represents the full path to the file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path to
+// the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// LogRecordOriginal returns an attribute KeyValue conforming to the
+// "log.record.original" semantic conventions. It represents the complete
+// original Log Record.
+func LogRecordOriginal(val string) attribute.KeyValue {
+ return LogRecordOriginalKey.String(val)
+}
+
+// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid"
+// semantic conventions. It represents a unique identifier for the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+ // Logs from stdout stream
+ // Stability: development
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ // Stability: development
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
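+
+// exampleLogFileAttributes is an illustrative sketch, not part of the
+// generated conventions: it combines the file-related constructors with one of
+// the log.iostream enum values, reusing the documented example values for the
+// file name and path.
+func exampleLogFileAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		LogFileName("audit.log"),
+		LogFilePath("/var/log/mysql/audit.log"),
+		LogIostreamStdout,
+	}
+}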
+
+// Namespace: mainframe
+const (
+ // MainframeLparNameKey is the attribute Key conforming to the
+ // "mainframe.lpar.name" semantic conventions. It represents the name of the
+ // logical partition that hosts systems with a mainframe operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "LPAR01"
+ MainframeLparNameKey = attribute.Key("mainframe.lpar.name")
+)
+
+// MainframeLparName returns an attribute KeyValue conforming to the
+// "mainframe.lpar.name" semantic conventions. It represents the name of the
+// logical partition that hosts systems with a mainframe operating system.
+func MainframeLparName(val string) attribute.KeyValue {
+ return MainframeLparNameKey.String(val)
+}
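+
+// exampleMainframeAttributes is an illustrative sketch, not part of the
+// generated conventions, reusing the documented example value for the LPAR
+// name.
+func exampleMainframeAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{MainframeLparName("LPAR01")}
+}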
+
+// Namespace: messaging
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the batching
+ // operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client library
+ // supports both batch and single-message API for the same operation,
+ // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs
+ // and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique identifier
+ // for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "client-5", "myhost@8742@s8083jm"
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingConsumerGroupNameKey is the attribute Key conforming to the
+ // "messaging.consumer.group.name" semantic conventions. It represents the name
+ // of the consumer group with which a consumer is associated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-group", "indexer"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.consumer.group.name` is applicable and what it means in
+ // the context of that system.
+ MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the message
+ // destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MyQueue", "MyTopic"
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic or
+ // other entity within the broker. If
+ // the broker doesn't have such a notion, the destination name SHOULD uniquely
+ // identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationPartitionIDKey is the attribute Key conforming to the
+ // "messaging.destination.partition.id" semantic conventions. It represents the
+ // identifier of the partition messages are sent to or received from, unique
+ // within the `messaging.destination.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+ // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to
+ // the "messaging.destination.subscription.name" semantic conventions. It
+ // represents the name of the destination subscription from which a message is
+ // consumed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "subscription-a"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.destination.subscription.name` is applicable and what it
+ // means in the context of that system.
+ MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/customers/{customerId}"
+ // Note: Destination names could be constructed from templates. An example would
+ // be a destination name involving a user name or product id. Although the
+ // destination name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+
+ // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+ // represents the ack deadline in seconds set for the modify ack deadline
+ // request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+ // ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ack_id
+ MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions.
+ // It represents the delivery attempt for a given message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+ // represents the ordering key for a given message. If the attribute is not
+ // present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ordering_key
+ MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the message
+ // keys in Kafka are used for grouping alike messages to ensure they're
+ // processed on the same partition. They differ from `messaging.message.id` in
+ // that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ // Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents a
+ // boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingKafkaOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.offset" semantic conventions. It represents the offset of a
+ // record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the size of
+ // the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to both the compressed or uncompressed body size. If
+ // both sizes are known, the uncompressed
+ // body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents the
+ // conversation ID identifying the conversation to which the message belongs,
+ // represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: MyConversationId
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents the
+ // size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to both the compressed or uncompressed size. If both
+ // sizes are known, the uncompressed
+ // size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used by
+ // the messaging system as an identifier for the message, represented as a
+ // string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 452a7c7c7c7048c2f887f61572b18fc2
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ack", "nack", "send"
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to
+ // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It
+ // represents the rabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the
+ // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents
+ // the rabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+
+ // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the
+ // "messaging.rocketmq.consumption_model" semantic conventions. It represents
+ // the model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It
+ // represents the delay time level for delay message, which determines the
+ // message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming
+ // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions.
+ // It represents the timestamp in milliseconds that the delay message is
+ // expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents the it
+ // is essential for FIFO message. Messages that belong to the same message group
+ // are always processed one by one within the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myMessageGroup
+ MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents the
+ // key(s) of the message, another way to mark a message besides the message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "keyA", "keyB"
+ MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketMQMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: tagA
+ MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents the
+ // type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketMQNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources, resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myNamespace
+ MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to
+ // the "messaging.servicebus.disposition_status" semantic conventions. It
+ // describes the [settlement type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock
+ MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.delivery_count" semantic conventions. It
+ // represents the number of deliveries that have been attempted for this
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+
+ // MessagingSystemKey is the attribute Key conforming to the "messaging.system"
+ // semantic conventions. It represents the messaging system as identified by the
+ // client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate with
+ // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the
+ // instrumentation's best knowledge.
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to the
+// "messaging.batch.message_count" semantic conventions. It represents the number
+// of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique identifier
+// for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingConsumerGroupName returns an attribute KeyValue conforming to the
+// "messaging.consumer.group.name" semantic conventions. It represents the name
+// of the consumer group with which a consumer is associated.
+func MessagingConsumerGroupName(val string) attribute.KeyValue {
+ return MessagingConsumerGroupNameKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the
+// "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be unnamed
+// or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationPartitionID returns an attribute KeyValue conforming to
+// the "messaging.destination.partition.id" semantic conventions. It represents
+// the identifier of the partition messages are sent to or received from, unique
+// within the `messaging.destination.name`.
+func MessagingDestinationPartitionID(val string) attribute.KeyValue {
+ return MessagingDestinationPartitionIDKey.String(val)
+}
+
+// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming
+// to the "messaging.destination.subscription.name" semantic conventions. It
+// represents the name of the destination subscription from which a message is
+// consumed.
+func MessagingDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to the
+// "messaging.destination.template" semantic conventions. It represents the low
+// cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to the
+// "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+// represents the UTC epoch seconds at which the message has been accepted and
+// stored in the entity.
+func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventHubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+// represents the ack deadline in seconds set for the modify ack deadline
+// request.
+func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the
+// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+// ack id for a given message.
+func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+// represents the ordering key for a given message. If the attribute is not
+// present, the message does not have an ordering key.
+func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the message
+// keys in Kafka are used for grouping alike messages to ensure they're processed
+// on the same partition. They differ from `messaging.message.id` in that they're
+// not unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+// boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingKafkaOffset returns an attribute KeyValue conforming to the
+// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+// record in the corresponding Kafka partition.
+func MessagingKafkaOffset(val int) attribute.KeyValue {
+ return MessagingKafkaOffsetKey.Int(val)
+}
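+
+// exampleKafkaMessagingAttributes is an illustrative sketch, not part of the
+// generated conventions: it combines the generic messaging constructors with
+// the Kafka-specific ones for a hypothetical consumer span. The topic name is
+// a placeholder; the group, key, and offset reuse documented example values.
+func exampleKafkaMessagingAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		MessagingSystemKafka,
+		MessagingOperationName("receive"),
+		MessagingDestinationName("orders"),
+		MessagingConsumerGroupName("indexer"),
+		MessagingKafkaMessageKey("myKey"),
+		MessagingKafkaOffset(42),
+	}
+}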
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size of
+// the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming to the
+// "messaging.message.conversation_id" semantic conventions. It represents the
+// conversation ID identifying the conversation to which the message belongs,
+// represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the
+// "messaging.message.envelope.size" semantic conventions. It represents the size
+// of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by the
+// messaging system as an identifier for the message, represented as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
+
+// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitMQDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming
+// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It
+// represents the rabbitMQ message delivery tag.
+func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitMQMessageDeliveryTagKey.Int(val)
+}
+
+// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the it
+// is essential for FIFO message. Messages that belong to the same message group
+// are always processed one by one within the same consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark a message besides the message id.
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketMQMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketMQMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageTagKey.String(val)
+}
+
+// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketMQNamespace(val string) attribute.KeyValue {
+ return MessagingRocketMQNamespaceKey.String(val)
+}
+
+// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has been
+// accepted and stored in the entity.
+func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageEnqueuedTimeKey.Int(val)
+}
+
+// Enum values for messaging.operation.type
+var (
+ // A message is created. "Create" spans always refer to a single message and are
+ // used to provide a unique creation context for messages in batch sending
+ // scenarios.
+ //
+ // Stability: development
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are provided for sending to an intermediary. If a single
+ // message is sent, the context of the "Send" span can be used as the creation
+ // context and no "Create" span needs to be created.
+ //
+ // Stability: development
+ MessagingOperationTypeSend = MessagingOperationTypeKey.String("send")
+ // One or more messages are requested by a consumer. This operation refers to
+ // pull-based scenarios, where consumers explicitly call methods of messaging
+ // SDKs to receive messages.
+ //
+ // Stability: development
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are processed by a consumer.
+ //
+ // Stability: development
+ MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled.
+ //
+ // Stability: development
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+)
+
+// Enum values for messaging.rocketmq.consumption_model
+var (
+ // Clustering consumption model
+ // Stability: development
+ MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ // Stability: development
+ MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting")
+)
+
+// Enum values for messaging.rocketmq.message.type
+var (
+ // Normal message
+ // Stability: development
+ MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal")
+ // FIFO message
+ // Stability: development
+ MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo")
+ // Delay message
+ // Stability: development
+ MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay")
+ // Transaction message
+ // Stability: development
+ MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction")
+)
+
+// Enum values for messaging.servicebus.disposition_status
+var (
+ // Message is completed
+ // Stability: development
+ MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete")
+ // Message is abandoned
+ // Stability: development
+ MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon")
+ // Message is sent to dead letter queue
+ // Stability: development
+ MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter")
+ // Message is deferred
+ // Stability: development
+ MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer")
+)
+
+// Enum values for messaging.system
+var (
+ // Apache ActiveMQ
+ // Stability: development
+ MessagingSystemActiveMQ = MessagingSystemKey.String("activemq")
+ // Amazon Simple Notification Service (SNS)
+ // Stability: development
+ MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns")
+ // Amazon Simple Queue Service (SQS)
+ // Stability: development
+ MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ // Stability: development
+ MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid")
+ // Azure Event Hubs
+ // Stability: development
+ MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs")
+ // Azure Service Bus
+ // Stability: development
+ MessagingSystemServiceBus = MessagingSystemKey.String("servicebus")
+ // Google Cloud Pub/Sub
+ // Stability: development
+ MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ // Stability: development
+ MessagingSystemJMS = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ // Stability: development
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ // Stability: development
+ MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ // Stability: development
+ MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq")
+ // Apache Pulsar
+ // Stability: development
+ MessagingSystemPulsar = MessagingSystemKey.String("pulsar")
+)
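
A brief usage sketch (editorial, not part of the generated file): a producer instrumentation could combine the messaging.system and messaging.operation.type values above on a "send" span. The tracer name, destination name, and the semconv import version below are illustrative assumptions.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // version in the path is an assumption
	"go.opentelemetry.io/otel/trace"
)

// sendOrder starts a producer span carrying the messaging attributes defined above.
func sendOrder(ctx context.Context, payload []byte) {
	tracer := otel.Tracer("example-producer") // illustrative instrumentation scope name
	_, span := tracer.Start(ctx, "send orders",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,       // messaging.system = "kafka"
			semconv.MessagingOperationTypeSend, // messaging.operation.type = "send"
			attribute.String("messaging.destination.name", "orders"), // illustrative destination
		),
	)
	defer span.End()
	_ = payload // the actual produce call is omitted from this sketch
}
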
+
+// Namespace: network
+const (
+ // NetworkCarrierICCKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: DE
+ NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMCCKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+ // country code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 310
+ NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMNCKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+ // network code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 001
+ NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of the
+ // mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: sprint
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionStateKey is the attribute Key conforming to the
+ // "network.connection.state" semantic conventions. It represents the state of
+ // network connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "close_wait"
+ // Note: Connection states are defined as part of the [rfc9293]
+ //
+ // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+ NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It describes more details
+ // regarding the connection.type. It may be the type of cell technology
+ // connection, but it could be used for describing details about a wifi
+ // connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: LTE
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the internet
+ // connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: wifi
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkInterfaceNameKey is the attribute Key conforming to the
+ // "network.interface.name" semantic conventions. It represents the network
+ // interface name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lo", "eth0"
+ NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+ // NetworkIODirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "transmit"
+ NetworkIODirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port"
+ // semantic conventions. It represents the peer port number of the network
+ // connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the
+ // [OSI application layer] or non-OSI equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "amqp", "http", "mqtt"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the actual
+ // version of the protocol used for network communication.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.1", "2"
+ // Note: If protocol version is subject to negotiation (for example using [ALPN]
+ // ), this attribute SHOULD be set to the negotiated version. If the actual
+ // protocol version is not known, this attribute SHOULD NOT be set.
+ //
+ // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the
+ // [OSI transport layer] or [inter-process communication method].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "tcp", "udp"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example
+ // different processes could be listening on TCP port 12345 and UDP port 12345.
+ //
+ // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer
+ // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic
+ // conventions. It represents the [OSI network layer] or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "ipv4", "ipv6"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // [OSI network layer]: https://wikipedia.org/wiki/Network_layer
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+// NetworkCarrierICC returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierICC(val string) attribute.KeyValue {
+ return NetworkCarrierICCKey.String(val)
+}
+
+// NetworkCarrierMCC returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMCC(val string) attribute.KeyValue {
+ return NetworkCarrierMCCKey.String(val)
+}
+
+// NetworkCarrierMNC returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMNC(val string) attribute.KeyValue {
+ return NetworkCarrierMNCKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkInterfaceName returns an attribute KeyValue conforming to the
+// "network.interface.name" semantic conventions. It represents the network
+// interface name.
+func NetworkInterfaceName(val string) attribute.KeyValue {
+ return NetworkInterfaceNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port number
+// of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
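
The note on network.protocol.version above asks that only a negotiated or otherwise known version be recorded. Below is a minimal editorial sketch of that rule for an HTTP client over TLS; the ALPN-to-version mapping and the semconv import version are assumptions.

package example

import (
	"crypto/tls"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // version in the path is an assumption
)

// httpProtocolAttrs always sets network.protocol.name and adds
// network.protocol.version only when it is known from the negotiated ALPN protocol.
func httpProtocolAttrs(cs tls.ConnectionState) []attribute.KeyValue {
	attrs := []attribute.KeyValue{semconv.NetworkProtocolName("http")}
	switch cs.NegotiatedProtocol {
	case "h2":
		attrs = append(attrs, semconv.NetworkProtocolVersion("2"))
	case "http/1.1":
		attrs = append(attrs, semconv.NetworkProtocolVersion("1.1"))
	default:
		// Version not known: leave network.protocol.version unset, per the note above.
	}
	return attrs
}
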
+
+// Enum values for network.connection.state
+var (
+ // closed
+ // Stability: development
+ NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed")
+ // close_wait
+ // Stability: development
+ NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait")
+ // closing
+ // Stability: development
+ NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing")
+ // established
+ // Stability: development
+ NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established")
+ // fin_wait_1
+ // Stability: development
+ NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1")
+ // fin_wait_2
+ // Stability: development
+ NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2")
+ // last_ack
+ // Stability: development
+ NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack")
+ // listen
+ // Stability: development
+ NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen")
+ // syn_received
+ // Stability: development
+ NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received")
+ // syn_sent
+ // Stability: development
+ NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent")
+ // time_wait
+ // Stability: development
+ NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait")
+)
+
+// Enum values for network.connection.subtype
+var (
+ // GPRS
+ // Stability: development
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ // Stability: development
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ // Stability: development
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ // Stability: development
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ // Stability: development
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ // Stability: development
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ // Stability: development
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ // Stability: development
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ // Stability: development
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ // Stability: development
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ // Stability: development
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ // Stability: development
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ // Stability: development
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ // Stability: development
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ // Stability: development
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ // Stability: development
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ // Stability: development
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ // Stability: development
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ // Stability: development
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ // Stability: development
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ // Stability: development
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+// Enum values for network.connection.type
+var (
+ // wifi
+ // Stability: development
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ // Stability: development
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ // Stability: development
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ // Stability: development
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ // Stability: development
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+// Enum values for network.io.direction
+var (
+ // transmit
+ // Stability: development
+ NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit")
+ // receive
+ // Stability: development
+ NetworkIODirectionReceive = NetworkIODirectionKey.String("receive")
+)
+
+// Enum values for network.transport
+var (
+ // TCP
+ // Stability: stable
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ // Stability: stable
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe.
+ // Stability: stable
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ // Stability: stable
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+ // QUIC
+ // Stability: stable
+ NetworkTransportQUIC = NetworkTransportKey.String("quic")
+)
+
+// Enum values for network.type
+var (
+ // IPv4
+ // Stability: stable
+ NetworkTypeIPv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ // Stability: stable
+ NetworkTypeIPv6 = NetworkTypeKey.String("ipv6")
+)
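
Following the note on network.transport above (a port number is ambiguous without the transport), a client instrumentation would typically emit peer address, peer port, transport, and address family together. Editorial sketch; the values and the semconv import version are assumptions.

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // version in the path is an assumption
)

// tcpPeerAttrs pairs the peer port with its transport and address family so the
// port number is unambiguous, as the network.transport note recommends.
func tcpPeerAttrs(peerAddr string, peerPort int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.NetworkTransportTCP,          // network.transport = "tcp"
		semconv.NetworkTypeIPv4,              // network.type = "ipv4"
		semconv.NetworkPeerAddress(peerAddr), // e.g. "10.1.2.80"
		semconv.NetworkPeerPort(peerPort),    // e.g. 65123
	}
}
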
+
+// Namespace: oci
+const (
+ // OCIManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of the
+ // OCI image manifest. For container images specifically is the digest by which
+ // the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"
+ // Note: Follows [OCI Image Manifest Specification], and specifically the
+ // [Digest property].
+ // An example can be found in [Example Image Manifest].
+ //
+ // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md
+ // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests
+ // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest
+ OCIManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OCIManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OCIManifestDigest(val string) attribute.KeyValue {
+ return OCIManifestDigestKey.String(val)
+}
+
+// Namespace: openai
+const (
+ // OpenAIRequestServiceTierKey is the attribute Key conforming to the
+ // "openai.request.service_tier" semantic conventions. It represents the service
+ // tier requested. May be a specific tier, default, or auto.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "auto", "default"
+ OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier")
+
+ // OpenAIResponseServiceTierKey is the attribute Key conforming to the
+ // "openai.response.service_tier" semantic conventions. It represents the
+ // service tier used for the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "scale", "default"
+ OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier")
+
+ // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the
+ // "openai.response.system_fingerprint" semantic conventions. It represents a
+ // fingerprint to track any eventual change in the Generative AI environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fp_44709d6fcb"
+ OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint")
+)
+
+// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the
+// "openai.response.service_tier" semantic conventions. It represents the service
+// tier used for the response.
+func OpenAIResponseServiceTier(val string) attribute.KeyValue {
+ return OpenAIResponseServiceTierKey.String(val)
+}
+
+// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to
+// the "openai.response.system_fingerprint" semantic conventions. It represents a
+// fingerprint to track any eventual change in the Generative AI environment.
+func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue {
+ return OpenAIResponseSystemFingerprintKey.String(val)
+}
+
+// Enum values for openai.request.service_tier
+var (
+ // The system will utilize scale tier credits until they are exhausted.
+ // Stability: development
+ OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto")
+ // The system will utilize the default scale tier.
+ // Stability: development
+ OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default")
+)
+
+// Namespace: opentracing
+const (
+ // OpenTracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the parent-child
+ // Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+// Enum values for opentracing.ref_type
+var (
+ // The parent Span depends on the child Span in some capacity
+ // Stability: development
+ OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ // Stability: development
+ OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from")
+)
+
+// Namespace: os
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic
+ // conventions. It represents the unique identifier for a particular build or
+ // compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TQ3C.230805.001.B2", "20E247", "22621"
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to be
+ // parsed) OS version information, as reported for example by the `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS"
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version" semantic
+ // conventions. It represents the version string of the operating system as
+ // defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ OSVersionKey = attribute.Key("os.version")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the "os.description"
+// semantic conventions. It represents the human readable (not intended to be
+// parsed) OS version information, as reported for example by the `ver` or
+// `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating system
+// as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// Enum values for os.type
+var (
+ // Microsoft Windows
+ // Stability: development
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ // Stability: development
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ // Stability: development
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ // Stability: development
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ // Stability: development
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ // Stability: development
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ // Stability: development
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ // Stability: development
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ // Stability: development
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ // Stability: development
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ // Stability: development
+ OSTypeZOS = OSTypeKey.String("zos")
+)
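
As an illustration, the os.* keys above are usually emitted together when describing the host operating system. Editorial sketch; the concrete values and the semconv import version are assumptions.

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // version in the path is an assumption
)

// osAttrs describes the host operating system using the os.* attribute helpers.
func osAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.OSTypeLinux,                         // os.type = "linux"
		semconv.OSName("Ubuntu"),                    // os.name
		semconv.OSVersion("22.04.4"),                // os.version
		semconv.OSDescription("Ubuntu 22.04.4 LTS"), // os.description
	}
}
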
+
+// Namespace: otel
+const (
+ // OTelComponentNameKey is the attribute Key conforming to the
+ // "otel.component.name" semantic conventions. It represents a name uniquely
+ // identifying the instance of the OpenTelemetry component within its containing
+ // SDK instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otlp_grpc_span_exporter/0", "custom-name"
+ // Note: Implementations SHOULD ensure a low cardinality for this attribute,
+ // even across application or SDK restarts.
+ // E.g. implementations MUST NOT use UUIDs as values for this attribute.
+ //
+ // Implementations MAY achieve these goals by following a
+ // `<otel.component.type>/<instance-counter>` pattern, e.g.
+ // `batching_span_processor/0`.
+ // Hereby `otel.component.type` refers to the corresponding attribute value of
+ // the component.
+ //
+ // The value of `instance-counter` MAY be automatically assigned by the
+ // component and uniqueness within the enclosing SDK instance MUST be
+ // guaranteed.
+ // For example, `<instance-counter>` MAY be implemented by using a monotonically
+ // increasing counter (starting with `0`), which is incremented every time an
+ // instance of the given component type is started.
+ //
+ // With this implementation, for example the first Batching Span Processor would
+ // have `batching_span_processor/0`
+ // as `otel.component.name`, the second one `batching_span_processor/1` and so
+ // on.
+ // These values will therefore be reused in the case of an application restart.
+ OTelComponentNameKey = attribute.Key("otel.component.name")
+
+ // OTelComponentTypeKey is the attribute Key conforming to the
+ // "otel.component.type" semantic conventions. It represents a name identifying
+ // the type of the OpenTelemetry component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "batching_span_processor", "com.example.MySpanExporter"
+ // Note: If none of the standardized values apply, implementations SHOULD use
+ // the language-defined name of the type.
+ // E.g. for Java the fully qualified classname SHOULD be used in this case.
+ OTelComponentTypeKey = attribute.Key("otel.component.type")
+
+ // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name"
+ // semantic conventions. It represents the name of the instrumentation scope - (
+ // `InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "io.opentelemetry.contrib.mongodb"
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeSchemaURLKey is the attribute Key conforming to the
+ // "otel.scope.schema_url" semantic conventions. It represents the schema URL of
+ // the instrumentation scope.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://opentelemetry.io/schemas/1.31.0"
+ OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of the
+ // instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.0.0"
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+
+ // OTelSpanParentOriginKey is the attribute Key conforming to the
+ // "otel.span.parent.origin" semantic conventions. It determines whether the
+ // span has a parent span, and if so, [whether it is a remote parent].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin")
+
+ // OTelSpanSamplingResultKey is the attribute Key conforming to the
+ // "otel.span.sampling_result" semantic conventions. It represents the result
+ // value of the sampler for this span.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result")
+
+ // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code"
+ // semantic conventions. It represents the name of the code, either "OK" or
+ // "ERROR". MUST NOT be set if the status code is UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the description
+ // of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "resource not found"
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+// OTelComponentName returns an attribute KeyValue conforming to the
+// "otel.component.name" semantic conventions. It represents a name uniquely
+// identifying the instance of the OpenTelemetry component within its containing
+// SDK instance.
+func OTelComponentName(val string) attribute.KeyValue {
+ return OTelComponentNameKey.String(val)
+}
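
A sketch of the "<otel.component.type>/<instance-counter>" naming pattern described in the otel.component.name note. Keeping one counter per component type, as below, is only one possible implementation and is not prescribed by the convention; the semconv import version is an assumption.

package example

import (
	"fmt"
	"sync"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // version in the path is an assumption
)

var (
	componentMu       sync.Mutex
	componentCounters = map[string]int{}
)

// componentNameAttr yields low-cardinality names such as "batching_span_processor/0",
// "batching_span_processor/1", and so on, using one counter per component type.
func componentNameAttr(componentType string) attribute.KeyValue {
	componentMu.Lock()
	n := componentCounters[componentType]
	componentCounters[componentType]++
	componentMu.Unlock()
	return semconv.OTelComponentName(fmt.Sprintf("%s/%d", componentType, n))
}
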
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeSchemaURL returns an attribute KeyValue conforming to the
+// "otel.scope.schema_url" semantic conventions. It represents the schema URL of
+// the instrumentation scope.
+func OTelScopeSchemaURL(val string) attribute.KeyValue {
+ return OTelScopeSchemaURLKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the description
+// of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
+// Enum values for otel.component.type
+var (
+ // The builtin SDK batching span processor
+ //
+ // Stability: development
+ OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor")
+ // The builtin SDK simple span processor
+ //
+ // Stability: development
+ OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor")
+ // The builtin SDK batching log record processor
+ //
+ // Stability: development
+ OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor")
+ // The builtin SDK simple log record processor
+ //
+ // Stability: development
+ OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor")
+ // OTLP span exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter")
+ // OTLP span exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter")
+ // OTLP span exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter")
+ // Zipkin span exporter over HTTP
+ //
+ // Stability: development
+ OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter")
+ // OTLP log record exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter")
+ // OTLP log record exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter")
+ // OTLP log record exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter")
+ // The builtin SDK periodically exporting metric reader
+ //
+ // Stability: development
+ OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader")
+ // OTLP metric exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter")
+ // OTLP metric exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter")
+ // OTLP metric exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter")
+ // Prometheus metric exporter over HTTP with the default text-based format
+ //
+ // Stability: development
+ OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter")
+)
+
+// Enum values for otel.span.parent.origin
+var (
+ // The span does not have a parent, it is a root span
+ // Stability: development
+ OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none")
+ // The span has a parent and the parent's span context [isRemote()] is false
+ // Stability: development
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local")
+ // The span has a parent and the parent's span context [isRemote()] is true
+ // Stability: development
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote")
+)
+
+// Enum values for otel.span.sampling_result
+var (
+ // The span is not sampled and not recording
+ // Stability: development
+ OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP")
+ // The span is not sampled, but recording
+ // Stability: development
+ OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY")
+ // The span is sampled and recording
+ // Stability: development
+ OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE")
+)
+
+// Enum values for otel.status_code
+var (
+ // The operation has been validated by an Application developer or Operator to
+ // have completed successfully.
+ // Stability: stable
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error.
+ // Stability: stable
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// Namespace: peer
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic
+ // conventions. It represents the [`service.name`] of the remote service. SHOULD
+ // be equal to the actual `service.name` resource attribute of the remote
+ // service if any.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: AuthTokenCache
+ //
+ // [`service.name`]: /docs/resource/README.md#service
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the "peer.service"
+// semantic conventions. It represents the [`service.name`] of the remote
+// service. SHOULD be equal to the actual `service.name` resource attribute of
+// the remote service if any.
+//
+// [`service.name`]: /docs/resource/README.md#service
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// Namespace: process
+const (
+ // ProcessArgsCountKey is the attribute Key conforming to the
+ // "process.args_count" semantic conventions. It represents the length of the
+ // process.command_args array.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 4
+ // Note: This field can be useful for querying or performing bucket analysis on
+ // how many arguments were provided to start a process. More arguments may be an
+ // indication of suspicious activity.
+ ProcessArgsCountKey = attribute.Key("process.args_count")
+
+ // ProcessCommandKey is the attribute Key conforming to the "process.command"
+ // semantic conventions. It represents the command used to launch the process
+ // (i.e. the command name). On Linux based systems, can be set to the zeroth
+ // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter
+ // extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otelcol"
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents all the command
+ // arguments (including the command/executable itself) as received by
+ // the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this
+ // would be the full argv vector passed to `main`. SHOULD NOT be collected by
+ // default unless there is sanitization that excludes sensitive data.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otecol", "--config=config.yaml"
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full command
+ // used to launch the process as a single string representing the full command.
+ // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+ // you have to assemble it just for monitoring; use `process.command_args`
+ // instead. SHOULD NOT be collected by default unless there is sanitization that
+ // excludes sensitive data.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "C:\cmd\otecol --config="my directory\config.yaml""
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+ // "process.context_switch_type" semantic conventions. It specifies whether the
+ // context switches for this data point were voluntary or involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and time
+ // the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:25:34.853Z"
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the
+ // "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+ // build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "c89b11207f6479603b0d49bf291c092c2b719293"
+ ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu")
+
+ // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the
+ // "process.executable.build_id.go" semantic conventions. It represents the Go
+ // build ID as retrieved by `go tool buildid <go executable>`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY"
+ ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go")
+
+ // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the
+ // "process.executable.build_id.htlhash" semantic conventions. It represents the
+ // profiling specific build ID for executables. See the OTel specification for
+ // Profiles for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "600DCAFE4A110000F2BF38C493F5FB92"
+ ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name of the
+ // process executable. On Linux based systems, this SHOULD be set to the base
+ // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to
+ // the base name of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcol"
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full path
+ // to the process executable. On Linux based systems, can be set to the target
+ // of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/cmd/otelcol"
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code"
+ // semantic conventions. It represents the exit code of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time"
+ // semantic conventions. It represents the date and time the process exited, in
+ // ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:26:12.315Z"
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID of the
+ // process's group leader. This is also the process group ID (PGID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+ // "process.interactive" semantic conventions. It represents whether the
+ // process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessLinuxCgroupKey is the attribute Key conforming to the
+ // "process.linux.cgroup" semantic conventions. It represents the control group
+ // associated with the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope",
+ // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope"
+ // Note: Control groups (cgroups) are a kernel feature used to organize and
+ // manage process resources. This attribute provides the path(s) to the
+ // cgroup(s) associated with the process, which should match the contents of the
+ // [/proc/[PID]/cgroup] file.
+ //
+ // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html
+ ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessPagingFaultTypeKey is the attribute Key conforming to the
+ // "process.paging.fault_type" semantic conventions. It represents the type of
+ // page fault for this data point. Type `major` is for major/hard page faults,
+ // and `minor` is for minor/soft page faults.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent Process
+ // identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic
+ // conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRealUserIDKey is the attribute Key conforming to the
+ // "process.real_user.id" semantic conventions. It represents the real user ID
+ // (RUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1000
+ ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+ // ProcessRealUserNameKey is the attribute Key conforming to the
+ // "process.real_user.name" semantic conventions. It represents the username of
+ // the real user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of the
+ // runtime of this process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "OpenJDK Runtime Environment"
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the version of
+ // the runtime of this process, as returned by the runtime without modification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14.0.2
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessSavedUserIDKey is the attribute Key conforming to the
+ // "process.saved_user.id" semantic conventions. It represents the saved user ID
+ // (SUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1002
+ ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+ // ProcessSavedUserNameKey is the attribute Key conforming to the
+ // "process.saved_user.name" semantic conventions. It represents the username of
+ // the saved user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+ // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+ // "process.session_leader.pid" semantic conventions. It represents the PID of
+ // the process's session leader. This is also the session ID (SID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14
+ ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+ // ProcessTitleKey is the attribute Key conforming to the "process.title"
+ // semantic conventions. It represents the process title (proctitle).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cat /etc/hostname", "xfce4-session", "bash"
+ // Note: In many Unix-like systems, the process title (proctitle) is the string
+ // that represents the name or command line of a running process, displayed by
+ // system monitoring tools like ps, top, and htop.
+ ProcessTitleKey = attribute.Key("process.title")
+
+ // ProcessUserIDKey is the attribute Key conforming to the "process.user.id"
+ // semantic conventions. It represents the effective user ID (EUID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1001
+ ProcessUserIDKey = attribute.Key("process.user.id")
+
+ // ProcessUserNameKey is the attribute Key conforming to the "process.user.name"
+ // semantic conventions. It represents the username of the effective user of the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessUserNameKey = attribute.Key("process.user.name")
+
+ // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic
+ // conventions. It represents the virtual process identifier.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily unique
+ // across all processes on the host but it is unique within the process
+ // namespace that the process exists within.
+ ProcessVpidKey = attribute.Key("process.vpid")
+
+ // ProcessWorkingDirectoryKey is the attribute Key conforming to the
+ // "process.working_directory" semantic conventions. It represents the working
+ // directory of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/root"
+ ProcessWorkingDirectoryKey = attribute.Key("process.working_directory")
+)
+
+// ProcessArgsCount returns an attribute KeyValue conforming to the
+// "process.args_count" semantic conventions. It represents the length of the
+// process.command_args array.
+func ProcessArgsCount(val int) attribute.KeyValue {
+ return ProcessArgsCountKey.Int(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be set
+// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the
+// first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the command
+// arguments (including the command/executable itself) as received by the
+// process. On Linux-based systems (and some other Unixoid systems supporting
+// procfs), can be set according to the list of null-delimited strings extracted
+// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full
+// argv vector passed to `main`. SHOULD NOT be collected by default unless there
+// is sanitization that excludes sensitive data.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+// you have to assemble it just for monitoring; use `process.command_args`
+// instead. SHOULD NOT be collected by default unless there is sanitization that
+// excludes sensitive data.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and time
+// the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the
+// "process.environment_variable" semantic conventions. It represents the process
+// environment variables, `<key>` being the environment variable name, the value
+// being the environment variable value.
+func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue {
+ return attribute.String("process.environment_variable."+key, val)
+}
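
Because ProcessEnvironmentVariable builds its key dynamically (the variable name becomes part of the attribute key), it composes with the other process.* helpers as in the editorial sketch below. The caveats above about not collecting command arguments or environment values by default still apply, and the semconv import version is an assumption.

package example

import (
	"os"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // version in the path is an assumption
)

// selfProcessAttrs describes the current process; only collect these where the
// sanitization caveats in the attribute documentation are satisfied.
func selfProcessAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.ProcessPIDKey.Int(os.Getpid()),                        // process.pid
		semconv.ProcessCommandArgs(os.Args...),                        // process.command_args
		semconv.ProcessEnvironmentVariable("HOME", os.Getenv("HOME")), // process.environment_variable.HOME
	}
}
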
+
+// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
+// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGNUKey.String(val)
+}
+
+// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the
+// "process.executable.build_id.go" semantic conventions. It represents the Go
+// build ID as retrieved by `go tool buildid <go executable>`.
+func ProcessExecutableBuildIDGo(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGoKey.String(val)
+}
+
+// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to
+// the "process.executable.build_id.htlhash" semantic conventions. It represents
+// the profiling specific build ID for executables. See the OTel specification
+// for Profiles for more information.
+func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDHtlhashKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of the
+// process executable. On Linux based systems, this SHOULD be set to the base
+// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the
+// base name of `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path to
+// the process executable. On Linux based systems, can be set to the target of
+// `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time the
+// process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of the
+// process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents the whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessLinuxCgroup returns an attribute KeyValue conforming to the
+// "process.linux.cgroup" semantic conventions. It represents the control group
+// associated with the process.
+func ProcessLinuxCgroup(val string) attribute.KeyValue {
+ return ProcessLinuxCgroupKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the "process.owner"
+// semantic conventions. It represents the username of the user that owns the
+// process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+ return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+ return ProcessRealUserNameKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user ID
+// (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+ return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username of
+// the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+ return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+ return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessTitle returns an attribute KeyValue conforming to the "process.title"
+// semantic conventions. It represents the process title (proctitle).
+func ProcessTitle(val string) attribute.KeyValue {
+ return ProcessTitleKey.String(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+ return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+ return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid"
+// semantic conventions. It represents the virtual process identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+ return ProcessVpidKey.Int(val)
+}
+
+// ProcessWorkingDirectory returns an attribute KeyValue conforming to the
+// "process.working_directory" semantic conventions. It represents the working
+// directory of the process.
+func ProcessWorkingDirectory(val string) attribute.KeyValue {
+ return ProcessWorkingDirectoryKey.String(val)
+}
+
+// Enum values for process.context_switch_type
+var (
+ // voluntary
+ // Stability: development
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ // Stability: development
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+// Enum values for process.paging.fault_type
+var (
+ // major
+ // Stability: development
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ // Stability: development
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
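+
+// processAttributesSketch is a minimal illustrative sketch (not part of the
+// generated conventions): it composes several process attributes, including a
+// pre-built enum value, into a slice usable on a span or metric data point.
+// The PID and command values are hypothetical.
+func processAttributesSketch() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ProcessPID(1234),
+		ProcessCommandArgs("podman-tui", "--debug"),
+		ProcessArgsCount(2),
+		// Enum members are ready-made KeyValues and are used as-is.
+		ProcessContextSwitchTypeVoluntary,
+	}
+}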
+
+// Namespace: profile
+const (
+ // ProfileFrameTypeKey is the attribute Key conforming to the
+ // "profile.frame.type" semantic conventions. It represents the describes the
+ // interpreter or compiler of a single frame.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpython"
+ ProfileFrameTypeKey = attribute.Key("profile.frame.type")
+)
+
+// Enum values for profile.frame.type
+var (
+ // [.NET]
+ //
+ // Stability: development
+ //
+ // [.NET]: https://wikipedia.org/wiki/.NET
+ ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet")
+ // [JVM]
+ //
+ // Stability: development
+ //
+ // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine
+ ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm")
+ // [Kernel]
+ //
+ // Stability: development
+ //
+ // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system)
+ ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel")
+ // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a
+ // more precise value MUST be used.
+ //
+ // Stability: development
+ //
+ // [C]: https://wikipedia.org/wiki/C_(programming_language)
+ // [C++]: https://wikipedia.org/wiki/C%2B%2B
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeNative = ProfileFrameTypeKey.String("native")
+ // [Perl]
+ //
+ // Stability: development
+ //
+ // [Perl]: https://wikipedia.org/wiki/Perl
+ ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl")
+ // [PHP]
+ //
+ // Stability: development
+ //
+ // [PHP]: https://wikipedia.org/wiki/PHP
+ ProfileFrameTypePHP = ProfileFrameTypeKey.String("php")
+ // [Python]
+ //
+ // Stability: development
+ //
+ // [Python]: https://wikipedia.org/wiki/Python_(programming_language)
+ ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython")
+ // [Ruby]
+ //
+ // Stability: development
+ //
+ // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language)
+ ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby")
+ // [V8JS]
+ //
+ // Stability: development
+ //
+ // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine)
+ ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js")
+ // [Erlang]
+ //
+ // Stability: development
+ //
+ // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine)
+ ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam")
+ // [Go]
+ //
+ // Stability: development
+ //
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ ProfileFrameTypeGo = ProfileFrameTypeKey.String("go")
+ // [Rust]
+ //
+ // Stability: development
+ //
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust")
+)
+
+// Namespace: rpc
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes] of the Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [error codes]: https://connectrpc.com//docs/protocol/#error-codes
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the
+ // [numeric status code] of the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+ // property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -32700, 100
+ RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Parse error", "User already exists"
+ RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJSONRPCRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int, string,
+ // `null` or missing (for notifications), value is expected to be cast to string
+ // for simplicity. Use empty string in case of `null` value. Omit entirely if
+ // this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10", "request-7", ""
+ RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJSONRPCVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0", "1.0"
+ RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`: one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic
+ // conventions. It represents the name of the (logical) method being called,
+ // must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: exampleMethod
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function.name` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic
+ // conventions. It represents the full (logical) name of the service being
+ // called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myservice.EchoService
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The `code.namespace` attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., class with method actually
+ // executing the call on the server side, RPC client stub class on the client
+ // side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic
+ // conventions. It represents a string identifying the remoting system. See
+ // below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the
+// Connect request metadata, `key` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val)
+}
+
+// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the
+// Connect response metadata, `key` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val)
+}
+
+// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC
+// request metadata, `key` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.grpc.request.metadata."+key, val)
+}
+
+// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC
+// response metadata, `key` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.grpc.response.metadata."+key, val)
+}
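+
+// rpcMetadataAttributesSketch is a minimal illustrative sketch (not part of the
+// generated conventions): it shows how the metadata template helpers append the
+// normalized (lowercase) metadata key to the attribute name and store the
+// values as a string slice. The metadata shown is hypothetical.
+func rpcMetadataAttributesSketch() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		// Yields the key "rpc.grpc.request.metadata.x-request-id".
+		RPCGRPCRequestMetadata("x-request-id", "req-42"),
+		// Yields the key "rpc.grpc.response.metadata.content-type".
+		RPCGRPCResponseMetadata("content-type", "application/grpc"),
+	}
+}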
+
+// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+// property of response if it is an error response.
+func RPCJSONRPCErrorCode(val int) attribute.KeyValue {
+ return RPCJSONRPCErrorCodeKey.Int(val)
+}
+
+// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJSONRPCErrorMessage(val string) attribute.KeyValue {
+ return RPCJSONRPCErrorMessageKey.String(val)
+}
+
+// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property
+// of request or response. Since protocol allows id to be int, string, `null` or
+// missing (for notifications), value is expected to be cast to string for
+// simplicity. Use empty string in case of `null` value. Omit entirely if this is
+// a notification.
+func RPCJSONRPCRequestID(val string) attribute.KeyValue {
+ return RPCJSONRPCRequestIDKey.String(val)
+}
+
+// RPCJSONRPCVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version
+// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't
+// specify this, the value can be omitted.
+func RPCJSONRPCVersion(val string) attribute.KeyValue {
+ return RPCJSONRPCVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id"
+// semantic conventions. It MUST be calculated as two different counters starting
+// from `1`: one for sent messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// Enum values for rpc.connect_rpc.error_code
+var (
+ // cancelled
+ // Stability: development
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ // Stability: development
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ // Stability: development
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ // Stability: development
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ // Stability: development
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ // Stability: development
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ // Stability: development
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ // Stability: development
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ // Stability: development
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ // Stability: development
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ // Stability: development
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ // Stability: development
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ // Stability: development
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ // Stability: development
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ // Stability: development
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ // Stability: development
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+// Enum values for rpc.grpc.status_code
+var (
+ // OK
+ // Stability: development
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ // Stability: development
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ // Stability: development
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ // Stability: development
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ // Stability: development
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ // Stability: development
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ // Stability: development
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ // Stability: development
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ // Stability: development
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ // Stability: development
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ // Stability: development
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ // Stability: development
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ // Stability: development
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ // Stability: development
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ // Stability: development
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ // Stability: development
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ // Stability: development
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+// Enum values for rpc.message.type
+var (
+ // sent
+ // Stability: development
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ // received
+ // Stability: development
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
+
+// Enum values for rpc.system
+var (
+ // gRPC
+ // Stability: development
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ // Stability: development
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ // Stability: development
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ // Stability: development
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ // Stability: development
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
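+
+// grpcSpanAttributesSketch is a minimal illustrative sketch (not part of the
+// generated conventions): it combines the rpc.* helpers and enum values into
+// the attribute set a gRPC client or server span would typically carry. The
+// service and method names are taken from the examples above.
+func grpcSpanAttributesSketch() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		RPCSystemGRPC,
+		RPCService("myservice.EchoService"),
+		RPCMethod("exampleMethod"),
+		// The gRPC status code enum is numeric; 0 means OK.
+		RPCGRPCStatusCodeOk,
+	}
+}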
+
+// Namespace: security_rule
+const (
+ // SecurityRuleCategoryKey is the attribute Key conforming to the
+ // "security_rule.category" semantic conventions. It represents a categorization
+ // value keyword used by the entity using the rule for detection of this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Attempted Information Leak"
+ SecurityRuleCategoryKey = attribute.Key("security_rule.category")
+
+ // SecurityRuleDescriptionKey is the attribute Key conforming to the
+ // "security_rule.description" semantic conventions. It represents the
+ // description of the rule generating the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Block requests to public DNS over HTTPS / TLS protocols"
+ SecurityRuleDescriptionKey = attribute.Key("security_rule.description")
+
+ // SecurityRuleLicenseKey is the attribute Key conforming to the
+ // "security_rule.license" semantic conventions. It represents the name of the
+ // license under which the rule used to generate this event is made available.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apache 2.0"
+ SecurityRuleLicenseKey = attribute.Key("security_rule.license")
+
+ // SecurityRuleNameKey is the attribute Key conforming to the
+ // "security_rule.name" semantic conventions. It represents the name of the rule
+ // or signature generating the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BLOCK_DNS_over_TLS"
+ SecurityRuleNameKey = attribute.Key("security_rule.name")
+
+ // SecurityRuleReferenceKey is the attribute Key conforming to the
+ // "security_rule.reference" semantic conventions. It represents the reference
+ // URL to additional information about the rule used to generate this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS"
+ // Note: The URL can point to the vendor’s documentation about the rule. If
+ // that’s not available, it can also be a link to a more general page
+ // describing this type of alert.
+ SecurityRuleReferenceKey = attribute.Key("security_rule.reference")
+
+ // SecurityRuleRulesetNameKey is the attribute Key conforming to the
+ // "security_rule.ruleset.name" semantic conventions. It represents the name of
+ // the ruleset, policy, group, or parent category in which the rule used to
+ // generate this event is a member.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Standard_Protocol_Filters"
+ SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name")
+
+ // SecurityRuleUUIDKey is the attribute Key conforming to the
+ // "security_rule.uuid" semantic conventions. It represents a rule ID that is
+ // unique within the scope of a set or group of agents, observers, or other
+ // entities using the rule for detection of this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011"
+ SecurityRuleUUIDKey = attribute.Key("security_rule.uuid")
+
+ // SecurityRuleVersionKey is the attribute Key conforming to the
+ // "security_rule.version" semantic conventions. It represents the version /
+ // revision of the rule being used for analysis.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.0.0"
+ SecurityRuleVersionKey = attribute.Key("security_rule.version")
+)
+
+// SecurityRuleCategory returns an attribute KeyValue conforming to the
+// "security_rule.category" semantic conventions. It represents a categorization
+// value keyword used by the entity using the rule for detection of this event.
+func SecurityRuleCategory(val string) attribute.KeyValue {
+ return SecurityRuleCategoryKey.String(val)
+}
+
+// SecurityRuleDescription returns an attribute KeyValue conforming to the
+// "security_rule.description" semantic conventions. It represents the
+// description of the rule generating the event.
+func SecurityRuleDescription(val string) attribute.KeyValue {
+ return SecurityRuleDescriptionKey.String(val)
+}
+
+// SecurityRuleLicense returns an attribute KeyValue conforming to the
+// "security_rule.license" semantic conventions. It represents the name of the
+// license under which the rule used to generate this event is made available.
+func SecurityRuleLicense(val string) attribute.KeyValue {
+ return SecurityRuleLicenseKey.String(val)
+}
+
+// SecurityRuleName returns an attribute KeyValue conforming to the
+// "security_rule.name" semantic conventions. It represents the name of the rule
+// or signature generating the event.
+func SecurityRuleName(val string) attribute.KeyValue {
+ return SecurityRuleNameKey.String(val)
+}
+
+// SecurityRuleReference returns an attribute KeyValue conforming to the
+// "security_rule.reference" semantic conventions. It represents the reference
+// URL to additional information about the rule used to generate this event.
+func SecurityRuleReference(val string) attribute.KeyValue {
+ return SecurityRuleReferenceKey.String(val)
+}
+
+// SecurityRuleRulesetName returns an attribute KeyValue conforming to the
+// "security_rule.ruleset.name" semantic conventions. It represents the name of
+// the ruleset, policy, group, or parent category in which the rule used to
+// generate this event is a member.
+func SecurityRuleRulesetName(val string) attribute.KeyValue {
+ return SecurityRuleRulesetNameKey.String(val)
+}
+
+// SecurityRuleUUID returns an attribute KeyValue conforming to the
+// "security_rule.uuid" semantic conventions. It represents a rule ID that is
+// unique within the scope of a set or group of agents, observers, or other
+// entities using the rule for detection of this event.
+func SecurityRuleUUID(val string) attribute.KeyValue {
+ return SecurityRuleUUIDKey.String(val)
+}
+
+// SecurityRuleVersion returns an attribute KeyValue conforming to the
+// "security_rule.version" semantic conventions. It represents the version /
+// revision of the rule being used for analysis.
+func SecurityRuleVersion(val string) attribute.KeyValue {
+ return SecurityRuleVersionKey.String(val)
+}
+
+// Namespace: server
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, `server.address` SHOULD represent the server address behind any
+ // intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port" semantic
+ // conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, `server.port` SHOULD represent the server port behind any
+ // intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the "server.address"
+// semantic conventions. It represents the server domain name if available
+// without reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
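+
+// serverAttributesSketch is a minimal illustrative sketch (not part of the
+// generated conventions): it records the logical server endpoint of an
+// outgoing call using the stable server.* attributes. The address and port
+// are hypothetical.
+func serverAttributesSketch() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ServerAddress("example.com"),
+		ServerPort(443),
+	}
+}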
+
+// Namespace: service
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID of
+ // the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "627cc493-f310-47de-96bd-71410b7dec09"
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be globally
+ // unique). The ID helps to
+ // distinguish instances of the same service that exist at the same time (e.g.
+ // instances of a horizontally scaled
+ // service).
+ //
+ // Implementations, such as SDKs, are recommended to generate a random Version 1
+ // or Version 4 [RFC
+ // 4122] UUID, but are free to use an inherent unique ID as
+ // the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be used as
+ // source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the purposes of
+ // identifying a service instance is
+ // needed. Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`] file, the underlying
+ // data, such as pod name and namespace, should be treated as confidential; it
+ // is the user's choice whether to expose it
+ // via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we do
+ // not recommend using one identifier
+ // for all processes participating in the application. Instead, it's recommended
+ // that each division (e.g. a worker
+ // thread in unicorn) have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it can't
+ // unambiguously determine the
+ // service instance that is generating that telemetry. For instance, creating a
+ // UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container within
+ // that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers, as
+ // they know the target address and
+ // port.
+ //
+ // [RFC
+ // 4122]: https://www.ietf.org/rfc/rfc4122.txt
+ // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name" semantic
+ // conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "shoppingcart"
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fallback to `unknown_service:`
+ // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`.
+ // If `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ //
+ // [`process.executable.name`]: process.md
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Shop"
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name` is
+ // expected to be unique for all services that have no explicit namespace
+ // defined (so the empty/unspecified namespace is simply one more valid
+ // namespace). Zero-length namespace string is assumed equal to unspecified
+ // namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the "service.version"
+ // semantic conventions. It represents the version string of the service API or
+ // implementation. The format is not defined by these conventions.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "2.0.0", "a01dbef8a"
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of the
+// service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the "service.name"
+// semantic conventions. It represents the logical name of the service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
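+
+// serviceResourceAttributesSketch is a minimal illustrative sketch (not part of
+// the generated conventions): it builds the service.* resource attributes
+// described above, using a literal UUID for service.instance.id as the note
+// recommends. The names and UUID are the example values from the doc comments.
+func serviceResourceAttributesSketch() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ServiceName("shoppingcart"),
+		ServiceNamespace("Shop"),
+		ServiceVersion("2.0.0"),
+		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+	}
+}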
+
+// Namespace: session
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id" semantic
+ // conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 00112233-4455-6677-8899-aabbccddeeff
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 00112233-4455-6677-8899-aabbccddeeff
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
+
+// Namespace: signalr
+const (
+ // SignalRConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the signalR
+ // HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "app_shutdown", "timeout"
+ SignalRConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalRTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the
+ // [SignalR transport type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "web_sockets", "long_polling"
+ //
+ // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+ SignalRTransportKey = attribute.Key("signalr.transport")
+)
+
+// Enum values for signalr.connection.status
+var (
+ // The connection was closed normally.
+ // Stability: stable
+ SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout.
+ // Stability: stable
+ SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down.
+ // Stability: stable
+ SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown")
+)
+
+// Enum values for signalr.transport
+var (
+ // ServerSentEvents protocol
+ // Stability: stable
+ SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ // Stability: stable
+ SignalRTransportLongPolling = SignalRTransportKey.String("long_polling")
+ // WebSockets protocol
+ // Stability: stable
+ SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets")
+)
+
+// Namespace: source
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the destination side, and when communicating through
+ // an intermediary, `source.address` SHOULD represent the source address behind
+ // any intermediaries, for example proxies, if it's available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port" semantic
+ // conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the "source.address"
+// semantic conventions. It represents the source address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// Namespace: system
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It represents the
+ // deprecated, use `cpu.logical_number` instead.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "(identifier)"
+ SystemDeviceKey = attribute.Key("system.device")
+
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the filesystem
+ // mode.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "rw, ro"
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/mnt/data"
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the filesystem
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "used"
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the filesystem
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ext4"
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free", "cached"
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "in"
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory paging
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free"
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory paging
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "minor"
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State Codes].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "running"
+ //
+ // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the
+// deprecated, use `cpu.logical_number` instead.
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
+
+// SystemDevice returns an attribute KeyValue conforming to the "system.device"
+// semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the
+// "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Enum values for system.filesystem.state
+var (
+ // used
+ // Stability: development
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ // Stability: development
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ // Stability: development
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+// Enum values for system.filesystem.type
+var (
+ // fat32
+ // Stability: development
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ // Stability: development
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ // Stability: development
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ // Stability: development
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ // Stability: development
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ // Stability: development
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// Enum values for system.memory.state
+var (
+ // Actual used virtual memory in bytes.
+ // Stability: development
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ // Stability: development
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // buffers
+ // Stability: development
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ // Stability: development
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Enum values for system.paging.direction
+var (
+ // in
+ // Stability: development
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ // Stability: development
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+// Enum values for system.paging.state
+var (
+ // used
+ // Stability: development
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ // Stability: development
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+// Enum values for system.paging.type
+var (
+ // major
+ // Stability: development
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ // Stability: development
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Enum values for system.process.status
+var (
+ // running
+ // Stability: development
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ // Stability: development
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ // Stability: development
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ // Stability: development
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
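+
+// filesystemAttributesSketch is a minimal illustrative sketch (not part of the
+// generated conventions): it pairs the system.filesystem.* helpers with their
+// enum values, e.g. for a filesystem usage metric data point. The device and
+// mount path are hypothetical.
+func filesystemAttributesSketch() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SystemDevice("/dev/sda1"),
+		SystemFilesystemMountpoint("/mnt/data"),
+		SystemFilesystemTypeExt4,
+		SystemFilesystemStateUsed,
+	}
+}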
+
+// Namespace: telemetry
+const (
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of the
+ // auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "parts-unlimited-java"
+ // Note: Official auto instrumentation agents and distributions SHOULD set the
+ // `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2.3"
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the language of
+ // the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "opentelemetry"
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to
+ // `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is used,
+ // this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module name of
+ // this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.2.3"
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version string
+// of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// Enum values for telemetry.sdk.language
+var (
+ // cpp
+ // Stability: stable
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ // Stability: stable
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ // Stability: stable
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ // Stability: stable
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ // Stability: stable
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ // Stability: stable
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ // Stability: stable
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ // Stability: stable
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ // Stability: stable
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ // Stability: stable
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ // Stability: stable
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ // Stability: stable
+ TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs")
+)
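+
+// A minimal usage sketch (illustrative, not part of the generated conventions):
+// building an attribute set with the helpers and enum values above, using the
+// example values quoted in the comments.
+//
+//	attrs := []attribute.KeyValue{
+//		TelemetryDistroName("parts-unlimited-java"), // example value from above
+//		TelemetryDistroVersion("1.2.3"),
+//		TelemetrySDKLanguageJava,
+//	}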
+
+// Namespace: test
+const (
+ // TestCaseNameKey is the attribute Key conforming to the "test.case.name"
+ // semantic conventions. It represents the fully qualified human readable name
+ // of the [test case].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1",
+ // "ExampleTestCase1_test1"
+ //
+ // [test case]: https://wikipedia.org/wiki/Test_case
+ TestCaseNameKey = attribute.Key("test.case.name")
+
+ // TestCaseResultStatusKey is the attribute Key conforming to the
+ // "test.case.result.status" semantic conventions. It represents the status of
+ // the actual test case result from test execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pass", "fail"
+ TestCaseResultStatusKey = attribute.Key("test.case.result.status")
+
+ // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name"
+ // semantic conventions. It represents the human readable name of a
+ // [test suite].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TestSuite1"
+ //
+ // [test suite]: https://wikipedia.org/wiki/Test_suite
+ TestSuiteNameKey = attribute.Key("test.suite.name")
+
+ // TestSuiteRunStatusKey is the attribute Key conforming to the
+ // "test.suite.run.status" semantic conventions. It represents the status of the
+ // test suite run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "skipped", "aborted", "timed_out",
+ // "in_progress"
+ TestSuiteRunStatusKey = attribute.Key("test.suite.run.status")
+)
+
+// TestCaseName returns an attribute KeyValue conforming to the "test.case.name"
+// semantic conventions. It represents the fully qualified human readable name of
+// the [test case].
+//
+// [test case]: https://wikipedia.org/wiki/Test_case
+func TestCaseName(val string) attribute.KeyValue {
+ return TestCaseNameKey.String(val)
+}
+
+// TestSuiteName returns an attribute KeyValue conforming to the
+// "test.suite.name" semantic conventions. It represents the human readable name
+// of a [test suite].
+//
+// [test suite]: https://wikipedia.org/wiki/Test_suite
+func TestSuiteName(val string) attribute.KeyValue {
+ return TestSuiteNameKey.String(val)
+}
+
+// Enum values for test.case.result.status
+var (
+ // pass
+ // Stability: development
+ TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass")
+ // fail
+ // Stability: development
+ TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail")
+)
+
+// Enum values for test.suite.run.status
+var (
+ // success
+ // Stability: development
+ TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success")
+ // failure
+ // Stability: development
+ TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure")
+ // skipped
+ // Stability: development
+ TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped")
+ // aborted
+ // Stability: development
+ TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted")
+ // timed_out
+ // Stability: development
+ TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out")
+ // in_progress
+ // Stability: development
+ TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress")
+)
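+
+// A minimal usage sketch (illustrative only): attaching test attributes to a
+// span, assuming a trace.Span named span from go.opentelemetry.io/otel/trace.
+// Values are taken from the examples above.
+//
+//	span.SetAttributes(
+//		TestSuiteName("TestSuite1"),
+//		TestSuiteRunStatusSuccess,
+//		TestCaseName("org.example.TestCase1.test1"),
+//		TestCaseResultStatusPass,
+//	)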
+
+// Namespace: thread
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed to OS
+ // thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic
+ // conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: main
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic
+// conventions. It represents the current "managed" thread ID (as opposed to OS
+// thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
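+
+// A minimal usage sketch (illustrative only): recording thread attributes on a
+// span, assuming a trace.Span named span. The ID is a placeholder, since Go
+// does not expose a managed thread ID directly.
+//
+//	span.SetAttributes(
+//		ThreadID(42), // placeholder value
+//		ThreadName("main"),
+//	)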
+
+// Namespace: tls
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic
+ // conventions. It represents the string indicating the [cipher] used during the
+ // current connection.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+ // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
+ // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions`
+ // of the [registered TLS Cipher Suites].
+ //
+ // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+ // [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+ // stand-alone certificate offered by the client. This is usually
+ // mutually-exclusive of `client.certificate_chain` since this value also exists
+ // in that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // client. This is usually mutually-exclusive of `client.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the certificate
+ // fingerprint using the SHA1 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the certificate
+ // fingerprint using the SHA256 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer"
+ // semantic conventions. It represents the distinguished name of [subject] of
+ // the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/Time
+ // indicating when client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T00:00:00.000Z"
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the date/Time
+ // indicating when client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1970-01-01T00:00:00.000Z"
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the distinguished
+ // name of subject of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com"
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the array
+ // of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the given
+ // cipher, when applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "secp256r1"
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the "tls.established"
+ // semantic conventions. It represents the boolean flag indicating if the TLS
+ // negotiation was successful and transitioned to an encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: true
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol"
+ // semantic conventions. It represents the string indicating the protocol being
+ // tunneled. Per the values in the [IANA registry], this string should be lower
+ // case.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "http/1.1"
+ //
+ // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name"
+ // semantic conventions. It represents the normalized lowercase protocol name
+ // parsed from original string of the negotiated [SSL/TLS protocol version].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric part
+ // of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol version].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2", "3"
+ //
+ // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic
+ // conventions. It represents the boolean flag indicating if this TLS connection
+ // was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: true
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+ // stand-alone certificate offered by the server. This is usually
+ // mutually-exclusive of `server.certificate_chain` since this value also exists
+ // in that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // server. This is usually mutually-exclusive of `server.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the certificate
+ // fingerprint using the SHA1 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the certificate
+ // fingerprint using the SHA256 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer"
+ // semantic conventions. It represents the distinguished name of [subject] of
+ // the issuer of the x.509 certificate presented by the client.
+ //
+ // Note: despite the attribute name, the upstream description reads "presented
+ // by the client"; the value describes the issuer of the client certificate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s"
+ // semantic conventions. It represents a hash that identifies servers based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T00:00:00.000Z"
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the date/Time
+ // indicating when server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1970-01-01T00:00:00.000Z"
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the distinguished
+ // name of subject of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com"
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the [cipher] used
+// during the current connection.
+//
+// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also exists
+// in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// client. This is usually mutually-exclusive of `client.certificate` since that
+// value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate offered
+// by the client. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
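+
+// A minimal sketch (illustrative only) of producing the uppercase fingerprint
+// described above, assuming an *x509.Certificate named cert whose Raw field
+// holds the DER-encoded certificate:
+//
+//	sum := sha256.Sum256(cert.Raw)
+//	kv := TLSClientHashSha256(strings.ToUpper(hex.EncodeToString(sum[:])))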
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the client.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3"
+// semantic conventions. It represents a hash that identifies clients based on
+// how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic
+// conventions. It represents the string indicating the curve used for the given
+// cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string indicating
+// the protocol being tunneled. Per the values in the [IANA registry], this
+// string should be lower case.
+//
+// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part of
+// the version parsed from the original string of the negotiated
+// [SSL/TLS protocol version].
+//
+// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also exists
+// in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// server. This is usually mutually-exclusive of `server.certificate` since that
+// value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate offered
+// by the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the server.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
+// Enum values for tls.protocol.name
+var (
+ // ssl
+ // Stability: development
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ // Stability: development
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
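+
+// A minimal usage sketch (illustrative only): deriving TLS attributes from a
+// crypto/tls connection state, assuming a *tls.Conn named conn and a
+// trace.Span named span:
+//
+//	state := conn.ConnectionState()
+//	span.SetAttributes(
+//		TLSEstablished(state.HandshakeComplete),
+//		TLSProtocolNameTLS,
+//		TLSCipher(tls.CipherSuiteName(state.CipherSuite)),
+//		TLSNextProtocol(state.NegotiatedProtocol),
+//	)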
+
+// Namespace: url
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain" semantic
+ // conventions. It represents the domain extracted from the `url.full`, such as
+ // "opentelemetry.io".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2",
+ // "[1080:0:0:0:8:800:200C:417A]"
+ // Note: In some cases a URL may refer to an IP and/or port directly, without a
+ // domain name. In this case, the IP address would go to the domain field. If
+ // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[`
+ // and `]` characters should also be captured in the domain field.
+ //
+ // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2
+ URLDomainKey = attribute.Key("url.domain")
+
+ // URLExtensionKey is the attribute Key conforming to the "url.extension"
+ // semantic conventions. It represents the file extension extracted from the
+ // `url.full`, excluding the leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+ // Note: The file extension is only set if it exists, as not every url has a
+ // file extension. When the file name has multiple extensions (`example.tar.gz`),
+ // only the last one should be captured (`gz`, not `tar.gz`).
+ URLExtensionKey = attribute.Key("url.extension")
+
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic
+ // conventions. It represents the [URI fragment] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SemConv"
+ //
+ // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network resource
+ // according to [RFC3986].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost"
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the fragment
+ // is not transmitted over HTTP, but if it is known, it SHOULD be included
+ // nevertheless.
+ //
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`.
+ // In such case username and password SHOULD be redacted and attribute's value
+ // SHOULD be `https://REDACTED:REDACTED@www.example.com/`.
+ //
+ // `url.full` SHOULD capture the absolute URL when it is available (or can be
+ // reconstructed).
+ //
+ // Sensitive content provided in `url.full` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ //
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the
+ // value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `https://www.example.com/path?color=blue&sig=REDACTED`.
+ //
+ // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLFullKey = attribute.Key("url.full")
+
+ // URLOriginalKey is the attribute Key conforming to the "url.original" semantic
+ // conventions. It represents the unmodified original URL as seen in the event
+ // source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv",
+ // "search?q=OpenTelemetry"
+ // Note: In network monitoring, the observed URL may be a full URL, whereas in
+ // access logs, the URL is often just represented as a path. This field is meant
+ // to represent the URL as it was observed, complete or not.
+ // `url.original` might contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case password and
+ // username SHOULD NOT be redacted and attribute's value SHOULD remain the same.
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI path] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/search"
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI query] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "q=OpenTelemetry"
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ //
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `q=OpenTelemetry&sig=REDACTED`.
+ //
+ // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered url domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.com", "foo.co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ // For example, the registered domain for `foo.example.com` is `example.com`.
+ // Trying to approximate this by simply taking the last two labels will not work
+ // well for TLDs such as `co.uk`.
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic
+ // conventions. It represents the [URI scheme] component identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https", "ftp", "telnet"
+ //
+ // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name, which includes all of the names except the host name
+ // under the registered_domain. In a partially qualified domain, or if the
+ // qualification level of the full name cannot be determined, subdomain contains
+ // all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "east", "sub2.sub1"
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the
+ // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the
+ // subdomain field should contain `sub2.sub1`, with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template" semantic
+ // conventions. It represents the low-cardinality template of an
+ // [absolute path reference].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/users/{id}", "/users/:id", "/users?id={id}"
+ //
+ // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective top
+ // level domain (eTLD), also known as the domain suffix, which is the last part
+ // of the domain name. For example, the top level domain for example.com is
+ // `com`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com", "co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
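+
+// A minimal sketch (illustrative only) of the query-string redaction described
+// for `url.full` and `url.query` above, assuming the raw URL is held in a
+// string named full:
+//
+//	u, _ := url.Parse(full) // error handling omitted in this sketch
+//	q := u.Query()
+//	for _, k := range []string{"AWSAccessKeyId", "Signature", "sig", "X-Goog-Signature"} {
+//		if q.Has(k) {
+//			q.Set(k, "REDACTED")
+//		}
+//	}
+//	u.RawQuery = q.Encode()
+//	attrs := []attribute.KeyValue{URLFull(u.String()), URLQuery(u.RawQuery)}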
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the `url.full`,
+// such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the "url.extension"
+// semantic conventions. It represents the file extension extracted from the
+// `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the "url.fragment"
+// semantic conventions. It represents the [URI fragment] component.
+//
+// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full" semantic
+// conventions. It represents the absolute URL describing a network resource
+// according to [RFC3986].
+//
+// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the "url.original"
+// semantic conventions. It represents the unmodified original URL as seen in the
+// event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path" semantic
+// conventions. It represents the [URI path] component.
+//
+// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port" semantic
+// conventions. It represents the port extracted from the `url.full`.
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic
+// conventions. It represents the [URI query] component.
+//
+// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered url domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI scheme] component identifying the
+// used protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain"
+// semantic conventions. It represents the subdomain portion of a fully qualified
+// domain name, which includes all of the names except the host name under the
+// registered_domain. In a partially qualified domain, or if the qualification
+// level of the full name cannot be determined, subdomain contains all of the
+// names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the "url.template"
+// semantic conventions. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, which is the last part
+// of the domain name. For example, the top level domain for example.com is
+// `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
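+
+// A minimal usage sketch (illustrative only): splitting a URL into the
+// component attributes defined above with net/url, assuming a trace.Span named
+// span and using the example URL quoted above:
+//
+//	u, _ := url.Parse("https://www.foo.bar/search?q=OpenTelemetry#SemConv")
+//	span.SetAttributes(
+//		URLFull(u.String()),
+//		URLScheme(u.Scheme),
+//		URLDomain(u.Hostname()),
+//		URLPath(u.Path),
+//		URLQuery(u.RawQuery),
+//		URLFragment(u.Fragment),
+//	)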
+
+// Namespace: user
+const (
+ // UserEmailKey is the attribute Key conforming to the "user.email" semantic
+ // conventions. It represents the user email address.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a.einstein@example.com"
+ UserEmailKey = attribute.Key("user.email")
+
+ // UserFullNameKey is the attribute Key conforming to the "user.full_name"
+ // semantic conventions. It represents the user's full name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Albert Einstein"
+ UserFullNameKey = attribute.Key("user.full_name")
+
+ // UserHashKey is the attribute Key conforming to the "user.hash" semantic
+ // conventions. It represents the unique user hash to correlate information for
+ // a user in anonymized form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa"
+ // Note: Useful if `user.id` or `user.name` contain confidential information and
+ // cannot be used.
+ UserHashKey = attribute.Key("user.hash")
+
+ // UserIDKey is the attribute Key conforming to the "user.id" semantic
+ // conventions. It represents the unique identifier of the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000"
+ UserIDKey = attribute.Key("user.id")
+
+ // UserNameKey is the attribute Key conforming to the "user.name" semantic
+ // conventions. It represents the short name or login/username of the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a.einstein"
+ UserNameKey = attribute.Key("user.name")
+
+ // UserRolesKey is the attribute Key conforming to the "user.roles" semantic
+ // conventions. It represents the array of user roles at the time of the event.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "admin", "reporting_user"
+ UserRolesKey = attribute.Key("user.roles")
+)
+
+// UserEmail returns an attribute KeyValue conforming to the "user.email"
+// semantic conventions. It represents the user email address.
+func UserEmail(val string) attribute.KeyValue {
+ return UserEmailKey.String(val)
+}
+
+// UserFullName returns an attribute KeyValue conforming to the "user.full_name"
+// semantic conventions. It represents the user's full name.
+func UserFullName(val string) attribute.KeyValue {
+ return UserFullNameKey.String(val)
+}
+
+// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic
+// conventions. It represents the unique user hash to correlate information for a
+// user in anonymized form.
+func UserHash(val string) attribute.KeyValue {
+ return UserHashKey.String(val)
+}
+
+// UserID returns an attribute KeyValue conforming to the "user.id" semantic
+// conventions. It represents the unique identifier of the user.
+func UserID(val string) attribute.KeyValue {
+ return UserIDKey.String(val)
+}
+
+// UserName returns an attribute KeyValue conforming to the "user.name" semantic
+// conventions. It represents the short name or login/username of the user.
+func UserName(val string) attribute.KeyValue {
+ return UserNameKey.String(val)
+}
+
+// UserRoles returns an attribute KeyValue conforming to the "user.roles"
+// semantic conventions. It represents the array of user roles at the time of the
+// event.
+func UserRoles(val ...string) attribute.KeyValue {
+ return UserRolesKey.StringSlice(val)
+}
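+
+// A minimal usage sketch (illustrative only), using the example values listed
+// above and assuming a trace.Span named span:
+//
+//	span.SetAttributes(
+//		UserID("S-1-5-21-202424912787-2692429404-2351956786-1000"),
+//		UserName("a.einstein"),
+//		UserEmail("a.einstein@example.com"),
+//		UserRoles("admin", "reporting_user"),
+//	)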
+
+// Namespace: user_agent
+const (
+ // UserAgentNameKey is the attribute Key conforming to the "user_agent.name"
+ // semantic conventions. It represents the name of the user-agent extracted from
+ // original. Usually refers to the browser's name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Safari", "YourApp"
+ // Note: [Example] of extracting browser's name from original string. In the
+ // case of using a user-agent for non-browser products, such as microservices
+ // with multiple names/versions inside the `user_agent.original`, the most
+ // significant name SHOULD be selected. In such a scenario it should align with
+ // `user_agent.version`
+ //
+ // [Example]: https://www.whatsmyua.info
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of the
+ // [HTTP User-Agent] header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2"
+ //
+ // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentOSNameKey is the attribute Key conforming to the
+ // "user_agent.os.name" semantic conventions. It represents the human readable
+ // operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ // Note: For mapping user agent strings to OS names, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+ // UserAgentOSVersionKey is the attribute Key conforming to the
+ // "user_agent.os.version" semantic conventions. It represents the version
+ // string of the operating system as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ // Note: For mapping user agent strings to OS versions, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+ // UserAgentSyntheticTypeKey is the attribute Key conforming to the
+ // "user_agent.synthetic.type" semantic conventions. It represents the specifies
+ // the category of synthetic traffic, such as tests or bots.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute MAY be derived from the contents of the
+ // `user_agent.original` attribute. Components that populate the attribute are
+ // responsible for determining what they consider to be synthetic bot or test
+ // traffic. This attribute can either be set for self-identification purposes,
+ // or on telemetry detected to be generated as a result of a synthetic request.
+ // This attribute is useful for distinguishing between genuine client traffic
+ // and synthetic traffic generated by bots or tests.
+ UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of the
+ // user-agent extracted from original. Usually refers to the browser's version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.1.2", "1.0.0"
+ // Note: [Example] of extracting browser's version from original string. In the
+ // case of using a user-agent for non-browser products, such as microservices
+ // with multiple names/versions inside the `user_agent.original`, the most
+ // significant version SHOULD be selected. In such a scenario it should align
+ // with `user_agent.name`.
+ //
+ // [Example]: https://www.whatsmyua.info
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+ return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP User-Agent] header sent by the client.
+//
+// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentOSName returns an attribute KeyValue conforming to the
+// "user_agent.os.name" semantic conventions. It represents the human readable
+// operating system name.
+func UserAgentOSName(val string) attribute.KeyValue {
+ return UserAgentOSNameKey.String(val)
+}
+
+// UserAgentOSVersion returns an attribute KeyValue conforming to the
+// "user_agent.os.version" semantic conventions. It represents the version string
+// of the operating system as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func UserAgentOSVersion(val string) attribute.KeyValue {
+ return UserAgentOSVersionKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from original. Usually refers to the browser's version.
+func UserAgentVersion(val string) attribute.KeyValue {
+ return UserAgentVersionKey.String(val)
+}
+
+// Enum values for user_agent.synthetic.type
+var (
+ // Bot source.
+ // Stability: development
+ UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot")
+ // Synthetic test source.
+ // Stability: development
+ UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test")
+)
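+
+// A minimal usage sketch (illustrative only): capturing the incoming User-Agent
+// header on a server span, assuming an *http.Request named req and a
+// trace.Span named span. Deriving user_agent.name and user_agent.version would
+// additionally require a ua-parser style library, as noted above.
+//
+//	span.SetAttributes(
+//		UserAgentOriginal(req.UserAgent()),
+//	)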
+
+// Namespace: vcs
+const (
+ // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id"
+ // semantic conventions. It represents the ID of the change (pull request/merge
+ // request/changelist) if applicable. This is usually a unique (within
+ // repository) identifier generated by the VCS system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123"
+ VCSChangeIDKey = attribute.Key("vcs.change.id")
+
+ // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state"
+ // semantic conventions. It represents the state of the change (pull
+ // request/merge request/changelist).
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "open", "closed", "merged"
+ VCSChangeStateKey = attribute.Key("vcs.change.state")
+
+ // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title"
+ // semantic conventions. It represents the human readable title of the change
+ // (pull request/merge request/changelist). This title is often a brief summary
+ // of the change and may get merged into a ref as the commit summary.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update
+ // dependency"
+ VCSChangeTitleKey = attribute.Key("vcs.change.title")
+
+ // VCSLineChangeTypeKey is the attribute Key conforming to the
+ // "vcs.line_change.type" semantic conventions. It represents the type of line
+ // change being measured on a branch or change.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "added", "removed"
+ VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type")
+
+ // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name"
+ // semantic conventions. It represents the group owner within the version
+ // control system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-org", "myteam", "business-unit"
+ VCSOwnerNameKey = attribute.Key("vcs.owner.name")
+
+ // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name"
+ // semantic conventions. It represents the name of the version control system
+ // provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "github", "gitlab", "gitea", "bitbucket"
+ VCSProviderNameKey = attribute.Key("vcs.provider.name")
+
+ // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name"
+ // semantic conventions. It represents the name of the [reference] such as
+ // **branch** or **tag** in the repository.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-feature-branch", "tag-1-test"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name")
+
+ // VCSRefBaseRevisionKey is the attribute Key conforming to the
+ // "vcs.ref.base.revision" semantic conventions. It represents the revision,
+ // literally [revised version]. The revision most often refers to a commit
+ // object in Git, or a revision number in SVN.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+ // "main", "123", "HEAD"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits. The
+ // revision can be a full [hash value (see glossary)],
+ // of the recorded change to a ref within a repository pointing to a
+ // commit [commit] object. It does
+ // not necessarily have to be a hash; it can simply define a [revision number]
+ // which is an integer that is monotonically increasing. In cases where
+ // it is identical to the `ref.base.name`, it SHOULD still be included.
+ // It is up to the implementer to decide which value to set as the
+ // revision based on the VCS system and situational context.
+ //
+ // [revised version]: https://www.merriam-webster.com/dictionary/revision
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [commit]: https://git-scm.com/docs/git-commit
+ // [revision number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+ VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision")
+
+ // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type"
+ // semantic conventions. It represents the type of the [reference] in the
+ // repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type")
+
+ // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name"
+ // semantic conventions. It represents the name of the [reference] such as
+ // **branch** or **tag** in the repository.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-feature-branch", "tag-1-test"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name")
+
+ // VCSRefHeadRevisionKey is the attribute Key conforming to the
+ // "vcs.ref.head.revision" semantic conventions. It represents the revision,
+ // literally [revised version]. The revision most often refers to a commit
+ // object in Git, or a revision number in SVN.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+ // "main", "123", "HEAD"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time. The revision can be a full [hash value (see glossary)],
+ // of the recorded change to a ref within a repository pointing to a
+ // commit [commit] object. It does
+ // not necessarily have to be a hash; it can simply define a [revision number]
+ // which is an integer that is monotonically increasing. In cases where
+ // it is identical to the `ref.head.name`, it SHOULD still be included.
+ // It is up to the implementer to decide which value to set as the
+ // revision based on the VCS system and situational context.
+ //
+ // [revised version]: https://www.merriam-webster.com/dictionary/revision
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [commit]: https://git-scm.com/docs/git-commit
+ // [revision number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+ VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision")
+
+ // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type"
+ // semantic conventions. It represents the type of the [reference] in the
+ // repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type")
+
+ // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic
+ // conventions. It represents the type of the [reference] in the repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefTypeKey = attribute.Key("vcs.ref.type")
+
+ // VCSRepositoryNameKey is the attribute Key conforming to the
+ // "vcs.repository.name" semantic conventions. It represents the human readable
+ // name of the repository. It SHOULD NOT include any additional identifier like
+ // Group/SubGroup in GitLab or organization in GitHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "semantic-conventions", "my-cool-repo"
+ // Note: Due to it only being the name, it can clash with forks of the same
+ // repository if collecting telemetry across multiple orgs or groups in
+ // the same backends.
+ VCSRepositoryNameKey = attribute.Key("vcs.repository.name")
+
+ // VCSRepositoryURLFullKey is the attribute Key conforming to the
+ // "vcs.repository.url.full" semantic conventions. It represents the
+ // [canonical URL] of the repository providing the complete HTTP(S) address in
+ // order to locate and identify the repository through a browser.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/opentelemetry/open-telemetry-collector-contrib",
+ // "https://gitlab.com/my-org/my-project/my-projects-project/repo"
+ // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include
+ // the `.git` extension.
+ //
+ // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+ VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full")
+
+ // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the
+ // "vcs.revision_delta.direction" semantic conventions. It represents the type
+ // of revision comparison.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ahead", "behind"
+ VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction")
+)
+
+// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id"
+// semantic conventions. It represents the ID of the change (pull request/merge
+// request/changelist) if applicable. This is usually a unique (within
+// repository) identifier generated by the VCS system.
+func VCSChangeID(val string) attribute.KeyValue {
+ return VCSChangeIDKey.String(val)
+}
+
+// VCSChangeTitle returns an attribute KeyValue conforming to the
+// "vcs.change.title" semantic conventions. It represents the human readable
+// title of the change (pull request/merge request/changelist). This title is
+// often a brief summary of the change and may get merged into a ref as the
+// commit summary.
+func VCSChangeTitle(val string) attribute.KeyValue {
+ return VCSChangeTitleKey.String(val)
+}
+
+// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name"
+// semantic conventions. It represents the group owner within the version control
+// system.
+func VCSOwnerName(val string) attribute.KeyValue {
+ return VCSOwnerNameKey.String(val)
+}
+
+// VCSRefBaseName returns an attribute KeyValue conforming to the
+// "vcs.ref.base.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefBaseName(val string) attribute.KeyValue {
+ return VCSRefBaseNameKey.String(val)
+}
+
+// VCSRefBaseRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.base.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefBaseRevision(val string) attribute.KeyValue {
+ return VCSRefBaseRevisionKey.String(val)
+}
+
+// VCSRefHeadName returns an attribute KeyValue conforming to the
+// "vcs.ref.head.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefHeadName(val string) attribute.KeyValue {
+ return VCSRefHeadNameKey.String(val)
+}
+
+// VCSRefHeadRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.head.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefHeadRevision(val string) attribute.KeyValue {
+ return VCSRefHeadRevisionKey.String(val)
+}
+
+// VCSRepositoryName returns an attribute KeyValue conforming to the
+// "vcs.repository.name" semantic conventions. It represents the human readable
+// name of the repository. It SHOULD NOT include any additional identifier like
+// Group/SubGroup in GitLab or organization in GitHub.
+func VCSRepositoryName(val string) attribute.KeyValue {
+ return VCSRepositoryNameKey.String(val)
+}
+
+// VCSRepositoryURLFull returns an attribute KeyValue conforming to the
+// "vcs.repository.url.full" semantic conventions. It represents the
+// [canonical URL] of the repository providing the complete HTTP(S) address in
+// order to locate and identify the repository through a browser.
+//
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+func VCSRepositoryURLFull(val string) attribute.KeyValue {
+ return VCSRepositoryURLFullKey.String(val)
+}
+
+// Enum values for vcs.change.state
+var (
+ // Open means the change is currently active and under review. It hasn't been
+ // merged into the target branch yet, and it's still possible to make changes or
+ // add comments.
+ // Stability: development
+ VCSChangeStateOpen = VCSChangeStateKey.String("open")
+ // WIP (work-in-progress, draft) means the change is still in progress and not
+ // yet ready for a full review. It might still undergo significant changes.
+ // Stability: development
+ VCSChangeStateWip = VCSChangeStateKey.String("wip")
+ // Closed means the merge request has been closed without merging. This can
+ // happen for various reasons, such as the changes being deemed unnecessary, the
+ // issue being resolved in another way, or the author deciding to withdraw the
+ // request.
+ // Stability: development
+ VCSChangeStateClosed = VCSChangeStateKey.String("closed")
+ // Merged indicates that the change has been successfully integrated into the
+ // target codebase.
+ // Stability: development
+ VCSChangeStateMerged = VCSChangeStateKey.String("merged")
+)
+
+// Enum values for vcs.line_change.type
+var (
+ // How many lines were added.
+ // Stability: development
+ VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added")
+ // How many lines were removed.
+ // Stability: development
+ VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed")
+)
+
+// Enum values for vcs.provider.name
+var (
+ // [GitHub]
+ // Stability: development
+ //
+ // [GitHub]: https://github.com
+ VCSProviderNameGithub = VCSProviderNameKey.String("github")
+ // [GitLab]
+ // Stability: development
+ //
+ // [GitLab]: https://gitlab.com
+ VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab")
+ // [Gitea]
+ // Stability: development
+ //
+ // [Gitea]: https://gitea.io
+ VCSProviderNameGitea = VCSProviderNameKey.String("gitea")
+ // [Bitbucket]
+ // Stability: development
+ //
+ // [Bitbucket]: https://bitbucket.org
+ VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket")
+)
+
+// Enum values for vcs.ref.base.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.head.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefTypeBranch = VCSRefTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefTypeTag = VCSRefTypeKey.String("tag")
+)
+
+// Enum values for vcs.revision_delta.direction
+var (
+ // How many revisions the change is behind the target ref.
+ // Stability: development
+ VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind")
+ // How many revisions the change is ahead of the target ref.
+ // Stability: development
+ VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead")
+)
+
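
As a hedged illustration (not part of the vendored file), the vcs.* helpers and enum values above can be combined into an attribute set describing, say, a merged pull request; the repository, change, and branch names below are made-up examples.

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/attribute"
    semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

func main() {
    // Describe a merged pull request; NewSet de-duplicates and sorts the keys.
    set := attribute.NewSet(
        semconv.VCSProviderNameGithub,
        semconv.VCSRepositoryName("podman-tui"),
        semconv.VCSChangeID("1234"),
        semconv.VCSChangeStateMerged,
        semconv.VCSRefHeadName("my-feature-branch"),
        semconv.VCSRefHeadTypeBranch,
    )
    for _, kv := range set.ToSlice() {
        fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
    }
}
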
+// Namespace: webengine
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the additional
+ // description of the web engine (e.g. detailed version and edition
+ // information).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final"
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "WildFly"
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of the
+ // web engine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "21.0.0"
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the "webengine.name"
+// semantic conventions. It represents the name of the web engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the web
+// engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// Namespace: zos
+const (
+ // ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic
+ // conventions. It represents the System Management Facility (SMF) Identifier
+ // that uniquely identifies a z/OS system within a SYSPLEX or mainframe environment
+ // and is used for system and performance analysis.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "SYS1"
+ ZOSSmfIDKey = attribute.Key("zos.smf.id")
+
+ // ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name"
+ // semantic conventions. It represents the name of the SYSPLEX to which the z/OS
+ // system belongs.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "SYSPLEX1"
+ ZOSSysplexNameKey = attribute.Key("zos.sysplex.name")
+)
+
+// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic
+// conventions. It represents the System Management Facility (SMF) Identifier
+// that uniquely identifies a z/OS system within a SYSPLEX or mainframe environment
+// and is used for system and performance analysis.
+func ZOSSmfID(val string) attribute.KeyValue {
+ return ZOSSmfIDKey.String(val)
+}
+
+// ZOSSysplexName returns an attribute KeyValue conforming to the
+// "zos.sysplex.name" semantic conventions. It represents the name of the SYSPLEX
+// to which the z/OS system belongs.
+func ZOSSysplexName(val string) attribute.KeyValue {
+ return ZOSSysplexNameKey.String(val)
+}
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
similarity index 80%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
index d031bbea7..111010321 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
@@ -4,6 +4,6 @@
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the v1.26.0
+// patterns for OpenTelemetry things. This package represents the v1.37.0
// version of the OpenTelemetry semantic conventions.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
new file mode 100644
index 000000000..666bded4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// ErrorType returns an [attribute.KeyValue] identifying the error type of err.
+func ErrorType(err error) attribute.KeyValue {
+ if err == nil {
+ return ErrorTypeOther
+ }
+ t := reflect.TypeOf(err)
+ var value string
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ value = t.String()
+ } else {
+ value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+ }
+
+ if value == "" {
+ return ErrorTypeOther
+ }
+ return ErrorTypeKey.String(value)
+}
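
A small self-contained sketch (not part of the vendored file) of what ErrorType reports for different inputs; timeoutError is a hypothetical error type defined only for this example.

package main

import (
    "errors"
    "fmt"

    semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

type timeoutError struct{}

func (timeoutError) Error() string { return "timed out" }

func main() {
    // nil falls back to the "_OTHER" enum value.
    fmt.Println(semconv.ErrorType(nil).Value.Emit()) // _OTHER
    // Unnamed types (here a *errors.errorString) use their string form.
    fmt.Println(semconv.ErrorType(errors.New("boom")).Value.Emit()) // *errors.errorString
    // Named types are reported as "<package path>.<type name>".
    fmt.Println(semconv.ErrorType(timeoutError{}).Value.Emit()) // main.timeoutError
}
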
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go
similarity index 74%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go
index f40c97825..e67469a4f 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
const (
// ExceptionEventName is the name of the Span event representing an exception.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go
new file mode 100644
index 000000000..55bde895d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go
@@ -0,0 +1,1641 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package httpconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "http" namespace.
+package httpconv
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+ addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+ recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ErrorTypeAttr is an attribute conforming to the error.type semantic
+// conventions. It describes a class of error the operation ended with.
+type ErrorTypeAttr string
+
+var (
+ // ErrorTypeOther is a fallback error value to be used when the instrumentation
+ // doesn't define a custom value.
+ ErrorTypeOther ErrorTypeAttr = "_OTHER"
+)
+
+// ConnectionStateAttr is an attribute conforming to the http.connection.state
+// semantic conventions. It represents the state of the HTTP connection in the
+// HTTP connection pool.
+type ConnectionStateAttr string
+
+var (
+ // ConnectionStateActive is the active state.
+ ConnectionStateActive ConnectionStateAttr = "active"
+ // ConnectionStateIdle is the idle state.
+ ConnectionStateIdle ConnectionStateAttr = "idle"
+)
+
+// RequestMethodAttr is an attribute conforming to the http.request.method
+// semantic conventions. It represents the HTTP request method.
+type RequestMethodAttr string
+
+var (
+ // RequestMethodConnect is the CONNECT method.
+ RequestMethodConnect RequestMethodAttr = "CONNECT"
+ // RequestMethodDelete is the DELETE method.
+ RequestMethodDelete RequestMethodAttr = "DELETE"
+ // RequestMethodGet is the GET method.
+ RequestMethodGet RequestMethodAttr = "GET"
+ // RequestMethodHead is the HEAD method.
+ RequestMethodHead RequestMethodAttr = "HEAD"
+ // RequestMethodOptions is the OPTIONS method.
+ RequestMethodOptions RequestMethodAttr = "OPTIONS"
+ // RequestMethodPatch is the PATCH method.
+ RequestMethodPatch RequestMethodAttr = "PATCH"
+ // RequestMethodPost is the POST method.
+ RequestMethodPost RequestMethodAttr = "POST"
+ // RequestMethodPut is the PUT method.
+ RequestMethodPut RequestMethodAttr = "PUT"
+ // RequestMethodTrace is the TRACE method.
+ RequestMethodTrace RequestMethodAttr = "TRACE"
+ // RequestMethodOther is any HTTP method that the instrumentation has no
+ // prior knowledge of.
+ RequestMethodOther RequestMethodAttr = "_OTHER"
+)
+
+// UserAgentSyntheticTypeAttr is an attribute conforming to the
+// user_agent.synthetic.type semantic conventions. It specifies the category of
+// synthetic traffic, such as tests or bots.
+type UserAgentSyntheticTypeAttr string
+
+var (
+ // UserAgentSyntheticTypeBot is the bot source.
+ UserAgentSyntheticTypeBot UserAgentSyntheticTypeAttr = "bot"
+ // UserAgentSyntheticTypeTest is the synthetic test source.
+ UserAgentSyntheticTypeTest UserAgentSyntheticTypeAttr = "test"
+)
+
+// ClientActiveRequests is an instrument used to record metric values conforming
+// to the "http.client.active_requests" semantic conventions. It represents the
+// number of active HTTP requests.
+type ClientActiveRequests struct {
+ metric.Int64UpDownCounter
+}
+
+// NewClientActiveRequests returns a new ClientActiveRequests instrument.
+func NewClientActiveRequests(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (ClientActiveRequests, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "http.client.active_requests",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of active HTTP requests."),
+ metric.WithUnit("{request}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientActiveRequests{noop.Int64UpDownCounter{}}, err
+ }
+ return ClientActiveRequests{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientActiveRequests) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientActiveRequests) Name() string {
+ return "http.client.active_requests"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientActiveRequests) Unit() string {
+ return "{request}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientActiveRequests) Description() string {
+ return "Number of active HTTP requests."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ClientActiveRequests) Add(
+ ctx context.Context,
+ incr int64,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ClientActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientActiveRequests) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
+
+// AttrRequestMethod returns an optional attribute for the "http.request.method"
+// semantic convention. It represents the HTTP request method.
+func (ClientActiveRequests) AttrRequestMethod(val RequestMethodAttr) attribute.KeyValue {
+ return attribute.String("http.request.method", string(val))
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientActiveRequests) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
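
A usage sketch (not part of the vendored file) of tracking in-flight requests with ClientActiveRequests. otel.Meter returns the globally registered meter (a no-op by default), and the server name and port are illustrative.

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)

func main() {
    ctx := context.Background()
    active, err := httpconv.NewClientActiveRequests(otel.Meter("example/httpclient"))
    if err != nil {
        panic(err)
    }

    // Count the request as active while it is in flight.
    active.Add(ctx, 1, "registry.example.com", 443,
        active.AttrRequestMethod(httpconv.RequestMethodGet))
    // ... perform the request ...
    active.Add(ctx, -1, "registry.example.com", 443,
        active.AttrRequestMethod(httpconv.RequestMethodGet))
}
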
+// ClientConnectionDuration is an instrument used to record metric values
+// conforming to the "http.client.connection.duration" semantic conventions. It
+// represents the duration of the successfully established outbound HTTP
+// connections.
+type ClientConnectionDuration struct {
+ metric.Float64Histogram
+}
+
+// NewClientConnectionDuration returns a new ClientConnectionDuration instrument.
+func NewClientConnectionDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (ClientConnectionDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientConnectionDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "http.client.connection.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("The duration of the successfully established outbound HTTP connections."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientConnectionDuration{noop.Float64Histogram{}}, err
+ }
+ return ClientConnectionDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientConnectionDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientConnectionDuration) Name() string {
+ return "http.client.connection.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientConnectionDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientConnectionDuration) Description() string {
+ return "The duration of the successfully established outbound HTTP connections."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ClientConnectionDuration) Record(
+ ctx context.Context,
+ val float64,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrNetworkPeerAddress returns an optional attribute for the
+// "network.peer.address" semantic convention. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func (ClientConnectionDuration) AttrNetworkPeerAddress(val string) attribute.KeyValue {
+ return attribute.String("network.peer.address", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientConnectionDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientConnectionDuration) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// ClientOpenConnections is an instrument used to record metric values conforming
+// to the "http.client.open_connections" semantic conventions. It represents the
+// number of outbound HTTP connections that are currently active or idle on the
+// client.
+type ClientOpenConnections struct {
+ metric.Int64UpDownCounter
+}
+
+// NewClientOpenConnections returns a new ClientOpenConnections instrument.
+func NewClientOpenConnections(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (ClientOpenConnections, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "http.client.open_connections",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."),
+ metric.WithUnit("{connection}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientOpenConnections{noop.Int64UpDownCounter{}}, err
+ }
+ return ClientOpenConnections{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientOpenConnections) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientOpenConnections) Name() string {
+ return "http.client.open_connections"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientOpenConnections) Unit() string {
+ return "{connection}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientOpenConnections) Description() string {
+ return "Number of outbound HTTP connections that are currently active or idle on the client."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The connectionState is the state of the HTTP connection in the HTTP connection
+// pool.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ClientOpenConnections) Add(
+ ctx context.Context,
+ incr int64,
+ connectionState ConnectionStateAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.connection.state", string(connectionState)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ClientOpenConnections) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrNetworkPeerAddress returns an optional attribute for the
+// "network.peer.address" semantic convention. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func (ClientOpenConnections) AttrNetworkPeerAddress(val string) attribute.KeyValue {
+ return attribute.String("network.peer.address", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientOpenConnections) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientOpenConnections) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
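
Another hedged sketch (not part of the vendored file): moving a pooled connection from the active to the idle state with ClientOpenConnections; the host, port, and protocol version are illustrative.

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)

func main() {
    ctx := context.Background()
    open, err := httpconv.NewClientOpenConnections(otel.Meter("example/httppool"))
    if err != nil {
        panic(err)
    }

    // One fewer active connection, one more idle connection for the same peer.
    proto := open.AttrNetworkProtocolVersion("1.1")
    open.Add(ctx, -1, httpconv.ConnectionStateActive, "registry.example.com", 443, proto)
    open.Add(ctx, 1, httpconv.ConnectionStateIdle, "registry.example.com", 443, proto)
}
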
+// ClientRequestBodySize is an instrument used to record metric values conforming
+// to the "http.client.request.body.size" semantic conventions. It represents the
+// size of HTTP client request bodies.
+type ClientRequestBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewClientRequestBodySize returns a new ClientRequestBodySize instrument.
+func NewClientRequestBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ClientRequestBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientRequestBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.client.request.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP client request bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientRequestBodySize{noop.Int64Histogram{}}, err
+ }
+ return ClientRequestBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientRequestBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientRequestBodySize) Name() string {
+ return "http.client.request.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientRequestBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientRequestBodySize) Description() string {
+ return "Size of HTTP client request bodies."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientRequestBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (ClientRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ClientRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ClientRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientRequestBodySize) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientRequestBodySize) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
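
A sketch (not part of the vendored file) of recording body sizes through RecordSet with a pre-computed attribute.Set, which avoids rebuilding identical attributes on every call; the attribute values are illustrative.

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)

func main() {
    ctx := context.Background()
    bodySize, err := httpconv.NewClientRequestBodySize(otel.Meter("example/httpclient"))
    if err != nil {
        panic(err)
    }

    // Build the attribute set once and reuse it for every request.
    set := attribute.NewSet(
        attribute.String("http.request.method", string(httpconv.RequestMethodPost)),
        attribute.String("server.address", "registry.example.com"),
        attribute.Int("server.port", 443),
    )
    bodySize.RecordSet(ctx, 2048, set)
}
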
+// ClientRequestDuration is an instrument used to record metric values conforming
+// to the "http.client.request.duration" semantic conventions. It represents the
+// duration of HTTP client requests.
+type ClientRequestDuration struct {
+ metric.Float64Histogram
+}
+
+// NewClientRequestDuration returns a new ClientRequestDuration instrument.
+func NewClientRequestDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (ClientRequestDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientRequestDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "http.client.request.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("Duration of HTTP client requests."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientRequestDuration{noop.Float64Histogram{}}, err
+ }
+ return ClientRequestDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientRequestDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientRequestDuration) Name() string {
+ return "http.client.request.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientRequestDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientRequestDuration) Description() string {
+ return "Duration of HTTP client requests."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ClientRequestDuration) Record(
+ ctx context.Context,
+ val float64,
+ requestMethod RequestMethodAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (ClientRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ClientRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ClientRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientRequestDuration) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientRequestDuration) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
+
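
A sketch (not part of the vendored file) of timing an outbound request and recording it with ClientRequestDuration; the URL is an illustrative placeholder and the error handling is deliberately minimal.

package main

import (
    "context"
    "net/http"
    "time"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)

func main() {
    ctx := context.Background()
    duration, err := httpconv.NewClientRequestDuration(otel.Meter("example/httpclient"))
    if err != nil {
        panic(err)
    }

    start := time.Now()
    resp, reqErr := http.Get("https://registry.example.com/v2/")
    elapsed := time.Since(start).Seconds() // the instrument unit is seconds ("s")

    var attrs []attribute.KeyValue
    if reqErr != nil {
        attrs = append(attrs, duration.AttrErrorType(httpconv.ErrorTypeOther))
    } else {
        attrs = append(attrs, duration.AttrResponseStatusCode(resp.StatusCode))
        resp.Body.Close()
    }
    duration.Record(ctx, elapsed, httpconv.RequestMethodGet, "registry.example.com", 443, attrs...)
}
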
+// ClientResponseBodySize is an instrument used to record metric values
+// conforming to the "http.client.response.body.size" semantic conventions. It
+// represents the size of HTTP client response bodies.
+type ClientResponseBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewClientResponseBodySize returns a new ClientResponseBodySize instrument.
+func NewClientResponseBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ClientResponseBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientResponseBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.client.response.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP client response bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientResponseBodySize{noop.Int64Histogram{}}, err
+ }
+ return ClientResponseBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientResponseBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientResponseBodySize) Name() string {
+ return "http.client.response.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientResponseBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientResponseBodySize) Description() string {
+ return "Size of HTTP client response bodies."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientResponseBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (ClientResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ClientResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ClientResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientResponseBodySize) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientResponseBodySize) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// ServerActiveRequests is an instrument used to record metric values conforming
+// to the "http.server.active_requests" semantic conventions. It represents the
+// number of active HTTP server requests.
+type ServerActiveRequests struct {
+ metric.Int64UpDownCounter
+}
+
+// NewServerActiveRequests returns a new ServerActiveRequests instrument.
+func NewServerActiveRequests(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (ServerActiveRequests, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "http.server.active_requests",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of active HTTP server requests."),
+ metric.WithUnit("{request}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerActiveRequests{noop.Int64UpDownCounter{}}, err
+ }
+ return ServerActiveRequests{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerActiveRequests) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerActiveRequests) Name() string {
+ return "http.server.active_requests"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ServerActiveRequests) Unit() string {
+ return "{request}"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ServerActiveRequests) Description() string {
+ return "Number of active HTTP server requests."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (m ServerActiveRequests) Add(
+ ctx context.Context,
+ incr int64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ServerActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerActiveRequests) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerActiveRequests) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// ServerRequestBodySize is an instrument used to record metric values conforming
+// to the "http.server.request.body.size" semantic conventions. It represents the
+// size of HTTP server request bodies.
+type ServerRequestBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewServerRequestBodySize returns a new ServerRequestBodySize instrument.
+func NewServerRequestBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ServerRequestBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerRequestBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.server.request.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP server request bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerRequestBodySize{noop.Int64Histogram{}}, err
+ }
+ return ServerRequestBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerRequestBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerRequestBodySize) Name() string {
+ return "http.server.request.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ServerRequestBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ServerRequestBodySize) Description() string {
+ return "Size of HTTP server request bodies."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerRequestBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (ServerRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ServerRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrRoute returns an optional attribute for the "http.route" semantic
+// convention. It represents the matched route, that is, the path template in the
+// format used by the respective server framework.
+func (ServerRequestBodySize) AttrRoute(val string) attribute.KeyValue {
+ return attribute.String("http.route", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ServerRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ServerRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerRequestBodySize) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerRequestBodySize) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// AttrUserAgentSyntheticType returns an optional attribute for the
+// "user_agent.synthetic.type" semantic convention. It represents the specifies
+// the category of synthetic traffic, such as tests or bots.
+func (ServerRequestBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
+ return attribute.String("user_agent.synthetic.type", string(val))
+}
+
+// ServerRequestDuration is an instrument used to record metric values conforming
+// to the "http.server.request.duration" semantic conventions. It represents the
+// duration of HTTP server requests.
+type ServerRequestDuration struct {
+ metric.Float64Histogram
+}
+
+// NewServerRequestDuration returns a new ServerRequestDuration instrument.
+func NewServerRequestDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (ServerRequestDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerRequestDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "http.server.request.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("Duration of HTTP server requests."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerRequestDuration{noop.Float64Histogram{}}, err
+ }
+ return ServerRequestDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerRequestDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerRequestDuration) Name() string {
+ return "http.server.request.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ServerRequestDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ServerRequestDuration) Description() string {
+ return "Duration of HTTP server requests."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (m ServerRequestDuration) Record(
+ ctx context.Context,
+ val float64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (ServerRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ServerRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrRoute returns an optional attribute for the "http.route" semantic
+// convention. It represents the matched route, that is, the path template in the
+// format used by the respective server framework.
+func (ServerRequestDuration) AttrRoute(val string) attribute.KeyValue {
+ return attribute.String("http.route", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ServerRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ServerRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerRequestDuration) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerRequestDuration) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// AttrUserAgentSyntheticType returns an optional attribute for the
+// "user_agent.synthetic.type" semantic convention. It represents the specifies
+// the category of synthetic traffic, such as tests or bots.
+func (ServerRequestDuration) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
+ return attribute.String("user_agent.synthetic.type", string(val))
+}
+
+// ServerResponseBodySize is an instrument used to record metric values
+// conforming to the "http.server.response.body.size" semantic conventions. It
+// represents the size of HTTP server response bodies.
+type ServerResponseBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewServerResponseBodySize returns a new ServerResponseBodySize instrument.
+func NewServerResponseBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ServerResponseBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerResponseBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.server.response.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP server response bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerResponseBodySize{noop.Int64Histogram{}}, err
+ }
+ return ServerResponseBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerResponseBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerResponseBodySize) Name() string {
+ return "http.server.response.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ServerResponseBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ServerResponseBodySize) Description() string {
+ return "Size of HTTP server response bodies."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerResponseBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (ServerResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ServerResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrRoute returns an optional attribute for the "http.route" semantic
+// convention. It represents the matched route, that is, the path template in the
+// format used by the respective server framework.
+func (ServerResponseBodySize) AttrRoute(val string) attribute.KeyValue {
+ return attribute.String("http.route", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ServerResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ServerResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerResponseBodySize) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// AttrUserAgentSyntheticType returns an optional attribute for the
+// "user_agent.synthetic.type" semantic convention. It represents the specifies
+// the category of synthetic traffic, such as tests or bots.
+func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
+ return attribute.String("user_agent.synthetic.type", string(val))
+}
\ No newline at end of file
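For reviewers of this generated instrument code, the sketch below shows how the new server-side wrappers are intended to be wired up from instrumentation code. It assumes the instruments are importable as go.opentelemetry.io/otel/semconv/v1.37.0/httpconv (the path implied by this vendor tree; adjust if the generated package lives elsewhere), treats RequestMethodAttr as the string-based type the generated signatures imply, and ignores constructor errors purely for brevity.

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)

func main() {
	ctx := context.Background()
	meter := otel.Meter("example/http-server")

	// Construct the generated instruments once, at startup.
	active, _ := httpconv.NewServerActiveRequests(meter)
	duration, _ := httpconv.NewServerRequestDuration(meter)

	method := httpconv.RequestMethodAttr("GET") // string-based attribute type
	scheme := "http"

	// Track the request as in-flight while it is being handled.
	active.Add(ctx, 1, method, scheme)
	start := time.Now()

	// ... handle the request ...

	active.Add(ctx, -1, method, scheme)
	duration.Record(ctx, time.Since(start).Seconds(), method, scheme,
		duration.AttrRoute("/users/{id}"),
		duration.AttrResponseStatusCode(200),
	)
}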
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go
similarity index 71%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go
index 4c87c7adc..f8a0b7044 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go
@@ -1,9 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
// SchemaURL is the schema URL that matches the version of the semantic conventions
// that this package defines. Semconv packages starting from v1.4.0 must declare
// non-empty schema URL in the form https://opentelemetry.io/schemas/
-const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
+const SchemaURL = "https://opentelemetry.io/schemas/1.37.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE
index 261eeb9e9..f1aee0f11 100644
--- a/vendor/go.opentelemetry.io/otel/trace/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go
index d90af8f67..8763936a8 100644
--- a/vendor/go.opentelemetry.io/otel/trace/auto.go
+++ b/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/internal/telemetry"
)
@@ -39,7 +39,7 @@ type autoTracerProvider struct{ embedded.TracerProvider }
var _ TracerProvider = autoTracerProvider{}
-func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
+func (autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
cfg := NewTracerConfig(opts...)
return autoTracer{
name: name,
@@ -81,7 +81,7 @@ func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOpt
// Expected to be implemented in eBPF.
//
//go:noinline
-func (t *autoTracer) start(
+func (*autoTracer) start(
ctx context.Context,
spanPtr *autoSpan,
psc *SpanContext,
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index 9c0b720a4..aea11a2b5 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -73,7 +73,7 @@ func (cfg *SpanConfig) Timestamp() time.Time {
return cfg.timestamp
}
-// StackTrace checks whether stack trace capturing is enabled.
+// StackTrace reports whether stack trace capturing is enabled.
func (cfg *SpanConfig) StackTrace() bool {
return cfg.stackTrace
}
@@ -154,7 +154,7 @@ func (cfg *EventConfig) Timestamp() time.Time {
return cfg.timestamp
}
-// StackTrace checks whether stack trace capturing is enabled.
+// StackTrace reports whether stack trace capturing is enabled.
func (cfg *EventConfig) StackTrace() bool {
return cfg.stackTrace
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/hex.go b/vendor/go.opentelemetry.io/otel/trace/hex.go
new file mode 100644
index 000000000..1cbef1d4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/hex.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+const (
+ // hexLU is a hex lookup table of the 16 lowercase hex digits.
+ // The character values of the string are indexed at the equivalent
+ // hexadecimal value they represent, so the table efficiently encodes byte
+ // data into its lowercase hexadecimal string representation.
+ hexLU = "0123456789abcdef"
+
+ // hexRev is a reverse hex lookup table for lowercase hex digits.
+ // The table efficiently decodes a hexadecimal string into bytes.
+ // Valid hexadecimal characters are indexed at their respective values. All
+ // other invalid ASCII characters are represented with '\xff'.
+ //
+ // The '\xff' character is used as invalid because no valid character has
+ // the upper 4 bits set. This allows validation to be performed efficiently
+ // across multiple parsed characters by checking that these bits remain zero.
+ hexRev = "" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+)
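As a reviewer's aid, here is a small self-contained sketch of the lookup-table technique these constants implement: an hexLU-style table maps a nibble to its lowercase hex digit, and a hexRev-style reverse table maps a hex character back to its value, with 0xff marking invalid bytes so one OR-accumulated check catches bad input. The reverse table is built programmatically here only to keep the example short; the vendored code spells it out as a string literal.

package main

import (
	"errors"
	"fmt"
)

// hexDigits plays the role of hexLU: index a nibble, get its lowercase hex digit.
const hexDigits = "0123456789abcdef"

// hexRev plays the role of the reverse table: valid hex characters map to their
// value, everything else maps to 0xff, so its upper nibble is set.
var hexRev = func() (t [256]byte) {
	for i := range t {
		t[i] = 0xff
	}
	for i := 0; i < len(hexDigits); i++ {
		t[hexDigits[i]] = byte(i)
	}
	return t
}()

// encodeByte encodes one byte as two lowercase hex characters.
func encodeByte(b byte) string {
	return string([]byte{hexDigits[b>>4], hexDigits[b&0xf]})
}

// decodeHex decodes a lowercase hex string of even length using the
// OR-accumulated "invalid mark" trick: any invalid character sets the upper
// nibble of the mark, which is checked once after the loop.
func decodeHex(s string) ([]byte, error) {
	if len(s)%2 != 0 {
		return nil, errors.New("odd-length hex input")
	}
	out := make([]byte, len(s)/2)
	invalidMark := byte(0)
	for i := 0; i < len(s); i += 2 {
		hi, lo := hexRev[s[i]], hexRev[s[i+1]]
		out[i/2] = hi<<4 | lo
		invalidMark |= hi | lo
	}
	if invalidMark&0xf0 != 0 {
		return nil, errors.New("invalid hex character")
	}
	return out, nil
}

func main() {
	fmt.Println(encodeByte(0xab))      // ab
	fmt.Println(decodeHex("4bf92f35")) // [75 249 47 53] <nil>
	fmt.Println(decodeHex("zz"))       // [] invalid hex character
}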
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
index f663547b4..ff0f6eac6 100644
--- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
@@ -52,7 +52,7 @@ func Map(key string, value ...Attr) Attr {
return Attr{key, MapValue(value...)}
}
-// Equal returns if a is equal to b.
+// Equal reports whether a is equal to b.
func (a Attr) Equal(b Attr) bool {
return a.Key == b.Key && a.Value.Equal(b.Value)
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
index 7b1ae3c4e..bea56f2e7 100644
--- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
@@ -22,7 +22,7 @@ func (tid TraceID) String() string {
return hex.EncodeToString(tid[:])
}
-// IsEmpty returns false if id contains at least one non-zero byte.
+// IsEmpty reports whether the TraceID contains only zero bytes.
func (tid TraceID) IsEmpty() bool {
return tid == [traceIDSize]byte{}
}
@@ -50,7 +50,7 @@ func (sid SpanID) String() string {
return hex.EncodeToString(sid[:])
}
-// IsEmpty returns true if the span ID contains at least one non-zero byte.
+// IsEmpty reports whether the SpanID contains only zero bytes.
func (sid SpanID) IsEmpty() bool {
return sid == [spanIDSize]byte{}
}
@@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) {
}
// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
-func unmarshalJSON(dst []byte, src []byte) error {
+func unmarshalJSON(dst, src []byte) error {
if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
src = src[1 : l-1]
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
index ae9ce102a..cb7927b81 100644
--- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
@@ -257,10 +257,10 @@ func (v Value) Kind() ValueKind {
}
}
-// Empty returns if v does not hold any value.
+// Empty reports whether v does not hold any value.
func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
-// Equal returns if v is equal to w.
+// Equal reports whether v is equal to w.
func (v Value) Equal(w Value) bool {
k1 := v.Kind()
k2 := w.Kind()
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
index 0f56e4dbb..400fab123 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -26,7 +26,7 @@ type noopTracerProvider struct{ embedded.TracerProvider }
var _ TracerProvider = noopTracerProvider{}
// Tracer returns noop implementation of Tracer.
-func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
+func (noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
return noopTracer{}
}
@@ -37,7 +37,7 @@ var _ Tracer = noopTracer{}
// Start carries forward a non-recording Span, if one is present in the context, otherwise it
// creates a no-op Span.
-func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) {
+func (noopTracer) Start(ctx context.Context, _ string, _ ...SpanStartOption) (context.Context, Span) {
span := SpanFromContext(ctx)
if _, ok := span.(nonRecordingSpan); !ok {
// span is likely already a noopSpan, but let's be sure
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
index 64a4f1b36..689d220df 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
@@ -51,7 +51,7 @@ type Tracer struct{ embedded.Tracer }
// If ctx contains a span context, the returned span will also contain that
// span context. If the span context in ctx is for a non-recording span, that
// span instance will be returned directly.
-func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+func (Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
span := trace.SpanFromContext(ctx)
// If the parent context contains a non-zero span context, that span
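For reference, the behaviour described in the Start doc comment of this hunk (carry forward a non-recording span context if one is present, otherwise return a no-op span) can be observed with a minimal use of the public noop package; this sketch is illustrative only and not part of the change.

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	tp := noop.NewTracerProvider()
	tracer := tp.Tracer("example")

	// With no span context in the incoming context, Start returns a no-op
	// span whose span context is the zero value, so IsValid reports false.
	_, span := tracer.Start(context.Background(), "operation")
	defer span.End()

	fmt.Println(span.SpanContext().IsValid()) // false
}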
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index d49adf671..ee6f4bcb2 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -4,8 +4,6 @@
package trace // import "go.opentelemetry.io/otel/trace"
import (
- "bytes"
- "encoding/hex"
"encoding/json"
)
@@ -38,21 +36,47 @@ var (
_ json.Marshaler = nilTraceID
)
-// IsValid checks whether the trace TraceID is valid. A valid trace ID does
+// IsValid reports whether the trace TraceID is valid. A valid trace ID does
// not consist of zeros only.
func (t TraceID) IsValid() bool {
- return !bytes.Equal(t[:], nilTraceID[:])
+ return t != nilTraceID
}
// MarshalJSON implements a custom marshal function to encode TraceID
// as a hex string.
func (t TraceID) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.String())
+ b := [32 + 2]byte{0: '"', 33: '"'}
+ h := t.hexBytes()
+ copy(b[1:], h[:])
+ return b[:], nil
}
// String returns the hex string representation form of a TraceID.
func (t TraceID) String() string {
- return hex.EncodeToString(t[:])
+ h := t.hexBytes()
+ return string(h[:])
+}
+
+// hexBytes returns the hex string representation form of a TraceID.
+func (t TraceID) hexBytes() [32]byte {
+ return [32]byte{
+ hexLU[t[0x0]>>4], hexLU[t[0x0]&0xf],
+ hexLU[t[0x1]>>4], hexLU[t[0x1]&0xf],
+ hexLU[t[0x2]>>4], hexLU[t[0x2]&0xf],
+ hexLU[t[0x3]>>4], hexLU[t[0x3]&0xf],
+ hexLU[t[0x4]>>4], hexLU[t[0x4]&0xf],
+ hexLU[t[0x5]>>4], hexLU[t[0x5]&0xf],
+ hexLU[t[0x6]>>4], hexLU[t[0x6]&0xf],
+ hexLU[t[0x7]>>4], hexLU[t[0x7]&0xf],
+ hexLU[t[0x8]>>4], hexLU[t[0x8]&0xf],
+ hexLU[t[0x9]>>4], hexLU[t[0x9]&0xf],
+ hexLU[t[0xa]>>4], hexLU[t[0xa]&0xf],
+ hexLU[t[0xb]>>4], hexLU[t[0xb]&0xf],
+ hexLU[t[0xc]>>4], hexLU[t[0xc]&0xf],
+ hexLU[t[0xd]>>4], hexLU[t[0xd]&0xf],
+ hexLU[t[0xe]>>4], hexLU[t[0xe]&0xf],
+ hexLU[t[0xf]>>4], hexLU[t[0xf]&0xf],
+ }
}
// SpanID is a unique identity of a span in a trace.
@@ -63,21 +87,38 @@ var (
_ json.Marshaler = nilSpanID
)
-// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
+// IsValid reports whether the SpanID is valid. A valid SpanID does not consist
// of zeros only.
func (s SpanID) IsValid() bool {
- return !bytes.Equal(s[:], nilSpanID[:])
+ return s != nilSpanID
}
// MarshalJSON implements a custom marshal function to encode SpanID
// as a hex string.
func (s SpanID) MarshalJSON() ([]byte, error) {
- return json.Marshal(s.String())
+ b := [16 + 2]byte{0: '"', 17: '"'}
+ h := s.hexBytes()
+ copy(b[1:], h[:])
+ return b[:], nil
}
// String returns the hex string representation form of a SpanID.
func (s SpanID) String() string {
- return hex.EncodeToString(s[:])
+ b := s.hexBytes()
+ return string(b[:])
+}
+
+func (s SpanID) hexBytes() [16]byte {
+ return [16]byte{
+ hexLU[s[0]>>4], hexLU[s[0]&0xf],
+ hexLU[s[1]>>4], hexLU[s[1]&0xf],
+ hexLU[s[2]>>4], hexLU[s[2]&0xf],
+ hexLU[s[3]>>4], hexLU[s[3]&0xf],
+ hexLU[s[4]>>4], hexLU[s[4]&0xf],
+ hexLU[s[5]>>4], hexLU[s[5]&0xf],
+ hexLU[s[6]>>4], hexLU[s[6]&0xf],
+ hexLU[s[7]>>4], hexLU[s[7]&0xf],
+ }
}
// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
@@ -85,65 +126,58 @@ func (s SpanID) String() string {
// https://www.w3.org/TR/trace-context/#trace-id
// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
func TraceIDFromHex(h string) (TraceID, error) {
- t := TraceID{}
if len(h) != 32 {
- return t, errInvalidTraceIDLength
+ return [16]byte{}, errInvalidTraceIDLength
}
-
- if err := decodeHex(h, t[:]); err != nil {
- return t, err
+ var b [16]byte
+ invalidMark := byte(0)
+ for i := 0; i < len(h); i += 4 {
+ b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]]
+ b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]]
+ invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]]
}
-
- if !t.IsValid() {
- return t, errNilTraceID
+ // If the upper 4 bits of any byte are not zero, there was an invalid hex
+ // character since invalid hex characters are 0xff in hexRev.
+ if invalidMark&0xf0 != 0 {
+ return [16]byte{}, errInvalidHexID
+ }
+ // If we didn't set any bits, then h was all zeros.
+ if invalidMark == 0 {
+ return [16]byte{}, errNilTraceID
}
- return t, nil
+ return b, nil
}
// SpanIDFromHex returns a SpanID from a hex string if it is compliant
// with the w3c trace-context specification.
// See more at https://www.w3.org/TR/trace-context/#parent-id
func SpanIDFromHex(h string) (SpanID, error) {
- s := SpanID{}
if len(h) != 16 {
- return s, errInvalidSpanIDLength
- }
-
- if err := decodeHex(h, s[:]); err != nil {
- return s, err
+ return [8]byte{}, errInvalidSpanIDLength
}
-
- if !s.IsValid() {
- return s, errNilSpanID
+ var b [8]byte
+ invalidMark := byte(0)
+ for i := 0; i < len(h); i += 4 {
+ b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]]
+ b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]]
+ invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]]
}
- return s, nil
-}
-
-func decodeHex(h string, b []byte) error {
- for _, r := range h {
- switch {
- case 'a' <= r && r <= 'f':
- continue
- case '0' <= r && r <= '9':
- continue
- default:
- return errInvalidHexID
- }
+ // If the upper 4 bits of any byte are not zero, there was an invalid hex
+ // character since invalid hex characters are 0xff in hexRev.
+ if invalidMark&0xf0 != 0 {
+ return [8]byte{}, errInvalidHexID
}
-
- decoded, err := hex.DecodeString(h)
- if err != nil {
- return err
+ // If we didn't set any bits, then h was all zeros.
+ if invalidMark == 0 {
+ return [8]byte{}, errNilSpanID
}
-
- copy(b, decoded)
- return nil
+ return b, nil
}
// TraceFlags contains flags that can be set on a SpanContext.
type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
-// IsSampled returns if the sampling bit is set in the TraceFlags.
+// IsSampled reports whether the sampling bit is set in the TraceFlags.
func (tf TraceFlags) IsSampled() bool {
return tf&FlagsSampled == FlagsSampled
}
@@ -160,12 +194,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive //
// MarshalJSON implements a custom marshal function to encode TraceFlags
// as a hex string.
func (tf TraceFlags) MarshalJSON() ([]byte, error) {
- return json.Marshal(tf.String())
+ b := [2 + 2]byte{0: '"', 3: '"'}
+ h := tf.hexBytes()
+ copy(b[1:], h[:])
+ return b[:], nil
}
// String returns the hex string representation form of TraceFlags.
func (tf TraceFlags) String() string {
- return hex.EncodeToString([]byte{byte(tf)}[:])
+ h := tf.hexBytes()
+ return string(h[:])
+}
+
+func (tf TraceFlags) hexBytes() [2]byte {
+ return [2]byte{hexLU[tf>>4], hexLU[tf&0xf]}
}
// SpanContextConfig contains mutable fields usable for constructing
@@ -201,13 +243,13 @@ type SpanContext struct {
var _ json.Marshaler = SpanContext{}
-// IsValid returns if the SpanContext is valid. A valid span context has a
+// IsValid reports whether the SpanContext is valid. A valid span context has a
// valid TraceID and SpanID.
func (sc SpanContext) IsValid() bool {
return sc.HasTraceID() && sc.HasSpanID()
}
-// IsRemote indicates whether the SpanContext represents a remotely-created Span.
+// IsRemote reports whether the SpanContext represents a remotely-created Span.
func (sc SpanContext) IsRemote() bool {
return sc.remote
}
@@ -228,7 +270,7 @@ func (sc SpanContext) TraceID() TraceID {
return sc.traceID
}
-// HasTraceID checks if the SpanContext has a valid TraceID.
+// HasTraceID reports whether the SpanContext has a valid TraceID.
func (sc SpanContext) HasTraceID() bool {
return sc.traceID.IsValid()
}
@@ -249,7 +291,7 @@ func (sc SpanContext) SpanID() SpanID {
return sc.spanID
}
-// HasSpanID checks if the SpanContext has a valid SpanID.
+// HasSpanID reports whether the SpanContext has a valid SpanID.
func (sc SpanContext) HasSpanID() bool {
return sc.spanID.IsValid()
}
@@ -270,7 +312,7 @@ func (sc SpanContext) TraceFlags() TraceFlags {
return sc.traceFlags
}
-// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags.
+// IsSampled reports whether the sampling bit is set in the SpanContext's TraceFlags.
func (sc SpanContext) IsSampled() bool {
return sc.traceFlags.IsSampled()
}
@@ -302,7 +344,7 @@ func (sc SpanContext) WithTraceState(state TraceState) SpanContext {
}
}
-// Equal is a predicate that determines whether two SpanContext values are equal.
+// Equal reports whether two SpanContext values are equal.
func (sc SpanContext) Equal(other SpanContext) bool {
return sc.traceID == other.traceID &&
sc.spanID == other.spanID &&
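To sanity-check the rewritten hex paths above, the following sketch exercises the public API they back: parsing W3C trace-context IDs, formatting them, and JSON-marshalling a TraceID, which now emits the quoted hex directly instead of routing through json.Marshal of a string. The ID values are the example IDs from the W3C trace-context specification.

package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	tid, err := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	if err != nil {
		panic(err)
	}
	sid, err := trace.SpanIDFromHex("00f067aa0ba902b7")
	if err != nil {
		panic(err)
	}

	fmt.Println(tid.String()) // 4bf92f3577b34da6a3ce929d0e0e4736
	fmt.Println(sid.String()) // 00f067aa0ba902b7

	b, _ := json.Marshal(tid)
	fmt.Println(string(b)) // "4bf92f3577b34da6a3ce929d0e0e4736"

	// An all-zero ID is rejected with the dedicated nil-ID error.
	_, err = trace.TraceIDFromHex("00000000000000000000000000000000")
	fmt.Println(err != nil) // true
}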
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index dc5e34cad..073adae2f 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -80,7 +80,7 @@ func checkKeyRemain(key string) bool {
//
// param n is remain part length, should be 255 in simple-key or 13 in system-id.
func checkKeyPart(key string, n int) bool {
- if len(key) == 0 {
+ if key == "" {
return false
}
first := key[0] // key's first char
@@ -102,7 +102,7 @@ func isAlphaNum(c byte) bool {
//
// param n is remain part length, should be 240 exactly.
func checkKeyTenant(key string, n int) bool {
- if len(key) == 0 {
+ if key == "" {
return false
}
return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
@@ -191,7 +191,7 @@ func ParseTraceState(ts string) (TraceState, error) {
for ts != "" {
var memberStr string
memberStr, ts, _ = strings.Cut(ts, listDelimiters)
- if len(memberStr) == 0 {
+ if memberStr == "" {
continue
}
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index ac3c0b15d..bcaa5aa53 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.36.0"
+ return "1.38.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 79f82f3d0..07145e254 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,13 +3,12 @@
module-sets:
stable-v1:
- version: v1.36.0
+ version: v1.38.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/bridge/opentracing
- - go.opentelemetry.io/otel/bridge/opentracing/test
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/otlp/otlptrace
@@ -23,23 +22,23 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.58.0
+ version: v0.60.0
modules:
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
- version: v0.12.0
+ version: v0.14.0
modules:
- go.opentelemetry.io/otel/log
+ - go.opentelemetry.io/otel/log/logtest
- go.opentelemetry.io/otel/sdk/log
+ - go.opentelemetry.io/otel/sdk/log/logtest
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog
experimental-schema:
- version: v0.0.12
+ version: v0.0.13
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
- - go.opentelemetry.io/otel/log/logtest
- - go.opentelemetry.io/otel/sdk/log/logtest
- go.opentelemetry.io/otel/trace/internal/telemetry/test
diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go
deleted file mode 100644
index e6c645e7c..000000000
--- a/vendor/golang.org/x/crypto/ocsp/ocsp.go
+++ /dev/null
@@ -1,793 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses
-// are signed messages attesting to the validity of a certificate for a small
-// period of time. This is used to manage revocation for X.509 certificates.
-package ocsp
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/rsa"
- _ "crypto/sha1"
- _ "crypto/sha256"
- _ "crypto/sha512"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/asn1"
- "errors"
- "fmt"
- "math/big"
- "strconv"
- "time"
-)
-
-var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1})
-
-// ResponseStatus contains the result of an OCSP request. See
-// https://tools.ietf.org/html/rfc6960#section-2.3
-type ResponseStatus int
-
-const (
- Success ResponseStatus = 0
- Malformed ResponseStatus = 1
- InternalError ResponseStatus = 2
- TryLater ResponseStatus = 3
- // Status code four is unused in OCSP. See
- // https://tools.ietf.org/html/rfc6960#section-4.2.1
- SignatureRequired ResponseStatus = 5
- Unauthorized ResponseStatus = 6
-)
-
-func (r ResponseStatus) String() string {
- switch r {
- case Success:
- return "success"
- case Malformed:
- return "malformed"
- case InternalError:
- return "internal error"
- case TryLater:
- return "try later"
- case SignatureRequired:
- return "signature required"
- case Unauthorized:
- return "unauthorized"
- default:
- return "unknown OCSP status: " + strconv.Itoa(int(r))
- }
-}
-
-// ResponseError is an error that may be returned by ParseResponse to indicate
-// that the response itself is an error, not just that it's indicating that a
-// certificate is revoked, unknown, etc.
-type ResponseError struct {
- Status ResponseStatus
-}
-
-func (r ResponseError) Error() string {
- return "ocsp: error from server: " + r.Status.String()
-}
-
-// These are internal structures that reflect the ASN.1 structure of an OCSP
-// response. See RFC 2560, section 4.2.
-
-type certID struct {
- HashAlgorithm pkix.AlgorithmIdentifier
- NameHash []byte
- IssuerKeyHash []byte
- SerialNumber *big.Int
-}
-
-// https://tools.ietf.org/html/rfc2560#section-4.1.1
-type ocspRequest struct {
- TBSRequest tbsRequest
-}
-
-type tbsRequest struct {
- Version int `asn1:"explicit,tag:0,default:0,optional"`
- RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"`
- RequestList []request
-}
-
-type request struct {
- Cert certID
-}
-
-type responseASN1 struct {
- Status asn1.Enumerated
- Response responseBytes `asn1:"explicit,tag:0,optional"`
-}
-
-type responseBytes struct {
- ResponseType asn1.ObjectIdentifier
- Response []byte
-}
-
-type basicResponse struct {
- TBSResponseData responseData
- SignatureAlgorithm pkix.AlgorithmIdentifier
- Signature asn1.BitString
- Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"`
-}
-
-type responseData struct {
- Raw asn1.RawContent
- Version int `asn1:"optional,default:0,explicit,tag:0"`
- RawResponderID asn1.RawValue
- ProducedAt time.Time `asn1:"generalized"`
- Responses []singleResponse
-}
-
-type singleResponse struct {
- CertID certID
- Good asn1.Flag `asn1:"tag:0,optional"`
- Revoked revokedInfo `asn1:"tag:1,optional"`
- Unknown asn1.Flag `asn1:"tag:2,optional"`
- ThisUpdate time.Time `asn1:"generalized"`
- NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"`
- SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"`
-}
-
-type revokedInfo struct {
- RevocationTime time.Time `asn1:"generalized"`
- Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"`
-}
-
-var (
- oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
- oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
- oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
- oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
- oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
- oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
- oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
- oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2}
- oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
- oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
- oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
- oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
-)
-
-var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{
- crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}),
- crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}),
- crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}),
- crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}),
-}
-
-// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
-var signatureAlgorithmDetails = []struct {
- algo x509.SignatureAlgorithm
- oid asn1.ObjectIdentifier
- pubKeyAlgo x509.PublicKeyAlgorithm
- hash crypto.Hash
-}{
- {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */},
- {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5},
- {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
- {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256},
- {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384},
- {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512},
- {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1},
- {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256},
- {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1},
- {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256},
- {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384},
- {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512},
-}
-
-// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
-func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
- var pubType x509.PublicKeyAlgorithm
-
- switch pub := pub.(type) {
- case *rsa.PublicKey:
- pubType = x509.RSA
- hashFunc = crypto.SHA256
- sigAlgo.Algorithm = oidSignatureSHA256WithRSA
- sigAlgo.Parameters = asn1.RawValue{
- Tag: 5,
- }
-
- case *ecdsa.PublicKey:
- pubType = x509.ECDSA
-
- switch pub.Curve {
- case elliptic.P224(), elliptic.P256():
- hashFunc = crypto.SHA256
- sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
- case elliptic.P384():
- hashFunc = crypto.SHA384
- sigAlgo.Algorithm = oidSignatureECDSAWithSHA384
- case elliptic.P521():
- hashFunc = crypto.SHA512
- sigAlgo.Algorithm = oidSignatureECDSAWithSHA512
- default:
- err = errors.New("x509: unknown elliptic curve")
- }
-
- default:
- err = errors.New("x509: only RSA and ECDSA keys supported")
- }
-
- if err != nil {
- return
- }
-
- if requestedSigAlgo == 0 {
- return
- }
-
- found := false
- for _, details := range signatureAlgorithmDetails {
- if details.algo == requestedSigAlgo {
- if details.pubKeyAlgo != pubType {
- err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
- return
- }
- sigAlgo.Algorithm, hashFunc = details.oid, details.hash
- if hashFunc == 0 {
- err = errors.New("x509: cannot sign with hash function requested")
- return
- }
- found = true
- break
- }
- }
-
- if !found {
- err = errors.New("x509: unknown SignatureAlgorithm")
- }
-
- return
-}
-
-// TODO(agl): this is taken from crypto/x509 and so should probably be exported
-// from crypto/x509 or crypto/x509/pkix.
-func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm {
- for _, details := range signatureAlgorithmDetails {
- if oid.Equal(details.oid) {
- return details.algo
- }
- }
- return x509.UnknownSignatureAlgorithm
-}
-
-// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form.
-func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash {
- for hash, oid := range hashOIDs {
- if oid.Equal(target) {
- return hash
- }
- }
- return crypto.Hash(0)
-}
-
-func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier {
- for hash, oid := range hashOIDs {
- if hash == target {
- return oid
- }
- }
- return nil
-}
-
-// This is the exposed reflection of the internal OCSP structures.
-
-// The status values that can be expressed in OCSP. See RFC 6960.
-// These are used for the Response.Status field.
-const (
- // Good means that the certificate is valid.
- Good = 0
- // Revoked means that the certificate has been deliberately revoked.
- Revoked = 1
- // Unknown means that the OCSP responder doesn't know about the certificate.
- Unknown = 2
- // ServerFailed is unused and was never used (see
- // https://go-review.googlesource.com/#/c/18944). ParseResponse will
- // return a ResponseError when an error response is parsed.
- ServerFailed = 3
-)
-
-// The enumerated reasons for revoking a certificate. See RFC 5280.
-const (
- Unspecified = 0
- KeyCompromise = 1
- CACompromise = 2
- AffiliationChanged = 3
- Superseded = 4
- CessationOfOperation = 5
- CertificateHold = 6
-
- RemoveFromCRL = 8
- PrivilegeWithdrawn = 9
- AACompromise = 10
-)
-
-// Request represents an OCSP request. See RFC 6960.
-type Request struct {
- HashAlgorithm crypto.Hash
- IssuerNameHash []byte
- IssuerKeyHash []byte
- SerialNumber *big.Int
-}
-
-// Marshal marshals the OCSP request to ASN.1 DER encoded form.
-func (req *Request) Marshal() ([]byte, error) {
- hashAlg := getOIDFromHashAlgorithm(req.HashAlgorithm)
- if hashAlg == nil {
- return nil, errors.New("Unknown hash algorithm")
- }
- return asn1.Marshal(ocspRequest{
- tbsRequest{
- Version: 0,
- RequestList: []request{
- {
- Cert: certID{
- pkix.AlgorithmIdentifier{
- Algorithm: hashAlg,
- Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
- },
- req.IssuerNameHash,
- req.IssuerKeyHash,
- req.SerialNumber,
- },
- },
- },
- },
- })
-}
-
-// Response represents an OCSP response containing a single SingleResponse. See
-// RFC 6960.
-type Response struct {
- Raw []byte
-
- // Status is one of {Good, Revoked, Unknown}
- Status int
- SerialNumber *big.Int
- ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time
- RevocationReason int
- Certificate *x509.Certificate
- // TBSResponseData contains the raw bytes of the signed response. If
- // Certificate is nil then this can be used to verify Signature.
- TBSResponseData []byte
- Signature []byte
- SignatureAlgorithm x509.SignatureAlgorithm
-
- // IssuerHash is the hash used to compute the IssuerNameHash and IssuerKeyHash.
- // Valid values are crypto.SHA1, crypto.SHA256, crypto.SHA384, and crypto.SHA512.
- // If zero, the default is crypto.SHA1.
- IssuerHash crypto.Hash
-
- // RawResponderName optionally contains the DER-encoded subject of the
- // responder certificate. Exactly one of RawResponderName and
- // ResponderKeyHash is set.
- RawResponderName []byte
- // ResponderKeyHash optionally contains the SHA-1 hash of the
- // responder's public key. Exactly one of RawResponderName and
- // ResponderKeyHash is set.
- ResponderKeyHash []byte
-
- // Extensions contains raw X.509 extensions from the singleExtensions field
- // of the OCSP response. When parsing responses, this can be used to
- // extract non-critical extensions that are not parsed by this package. When
- // marshaling OCSP responses, the Extensions field is ignored, see
- // ExtraExtensions.
- Extensions []pkix.Extension
-
- // ExtraExtensions contains extensions to be copied, raw, into any marshaled
- // OCSP response (in the singleExtensions field). Values override any
- // extensions that would otherwise be produced based on the other fields. The
- // ExtraExtensions field is not populated when parsing responses, see
- // Extensions.
- ExtraExtensions []pkix.Extension
-}
-
-// These are pre-serialized error responses for the various non-success codes
-// defined by OCSP. The Unauthorized code in particular can be used by an OCSP
-// responder that supports only pre-signed responses as a response to requests
-// for certificates with unknown status. See RFC 5019.
-var (
- MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01}
- InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02}
- TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03}
- SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05}
- UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06}
-)
-
-// CheckSignatureFrom checks that the signature in resp is a valid signature
-// from issuer. This should only be used if resp.Certificate is nil. Otherwise,
-// the OCSP response contained an intermediate certificate that created the
-// signature. That signature is checked by ParseResponse and only
-// resp.Certificate remains to be validated.
-func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error {
- return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature)
-}
-
-// ParseError results from an invalid OCSP response.
-type ParseError string
-
-func (p ParseError) Error() string {
- return string(p)
-}
-
-// ParseRequest parses an OCSP request in DER form. It only supports
-// requests for a single certificate. Signed requests are not supported.
-// If a request includes a signature, it will result in a ParseError.
-func ParseRequest(bytes []byte) (*Request, error) {
- var req ocspRequest
- rest, err := asn1.Unmarshal(bytes, &req)
- if err != nil {
- return nil, err
- }
- if len(rest) > 0 {
- return nil, ParseError("trailing data in OCSP request")
- }
-
- if len(req.TBSRequest.RequestList) == 0 {
- return nil, ParseError("OCSP request contains no request body")
- }
- innerRequest := req.TBSRequest.RequestList[0]
-
- hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm)
- if hashFunc == crypto.Hash(0) {
- return nil, ParseError("OCSP request uses unknown hash function")
- }
-
- return &Request{
- HashAlgorithm: hashFunc,
- IssuerNameHash: innerRequest.Cert.NameHash,
- IssuerKeyHash: innerRequest.Cert.IssuerKeyHash,
- SerialNumber: innerRequest.Cert.SerialNumber,
- }, nil
-}
-
-// ParseResponse parses an OCSP response in DER form. The response must contain
-// only one certificate status. To parse the status of a specific certificate
-// from a response which may contain multiple statuses, use ParseResponseForCert
-// instead.
-//
-// If the response contains an embedded certificate, then that certificate will
-// be used to verify the response signature. If the response contains an
-// embedded certificate and issuer is not nil, then issuer will be used to verify
-// the signature on the embedded certificate.
-//
-// If the response does not contain an embedded certificate and issuer is not
-// nil, then issuer will be used to verify the response signature.
-//
-// Invalid responses and parse failures will result in a ParseError.
-// Error responses will result in a ResponseError.
-func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) {
- return ParseResponseForCert(bytes, nil, issuer)
-}
-
-// ParseResponseForCert acts identically to ParseResponse, except it supports
-// parsing responses that contain multiple statuses. If the response contains
-// multiple statuses and cert is not nil, then ParseResponseForCert will return
-// the first status which contains a matching serial, otherwise it will return an
-// error. If cert is nil, then the first status in the response will be returned.
-func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Response, error) {
- var resp responseASN1
- rest, err := asn1.Unmarshal(bytes, &resp)
- if err != nil {
- return nil, err
- }
- if len(rest) > 0 {
- return nil, ParseError("trailing data in OCSP response")
- }
-
- if status := ResponseStatus(resp.Status); status != Success {
- return nil, ResponseError{status}
- }
-
- if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) {
- return nil, ParseError("bad OCSP response type")
- }
-
- var basicResp basicResponse
- rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp)
- if err != nil {
- return nil, err
- }
- if len(rest) > 0 {
- return nil, ParseError("trailing data in OCSP response")
- }
-
- if n := len(basicResp.TBSResponseData.Responses); n == 0 || cert == nil && n > 1 {
- return nil, ParseError("OCSP response contains bad number of responses")
- }
-
- var singleResp singleResponse
- if cert == nil {
- singleResp = basicResp.TBSResponseData.Responses[0]
- } else {
- match := false
- for _, resp := range basicResp.TBSResponseData.Responses {
- if cert.SerialNumber.Cmp(resp.CertID.SerialNumber) == 0 {
- singleResp = resp
- match = true
- break
- }
- }
- if !match {
- return nil, ParseError("no response matching the supplied certificate")
- }
- }
-
- ret := &Response{
- Raw: bytes,
- TBSResponseData: basicResp.TBSResponseData.Raw,
- Signature: basicResp.Signature.RightAlign(),
- SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm),
- Extensions: singleResp.SingleExtensions,
- SerialNumber: singleResp.CertID.SerialNumber,
- ProducedAt: basicResp.TBSResponseData.ProducedAt,
- ThisUpdate: singleResp.ThisUpdate,
- NextUpdate: singleResp.NextUpdate,
- }
-
- // Handle the ResponderID CHOICE tag. ResponderID can be flattened into
- // TBSResponseData once https://go-review.googlesource.com/34503 has been
- // released.
- rawResponderID := basicResp.TBSResponseData.RawResponderID
- switch rawResponderID.Tag {
- case 1: // Name
- var rdn pkix.RDNSequence
- if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &rdn); err != nil || len(rest) != 0 {
- return nil, ParseError("invalid responder name")
- }
- ret.RawResponderName = rawResponderID.Bytes
- case 2: // KeyHash
- if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &ret.ResponderKeyHash); err != nil || len(rest) != 0 {
- return nil, ParseError("invalid responder key hash")
- }
- default:
- return nil, ParseError("invalid responder id tag")
- }
-
- if len(basicResp.Certificates) > 0 {
- // Responders should only send a single certificate (if they
- // send any) that connects the responder's certificate to the
- // original issuer. We accept responses with multiple
- // certificates due to a number of responders sending them[1], but
- // ignore all but the first.
- //
- // [1] https://github.com/golang/go/issues/21527
- ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes)
- if err != nil {
- return nil, err
- }
-
- if err := ret.CheckSignatureFrom(ret.Certificate); err != nil {
- return nil, ParseError("bad signature on embedded certificate: " + err.Error())
- }
-
- if issuer != nil {
- if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil {
- return nil, ParseError("bad OCSP signature: " + err.Error())
- }
- }
- } else if issuer != nil {
- if err := ret.CheckSignatureFrom(issuer); err != nil {
- return nil, ParseError("bad OCSP signature: " + err.Error())
- }
- }
-
- for _, ext := range singleResp.SingleExtensions {
- if ext.Critical {
- return nil, ParseError("unsupported critical extension")
- }
- }
-
- for h, oid := range hashOIDs {
- if singleResp.CertID.HashAlgorithm.Algorithm.Equal(oid) {
- ret.IssuerHash = h
- break
- }
- }
- if ret.IssuerHash == 0 {
- return nil, ParseError("unsupported issuer hash algorithm")
- }
-
- switch {
- case bool(singleResp.Good):
- ret.Status = Good
- case bool(singleResp.Unknown):
- ret.Status = Unknown
- default:
- ret.Status = Revoked
- ret.RevokedAt = singleResp.Revoked.RevocationTime
- ret.RevocationReason = int(singleResp.Revoked.Reason)
- }
-
- return ret, nil
-}
-
-// RequestOptions contains options for constructing OCSP requests.
-type RequestOptions struct {
- // Hash contains the hash function that should be used when
- // constructing the OCSP request. If zero, SHA-1 will be used.
- Hash crypto.Hash
-}
-
-func (opts *RequestOptions) hash() crypto.Hash {
- if opts == nil || opts.Hash == 0 {
- // SHA-1 is nearly universally used in OCSP.
- return crypto.SHA1
- }
- return opts.Hash
-}
-
-// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If
-// opts is nil then sensible defaults are used.
-func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) {
- hashFunc := opts.hash()
-
- // OCSP seems to be the only place where these raw hash identifiers are
- // used. I took the following from
- // http://msdn.microsoft.com/en-us/library/ff635603.aspx
- _, ok := hashOIDs[hashFunc]
- if !ok {
- return nil, x509.ErrUnsupportedAlgorithm
- }
-
- if !hashFunc.Available() {
- return nil, x509.ErrUnsupportedAlgorithm
- }
- h := opts.hash().New()
-
- var publicKeyInfo struct {
- Algorithm pkix.AlgorithmIdentifier
- PublicKey asn1.BitString
- }
- if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
- return nil, err
- }
-
- h.Write(publicKeyInfo.PublicKey.RightAlign())
- issuerKeyHash := h.Sum(nil)
-
- h.Reset()
- h.Write(issuer.RawSubject)
- issuerNameHash := h.Sum(nil)
-
- req := &Request{
- HashAlgorithm: hashFunc,
- IssuerNameHash: issuerNameHash,
- IssuerKeyHash: issuerKeyHash,
- SerialNumber: cert.SerialNumber,
- }
- return req.Marshal()
-}
-
-// CreateResponse returns a DER-encoded OCSP response with the specified contents.
-// The fields in the response are populated as follows:
-//
-// The responder cert is used to populate the responder's name field, and the
-// certificate itself is provided alongside the OCSP response signature.
-//
-// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields.
-//
-// The template is used to populate the SerialNumber, Status, RevokedAt,
-// RevocationReason, ThisUpdate, and NextUpdate fields.
-//
-// If template.IssuerHash is not set, SHA1 will be used.
-//
-// The ProducedAt date is automatically set to the current date, to the nearest minute.
-func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) {
- var publicKeyInfo struct {
- Algorithm pkix.AlgorithmIdentifier
- PublicKey asn1.BitString
- }
- if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
- return nil, err
- }
-
- if template.IssuerHash == 0 {
- template.IssuerHash = crypto.SHA1
- }
- hashOID := getOIDFromHashAlgorithm(template.IssuerHash)
- if hashOID == nil {
- return nil, errors.New("unsupported issuer hash algorithm")
- }
-
- if !template.IssuerHash.Available() {
- return nil, fmt.Errorf("issuer hash algorithm %v not linked into binary", template.IssuerHash)
- }
- h := template.IssuerHash.New()
- h.Write(publicKeyInfo.PublicKey.RightAlign())
- issuerKeyHash := h.Sum(nil)
-
- h.Reset()
- h.Write(issuer.RawSubject)
- issuerNameHash := h.Sum(nil)
-
- innerResponse := singleResponse{
- CertID: certID{
- HashAlgorithm: pkix.AlgorithmIdentifier{
- Algorithm: hashOID,
- Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
- },
- NameHash: issuerNameHash,
- IssuerKeyHash: issuerKeyHash,
- SerialNumber: template.SerialNumber,
- },
- ThisUpdate: template.ThisUpdate.UTC(),
- NextUpdate: template.NextUpdate.UTC(),
- SingleExtensions: template.ExtraExtensions,
- }
-
- switch template.Status {
- case Good:
- innerResponse.Good = true
- case Unknown:
- innerResponse.Unknown = true
- case Revoked:
- innerResponse.Revoked = revokedInfo{
- RevocationTime: template.RevokedAt.UTC(),
- Reason: asn1.Enumerated(template.RevocationReason),
- }
- }
-
- rawResponderID := asn1.RawValue{
- Class: 2, // context-specific
- Tag: 1, // Name (explicit tag)
- IsCompound: true,
- Bytes: responderCert.RawSubject,
- }
- tbsResponseData := responseData{
- Version: 0,
- RawResponderID: rawResponderID,
- ProducedAt: time.Now().Truncate(time.Minute).UTC(),
- Responses: []singleResponse{innerResponse},
- }
-
- tbsResponseDataDER, err := asn1.Marshal(tbsResponseData)
- if err != nil {
- return nil, err
- }
-
- hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm)
- if err != nil {
- return nil, err
- }
-
- responseHash := hashFunc.New()
- responseHash.Write(tbsResponseDataDER)
- signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc)
- if err != nil {
- return nil, err
- }
-
- response := basicResponse{
- TBSResponseData: tbsResponseData,
- SignatureAlgorithm: signatureAlgorithm,
- Signature: asn1.BitString{
- Bytes: signature,
- BitLength: 8 * len(signature),
- },
- }
- if template.Certificate != nil {
- response.Certificates = []asn1.RawValue{
- {FullBytes: template.Certificate.Raw},
- }
- }
- responseDER, err := asn1.Marshal(response)
- if err != nil {
- return nil, err
- }
-
- return asn1.Marshal(responseASN1{
- Status: asn1.Enumerated(Success),
- Response: responseBytes{
- ResponseType: idPKIXOCSPBasic,
- Response: responseDER,
- },
- })
-}
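
The hunk above drops the remainder of the vendored golang.org/x/crypto/ocsp package. For reference, a minimal sketch of how the Request/Response API documented in the removed code is typically driven; the `fetchOCSP` callback is a placeholder for the HTTP POST to one of `cert.OCSPServer` and is not part of the package:

```go
package ocspexample

import (
	"crypto"
	_ "crypto/sha256" // registers crypto.SHA256
	"crypto/x509"
	"fmt"

	"golang.org/x/crypto/ocsp"
)

// checkStatus builds a DER-encoded OCSP request for cert and interprets the
// responder's answer. fetchOCSP (assumed, not shown) would POST der to the
// responder and return the raw DER response bytes.
func checkStatus(cert, issuer *x509.Certificate, fetchOCSP func(der []byte) ([]byte, error)) error {
	der, err := ocsp.CreateRequest(cert, issuer, &ocsp.RequestOptions{Hash: crypto.SHA256})
	if err != nil {
		return err
	}
	raw, err := fetchOCSP(der)
	if err != nil {
		return err
	}
	// issuer verifies the response signature, or the embedded responder
	// certificate when one is present.
	resp, err := ocsp.ParseResponse(raw, issuer)
	if err != nil {
		return err
	}
	switch resp.Status {
	case ocsp.Good:
		fmt.Println("good until", resp.NextUpdate)
	case ocsp.Revoked:
		fmt.Println("revoked at", resp.RevokedAt, "reason", resp.RevocationReason)
	default:
		fmt.Println("status unknown to the responder")
	}
	return nil
}
```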
diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go
deleted file mode 100644
index a51269d91..000000000
--- a/vendor/golang.org/x/crypto/sha3/hashes.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sha3 implements the SHA-3 hash algorithms and the SHAKE extendable
-// output functions defined in FIPS 202.
-//
-// Most of this package is a wrapper around the crypto/sha3 package in the
-// standard library. The only exception is the legacy Keccak hash functions.
-package sha3
-
-import (
- "crypto/sha3"
- "hash"
-)
-
-// New224 creates a new SHA3-224 hash.
-// Its generic security strength is 224 bits against preimage attacks,
-// and 112 bits against collision attacks.
-//
-// It is a wrapper for the [sha3.New224] function in the standard library.
-//
-//go:fix inline
-func New224() hash.Hash {
- return sha3.New224()
-}
-
-// New256 creates a new SHA3-256 hash.
-// Its generic security strength is 256 bits against preimage attacks,
-// and 128 bits against collision attacks.
-//
-// It is a wrapper for the [sha3.New256] function in the standard library.
-//
-//go:fix inline
-func New256() hash.Hash {
- return sha3.New256()
-}
-
-// New384 creates a new SHA3-384 hash.
-// Its generic security strength is 384 bits against preimage attacks,
-// and 192 bits against collision attacks.
-//
-// It is a wrapper for the [sha3.New384] function in the standard library.
-//
-//go:fix inline
-func New384() hash.Hash {
- return sha3.New384()
-}
-
-// New512 creates a new SHA3-512 hash.
-// Its generic security strength is 512 bits against preimage attacks,
-// and 256 bits against collision attacks.
-//
-// It is a wrapper for the [sha3.New512] function in the standard library.
-//
-//go:fix inline
-func New512() hash.Hash {
- return sha3.New512()
-}
-
-// Sum224 returns the SHA3-224 digest of the data.
-//
-// It is a wrapper for the [sha3.Sum224] function in the standard library.
-//
-//go:fix inline
-func Sum224(data []byte) [28]byte {
- return sha3.Sum224(data)
-}
-
-// Sum256 returns the SHA3-256 digest of the data.
-//
-// It is a wrapper for the [sha3.Sum256] function in the standard library.
-//
-//go:fix inline
-func Sum256(data []byte) [32]byte {
- return sha3.Sum256(data)
-}
-
-// Sum384 returns the SHA3-384 digest of the data.
-//
-// It is a wrapper for the [sha3.Sum384] function in the standard library.
-//
-//go:fix inline
-func Sum384(data []byte) [48]byte {
- return sha3.Sum384(data)
-}
-
-// Sum512 returns the SHA3-512 digest of the data.
-//
-// It is a wrapper for the [sha3.Sum512] function in the standard library.
-//
-//go:fix inline
-func Sum512(data []byte) [64]byte {
- return sha3.Sum512(data)
-}
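
Because every function in the deleted file simply forwarded to crypto/sha3 in the standard library (Go 1.24+), the fixed-output API is unchanged for callers. A minimal sketch of the stdlib equivalent:

```go
package main

import (
	"crypto/sha3"
	"fmt"
)

func main() {
	data := []byte("hello")

	// One-shot digest.
	sum := sha3.Sum256(data)

	// Streaming equivalent via the hash.Hash interface.
	h := sha3.New256()
	h.Write(data)
	streamed := h.Sum(nil)

	fmt.Printf("%x\n", sum)
	fmt.Printf("%x\n", streamed) // same 32-byte digest
}
```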
diff --git a/vendor/golang.org/x/crypto/sha3/legacy_hash.go b/vendor/golang.org/x/crypto/sha3/legacy_hash.go
deleted file mode 100644
index b8784536e..000000000
--- a/vendor/golang.org/x/crypto/sha3/legacy_hash.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This implementation is only used for NewLegacyKeccak256 and
-// NewLegacyKeccak512, which are not implemented by crypto/sha3.
-// All other functions in this package are wrappers around crypto/sha3.
-
-import (
- "crypto/subtle"
- "encoding/binary"
- "errors"
- "hash"
- "unsafe"
-
- "golang.org/x/sys/cpu"
-)
-
-const (
- dsbyteKeccak = 0b00000001
-
- // rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in
- // bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits.
- rateK256 = (1600 - 256) / 8
- rateK512 = (1600 - 512) / 8
- rateK1024 = (1600 - 1024) / 8
-)
-
-// NewLegacyKeccak256 creates a new Keccak-256 hash.
-//
-// Only use this function if you require compatibility with an existing cryptosystem
-// that uses non-standard padding. All other users should use New256 instead.
-func NewLegacyKeccak256() hash.Hash {
- return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak}
-}
-
-// NewLegacyKeccak512 creates a new Keccak-512 hash.
-//
-// Only use this function if you require compatibility with an existing cryptosystem
-// that uses non-standard padding. All other users should use New512 instead.
-func NewLegacyKeccak512() hash.Hash {
- return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak}
-}
-
-// spongeDirection indicates the direction bytes are flowing through the sponge.
-type spongeDirection int
-
-const (
- // spongeAbsorbing indicates that the sponge is absorbing input.
- spongeAbsorbing spongeDirection = iota
- // spongeSqueezing indicates that the sponge is being squeezed.
- spongeSqueezing
-)
-
-type state struct {
- a [1600 / 8]byte // main state of the hash
-
- // a[n:rate] is the buffer. If absorbing, it's the remaining space to XOR
- // into before running the permutation. If squeezing, it's the remaining
- // output to produce before running the permutation.
- n, rate int
-
- // dsbyte contains the "domain separation" bits and the first bit of
- // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
- // SHA-3 and SHAKE functions by appending bitstrings to the message.
- // Using a little-endian bit-ordering convention, these are "01" for SHA-3
- // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
- // padding rule from section 5.1 is applied to pad the message to a multiple
- // of the rate, which involves adding a "1" bit, zero or more "0" bits, and
- // a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
- // giving 00000110b (0x06) and 00011111b (0x1f).
- // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
- // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
- // Extendable-Output Functions (May 2014)"
- dsbyte byte
-
- outputLen int // the default output size in bytes
- state spongeDirection // whether the sponge is absorbing or squeezing
-}
-
-// BlockSize returns the rate of the sponge underlying this hash function.
-func (d *state) BlockSize() int { return d.rate }
-
-// Size returns the output size of the hash function in bytes.
-func (d *state) Size() int { return d.outputLen }
-
-// Reset clears the internal state by zeroing the sponge state and
-// the buffer indexes, and setting Sponge.state to absorbing.
-func (d *state) Reset() {
- // Zero the permutation's state.
- for i := range d.a {
- d.a[i] = 0
- }
- d.state = spongeAbsorbing
- d.n = 0
-}
-
-func (d *state) clone() *state {
- ret := *d
- return &ret
-}
-
-// permute applies the KeccakF-1600 permutation.
-func (d *state) permute() {
- var a *[25]uint64
- if cpu.IsBigEndian {
- a = new([25]uint64)
- for i := range a {
- a[i] = binary.LittleEndian.Uint64(d.a[i*8:])
- }
- } else {
- a = (*[25]uint64)(unsafe.Pointer(&d.a))
- }
-
- keccakF1600(a)
- d.n = 0
-
- if cpu.IsBigEndian {
- for i := range a {
- binary.LittleEndian.PutUint64(d.a[i*8:], a[i])
- }
- }
-}
-
-// padAndPermute appends the domain separation bits in dsbyte, applies
-// the multi-bitrate 10..1 padding rule, and permutes the state.
-func (d *state) padAndPermute() {
- // Pad with this instance's domain-separator bits. We know that there's
- // at least one byte of space in the sponge because, if it were full,
- // permute would have been called to empty it. dsbyte also contains the
- // first one bit for the padding. See the comment in the state struct.
- d.a[d.n] ^= d.dsbyte
- // This adds the final one bit for the padding. Because of the way that
- // bits are numbered from the LSB upwards, the final bit is the MSB of
- // the last byte.
- d.a[d.rate-1] ^= 0x80
- // Apply the permutation
- d.permute()
- d.state = spongeSqueezing
-}
-
-// Write absorbs more data into the hash's state. It panics if any
-// output has already been read.
-func (d *state) Write(p []byte) (n int, err error) {
- if d.state != spongeAbsorbing {
- panic("sha3: Write after Read")
- }
-
- n = len(p)
-
- for len(p) > 0 {
- x := subtle.XORBytes(d.a[d.n:d.rate], d.a[d.n:d.rate], p)
- d.n += x
- p = p[x:]
-
- // If the sponge is full, apply the permutation.
- if d.n == d.rate {
- d.permute()
- }
- }
-
- return
-}
-
-// Read squeezes an arbitrary number of bytes from the sponge.
-func (d *state) Read(out []byte) (n int, err error) {
- // If we're still absorbing, pad and apply the permutation.
- if d.state == spongeAbsorbing {
- d.padAndPermute()
- }
-
- n = len(out)
-
- // Now, do the squeezing.
- for len(out) > 0 {
- // Apply the permutation if we've squeezed the sponge dry.
- if d.n == d.rate {
- d.permute()
- }
-
- x := copy(out, d.a[d.n:d.rate])
- d.n += x
- out = out[x:]
- }
-
- return
-}
-
-// Sum applies padding to the hash state and then squeezes out the desired
-// number of output bytes. It panics if any output has already been read.
-func (d *state) Sum(in []byte) []byte {
- if d.state != spongeAbsorbing {
- panic("sha3: Sum after Read")
- }
-
- // Make a copy of the original hash so that caller can keep writing
- // and summing.
- dup := d.clone()
- hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation
- dup.Read(hash)
- return append(in, hash...)
-}
-
-const (
- magicKeccak = "sha\x0b"
- // magic || rate || main state || n || sponge direction
- marshaledSize = len(magicKeccak) + 1 + 200 + 1 + 1
-)
-
-func (d *state) MarshalBinary() ([]byte, error) {
- return d.AppendBinary(make([]byte, 0, marshaledSize))
-}
-
-func (d *state) AppendBinary(b []byte) ([]byte, error) {
- switch d.dsbyte {
- case dsbyteKeccak:
- b = append(b, magicKeccak...)
- default:
- panic("unknown dsbyte")
- }
- // rate is at most 168, and n is at most rate.
- b = append(b, byte(d.rate))
- b = append(b, d.a[:]...)
- b = append(b, byte(d.n), byte(d.state))
- return b, nil
-}
-
-func (d *state) UnmarshalBinary(b []byte) error {
- if len(b) != marshaledSize {
- return errors.New("sha3: invalid hash state")
- }
-
- magic := string(b[:len(magicKeccak)])
- b = b[len(magicKeccak):]
- switch {
- case magic == magicKeccak && d.dsbyte == dsbyteKeccak:
- default:
- return errors.New("sha3: invalid hash state identifier")
- }
-
- rate := int(b[0])
- b = b[1:]
- if rate != d.rate {
- return errors.New("sha3: invalid hash state function")
- }
-
- copy(d.a[:], b)
- b = b[len(d.a):]
-
- n, state := int(b[0]), spongeDirection(b[1])
- if n > d.rate {
- return errors.New("sha3: invalid hash state")
- }
- d.n = n
- if state != spongeAbsorbing && state != spongeSqueezing {
- return errors.New("sha3: invalid hash state")
- }
- d.state = state
-
- return nil
-}
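
The legacy Keccak constructors were the only non-wrapper code in the vendored package, since crypto/sha3 does not expose the pre-FIPS 0x01 padding byte. They remain available upstream in golang.org/x/crypto/sha3; a short usage sketch:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// Keccak-256 with the original 0x01 domain-separation byte, as used by
	// systems (e.g. Ethereum) that predate the FIPS 202 padding rule.
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil))
}
```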
diff --git a/vendor/golang.org/x/crypto/sha3/legacy_keccakf.go b/vendor/golang.org/x/crypto/sha3/legacy_keccakf.go
deleted file mode 100644
index 101588c16..000000000
--- a/vendor/golang.org/x/crypto/sha3/legacy_keccakf.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This implementation is only used for NewLegacyKeccak256 and
-// NewLegacyKeccak512, which are not implemented by crypto/sha3.
-// All other functions in this package are wrappers around crypto/sha3.
-
-import "math/bits"
-
-// rc stores the round constants for use in the ι step.
-var rc = [24]uint64{
- 0x0000000000000001,
- 0x0000000000008082,
- 0x800000000000808A,
- 0x8000000080008000,
- 0x000000000000808B,
- 0x0000000080000001,
- 0x8000000080008081,
- 0x8000000000008009,
- 0x000000000000008A,
- 0x0000000000000088,
- 0x0000000080008009,
- 0x000000008000000A,
- 0x000000008000808B,
- 0x800000000000008B,
- 0x8000000000008089,
- 0x8000000000008003,
- 0x8000000000008002,
- 0x8000000000000080,
- 0x000000000000800A,
- 0x800000008000000A,
- 0x8000000080008081,
- 0x8000000000008080,
- 0x0000000080000001,
- 0x8000000080008008,
-}
-
-// keccakF1600 applies the Keccak permutation to a 1600b-wide
-// state represented as a slice of 25 uint64s.
-func keccakF1600(a *[25]uint64) {
- // Implementation translated from Keccak-inplace.c
- // in the keccak reference code.
- var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
-
- for i := 0; i < 24; i += 4 {
- // Combines the 5 steps in each round into 2 steps.
- // Unrolls 4 rounds per loop and spreads some steps across rounds.
-
- // Round 1
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[6] ^ d1
- bc1 = bits.RotateLeft64(t, 44)
- t = a[12] ^ d2
- bc2 = bits.RotateLeft64(t, 43)
- t = a[18] ^ d3
- bc3 = bits.RotateLeft64(t, 21)
- t = a[24] ^ d4
- bc4 = bits.RotateLeft64(t, 14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc2 = bits.RotateLeft64(t, 3)
- t = a[16] ^ d1
- bc3 = bits.RotateLeft64(t, 45)
- t = a[22] ^ d2
- bc4 = bits.RotateLeft64(t, 61)
- t = a[3] ^ d3
- bc0 = bits.RotateLeft64(t, 28)
- t = a[9] ^ d4
- bc1 = bits.RotateLeft64(t, 20)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc4 = bits.RotateLeft64(t, 18)
- t = a[1] ^ d1
- bc0 = bits.RotateLeft64(t, 1)
- t = a[7] ^ d2
- bc1 = bits.RotateLeft64(t, 6)
- t = a[13] ^ d3
- bc2 = bits.RotateLeft64(t, 25)
- t = a[19] ^ d4
- bc3 = bits.RotateLeft64(t, 8)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc1 = bits.RotateLeft64(t, 36)
- t = a[11] ^ d1
- bc2 = bits.RotateLeft64(t, 10)
- t = a[17] ^ d2
- bc3 = bits.RotateLeft64(t, 15)
- t = a[23] ^ d3
- bc4 = bits.RotateLeft64(t, 56)
- t = a[4] ^ d4
- bc0 = bits.RotateLeft64(t, 27)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc3 = bits.RotateLeft64(t, 41)
- t = a[21] ^ d1
- bc4 = bits.RotateLeft64(t, 2)
- t = a[2] ^ d2
- bc0 = bits.RotateLeft64(t, 62)
- t = a[8] ^ d3
- bc1 = bits.RotateLeft64(t, 55)
- t = a[14] ^ d4
- bc2 = bits.RotateLeft64(t, 39)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- // Round 2
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[16] ^ d1
- bc1 = bits.RotateLeft64(t, 44)
- t = a[7] ^ d2
- bc2 = bits.RotateLeft64(t, 43)
- t = a[23] ^ d3
- bc3 = bits.RotateLeft64(t, 21)
- t = a[14] ^ d4
- bc4 = bits.RotateLeft64(t, 14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc2 = bits.RotateLeft64(t, 3)
- t = a[11] ^ d1
- bc3 = bits.RotateLeft64(t, 45)
- t = a[2] ^ d2
- bc4 = bits.RotateLeft64(t, 61)
- t = a[18] ^ d3
- bc0 = bits.RotateLeft64(t, 28)
- t = a[9] ^ d4
- bc1 = bits.RotateLeft64(t, 20)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc4 = bits.RotateLeft64(t, 18)
- t = a[6] ^ d1
- bc0 = bits.RotateLeft64(t, 1)
- t = a[22] ^ d2
- bc1 = bits.RotateLeft64(t, 6)
- t = a[13] ^ d3
- bc2 = bits.RotateLeft64(t, 25)
- t = a[4] ^ d4
- bc3 = bits.RotateLeft64(t, 8)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc1 = bits.RotateLeft64(t, 36)
- t = a[1] ^ d1
- bc2 = bits.RotateLeft64(t, 10)
- t = a[17] ^ d2
- bc3 = bits.RotateLeft64(t, 15)
- t = a[8] ^ d3
- bc4 = bits.RotateLeft64(t, 56)
- t = a[24] ^ d4
- bc0 = bits.RotateLeft64(t, 27)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc3 = bits.RotateLeft64(t, 41)
- t = a[21] ^ d1
- bc4 = bits.RotateLeft64(t, 2)
- t = a[12] ^ d2
- bc0 = bits.RotateLeft64(t, 62)
- t = a[3] ^ d3
- bc1 = bits.RotateLeft64(t, 55)
- t = a[19] ^ d4
- bc2 = bits.RotateLeft64(t, 39)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- // Round 3
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[11] ^ d1
- bc1 = bits.RotateLeft64(t, 44)
- t = a[22] ^ d2
- bc2 = bits.RotateLeft64(t, 43)
- t = a[8] ^ d3
- bc3 = bits.RotateLeft64(t, 21)
- t = a[19] ^ d4
- bc4 = bits.RotateLeft64(t, 14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc2 = bits.RotateLeft64(t, 3)
- t = a[1] ^ d1
- bc3 = bits.RotateLeft64(t, 45)
- t = a[12] ^ d2
- bc4 = bits.RotateLeft64(t, 61)
- t = a[23] ^ d3
- bc0 = bits.RotateLeft64(t, 28)
- t = a[9] ^ d4
- bc1 = bits.RotateLeft64(t, 20)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc4 = bits.RotateLeft64(t, 18)
- t = a[16] ^ d1
- bc0 = bits.RotateLeft64(t, 1)
- t = a[2] ^ d2
- bc1 = bits.RotateLeft64(t, 6)
- t = a[13] ^ d3
- bc2 = bits.RotateLeft64(t, 25)
- t = a[24] ^ d4
- bc3 = bits.RotateLeft64(t, 8)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc1 = bits.RotateLeft64(t, 36)
- t = a[6] ^ d1
- bc2 = bits.RotateLeft64(t, 10)
- t = a[17] ^ d2
- bc3 = bits.RotateLeft64(t, 15)
- t = a[3] ^ d3
- bc4 = bits.RotateLeft64(t, 56)
- t = a[14] ^ d4
- bc0 = bits.RotateLeft64(t, 27)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc3 = bits.RotateLeft64(t, 41)
- t = a[21] ^ d1
- bc4 = bits.RotateLeft64(t, 2)
- t = a[7] ^ d2
- bc0 = bits.RotateLeft64(t, 62)
- t = a[18] ^ d3
- bc1 = bits.RotateLeft64(t, 55)
- t = a[4] ^ d4
- bc2 = bits.RotateLeft64(t, 39)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- // Round 4
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[1] ^ d1
- bc1 = bits.RotateLeft64(t, 44)
- t = a[2] ^ d2
- bc2 = bits.RotateLeft64(t, 43)
- t = a[3] ^ d3
- bc3 = bits.RotateLeft64(t, 21)
- t = a[4] ^ d4
- bc4 = bits.RotateLeft64(t, 14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc2 = bits.RotateLeft64(t, 3)
- t = a[6] ^ d1
- bc3 = bits.RotateLeft64(t, 45)
- t = a[7] ^ d2
- bc4 = bits.RotateLeft64(t, 61)
- t = a[8] ^ d3
- bc0 = bits.RotateLeft64(t, 28)
- t = a[9] ^ d4
- bc1 = bits.RotateLeft64(t, 20)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc4 = bits.RotateLeft64(t, 18)
- t = a[11] ^ d1
- bc0 = bits.RotateLeft64(t, 1)
- t = a[12] ^ d2
- bc1 = bits.RotateLeft64(t, 6)
- t = a[13] ^ d3
- bc2 = bits.RotateLeft64(t, 25)
- t = a[14] ^ d4
- bc3 = bits.RotateLeft64(t, 8)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc1 = bits.RotateLeft64(t, 36)
- t = a[16] ^ d1
- bc2 = bits.RotateLeft64(t, 10)
- t = a[17] ^ d2
- bc3 = bits.RotateLeft64(t, 15)
- t = a[18] ^ d3
- bc4 = bits.RotateLeft64(t, 56)
- t = a[19] ^ d4
- bc0 = bits.RotateLeft64(t, 27)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc3 = bits.RotateLeft64(t, 41)
- t = a[21] ^ d1
- bc4 = bits.RotateLeft64(t, 2)
- t = a[22] ^ d2
- bc0 = bits.RotateLeft64(t, 62)
- t = a[23] ^ d3
- bc1 = bits.RotateLeft64(t, 55)
- t = a[24] ^ d4
- bc2 = bits.RotateLeft64(t, 39)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
- }
-}
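
The deleted permutation fuses the five Keccak-f step mappings and unrolls four rounds per loop iteration, which obscures the structure. Purely as a readability aid (assumed reference code, not part of the deleted file), the θ step alone looks roughly like this; the `bc0..bc4` values above are the column parities `c[x]` and `d0..d4` are the corresponding `d`:

```go
package keccakref

import "math/bits"

// theta XORs each lane with the parity of two neighbouring columns; it is the
// first of the five step mappings that the unrolled keccakF1600 applies each
// round. The state is indexed as a[x+5*y] for column x and row y.
func theta(a *[25]uint64) {
	var c [5]uint64
	for x := 0; x < 5; x++ {
		c[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
	}
	for x := 0; x < 5; x++ {
		d := c[(x+4)%5] ^ bits.RotateLeft64(c[(x+1)%5], 1)
		for y := 0; y < 25; y += 5 {
			a[x+y] ^= d
		}
	}
}
```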
diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go
deleted file mode 100644
index 6f3f70c26..000000000
--- a/vendor/golang.org/x/crypto/sha3/shake.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-import (
- "crypto/sha3"
- "hash"
- "io"
-)
-
-// ShakeHash defines the interface to hash functions that support
-// arbitrary-length output. When used as a plain [hash.Hash], it
-// produces minimum-length outputs that provide full-strength generic
-// security.
-type ShakeHash interface {
- hash.Hash
-
- // Read reads more output from the hash; reading affects the hash's
- // state. (ShakeHash.Read is thus very different from Hash.Sum.)
- // It never returns an error, but subsequent calls to Write or Sum
- // will panic.
- io.Reader
-
- // Clone returns a copy of the ShakeHash in its current state.
- Clone() ShakeHash
-}
-
-// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
-// Its generic security strength is 128 bits against all attacks if at
-// least 32 bytes of its output are used.
-func NewShake128() ShakeHash {
- return &shakeWrapper{sha3.NewSHAKE128(), 32, false, sha3.NewSHAKE128}
-}
-
-// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
-// Its generic security strength is 256 bits against all attacks if
-// at least 64 bytes of its output are used.
-func NewShake256() ShakeHash {
- return &shakeWrapper{sha3.NewSHAKE256(), 64, false, sha3.NewSHAKE256}
-}
-
-// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
-// a customizable variant of SHAKE128.
-// N is used to define functions based on cSHAKE; it can be empty when plain cSHAKE is
-// desired. S is a customization byte string used for domain separation - two cSHAKE
-// computations on the same input with different S yield unrelated outputs.
-// When N and S are both empty, this is equivalent to NewShake128.
-func NewCShake128(N, S []byte) ShakeHash {
- return &shakeWrapper{sha3.NewCSHAKE128(N, S), 32, false, func() *sha3.SHAKE {
- return sha3.NewCSHAKE128(N, S)
- }}
-}
-
-// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
-// a customizable variant of SHAKE256.
-// N is used to define functions based on cSHAKE; it can be empty when plain cSHAKE is
-// desired. S is a customization byte string used for domain separation - two cSHAKE
-// computations on the same input with different S yield unrelated outputs.
-// When N and S are both empty, this is equivalent to NewShake256.
-func NewCShake256(N, S []byte) ShakeHash {
- return &shakeWrapper{sha3.NewCSHAKE256(N, S), 64, false, func() *sha3.SHAKE {
- return sha3.NewCSHAKE256(N, S)
- }}
-}
-
-// ShakeSum128 writes an arbitrary-length digest of data into hash.
-func ShakeSum128(hash, data []byte) {
- h := NewShake128()
- h.Write(data)
- h.Read(hash)
-}
-
-// ShakeSum256 writes an arbitrary-length digest of data into hash.
-func ShakeSum256(hash, data []byte) {
- h := NewShake256()
- h.Write(data)
- h.Read(hash)
-}
-
-// shakeWrapper adds the Size, Sum, and Clone methods to a sha3.SHAKE
-// to implement the ShakeHash interface.
-type shakeWrapper struct {
- *sha3.SHAKE
- outputLen int
- squeezing bool
- newSHAKE func() *sha3.SHAKE
-}
-
-func (w *shakeWrapper) Read(p []byte) (n int, err error) {
- w.squeezing = true
- return w.SHAKE.Read(p)
-}
-
-func (w *shakeWrapper) Clone() ShakeHash {
- s := w.newSHAKE()
- b, err := w.MarshalBinary()
- if err != nil {
- panic(err) // unreachable
- }
- if err := s.UnmarshalBinary(b); err != nil {
- panic(err) // unreachable
- }
- return &shakeWrapper{s, w.outputLen, w.squeezing, w.newSHAKE}
-}
-
-func (w *shakeWrapper) Size() int { return w.outputLen }
-
-func (w *shakeWrapper) Sum(b []byte) []byte {
- if w.squeezing {
- panic("sha3: Sum after Read")
- }
- out := make([]byte, w.outputLen)
- // Clone the state so that we don't affect future Write calls.
- s := w.Clone()
- s.Read(out)
- return append(b, out...)
-}
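
The removed ShakeHash wrapper delegated to crypto/sha3's SHAKE type; the distinguishing feature is that output is squeezed with Read instead of Sum. A minimal stdlib sketch:

```go
package main

import (
	"crypto/sha3"
	"fmt"
)

func main() {
	h := sha3.NewSHAKE256()
	h.Write([]byte("hello"))

	// Squeeze an arbitrary amount of output; reading at least 64 bytes gives
	// SHAKE256 its full 256-bit generic security strength.
	out := make([]byte, 64)
	h.Read(out)
	fmt.Printf("%x\n", out)
}
```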
diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go
index c25939b92..4a9747ef4 100644
--- a/vendor/golang.org/x/exp/maps/maps.go
+++ b/vendor/golang.org/x/exp/maps/maps.go
@@ -7,17 +7,13 @@ package maps
import "maps"
-// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of
-// these functions except Keys and Values should be annotated
-// (provisionally with "//go:fix inline") so that tools can safely and
-// automatically replace calls to exp/maps with calls to std maps by
-// inlining them.
-
// Keys returns the keys of the map m.
// The keys will be in an indeterminate order.
+//
+// The simplest true equivalent using the standard library is:
+//
+// slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m))
func Keys[M ~map[K]V, K comparable, V any](m M) []K {
- // The simplest true equivalent using std is:
- // return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m)).
r := make([]K, 0, len(m))
for k := range m {
@@ -28,9 +24,11 @@ func Keys[M ~map[K]V, K comparable, V any](m M) []K {
// Values returns the values of the map m.
// The values will be in an indeterminate order.
+//
+// The simplest true equivalent using the standard library is:
+//
+// slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m))
func Values[M ~map[K]V, K comparable, V any](m M) []V {
- // The simplest true equivalent using std is:
- // return slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m)).
r := make([]V, 0, len(m))
for _, v := range m {
@@ -41,23 +39,31 @@ func Values[M ~map[K]V, K comparable, V any](m M) []V {
// Equal reports whether two maps contain the same key/value pairs.
// Values are compared using ==.
+//
+//go:fix inline
func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
return maps.Equal(m1, m2)
}
// EqualFunc is like Equal, but compares values using eq.
// Keys are still compared with ==.
+//
+//go:fix inline
func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
return maps.EqualFunc(m1, m2, eq)
}
// Clear removes all entries from m, leaving it empty.
+//
+//go:fix inline
func Clear[M ~map[K]V, K comparable, V any](m M) {
clear(m)
}
// Clone returns a copy of m. This is a shallow clone:
// the new keys and values are set using ordinary assignment.
+//
+//go:fix inline
func Clone[M ~map[K]V, K comparable, V any](m M) M {
return maps.Clone(m)
}
@@ -66,11 +72,15 @@ func Clone[M ~map[K]V, K comparable, V any](m M) M {
// When a key in src is already present in dst,
// the value in dst will be overwritten by the value associated
// with the key in src.
+//
+//go:fix inline
func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
maps.Copy(dst, src)
}
// DeleteFunc deletes any key/value pairs from m for which del returns true.
+//
+//go:fix inline
func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
maps.DeleteFunc(m, del)
}
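
The reworded doc comments promote the standard-library equivalents into the public documentation. A small migration sketch (assuming Go 1.23+, where maps.Keys returns an iterator):

```go
package main

import (
	"fmt"
	"maps"
	"slices"

	expmaps "golang.org/x/exp/maps"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}

	// exp/maps returns a slice directly...
	k1 := expmaps.Keys(m)

	// ...while the std equivalent collects the maps.Keys iterator.
	k2 := slices.AppendSeq(make([]string, 0, len(m)), maps.Keys(m))

	// Both are in indeterminate order, so sort before comparing.
	slices.Sort(k1)
	slices.Sort(k2)
	fmt.Println(slices.Equal(k1, k2)) // true
}
```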
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index 794b2e32b..563270c15 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -195,7 +195,7 @@ func (r *Reservation) CancelAt(t time.Time) {
// update state
r.lim.last = t
r.lim.tokens = tokens
- if r.timeToAct == r.lim.lastEvent {
+ if r.timeToAct.Equal(r.lim.lastEvent) {
prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens)))
if !prevEvent.Before(t) {
r.lim.lastEvent = prevEvent
diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go
index 6ba99ddb6..9b8393269 100644
--- a/vendor/golang.org/x/time/rate/sometimes.go
+++ b/vendor/golang.org/x/time/rate/sometimes.go
@@ -61,7 +61,9 @@ func (s *Sometimes) Do(f func()) {
(s.Every > 0 && s.count%s.Every == 0) ||
(s.Interval > 0 && time.Since(s.last) >= s.Interval) {
f()
- s.last = time.Now()
+ if s.Interval > 0 {
+ s.last = time.Now()
+ }
}
s.count++
}
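
For context, rate.Sometimes runs the function for the first First calls, on every Every-th call, and whenever Interval has elapsed since the last run; the change above simply avoids reading the clock when no Interval is configured. A minimal usage sketch:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Run the callback for the first 3 calls, then at most once per second.
	s := rate.Sometimes{First: 3, Interval: time.Second}
	for i := 0; i < 10; i++ {
		s.Do(func() { fmt.Println("tick", i) })
		time.Sleep(200 * time.Millisecond)
	}
}
```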
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index d9bfa6e1e..2079de7b0 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -1,73 +1,159 @@
# How to contribute
-We definitely welcome your patches and contributions to gRPC! Please read the gRPC
-organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
-and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
+We welcome your patches and contributions to gRPC! Please read the gRPC
+organization's [governance
+rules](https://github.com/grpc/grpc-community/blob/master/governance.md) before
+proceeding.
If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
## Legal requirements
In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
+[Contributor License
+Agreement](https://identity.linuxfoundation.org/projects/cncf). When you create
+your first PR, a link will be added as a comment that contains the steps needed
+to complete this process.
+
+## Getting Started
+
+A great way to start is by searching through our open issues. [Unassigned issues
+labeled as "help
+wanted"](https://github.com/grpc/grpc-go/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3A%22Status%3A%20Help%20Wanted%22%20no%3Aassignee)
+are especially nice for first-time contributors, as they should be well-defined
+problems that already have agreed-upon solutions.
+
+## Code Style
+
+We follow [Google's published Go style
+guide](https://google.github.io/styleguide/go/). Note that there are three
+primary documents that make up this style guide; please follow them as closely
+as possible. If a reviewer recommends something that contradicts those
+guidelines, there may be valid reasons to do so, but it should be rare.
## Guidelines for Pull Requests
-How to get your contributions merged smoothly and quickly.
+
+Please read the following carefully to ensure your contributions can be merged
+smoothly and quickly.
+
+### PR Contents
- Create **small PRs** that are narrowly focused on **addressing a single
- concern**. We often times receive PRs that are trying to fix several things at
- a time, but only one fix is considered acceptable, nothing gets merged and
- both author's & review's time is wasted. Create more PRs to address different
- concerns and everyone will be happy.
+ concern**. We often receive PRs that attempt to fix several things at the same
+ time, and if one part of the PR has a problem, that will hold up the entire
+ PR.
+
+- If your change does not address an **open issue** with an **agreed
+ resolution**, consider opening an issue and discussing it first. If you are
+ suggesting a behavioral or API change, consider starting with a [gRFC
+ proposal](https://github.com/grpc/proposal). Many new features that are not
+ bug fixes will require cross-language agreement.
+
+- If you want to fix **formatting or style**, consider whether your changes are
+ an obvious improvement or might be considered a personal preference. If a
+ style change is based on preference, it likely will not be accepted. If it
+ corrects widely agreed-upon anti-patterns, then please do create a PR and
+ explain the benefits of the change.
+
+- For correcting **misspellings**, please be aware that we use some terms that
+ are sometimes flagged by spell checkers. As an example, "if and only if" is
+ often written as "iff". Please do not make spelling correction changes unless
+ you are certain they are misspellings.
+
+- **All tests need to be passing** before your change can be merged. We
+ recommend you run tests locally before creating your PR to catch breakages
+ early on:
-- If you are searching for features to work on, issues labeled [Status: Help
- Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
- is a great place to start. These issues are well-documented and usually can be
- resolved with a single pull request.
+ - `./scripts/vet.sh` to catch vet errors.
+ - `go test -cpu 1,4 -timeout 7m ./...` to run the tests.
+ - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode.
-- If you are adding a new file, make sure it has the copyright message template
- at the top as a comment. You can copy over the message from an existing file
- and update the year.
+ Note that we have a multi-module repo, so `go test` commands may need to be
+ run from the root of each module in order to cause all tests to run.
+
+ *Alternatively*, you may find it easier to push your changes to your fork on
+ GitHub, which will trigger a GitHub Actions run that you can use to verify
+ everything is passing.
+
+- Note that there are two GitHub actions checks that need not be green:
+
+ 1. We test the freshness of the generated proto code we maintain via the
+ `vet-proto` check. If the source proto files are updated, but our repo is
+ not updated, an optional checker will fail. This will be fixed by our team
+ in a separate PR and will not prevent the merge of your PR.
+
+ 2. We run a checker that will fail if there is any change in dependencies of
+ an exported package via the `dependencies` check. If new dependencies are
+ added that are not appropriate, we may not accept your PR (see below).
+
+- If you are adding a **new file**, make sure it has the **copyright message**
+ template at the top as a comment. You can copy the message from an existing
+ file and update the year.
- The grpc package should only depend on standard Go packages and a small number
- of exceptions. If your contribution introduces new dependencies which are NOT
- in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
- discussion with gRPC-Go authors and consultants.
+ of exceptions. **If your contribution introduces new dependencies**, you will
+ need a discussion with gRPC-Go maintainers.
-- For speculative changes, consider opening an issue and discussing it first. If
- you are suggesting a behavioral or API change, consider starting with a [gRFC
- proposal](https://github.com/grpc/proposal).
+### PR Descriptions
-- Provide a good **PR description** as a record of **what** change is being made
- and **why** it was made. Link to a GitHub issue if it exists.
+- **PR titles** should start with the name of the component being addressed, or
+ the type of change. Examples: transport, client, server, round_robin, xds,
+ cleanup, deps.
-- If you want to fix formatting or style, consider whether your changes are an
- obvious improvement or might be considered a personal preference. If a style
- change is based on preference, it likely will not be accepted. If it corrects
- widely agreed-upon anti-patterns, then please do create a PR and explain the
- benefits of the change.
+- Read and follow the **guidelines for PR titles and descriptions** here:
+ https://google.github.io/eng-practices/review/developer/cl-descriptions.html
-- Unless your PR is trivial, you should expect there will be reviewer comments
- that you'll need to address before merging. We'll mark it as `Status: Requires
- Reporter Clarification` if we expect you to respond to these comments in a
- timely manner. If the PR remains inactive for 6 days, it will be marked as
- `stale` and automatically close 7 days after that if we don't hear back from
- you.
+ *particularly* the sections "First Line" and "Body is Informative".
-- Maintain **clean commit history** and use **meaningful commit messages**. PRs
- with messy commit history are difficult to review and won't be merged. Use
- `rebase -i upstream/master` to curate your commit history and/or to bring in
- latest changes from master (but avoid rebasing in the middle of a code
- review).
+ Note: your PR description will be used as the git commit message in a
+ squash-and-merge if your PR is approved. We may make changes to this as
+ necessary.
-- Keep your PR up to date with upstream/master (if there are merge conflicts, we
- can't really merge your change).
+- **Does this PR relate to an open issue?** On the first line, please use the
+ tag `Fixes #<issue>` to ensure the issue is closed when the PR is merged. Or
+ use `Updates #<issue>` if the PR is related to an open issue, but does not fix
+ it. Consider filing an issue if one does not already exist.
-- **All tests need to be passing** before your change can be merged. We
- recommend you **run tests locally** before creating your PR to catch breakages
- early on.
- - `./scripts/vet.sh` to catch vet errors
- - `go test -cpu 1,4 -timeout 7m ./...` to run the tests
- - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
+- PR descriptions *must* conclude with **release notes** as follows:
+
+ ```
+ RELEASE NOTES:
+ * <component>: <summary>
+ ```
+
+ This need not match the PR title.
+
+ The summary must:
+
+ * be something that gRPC users will understand.
+
+ * clearly explain the feature being added, the issue being fixed, or the
+ behavior being changed, etc. If fixing a bug, be clear about how the bug
+ can be triggered by an end-user.
+
+ * begin with a capital letter and use complete sentences.
-- Exceptions to the rules can be made if there's a compelling reason for doing so.
+ * be as short as possible to describe the change being made.
+
+ If a PR is *not* end-user visible -- e.g. a cleanup, testing change, or
+ GitHub-related, use `RELEASE NOTES: n/a`.
+
+### PR Process
+
+- Please **self-review** your code changes before sending your PR. This will
+ prevent simple, obvious errors from causing delays.
+
+- Maintain a **clean commit history** and use **meaningful commit messages**.
+ PRs with messy commit histories are difficult to review and won't be merged.
+ Before sending your PR, ensure your changes are based on top of the latest
+ `upstream/master` commits, and avoid rebasing in the middle of a code review.
+ You should **never use `git push -f`** unless absolutely necessary during a
+ review, as it can interfere with GitHub's tracking of comments.
+
+- Unless your PR is trivial, you should **expect reviewer comments** that you
+ will need to address before merging. We'll label the PR as `Status: Requires
+ Reporter Clarification` if we expect you to respond to these comments in a
+ timely manner. If the PR remains inactive for 6 days, it will be marked as
+ `stale`, and we will automatically close it after 7 days if we don't hear back
+ from you. Please feel free to ping issues or bugs if you do not get a response
+ within a week.
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index 5d4096d46..df35bb9a8 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -9,21 +9,19 @@ for general contribution guidelines.
## Maintainers (in alphabetical order)
-- [aranjans](https://github.com/aranjans), Google LLC
- [arjan-bal](https://github.com/arjan-bal), Google LLC
- [arvindbr8](https://github.com/arvindbr8), Google LLC
- [atollena](https://github.com/atollena), Datadog, Inc.
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
-- [erm-g](https://github.com/erm-g), Google LLC
- [gtcooke94](https://github.com/gtcooke94), Google LLC
-- [purnesh42h](https://github.com/purnesh42h), Google LLC
-- [zasweq](https://github.com/zasweq), Google LLC
## Emeritus Maintainers (in alphabetical order)
- [adelez](https://github.com/adelez)
+- [aranjans](https://github.com/aranjans)
- [canguler](https://github.com/canguler)
- [cesarghali](https://github.com/cesarghali)
+- [erm-g](https://github.com/erm-g)
- [iamqizhao](https://github.com/iamqizhao)
- [jeanbza](https://github.com/jeanbza)
- [jtattermusch](https://github.com/jtattermusch)
@@ -32,5 +30,7 @@ for general contribution guidelines.
- [matt-kwong](https://github.com/matt-kwong)
- [menghanl](https://github.com/menghanl)
- [nicolasnoble](https://github.com/nicolasnoble)
+- [purnesh42h](https://github.com/purnesh42h)
- [srini100](https://github.com/srini100)
- [yongni](https://github.com/yongni)
+- [zasweq](https://github.com/zasweq)
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index b572707c6..f9a88d597 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -32,6 +32,7 @@ import "google.golang.org/grpc"
- [Low-level technical docs](Documentation) from this repository
- [Performance benchmark][]
- [Examples](examples)
+- [Contribution guidelines](CONTRIBUTING.md)
## FAQ
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index c9b343c71..b1264017d 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -360,6 +360,10 @@ type Balancer interface {
// call SubConn.Shutdown for its existing SubConns; however, this will be
// required in a future release, so it is recommended.
Close()
+ // ExitIdle instructs the LB policy to reconnect to backends / exit the
+ // IDLE state, if appropriate and possible. Note that SubConns that enter
+ // the IDLE state will not reconnect until SubConn.Connect is called.
+ ExitIdle()
}
// ExitIdler is an optional interface for balancers to implement. If
@@ -367,8 +371,8 @@ type Balancer interface {
// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause
// all SubConns to connect.
//
-// Notice: it will be required for all balancers to implement this in a future
-// release.
+// Deprecated: All balancers must implement this interface. This interface will
+// be removed in a future release.
type ExitIdler interface {
// ExitIdle instructs the LB policy to reconnect to backends / exit the
// IDLE state, if appropriate and possible. Note that SubConns that enter
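
A minimal standalone sketch of what the balancer.go change above now requires: ExitIdle moves from the optional ExitIdler interface onto Balancer itself, so every policy has to provide it. The types below are illustrative only (not the real google.golang.org/grpc/balancer API); the hypothetical parent simply fans the call out to its children, which is essentially what endpointsharding's new ExitIdle in the next file does under its child mutex.

```go
package main

import "fmt"

// exitIdler mirrors the shape of the new requirement: every LB policy must
// expose ExitIdle. (Illustrative local interface, not balancer.Balancer.)
type exitIdler interface {
	ExitIdle()
}

// fanOutPolicy is a hypothetical parent policy that forwards ExitIdle to all
// of its children.
type fanOutPolicy struct {
	children []exitIdler
}

func (p *fanOutPolicy) ExitIdle() {
	for _, c := range p.children {
		c.ExitIdle()
	}
}

type leaf struct{ name string }

func (l *leaf) ExitIdle() { fmt.Println(l.name, "asked to leave IDLE") }

func main() {
	p := &fanOutPolicy{children: []exitIdler{&leaf{name: "child-0"}, &leaf{name: "child-1"}}}
	p.ExitIdle()
}
```
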
diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
index cc606f4da..360db08eb 100644
--- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
+++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
@@ -37,6 +37,8 @@ import (
"google.golang.org/grpc/resolver"
)
+var randIntN = rand.IntN
+
// ChildState is the balancer state of a child along with the endpoint which
// identifies the child balancer.
type ChildState struct {
@@ -45,7 +47,15 @@ type ChildState struct {
// Balancer exposes only the ExitIdler interface of the child LB policy.
// Other methods of the child policy are called only by endpointsharding.
- Balancer balancer.ExitIdler
+ Balancer ExitIdler
+}
+
+// ExitIdler provides access to only the ExitIdle method of the child balancer.
+type ExitIdler interface {
+ // ExitIdle instructs the LB policy to reconnect to backends / exit the
+ // IDLE state, if appropriate and possible. Note that SubConns that enter
+ // the IDLE state will not reconnect until SubConn.Connect is called.
+ ExitIdle()
}
// Options are the options to configure the behaviour of the
@@ -104,6 +114,21 @@ type endpointSharding struct {
mu sync.Mutex
}
+// rotateEndpoints returns a slice of all the input endpoints rotated a random
+// amount.
+func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint {
+ les := len(es)
+ if les == 0 {
+ return es
+ }
+ r := randIntN(les)
+ // Make a copy to avoid mutating data beyond the end of es.
+ ret := make([]resolver.Endpoint, les)
+ copy(ret, es[r:])
+ copy(ret[les-r:], es[:r])
+ return ret
+}
+
// UpdateClientConnState creates a child for new endpoints and deletes children
// for endpoints that are no longer present. It also updates all the children,
// and sends a single synchronous update of the childrens' aggregated state at
@@ -125,7 +150,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState
newChildren := resolver.NewEndpointMap[*balancerWrapper]()
// Update/Create new children.
- for _, endpoint := range state.ResolverState.Endpoints {
+ for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) {
if _, ok := newChildren.Get(endpoint); ok {
// Endpoint child was already created, continue to avoid duplicate
// update.
@@ -205,6 +230,16 @@ func (es *endpointSharding) Close() {
}
}
+func (es *endpointSharding) ExitIdle() {
+ es.childMu.Lock()
+ defer es.childMu.Unlock()
+ for _, bw := range es.children.Load().Values() {
+ if !bw.isClosed {
+ bw.child.ExitIdle()
+ }
+ }
+}
+
// updateState updates this component's state. It sends the aggregated state,
// and a picker with round robin behavior with all the child states present if
// needed.
@@ -261,7 +296,7 @@ func (es *endpointSharding) updateState() {
p := &pickerWithChildStates{
pickers: pickers,
childStates: childStates,
- next: uint32(rand.IntN(len(pickers))),
+ next: uint32(randIntN(len(pickers))),
}
es.cc.UpdateState(balancer.State{
ConnectivityState: aggState,
@@ -326,15 +361,13 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) {
// ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to
// avoid deadlocks due to synchronous balancer state updates.
func (bw *balancerWrapper) ExitIdle() {
- if ei, ok := bw.child.(balancer.ExitIdler); ok {
- go func() {
- bw.es.childMu.Lock()
- if !bw.isClosed {
- ei.ExitIdle()
- }
- bw.es.childMu.Unlock()
- }()
- }
+ go func() {
+ bw.es.childMu.Lock()
+ if !bw.isClosed {
+ bw.child.ExitIdle()
+ }
+ bw.es.childMu.Unlock()
+ }()
}
// updateClientConnStateLocked delivers the ClientConnState to the child
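
The endpointsharding.go hunks above feed the resolver's endpoint list through rotateEndpoints before creating children, so connection attempts start from a random endpoint instead of always the first one. A standalone sketch of the same copy-and-rotate step, with the offset passed in explicitly rather than drawn from randIntN so the result is deterministic:

```go
package main

import "fmt"

// rotate returns a copy of es rotated left by r positions, mirroring the
// copy(ret, es[r:]); copy(ret[n-r:], es[:r]) pattern in the hunk above.
// (Standalone sketch; the vendored code picks r via rand.IntN.)
func rotate(es []string, r int) []string {
	n := len(es)
	if n == 0 {
		return es
	}
	r %= n
	ret := make([]string, n)
	copy(ret, es[r:])
	copy(ret[n-r:], es[:r])
	return ret
}

func main() {
	eps := []string{"ep0", "ep1", "ep2", "ep3"}
	fmt.Println(rotate(eps, 2)) // [ep2 ep3 ep0 ep1]
}
```

Copying into a fresh slice keeps the resolver's own slice untouched, matching the "Make a copy to avoid mutating" comment in the vendored function.
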
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
index ea8899818..b4bc3a2bf 100644
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -16,55 +16,124 @@
*
*/
-// Package pickfirst contains the pick_first load balancing policy.
+// Package pickfirst contains the pick_first load balancing policy which
+// is the universal leaf policy.
package pickfirst
import (
"encoding/json"
"errors"
"fmt"
- rand "math/rand/v2"
+ "net"
+ "net/netip"
+ "sync"
+ "time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/pickfirst/internal"
"google.golang.org/grpc/connectivity"
+ expstats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
-
- _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
)
func init() {
- if envconfig.NewPickFirstEnabled {
- return
- }
balancer.Register(pickfirstBuilder{})
}
-var logger = grpclog.Component("pick-first-lb")
+// Name is the name of the pick_first balancer.
+const Name = "pick_first"
+
+// enableHealthListenerKeyType is a unique key type used in resolver
+// attributes to indicate whether the health listener usage is enabled.
+type enableHealthListenerKeyType struct{}
+
+var (
+ logger = grpclog.Component("pick-first-leaf-lb")
+ disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.disconnections",
+ Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
+ Unit: "{disconnection}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+ connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.connection_attempts_succeeded",
+ Description: "EXPERIMENTAL. Number of successful connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+ connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.connection_attempts_failed",
+ Description: "EXPERIMENTAL. Number of failed connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+)
const (
- // Name is the name of the pick_first balancer.
- Name = "pick_first"
- logPrefix = "[pick-first-lb %p] "
+ // TODO: change to pick-first when this becomes the default pick_first policy.
+ logPrefix = "[pick-first-leaf-lb %p] "
+ // connectionDelayInterval is the time to wait for during the happy eyeballs
+ // pass before starting the next connection attempt.
+ connectionDelayInterval = 250 * time.Millisecond
+)
+
+type ipAddrFamily int
+
+const (
+ // ipAddrFamilyUnknown represents strings that can't be parsed as an IP
+ // address.
+ ipAddrFamilyUnknown ipAddrFamily = iota
+ ipAddrFamilyV4
+ ipAddrFamilyV6
)
type pickfirstBuilder struct{}
-func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
- b := &pickfirstBalancer{cc: cc}
+func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
+ b := &pickfirstBalancer{
+ cc: cc,
+ target: bo.Target.String(),
+ metricsRecorder: cc.MetricsRecorder(),
+
+ subConns: resolver.NewAddressMapV2[*scData](),
+ state: connectivity.Connecting,
+ cancelConnectionTimer: func() {},
+ }
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
}
-func (pickfirstBuilder) Name() string {
+func (b pickfirstBuilder) Name() string {
return Name
}
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ var cfg pfConfig
+ if err := json.Unmarshal(js, &cfg); err != nil {
+ return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+ }
+ return cfg, nil
+}
+
+// EnableHealthListener updates the state to configure pickfirst for using a
+// generic health listener.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func EnableHealthListener(state resolver.State) resolver.State {
+ state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
+ return state
+}
+
type pfConfig struct {
serviceconfig.LoadBalancingConfig `json:"-"`
@@ -74,90 +143,129 @@ type pfConfig struct {
ShuffleAddressList bool `json:"shuffleAddressList"`
}
-func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
- var cfg pfConfig
- if err := json.Unmarshal(js, &cfg); err != nil {
- return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+// scData keeps track of the current state of the subConn.
+// It is not safe for concurrent access.
+type scData struct {
+ // The following fields are initialized at build time and read-only after
+ // that.
+ subConn balancer.SubConn
+ addr resolver.Address
+
+ rawConnectivityState connectivity.State
+ // The effective connectivity state based on raw connectivity, health state
+ // and after following sticky TransientFailure behaviour defined in A62.
+ effectiveState connectivity.State
+ lastErr error
+ connectionFailedInFirstPass bool
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+ sd := &scData{
+ rawConnectivityState: connectivity.Idle,
+ effectiveState: connectivity.Idle,
+ addr: addr,
}
- return cfg, nil
+ sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+ StateListener: func(state balancer.SubConnState) {
+ b.updateSubConnState(sd, state)
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ sd.subConn = sc
+ return sd, nil
}
type pickfirstBalancer struct {
- logger *internalgrpclog.PrefixLogger
- state connectivity.State
- cc balancer.ClientConn
- subConn balancer.SubConn
+ // The following fields are initialized at build time and read-only after
+ // that and therefore do not need to be guarded by a mutex.
+ logger *internalgrpclog.PrefixLogger
+ cc balancer.ClientConn
+ target string
+ metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil
+
+ // The mutex is used to ensure synchronization of updates triggered
+ // from the idle picker and the already serialized resolver,
+ // SubConn state updates.
+ mu sync.Mutex
+ // State reported to the channel based on SubConn states and resolver
+ // updates.
+ state connectivity.State
+	// scData for active SubConns mapped by address.
+ subConns *resolver.AddressMapV2[*scData]
+ addressList addressList
+ firstPass bool
+ numTF int
+ cancelConnectionTimer func()
+ healthCheckingEnabled bool
}
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determined the resolver update to be invalid.
func (b *pickfirstBalancer) ResolverError(err error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
if b.logger.V(2) {
b.logger.Infof("Received error from the name resolver: %v", err)
}
- if b.subConn == nil {
- b.state = connectivity.TransientFailure
- }
- if b.state != connectivity.TransientFailure {
- // The picker will not change since the balancer does not currently
- // report an error.
+ // The picker will not change since the balancer does not currently
+ // report an error. If the balancer hasn't received a single good resolver
+ // update yet, transition to TRANSIENT_FAILURE.
+ if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+ if b.logger.V(2) {
+ b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+ }
return
}
- b.cc.UpdateState(balancer.State{
+
+ b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
})
}
-// Shuffler is an interface for shuffling an address list.
-type Shuffler interface {
- ShuffleAddressListForTesting(n int, swap func(i, j int))
-}
-
-// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
-// is the number of elements. swap swaps the elements with indexes i and j.
-func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
-
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.cancelConnectionTimer()
if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
- // The resolver reported an empty address list. Treat it like an error by
- // calling b.ResolverError.
- if b.subConn != nil {
- // Shut down the old subConn. All addresses were removed, so it is
- // no longer valid.
- b.subConn.Shutdown()
- b.subConn = nil
- }
- b.ResolverError(errors.New("produced zero addresses"))
+ // Cleanup state pertaining to the previous resolver state.
+ // Treat an empty address list like an error by calling b.ResolverError.
+ b.closeSubConnsLocked()
+ b.addressList.updateAddrs(nil)
+ b.resolverErrorLocked(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}
- // We don't have to guard this block with the env var because ParseConfig
- // already does so.
+ b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
cfg, ok := state.BalancerConfig.(pfConfig)
if state.BalancerConfig != nil && !ok {
- return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
+ return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
}
if b.logger.V(2) {
b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
}
- var addrs []resolver.Address
+ var newAddrs []resolver.Address
if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
- // Perform the optional shuffling described in gRFC A62. The shuffling will
- // change the order of endpoints but not touch the order of the addresses
- // within each endpoint. - A61
+ // Perform the optional shuffling described in gRFC A62. The shuffling
+ // will change the order of endpoints but not touch the order of the
+ // addresses within each endpoint. - A61
if cfg.ShuffleAddressList {
endpoints = append([]resolver.Endpoint{}, endpoints...)
internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
}
- // "Flatten the list by concatenating the ordered list of addresses for each
- // of the endpoints, in order." - A61
+ // "Flatten the list by concatenating the ordered list of addresses for
+ // each of the endpoints, in order." - A61
for _, endpoint := range endpoints {
- // "In the flattened list, interleave addresses from the two address
- // families, as per RFC-8304 section 4." - A61
- // TODO: support the above language.
- addrs = append(addrs, endpoint.Addresses...)
+ newAddrs = append(newAddrs, endpoint.Addresses...)
}
} else {
// Endpoints not set, process addresses until we migrate resolver
@@ -166,42 +274,53 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// target do not forward the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
- addrs = state.ResolverState.Addresses
+ newAddrs = state.ResolverState.Addresses
if cfg.ShuffleAddressList {
- addrs = append([]resolver.Address{}, addrs...)
- rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+ newAddrs = append([]resolver.Address{}, newAddrs...)
+ internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
}
}
- if b.subConn != nil {
- b.cc.UpdateAddresses(b.subConn, addrs)
+ // If an address appears in multiple endpoints or in the same endpoint
+ // multiple times, we keep it only once. We will create only one SubConn
+ // for the address because an AddressMap is used to store SubConns.
+ // Not de-duplicating would result in attempting to connect to the same
+ // SubConn multiple times in the same pass. We don't want this.
+ newAddrs = deDupAddresses(newAddrs)
+ newAddrs = interleaveAddresses(newAddrs)
+
+ prevAddr := b.addressList.currentAddress()
+ prevSCData, found := b.subConns.Get(prevAddr)
+ prevAddrsCount := b.addressList.size()
+ isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
+ b.addressList.updateAddrs(newAddrs)
+
+ // If the previous ready SubConn exists in new address list,
+ // keep this connection and don't create new SubConns.
+ if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
return nil
}
- var subConn balancer.SubConn
- subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
- StateListener: func(state balancer.SubConnState) {
- b.updateSubConnState(subConn, state)
- },
- })
- if err != nil {
- if b.logger.V(2) {
- b.logger.Infof("Failed to create new SubConn: %v", err)
- }
- b.state = connectivity.TransientFailure
- b.cc.UpdateState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
+ b.reconcileSubConnsLocked(newAddrs)
+ // If it's the first resolver update or the balancer was already READY
+ // (but the new address list does not contain the ready SubConn) or
+ // CONNECTING, enter CONNECTING.
+ // We may be in TRANSIENT_FAILURE due to a previous empty address list,
+ // we should still enter CONNECTING because the sticky TF behaviour
+ // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+ // due to connectivity failures.
+ if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+ // Start connection attempt at first address.
+ b.forceUpdateConcludedStateLocked(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
- return balancer.ErrBadResolverState
+ b.startFirstPassLocked()
+ } else if b.state == connectivity.TransientFailure {
+ // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+ // we're READY. See A62.
+ b.startFirstPassLocked()
}
- b.subConn = subConn
- b.state = connectivity.Idle
- b.cc.UpdateState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- b.subConn.Connect()
return nil
}
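
ParseConfig above unmarshals pick_first's only knob, shuffleAddressList (the optional shuffling from gRFC A62), out of the service config. A minimal sketch of that round trip using a local struct that mirrors pfConfig's exported field; the struct and JSON literals here are illustrative, not the vendored types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// cfg mirrors the exported field of pfConfig shown above.
type cfg struct {
	ShuffleAddressList bool `json:"shuffleAddressList"`
}

func main() {
	// Roughly the fragment that reaches ParseConfig from a service config
	// entry such as {"loadBalancingConfig": [{"pick_first": {...}}]}.
	js := []byte(`{"shuffleAddressList": true}`)
	var c cfg
	if err := json.Unmarshal(js, &c); err != nil {
		panic(err)
	}
	fmt.Println("shuffle:", c.ShuffleAddressList) // shuffle: true
}
```
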
@@ -211,63 +330,484 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
}
-func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
- if b.logger.V(2) {
- b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
+func (b *pickfirstBalancer) Close() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.closeSubConnsLocked()
+ b.cancelConnectionTimer()
+ b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.state == connectivity.Idle {
+ // Move the balancer into CONNECTING state immediately. This is done to
+ // avoid staying in IDLE if a resolver update arrives before the first
+ // SubConn reports CONNECTING.
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ b.startFirstPassLocked()
+ }
+}
+
+func (b *pickfirstBalancer) startFirstPassLocked() {
+ b.firstPass = true
+ b.numTF = 0
+ // Reset the connection attempt record for existing SubConns.
+ for _, sd := range b.subConns.Values() {
+ sd.connectionFailedInFirstPass = false
+ }
+ b.requestConnectionLocked()
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+ for _, sd := range b.subConns.Values() {
+ sd.subConn.Shutdown()
+ }
+ b.subConns = resolver.NewAddressMapV2[*scData]()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+ seenAddrs := resolver.NewAddressMapV2[bool]()
+ retAddrs := []resolver.Address{}
+
+ for _, addr := range addrs {
+ if _, ok := seenAddrs.Get(addr); ok {
+ continue
+ }
+ seenAddrs.Set(addr, true)
+ retAddrs = append(retAddrs, addr)
+ }
+ return retAddrs
+}
+
+// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
+// as per RFC-8305 section 4.
+// Whichever address family is first in the list is followed by an address of
+// the other address family; that is, if the first address in the list is IPv6,
+// then the first IPv4 address should be moved up in the list to be second in
+// the list. It doesn't support configuring "First Address Family Count", i.e.
+// there will always be a single member of the first address family at the
+// beginning of the interleaved list.
+// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
+// "unknown" family for interleaving.
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
+func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
+ familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
+ interleavingOrder := []ipAddrFamily{}
+ for _, addr := range addrs {
+ family := addressFamily(addr.Addr)
+ if _, found := familyAddrsMap[family]; !found {
+ interleavingOrder = append(interleavingOrder, family)
+ }
+ familyAddrsMap[family] = append(familyAddrsMap[family], addr)
+ }
+
+ interleavedAddrs := make([]resolver.Address, 0, len(addrs))
+
+ for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
+ // Some IP types may have fewer addresses than others, so we look for
+ // the next type that has a remaining member to add to the interleaved
+ // list.
+ family := interleavingOrder[curFamilyIdx]
+ remainingMembers := familyAddrsMap[family]
+ if len(remainingMembers) > 0 {
+ interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
+ familyAddrsMap[family] = remainingMembers[1:]
+ }
+ }
+
+ return interleavedAddrs
+}
+
+// addressFamily returns the ipAddrFamily after parsing the address string.
+// If the address isn't of the format "ip-address:port", it returns
+// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
+// using a resolver like passthrough where the address may be a hostname in
+// some format that the dialer can resolve.
+func addressFamily(address string) ipAddrFamily {
+ // Parse the IP after removing the port.
+ host, _, err := net.SplitHostPort(address)
+ if err != nil {
+ return ipAddrFamilyUnknown
+ }
+ ip, err := netip.ParseAddr(host)
+ if err != nil {
+ return ipAddrFamilyUnknown
+ }
+ switch {
+ case ip.Is4() || ip.Is4In6():
+ return ipAddrFamilyV4
+ case ip.Is6():
+ return ipAddrFamilyV6
+ default:
+ return ipAddrFamilyUnknown
+ }
+}
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+// - closing subchannels: any existing subchannels associated with addresses
+// that are no longer in the updated list are shut down.
+// - removing subchannels: entries for these closed subchannels are removed
+// from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+ newAddrsMap := resolver.NewAddressMapV2[bool]()
+ for _, addr := range newAddrs {
+ newAddrsMap.Set(addr, true)
+ }
+
+ for _, oldAddr := range b.subConns.Keys() {
+ if _, ok := newAddrsMap.Get(oldAddr); ok {
+ continue
+ }
+ val, _ := b.subConns.Get(oldAddr)
+ val.subConn.Shutdown()
+ b.subConns.Delete(oldAddr)
+ }
+}
+
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes ready, which means that all other SubConns must be shut down.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+ b.cancelConnectionTimer()
+ for _, sd := range b.subConns.Values() {
+ if sd.subConn != selected.subConn {
+ sd.subConn.Shutdown()
+ }
+ }
+ b.subConns = resolver.NewAddressMapV2[*scData]()
+ b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, a connection to the next address is
+// attempted until a subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+ if !b.addressList.isValid() {
+ return
+ }
+ var lastErr error
+ for valid := true; valid; valid = b.addressList.increment() {
+ curAddr := b.addressList.currentAddress()
+ sd, ok := b.subConns.Get(curAddr)
+ if !ok {
+ var err error
+ // We want to assign the new scData to sd from the outer scope,
+ // hence we can't use := below.
+ sd, err = b.newSCData(curAddr)
+ if err != nil {
+ // This should never happen, unless the clientConn is being shut
+ // down.
+ if b.logger.V(2) {
+ b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+ }
+ // Do nothing, the LB policy will be closed soon.
+ return
+ }
+ b.subConns.Set(curAddr, sd)
+ }
+
+ switch sd.rawConnectivityState {
+ case connectivity.Idle:
+ sd.subConn.Connect()
+ b.scheduleNextConnectionLocked()
+ return
+ case connectivity.TransientFailure:
+ // The SubConn is being re-used and failed during a previous pass
+ // over the addressList. It has not completed backoff yet.
+ // Mark it as having failed and try the next address.
+ sd.connectionFailedInFirstPass = true
+ lastErr = sd.lastErr
+ continue
+ case connectivity.Connecting:
+ // Wait for the connection attempt to complete or the timer to fire
+ // before attempting the next address.
+ b.scheduleNextConnectionLocked()
+ return
+ default:
+ b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
+ return
+
+ }
+ }
+
+ // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
+ // first pass if possible.
+ b.endFirstPassIfPossibleLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
+ b.cancelConnectionTimer()
+ if !b.addressList.hasNext() {
+ return
}
- if b.subConn != subConn {
+ curAddr := b.addressList.currentAddress()
+ cancelled := false // Access to this is protected by the balancer's mutex.
+ closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // If the scheduled task is cancelled while acquiring the mutex, return.
+ if cancelled {
+ return
+ }
if b.logger.V(2) {
- b.logger.Infof("Ignored state change because subConn is not recognized")
+ b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
+ }
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
}
+ })
+ // Access to the cancellation callback held by the balancer is guarded by
+ // the balancer's mutex, so it's safe to set the boolean from the callback.
+ b.cancelConnectionTimer = sync.OnceFunc(func() {
+ cancelled = true
+ closeFn()
+ })
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ oldState := sd.rawConnectivityState
+ sd.rawConnectivityState = newState.ConnectivityState
+ // Previously relevant SubConns can still callback with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes this
+ // SubConn.
+ if !b.isActiveSCData(sd) {
return
}
- if state.ConnectivityState == connectivity.Shutdown {
- b.subConn = nil
+ if newState.ConnectivityState == connectivity.Shutdown {
+ sd.effectiveState = connectivity.Shutdown
return
}
- switch state.ConnectivityState {
- case connectivity.Ready:
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
- })
- case connectivity.Connecting:
- if b.state == connectivity.TransientFailure {
- // We stay in TransientFailure until we are Ready. See A62.
+ // Record a connection attempt when exiting CONNECTING.
+ if newState.ConnectivityState == connectivity.TransientFailure {
+ sd.connectionFailedInFirstPass = true
+ connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
+ }
+
+ if newState.ConnectivityState == connectivity.Ready {
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+ b.shutdownRemainingLocked(sd)
+ if !b.addressList.seekTo(sd.addr) {
+ // This should not fail as we should have only one SubConn after
+ // entering READY. The SubConn should be present in the addressList.
+			b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses)
return
}
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
+ if !b.healthCheckingEnabled {
+ if b.logger.V(2) {
+ b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
+ }
+
+ sd.effectiveState = connectivity.Ready
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+ })
+ return
+ }
+ if b.logger.V(2) {
+ b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
+ }
+ // Send a CONNECTING update to take the SubConn out of sticky-TF if
+ // required.
+ sd.effectiveState = connectivity.Connecting
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
+ sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
+ b.updateSubConnHealthState(sd, scs)
+ })
+ return
+ }
+
+ // If the LB policy is READY, and it receives a subchannel state change,
+ // it means that the READY subchannel has failed.
+ // A SubConn can also transition from CONNECTING directly to IDLE when
+ // a transport is successfully created, but the connection fails
+ // before the SubConn can send the notification for READY. We treat
+ // this as a successful connection and transition to IDLE.
+ // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+ // part of the if condition below once the issue is fixed.
+ if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+ // Once a transport fails, the balancer enters IDLE and starts from
+ // the first address when the picker is used.
+ b.shutdownRemainingLocked(sd)
+ sd.effectiveState = newState.ConnectivityState
+ // READY SubConn interspliced in between CONNECTING and IDLE, need to
+ // account for that.
+ if oldState == connectivity.Connecting {
+ // A known issue (https://github.com/grpc/grpc-go/issues/7862)
+ // causes a race that prevents the READY state change notification.
+ // This works around it.
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+ }
+ disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
+ b.addressList.reset()
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Idle,
+ Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+ })
+ return
+ }
+
+ if b.firstPass {
+ switch newState.ConnectivityState {
+ case connectivity.Connecting:
+ // The effective state can be in either IDLE, CONNECTING or
+ // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
+ // TRANSIENT_FAILURE until it's READY. See A62.
+ if sd.effectiveState != connectivity.TransientFailure {
+ sd.effectiveState = connectivity.Connecting
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ }
+ case connectivity.TransientFailure:
+ sd.lastErr = newState.ConnectionError
+ sd.effectiveState = connectivity.TransientFailure
+ // Since we're re-using common SubConns while handling resolver
+ // updates, we could receive an out of turn TRANSIENT_FAILURE from
+ // a pass over the previous address list. Happy Eyeballs will also
+ // cause out of order updates to arrive.
+
+ if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+ b.cancelConnectionTimer()
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
+ return
+ }
+ }
+
+ // End the first pass if we've seen a TRANSIENT_FAILURE from all
+ // SubConns once.
+ b.endFirstPassIfPossibleLocked(newState.ConnectionError)
+ }
+ return
+ }
+
+ // We have finished the first pass, keep re-connecting failing SubConns.
+ switch newState.ConnectivityState {
+ case connectivity.TransientFailure:
+ b.numTF = (b.numTF + 1) % b.subConns.Len()
+ sd.lastErr = newState.ConnectionError
+ if b.numTF%b.subConns.Len() == 0 {
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: newState.ConnectionError},
+ })
+ }
+ // We don't need to request re-resolution since the SubConn already
+ // does that before reporting TRANSIENT_FAILURE.
+ // TODO: #7534 - Move re-resolution requests from SubConn into
+ // pick_first.
case connectivity.Idle:
- if b.state == connectivity.TransientFailure {
- // We stay in TransientFailure until we are Ready. Also kick the
- // subConn out of Idle into Connecting. See A62.
- b.subConn.Connect()
+ sd.subConn.Connect()
+ }
+}
+
+// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
+// addresses are tried and their SubConns have reported a failure.
+func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
+ // An optimization to avoid iterating over the entire SubConn map.
+ if b.addressList.isValid() {
+ return
+ }
+ // Connect() has been called on all the SubConns. The first pass can be
+ // ended if all the SubConns have reported a failure.
+ for _, sd := range b.subConns.Values() {
+ if !sd.connectionFailedInFirstPass {
return
}
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &idlePicker{subConn: subConn},
+ }
+ b.firstPass = false
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: lastErr},
+ })
+ // Start re-connecting all the SubConns that are already in IDLE.
+ for _, sd := range b.subConns.Values() {
+ if sd.rawConnectivityState == connectivity.Idle {
+ sd.subConn.Connect()
+ }
+ }
+}
+
+func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
+ activeSD, found := b.subConns.Get(sd.addr)
+ return found && activeSD == sd
+}
+
+func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // Previously relevant SubConns can still callback with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes
+ // this SubConn.
+ if !b.isActiveSCData(sd) {
+ return
+ }
+ sd.effectiveState = state.ConnectivityState
+ switch state.ConnectivityState {
+ case connectivity.Ready:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
})
case connectivity.TransientFailure:
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &picker{err: state.ConnectionError},
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
+ })
+ case connectivity.Connecting:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
+ default:
+		b.logger.Errorf("Got unexpected health update for SubConn %p: %v", sd.subConn, state)
}
- b.state = state.ConnectivityState
}
-func (b *pickfirstBalancer) Close() {
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+ // In case of TransientFailures allow the picker to be updated to update
+ // the connectivity error, in all other cases don't send duplicate state
+ // updates.
+ if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+ return
+ }
+ b.forceUpdateConcludedStateLocked(newState)
}
-func (b *pickfirstBalancer) ExitIdle() {
- if b.subConn != nil && b.state == connectivity.Idle {
- b.subConn.Connect()
- }
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force update the ClientConn state since the
+// channel doesn't correctly assume that LB policies start in CONNECTING and
+// relies on LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+ b.state = newState.ConnectivityState
+ b.cc.UpdateState(newState)
}
type picker struct {
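
scheduleNextConnectionLocked in the hunk above arms a Happy Eyeballs timer whose callback re-checks a cancelled flag under the balancer mutex, with sync.OnceFunc making cancellation idempotent. A simplified standalone sketch of that pattern, using time.AfterFunc in place of the internal TimeAfterFunc helper; unlike the vendored code, the cancel function here takes the lock itself rather than relying on the caller already holding it:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// scheduleOnce runs fn after d unless cancel is called first. The callback
// re-checks the cancelled flag under mu, and sync.OnceFunc makes repeated
// cancellation harmless. (Simplified sketch of the pattern above.)
func scheduleOnce(mu *sync.Mutex, d time.Duration, fn func()) (cancel func()) {
	cancelled := false
	t := time.AfterFunc(d, func() {
		mu.Lock()
		defer mu.Unlock()
		if cancelled {
			return
		}
		fn()
	})
	return sync.OnceFunc(func() {
		mu.Lock()
		cancelled = true
		mu.Unlock()
		t.Stop()
	})
}

func main() {
	var mu sync.Mutex
	cancel := scheduleOnce(&mu, 50*time.Millisecond, func() { fmt.Println("timer fired") })
	defer cancel()
	time.Sleep(100 * time.Millisecond) // prints "timer fired"
}
```
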
@@ -282,10 +822,87 @@ func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
// CONNECTING when Pick is called.
type idlePicker struct {
- subConn balancer.SubConn
+ exitIdle func()
}
func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- i.subConn.Connect()
+ i.exitIdle()
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a 1 dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
+type addressList struct {
+ addresses []resolver.Address
+ idx int
+}
+
+func (al *addressList) isValid() bool {
+ return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+ return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+ if !al.isValid() {
+ return false
+ }
+ al.idx++
+ return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+ if !al.isValid() {
+ return resolver.Address{}
+ }
+ return al.addresses[al.idx]
+}
+
+func (al *addressList) reset() {
+ al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+ al.addresses = addrs
+ al.reset()
+}
+
+// seekTo returns false if the needle was not found and the current index was
+// left unchanged.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+ for ai, addr := range al.addresses {
+ if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+ continue
+ }
+ al.idx = ai
+ return true
+ }
+ return false
+}
+
+// hasNext returns whether incrementing the addressList will result in moving
+// past the end of the list. If the list has already moved past the end, it
+// returns false.
+func (al *addressList) hasNext() bool {
+ if !al.isValid() {
+ return false
+ }
+ return al.idx+1 < len(al.addresses)
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes)
+}
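
interleaveAddresses above applies the RFC 8305 section 4 interleaving so that an outage of one address family cannot stall an entire pass over the flattened list. A simplified two-family standalone sketch of the resulting order; the vendored function handles any number of families in order of first appearance:

```go
package main

import "fmt"

// interleave alternates between two already-ordered family lists, starting
// with whichever family appeared first, and appends leftovers once the
// shorter list runs out. (Two-family simplification of the logic above.)
func interleave(first, second []string) []string {
	out := make([]string, 0, len(first)+len(second))
	for i := 0; i < len(first) || i < len(second); i++ {
		if i < len(first) {
			out = append(out, first[i])
		}
		if i < len(second) {
			out = append(out, second[i])
		}
	}
	return out
}

func main() {
	v6 := []string{"[2001:db8::1]:443", "[2001:db8::2]:443"}
	v4 := []string{"10.0.0.1:443", "10.0.0.2:443", "10.0.0.3:443"}
	// The resolver returned the IPv6 addresses first, so IPv6 leads.
	fmt.Println(interleave(v6, v4))
	// [[2001:db8::1]:443 10.0.0.1:443 [2001:db8::2]:443 10.0.0.2:443 10.0.0.3:443]
}
```
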
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
deleted file mode 100644
index 494314f23..000000000
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
+++ /dev/null
@@ -1,927 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package pickfirstleaf contains the pick_first load balancing policy which
-// will be the universal leaf policy after dualstack changes are implemented.
-//
-// # Experimental
-//
-// Notice: This package is EXPERIMENTAL and may be changed or removed in a
-// later release.
-package pickfirstleaf
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net"
- "net/netip"
- "sync"
- "time"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/pickfirst/internal"
- "google.golang.org/grpc/connectivity"
- expstats "google.golang.org/grpc/experimental/stats"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/envconfig"
- internalgrpclog "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/pretty"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-)
-
-func init() {
- if envconfig.NewPickFirstEnabled {
- // Register as the default pick_first balancer.
- Name = "pick_first"
- }
- balancer.Register(pickfirstBuilder{})
-}
-
-type (
- // enableHealthListenerKeyType is a unique key type used in resolver
- // attributes to indicate whether the health listener usage is enabled.
- enableHealthListenerKeyType struct{}
- // managedByPickfirstKeyType is an attribute key type to inform Outlier
- // Detection that the generic health listener is being used.
- // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when
- // implementing the dualstack design. This is a hack. Once Dualstack is
- // completed, outlier detection will stop sending ejection updates through
- // the connectivity listener.
- managedByPickfirstKeyType struct{}
-)
-
-var (
- logger = grpclog.Component("pick-first-leaf-lb")
- // Name is the name of the pick_first_leaf balancer.
- // It is changed to "pick_first" in init() if this balancer is to be
- // registered as the default pickfirst.
- Name = "pick_first_leaf"
- disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.disconnections",
- Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
- Unit: "disconnection",
- Labels: []string{"grpc.target"},
- Default: false,
- })
- connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.connection_attempts_succeeded",
- Description: "EXPERIMENTAL. Number of successful connection attempts.",
- Unit: "attempt",
- Labels: []string{"grpc.target"},
- Default: false,
- })
- connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.connection_attempts_failed",
- Description: "EXPERIMENTAL. Number of failed connection attempts.",
- Unit: "attempt",
- Labels: []string{"grpc.target"},
- Default: false,
- })
-)
-
-const (
- // TODO: change to pick-first when this becomes the default pick_first policy.
- logPrefix = "[pick-first-leaf-lb %p] "
- // connectionDelayInterval is the time to wait for during the happy eyeballs
- // pass before starting the next connection attempt.
- connectionDelayInterval = 250 * time.Millisecond
-)
-
-type ipAddrFamily int
-
-const (
- // ipAddrFamilyUnknown represents strings that can't be parsed as an IP
- // address.
- ipAddrFamilyUnknown ipAddrFamily = iota
- ipAddrFamilyV4
- ipAddrFamilyV6
-)
-
-type pickfirstBuilder struct{}
-
-func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
- b := &pickfirstBalancer{
- cc: cc,
- target: bo.Target.String(),
- metricsRecorder: cc.MetricsRecorder(),
-
- subConns: resolver.NewAddressMapV2[*scData](),
- state: connectivity.Connecting,
- cancelConnectionTimer: func() {},
- }
- b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
- return b
-}
-
-func (b pickfirstBuilder) Name() string {
- return Name
-}
-
-func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
- var cfg pfConfig
- if err := json.Unmarshal(js, &cfg); err != nil {
- return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
- }
- return cfg, nil
-}
-
-// EnableHealthListener updates the state to configure pickfirst for using a
-// generic health listener.
-func EnableHealthListener(state resolver.State) resolver.State {
- state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
- return state
-}
-
-// IsManagedByPickfirst returns whether an address belongs to a SubConn
-// managed by the pickfirst LB policy.
-// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable
-// outlier_detection via the with connectivity listener when using pick_first.
-// Once Dualstack changes are complete, all SubConns will be created by
-// pick_first and outlier detection will only use the health listener for
-// ejection. This hack can then be removed.
-func IsManagedByPickfirst(addr resolver.Address) bool {
- return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil
-}
-
-type pfConfig struct {
- serviceconfig.LoadBalancingConfig `json:"-"`
-
- // If set to true, instructs the LB policy to shuffle the order of the list
- // of endpoints received from the name resolver before attempting to
- // connect to them.
- ShuffleAddressList bool `json:"shuffleAddressList"`
-}
-
-// scData keeps track of the current state of the subConn.
-// It is not safe for concurrent access.
-type scData struct {
- // The following fields are initialized at build time and read-only after
- // that.
- subConn balancer.SubConn
- addr resolver.Address
-
- rawConnectivityState connectivity.State
- // The effective connectivity state based on raw connectivity, health state
- // and after following sticky TransientFailure behaviour defined in A62.
- effectiveState connectivity.State
- lastErr error
- connectionFailedInFirstPass bool
-}
-
-func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
- addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true)
- sd := &scData{
- rawConnectivityState: connectivity.Idle,
- effectiveState: connectivity.Idle,
- addr: addr,
- }
- sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
- StateListener: func(state balancer.SubConnState) {
- b.updateSubConnState(sd, state)
- },
- })
- if err != nil {
- return nil, err
- }
- sd.subConn = sc
- return sd, nil
-}
-
-type pickfirstBalancer struct {
- // The following fields are initialized at build time and read-only after
- // that and therefore do not need to be guarded by a mutex.
- logger *internalgrpclog.PrefixLogger
- cc balancer.ClientConn
- target string
- metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil
-
- // The mutex is used to ensure synchronization of updates triggered
- // from the idle picker and the already serialized resolver,
- // SubConn state updates.
- mu sync.Mutex
- // State reported to the channel based on SubConn states and resolver
- // updates.
- state connectivity.State
- // scData for active subonns mapped by address.
- subConns *resolver.AddressMapV2[*scData]
- addressList addressList
- firstPass bool
- numTF int
- cancelConnectionTimer func()
- healthCheckingEnabled bool
-}
-
-// ResolverError is called by the ClientConn when the name resolver produces
-// an error or when pickfirst determined the resolver update to be invalid.
-func (b *pickfirstBalancer) ResolverError(err error) {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.resolverErrorLocked(err)
-}
-
-func (b *pickfirstBalancer) resolverErrorLocked(err error) {
- if b.logger.V(2) {
- b.logger.Infof("Received error from the name resolver: %v", err)
- }
-
- // The picker will not change since the balancer does not currently
- // report an error. If the balancer hasn't received a single good resolver
- // update yet, transition to TRANSIENT_FAILURE.
- if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
- if b.logger.V(2) {
- b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
- }
- return
- }
-
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
- })
-}
-
-func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.cancelConnectionTimer()
- if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
- // Cleanup state pertaining to the previous resolver state.
- // Treat an empty address list like an error by calling b.ResolverError.
- b.closeSubConnsLocked()
- b.addressList.updateAddrs(nil)
- b.resolverErrorLocked(errors.New("produced zero addresses"))
- return balancer.ErrBadResolverState
- }
- b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
- cfg, ok := state.BalancerConfig.(pfConfig)
- if state.BalancerConfig != nil && !ok {
- return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
- }
-
- if b.logger.V(2) {
- b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
- }
-
- var newAddrs []resolver.Address
- if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
- // Perform the optional shuffling described in gRFC A62. The shuffling
- // will change the order of endpoints but not touch the order of the
- // addresses within each endpoint. - A61
- if cfg.ShuffleAddressList {
- endpoints = append([]resolver.Endpoint{}, endpoints...)
- internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
- }
-
- // "Flatten the list by concatenating the ordered list of addresses for
- // each of the endpoints, in order." - A61
- for _, endpoint := range endpoints {
- newAddrs = append(newAddrs, endpoint.Addresses...)
- }
- } else {
- // Endpoints not set, process addresses until we migrate resolver
- // emissions fully to Endpoints. The top channel does wrap emitted
- // addresses with endpoints, however some balancers such as weighted
- // target do not forward the corresponding correct endpoints down/split
- // endpoints properly. Once all balancers correctly forward endpoints
- // down, can delete this else conditional.
- newAddrs = state.ResolverState.Addresses
- if cfg.ShuffleAddressList {
- newAddrs = append([]resolver.Address{}, newAddrs...)
- internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
- }
- }
-
- // If an address appears in multiple endpoints or in the same endpoint
- // multiple times, we keep it only once. We will create only one SubConn
- // for the address because an AddressMap is used to store SubConns.
- // Not de-duplicating would result in attempting to connect to the same
- // SubConn multiple times in the same pass. We don't want this.
- newAddrs = deDupAddresses(newAddrs)
- newAddrs = interleaveAddresses(newAddrs)
-
- prevAddr := b.addressList.currentAddress()
- prevSCData, found := b.subConns.Get(prevAddr)
- prevAddrsCount := b.addressList.size()
- isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
- b.addressList.updateAddrs(newAddrs)
-
- // If the previous ready SubConn exists in new address list,
- // keep this connection and don't create new SubConns.
- if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
- return nil
- }
-
- b.reconcileSubConnsLocked(newAddrs)
- // If it's the first resolver update or the balancer was already READY
- // (but the new address list does not contain the ready SubConn) or
- // CONNECTING, enter CONNECTING.
- // We may be in TRANSIENT_FAILURE due to a previous empty address list,
- // we should still enter CONNECTING because the sticky TF behaviour
- // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
- // due to connectivity failures.
- if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
- // Start connection attempt at first address.
- b.forceUpdateConcludedStateLocked(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- b.startFirstPassLocked()
- } else if b.state == connectivity.TransientFailure {
- // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
- // we're READY. See A62.
- b.startFirstPassLocked()
- }
- return nil
-}
-
-// UpdateSubConnState is unused as a StateListener is always registered when
-// creating SubConns.
-func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
- b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
-}
-
-func (b *pickfirstBalancer) Close() {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.closeSubConnsLocked()
- b.cancelConnectionTimer()
- b.state = connectivity.Shutdown
-}
-
-// ExitIdle moves the balancer out of idle state. It can be called concurrently
-// by the idlePicker and clientConn so access to variables should be
-// synchronized.
-func (b *pickfirstBalancer) ExitIdle() {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.state == connectivity.Idle {
- b.startFirstPassLocked()
- }
-}
-
-func (b *pickfirstBalancer) startFirstPassLocked() {
- b.firstPass = true
- b.numTF = 0
- // Reset the connection attempt record for existing SubConns.
- for _, sd := range b.subConns.Values() {
- sd.connectionFailedInFirstPass = false
- }
- b.requestConnectionLocked()
-}
-
-func (b *pickfirstBalancer) closeSubConnsLocked() {
- for _, sd := range b.subConns.Values() {
- sd.subConn.Shutdown()
- }
- b.subConns = resolver.NewAddressMapV2[*scData]()
-}
-
-// deDupAddresses ensures that each address appears only once in the slice.
-func deDupAddresses(addrs []resolver.Address) []resolver.Address {
- seenAddrs := resolver.NewAddressMapV2[*scData]()
- retAddrs := []resolver.Address{}
-
- for _, addr := range addrs {
- if _, ok := seenAddrs.Get(addr); ok {
- continue
- }
- retAddrs = append(retAddrs, addr)
- }
- return retAddrs
-}
-
-// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
-// as per RFC-8305 section 4.
-// Whichever address family is first in the list is followed by an address of
-// the other address family; that is, if the first address in the list is IPv6,
-// then the first IPv4 address should be moved up in the list to be second in
-// the list. It doesn't support configuring "First Address Family Count", i.e.
-// there will always be a single member of the first address family at the
-// beginning of the interleaved list.
-// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
-// "unknown" family for interleaving.
-// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
-func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
- familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
- interleavingOrder := []ipAddrFamily{}
- for _, addr := range addrs {
- family := addressFamily(addr.Addr)
- if _, found := familyAddrsMap[family]; !found {
- interleavingOrder = append(interleavingOrder, family)
- }
- familyAddrsMap[family] = append(familyAddrsMap[family], addr)
- }
-
- interleavedAddrs := make([]resolver.Address, 0, len(addrs))
-
- for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
- // Some IP types may have fewer addresses than others, so we look for
- // the next type that has a remaining member to add to the interleaved
- // list.
- family := interleavingOrder[curFamilyIdx]
- remainingMembers := familyAddrsMap[family]
- if len(remainingMembers) > 0 {
- interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
- familyAddrsMap[family] = remainingMembers[1:]
- }
- }
-
- return interleavedAddrs
-}
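A minimal standalone sketch of the interleaving step above, with the address family supplied explicitly so the example stays short; the names here are illustrative and not part of the vendored code.

package main

import "fmt"

type labeledAddr struct {
	addr   string
	family string // "v4", "v6", or "unknown"
}

// interleave walks the families round-robin, in the order they first appear,
// skipping families whose queue has been exhausted.
func interleave(addrs []labeledAddr) []string {
	queues := map[string][]string{}
	order := []string{}
	for _, a := range addrs {
		if _, seen := queues[a.family]; !seen {
			order = append(order, a.family)
		}
		queues[a.family] = append(queues[a.family], a.addr)
	}
	out := make([]string, 0, len(addrs))
	for i := 0; len(out) < len(addrs); i = (i + 1) % len(order) {
		f := order[i]
		if q := queues[f]; len(q) > 0 {
			out = append(out, q[0])
			queues[f] = q[1:]
		}
	}
	return out
}

func main() {
	out := interleave([]labeledAddr{
		{"[2001:db8::1]:443", "v6"},
		{"[2001:db8::2]:443", "v6"},
		{"192.0.2.1:443", "v4"},
	})
	fmt.Println(out)
	// [[2001:db8::1]:443 192.0.2.1:443 [2001:db8::2]:443]
}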
-
-// addressFamily returns the ipAddrFamily after parsing the address string.
-// If the address isn't of the format "ip-address:port", it returns
-// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
-// using a resolver like passthrough where the address may be a hostname in
-// some format that the dialer can resolve.
-func addressFamily(address string) ipAddrFamily {
- // Parse the IP after removing the port.
- host, _, err := net.SplitHostPort(address)
- if err != nil {
- return ipAddrFamilyUnknown
- }
- ip, err := netip.ParseAddr(host)
- if err != nil {
- return ipAddrFamilyUnknown
- }
- switch {
- case ip.Is4() || ip.Is4In6():
- return ipAddrFamilyV4
- case ip.Is6():
- return ipAddrFamilyV6
- default:
- return ipAddrFamilyUnknown
- }
-}
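For reference, a self-contained sketch of the same classification: host:port strings parse into IPv4 (including IPv4-mapped IPv6), IPv6, or fall through to unknown for hostnames and strings without a port.

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func classify(addr string) string {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return "unknown" // no port, or not parseable as host:port
	}
	ip, err := netip.ParseAddr(host)
	if err != nil {
		return "unknown" // e.g. a hostname the dialer resolves later
	}
	switch {
	case ip.Is4() || ip.Is4In6():
		return "ipv4" // IPv4-mapped IPv6 counts as IPv4 here
	case ip.Is6():
		return "ipv6"
	default:
		return "unknown"
	}
}

func main() {
	for _, a := range []string{
		"192.0.2.1:443",
		"[::ffff:192.0.2.1]:443",
		"[2001:db8::1]:443",
		"example.com:443",
		"192.0.2.1",
	} {
		fmt.Printf("%-26s %s\n", a, classify(a))
	}
}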
-
-// reconcileSubConnsLocked updates the active subchannels based on a new address
-// list from the resolver. It does this by:
-// - closing subchannels: any existing subchannels associated with addresses
-// that are no longer in the updated list are shut down.
-// - removing subchannels: entries for these closed subchannels are removed
-// from the subchannel map.
-//
-// This ensures that the subchannel map accurately reflects the current set of
-// addresses received from the name resolver.
-func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
- newAddrsMap := resolver.NewAddressMapV2[bool]()
- for _, addr := range newAddrs {
- newAddrsMap.Set(addr, true)
- }
-
- for _, oldAddr := range b.subConns.Keys() {
- if _, ok := newAddrsMap.Get(oldAddr); ok {
- continue
- }
- val, _ := b.subConns.Get(oldAddr)
- val.subConn.Shutdown()
- b.subConns.Delete(oldAddr)
- }
-}
-
-// shutdownRemainingLocked shuts down the remaining SubConns. It is called when
-// a SubConn becomes READY, at which point all other SubConns must be shut down.
-func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
- b.cancelConnectionTimer()
- for _, sd := range b.subConns.Values() {
- if sd.subConn != selected.subConn {
- sd.subConn.Shutdown()
- }
- }
- b.subConns = resolver.NewAddressMapV2[*scData]()
- b.subConns.Set(selected.addr, selected)
-}
-
-// requestConnectionLocked starts connecting on the subchannel corresponding to
-// the current address. If no subchannel exists, one is created. If the current
-// subchannel is in TransientFailure, a connection to the next address is
-// attempted until a subchannel is found.
-func (b *pickfirstBalancer) requestConnectionLocked() {
- if !b.addressList.isValid() {
- return
- }
- var lastErr error
- for valid := true; valid; valid = b.addressList.increment() {
- curAddr := b.addressList.currentAddress()
- sd, ok := b.subConns.Get(curAddr)
- if !ok {
- var err error
- // We want to assign the new scData to sd from the outer scope,
- // hence we can't use := below.
- sd, err = b.newSCData(curAddr)
- if err != nil {
- // This should never happen, unless the clientConn is being shut
- // down.
- if b.logger.V(2) {
- b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
- }
- // Do nothing, the LB policy will be closed soon.
- return
- }
- b.subConns.Set(curAddr, sd)
- }
-
- switch sd.rawConnectivityState {
- case connectivity.Idle:
- sd.subConn.Connect()
- b.scheduleNextConnectionLocked()
- return
- case connectivity.TransientFailure:
- // The SubConn is being re-used and failed during a previous pass
- // over the addressList. It has not completed backoff yet.
- // Mark it as having failed and try the next address.
- sd.connectionFailedInFirstPass = true
- lastErr = sd.lastErr
- continue
- case connectivity.Connecting:
- // Wait for the connection attempt to complete or the timer to fire
- // before attempting the next address.
- b.scheduleNextConnectionLocked()
- return
- default:
- b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
- return
-
- }
- }
-
- // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
- // first pass if possible.
- b.endFirstPassIfPossibleLocked(lastErr)
-}
-
-func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
- b.cancelConnectionTimer()
- if !b.addressList.hasNext() {
- return
- }
- curAddr := b.addressList.currentAddress()
- cancelled := false // Access to this is protected by the balancer's mutex.
- closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
- b.mu.Lock()
- defer b.mu.Unlock()
- // If the scheduled task is cancelled while acquiring the mutex, return.
- if cancelled {
- return
- }
- if b.logger.V(2) {
- b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
- }
- if b.addressList.increment() {
- b.requestConnectionLocked()
- }
- })
- // Access to the cancellation callback held by the balancer is guarded by
- // the balancer's mutex, so it's safe to set the boolean from the callback.
- b.cancelConnectionTimer = sync.OnceFunc(func() {
- cancelled = true
- closeFn()
- })
-}
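A standalone sketch of the cancellation pattern used here, with illustrative names: the timer callback and the cancel function coordinate through a flag guarded by the same mutex, and sync.OnceFunc makes cancellation idempotent.

package main

import (
	"fmt"
	"sync"
	"time"
)

type scheduler struct {
	mu     sync.Mutex
	cancel func() // replaced on every schedule; invoked with mu held
}

func (s *scheduler) schedule(d time.Duration, task func()) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.cancel()         // drop any previously scheduled task
	cancelled := false // guarded by s.mu
	t := time.AfterFunc(d, func() {
		s.mu.Lock()
		defer s.mu.Unlock()
		if cancelled {
			return // cancel won the race while this callback waited for the lock
		}
		task()
	})
	s.cancel = sync.OnceFunc(func() {
		cancelled = true
		t.Stop()
	})
}

func (s *scheduler) cancelScheduled() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.cancel()
}

func main() {
	s := &scheduler{cancel: func() {}}
	s.schedule(10*time.Millisecond, func() { fmt.Println("fired") })
	time.Sleep(30 * time.Millisecond)

	s.schedule(10*time.Millisecond, func() { fmt.Println("never printed") })
	s.cancelScheduled()
	time.Sleep(30 * time.Millisecond)
}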
-
-func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
- b.mu.Lock()
- defer b.mu.Unlock()
- oldState := sd.rawConnectivityState
- sd.rawConnectivityState = newState.ConnectivityState
- // Previously relevant SubConns can still callback with state updates.
- // To prevent pickers from returning these obsolete SubConns, this logic
- // is included to check if the current list of active SubConns includes this
- // SubConn.
- if !b.isActiveSCData(sd) {
- return
- }
- if newState.ConnectivityState == connectivity.Shutdown {
- sd.effectiveState = connectivity.Shutdown
- return
- }
-
- // Record a connection attempt when exiting CONNECTING.
- if newState.ConnectivityState == connectivity.TransientFailure {
- sd.connectionFailedInFirstPass = true
- connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
- }
-
- if newState.ConnectivityState == connectivity.Ready {
- connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
- b.shutdownRemainingLocked(sd)
- if !b.addressList.seekTo(sd.addr) {
- // This should not fail as we should have only one SubConn after
- // entering READY. The SubConn should be present in the addressList.
- b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
- return
- }
- if !b.healthCheckingEnabled {
- if b.logger.V(2) {
- b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
- }
-
- sd.effectiveState = connectivity.Ready
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Ready,
- Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
- })
- return
- }
- if b.logger.V(2) {
- b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
- }
- // Send a CONNECTING update to take the SubConn out of sticky-TF if
- // required.
- sd.effectiveState = connectivity.Connecting
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
- b.updateSubConnHealthState(sd, scs)
- })
- return
- }
-
- // If the LB policy is READY, and it receives a subchannel state change,
- // it means that the READY subchannel has failed.
- // A SubConn can also transition from CONNECTING directly to IDLE when
- // a transport is successfully created, but the connection fails
- // before the SubConn can send the notification for READY. We treat
- // this as a successful connection and transition to IDLE.
- // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
- // part of the if condition below once the issue is fixed.
- if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
- // Once a transport fails, the balancer enters IDLE and starts from
- // the first address when the picker is used.
- b.shutdownRemainingLocked(sd)
- sd.effectiveState = newState.ConnectivityState
- // A READY SubConn can be interleaved between CONNECTING and IDLE, so
- // account for that here.
- if oldState == connectivity.Connecting {
- // A known issue (https://github.com/grpc/grpc-go/issues/7862)
- // causes a race that prevents the READY state change notification.
- // This works around it.
- connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
- }
- disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
- b.addressList.reset()
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Idle,
- Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
- })
- return
- }
-
- if b.firstPass {
- switch newState.ConnectivityState {
- case connectivity.Connecting:
- // The effective state can be in either IDLE, CONNECTING or
- // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
- // TRANSIENT_FAILURE until it's READY. See A62.
- if sd.effectiveState != connectivity.TransientFailure {
- sd.effectiveState = connectivity.Connecting
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- }
- case connectivity.TransientFailure:
- sd.lastErr = newState.ConnectionError
- sd.effectiveState = connectivity.TransientFailure
- // Since we're re-using common SubConns while handling resolver
- // updates, we could receive an out of turn TRANSIENT_FAILURE from
- // a pass over the previous address list. Happy Eyeballs will also
- // cause out of order updates to arrive.
-
- if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
- b.cancelConnectionTimer()
- if b.addressList.increment() {
- b.requestConnectionLocked()
- return
- }
- }
-
- // End the first pass if we've seen a TRANSIENT_FAILURE from all
- // SubConns once.
- b.endFirstPassIfPossibleLocked(newState.ConnectionError)
- }
- return
- }
-
- // We have finished the first pass, keep re-connecting failing SubConns.
- switch newState.ConnectivityState {
- case connectivity.TransientFailure:
- b.numTF = (b.numTF + 1) % b.subConns.Len()
- sd.lastErr = newState.ConnectionError
- if b.numTF%b.subConns.Len() == 0 {
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: newState.ConnectionError},
- })
- }
- // We don't need to request re-resolution since the SubConn already
- // does that before reporting TRANSIENT_FAILURE.
- // TODO: #7534 - Move re-resolution requests from SubConn into
- // pick_first.
- case connectivity.Idle:
- sd.subConn.Connect()
- }
-}
-
-// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass once all the
-// addresses have been tried and their SubConns have reported a failure.
-func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
- // An optimization to avoid iterating over the entire SubConn map.
- if b.addressList.isValid() {
- return
- }
- // Connect() has been called on all the SubConns. The first pass can be
- // ended if all the SubConns have reported a failure.
- for _, sd := range b.subConns.Values() {
- if !sd.connectionFailedInFirstPass {
- return
- }
- }
- b.firstPass = false
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: lastErr},
- })
- // Start re-connecting all the SubConns that are already in IDLE.
- for _, sd := range b.subConns.Values() {
- if sd.rawConnectivityState == connectivity.Idle {
- sd.subConn.Connect()
- }
- }
-}
-
-func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
- activeSD, found := b.subConns.Get(sd.addr)
- return found && activeSD == sd
-}
-
-func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
- b.mu.Lock()
- defer b.mu.Unlock()
- // Previously relevant SubConns can still callback with state updates.
- // To prevent pickers from returning these obsolete SubConns, this logic
- // is included to check if the current list of active SubConns includes
- // this SubConn.
- if !b.isActiveSCData(sd) {
- return
- }
- sd.effectiveState = state.ConnectivityState
- switch state.ConnectivityState {
- case connectivity.Ready:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Ready,
- Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
- })
- case connectivity.TransientFailure:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
- })
- case connectivity.Connecting:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- default:
- b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
- }
-}
-
-// updateBalancerState stores the state reported to the channel and calls
-// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
-// updates to the channel.
-func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
- // In case of TransientFailures allow the picker to be updated to update
- // the connectivity error, in all other cases don't send duplicate state
- // updates.
- if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
- return
- }
- b.forceUpdateConcludedStateLocked(newState)
-}
-
-// forceUpdateConcludedStateLocked stores the state reported to the channel and
-// calls ClientConn.UpdateState().
-// A separate function is defined to force-update the ClientConn state because
-// the channel doesn't assume that LB policies start in CONNECTING; instead it
-// relies on the LB policy to send an initial CONNECTING update.
-func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
- b.state = newState.ConnectivityState
- b.cc.UpdateState(newState)
-}
-
-type picker struct {
- result balancer.PickResult
- err error
-}
-
-func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- return p.result, p.err
-}
-
-// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
-// CONNECTING when Pick is called.
-type idlePicker struct {
- exitIdle func()
-}
-
-func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- i.exitIdle()
- return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-}
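A standalone sketch of why exitIdle is wrapped in sync.OnceFunc: many concurrent picks may observe the idle picker, but only the first should trigger reconnection. The types below are local stand-ins, not the gRPC ones.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type idlePicker struct {
	exitIdle func()
}

func (p *idlePicker) Pick() error {
	p.exitIdle()
	return fmt.Errorf("no SubConn available yet") // caller queues and retries
}

func main() {
	var exits atomic.Int32
	p := &idlePicker{exitIdle: sync.OnceFunc(func() { exits.Add(1) })}

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = p.Pick()
		}()
	}
	wg.Wait()
	fmt.Println("exitIdle ran", exits.Load(), "time(s)") // 1 time(s)
}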
-
-// addressList manages sequentially iterating over addresses present in a list
-// of endpoints. It provides a 1 dimensional view of the addresses present in
-// the endpoints.
-// This type is not safe for concurrent access.
-type addressList struct {
- addresses []resolver.Address
- idx int
-}
-
-func (al *addressList) isValid() bool {
- return al.idx < len(al.addresses)
-}
-
-func (al *addressList) size() int {
- return len(al.addresses)
-}
-
-// increment moves to the next index in the address list.
-// This method returns false if it went off the list, true otherwise.
-func (al *addressList) increment() bool {
- if !al.isValid() {
- return false
- }
- al.idx++
- return al.idx < len(al.addresses)
-}
-
-// currentAddress returns the current address pointed to in the addressList.
-// If the list is in an invalid state, it returns an empty address instead.
-func (al *addressList) currentAddress() resolver.Address {
- if !al.isValid() {
- return resolver.Address{}
- }
- return al.addresses[al.idx]
-}
-
-func (al *addressList) reset() {
- al.idx = 0
-}
-
-func (al *addressList) updateAddrs(addrs []resolver.Address) {
- al.addresses = addrs
- al.reset()
-}
-
-// seekTo moves the index to the first address equal to needle. It returns false
-// if the needle was not found, leaving the current index unchanged.
-func (al *addressList) seekTo(needle resolver.Address) bool {
- for ai, addr := range al.addresses {
- if !equalAddressIgnoringBalAttributes(&addr, &needle) {
- continue
- }
- al.idx = ai
- return true
- }
- return false
-}
-
-// hasNext returns whether there is another address after the current one, that
-// is, whether incrementing the addressList will still leave it on a valid
-// address. If the list has already moved past the end, it returns false.
-func (al *addressList) hasNext() bool {
- if !al.isValid() {
- return false
- }
- return al.idx+1 < len(al.addresses)
-}
-
-// equalAddressIgnoringBalAttributes returns true if a and b are considered
-// equal. This is different from the Equal method on the resolver.Address type
-// which considers all fields to determine equality. Here, we only consider
-// fields that are meaningful to the SubConn.
-func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
- return a.Addr == b.Addr && a.ServerName == b.ServerName &&
- a.Attributes.Equal(b.Attributes)
-}
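A standalone sketch of the addressList cursor semantics on plain strings (illustrative names): increment reports whether the cursor is still on a valid entry, and seekTo leaves the index untouched when the needle is absent.

package main

import "fmt"

type cursor struct {
	items []string
	idx   int
}

func (c *cursor) valid() bool { return c.idx < len(c.items) }

func (c *cursor) current() string {
	if !c.valid() {
		return ""
	}
	return c.items[c.idx]
}

// increment moves to the next entry and reports whether the cursor still
// points at a valid entry.
func (c *cursor) increment() bool {
	if !c.valid() {
		return false
	}
	c.idx++
	return c.valid()
}

// seekTo positions the cursor on needle, or returns false and leaves the
// cursor unchanged if needle is not present.
func (c *cursor) seekTo(needle string) bool {
	for i, it := range c.items {
		if it == needle {
			c.idx = i
			return true
		}
	}
	return false
}

func main() {
	c := &cursor{items: []string{"a:1", "b:2", "c:3"}}
	for ok := true; ok; ok = c.increment() {
		fmt.Println("try", c.current())
	}
	fmt.Println("valid after walking off the end:", c.valid()) // false
	fmt.Println("found b:2:", c.seekTo("b:2"), "now at:", c.current())
}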
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 35da5d1ec..22e6e3267 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -26,7 +26,7 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/endpointsharding"
- "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
+ "google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/grpclog"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
)
@@ -47,7 +47,7 @@ func (bb builder) Name() string {
}
func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
- childBuilder := balancer.Get(pickfirstleaf.Name).Build
+ childBuilder := balancer.Get(pickfirst.Name).Build
bal := &rrBalancer{
cc: cc,
Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
@@ -67,13 +67,6 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
// Enable the health listener in pickfirst children for client side health
// checks and outlier detection, if configured.
- ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
+ ResolverState: pickfirst.EnableHealthListener(ccs.ResolverState),
})
}
-
-func (b *rrBalancer) ExitIdle() {
- // Should always be ok, as child is endpoint sharding.
- if ei, ok := b.Balancer.(balancer.ExitIdler); ok {
- ei.ExitIdle()
- }
-}
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 948a21ef6..2c760e623 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -450,13 +450,14 @@ func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(
if acbw.ccb.cc.dopts.disableHealthCheck {
return noOpRegisterHealthListenerFn
}
+ cfg := acbw.ac.cc.healthCheckConfig()
+ if cfg == nil {
+ return noOpRegisterHealthListenerFn
+ }
regHealthLisFn := internal.RegisterClientHealthCheckListener
if regHealthLisFn == nil {
// The health package is not imported.
- return noOpRegisterHealthListenerFn
- }
- cfg := acbw.ac.cc.healthCheckConfig()
- if cfg == nil {
+ channelz.Error(logger, acbw.ac.channelz, "Health check is requested but health package is not imported.")
return noOpRegisterHealthListenerFn
}
return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 825c31795..42c61cf9f 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.5
+// protoc-gen-go v1.36.10
// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
@@ -858,133 +858,68 @@ func (x *Address) GetIpPort() uint32 {
var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
-var file_grpc_binlog_v1_binarylog_proto_rawDesc = string([]byte{
- 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31,
- 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
- 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
- 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75,
- 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63,
- 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65,
- 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12,
- 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e,
- 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
- 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e,
- 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
- 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e,
- 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e,
- 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46,
- 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
- 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
- 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
- 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
- 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
- 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
- 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00,
- 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36,
- 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
- 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65,
- 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
- 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69,
- 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b,
- 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70,
- 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63,
- 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64,
- 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45,
- 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
- 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12,
- 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45,
- 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a,
- 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45,
- 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19,
- 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45,
- 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45,
- 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54,
- 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a,
- 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
- 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11,
- 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45,
- 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a,
- 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
- 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45,
- 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53,
- 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61,
- 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
- 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b,
- 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a,
- 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74,
- 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
- 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
- 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79,
- 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
- 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72,
- 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
- 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
- 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61,
- 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f,
- 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12,
- 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
- 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d,
- 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a,
- 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
- 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
- 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04,
- 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
- 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61,
- 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a,
- 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72,
- 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e,
- 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79,
- 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07,
- 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69,
- 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a,
- 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
- 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d,
- 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a,
- 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14,
- 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f,
- 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
- 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62,
- 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69,
- 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
-})
+const file_grpc_binlog_v1_binarylog_proto_rawDesc = "" +
+ "\n" +
+ "\x1egrpc/binlog/v1/binarylog.proto\x12\x11grpc.binarylog.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\a\n" +
+ "\fGrpcLogEntry\x128\n" +
+ "\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x17\n" +
+ "\acall_id\x18\x02 \x01(\x04R\x06callId\x125\n" +
+ "\x17sequence_id_within_call\x18\x03 \x01(\x04R\x14sequenceIdWithinCall\x12=\n" +
+ "\x04type\x18\x04 \x01(\x0e2).grpc.binarylog.v1.GrpcLogEntry.EventTypeR\x04type\x12>\n" +
+ "\x06logger\x18\x05 \x01(\x0e2&.grpc.binarylog.v1.GrpcLogEntry.LoggerR\x06logger\x12F\n" +
+ "\rclient_header\x18\x06 \x01(\v2\x1f.grpc.binarylog.v1.ClientHeaderH\x00R\fclientHeader\x12F\n" +
+ "\rserver_header\x18\a \x01(\v2\x1f.grpc.binarylog.v1.ServerHeaderH\x00R\fserverHeader\x126\n" +
+ "\amessage\x18\b \x01(\v2\x1a.grpc.binarylog.v1.MessageH\x00R\amessage\x126\n" +
+ "\atrailer\x18\t \x01(\v2\x1a.grpc.binarylog.v1.TrailerH\x00R\atrailer\x12+\n" +
+ "\x11payload_truncated\x18\n" +
+ " \x01(\bR\x10payloadTruncated\x12.\n" +
+ "\x04peer\x18\v \x01(\v2\x1a.grpc.binarylog.v1.AddressR\x04peer\"\xf5\x01\n" +
+ "\tEventType\x12\x16\n" +
+ "\x12EVENT_TYPE_UNKNOWN\x10\x00\x12\x1c\n" +
+ "\x18EVENT_TYPE_CLIENT_HEADER\x10\x01\x12\x1c\n" +
+ "\x18EVENT_TYPE_SERVER_HEADER\x10\x02\x12\x1d\n" +
+ "\x19EVENT_TYPE_CLIENT_MESSAGE\x10\x03\x12\x1d\n" +
+ "\x19EVENT_TYPE_SERVER_MESSAGE\x10\x04\x12 \n" +
+ "\x1cEVENT_TYPE_CLIENT_HALF_CLOSE\x10\x05\x12\x1d\n" +
+ "\x19EVENT_TYPE_SERVER_TRAILER\x10\x06\x12\x15\n" +
+ "\x11EVENT_TYPE_CANCEL\x10\a\"B\n" +
+ "\x06Logger\x12\x12\n" +
+ "\x0eLOGGER_UNKNOWN\x10\x00\x12\x11\n" +
+ "\rLOGGER_CLIENT\x10\x01\x12\x11\n" +
+ "\rLOGGER_SERVER\x10\x02B\t\n" +
+ "\apayload\"\xbb\x01\n" +
+ "\fClientHeader\x127\n" +
+ "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" +
+ "\vmethod_name\x18\x02 \x01(\tR\n" +
+ "methodName\x12\x1c\n" +
+ "\tauthority\x18\x03 \x01(\tR\tauthority\x123\n" +
+ "\atimeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\atimeout\"G\n" +
+ "\fServerHeader\x127\n" +
+ "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\"\xb1\x01\n" +
+ "\aTrailer\x127\n" +
+ "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" +
+ "\vstatus_code\x18\x02 \x01(\rR\n" +
+ "statusCode\x12%\n" +
+ "\x0estatus_message\x18\x03 \x01(\tR\rstatusMessage\x12%\n" +
+ "\x0estatus_details\x18\x04 \x01(\fR\rstatusDetails\"5\n" +
+ "\aMessage\x12\x16\n" +
+ "\x06length\x18\x01 \x01(\rR\x06length\x12\x12\n" +
+ "\x04data\x18\x02 \x01(\fR\x04data\"B\n" +
+ "\bMetadata\x126\n" +
+ "\x05entry\x18\x01 \x03(\v2 .grpc.binarylog.v1.MetadataEntryR\x05entry\"7\n" +
+ "\rMetadataEntry\x12\x10\n" +
+ "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
+ "\x05value\x18\x02 \x01(\fR\x05value\"\xb8\x01\n" +
+ "\aAddress\x123\n" +
+ "\x04type\x18\x01 \x01(\x0e2\x1f.grpc.binarylog.v1.Address.TypeR\x04type\x12\x18\n" +
+ "\aaddress\x18\x02 \x01(\tR\aaddress\x12\x17\n" +
+ "\aip_port\x18\x03 \x01(\rR\x06ipPort\"E\n" +
+ "\x04Type\x12\x10\n" +
+ "\fTYPE_UNKNOWN\x10\x00\x12\r\n" +
+ "\tTYPE_IPV4\x10\x01\x12\r\n" +
+ "\tTYPE_IPV6\x10\x02\x12\r\n" +
+ "\tTYPE_UNIX\x10\x03B\\\n" +
+ "\x14io.grpc.binarylog.v1B\x0eBinaryLogProtoP\x01Z2google.golang.org/grpc/binarylog/grpc_binarylog_v1b\x06proto3"
var (
file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 4f350ca56..c0c2c9a76 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -40,11 +40,12 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
iresolver "google.golang.org/grpc/internal/resolver"
- "google.golang.org/grpc/internal/stats"
+ istats "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
+ "google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
@@ -208,9 +209,10 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
- cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
+ cc.pickerWrapper = newPickerWrapper()
- cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+ cc.metricsRecorderList = istats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+ cc.statsHandler = istats.NewCombinedHandler(cc.dopts.copts.StatsHandlers...)
cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
@@ -456,7 +458,7 @@ func (cc *ClientConn) validateTransportCredentials() error {
func (cc *ClientConn) channelzRegistration(target string) {
parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
cc.channelz = channelz.RegisterChannel(parentChannel, target)
- cc.addTraceEvent("created")
+ cc.addTraceEvent(fmt.Sprintf("created for target %q", target))
}
// chainUnaryClientInterceptors chains all unary client interceptors into one.
@@ -621,7 +623,8 @@ type ClientConn struct {
channelz *channelz.Channel // Channelz object.
resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
idlenessMgr *idle.Manager
- metricsRecorderList *stats.MetricsRecorderList
+ metricsRecorderList *istats.MetricsRecorderList
+ statsHandler stats.Handler
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
@@ -689,22 +692,31 @@ func (cc *ClientConn) Connect() {
cc.mu.Unlock()
}
-// waitForResolvedAddrs blocks until the resolver has provided addresses or the
-// context expires. Returns nil unless the context expires first; otherwise
-// returns a status error based on the context.
-func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
+// waitForResolvedAddrs blocks until the resolver provides addresses or the
+// context expires, whichever happens first.
+//
+// Error is nil unless the context expires first; otherwise returns a status
+// error based on the context.
+//
+// The returned boolean indicates whether the call blocked. If resolution has
+// already happened once before, it returns false without blocking. Otherwise,
+// it waits for the resolution and returns true if resolution succeeded, or
+// false along with an error if resolution failed.
+func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) (bool, error) {
// This is on the RPC path, so we use a fast path to avoid the
// more-expensive "select" below after the resolver has returned once.
if cc.firstResolveEvent.HasFired() {
- return nil
+ return false, nil
}
+ internal.NewStreamWaitingForResolver()
select {
case <-cc.firstResolveEvent.Done():
- return nil
+ return true, nil
case <-ctx.Done():
- return status.FromContextError(ctx.Err()).Err()
+ return false, status.FromContextError(ctx.Err()).Err()
case <-cc.ctx.Done():
- return ErrClientConnClosing
+ return false, ErrClientConnClosing
}
}
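A standalone sketch of the fast path above, using a minimal local event type in place of grpcsync.Event: once the first resolution has fired, later callers skip the select entirely and report that they did not block.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type event struct {
	once sync.Once
	c    chan struct{}
}

func newEvent() *event { return &event{c: make(chan struct{})} }
func (e *event) fire() { e.once.Do(func() { close(e.c) }) }
func (e *event) hasFired() bool {
	select {
	case <-e.c:
		return true
	default:
		return false
	}
}

func wait(ctx context.Context, resolved *event) (blocked bool, err error) {
	if resolved.hasFired() {
		return false, nil // fast path: skip the select below
	}
	select {
	case <-resolved.c:
		return true, nil
	case <-ctx.Done():
		return false, ctx.Err()
	}
}

func main() {
	resolved := newEvent()
	go func() {
		time.Sleep(10 * time.Millisecond)
		resolved.fire()
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(wait(ctx, resolved)) // true <nil>  (first call blocked)
	fmt.Println(wait(ctx, resolved)) // false <nil> (fast path)
}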
@@ -1067,13 +1079,6 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
return cc.sc.healthCheckConfig
}
-func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
- return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{
- Ctx: ctx,
- FullMethodName: method,
- })
-}
-
func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) {
if sc == nil {
// should never reach here.
@@ -1822,7 +1827,7 @@ func (cc *ClientConn) initAuthority() error {
} else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
cc.authority = auth.OverrideAuthority(cc.parsedTarget)
} else if strings.HasPrefix(endpoint, ":") {
- cc.authority = "localhost" + endpoint
+ cc.authority = "localhost" + encodeAuthority(endpoint)
} else {
cc.authority = encodeAuthority(endpoint)
}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index 665e790bb..06f6c6c70 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -44,8 +44,7 @@ type PerRPCCredentials interface {
// A54). uri is the URI of the entry point for the request. When supported
// by the underlying implementation, ctx can be used for timeout and
// cancellation. Additionally, RequestInfo data will be available via ctx
- // to this call. TODO(zhaoq): Define the set of the qualified keys instead
- // of leaving it as an arbitrary string.
+ // to this call.
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
// RequireTransportSecurity indicates whether the credentials requires
// transport security.
@@ -96,10 +95,11 @@ func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
return c
}
-// ProtocolInfo provides information regarding the gRPC wire protocol version,
-// security protocol, security protocol version in use, server name, etc.
+// ProtocolInfo provides static information regarding transport credentials.
type ProtocolInfo struct {
// ProtocolVersion is the gRPC wire protocol version.
+ //
+ // Deprecated: this is unused by gRPC.
ProtocolVersion string
// SecurityProtocol is the security protocol in use.
SecurityProtocol string
@@ -109,7 +109,16 @@ type ProtocolInfo struct {
//
// Deprecated: please use Peer.AuthInfo.
SecurityVersion string
- // ServerName is the user-configured server name.
+ // ServerName is the user-configured server name. If set, this overrides
+ // the default :authority header used for all RPCs on the channel using the
+ // containing credentials, unless grpc.WithAuthority is set on the channel,
+ // in which case that setting will take precedence.
+ //
+ // This must be a valid `:authority` header according to
+ // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2).
+ //
+ // Deprecated: Users should use grpc.WithAuthority to override the authority
+ // on a channel instead of configuring the credentials.
ServerName string
}
@@ -120,6 +129,20 @@ type AuthInfo interface {
AuthType() string
}
+// AuthorityValidator validates the authority used to override the `:authority`
+// header. This is an optional interface that implementations of AuthInfo can
+// implement if they support per-RPC authority overrides. It is invoked when the
+// application attempts to override the HTTP/2 `:authority` header using the
+// CallAuthority call option.
+type AuthorityValidator interface {
+ // ValidateAuthority checks the authority value used to override the
+ // `:authority` header. The authority parameter is the override value
+ // provided by the application via the CallAuthority option. This value
+ // typically corresponds to the server hostname or endpoint the RPC is
+ // targeting. It returns non-nil error if the validation fails.
+ ValidateAuthority(authority string) error
+}
+
// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
// and the caller should not close rawConn.
var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
@@ -159,12 +182,17 @@ type TransportCredentials interface {
// Clone makes a copy of this TransportCredentials.
Clone() TransportCredentials
// OverrideServerName specifies the value used for the following:
+ //
// - verifying the hostname on the returned certificates
// - as SNI in the client's handshake to support virtual hosting
// - as the value for `:authority` header at stream creation time
//
- // Deprecated: use grpc.WithAuthority instead. Will be supported
- // throughout 1.x.
+ // The provided string should be a valid `:authority` header according to
+ // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2).
+ //
+ // Deprecated: this method is unused by gRPC. Users should use
+ // grpc.WithAuthority to override the authority on a channel instead of
+ // configuring the credentials.
OverrideServerName(string) error
}
@@ -207,14 +235,32 @@ type RequestInfo struct {
AuthInfo AuthInfo
}
+// requestInfoKey is a struct to be used as the key to store RequestInfo in a
+// context.
+type requestInfoKey struct{}
+
// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
//
// This API is experimental.
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
- ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo)
+ ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
return ri, ok
}
+// NewContextWithRequestInfo creates a new context from ctx and attaches ri to it.
+//
+// This RequestInfo will be accessible via RequestInfoFromContext.
+//
+// Intended to be used from tests for PerRPCCredentials implementations (that
+// often need to check connection's SecurityLevel). Should not be used from
+// non-test code: the gRPC client already prepares a context with the correct
+// RequestInfo attached when calling PerRPCCredentials.GetRequestMetadata.
+//
+// This API is experimental.
+func NewContextWithRequestInfo(ctx context.Context, ri RequestInfo) context.Context {
+ return context.WithValue(ctx, requestInfoKey{}, ri)
+}
+
// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
// balancer etc. Individual credential implementations control the actual
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
index 4c805c644..93156c0f3 100644
--- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -30,7 +30,7 @@ import (
// NewCredentials returns a credentials which disables transport security.
//
// Note that using this credentials with per-RPC credentials which require
-// transport security is incompatible and will cause grpc.Dial() to fail.
+// transport security is incompatible and will cause RPCs to fail.
func NewCredentials() credentials.TransportCredentials {
return insecureTC{}
}
@@ -71,6 +71,12 @@ func (info) AuthType() string {
return "insecure"
}
+// ValidateAuthority allows any value to be overridden for the :authority
+// header.
+func (info) ValidateAuthority(string) error {
+ return nil
+}
+
// insecureBundle implements an insecure bundle.
// An insecure bundle provides a thin wrapper around insecureTC to support
// the credentials.Bundle interface.
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index bd5fe22b6..8277be7d6 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -22,6 +22,7 @@ import (
"context"
"crypto/tls"
"crypto/x509"
+ "errors"
"fmt"
"net"
"net/url"
@@ -50,6 +51,21 @@ func (t TLSInfo) AuthType() string {
return "tls"
}
+// ValidateAuthority validates the provided authority being used to override the
+// :authority header by verifying it against the peer certificates. It returns a
+// non-nil error if the validation fails.
+func (t TLSInfo) ValidateAuthority(authority string) error {
+ var errs []error
+ for _, cert := range t.State.PeerCertificates {
+ var err error
+ if err = cert.VerifyHostname(authority); err == nil {
+ return nil
+ }
+ errs = append(errs, err)
+ }
+ return fmt.Errorf("credentials: invalid authority %q: %v", authority, errors.Join(errs...))
+}
+
// cipherSuiteLookup returns the string version of a TLS cipher suite ID.
func cipherSuiteLookup(cipherSuiteID uint16) string {
for _, s := range tls.CipherSuites() {
@@ -94,14 +110,14 @@ func (c tlsCreds) Info() ProtocolInfo {
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
// use local cfg to avoid clobbering ServerName if using multiple endpoints
cfg := credinternal.CloneTLSConfig(c.config)
- if cfg.ServerName == "" {
- serverName, _, err := net.SplitHostPort(authority)
- if err != nil {
- // If the authority had no host port or if the authority cannot be parsed, use it as-is.
- serverName = authority
- }
- cfg.ServerName = serverName
+
+ serverName, _, err := net.SplitHostPort(authority)
+ if err != nil {
+ // If the authority had no host port or if the authority cannot be parsed, use it as-is.
+ serverName = authority
}
+ cfg.ServerName = serverName
+
conn := tls.Client(rawConn, cfg)
errChannel := make(chan error, 1)
go func() {
@@ -243,9 +259,11 @@ func applyDefaults(c *tls.Config) *tls.Config {
// certificates to establish the identity of the client need to be included in
// the credentials (eg: for mTLS), use NewTLS instead, where a complete
// tls.Config can be specified.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header
-// field) in requests.
+//
+// serverNameOverride is for testing only. If set to a non empty string, it will
+// override the virtual host name of authority (e.g. :authority header field) in
+// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to
+// override the authority of the client instead.
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
}
@@ -255,9 +273,11 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor
// certificates to establish the identity of the client need to be included in
// the credentials (eg: for mTLS), use NewTLS instead, where a complete
// tls.Config can be specified.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header
-// field) in requests.
+//
+// serverNameOverride is for testing only. If set to a non empty string, it will
+// override the virtual host name of authority (e.g. :authority header field) in
+// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to
+// override the authority of the client instead.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
b, err := os.ReadFile(certFile)
if err != nil {
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 405a2ffeb..7a5ac2e7c 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -213,6 +213,7 @@ func WithReadBufferSize(s int) DialOption {
func WithInitialWindowSize(s int32) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.InitialWindowSize = s
+ o.copts.StaticWindowSize = true
})
}
@@ -222,6 +223,26 @@ func WithInitialWindowSize(s int32) DialOption {
func WithInitialConnWindowSize(s int32) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.InitialConnWindowSize = s
+ o.copts.StaticWindowSize = true
+ })
+}
+
+// WithStaticStreamWindowSize returns a DialOption which sets the initial
+// stream window size to the value provided and disables dynamic flow control.
+func WithStaticStreamWindowSize(s int32) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.copts.InitialWindowSize = s
+ o.copts.StaticWindowSize = true
+ })
+}
+
+// WithStaticConnWindowSize returns a DialOption which sets the initial
+// connection window size to the value provided and disables dynamic flow
+// control.
+func WithStaticConnWindowSize(s int32) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.copts.InitialConnWindowSize = s
+ o.copts.StaticWindowSize = true
})
}
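A possible usage sketch for the two options added above: pin both window sizes and disable dynamic flow control on a client. The target and sizes are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("dns:///example.internal:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStaticStreamWindowSize(1<<20), // 1 MiB per-stream window
		grpc.WithStaticConnWindowSize(4<<20),   // 4 MiB per-connection window
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}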
@@ -360,7 +381,7 @@ func WithReturnConnectionError() DialOption {
//
// Note that using this DialOption with per-RPC credentials (through
// WithCredentialsBundle or WithPerRPCCredentials) which require transport
-// security is incompatible and will cause grpc.Dial() to fail.
+// security is incompatible and will cause RPCs to fail.
//
// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
// instead. Will be supported throughout 1.x.
@@ -587,6 +608,8 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt
// WithAuthority returns a DialOption that specifies the value to be used as the
// :authority pseudo-header and as the server name in authentication handshake.
+// This overrides all other ways of setting authority on the channel, but can be
+// overridden per-call by using grpc.CallAuthority.
func WithAuthority(a string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.authority = a
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 11d0ae142..dadd21e40 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -27,8 +27,10 @@ package encoding
import (
"io"
+ "slices"
"strings"
+ "google.golang.org/grpc/encoding/internal"
"google.golang.org/grpc/internal/grpcutil"
)
@@ -36,6 +38,24 @@ import (
// It is intended for grpc internal use only.
const Identity = "identity"
+func init() {
+ internal.RegisterCompressorForTesting = func(c Compressor) func() {
+ name := c.Name()
+ curCompressor, found := registeredCompressor[name]
+ RegisterCompressor(c)
+ return func() {
+ if found {
+ registeredCompressor[name] = curCompressor
+ return
+ }
+ delete(registeredCompressor, name)
+ grpcutil.RegisteredCompressorNames = slices.DeleteFunc(grpcutil.RegisteredCompressorNames, func(s string) bool {
+ return s == name
+ })
+ }
+ }
+}
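A standalone sketch of the register-then-restore pattern this hook implements, using a plain map as a stand-in for the compressor registry: the returned cleanup restores the previous entry, or deletes it if nothing was registered before.

package main

import "fmt"

var registry = map[string]string{}

func registerForTesting(name, value string) (cleanup func()) {
	prev, found := registry[name]
	registry[name] = value
	return func() {
		if found {
			registry[name] = prev
			return
		}
		delete(registry, name)
	}
}

func main() {
	registry["gzip"] = "real-gzip"

	cleanup := registerForTesting("gzip", "fake-gzip")
	fmt.Println(registry["gzip"]) // fake-gzip
	cleanup()
	fmt.Println(registry["gzip"]) // real-gzip

	cleanup = registerForTesting("zstd", "fake-zstd")
	cleanup()
	_, ok := registry["zstd"]
	fmt.Println(ok) // false
}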
+
// Compressor is used for compressing and decompressing when sending or
// receiving messages.
//
diff --git a/vendor/google.golang.org/grpc/encoding/internal/internal.go b/vendor/google.golang.org/grpc/encoding/internal/internal.go
new file mode 100644
index 000000000..ee9acb437
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/internal/internal.go
@@ -0,0 +1,28 @@
+/*
+ *
+ * Copyright 2025 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code internal to the encoding package.
+package internal
+
+// RegisterCompressorForTesting registers a compressor in the global compressor
+// registry. It returns a cleanup function that should be called at the end
+// of the test to unregister the compressor.
+//
+// This prevents compressors registered in one test from appearing in the
+// encoding headers of subsequent tests.
+var RegisterCompressorForTesting any // func RegisterCompressor(c Compressor) func()
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index ceec319dd..1ab874c7a 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -46,9 +46,25 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
}
+ // Important: if we remove this Size call then we cannot use
+ // UseCachedSize in MarshalOptions below.
size := proto.Size(vv)
+
+ // MarshalOptions with UseCachedSize allows reusing the result from the
+ // previous Size call. This is safe here because:
+ //
+ // 1. We just computed the size.
+ // 2. We assume the message is not being mutated concurrently.
+ //
+ // Important: If the proto.Size call above is removed, using UseCachedSize
+ // becomes unsafe and may lead to incorrect marshaling.
+ //
+ // For more details, see the doc of UseCachedSize:
+ // https://pkg.go.dev/google.golang.org/protobuf/proto#MarshalOptions
+ marshalOptions := proto.MarshalOptions{UseCachedSize: true}
+
if mem.IsBelowBufferPoolingThreshold(size) {
- buf, err := proto.Marshal(vv)
+ buf, err := marshalOptions.Marshal(vv)
if err != nil {
return nil, err
}
@@ -56,7 +72,7 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
} else {
pool := mem.DefaultBufferPool()
buf := pool.Get(size)
- if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+ if _, err := marshalOptions.MarshalAppend((*buf)[:0], vv); err != nil {
pool.Put(buf)
return nil, err
}
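
As a stand-alone illustration of the Size-then-Marshal pattern above (the helper name is invented; it is only safe if nothing mutates the message between the two calls):

```go
package protoexample

import "google.golang.org/protobuf/proto"

// marshalWithCachedSize pre-sizes the output buffer and lets MarshalAppend
// reuse the size computed (and cached) by proto.Size.
func marshalWithCachedSize(m proto.Message) ([]byte, error) {
	size := proto.Size(m) // caches the size inside the message
	buf := make([]byte, 0, size)
	// Safe only because m is not mutated between Size and MarshalAppend.
	return proto.MarshalOptions{UseCachedSize: true}.MarshalAppend(buf, m)
}
```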
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
index ad75313a1..2b57ba65a 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -75,6 +75,7 @@ const (
MetricTypeIntHisto
MetricTypeFloatHisto
MetricTypeIntGauge
+ MetricTypeIntUpDownCount
)
// Int64CountHandle is a typed handle for a int count metric. This handle
@@ -93,6 +94,23 @@ func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels .
recorder.RecordInt64Count(h, incr, labels...)
}
+// Int64UpDownCountHandle is a typed handle for an int up-down counter metric.
+// This handle is passed at the recording point in order to know which metric
+// to record on.
+type Int64UpDownCountHandle MetricDescriptor
+
+// Descriptor returns the int64 up-down counter handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64UpDownCountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 up-down counter value on the metrics recorder provided.
+// The value 'v' can be positive to increment or negative to decrement.
+func (h *Int64UpDownCountHandle) Record(recorder MetricsRecorder, v int64, labels ...string) {
+ recorder.RecordInt64UpDownCount(h, v, labels...)
+}
+
// Float64CountHandle is a typed handle for a float count metric. This handle is
// passed at the recording point in order to know which metric to record on.
type Float64CountHandle MetricDescriptor
@@ -249,6 +267,21 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
return (*Int64GaugeHandle)(descPtr)
}
+// RegisterInt64UpDownCount registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ // Set the specific metric type for the up-down counter
+ descriptor.Type = MetricTypeIntUpDownCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64UpDownCountHandle)(descPtr)
+}
+
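
A hypothetical sketch of registering and recording the new metric kind. The metric name, labels, and descriptor field values are invented; the descriptor field names follow the existing Register* helpers, and only RegisterInt64UpDownCount and Record come from the diff.

```go
package metricsexample

import estats "google.golang.org/grpc/experimental/stats"

// Registered at initialization time, as required by the NOTE above.
var activeStreams = estats.RegisterInt64UpDownCount(estats.MetricDescriptor{
	Name:        "grpc.client.example_active_streams",
	Description: "Number of streams currently open.",
	Unit:        "{stream}",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

func streamOpened(r estats.MetricsRecorder, target string) {
	activeStreams.Record(r, 1, target) // increment on open
}

func streamClosed(r estats.MetricsRecorder, target string) {
	activeStreams.Record(r, -1, target) // decrement on close
}
```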
// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
// registry. Returns a cleanup function that sets the metrics registry to its
// original state.
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
index ee1423605..cb57f1a74 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -38,6 +38,9 @@ type MetricsRecorder interface {
// RecordInt64Gauge records the measurement alongside labels on the int
// gauge associated with the provided handle.
RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+ // RecordInt64UpDownCount records the measurement alongside labels on the int
+ // up-down counter associated with the provided handle.
+ RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string)
}
// Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
index fbc1ca356..ba25b8988 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -223,15 +223,7 @@ func (gsb *Balancer) ExitIdle() {
// There is no need to protect this read with a mutex, as the write to the
// Balancer field happens in SwitchTo, which completes before this can be
// called.
- if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
- ei.ExitIdle()
- return
- }
- gsb.mu.Lock()
- defer gsb.mu.Unlock()
- for sc := range balToUpdate.subconns {
- sc.Connect()
- }
+ balToUpdate.ExitIdle()
}
// updateSubConnState forwards the update to the appropriate child.
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
index 11f91668a..467392b8d 100644
--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -83,6 +83,7 @@ func (b *Unbounded) Load() {
default:
}
} else if b.closing && !b.closed {
+ b.closed = true
close(b.c)
}
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
index 2bffe4777..3b7ba5966 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/trace.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -194,7 +194,7 @@ func (r RefChannelType) String() string {
// If channelz is not turned ON, this will simply log the event descriptions.
func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
// Log only the trace description associated with the bottom most entity.
- d := fmt.Sprintf("[%s]%s", e, desc.Desc)
+ d := fmt.Sprintf("[%s] %s", e, desc.Desc)
switch desc.Severity {
case CtUnknown, CtInfo:
l.InfoDepth(depth+1, d)
diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
index 9deee7f65..48b22d9cf 100644
--- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -20,20 +20,6 @@ import (
"context"
)
-// requestInfoKey is a struct to be used as the key to store RequestInfo in a
-// context.
-type requestInfoKey struct{}
-
-// NewRequestInfoContext creates a context with ri.
-func NewRequestInfoContext(ctx context.Context, ri any) context.Context {
- return context.WithValue(ctx, requestInfoKey{}, ri)
-}
-
-// RequestInfoFromContext extracts the RequestInfo from ctx.
-func RequestInfoFromContext(ctx context.Context) any {
- return ctx.Value(requestInfoKey{})
-}
-
// clientHandshakeInfoKey is a struct used as the key to store
// ClientHandshakeInfo in a context.
type clientHandshakeInfoKey struct{}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index cc5713fd9..91f760936 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -26,35 +26,31 @@ import (
)
var (
- // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
+ // EnableTXTServiceConfig is set if the DNS resolver should perform TXT
+ // lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not
+ // "false").
+ EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true)
+
+ // TXTErrIgnore is set if TXT errors should be ignored
+ // ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
+
// RingHashCap indicates the maximum ring size which defaults to 4096
// entries but may be overridden by setting the environment variable
// "GRPC_RING_HASH_CAP". This does not override the default bounds
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
- // LeastRequestLB is set if we should support the least_request_experimental
- // LB policy, which can be enabled by setting the environment variable
- // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
- LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false)
+
// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
// handshakes that can be performed.
ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
+
// EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
// should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this
// option is present for backward compatibility. This option may be overridden
// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
// or "false".
EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
- // XDSFallbackSupport is the env variable that controls whether support for
- // xDS fallback is turned on. If this is unset or is false, only the first
- // xDS server in the list of server configs will be used.
- XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true)
- // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
- // instead of the exiting pickfirst implementation. This can be disabled by
- // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
- // to "false".
- NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true)
// XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash
// key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by
@@ -69,6 +65,18 @@ var (
// to gRFC A76. It can be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true".
RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false)
+
+ // ALTSHandshakerKeepaliveParams is set if we should add the
+ // KeepaliveParams when dialing the ALTS handshaker service.
+ ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false)
+
+ // EnableDefaultPortForProxyTarget controls whether the resolver adds a default port 443
+ // to a target address that lacks one. This flag only has an effect when all of
+ // the following conditions are met:
+ // - A connect proxy is being used.
+ // - Target resolution is disabled.
+ // - The DNS resolver is being used.
+ EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true)
)
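
For reference, a stand-alone rendering of the default-true/default-false flag pattern these variables rely on (the real boolFromEnv lives in the unchanged part of this file; this sketch only illustrates the convention that a default-true flag such as GRPC_ENABLE_TXT_SERVICE_CONFIG is disabled only by an explicit "false"):

```go
package envexample

import (
	"os"
	"strings"
)

func boolFromEnvSketch(name string, def bool) bool {
	v, ok := os.LookupEnv(name)
	if !ok || v == "" {
		return def
	}
	if def {
		// Default-true flags stay on unless explicitly set to "false".
		return !strings.EqualFold(v, "false")
	}
	// Default-false flags turn on only when explicitly set to "true".
	return strings.EqualFold(v, "true")
}
```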
func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 2eb97f832..7685d08b5 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -63,4 +63,20 @@ var (
// For more details, see:
// https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md.
XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false)
+
+ // XDSSPIFFEEnabled controls if SPIFFE Bundle Maps can be used as roots of
+ // trust. For more details, see:
+ // https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md
+ XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false)
+
+ // XDSHTTPConnectEnabled is true if gRPC should parse custom Metadata
+ // configuring use of an HTTP CONNECT proxy via xDS from cluster resources.
+ // For more details, see:
+ // https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md
+ XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false)
+
+ // XDSBootstrapCallCredsEnabled controls if call credentials can be used in
+ // xDS bootstrap configuration via the `call_creds` field. For more details,
+ // see: https://github.com/grpc/proposal/blob/master/A97-xds-jwt-call-creds.md
+ XDSBootstrapCallCredsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_BOOTSTRAP_CALL_CREDS", false)
)
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index 8e8e86128..9b6d8a1fa 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -80,25 +80,11 @@ func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure
func (cs *CallbackSerializer) run(ctx context.Context) {
defer close(cs.done)
- // TODO: when Go 1.21 is the oldest supported version, this loop and Close
- // can be replaced with:
- //
- // context.AfterFunc(ctx, cs.callbacks.Close)
- for ctx.Err() == nil {
- select {
- case <-ctx.Done():
- // Do nothing here. Next iteration of the for loop will not happen,
- // since ctx.Err() would be non-nil.
- case cb := <-cs.callbacks.Get():
- cs.callbacks.Load()
- cb.(func(context.Context))(ctx)
- }
- }
-
- // Close the buffer to prevent new callbacks from being added.
- cs.callbacks.Close()
+ // Close the buffer when the context is canceled
+ // to prevent new callbacks from being added.
+ context.AfterFunc(ctx, cs.callbacks.Close)
- // Run all pending callbacks.
+ // Run all callbacks.
for cb := range cs.callbacks.Get() {
cs.callbacks.Load()
cb.(func(context.Context))(ctx)
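
A generic sketch of the drain pattern used here, with a plain channel standing in for the unbounded buffer; it assumes producers stop sending once the context is done, so closing the channel is safe.

```go
package syncexample

import "context"

// drainUntilCanceled runs queued work until ctx is canceled, then finishes
// whatever was already queued and returns.
func drainUntilCanceled(ctx context.Context, work chan func()) {
	// Close the queue once the context is done so the range loop can end.
	stop := context.AfterFunc(ctx, func() { close(work) })
	defer stop()

	for f := range work { // exits when work is closed and drained
		f()
	}
}
```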
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
index fbe697c37..d788c2493 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
@@ -21,28 +21,25 @@
package grpcsync
import (
- "sync"
"sync/atomic"
)
// Event represents a one-time event that may occur in the future.
type Event struct {
- fired int32
+ fired atomic.Bool
c chan struct{}
- o sync.Once
}
// Fire causes e to complete. It is safe to call multiple times, and
// concurrently. It returns true iff this call to Fire caused the signaling
-// channel returned by Done to close.
+// channel returned by Done to close. If Fire returns false, it is possible
+// the Done channel has not been closed yet.
func (e *Event) Fire() bool {
- ret := false
- e.o.Do(func() {
- atomic.StoreInt32(&e.fired, 1)
+ if e.fired.CompareAndSwap(false, true) {
close(e.c)
- ret = true
- })
- return ret
+ return true
+ }
+ return false
}
// Done returns a channel that will be closed when Fire is called.
@@ -52,7 +49,7 @@ func (e *Event) Done() <-chan struct{} {
// HasFired returns true if Fire has been called.
func (e *Event) HasFired() bool {
- return atomic.LoadInt32(&e.fired) == 1
+ return e.fired.Load()
}
// NewEvent returns a new, ready-to-use Event.
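
A small usage sketch for the rewritten Event (grpcsync is gRPC-internal, so this would only compile inside the module; the error channel is illustrative): exactly one concurrent Fire call returns true, and Done unblocks every waiter.

```go
package grpcsyncexample

import "google.golang.org/grpc/internal/grpcsync"

// firstFailure fires ev on the first error and reports whether this
// goroutine was the one whose Fire call closed the Done channel.
func firstFailure(ev *grpcsync.Event, errs <-chan error) bool {
	for err := range errs {
		if err != nil {
			return ev.Fire() // true only for the first caller to fire
		}
	}
	return false
}

func waitForFailure(ev *grpcsync.Event) {
	<-ev.Done() // unblocks once any goroutine has fired the event
}
```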
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 2ce012cda..2699223a2 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -182,35 +182,6 @@ var (
// other features, including the CSDS service.
NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
- // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
- // Specifier Plugin for testing purposes, regardless of the XDSRLS environment
- // variable.
- //
- // TODO: Remove this function once the RLS env var is removed.
- RegisterRLSClusterSpecifierPluginForTesting func()
-
- // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
- // Specifier Plugin for testing purposes. This is needed because there is no way
- // to unregister the RLS Cluster Specifier Plugin after registering it solely
- // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
- //
- // TODO: Remove this function once the RLS env var is removed.
- UnregisterRLSClusterSpecifierPluginForTesting func()
-
- // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
- // purposes, regardless of the RBAC environment variable.
- //
- // TODO: Remove this function once the RBAC env var is removed.
- RegisterRBACHTTPFilterForTesting func()
-
- // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
- // testing purposes. This is needed because there is no way to unregister the
- // HTTP Filter after registering it solely for testing purposes using
- // RegisterRBACHTTPFilterForTesting().
- //
- // TODO: Remove this function once the RBAC env var is removed.
- UnregisterRBACHTTPFilterForTesting func()
-
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)
@@ -266,6 +237,13 @@ var (
TimeAfterFunc = func(d time.Duration, f func()) Timer {
return time.AfterFunc(d, f)
}
+
+ // NewStreamWaitingForResolver is a test hook that is triggered when a
+ // new stream blocks while waiting for name resolution. This can be
+ // used in tests to synchronize resolver updates and avoid race conditions.
+ // When set, the function will be called before the stream enters
+ // the blocking state.
+ NewStreamWaitingForResolver = func() {}
)
// HealthChecker defines the signature of the client-side LB channel health
diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
index 20b8fb098..5bfa67b72 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
@@ -22,11 +22,13 @@ package delegatingresolver
import (
"fmt"
+ "net"
"net/http"
"net/url"
"sync"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/proxyattributes"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/internal/transport/networktype"
@@ -40,6 +42,8 @@ var (
HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
)
+const defaultPort = "443"
+
// delegatingResolver manages both target URI and proxy address resolution by
// delegating these tasks to separate child resolvers. Essentially, it acts as
// an intermediary between the gRPC ClientConn and the child resolvers.
@@ -107,10 +111,18 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti
targetResolver: nopResolver{},
}
+ addr := target.Endpoint()
var err error
- r.proxyURL, err = proxyURLForTarget(target.Endpoint())
+ if target.URL.Scheme == "dns" && !targetResolutionEnabled && envconfig.EnableDefaultPortForProxyTarget {
+ addr, err = parseTarget(addr)
+ if err != nil {
+ return nil, fmt.Errorf("delegating_resolver: invalid target address %q: %v", target.Endpoint(), err)
+ }
+ }
+
+ r.proxyURL, err = proxyURLForTarget(addr)
if err != nil {
- return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err)
+ return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %q: %v", target, err)
}
// proxy is not configured or proxy address excluded using `NO_PROXY` env
@@ -132,8 +144,8 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti
// bypass the target resolver and store the unresolved target address.
if target.URL.Scheme == "dns" && !targetResolutionEnabled {
r.targetResolverState = &resolver.State{
- Addresses: []resolver.Address{{Addr: target.Endpoint()}},
- Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
+ Addresses: []resolver.Address{{Addr: addr}},
+ Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: addr}}}},
}
r.updateTargetResolverState(*r.targetResolverState)
return r, nil
@@ -202,6 +214,44 @@ func needsProxyResolver(state *resolver.State) bool {
return false
}
+// parseTarget takes a target string and ensures it is a valid "host:port" target.
+//
+// It does the following:
+// 1. If the target already has a port (e.g., "host:port", "[ipv6]:port"),
+// it is returned as is.
+// 2. If the host part is empty (e.g., ":80"), it defaults to "localhost",
+// returning "localhost:80".
+// 3. If the target is missing a port (e.g., "host", "ipv6"), the defaultPort
+// is added.
+//
+// An error is returned for empty targets or targets with a trailing colon
+// but no port (e.g., "host:").
+func parseTarget(target string) (string, error) {
+ if target == "" {
+ return "", fmt.Errorf("missing address")
+ }
+
+ host, port, err := net.SplitHostPort(target)
+ if err != nil {
+ // If SplitHostPort fails, it's likely because the port is missing.
+ // We append the default port and return the result.
+ return net.JoinHostPort(target, defaultPort), nil
+ }
+
+ // If SplitHostPort succeeds, we check for edge cases.
+ if port == "" {
+ // A success with an empty port means the target had a trailing colon,
+ // e.g., "host:", which is an error.
+ return "", fmt.Errorf("missing port after port-separator colon")
+ }
+ if host == "" {
+ // A success with an empty host means the target was like ":80".
+ // We default the host to "localhost".
+ host = "localhost"
+ }
+ return net.JoinHostPort(host, port), nil
+}
+
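
A hypothetical table test for the rules documented above; the expected strings follow net.SplitHostPort/net.JoinHostPort semantics, so a bare IPv6 address comes back bracketed.

```go
package delegatingresolver

import "testing"

func TestParseTargetDefaults(t *testing.T) {
	cases := []struct{ in, want string }{
		{"example.com", "example.com:443"},       // default port appended
		{"example.com:8080", "example.com:8080"}, // already host:port
		{"2001:db8::1", "[2001:db8::1]:443"},     // bare IPv6 gets brackets and a port
		{":80", "localhost:80"},                  // empty host defaults to localhost
	}
	for _, c := range cases {
		if got, err := parseTarget(c.in); err != nil || got != c.want {
			t.Errorf("parseTarget(%q) = %q, %v; want %q", c.in, got, err, c.want)
		}
	}
	for _, bad := range []string{"", "example.com:"} {
		if _, err := parseTarget(bad); err == nil {
			t.Errorf("parseTarget(%q): expected error", bad)
		}
	}
}
```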
func skipProxy(address resolver.Address) bool {
// Avoid proxy when network is not tcp.
networkType, ok := networktype.Get(address)
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index ba5c5a95d..ada5251cf 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -132,13 +132,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
// DNS address (non-IP).
ctx, cancel := context.WithCancel(context.Background())
d := &dnsResolver{
- host: host,
- port: port,
- ctx: ctx,
- cancel: cancel,
- cc: cc,
- rn: make(chan struct{}, 1),
- disableServiceConfig: opts.DisableServiceConfig,
+ host: host,
+ port: port,
+ ctx: ctx,
+ cancel: cancel,
+ cc: cc,
+ rn: make(chan struct{}, 1),
+ enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig,
}
d.resolver, err = internal.NewNetResolver(target.URL.Host)
@@ -181,8 +181,8 @@ type dnsResolver struct {
// finishes, race detector sometimes will warn lookup (READ the lookup
// function pointers) inside watcher() goroutine has data race with
// replaceNetFunc (WRITE the lookup function pointers).
- wg sync.WaitGroup
- disableServiceConfig bool
+ wg sync.WaitGroup
+ enableServiceConfig bool
}
// ResolveNow invoke an immediate resolution of the target that this
@@ -346,7 +346,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
if len(srv) > 0 {
state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
}
- if !d.disableServiceConfig {
+ if d.enableServiceConfig {
state.ServiceConfig = d.lookupTXT(ctx)
}
return &state, nil
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
index 79044657b..d5f7e4d62 100644
--- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -64,6 +64,16 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle,
}
}
+// RecordInt64UpDownCount records the measurement alongside labels on the int
+// up-down counter associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64UpDownCount(handle *estats.Int64UpDownCountHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64UpDownCount(handle, incr, labels...)
+ }
+}
+
// RecordFloat64Count records the measurement alongside labels on the float
// count associated with the provided handle.
func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
diff --git a/vendor/google.golang.org/grpc/internal/stats/stats.go b/vendor/google.golang.org/grpc/internal/stats/stats.go
new file mode 100644
index 000000000..49019b80d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/stats.go
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2025 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+ "context"
+
+ "google.golang.org/grpc/stats"
+)
+
+type combinedHandler struct {
+ handlers []stats.Handler
+}
+
+// NewCombinedHandler combines multiple stats.Handlers into a single handler.
+//
+// It returns nil if no handlers are provided. If only one handler is
+// provided, it is returned directly without wrapping.
+func NewCombinedHandler(handlers ...stats.Handler) stats.Handler {
+ switch len(handlers) {
+ case 0:
+ return nil
+ case 1:
+ return handlers[0]
+ default:
+ return &combinedHandler{handlers: handlers}
+ }
+}
+
+func (ch *combinedHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ for _, h := range ch.handlers {
+ ctx = h.TagRPC(ctx, info)
+ }
+ return ctx
+}
+
+func (ch *combinedHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) {
+ for _, h := range ch.handlers {
+ h.HandleRPC(ctx, stats)
+ }
+}
+
+func (ch *combinedHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+ for _, h := range ch.handlers {
+ ctx = h.TagConn(ctx, info)
+ }
+ return ctx
+}
+
+func (ch *combinedHandler) HandleConn(ctx context.Context, stats stats.ConnStats) {
+ for _, h := range ch.handlers {
+ h.HandleConn(ctx, stats)
+ }
+}
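
Sketch of how the helper might be consumed (the handler slice is a placeholder; NewCombinedHandler is internal to the gRPC module). TagRPC/TagConn thread the context through the handlers in order, while HandleRPC/HandleConn fan out to all of them.

```go
package statsexample

import (
	istats "google.golang.org/grpc/internal/stats"
	"google.golang.org/grpc/stats"
)

func attach(handlers []stats.Handler) stats.Handler {
	h := istats.NewCombinedHandler(handlers...)
	if h == nil {
		return nil // no stats handlers configured
	}
	// h can now be stored where a single stats.Handler field is expected,
	// e.g. the statsHandler fields introduced later in this diff.
	return h
}
```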
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 1186f1e9a..aad171cd0 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -236,3 +236,11 @@ func IsRestrictedControlPlaneCode(s *Status) bool {
}
return false
}
+
+// RawStatusProto returns the internal protobuf message for use by gRPC itself.
+func RawStatusProto(s *Status) *spb.Status {
+ if s == nil {
+ return nil
+ }
+ return s.s
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
index ccc0e017e..980452519 100644
--- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go
+++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
@@ -29,25 +29,27 @@ import (
// ClientStream implements streaming functionality for a gRPC client.
type ClientStream struct {
- *Stream // Embed for common stream functionality.
+ Stream // Embed for common stream functionality.
ct *http2Client
done chan struct{} // closed at the end of stream to unblock writers.
doneFunc func() // invoked at the end of stream.
- headerChan chan struct{} // closed to indicate the end of header metadata.
- headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ headerChan chan struct{} // closed to indicate the end of header metadata.
+ header metadata.MD // the received header metadata
+
+ status *status.Status // the status error received from the server
+
+ // Non-pointer fields are at the end to optimize GC allocations.
+
// headerValid indicates whether a valid header was received. Only
// meaningful after headerChan is closed (always call waitOnHeader() before
// reading its value).
- headerValid bool
- header metadata.MD // the received header metadata
- noHeaders bool // set if the client never received headers (set only after the stream is done).
-
- bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
- unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
-
- status *status.Status // the status error received from the server
+ headerValid bool
+ noHeaders bool // set if the client never received headers (set only after the stream is done).
+ headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
+ unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
}
// Read reads an n byte message from the input stream.
@@ -142,3 +144,11 @@ func (s *ClientStream) TrailersOnly() bool {
func (s *ClientStream) Status() *status.Status {
return s.status
}
+
+func (s *ClientStream) requestRead(n int) {
+ s.ct.adjustWindow(s, uint32(n))
+}
+
+func (s *ClientStream) updateWindow(n int) {
+ s.ct.updateWindow(s, uint32(n))
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index ef72fbb3a..2dcd1e63b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -40,6 +40,13 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
e.SetMaxDynamicTableSizeLimit(v)
}
+// itemNodePool is used to reduce heap allocations.
+var itemNodePool = sync.Pool{
+ New: func() any {
+ return &itemNode{}
+ },
+}
+
type itemNode struct {
it any
next *itemNode
@@ -51,7 +58,9 @@ type itemList struct {
}
func (il *itemList) enqueue(i any) {
- n := &itemNode{it: i}
+ n := itemNodePool.Get().(*itemNode)
+ n.next = nil
+ n.it = i
if il.tail == nil {
il.head, il.tail = n, n
return
@@ -71,7 +80,9 @@ func (il *itemList) dequeue() any {
return nil
}
i := il.head.it
+ temp := il.head
il.head = il.head.next
+ itemNodePool.Put(temp)
if il.head == nil {
il.tail = nil
}
@@ -146,10 +157,11 @@ type earlyAbortStream struct {
func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
type dataFrame struct {
- streamID uint32
- endStream bool
- h []byte
- reader mem.Reader
+ streamID uint32
+ endStream bool
+ h []byte
+ data mem.BufferSlice
+ processing bool
// onEachWrite is called every time
// a part of data is written out.
onEachWrite func()
@@ -234,6 +246,7 @@ type outStream struct {
itl *itemList
bytesOutStanding int
wq *writeQuota
+ reader mem.Reader
next *outStream
prev *outStream
@@ -461,7 +474,9 @@ func (c *controlBuffer) finish() {
v.onOrphaned(ErrConnClosing)
}
case *dataFrame:
- _ = v.reader.Close()
+ if !v.processing {
+ v.data.Free()
+ }
}
}
@@ -481,6 +496,16 @@ const (
serverSide
)
+// maxWriteBufSize is the maximum length (number of elements) the cached
+// writeBuf can grow to. The length depends on the number of buffers
+// contained within the BufferSlice produced by the codec, which is
+// generally small.
+//
+// If a writeBuf larger than this limit is required, it will be allocated
+// and freed after use, rather than being cached. This avoids holding
+// on to large amounts of memory.
+const maxWriteBufSize = 64
+
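
A stand-alone sketch of the caching policy described above: reuse the scratch slice of byte-slice views across writes, but drop it once it grows past the cap (the type and constant names are illustrative).

```go
package bufexample

const maxScratchLen = 64

type frameWriter struct {
	scratch [][]byte // reused across writes to avoid per-frame allocations
}

func (w *frameWriter) write(parts [][]byte) {
	w.scratch = append(w.scratch[:0], parts...)
	// ... hand w.scratch to the framer ...

	if cap(w.scratch) > maxScratchLen {
		w.scratch = nil // unusually large; let the GC reclaim it
	} else {
		clear(w.scratch) // drop references so the underlying buffers can be freed
	}
}
```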
// Loopy receives frames from the control buffer.
// Each frame is handled individually; most of the work done by loopy goes
// into handling data frames. Loopy maintains a queue of active streams, and each
@@ -515,6 +540,8 @@ type loopyWriter struct {
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
+
+ writeBuf [][]byte // cached slice to avoid heap allocations for calls to mem.Reader.Peek.
}
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
@@ -790,10 +817,13 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
// a RST_STREAM before stream initialization thus the stream might
// not be established yet.
delete(l.estdStreams, c.streamID)
+ str.reader.Close()
str.deleteSelf()
for head := str.itl.dequeueAll(); head != nil; head = head.next {
if df, ok := head.it.(*dataFrame); ok {
- _ = df.reader.Close()
+ if !df.processing {
+ df.data.Free()
+ }
}
}
}
@@ -928,7 +958,13 @@ func (l *loopyWriter) processData() (bool, error) {
if str == nil {
return true, nil
}
+ reader := &str.reader
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
+ if !dataItem.processing {
+ dataItem.processing = true
+ reader.Reset(dataItem.data)
+ dataItem.data.Free()
+ }
// A data item is represented by a dataFrame, since it later translates into
// multiple HTTP2 data frames.
// Every dataFrame has two buffers; h that keeps grpc-message header and data
@@ -936,13 +972,13 @@ func (l *loopyWriter) processData() (bool, error) {
// from data is copied to h to make as big as the maximum possible HTTP2 frame
// size.
- if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
+ if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
- if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+ if err := l.framer.writeData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
- _ = dataItem.reader.Close()
+ reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -971,29 +1007,24 @@ func (l *loopyWriter) processData() (bool, error) {
}
// Compute how much of the header and data we can send within quota and max frame length
hSize := min(maxSize, len(dataItem.h))
- dSize := min(maxSize-hSize, dataItem.reader.Remaining())
- remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
+ dSize := min(maxSize-hSize, reader.Remaining())
+ remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize
size := hSize + dSize
- var buf *[]byte
-
- if hSize != 0 && dSize == 0 {
- buf = &dataItem.h
- } else {
- // Note: this is only necessary because the http2.Framer does not support
- // partially writing a frame, so the sequence must be materialized into a buffer.
- // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
- pool := l.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
+ l.writeBuf = l.writeBuf[:0]
+ if hSize > 0 {
+ l.writeBuf = append(l.writeBuf, dataItem.h[:hSize])
+ }
+ if dSize > 0 {
+ var err error
+ l.writeBuf, err = reader.Peek(dSize, l.writeBuf)
+ if err != nil {
+ // This must never happen since the reader must have at least dSize
+ // bytes.
+ // Log an error to fail tests.
+ l.logger.Errorf("unexpected error while reading Data frame payload: %v", err)
+ return false, err
}
- buf = pool.Get(size)
- defer pool.Put(buf)
-
- copy((*buf)[:hSize], dataItem.h)
- _, _ = dataItem.reader.Read((*buf)[hSize:])
}
// Now that outgoing flow controls are checked we can replenish str's write quota
@@ -1006,7 +1037,14 @@ func (l *loopyWriter) processData() (bool, error) {
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
+ err := l.framer.writeData(dataItem.streamID, endStream, l.writeBuf)
+ reader.Discard(dSize)
+ if cap(l.writeBuf) > maxWriteBufSize {
+ l.writeBuf = nil
+ } else {
+ clear(l.writeBuf)
+ }
+ if err != nil {
return false, err
}
str.bytesOutStanding += size
@@ -1014,7 +1052,7 @@ func (l *loopyWriter) processData() (bool, error) {
dataItem.h = dataItem.h[hSize:]
if remainingBytes == 0 { // All the data from that message was written out.
- _ = dataItem.reader.Close()
+ reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index dfc0f224e..7cfbc9637 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -28,7 +28,7 @@ import (
// writeQuota is a soft limit on the amount of data a stream can
// schedule before some of it is written out.
type writeQuota struct {
- quota int32
+ _ noCopy
// get waits on read from when quota goes less than or equal to zero.
// replenish writes on it when quota goes positive again.
ch chan struct{}
@@ -38,16 +38,17 @@ type writeQuota struct {
// It is implemented as a field so that it can be updated
// by tests.
replenish func(n int)
+ quota int32
}
-func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
- w := &writeQuota{
- quota: sz,
- ch: make(chan struct{}, 1),
- done: done,
- }
+// init allows a writeQuota to be initialized in-place, which is useful for
+// resetting a buffer or for avoiding a heap allocation when the buffer is
+// embedded in another struct.
+func (w *writeQuota) init(sz int32, done <-chan struct{}) {
+ w.quota = sz
+ w.ch = make(chan struct{}, 1)
+ w.done = done
w.replenish = w.realReplenish
- return w
}
func (w *writeQuota) get(sz int32) error {
@@ -67,9 +68,9 @@ func (w *writeQuota) get(sz int32) error {
func (w *writeQuota) realReplenish(n int) {
sz := int32(n)
- a := atomic.AddInt32(&w.quota, sz)
- b := a - sz
- if b <= 0 && a > 0 {
+ newQuota := atomic.AddInt32(&w.quota, sz)
+ previousQuota := newQuota - sz
+ if previousQuota <= 0 && newQuota > 0 {
select {
case w.ch <- struct{}{}:
default:
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 3dea23573..7ab3422b8 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -50,7 +50,7 @@ import (
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
if r.Method != http.MethodPost {
w.Header().Set("Allow", http.MethodPost)
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
@@ -170,7 +170,7 @@ type serverHandlerTransport struct {
// TODO make sure this is consistent across handler_server and http2_server
contentSubtype string
- stats []stats.Handler
+ stats stats.Handler
logger *grpclog.PrefixLogger
bufferPool mem.BufferPool
@@ -274,14 +274,14 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status
}
})
- if err == nil { // transport has not been closed
+ if err == nil && ht.stats != nil { // transport has not been closed
// Note: The trailer fields are compressed with hpack after this call returns.
// No WireLength field is set here.
- for _, sh := range ht.stats {
- sh.HandleRPC(s.Context(), &stats.OutTrailer{
- Trailer: s.trailer.Copy(),
- })
- }
+ s.hdrMu.Lock()
+ ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+ Trailer: s.trailer.Copy(),
+ })
+ s.hdrMu.Unlock()
}
ht.Close(errors.New("finished writing status"))
return err
@@ -372,19 +372,23 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e
ht.rw.(http.Flusher).Flush()
})
- if err == nil {
- for _, sh := range ht.stats {
- // Note: The header fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- sh.HandleRPC(s.Context(), &stats.OutHeader{
- Header: md.Copy(),
- Compression: s.sendCompress,
- })
- }
+ if err == nil && ht.stats != nil {
+ // Note: The header fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
+ Header: md.Copy(),
+ Compression: s.sendCompress,
+ })
}
return err
}
+func (ht *serverHandlerTransport) adjustWindow(*ServerStream, uint32) {
+}
+
+func (ht *serverHandlerTransport) updateWindow(*ServerStream, uint32) {
+}
+
func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
// With this transport type there will be exactly 1 stream: this HTTP request.
var cancel context.CancelFunc
@@ -409,11 +413,9 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
req := ht.req
s := &ServerStream{
- Stream: &Stream{
+ Stream: Stream{
id: 0, // irrelevant
ctx: ctx,
- requestRead: func(int) {},
- buf: newRecvBuffer(),
method: req.URL.Path,
recvCompress: req.Header.Get("grpc-encoding"),
contentSubtype: ht.contentSubtype,
@@ -422,9 +424,11 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
st: ht,
headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
- s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
- windowHandler: func(int) {},
+ s.Stream.buf.init()
+ s.readRequester = s
+ s.trReader = transportReader{
+ reader: recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: &s.buf},
+ windowHandler: s,
}
// readerDone is closed when the Body.Read-ing goroutine exits.
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 171e690a3..65b4ab243 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -44,6 +44,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
"google.golang.org/grpc/internal/proxyattributes"
+ istats "google.golang.org/grpc/internal/stats"
istatus "google.golang.org/grpc/internal/status"
isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
@@ -105,7 +106,7 @@ type http2Client struct {
kp keepalive.ClientParameters
keepaliveEnabled bool
- statsHandlers []stats.Handler
+ statsHandler stats.Handler
initialWindowSize int32
@@ -309,11 +310,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
scheme = "https"
}
}
- dynamicWindow := true
icwz := int32(initialWindowSize)
if opts.InitialConnWindowSize >= defaultWindowSize {
icwz = opts.InitialConnWindowSize
- dynamicWindow = false
}
writeBufSize := opts.WriteBufferSize
readBufSize := opts.ReadBufferSize
@@ -337,14 +336,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
writerDone: make(chan struct{}),
goAway: make(chan struct{}),
keepaliveDone: make(chan struct{}),
- framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
+ framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize, opts.BufferPool),
fc: &trInFlow{limit: uint32(icwz)},
scheme: scheme,
activeStreams: make(map[uint32]*ClientStream),
isSecure: isSecure,
perRPCCreds: perRPCCreds,
kp: kp,
- statsHandlers: opts.StatsHandlers,
+ statsHandler: istats.NewCombinedHandler(opts.StatsHandlers...),
initialWindowSize: initialWindowSize,
nextID: 1,
maxConcurrentStreams: defaultMaxStreamsClient,
@@ -381,23 +380,21 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
t.controlBuf = newControlBuffer(t.ctxDone)
if opts.InitialWindowSize >= defaultWindowSize {
t.initialWindowSize = opts.InitialWindowSize
- dynamicWindow = false
}
- if dynamicWindow {
+ if !opts.StaticWindowSize {
t.bdpEst = &bdpEstimator{
bdp: initialWindowSize,
updateFlowControl: t.updateFlowControl,
}
}
- for _, sh := range t.statsHandlers {
- t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
+ if t.statsHandler != nil {
+ t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
- connBegin := &stats.ConnBegin{
+ t.statsHandler.HandleConn(t.ctx, &stats.ConnBegin{
Client: true,
- }
- sh.HandleConn(t.ctx, connBegin)
+ })
}
if t.keepaliveEnabled {
t.kpDormancyCond = sync.NewCond(&t.mu)
@@ -484,10 +481,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
s := &ClientStream{
- Stream: &Stream{
+ Stream: Stream{
method: callHdr.Method,
sendCompress: callHdr.SendCompress,
- buf: newRecvBuffer(),
contentSubtype: callHdr.ContentSubtype,
},
ct: t,
@@ -495,26 +491,21 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt
headerChan: make(chan struct{}),
doneFunc: callHdr.DoneFunc,
}
- s.wq = newWriteQuota(defaultWriteQuota, s.done)
- s.requestRead = func(n int) {
- t.adjustWindow(s, uint32(n))
- }
+ s.Stream.buf.init()
+ s.Stream.wq.init(defaultWriteQuota, s.done)
+ s.readRequester = s
// The client side stream context should have exactly the same life cycle with the user provided context.
// That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
// So we use the original context here instead of creating a copy.
s.ctx = ctx
- s.trReader = &transportReader{
- reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctx.Done(),
- recv: s.buf,
- closeStream: func(err error) {
- s.Close(err)
- },
- },
- windowHandler: func(n int) {
- t.updateWindow(s, uint32(n))
+ s.trReader = transportReader{
+ reader: recvBufferReader{
+ ctx: s.ctx,
+ ctxDone: s.ctx.Done(),
+ recv: &s.buf,
+ clientStream: s,
},
+ windowHandler: s,
}
return s
}
@@ -545,7 +536,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
Method: callHdr.Method,
AuthInfo: t.authInfo,
}
- ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
+ ctxWithRequestInfo := credentials.NewContextWithRequestInfo(ctx, ri)
authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
if err != nil {
return nil, err
@@ -559,6 +550,19 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
// Make the slice of certain predictable size to reduce allocations made by append.
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
hfLen += len(authData) + len(callAuthData)
+ registeredCompressors := t.registeredCompressors
+ if callHdr.PreviousAttempts > 0 {
+ hfLen++
+ }
+ if callHdr.SendCompress != "" {
+ hfLen++
+ }
+ if registeredCompressors != "" {
+ hfLen++
+ }
+ if _, ok := ctx.Deadline(); ok {
+ hfLen++
+ }
headerFields := make([]hpack.HeaderField, 0, hfLen)
headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
@@ -571,7 +575,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
}
- registeredCompressors := t.registeredCompressors
if callHdr.SendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
// Include the outgoing compressor name when compressor is not registered
@@ -592,6 +595,9 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
// Send out timeout regardless its value. The server can detect timeout context by itself.
// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
timeout := time.Until(dl)
+ if timeout <= 0 {
+ return nil, status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
+ }
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
}
for k, v := range authData {
@@ -749,6 +755,25 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
callHdr = &newCallHdr
}
+ // The authority specified via the `CallAuthority` CallOption takes the
+ // highest precedence when determining the `:authority` header. It overrides
+ // any value present in the Host field of CallHdr. Before applying this
+ // override, the authority string is validated. If the credentials do not
+ // implement the AuthorityValidator interface, or if validation fails, the
+ // RPC is failed with a status code of `UNAVAILABLE`.
+ if callHdr.Authority != "" {
+ auth, ok := t.authInfo.(credentials.AuthorityValidator)
+ if !ok {
+ return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "credentials type %q does not implement the AuthorityValidator interface, but authority override specified with CallAuthority call option", t.authInfo.AuthType())}
+ }
+ if err := auth.ValidateAuthority(callHdr.Authority); err != nil {
+ return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "failed to validate authority %q : %v", callHdr.Authority, err)}
+ }
+ newCallHdr := *callHdr
+ newCallHdr.Host = callHdr.Authority
+ callHdr = &newCallHdr
+ }
+
headerFields, err := t.createHeaderFields(ctx, callHdr)
if err != nil {
return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
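
For illustration, a hypothetical AuthInfo that accepts per-call authority overrides from an allow list, satisfying the credentials.AuthorityValidator assertion above (the type and allow list are invented).

```go
package credsexample

import (
	"fmt"

	"google.golang.org/grpc/credentials"
)

// allowListAuthInfo is an illustrative AuthInfo whose connections accept
// :authority overrides only for a fixed set of names.
type allowListAuthInfo struct {
	credentials.CommonAuthInfo
	allowed map[string]bool
}

func (allowListAuthInfo) AuthType() string { return "allowlist" }

// ValidateAuthority implements credentials.AuthorityValidator, which the
// transport consults before honoring grpc.CallAuthority.
func (a allowListAuthInfo) ValidateAuthority(authority string) error {
	if !a.allowed[authority] {
		return fmt.Errorf("authority %q is not allowed on this connection", authority)
	}
	return nil
}
```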
@@ -792,7 +817,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
return nil
},
onOrphaned: cleanup,
- wq: s.wq,
+ wq: &s.wq,
}
firstTry := true
var ch chan struct{}
@@ -823,7 +848,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
transportDrainRequired = t.nextID > MaxStreamID
s.id = hdr.streamID
- s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
+ s.fc = inFlow{limit: uint32(t.initialWindowSize)}
t.activeStreams[s.id] = s
t.mu.Unlock()
@@ -874,27 +899,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
}
}
- if len(t.statsHandlers) != 0 {
+ if t.statsHandler != nil {
header, ok := metadata.FromOutgoingContext(ctx)
if ok {
header.Set("user-agent", t.userAgent)
} else {
header = metadata.Pairs("user-agent", t.userAgent)
}
- for _, sh := range t.statsHandlers {
- // Note: The header fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- // Note: Creating a new stats object to prevent pollution.
- outHeader := &stats.OutHeader{
- Client: true,
- FullMethod: callHdr.Method,
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- Compression: callHdr.SendCompress,
- Header: header,
- }
- sh.HandleRPC(s.ctx, outHeader)
- }
+ // Note: The header fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{
+ Client: true,
+ FullMethod: callHdr.Method,
+ RemoteAddr: t.remoteAddr,
+ LocalAddr: t.localAddr,
+ Compression: callHdr.SendCompress,
+ Header: header,
+ })
}
if transportDrainRequired {
if t.logger.V(logLevel) {
@@ -971,6 +992,9 @@ func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode
// accessed anymore.
func (t *http2Client) Close(err error) {
t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
+ // For background on the deadline value chosen here, see
+ // https://github.com/grpc/grpc-go/issues/8425#issuecomment-3057938248 .
+ t.conn.SetReadDeadline(time.Now().Add(time.Second))
t.mu.Lock()
// Make sure we only close once.
if t.state == closing {
@@ -1032,11 +1056,10 @@ func (t *http2Client) Close(err error) {
for _, s := range streams {
t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
}
- for _, sh := range t.statsHandlers {
- connEnd := &stats.ConnEnd{
+ if t.statsHandler != nil {
+ t.statsHandler.HandleConn(t.ctx, &stats.ConnEnd{
Client: true,
- }
- sh.HandleConn(t.ctx, connEnd)
+ })
}
}
@@ -1069,32 +1092,29 @@ func (t *http2Client) GracefulClose() {
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
- reader := data.Reader()
-
if opts.Last {
// If it's the last message, update stream state.
if !s.compareAndSwapState(streamActive, streamWriteDone) {
- _ = reader.Close()
return errStreamDone
}
} else if s.getState() != streamActive {
- _ = reader.Close()
return errStreamDone
}
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
h: hdr,
- reader: reader,
+ data: data,
}
- if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
- if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
- _ = reader.Close()
+ dataLen := data.Len()
+ if hdr != nil || dataLen != 0 { // If it's not an empty data frame, check quota.
+ if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil {
return err
}
}
+ data.Ref()
if err := t.controlBuf.put(df); err != nil {
- _ = reader.Close()
+ data.Free()
return err
}
t.incrMsgSent()
@@ -1150,7 +1170,7 @@ func (t *http2Client) updateFlowControl(n uint32) {
})
}
-func (t *http2Client) handleData(f *http2.DataFrame) {
+func (t *http2Client) handleData(f *parsedDataFrame) {
size := f.Header().Length
var sendBDPPing bool
if t.bdpEst != nil {
@@ -1194,22 +1214,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
return
}
+ dataLen := f.data.Len()
if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
+ if w := s.fc.onRead(size - uint32(dataLen)); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
}
}
- // TODO(bradfitz, zhaoq): A copy is required here because there is no
- // guarantee f.Data() is consumed before the arrival of next frame.
- // Can this copy be eliminated?
- if len(f.Data()) > 0 {
- pool := t.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
- }
- s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
+ if dataLen > 0 {
+ f.data.Ref()
+ s.write(recvMsg{buffer: f.data})
}
}
// The server has closed the stream without sending trailers. Record that
@@ -1449,17 +1462,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
contentTypeErr = "malformed header: missing HTTP content-type"
grpcMessage string
recvCompress string
- httpStatusCode *int
httpStatusErr string
- rawStatusCode = codes.Unknown
+ // the code from the grpc-status header, if present
+ grpcStatusCode = codes.Unknown
// headerError is set if an error is encountered while parsing the headers
headerError string
+ httpStatus string
)
- if initialHeader {
- httpStatusErr = "malformed header: missing HTTP status"
- }
-
for _, hf := range frame.Fields {
switch hf.Name {
case "content-type":
@@ -1479,31 +1489,11 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
- rawStatusCode = codes.Code(uint32(code))
+ grpcStatusCode = codes.Code(uint32(code))
case "grpc-message":
grpcMessage = decodeGrpcMessage(hf.Value)
case ":status":
- if hf.Value == "200" {
- httpStatusErr = ""
- statusCode := 200
- httpStatusCode = &statusCode
- break
- }
-
- c, err := strconv.ParseInt(hf.Value, 10, 32)
- if err != nil {
- se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
- t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
- return
- }
- statusCode := int(c)
- httpStatusCode = &statusCode
-
- httpStatusErr = fmt.Sprintf(
- "unexpected HTTP status code received from server: %d (%s)",
- statusCode,
- http.StatusText(statusCode),
- )
+ httpStatus = hf.Value
default:
if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
break
@@ -1518,25 +1508,52 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
}
- if !isGRPC || httpStatusErr != "" {
- var code = codes.Internal // when header does not include HTTP status, return INTERNAL
-
- if httpStatusCode != nil {
+ // If a non-gRPC response is received, then evaluate the HTTP status to
+ // process the response and close the stream.
+ // In case the http status doesn't provide any error information (status: 200),
+ // then evaluate the response code as Unknown.
+ if !isGRPC {
+ var grpcErrorCode = codes.Internal
+ if httpStatus == "" {
+ httpStatusErr = "malformed header: missing HTTP status"
+ } else {
+ // Parse the status code (e.g. "200", "404").
+ statusCode, err := strconv.Atoi(httpStatus)
+ if err != nil {
+ se := status.New(grpcErrorCode, fmt.Sprintf("transport: malformed http-status: %v", err))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+ if statusCode >= 100 && statusCode < 200 {
+ if endStream {
+ se := status.New(codes.Internal, fmt.Sprintf(
+ "protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ }
+ // In case of informational headers, return.
+ return
+ }
+ httpStatusErr = fmt.Sprintf(
+ "unexpected HTTP status code received from server: %d (%s)",
+ statusCode,
+ http.StatusText(statusCode),
+ )
var ok bool
- code, ok = HTTPStatusConvTab[*httpStatusCode]
+ grpcErrorCode, ok = HTTPStatusConvTab[statusCode]
if !ok {
- code = codes.Unknown
+ grpcErrorCode = codes.Unknown
}
}
var errs []string
if httpStatusErr != "" {
errs = append(errs, httpStatusErr)
}
+
if contentTypeErr != "" {
errs = append(errs, contentTypeErr)
}
- // Verify the HTTP response is a 200.
- se := status.New(code, strings.Join(errs, "; "))
+
+ se := status.New(grpcErrorCode, strings.Join(errs, "; "))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
@@ -1567,22 +1584,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
}
- for _, sh := range t.statsHandlers {
+ if t.statsHandler != nil {
if !endStream {
- inHeader := &stats.InHeader{
+ t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{
Client: true,
WireLength: int(frame.Header().Length),
Header: metadata.MD(mdata).Copy(),
Compression: s.recvCompress,
- }
- sh.HandleRPC(s.ctx, inHeader)
+ })
} else {
- inTrailer := &stats.InTrailer{
+ t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{
Client: true,
WireLength: int(frame.Header().Length),
Trailer: metadata.MD(mdata).Copy(),
- }
- sh.HandleRPC(s.ctx, inTrailer)
+ })
}
}
@@ -1590,7 +1605,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
return
}
- status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
+ status := istatus.NewWithProto(grpcStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
// If client received END_STREAM from server while stream was still active,
// send RST_STREAM.
@@ -1637,7 +1652,7 @@ func (t *http2Client) reader(errCh chan<- error) {
// loop to keep reading incoming messages on this transport.
for {
t.controlBuf.throttle()
- frame, err := t.framer.fr.ReadFrame()
+ frame, err := t.framer.readFrame()
if t.keepaliveEnabled {
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
}
@@ -1652,7 +1667,7 @@ func (t *http2Client) reader(errCh chan<- error) {
if s != nil {
// use error detail to provide better err message
code := http2ErrConvTab[se.Code]
- errorDetail := t.framer.fr.ErrorDetail()
+ errorDetail := t.framer.errorDetail()
var msg string
if errorDetail != nil {
msg = errorDetail.Error()
@@ -1670,8 +1685,9 @@ func (t *http2Client) reader(errCh chan<- error) {
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
t.operateHeaders(frame)
- case *http2.DataFrame:
+ case *parsedDataFrame:
t.handleData(frame)
+ frame.data.Free()
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 7e53eb173..6f78a6b0c 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -35,13 +35,15 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/protobuf/proto"
+
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/pretty"
+ istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/mem"
- "google.golang.org/protobuf/proto"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
@@ -85,7 +87,7 @@ type http2Server struct {
// updates, reset streams, and various settings) to the controller.
controlBuf *controlBuffer
fc *trInFlow
- stats []stats.Handler
+ stats stats.Handler
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
// Keepalive enforcement policy.
@@ -131,6 +133,10 @@ type http2Server struct {
maxStreamID uint32 // max stream ID ever seen
logger *grpclog.PrefixLogger
+ // setResetPingStrikes is stored as a closure instead of making this a
+ // method on http2Server to avoid a heap allocation when converting a method
+ // to a closure for passing to frames objects.
+ setResetPingStrikes func()
}
// NewServerTransport creates a http2 transport with conn and configuration
@@ -163,7 +169,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
if config.MaxHeaderListSize != nil {
maxHeaderListSize = *config.MaxHeaderListSize
}
- framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
+ framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize, config.BufferPool)
// Send initial settings as connection preface to client.
isettings := []http2.Setting{{
ID: http2.SettingMaxFrameSize,
@@ -175,16 +181,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
Val: config.MaxStreams,
})
}
- dynamicWindow := true
iwz := int32(initialWindowSize)
if config.InitialWindowSize >= defaultWindowSize {
iwz = config.InitialWindowSize
- dynamicWindow = false
}
icwz := int32(initialWindowSize)
if config.InitialConnWindowSize >= defaultWindowSize {
icwz = config.InitialConnWindowSize
- dynamicWindow = false
}
if iwz != defaultWindowSize {
isettings = append(isettings, http2.Setting{
@@ -258,13 +261,16 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
fc: &trInFlow{limit: uint32(icwz)},
state: reachable,
activeStreams: make(map[uint32]*ServerStream),
- stats: config.StatsHandlers,
+ stats: config.StatsHandler,
kp: kp,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
bufferPool: config.BufferPool,
}
+ t.setResetPingStrikes = func() {
+ atomic.StoreUint32(&t.resetPingStrikes, 1)
+ }
var czSecurity credentials.ChannelzSecurityValue
if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
czSecurity = au.GetSecurityValue()
@@ -284,7 +290,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.logger = prefixLoggerForServerTransport(t)
t.controlBuf = newControlBuffer(t.done)
- if dynamicWindow {
+ if !config.StaticWindowSize {
t.bdpEst = &bdpEstimator{
bdp: initialWindowSize,
updateFlowControl: t.updateFlowControl,
@@ -385,16 +391,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
}
t.maxStreamID = streamID
- buf := newRecvBuffer()
s := &ServerStream{
- Stream: &Stream{
- id: streamID,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ Stream: Stream{
+ id: streamID,
+ fc: inFlow{limit: uint32(t.initialWindowSize)},
},
st: t,
headerWireLength: int(frame.Header().Length),
}
+ s.Stream.buf.init()
var (
// if false, content-type was missing or invalid
isGRPC = false
@@ -595,10 +600,25 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
return nil
}
}
+
+ if s.ctx.Err() != nil {
+ t.mu.Unlock()
+ // Early abort in case the timeout was zero or so low it already fired.
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: http.StatusOK,
+ streamID: s.id,
+ contentSubtype: s.contentSubtype,
+ status: status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()),
+ rst: !frame.StreamEnded(),
+ })
+ return nil
+ }
+
t.activeStreams[streamID] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
}
+
// Start a timer to close the stream on reaching the deadline.
if timeoutSet {
// We need to wait for s.cancel to be updated before calling
@@ -620,25 +640,21 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
t.channelz.SocketMetrics.StreamsStarted.Add(1)
t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
}
- s.requestRead = func(n int) {
- t.adjustWindow(s, uint32(n))
- }
+ s.readRequester = s
s.ctxDone = s.ctx.Done()
- s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
- s.trReader = &transportReader{
- reader: &recvBufferReader{
+ s.Stream.wq.init(defaultWriteQuota, s.ctxDone)
+ s.trReader = transportReader{
+ reader: recvBufferReader{
ctx: s.ctx,
ctxDone: s.ctxDone,
- recv: s.buf,
- },
- windowHandler: func(n int) {
- t.updateWindow(s, uint32(n))
+ recv: &s.buf,
},
+ windowHandler: s,
}
// Register the stream with loopy.
t.controlBuf.put(&registerStream{
streamID: s.id,
- wq: s.wq,
+ wq: &s.wq,
})
handle(s)
return nil
@@ -654,7 +670,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre
}()
for {
t.controlBuf.throttle()
- frame, err := t.framer.fr.ReadFrame()
+ frame, err := t.framer.readFrame()
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
@@ -691,8 +707,9 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre
})
continue
}
- case *http2.DataFrame:
+ case *parsedDataFrame:
t.handleData(frame)
+ frame.data.Free()
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
@@ -772,7 +789,7 @@ func (t *http2Server) updateFlowControl(n uint32) {
}
-func (t *http2Server) handleData(f *http2.DataFrame) {
+func (t *http2Server) handleData(f *parsedDataFrame) {
size := f.Header().Length
var sendBDPPing bool
if t.bdpEst != nil {
@@ -817,22 +834,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
return
}
+ dataLen := f.data.Len()
if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
+ if w := s.fc.onRead(size - uint32(dataLen)); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
}
}
- // TODO(bradfitz, zhaoq): A copy is required here because there is no
- // guarantee f.Data() is consumed before the arrival of next frame.
- // Can this copy be eliminated?
- if len(f.Data()) > 0 {
- pool := t.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
- }
- s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
+ if dataLen > 0 {
+ f.data.Ref()
+ s.write(recvMsg{buffer: f.data})
}
}
if f.StreamEnded() {
@@ -1015,10 +1025,6 @@ func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error {
return nil
}
-func (t *http2Server) setResetPingStrikes() {
- atomic.StoreUint32(&t.resetPingStrikes, 1)
-}
-
func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
// first and create a slice of that exact size.
@@ -1043,19 +1049,18 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation
}
- for _, sh := range t.stats {
+ if t.stats != nil {
// Note: Headers are compressed with hpack after this call returns.
// No WireLength field is set here.
- outHeader := &stats.OutHeader{
+ t.stats.HandleRPC(s.Context(), &stats.OutHeader{
Header: s.header.Copy(),
Compression: s.sendCompress,
- }
- sh.HandleRPC(s.Context(), outHeader)
+ })
}
return nil
}
-// WriteStatus sends stream status to the client and terminates the stream.
+// writeStatus sends stream status to the client and terminates the stream.
// No further I/O operations can be performed on this stream.
// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
// OK is adopted.
@@ -1083,7 +1088,7 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
- if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ if p := istatus.RawStatusProto(st); len(p.GetDetails()) > 0 {
// Do not use the user's grpc-status-details-bin (if present) if we are
// even attempting to set our own.
delete(s.trailer, grpcStatusDetailsBinHeader)
@@ -1118,10 +1123,10 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
// Send a RST_STREAM after the trailers if the client has not already half-closed.
rst := s.getState() == streamActive
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
- for _, sh := range t.stats {
+ if t.stats != nil {
// Note: The trailer fields are compressed with hpack after this call returns.
// No WireLength field is set here.
- sh.HandleRPC(s.Context(), &stats.OutTrailer{
+ t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
Trailer: s.trailer.Copy(),
})
}
@@ -1131,17 +1136,13 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
// Write converts the data into an HTTP2 data frame and sends it out. A non-nil error
// is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
- reader := data.Reader()
-
if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.writeHeader(s, nil); err != nil {
- _ = reader.Close()
return err
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
- _ = reader.Close()
return t.streamContextErr(s)
}
}
@@ -1149,15 +1150,16 @@ func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _
df := &dataFrame{
streamID: s.id,
h: hdr,
- reader: reader,
+ data: data,
onEachWrite: t.setResetPingStrikes,
}
- if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
- _ = reader.Close()
+ dataLen := data.Len()
+ if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil {
return t.streamContextErr(s)
}
+ data.Ref()
if err := t.controlBuf.put(df); err != nil {
- _ = reader.Close()
+ data.Free()
return err
}
t.incrMsgSent()
@@ -1292,7 +1294,8 @@ func (t *http2Server) Close(err error) {
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
t.mu.Lock()
- if _, ok := t.activeStreams[s.id]; ok {
+ _, isActive := t.activeStreams[s.id]
+ if isActive {
delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
@@ -1300,7 +1303,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
}
t.mu.Unlock()
- if channelz.IsOn() {
+ if isActive && channelz.IsOn() {
if eosReceived {
t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
} else {
@@ -1340,10 +1343,10 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo
// called to interrupt the potential blocking on other goroutines.
s.cancel()
- oldState := s.swapState(streamDone)
- if oldState == streamDone {
- return
- }
+ // We can't return early even if the stream's state is "done" as the state
+ // might have been set by the `finishStream` method. Deleting the stream via
+ // `finishStream` can get blocked on flow control.
+ s.swapState(streamDone)
t.deleteStream(s, eosReceived)
t.controlBuf.put(&cleanupStream{
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index f997f9fdb..6209eb23c 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -25,7 +25,6 @@ import (
"fmt"
"io"
"math"
- "net"
"net/http"
"net/url"
"strconv"
@@ -37,6 +36,7 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/mem"
)
const (
@@ -196,11 +196,11 @@ func decodeTimeout(s string) (time.Duration, error) {
if !ok {
return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
}
- t, err := strconv.ParseInt(s[:size-1], 10, 64)
+ t, err := strconv.ParseUint(s[:size-1], 10, 64)
if err != nil {
return 0, err
}
- const maxHours = math.MaxInt64 / int64(time.Hour)
+ const maxHours = math.MaxInt64 / uint64(time.Hour)
if d == time.Hour && t > maxHours {
// This timeout would overflow math.MaxInt64; clamp it.
return time.Duration(math.MaxInt64), nil
@@ -300,11 +300,11 @@ type bufWriter struct {
buf []byte
offset int
batchSize int
- conn net.Conn
+ conn io.Writer
err error
}
-func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
+func newBufWriter(conn io.Writer, batchSize int, pool *sync.Pool) *bufWriter {
w := &bufWriter{
batchSize: batchSize,
conn: conn,
@@ -388,15 +388,35 @@ func toIOError(err error) error {
return ioError{error: err}
}
+type parsedDataFrame struct {
+ http2.FrameHeader
+ data mem.Buffer
+}
+
+func (df *parsedDataFrame) StreamEnded() bool {
+ return df.FrameHeader.Flags.Has(http2.FlagDataEndStream)
+}
+
type framer struct {
- writer *bufWriter
- fr *http2.Framer
+ writer *bufWriter
+ fr *http2.Framer
+ headerBuf []byte // cached slice for framer headers to reduce heap allocs.
+ reader io.Reader
+ dataFrame parsedDataFrame // Cached data frame to avoid heap allocations.
+ pool mem.BufferPool
+ errDetail error
}
var writeBufferPoolMap = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
-func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
+func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer {
+ if memPool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream
+ // is always initialized with a BufferPool.
+ memPool = mem.DefaultBufferPool()
+ }
+
if writeBufferSize < 0 {
writeBufferSize = 0
}
@@ -412,6 +432,8 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
f := &framer{
writer: w,
fr: http2.NewFramer(w, r),
+ reader: r,
+ pool: memPool,
}
f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
// Opt-in to Frame reuse API on framer to reduce garbage.
@@ -422,6 +444,146 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
return f
}
+// writeData writes a DATA frame.
+//
+// It is the caller's responsibility not to violate the maximum frame size.
+func (f *framer) writeData(streamID uint32, endStream bool, data [][]byte) error {
+ var flags http2.Flags
+ if endStream {
+ flags = http2.FlagDataEndStream
+ }
+ length := uint32(0)
+ for _, d := range data {
+ length += uint32(len(d))
+ }
+ // TODO: Replace the header write with the framer API being added in
+ // https://github.com/golang/go/issues/66655.
+ f.headerBuf = append(f.headerBuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length),
+ byte(http2.FrameData),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+ if _, err := f.writer.Write(f.headerBuf); err != nil {
+ return err
+ }
+ for _, d := range data {
+ if _, err := f.writer.Write(d); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// readFrame reads a single frame. The returned Frame is only valid
+// until the next call to readFrame.
+func (f *framer) readFrame() (any, error) {
+ f.errDetail = nil
+ fh, err := f.fr.ReadFrameHeader()
+ if err != nil {
+ f.errDetail = f.fr.ErrorDetail()
+ return nil, err
+ }
+ // Read the data frame directly from the underlying io.Reader to avoid
+ // copies.
+ if fh.Type == http2.FrameData {
+ err = f.readDataFrame(fh)
+ return &f.dataFrame, err
+ }
+ fr, err := f.fr.ReadFrameForHeader(fh)
+ if err != nil {
+ f.errDetail = f.fr.ErrorDetail()
+ return nil, err
+ }
+ return fr, err
+}
+
+// errorDetail returns a more detailed error of the last error
+// returned by framer.readFrame. For instance, if readFrame
+// returns a StreamError with code PROTOCOL_ERROR, errorDetail
+// will say exactly what was invalid. errorDetail is not guaranteed
+// to return a non-nil value.
+// errorDetail is reset after the next call to readFrame.
+func (f *framer) errorDetail() error {
+ return f.errDetail
+}
+
+func (f *framer) readDataFrame(fh http2.FrameHeader) (err error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ f.errDetail = errors.New("DATA frame with stream ID 0")
+ return http2.ConnectionError(http2.ErrCodeProtocol)
+ }
+ // Converting a *[]byte to a mem.SliceBuffer incurs a heap allocation. This
+ // conversion is performed by mem.NewBuffer. To avoid the extra allocation
+ // a []byte is allocated directly if required and cast to a mem.SliceBuffer.
+ var buf []byte
+ // poolHandle is the pointer returned by the buffer pool (if it's used).
+ var poolHandle *[]byte
+ useBufferPool := !mem.IsBelowBufferPoolingThreshold(int(fh.Length))
+ if useBufferPool {
+ poolHandle = f.pool.Get(int(fh.Length))
+ buf = *poolHandle
+ defer func() {
+ if err != nil {
+ f.pool.Put(poolHandle)
+ }
+ }()
+ } else {
+ buf = make([]byte, int(fh.Length))
+ }
+ if fh.Flags.Has(http2.FlagDataPadded) {
+ if fh.Length == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ // This initial 1-byte read can be inefficient for unbuffered readers,
+ // but it allows the rest of the payload to be read directly to the
+ // start of the destination slice. This makes it easy to return the
+ // original slice back to the buffer pool.
+ if _, err := io.ReadFull(f.reader, buf[:1]); err != nil {
+ return err
+ }
+ padSize := buf[0]
+ buf = buf[:len(buf)-1]
+ if int(padSize) > len(buf) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ f.errDetail = errors.New("pad size larger than data payload")
+ return http2.ConnectionError(http2.ErrCodeProtocol)
+ }
+ if _, err := io.ReadFull(f.reader, buf); err != nil {
+ return err
+ }
+ buf = buf[:len(buf)-int(padSize)]
+ } else if _, err := io.ReadFull(f.reader, buf); err != nil {
+ return err
+ }
+
+ f.dataFrame.FrameHeader = fh
+ if useBufferPool {
+ // Update the handle to point to the (potentially re-sliced) buf.
+ *poolHandle = buf
+ f.dataFrame.data = mem.NewBuffer(poolHandle, f.pool)
+ } else {
+ f.dataFrame.data = mem.SliceBuffer(buf)
+ }
+ return nil
+}
+
+func (df *parsedDataFrame) Header() http2.FrameHeader {
+ return df.FrameHeader
+}
+
func getWriteBufferPool(size int) *sync.Pool {
writeBufferMutex.Lock()
defer writeBufferMutex.Unlock()
diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
index cf8da0b52..ed6a13b75 100644
--- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go
+++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
@@ -32,7 +32,7 @@ import (
// ServerStream implements streaming functionality for a gRPC server.
type ServerStream struct {
- *Stream // Embed for common stream functionality.
+ Stream // Embed for common stream functionality.
st internalServerTransport
ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance)
@@ -43,12 +43,13 @@ type ServerStream struct {
// Holds compressor names passed in grpc-accept-encoding metadata from the
// client.
clientAdvertisedCompressors string
- headerWireLength int
// hdrMu protects outgoing header and trailer metadata.
hdrMu sync.Mutex
header metadata.MD // the outgoing header metadata. Updated by WriteHeader.
headerSent atomic.Bool // atomically set when the headers are sent out.
+
+ headerWireLength int
}
// Read reads an n byte message from the input stream.
@@ -178,3 +179,11 @@ func (s *ServerStream) SetTrailer(md metadata.MD) error {
s.hdrMu.Unlock()
return nil
}
+
+func (s *ServerStream) requestRead(n int) {
+ s.st.adjustWindow(s, uint32(n))
+}
+
+func (s *ServerStream) updateWindow(n int) {
+ s.st.updateWindow(s, uint32(n))
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index af4a4aeab..5ff83a7d7 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -68,11 +68,11 @@ type recvBuffer struct {
err error
}
-func newRecvBuffer() *recvBuffer {
- b := &recvBuffer{
- c: make(chan recvMsg, 1),
- }
- return b
+// init allows a recvBuffer to be initialized in-place, which is useful
+// for resetting a buffer or for avoiding a heap allocation when the buffer
+// is embedded in another struct.
+func (b *recvBuffer) init() {
+ b.c = make(chan recvMsg, 1)
}
func (b *recvBuffer) put(r recvMsg) {
@@ -123,12 +123,13 @@ func (b *recvBuffer) get() <-chan recvMsg {
// recvBufferReader implements io.Reader interface to read the data from
// recvBuffer.
type recvBufferReader struct {
- closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
- ctx context.Context
- ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
- recv *recvBuffer
- last mem.Buffer // Stores the remaining data in the previous calls.
- err error
+ _ noCopy
+ clientStream *ClientStream // The client transport stream is closed with a status representing ctx.Err() and nil trailer metadata.
+ ctx context.Context
+ ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
+ recv *recvBuffer
+ last mem.Buffer // Stores the remaining data in the previous calls.
+ err error
}
func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
@@ -139,7 +140,7 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
n, r.last = mem.ReadUnsafe(header, r.last)
return n, nil
}
- if r.closeStream != nil {
+ if r.clientStream != nil {
n, r.err = r.readMessageHeaderClient(header)
} else {
n, r.err = r.readMessageHeader(header)
@@ -164,7 +165,7 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
}
return buf, nil
}
- if r.closeStream != nil {
+ if r.clientStream != nil {
buf, r.err = r.readClient(n)
} else {
buf, r.err = r.read(n)
@@ -209,7 +210,7 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er
// TODO: delaying ctx error seems like an unnecessary side effect. What
// we really want is to mark the stream as done, and return ctx error
// faster.
- r.closeStream(ContextErr(r.ctx.Err()))
+ r.clientStream.Close(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readMessageHeaderAdditional(m, header)
case m := <-r.recv.get():
@@ -236,7 +237,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
// TODO: delaying ctx error seems like an unnecessary side effect. What
// we really want is to mark the stream as done, and return ctx error
// faster.
- r.closeStream(ContextErr(r.ctx.Err()))
+ r.clientStream.Close(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readAdditional(m, n)
case m := <-r.recv.get():
@@ -285,27 +286,32 @@ const (
// Stream represents an RPC in the transport layer.
type Stream struct {
- id uint32
ctx context.Context // the associated context of the stream
method string // the associated RPC method of the stream
recvCompress string
sendCompress string
- buf *recvBuffer
- trReader *transportReader
- fc *inFlow
- wq *writeQuota
-
- // Callback to state application's intentions to read data. This
- // is used to adjust flow control, if needed.
- requestRead func(int)
- state streamState
+ readRequester readRequester
// contentSubtype is the content-subtype for requests.
// this must be lowercase or the behavior is undefined.
contentSubtype string
trailer metadata.MD // the key-value map of trailer metadata.
+
+ // Non-pointer fields are at the end to optimize GC performance.
+ state streamState
+ id uint32
+ buf recvBuffer
+ trReader transportReader
+ fc inFlow
+ wq writeQuota
+}
+
+// readRequester is used to state application's intentions to read data. This
+// is used to adjust flow control, if needed.
+type readRequester interface {
+ requestRead(int)
}
func (s *Stream) swapState(st streamState) streamState {
@@ -355,7 +361,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) {
if er := s.trReader.er; er != nil {
return er
}
- s.requestRead(len(header))
+ s.readRequester.requestRead(len(header))
for len(header) != 0 {
n, err := s.trReader.ReadMessageHeader(header)
header = header[n:]
@@ -378,7 +384,7 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
if er := s.trReader.er; er != nil {
return nil, er
}
- s.requestRead(n)
+ s.readRequester.requestRead(n)
for n != 0 {
buf, err := s.trReader.Read(n)
var bufLen int
@@ -401,16 +407,34 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
return data, nil
}
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct {
+}
+
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
+
// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
- reader *recvBufferReader
+ _ noCopy
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
- windowHandler func(int)
+ windowHandler windowHandler
er error
+ reader recvBufferReader
+}
+
+// The handler to control the window update procedure for both this
+// particular stream and the associated transport.
+type windowHandler interface {
+ updateWindow(int)
}
func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
@@ -419,7 +443,7 @@ func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
t.er = err
return 0, err
}
- t.windowHandler(n)
+ t.windowHandler.updateWindow(n)
return n, nil
}
@@ -429,7 +453,7 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) {
t.er = err
return buf, err
}
- t.windowHandler(buf.Len())
+ t.windowHandler.updateWindow(buf.Len())
return buf, nil
}
@@ -454,7 +478,7 @@ type ServerConfig struct {
ConnectionTimeout time.Duration
Credentials credentials.TransportCredentials
InTapHandle tap.ServerInHandle
- StatsHandlers []stats.Handler
+ StatsHandler stats.Handler
KeepaliveParams keepalive.ServerParameters
KeepalivePolicy keepalive.EnforcementPolicy
InitialWindowSize int32
@@ -466,6 +490,7 @@ type ServerConfig struct {
MaxHeaderListSize *uint32
HeaderTableSize *uint32
BufferPool mem.BufferPool
+ StaticWindowSize bool
}
// ConnectOptions covers all relevant options for communicating with the server.
@@ -504,6 +529,8 @@ type ConnectOptions struct {
MaxHeaderListSize *uint32
// The mem.BufferPool to use when reading/writing to the wire.
BufferPool mem.BufferPool
+ // StaticWindowSize controls whether dynamic window sizing is enabled.
+ StaticWindowSize bool
}
// WriteOptions provides additional hints and information for message
@@ -540,6 +567,11 @@ type CallHdr struct {
PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
DoneFunc func() // called when the stream is finished
+
+ // Authority is used to explicitly override the `:authority` header. If set,
+ // this value takes precedence over the Host field and will be used as the
+ // value for the `:authority` header.
+ Authority string
}
// ClientTransport is the common interface for all gRPC client-side transport
@@ -607,6 +639,8 @@ type internalServerTransport interface {
write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
writeStatus(s *ServerStream, st *status.Status) error
incrMsgRecv()
+ adjustWindow(s *ServerStream, n uint32)
+ updateWindow(s *ServerStream, n uint32)
}
// connectionErrorf creates an ConnectionError with the specified error description.
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
index c37c58c02..f211e7274 100644
--- a/vendor/google.golang.org/grpc/mem/buffer_pool.go
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -32,6 +32,9 @@ type BufferPool interface {
Get(length int) *[]byte
// Put returns a buffer to the pool.
+ //
+ // The provided pointer must hold a prefix of the buffer obtained via
+ // BufferPool.Get to ensure the buffer's entire capacity can be re-used.
Put(*[]byte)
}
@@ -118,7 +121,11 @@ type sizedBufferPool struct {
}
func (p *sizedBufferPool) Get(size int) *[]byte {
- buf := p.pool.Get().(*[]byte)
+ buf, ok := p.pool.Get().(*[]byte)
+ if !ok {
+ buf := make([]byte, size, p.defaultSize)
+ return &buf
+ }
b := *buf
clear(b[:cap(b)])
*buf = b[:size]
@@ -137,12 +144,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) {
func newSizedBufferPool(size int) *sizedBufferPool {
return &sizedBufferPool{
- pool: sync.Pool{
- New: func() any {
- buf := make([]byte, size)
- return &buf
- },
- },
defaultSize: size,
}
}
@@ -160,6 +161,7 @@ type simpleBufferPool struct {
func (p *simpleBufferPool) Get(size int) *[]byte {
bs, ok := p.pool.Get().(*[]byte)
if ok && cap(*bs) >= size {
+ clear((*bs)[:cap(*bs)])
*bs = (*bs)[:size]
return bs
}
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
index 65002e2cc..084fb19c6 100644
--- a/vendor/google.golang.org/grpc/mem/buffer_slice.go
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -19,6 +19,7 @@
package mem
import (
+ "fmt"
"io"
)
@@ -117,47 +118,53 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
// Reader returns a new Reader for the input slice after taking references to
// each underlying buffer.
-func (s BufferSlice) Reader() Reader {
+func (s BufferSlice) Reader() *Reader {
s.Ref()
- return &sliceReader{
+ return &Reader{
data: s,
len: s.Len(),
}
}
// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
-// with other parts systems. It also provides an additional convenience method
-// Remaining(), which returns the number of unread bytes remaining in the slice.
+// with other systems.
+//
// Buffers will be freed as they are read.
-type Reader interface {
- io.Reader
- io.ByteReader
- // Close frees the underlying BufferSlice and never returns an error. Subsequent
- // calls to Read will return (0, io.EOF).
- Close() error
- // Remaining returns the number of unread bytes remaining in the slice.
- Remaining() int
-}
-
-type sliceReader struct {
+//
+// A Reader can be constructed from a BufferSlice; alternatively the zero value
+// of a Reader may be used after calling Reset on it.
+type Reader struct {
data BufferSlice
len int
// The index into data[0].ReadOnlyData().
bufferIdx int
}
-func (r *sliceReader) Remaining() int {
+// Remaining returns the number of unread bytes remaining in the slice.
+func (r *Reader) Remaining() int {
return r.len
}
-func (r *sliceReader) Close() error {
+// Reset frees the currently held buffer slice and starts reading from the
+// provided slice. This allows reusing the reader object.
+func (r *Reader) Reset(s BufferSlice) {
+ r.data.Free()
+ s.Ref()
+ r.data = s
+ r.len = s.Len()
+ r.bufferIdx = 0
+}
+
+// Close frees the underlying BufferSlice and never returns an error. Subsequent
+// calls to Read will return (0, io.EOF).
+func (r *Reader) Close() error {
r.data.Free()
r.data = nil
r.len = 0
return nil
}
-func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+func (r *Reader) freeFirstBufferIfEmpty() bool {
if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
return false
}
@@ -168,7 +175,7 @@ func (r *sliceReader) freeFirstBufferIfEmpty() bool {
return true
}
-func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+func (r *Reader) Read(buf []byte) (n int, _ error) {
if r.len == 0 {
return 0, io.EOF
}
@@ -191,7 +198,8 @@ func (r *sliceReader) Read(buf []byte) (n int, _ error) {
return n, nil
}
-func (r *sliceReader) ReadByte() (byte, error) {
+// ReadByte reads a single byte.
+func (r *Reader) ReadByte() (byte, error) {
if r.len == 0 {
return 0, io.EOF
}
@@ -279,3 +287,59 @@ nextBuffer:
}
}
}
+
+// Discard skips the next n bytes, returning the number of bytes discarded.
+//
+// It frees buffers as they are fully consumed.
+//
+// If Discard skips fewer than n bytes, it also returns an error.
+func (r *Reader) Discard(n int) (discarded int, err error) {
+ total := n
+ for n > 0 && r.len > 0 {
+ curData := r.data[0].ReadOnlyData()
+ curSize := min(n, len(curData)-r.bufferIdx)
+ n -= curSize
+ r.len -= curSize
+ r.bufferIdx += curSize
+ if r.bufferIdx >= len(curData) {
+ r.data[0].Free()
+ r.data = r.data[1:]
+ r.bufferIdx = 0
+ }
+ }
+ discarded = total - n
+ if n > 0 {
+ return discarded, fmt.Errorf("insufficient bytes in reader")
+ }
+ return discarded, nil
+}
+
+// Peek returns the next n bytes without advancing the reader.
+//
+// Peek appends results to the provided res slice and returns the updated slice.
+// This pattern allows re-using the storage of res if it has sufficient
+// capacity.
+//
+// The returned subslices are views into the underlying buffers and are only
+// valid until the reader is advanced past the corresponding buffer.
+//
+// If Peek returns fewer than n bytes, it also returns an error.
+func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) {
+ for i := 0; n > 0 && i < len(r.data); i++ {
+ curData := r.data[i].ReadOnlyData()
+ start := 0
+ if i == 0 {
+ start = r.bufferIdx
+ }
+ curSize := min(n, len(curData)-start)
+ if curSize == 0 {
+ continue
+ }
+ res = append(res, curData[start:start+curSize])
+ n -= curSize
+ }
+ if n > 0 {
+ return nil, fmt.Errorf("insufficient bytes in reader")
+ }
+ return res, nil
+}
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index a2d2a798d..aa52bfe95 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -29,7 +29,6 @@ import (
"google.golang.org/grpc/internal/channelz"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
@@ -48,14 +47,11 @@ type pickerGeneration struct {
// actions and unblock when there's a picker update.
type pickerWrapper struct {
// If pickerGen holds a nil pointer, the pickerWrapper is closed.
- pickerGen atomic.Pointer[pickerGeneration]
- statsHandlers []stats.Handler // to record blocking picker calls
+ pickerGen atomic.Pointer[pickerGeneration]
}
-func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
- pw := &pickerWrapper{
- statsHandlers: statsHandlers,
- }
+func newPickerWrapper() *pickerWrapper {
+ pw := &pickerWrapper{}
pw.pickerGen.Store(&pickerGeneration{
blockingCh: make(chan struct{}),
})
@@ -93,6 +89,12 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
}
}
+type pick struct {
+ transport transport.ClientTransport // the selected transport
+ result balancer.PickResult // the contents of the pick from the LB policy
+ blocked bool // set if a picker call queued for a new picker
+}
+
// pick returns the transport that will be used for the RPC.
// It may block in the following cases:
// - there's no picker
@@ -100,15 +102,16 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
// - the current picker returns other errors and failfast is false.
// - the subConn returned by the current picker is not READY
// When one of these situations happens, pick blocks until the picker gets updated.
-func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) {
var ch chan struct{}
var lastPickErr error
+ pickBlocked := false
for {
pg := pw.pickerGen.Load()
if pg == nil {
- return nil, balancer.PickResult{}, ErrClientConnClosing
+ return pick{}, ErrClientConnClosing
}
if pg.picker == nil {
ch = pg.blockingCh
@@ -127,9 +130,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
}
switch ctx.Err() {
case context.DeadlineExceeded:
- return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
+ return pick{}, status.Error(codes.DeadlineExceeded, errStr)
case context.Canceled:
- return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
+ return pick{}, status.Error(codes.Canceled, errStr)
}
case <-ch:
}
@@ -145,9 +148,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
// In the second case, the only way it will get to this conditional is
// if there is a new picker.
if ch != nil {
- for _, sh := range pw.statsHandlers {
- sh.HandleRPC(ctx, &stats.PickerUpdated{})
- }
+ pickBlocked = true
}
ch = pg.blockingCh
@@ -164,7 +165,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
if istatus.IsRestrictedControlPlaneCode(st) {
err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
}
- return nil, balancer.PickResult{}, dropError{error: err}
+ return pick{}, dropError{error: err}
}
// For all other errors, wait for ready RPCs should block and other
// RPCs should fail with unavailable.
@@ -172,7 +173,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
lastPickErr = err
continue
}
- return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
+ return pick{}, status.Error(codes.Unavailable, err.Error())
}
acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
@@ -183,9 +184,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
if t := acbw.ac.getReadyTransport(); t != nil {
if channelz.IsOn() {
doneChannelzWrapper(acbw, &pickResult)
- return t, pickResult, nil
}
- return t, pickResult, nil
+ return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil
}
if pickResult.Done != nil {
// Calling done with nil error, no bytes sent and no bytes received.
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index ee0ff969a..1e783febf 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -47,9 +47,6 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
}
// check if the context has the relevant information to prepareMsg
- if rpcInfo.preloaderInfo == nil {
- return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
- }
if rpcInfo.preloaderInfo.codec == nil {
return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index b84ef26d4..8e6af9514 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -332,6 +332,11 @@ type AuthorityOverrider interface {
// OverrideAuthority returns the authority to use for a ClientConn with the
// given target. The implementation must generate it without blocking,
// typically in line, and must keep it unchanged.
+ //
+ // The returned string must be a valid ":authority" header value, i.e. be
+ // encoded according to
+ // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as
+ // necessary.
OverrideAuthority(Target) string
}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index ad20e9dff..6b04c9e87 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -160,6 +160,7 @@ type callInfo struct {
codec baseCodec
maxRetryRPCBufferSize int
onFinish []func(err error)
+ authority string
}
func defaultCallInfo() *callInfo {
@@ -365,6 +366,36 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
}
func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
+// CallAuthority returns a CallOption that sets the HTTP/2 :authority header of
+// an RPC to the specified value. When using CallAuthority, the credentials in
+// use must implement the AuthorityValidator interface.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func CallAuthority(authority string) CallOption {
+ return AuthorityOverrideCallOption{Authority: authority}
+}
+
+// AuthorityOverrideCallOption is a CallOption that indicates the HTTP/2
+// :authority header value to use for the call.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a later
+// release.
+type AuthorityOverrideCallOption struct {
+ Authority string
+}
+
+func (o AuthorityOverrideCallOption) before(c *callInfo) error {
+ c.authority = o.Authority
+ return nil
+}
+
+func (o AuthorityOverrideCallOption) after(*callInfo, *csAttempt) {}
+
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can send. If this is not set, gRPC uses the default
// `math.MaxInt32`.
@@ -626,8 +657,20 @@ type streamReader interface {
Read(n int) (mem.BufferSlice, error)
}
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct {
+}
+
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
+
// parser reads complete gRPC messages from the underlying reader.
type parser struct {
+ _ noCopy
// r is the underlying reader.
// See the comment on recvMsg for the permissible
// error types.
@@ -918,7 +961,7 @@ func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxR
// Information about RPC
type rpcInfo struct {
failfast bool
- preloaderInfo *compressorInfo
+ preloaderInfo compressorInfo
}
// Information about Preloader
@@ -937,7 +980,7 @@ type rpcInfoContextKey struct{}
func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
failfast: failfast,
- preloaderInfo: &compressorInfo{
+ preloaderInfo: compressorInfo{
codec: codec,
cp: cp,
comp: comp,
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 976e70ae0..ddd377341 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -124,7 +124,8 @@ type serviceInfo struct {
// Server is a gRPC server to serve RPC requests.
type Server struct {
- opts serverOptions
+ opts serverOptions
+ statsHandler stats.Handler
mu sync.Mutex // guards following
lis map[net.Listener]bool
@@ -179,6 +180,7 @@ type serverOptions struct {
numServerWorkers uint32
bufferPool mem.BufferPool
waitForHandlers bool
+ staticWindowSize bool
}
var defaultServerOptions = serverOptions{
@@ -279,6 +281,7 @@ func ReadBufferSize(s int) ServerOption {
func InitialWindowSize(s int32) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.initialWindowSize = s
+ o.staticWindowSize = true
})
}
@@ -287,6 +290,29 @@ func InitialWindowSize(s int32) ServerOption {
func InitialConnWindowSize(s int32) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.initialConnWindowSize = s
+ o.staticWindowSize = true
+ })
+}
+
+// StaticStreamWindowSize returns a ServerOption to set the initial stream
+// window size to the value provided and disables dynamic flow control.
+// The lower bound for window size is 64K and any value smaller than that
+// will be ignored.
+func StaticStreamWindowSize(s int32) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.initialWindowSize = s
+ o.staticWindowSize = true
+ })
+}
+
+// StaticConnWindowSize returns a ServerOption to set the initial connection
+// window size to the value provided and disables dynamic flow control.
+// The lower bound for window size is 64K and any value smaller than that
+// will be ignored.
+func StaticConnWindowSize(s int32) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.initialConnWindowSize = s
+ o.staticWindowSize = true
})
}
@@ -667,13 +693,14 @@ func NewServer(opt ...ServerOption) *Server {
o.apply(&opts)
}
s := &Server{
- lis: make(map[net.Listener]bool),
- opts: opts,
- conns: make(map[string]map[transport.ServerTransport]bool),
- services: make(map[string]*serviceInfo),
- quit: grpcsync.NewEvent(),
- done: grpcsync.NewEvent(),
- channelz: channelz.RegisterServer(""),
+ lis: make(map[net.Listener]bool),
+ opts: opts,
+ statsHandler: istats.NewCombinedHandler(opts.statsHandlers...),
+ conns: make(map[string]map[transport.ServerTransport]bool),
+ services: make(map[string]*serviceInfo),
+ quit: grpcsync.NewEvent(),
+ done: grpcsync.NewEvent(),
+ channelz: channelz.RegisterServer(""),
}
chainUnaryServerInterceptors(s)
chainStreamServerInterceptors(s)
@@ -974,7 +1001,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
ConnectionTimeout: s.opts.connectionTimeout,
Credentials: s.opts.creds,
InTapHandle: s.opts.inTapHandle,
- StatsHandlers: s.opts.statsHandlers,
+ StatsHandler: s.statsHandler,
KeepaliveParams: s.opts.keepaliveParams,
KeepalivePolicy: s.opts.keepalivePolicy,
InitialWindowSize: s.opts.initialWindowSize,
@@ -986,6 +1013,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
MaxHeaderListSize: s.opts.maxHeaderListSize,
HeaderTableSize: s.opts.headerTableSize,
BufferPool: s.opts.bufferPool,
+ StaticWindowSize: s.opts.staticWindowSize,
}
st, err := transport.NewServerTransport(c, config)
if err != nil {
@@ -1010,18 +1038,18 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
ctx = transport.SetConnection(ctx, rawConn)
ctx = peer.NewContext(ctx, st.Peer())
- for _, sh := range s.opts.statsHandlers {
- ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
+ if s.statsHandler != nil {
+ ctx = s.statsHandler.TagConn(ctx, &stats.ConnTagInfo{
RemoteAddr: st.Peer().Addr,
LocalAddr: st.Peer().LocalAddr,
})
- sh.HandleConn(ctx, &stats.ConnBegin{})
+ s.statsHandler.HandleConn(ctx, &stats.ConnBegin{})
}
defer func() {
st.Close(errors.New("finished serving streams for the server transport"))
- for _, sh := range s.opts.statsHandlers {
- sh.HandleConn(ctx, &stats.ConnEnd{})
+ if s.statsHandler != nil {
+ s.statsHandler.HandleConn(ctx, &stats.ConnEnd{})
}
}()
@@ -1078,7 +1106,7 @@ var _ http.Handler = (*Server)(nil)
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
+ st, err := transport.NewServerHandlerTransport(w, r, s.statsHandler, s.opts.bufferPool)
if err != nil {
// Errors returned from transport.NewServerHandlerTransport have
// already been written to w.
@@ -1172,12 +1200,8 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
}
err = stream.Write(hdr, payload, opts)
- if err == nil {
- if len(s.opts.statsHandlers) != 0 {
- for _, sh := range s.opts.statsHandlers {
- sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
- }
- }
+ if err == nil && s.statsHandler != nil {
+ s.statsHandler.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
}
return err
}
@@ -1219,16 +1243,15 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
}
func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
- shs := s.opts.statsHandlers
- if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+ sh := s.statsHandler
+ if sh != nil || trInfo != nil || channelz.IsOn() {
if channelz.IsOn() {
s.incrCallsStarted()
}
var statsBegin *stats.Begin
- for _, sh := range shs {
- beginTime := time.Now()
+ if sh != nil {
statsBegin = &stats.Begin{
- BeginTime: beginTime,
+ BeginTime: time.Now(),
IsClientStream: false,
IsServerStream: false,
}
@@ -1256,7 +1279,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
trInfo.tr.Finish()
}
- for _, sh := range shs {
+ if sh != nil {
end := &stats.End{
BeginTime: statsBegin.BeginTime,
EndTime: time.Now(),
@@ -1353,7 +1376,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
}
var payInfo *payloadInfo
- if len(shs) != 0 || len(binlogs) != 0 {
+ if sh != nil || len(binlogs) != 0 {
payInfo = &payloadInfo{}
defer payInfo.free()
}
@@ -1379,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
- for _, sh := range shs {
+ if sh != nil {
sh.HandleRPC(ctx, &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
@@ -1553,32 +1576,30 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
if channelz.IsOn() {
s.incrCallsStarted()
}
- shs := s.opts.statsHandlers
+ sh := s.statsHandler
var statsBegin *stats.Begin
- if len(shs) != 0 {
- beginTime := time.Now()
+ if sh != nil {
statsBegin = &stats.Begin{
- BeginTime: beginTime,
+ BeginTime: time.Now(),
IsClientStream: sd.ClientStreams,
IsServerStream: sd.ServerStreams,
}
- for _, sh := range shs {
- sh.HandleRPC(ctx, statsBegin)
- }
+ sh.HandleRPC(ctx, statsBegin)
}
ctx = NewContextWithServerTransportStream(ctx, stream)
ss := &serverStream{
ctx: ctx,
s: stream,
- p: &parser{r: stream, bufferPool: s.opts.bufferPool},
+ p: parser{r: stream, bufferPool: s.opts.bufferPool},
codec: s.getCodec(stream.ContentSubtype()),
+ desc: sd,
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
trInfo: trInfo,
- statsHandler: shs,
+ statsHandler: sh,
}
- if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+ if sh != nil || trInfo != nil || channelz.IsOn() {
// See comment in processUnaryRPC on defers.
defer func() {
if trInfo != nil {
@@ -1592,7 +1613,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
ss.mu.Unlock()
}
- if len(shs) != 0 {
+ if sh != nil {
end := &stats.End{
BeginTime: statsBegin.BeginTime,
EndTime: time.Now(),
@@ -1600,9 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
}
- for _, sh := range shs {
- sh.HandleRPC(ctx, end)
- }
+ sh.HandleRPC(ctx, end)
}
if channelz.IsOn() {
@@ -1791,19 +1810,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser
method := sm[pos+1:]
// FromIncomingContext is expensive: skip if there are no statsHandlers
- if len(s.opts.statsHandlers) > 0 {
+ if s.statsHandler != nil {
md, _ := metadata.FromIncomingContext(ctx)
- for _, sh := range s.opts.statsHandlers {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
- sh.HandleRPC(ctx, &stats.InHeader{
- FullMethod: stream.Method(),
- RemoteAddr: t.Peer().Addr,
- LocalAddr: t.Peer().LocalAddr,
- Compression: stream.RecvCompress(),
- WireLength: stream.HeaderWireLength(),
- Header: md,
- })
- }
+ ctx = s.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+ s.statsHandler.HandleRPC(ctx, &stats.InHeader{
+ FullMethod: stream.Method(),
+ RemoteAddr: t.Peer().Addr,
+ LocalAddr: t.Peer().LocalAddr,
+ Compression: stream.RecvCompress(),
+ WireLength: stream.HeaderWireLength(),
+ Header: md,
+ })
}
// To have calls in stream callouts work. Will delete once all stats handler
// calls come from the gRPC layer.
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
index dc03731e4..67194a592 100644
--- a/vendor/google.golang.org/grpc/stats/handlers.go
+++ b/vendor/google.golang.org/grpc/stats/handlers.go
@@ -38,6 +38,15 @@ type RPCTagInfo struct {
// FailFast indicates if this RPC is failfast.
// This field is only valid on client side, it's always false on server side.
FailFast bool
+ // NameResolutionDelay indicates if the RPC needed to wait for the
+ // initial name resolver update before it could begin. This should only
+ // happen if the channel is IDLE when the RPC is started. Note that
+ // all retry or hedging attempts for an RPC that experienced a delay
+ // will have it set.
+ //
+ // This field is only valid on the client side; it is always false on
+ // the server side.
+ NameResolutionDelay bool
}
// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index baf7740ef..10bf998aa 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -64,15 +64,21 @@ func (s *Begin) IsClient() bool { return s.Client }
func (s *Begin) isRPCStats() {}
-// PickerUpdated indicates that the LB policy provided a new picker while the
-// RPC was waiting for one.
-type PickerUpdated struct{}
+// DelayedPickComplete indicates that the RPC is unblocked following a delay in
+// selecting a connection for the call.
+type DelayedPickComplete struct{}
-// IsClient indicates if the stats information is from client side. Only Client
-// Side interfaces with a Picker, thus always returns true.
-func (*PickerUpdated) IsClient() bool { return true }
+// IsClient indicates DelayedPickComplete is available on the client.
+func (*DelayedPickComplete) IsClient() bool { return true }
-func (*PickerUpdated) isRPCStats() {}
+func (*DelayedPickComplete) isRPCStats() {}
+
+// PickerUpdated indicates that the RPC is unblocked following a delay in
+// selecting a connection for the call.
+//
+// Deprecated: will be removed in a future release; use DelayedPickComplete
+// instead.
+type PickerUpdated = DelayedPickComplete
// InPayload contains stats about an incoming payload.
type InPayload struct {
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 12163150b..ca87ff977 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -101,9 +101,9 @@ type ClientStream interface {
// It must only be called after stream.CloseAndRecv has returned, or
// stream.Recv has returned a non-nil error (including io.EOF).
Trailer() metadata.MD
- // CloseSend closes the send direction of the stream. It closes the stream
- // when non-nil error is met. It is also not safe to call CloseSend
- // concurrently with SendMsg.
+ // CloseSend closes the send direction of the stream. This method always
+ // returns a nil error. The status of the stream may be discovered using
+ // RecvMsg. It is also not safe to call CloseSend concurrently with SendMsg.
CloseSend() error
// Context returns the context for this stream.
//
@@ -177,6 +177,8 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return cc.NewStream(ctx, desc, method, opts...)
}
+var emptyMethodConfig = serviceconfig.MethodConfig{}
+
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
// Start tracking the RPC for idleness purposes. This is where a stream is
// created for both streaming and unary RPCs, and hence is a good place to
@@ -212,14 +214,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
// Provide an opportunity for the first RPC to see the first service config
// provided by the resolver.
- if err := cc.waitForResolvedAddrs(ctx); err != nil {
+ nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx)
+ if err != nil {
return nil, err
}
- var mc serviceconfig.MethodConfig
+ mc := &emptyMethodConfig
var onCommit func()
newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
- return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
+ return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...)
}
rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
@@ -239,7 +242,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
if rpcConfig.Context != nil {
ctx = rpcConfig.Context
}
- mc = rpcConfig.MethodConfig
+ mc = &rpcConfig.MethodConfig
onCommit = rpcConfig.OnCommitted
if rpcConfig.Interceptor != nil {
rpcInfo.Context = nil
@@ -257,7 +260,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return newStream(ctx, func() {})
}
-func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
+func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc *serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
callInfo := defaultCallInfo()
if mc.WaitForReady != nil {
callInfo.failFast = !*mc.WaitForReady
@@ -296,6 +299,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
Method: method,
ContentSubtype: callInfo.contentSubtype,
DoneFunc: doneFunc,
+ Authority: callInfo.authority,
}
// Set our outgoing compression according to the UseCompressor CallOption, if
@@ -321,19 +325,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
}
cs := &clientStream{
- callHdr: callHdr,
- ctx: ctx,
- methodConfig: &mc,
- opts: opts,
- callInfo: callInfo,
- cc: cc,
- desc: desc,
- codec: callInfo.codec,
- compressorV0: compressorV0,
- compressorV1: compressorV1,
- cancel: cancel,
- firstAttempt: true,
- onCommit: onCommit,
+ callHdr: callHdr,
+ ctx: ctx,
+ methodConfig: mc,
+ opts: opts,
+ callInfo: callInfo,
+ cc: cc,
+ desc: desc,
+ codec: callInfo.codec,
+ compressorV0: compressorV0,
+ compressorV1: compressorV1,
+ cancel: cancel,
+ firstAttempt: true,
+ onCommit: onCommit,
+ nameResolutionDelay: nameResolutionDelayed,
}
if !cc.dopts.disableRetry {
cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
@@ -415,19 +420,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1)
method := cs.callHdr.Method
var beginTime time.Time
- shs := cs.cc.dopts.copts.StatsHandlers
- for _, sh := range shs {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
+ sh := cs.cc.statsHandler
+ if sh != nil {
beginTime = time.Now()
- begin := &stats.Begin{
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{
+ FullMethodName: method, FailFast: cs.callInfo.failFast,
+ NameResolutionDelay: cs.nameResolutionDelay,
+ })
+ sh.HandleRPC(ctx, &stats.Begin{
Client: true,
BeginTime: beginTime,
FailFast: cs.callInfo.failFast,
IsClientStream: cs.desc.ClientStreams,
IsServerStream: cs.desc.ServerStreams,
IsTransparentRetryAttempt: isTransparent,
- }
- sh.HandleRPC(ctx, begin)
+ })
}
var trInfo *traceInfo
@@ -458,7 +465,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
beginTime: beginTime,
cs: cs,
decompressorV0: cs.cc.dopts.dc,
- statsHandlers: shs,
+ statsHandler: sh,
trInfo: trInfo,
}, nil
}
@@ -466,8 +473,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
func (a *csAttempt) getTransport() error {
cs := a.cs
- var err error
- a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+ pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method}
+ pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo)
+ a.transport, a.pickResult = pick.transport, pick.result
if err != nil {
if de, ok := err.(dropError); ok {
err = de.error
@@ -478,6 +486,9 @@ func (a *csAttempt) getTransport() error {
if a.trInfo != nil {
a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
}
+ if pick.blocked && a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
+ }
return nil
}
@@ -520,7 +531,7 @@ func (a *csAttempt) newStream() error {
}
a.transportStream = s
a.ctx = s.Context()
- a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
+ a.parser = parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
return nil
}
@@ -540,6 +551,8 @@ type clientStream struct {
sentLast bool // sent an end stream
+ receivedFirstMsg bool // set after the first message is received
+
methodConfig *MethodConfig
ctx context.Context // the application's context, wrapped by stats/tracing
@@ -573,6 +586,9 @@ type clientStream struct {
onCommit func()
replayBuffer []replayOp // operations to replay on retry
replayBufferSize int // current size of replayBuffer
+ // nameResolutionDelay indicates if there was a delay in the name resolution.
+ // This field is only valid on client side, it's always false on server side.
+ nameResolutionDelay bool
}
type replayOp struct {
@@ -587,7 +603,7 @@ type csAttempt struct {
cs *clientStream
transport transport.ClientTransport
transportStream *transport.ClientStream
- parser *parser
+ parser parser
pickResult balancer.PickResult
finished bool
@@ -601,8 +617,8 @@ type csAttempt struct {
// and cleared when the finish method is called.
trInfo *traceInfo
- statsHandlers []stats.Handler
- beginTime time.Time
+ statsHandler stats.Handler
+ beginTime time.Time
// set for newStream errors that may be transparently retried
allowTransparentRetry bool
@@ -987,7 +1003,7 @@ func (cs *clientStream) RecvMsg(m any) error {
func (cs *clientStream) CloseSend() error {
if cs.sentLast {
- // TODO: return an error and finish the stream instead, due to API misuse?
+ // Return a nil error on repeated calls to this method.
return nil
}
cs.sentLast = true
@@ -1008,7 +1024,10 @@ func (cs *clientStream) CloseSend() error {
binlog.Log(cs.ctx, chc)
}
}
- // We never returned an error here for reasons.
+ // We don't return an error here as we expect users to read all messages
+ // from the stream and get the RPC status from RecvMsg(). Note that
+ // SendMsg() must return an error when one occurs so the application
+ // knows to stop sending messages, but that does not apply here.
return nil
}
@@ -1093,17 +1112,15 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
}
return io.EOF
}
- if len(a.statsHandlers) != 0 {
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
- }
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
}
return nil
}
func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
cs := a.cs
- if len(a.statsHandlers) != 0 && payInfo == nil {
+ if a.statsHandler != nil && payInfo == nil {
payInfo = &payloadInfo{}
defer payInfo.free()
}
@@ -1124,16 +1141,21 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
// Only initialize this state once per stream.
a.decompressorSet = true
}
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
+ if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
if err == io.EOF {
if statusErr := a.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
+ // Received no msg and status OK for non-server streaming rpcs.
+ if !cs.desc.ServerStreams && !cs.receivedFirstMsg {
+ return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+ }
return io.EOF // indicates successful end of stream.
}
return toRPCErr(err)
}
+ cs.receivedFirstMsg = true
if a.trInfo != nil {
a.mu.Lock()
if a.trInfo.tr != nil {
@@ -1141,8 +1163,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
a.mu.Unlock()
}
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, &stats.InPayload{
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
Client: true,
RecvTime: time.Now(),
Payload: m,
@@ -1157,12 +1179,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+ if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
}
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
}
func (a *csAttempt) finish(err error) {
@@ -1195,15 +1217,14 @@ func (a *csAttempt) finish(err error) {
ServerLoad: balancerload.Parse(tr),
})
}
- for _, sh := range a.statsHandlers {
- end := &stats.End{
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.End{
Client: true,
BeginTime: a.beginTime,
EndTime: time.Now(),
Trailer: tr,
Error: err,
- }
- sh.HandleRPC(a.ctx, end)
+ })
}
if a.trInfo != nil && a.trInfo.tr != nil {
if err == nil {
@@ -1309,7 +1330,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, err
}
as.transportStream = s
- as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
+ as.parser = parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
// Listen on stream context to cleanup when the stream context is
@@ -1344,6 +1365,7 @@ type addrConnStream struct {
transport transport.ClientTransport
ctx context.Context
sentLast bool
+ receivedFirstMsg bool
desc *StreamDesc
codec baseCodec
sendCompressorV0 Compressor
@@ -1351,7 +1373,7 @@ type addrConnStream struct {
decompressorSet bool
decompressorV0 Decompressor
decompressorV1 encoding.Compressor
- parser *parser
+ parser parser
// mu guards finished and is held for the entire finish method.
mu sync.Mutex
@@ -1372,7 +1394,7 @@ func (as *addrConnStream) Trailer() metadata.MD {
func (as *addrConnStream) CloseSend() error {
if as.sentLast {
- // TODO: return an error and finish the stream instead, due to API misuse?
+ // Return a nil error on repeated calls to this method.
return nil
}
as.sentLast = true
@@ -1464,15 +1486,20 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Only initialize this state once per stream.
as.decompressorSet = true
}
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
+ if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
if err == io.EOF {
if statusErr := as.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
+ // Received no msg and status OK for non-server streaming rpcs.
+ if !as.desc.ServerStreams && !as.receivedFirstMsg {
+ return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+ }
return io.EOF // indicates successful end of stream.
}
return toRPCErr(err)
}
+ as.receivedFirstMsg = true
if as.desc.ServerStreams {
// Subsequent messages should be received by subsequent RecvMsg calls.
@@ -1481,12 +1508,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+ if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
}
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
}
func (as *addrConnStream) finish(err error) {
@@ -1569,8 +1596,9 @@ type ServerStream interface {
type serverStream struct {
ctx context.Context
s *transport.ServerStream
- p *parser
+ p parser
codec baseCodec
+ desc *StreamDesc
compressorV0 Compressor
compressorV1 encoding.Compressor
@@ -1579,11 +1607,13 @@ type serverStream struct {
sendCompressorName string
+ recvFirstMsg bool // set after the first message is received
+
maxReceiveMessageSize int
maxSendMessageSize int
trInfo *traceInfo
- statsHandler []stats.Handler
+ statsHandler stats.Handler
binlogs []binarylog.MethodLogger
// serverHeaderBinlogged indicates whether server header has been logged. It
@@ -1719,10 +1749,8 @@ func (ss *serverStream) SendMsg(m any) (err error) {
binlog.Log(ss.ctx, sm)
}
}
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
- }
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
}
return nil
}
@@ -1753,11 +1781,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
}()
var payInfo *payloadInfo
- if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
+ if ss.statsHandler != nil || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
defer payInfo.free()
}
- if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
+ if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
if err == io.EOF {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
@@ -1765,6 +1793,10 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
binlog.Log(ss.ctx, chc)
}
}
+ // Received no request msg for non-client streaming rpcs.
+ if !ss.desc.ClientStreams && !ss.recvFirstMsg {
+ return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC")
+ }
return err
}
if err == io.ErrUnexpectedEOF {
@@ -1772,16 +1804,15 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
return toRPCErr(err)
}
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- Length: payInfo.uncompressedBytes.Len(),
- WireLength: payInfo.compressedLength + headerLen,
- CompressedLength: payInfo.compressedLength,
- })
- }
+ ss.recvFirstMsg = true
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
+ RecvTime: time.Now(),
+ Payload: m,
+ Length: payInfo.uncompressedBytes.Len(),
+ WireLength: payInfo.compressedLength + headerLen,
+ CompressedLength: payInfo.compressedLength,
+ })
}
if len(ss.binlogs) != 0 {
cm := &binarylog.ClientMessage{
@@ -1791,7 +1822,19 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
binlog.Log(ss.ctx, cm)
}
}
- return nil
+
+ if ss.desc.ClientStreams {
+ // Subsequent messages should be received by subsequent RecvMsg calls.
+ return nil
+ }
+ // Special handling for non-client-stream rpcs.
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC")
}
// MethodFromServerStream returns the method string for the input stream.
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 2bae4db89..9e6d018fb 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.72.2"
+const Version = "1.77.0"
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 688aabe43..dbcf90b87 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -72,9 +72,10 @@ type (
EditionFeatures EditionFeatures
}
FileL2 struct {
- Options func() protoreflect.ProtoMessage
- Imports FileImports
- Locations SourceLocations
+ Options func() protoreflect.ProtoMessage
+ Imports FileImports
+ OptionImports func() protoreflect.FileImports
+ Locations SourceLocations
}
// EditionFeatures is a frequently-instantiated struct, so please take care
@@ -126,12 +127,9 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
func (fd *File) Parent() protoreflect.Descriptor { return nil }
func (fd *File) Index() int { return 0 }
func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax }
-
-// Not exported and just used to reconstruct the original FileDescriptor proto
-func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
-func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
-func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
-func (fd *File) IsPlaceholder() bool { return false }
+func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
+func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
+func (fd *File) IsPlaceholder() bool { return false }
func (fd *File) Options() protoreflect.ProtoMessage {
if f := fd.lazyInit().Options; f != nil {
return f()
@@ -150,6 +148,16 @@ func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatD
func (fd *File) ProtoType(protoreflect.FileDescriptor) {}
func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
+// The next two are not part of the FileDescriptor interface. They are just used to reconstruct
+// the original FileDescriptor proto.
+func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
+func (fd *File) OptionImports() protoreflect.FileImports {
+ if f := fd.lazyInit().OptionImports; f != nil {
+ return f()
+ }
+ return emptyFiles
+}
+
func (fd *File) lazyInit() *FileL2 {
if atomic.LoadUint32(&fd.once) == 0 {
fd.lazyInitOnce()
@@ -182,9 +190,9 @@ type (
L2 *EnumL2 // protected by fileDesc.once
}
EnumL1 struct {
- eagerValues bool // controls whether EnumL2.Values is already populated
-
EditionFeatures EditionFeatures
+ Visibility int32
+ eagerValues bool // controls whether EnumL2.Values is already populated
}
EnumL2 struct {
Options func() protoreflect.ProtoMessage
@@ -219,6 +227,11 @@ func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit()
func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges }
func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {}
+
+// This is not part of the EnumDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (ed *Enum) Visibility() int32 { return ed.L1.Visibility }
+
func (ed *Enum) lazyInit() *EnumL2 {
ed.L0.ParentFile.lazyInit() // implicitly initializes L2
return ed.L2
@@ -244,13 +257,13 @@ type (
L2 *MessageL2 // protected by fileDesc.once
}
MessageL1 struct {
- Enums Enums
- Messages Messages
- Extensions Extensions
- IsMapEntry bool // promoted from google.protobuf.MessageOptions
- IsMessageSet bool // promoted from google.protobuf.MessageOptions
-
+ Enums Enums
+ Messages Messages
+ Extensions Extensions
EditionFeatures EditionFeatures
+ Visibility int32
+ IsMapEntry bool // promoted from google.protobuf.MessageOptions
+ IsMessageSet bool // promoted from google.protobuf.MessageOptions
}
MessageL2 struct {
Options func() protoreflect.ProtoMessage
@@ -319,6 +332,11 @@ func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L
func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions }
func (md *Message) ProtoType(protoreflect.MessageDescriptor) {}
func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
+
+// This is not part of the MessageDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (md *Message) Visibility() int32 { return md.L1.Visibility }
+
func (md *Message) lazyInit() *MessageL2 {
md.L0.ParentFile.lazyInit() // implicitly initializes L2
return md.L2
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index d2f549497..e91860f5a 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -284,6 +284,13 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl
case genid.EnumDescriptorProto_Value_field_number:
numValues++
}
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.EnumDescriptorProto_Visibility_field_number:
+ ed.L1.Visibility = int32(v)
+ }
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
@@ -365,6 +372,13 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
md.unmarshalSeedOptions(v)
}
prevField = num
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.DescriptorProto_Visibility_field_number:
+ md.L1.Visibility = int32(v)
+ }
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index d4c94458b..dd31faaeb 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -134,6 +134,7 @@ func (fd *File) unmarshalFull(b []byte) {
var enumIdx, messageIdx, extensionIdx, serviceIdx int
var rawOptions []byte
+ var optionImports []string
fd.L2 = new(FileL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@@ -157,6 +158,8 @@ func (fd *File) unmarshalFull(b []byte) {
imp = PlaceholderFile(path)
}
fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp})
+ case genid.FileDescriptorProto_OptionDependency_field_number:
+ optionImports = append(optionImports, sb.MakeString(v))
case genid.FileDescriptorProto_EnumType_field_number:
fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
enumIdx++
@@ -178,6 +181,23 @@ func (fd *File) unmarshalFull(b []byte) {
}
}
fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
+ if len(optionImports) > 0 {
+ var imps FileImports
+ var once sync.Once
+ fd.L2.OptionImports = func() protoreflect.FileImports {
+ once.Do(func() {
+ imps = make(FileImports, len(optionImports))
+ for i, path := range optionImports {
+ imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
+ if imp == nil {
+ imp = PlaceholderFile(path)
+ }
+ imps[i] = protoreflect.FileImport{FileDescriptor: imp}
+ }
+ })
+ return &imps
+ }
+ }
}
func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 31e79a653..77de0f238 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
const (
Major = 1
Minor = 36
- Patch = 9
+ Patch = 10
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index 823dbf3ba..9196288e4 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -152,6 +152,28 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
imp := &f.L2.Imports[i]
imps.importPublic(imp.Imports())
}
+ if len(fd.GetOptionDependency()) > 0 {
+ optionImports := make(filedesc.FileImports, len(fd.GetOptionDependency()))
+ for i, path := range fd.GetOptionDependency() {
+ imp := &optionImports[i]
+ f, err := r.FindFileByPath(path)
+ if err == protoregistry.NotFound {
+ // We always allow option imports to be unresolvable.
+ f = filedesc.PlaceholderFile(path)
+ } else if err != nil {
+ return nil, errors.New("could not resolve import %q: %v", path, err)
+ }
+ imp.FileDescriptor = f
+
+ if imps[imp.Path()] {
+ return nil, errors.New("already imported %q", path)
+ }
+ imps[imp.Path()] = true
+ }
+ f.L2.OptionImports = func() protoreflect.FileImports {
+ return &optionImports
+ }
+ }
// Handle source locations.
f.L2.Locations.File = f
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index 9da34998b..c826ad043 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -29,6 +29,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt
e.L2.Options = func() protoreflect.ProtoMessage { return opts }
}
e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures())
+ e.L1.Visibility = int32(ed.GetVisibility())
for _, s := range ed.GetReservedName() {
e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s))
}
@@ -70,6 +71,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt
return nil, err
}
m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures())
+ m.L1.Visibility = int32(md.GetVisibility())
if opts := md.GetOptions(); opts != nil {
opts = proto.Clone(opts).(*descriptorpb.MessageOptions)
m.L2.Options = func() protoreflect.ProtoMessage { return opts }
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index 9b880aa8c..6f91074e3 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -70,16 +70,27 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD
if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() {
p.Syntax = proto.String(file.Syntax().String())
}
+ desc := file
+ if fileImportDesc, ok := file.(protoreflect.FileImport); ok {
+ desc = fileImportDesc.FileDescriptor
+ }
if file.Syntax() == protoreflect.Editions {
- desc := file
- if fileImportDesc, ok := file.(protoreflect.FileImport); ok {
- desc = fileImportDesc.FileDescriptor
- }
-
if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok {
p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum()
}
}
+ type hasOptionImports interface {
+ OptionImports() protoreflect.FileImports
+ }
+ if opts, ok := desc.(hasOptionImports); ok {
+ if optionImports := opts.OptionImports(); optionImports.Len() > 0 {
+ optionDeps := make([]string, optionImports.Len())
+ for i := range optionImports.Len() {
+ optionDeps[i] = optionImports.Get(i).Path()
+ }
+ p.OptionDependency = optionDeps
+ }
+ }
return p
}
@@ -123,6 +134,14 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des
for i, names := 0, message.ReservedNames(); i < names.Len(); i++ {
p.ReservedName = append(p.ReservedName, string(names.Get(i)))
}
+ type hasVisibility interface {
+ Visibility() int32
+ }
+ if vis, ok := message.(hasVisibility); ok {
+ if visibility := vis.Visibility(); visibility > 0 {
+ p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum()
+ }
+ }
return p
}
@@ -216,6 +235,14 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD
for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ {
p.ReservedName = append(p.ReservedName, string(names.Get(i)))
}
+ type hasVisibility interface {
+ Visibility() int32
+ }
+ if vis, ok := enum.(hasVisibility); ok {
+ if visibility := vis.Visibility(); visibility > 0 {
+ p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum()
+ }
+ }
return p
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3bcc41f6e..c23459963 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -261,8 +261,8 @@ github.com/gdamore/tcell/v2/terminfo/x/xfce
github.com/gdamore/tcell/v2/terminfo/x/xterm
github.com/gdamore/tcell/v2/terminfo/x/xterm_ghostty
github.com/gdamore/tcell/v2/terminfo/x/xterm_kitty
-# github.com/go-jose/go-jose/v4 v4.0.5
-## explicit; go 1.21
+# github.com/go-jose/go-jose/v4 v4.1.3
+## explicit; go 1.24.0
github.com/go-jose/go-jose/v4
github.com/go-jose/go-jose/v4/cipher
github.com/go-jose/go-jose/v4/json
@@ -347,14 +347,6 @@ github.com/klauspost/pgzip
# github.com/kr/fs v0.1.0
## explicit
github.com/kr/fs
-# github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec
-## explicit; go 1.22.0
-github.com/letsencrypt/boulder/core
-github.com/letsencrypt/boulder/goodkey
-github.com/letsencrypt/boulder/identifier
-github.com/letsencrypt/boulder/probs
-github.com/letsencrypt/boulder/revocation
-github.com/letsencrypt/boulder/strictyaml
# github.com/lucasb-eyer/go-colorful v1.3.0
## explicit; go 1.12
github.com/lucasb-eyer/go-colorful
@@ -363,10 +355,10 @@ github.com/lucasb-eyer/go-colorful
github.com/manifoldco/promptui
github.com/manifoldco/promptui/list
github.com/manifoldco/promptui/screenbuf
-# github.com/mattn/go-colorable v0.1.13
-## explicit; go 1.15
+# github.com/mattn/go-colorable v0.1.14
+## explicit; go 1.18
github.com/mattn/go-colorable
-# github.com/mattn/go-isatty v0.0.19
+# github.com/mattn/go-isatty v0.0.20
## explicit; go 1.15
github.com/mattn/go-isatty
# github.com/mattn/go-runewidth v0.0.16
@@ -515,14 +507,14 @@ github.com/rs/zerolog/log
# github.com/secure-systems-lab/go-securesystemslib v0.9.1
## explicit; go 1.23.0
github.com/secure-systems-lab/go-securesystemslib/encrypted
-# github.com/sigstore/fulcio v1.7.1
-## explicit; go 1.24.0
+# github.com/sigstore/fulcio v1.8.3
+## explicit; go 1.25.0
github.com/sigstore/fulcio/pkg/certificate
-# github.com/sigstore/protobuf-specs v0.4.1
+# github.com/sigstore/protobuf-specs v0.5.0
## explicit; go 1.22.0
github.com/sigstore/protobuf-specs/gen/pb-go/common/v1
-# github.com/sigstore/sigstore v1.9.5
-## explicit; go 1.23.0
+# github.com/sigstore/sigstore v1.10.0
+## explicit; go 1.25.0
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/signature
github.com/sigstore/sigstore/pkg/signature/options
@@ -537,7 +529,7 @@ github.com/skeema/knownhosts
## explicit; go 1.14
github.com/smallstep/pkcs7
github.com/smallstep/pkcs7/internal/legacy/x509
-# github.com/spf13/cobra v1.10.1
+# github.com/spf13/cobra v1.10.2
## explicit; go 1.15
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.10
@@ -552,9 +544,6 @@ github.com/sylabs/sif/v2/pkg/sif
# github.com/tchap/go-patricia/v2 v2.3.3
## explicit; go 1.16
github.com/tchap/go-patricia/v2/patricia
-# github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399
-## explicit
-github.com/titanous/rocacheck
# github.com/ulikunitz/xz v0.5.15
## explicit; go 1.12
github.com/ulikunitz/xz
@@ -572,17 +561,16 @@ github.com/vbauerster/mpb/v8
github.com/vbauerster/mpb/v8/cwriter
github.com/vbauerster/mpb/v8/decor
github.com/vbauerster/mpb/v8/internal
-# go.opentelemetry.io/auto/sdk v1.1.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/auto/sdk v1.2.1
+## explicit; go 1.24.0
go.opentelemetry.io/auto/sdk
go.opentelemetry.io/auto/sdk/internal/telemetry
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
## explicit; go 1.23.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.36.0
+# go.opentelemetry.io/otel v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@@ -592,14 +580,14 @@ go.opentelemetry.io/otel/codes
go.opentelemetry.io/otel/internal/baggage
go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
-go.opentelemetry.io/otel/semconv/v1.20.0
-go.opentelemetry.io/otel/semconv/v1.26.0
-# go.opentelemetry.io/otel/metric v1.36.0
+go.opentelemetry.io/otel/semconv/v1.37.0
+go.opentelemetry.io/otel/semconv/v1.37.0/httpconv
+# go.opentelemetry.io/otel/metric v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/trace v1.36.0
+# go.opentelemetry.io/otel/trace v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
@@ -762,7 +750,7 @@ go.uber.org/automaxprocs
go.uber.org/automaxprocs/internal/cgroups
go.uber.org/automaxprocs/internal/runtime
go.uber.org/automaxprocs/maxprocs
-# go.yaml.in/yaml/v2 v2.4.2
+# go.yaml.in/yaml/v2 v2.4.3
## explicit; go 1.15
go.yaml.in/yaml/v2
# go.yaml.in/yaml/v3 v3.0.4
@@ -779,7 +767,6 @@ golang.org/x/crypto/curve25519
golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
golang.org/x/crypto/nacl/secretbox
-golang.org/x/crypto/ocsp
golang.org/x/crypto/openpgp
golang.org/x/crypto/openpgp/armor
golang.org/x/crypto/openpgp/elgamal
@@ -789,13 +776,12 @@ golang.org/x/crypto/openpgp/s2k
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/salsa20/salsa
golang.org/x/crypto/scrypt
-golang.org/x/crypto/sha3
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329
-## explicit; go 1.22.0
+# golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
+## explicit; go 1.23.0
golang.org/x/exp/maps
# golang.org/x/mod v0.30.0
## explicit; go 1.24.0
@@ -849,8 +835,8 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.11.0
-## explicit; go 1.23.0
+# golang.org/x/time v0.14.0
+## explicit; go 1.24.0
golang.org/x/time/rate
# golang.org/x/tools v0.39.0
## explicit; go 1.24.0
@@ -874,15 +860,15 @@ golang.org/x/tools/internal/stdlib
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/internal/versions
-# google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e
-## explicit; go 1.23.0
+# google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8
+## explicit; go 1.24.0
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e
-## explicit; go 1.23.0
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101
+## explicit; go 1.24.0
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.72.2
-## explicit; go 1.23
+# google.golang.org/grpc v1.77.0
+## explicit; go 1.24.0
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
@@ -892,7 +878,6 @@ google.golang.org/grpc/balancer/endpointsharding
google.golang.org/grpc/balancer/grpclb/state
google.golang.org/grpc/balancer/pickfirst
google.golang.org/grpc/balancer/pickfirst/internal
-google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/channelz
@@ -901,6 +886,7 @@ google.golang.org/grpc/connectivity
google.golang.org/grpc/credentials
google.golang.org/grpc/credentials/insecure
google.golang.org/grpc/encoding
+google.golang.org/grpc/encoding/internal
google.golang.org/grpc/encoding/proto
google.golang.org/grpc/experimental/stats
google.golang.org/grpc/grpclog
@@ -943,7 +929,7 @@ google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.36.9
+# google.golang.org/protobuf v1.36.10
## explicit; go 1.23
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext