From 24f2c99ed59696f6cb4e2ec6c537c772377f5ab1 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Wed, 16 Jul 2025 09:16:22 -0700 Subject: [PATCH] chore: Add clone to SdkLogger --- .github/repository-settings.md | 18 - Cargo.toml | 6 +- README.md | 4 +- opentelemetry-proto/Cargo.toml | 4 +- opentelemetry-proto/src/proto.rs | 4 +- opentelemetry-proto/tests/json_serde.rs | 23 +- opentelemetry-sdk/CHANGELOG.md | 2 + opentelemetry-sdk/Cargo.toml | 8 +- opentelemetry-sdk/src/logs/logger.rs | 2 +- .../span_processor_with_async_runtime.rs | 194 ++- .../scripts/generate-consts-from-spec.sh | 9 +- .../templates/registry/rust/weaver.yaml | 2 +- .../src/attribute.rs | 395 ++++- opentelemetry-semantic-conventions/src/lib.rs | 2 +- .../src/metric.rs | 1334 +++++++++++++---- .../src/resource.rs | 18 + .../src/trace.rs | 22 +- 17 files changed, 1614 insertions(+), 433 deletions(-) delete mode 100644 .github/repository-settings.md diff --git a/.github/repository-settings.md b/.github/repository-settings.md deleted file mode 100644 index c8d5b712b8..0000000000 --- a/.github/repository-settings.md +++ /dev/null @@ -1,18 +0,0 @@ -# Log of local changes - -Maintainers are expected to maintain this log. This is required as per -[OpenTelemetry Community -guidelines](https://github.com/open-telemetry/community/blob/main/docs/how-to-configure-new-repository.md#collaborators-and-teams). - -## May 6th 2024 - -Modified branch protection for main branch to require the following CI checks as -we now added Windows to CI. 
-test (ubuntu-latest, stable) -test (stable, windows-latest) - -## April 30th 2024 - -Modified branch protection for main branch to require the following CI checks: -docs -test (stable) diff --git a/Cargo.toml b/Cargo.toml index 43aa4a12ab..b9bab5d982 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,11 +45,11 @@ tonic = { version = "0.13", default-features = false } tonic-build = "0.13" tokio = { version = "1", default-features = false } tokio-stream = "0.1" -# Using `tracing 0.1.40` because 0.1.39 (which is yanked) introduces the ability to set event names in macros, +# Using `tracing 0.1.40` because 0.1.39 (which is yanked) introduces the ability to set event names in macros, # required for OpenTelemetry's internal logging macros. tracing = { version = ">=0.1.40", default-features = false } # `tracing-core >=0.1.33` is required for compatibility with `tracing >=0.1.40`. -tracing-core = { version = ">=0.1.33", default-features = false } +tracing-core = { version = ">=0.1.33", default-features = false } tracing-subscriber = { version = "0.3", default-features = false } url = { version = "2.5", default-features = false } anyhow = "1.0.94" @@ -59,7 +59,7 @@ ctor = "0.2.9" ctrlc = "3.2.5" futures-channel = "0.3" futures-sink = "0.3" -hex = "0.4.3" +const-hex = "1.14.1" lazy_static = "1.4.0" num-format = "0.4.4" num_cpus = "1.15.0" diff --git a/README.md b/README.md index 8679621b3a..46072c2ed1 100644 --- a/README.md +++ b/README.md @@ -192,17 +192,17 @@ For more information about the maintainer role, see the [community repository](h * [Anton Grübel](https://github.com/gruebel), Baz * [Björn Antonsson](https://github.com/bantonsson), Datadog -* [Shaun Cox](https://github.com/shaun-cox), Microsoft * [Scott Gerring](https://github.com/scottgerring), Datadog +* [Shaun Cox](https://github.com/shaun-cox), Microsoft For more information about the approver role, see the [community 
repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver). ### Emeritus * [Dirkjan Ochtman](https://github.com/djc) +* [Isobel Redelmeier](https://github.com/iredelmeier) * [Jan Kühle](https://github.com/frigus02) * [Julian Tescher](https://github.com/jtescher) -* [Isobel Redelmeier](https://github.com/iredelmeier) * [Mike Goldsmith](https://github.com/MikeGoldsmith) For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager). diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml index c6336c7c0f..d9b00bfffc 100644 --- a/opentelemetry-proto/Cargo.toml +++ b/opentelemetry-proto/Cargo.toml @@ -49,7 +49,7 @@ testing = ["opentelemetry/testing"] # add ons internal-logs = ["opentelemetry/internal-logs"] with-schemars = ["schemars"] -with-serde = ["serde", "hex", "base64"] +with-serde = ["serde", "const-hex", "base64"] [dependencies] tonic = { workspace = true, optional = true, features = ["codegen", "prost"] } @@ -58,7 +58,7 @@ opentelemetry = { version = "0.30", default-features = false, path = "../opentel opentelemetry_sdk = { version = "0.30", default-features = false, path = "../opentelemetry-sdk" } schemars = { workspace = true, optional = true } serde = { workspace = true, optional = true, features = ["serde_derive"] } -hex = { workspace = true, optional = true } +const-hex = { workspace = true, optional = true } base64 = { workspace = true, optional = true } [dev-dependencies] diff --git a/opentelemetry-proto/src/proto.rs b/opentelemetry-proto/src/proto.rs index a21f6cef70..3055f02a40 100644 --- a/opentelemetry-proto/src/proto.rs +++ b/opentelemetry-proto/src/proto.rs @@ -16,7 +16,7 @@ pub(crate) mod serializers { where S: Serializer, { - let hex_string = hex::encode(bytes); + let hex_string = const_hex::encode(bytes); 
serializer.serialize_str(&hex_string) } @@ -37,7 +37,7 @@ pub(crate) mod serializers { where E: de::Error, { - hex::decode(value).map_err(E::custom) + const_hex::decode(value).map_err(E::custom) } } diff --git a/opentelemetry-proto/tests/json_serde.rs b/opentelemetry-proto/tests/json_serde.rs index cfa6179619..982ef2798a 100644 --- a/opentelemetry-proto/tests/json_serde.rs +++ b/opentelemetry-proto/tests/json_serde.rs @@ -61,10 +61,11 @@ mod json_serde { dropped_attributes_count: 0, }), spans: vec![Span { - trace_id: hex::decode("5b8efff798038103d269b633813fc60c").unwrap(), - span_id: hex::decode("eee19b7ec3c1b174").unwrap(), + trace_id: const_hex::decode("5b8efff798038103d269b633813fc60c") + .unwrap(), + span_id: const_hex::decode("eee19b7ec3c1b174").unwrap(), trace_state: String::new(), - parent_span_id: hex::decode("eee19b7ec3c1b173").unwrap(), + parent_span_id: const_hex::decode("eee19b7ec3c1b173").unwrap(), flags: 0, name: String::from("I'm a server span"), kind: 2, @@ -267,10 +268,11 @@ mod json_serde { dropped_attributes_count: 1, }), spans: vec![Span { - trace_id: hex::decode("5b8efff798038103d269b633813fc60c").unwrap(), - span_id: hex::decode("eee19b7ec3c1b174").unwrap(), + trace_id: const_hex::decode("5b8efff798038103d269b633813fc60c") + .unwrap(), + span_id: const_hex::decode("eee19b7ec3c1b174").unwrap(), trace_state: String::from("browser=firefox,os=linux"), - parent_span_id: hex::decode("eee19b7ec3c1b173").unwrap(), + parent_span_id: const_hex::decode("eee19b7ec3c1b173").unwrap(), flags: 1, name: String::from("I'm a server span"), kind: 2, @@ -308,9 +310,9 @@ mod json_serde { }], dropped_events_count: 1, links: vec![Link { - trace_id: hex::decode("5b8efff798038103d269b633813fc60b") + trace_id: const_hex::decode("5b8efff798038103d269b633813fc60b") .unwrap(), - span_id: hex::decode("eee19b7ec3c1b172").unwrap(), + span_id: const_hex::decode("eee19b7ec3c1b172").unwrap(), trace_state: String::from("food=pizza,color=red"), attributes: vec![KeyValue { key: 
String::from("my.link.attr"), @@ -1272,8 +1274,9 @@ mod json_serde { ], dropped_attributes_count: 0, flags: 0, - trace_id: hex::decode("5b8efff798038103d269b633813fc60c").unwrap(), - span_id: hex::decode("eee19b7ec3c1b174").unwrap(), + trace_id: const_hex::decode("5b8efff798038103d269b633813fc60c") + .unwrap(), + span_id: const_hex::decode("eee19b7ec3c1b174").unwrap(), }], schema_url: String::new(), }], diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 6ba2f705d4..b807cb04b9 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -4,6 +4,8 @@ - TODO: Placeholder for Span processor related things - *Fix* SpanProcessor::on_start is no longer called on non recording spans +- **Fix**: Restore true parallel exports in the async-native `BatchSpanProcessor` by honoring `OTEL_BSP_MAX_CONCURRENT_EXPORTS` ([#3028](https://github.com/open-telemetry/opentelemetry-rust/pull/3028)). A regression in [#2685](https://github.com/open-telemetry/opentelemetry-rust/pull/2685) inadvertently awaited the `export()` future directly in `opentelemetry-sdk/src/trace/span_processor_with_async_runtime.rs` instead of spawning it on the runtime, forcing all exports to run sequentially. +- **Feature**: Added `Clone` implementation to `SdkLogger` for consistency with `SdkTracer` ([#3058](https://github.com/open-telemetry/opentelemetry-rust/issues/3058)). 
## 0.30.0 diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index aa92787ea7..3a5ebf0e5e 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -22,7 +22,7 @@ serde = { workspace = true, features = ["derive", "rc"], optional = true } serde_json = { workspace = true, optional = true } thiserror = { workspace = true } url = { workspace = true, optional = true } -tokio = { workspace = true, features = ["rt", "time"], optional = true } +tokio = { workspace = true, default-features = false, optional = true } tokio-stream = { workspace = true, optional = true } http = { workspace = true, optional = true } @@ -47,15 +47,15 @@ spec_unstable_logs_enabled = ["logs", "opentelemetry/spec_unstable_logs_enabled" metrics = ["opentelemetry/metrics"] testing = ["opentelemetry/testing", "trace", "metrics", "logs", "rt-tokio", "rt-tokio-current-thread", "tokio/macros", "tokio/rt-multi-thread"] experimental_async_runtime = [] -rt-tokio = ["tokio", "tokio-stream", "experimental_async_runtime"] -rt-tokio-current-thread = ["tokio", "tokio-stream", "experimental_async_runtime"] +rt-tokio = ["tokio/rt", "tokio/time", "tokio-stream", "experimental_async_runtime"] +rt-tokio-current-thread = ["tokio/rt", "tokio/time", "tokio-stream", "experimental_async_runtime"] internal-logs = ["opentelemetry/internal-logs"] experimental_metrics_periodicreader_with_async_runtime = ["metrics", "experimental_async_runtime"] spec_unstable_metrics_views = ["metrics"] experimental_metrics_custom_reader = ["metrics"] experimental_logs_batch_log_processor_with_async_runtime = ["logs", "experimental_async_runtime"] experimental_logs_concurrent_log_processor = ["logs"] -experimental_trace_batch_span_processor_with_async_runtime = ["trace", "experimental_async_runtime"] +experimental_trace_batch_span_processor_with_async_runtime = ["tokio/sync", "trace", "experimental_async_runtime"] experimental_metrics_disable_name_validation = ["metrics"] [[bench]] diff --git 
a/opentelemetry-sdk/src/logs/logger.rs b/opentelemetry-sdk/src/logs/logger.rs index f76a43792f..f27ed35f78 100644 --- a/opentelemetry-sdk/src/logs/logger.rs +++ b/opentelemetry-sdk/src/logs/logger.rs @@ -5,7 +5,7 @@ use opentelemetry::{trace::TraceContextExt, Context, InstrumentationScope}; use opentelemetry::logs::Severity; use opentelemetry::time::now; -#[derive(Debug)] +#[derive(Debug, Clone)] /// The object for emitting [`LogRecord`]s. /// /// [`LogRecord`]: opentelemetry::logs::LogRecord diff --git a/opentelemetry-sdk/src/trace/span_processor_with_async_runtime.rs b/opentelemetry-sdk/src/trace/span_processor_with_async_runtime.rs index 95e5d2397a..b294f74043 100644 --- a/opentelemetry-sdk/src/trace/span_processor_with_async_runtime.rs +++ b/opentelemetry-sdk/src/trace/span_processor_with_async_runtime.rs @@ -6,19 +6,21 @@ use crate::trace::Span; use crate::trace::SpanProcessor; use crate::trace::{SpanData, SpanExporter}; use futures_channel::oneshot; -use futures_util::pin_mut; use futures_util::{ future::{self, BoxFuture, Either}, - select, + pin_mut, select, stream::{self, FusedStream, FuturesUnordered}, StreamExt as _, }; use opentelemetry::Context; use opentelemetry::{otel_debug, otel_error, otel_warn}; use std::fmt; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; use std::time::Duration; +use tokio::sync::RwLock; /// A [`SpanProcessor`] that asynchronously buffers finished spans and reports /// them at a preconfigured interval. @@ -188,13 +190,22 @@ struct BatchSpanProcessorInternal { spans: Vec, export_tasks: FuturesUnordered>, runtime: R, - exporter: E, config: BatchConfig, + // TODO: Redesign the `SpanExporter` trait to use immutable references (`&self`) + // for all methods. This would allow us to remove the `RwLock` and just use `Arc`, + // similar to how `crate::logs::LogExporter` is implemented. 
+ exporter: Arc>, } -impl BatchSpanProcessorInternal { +impl BatchSpanProcessorInternal { async fn flush(&mut self, res_channel: Option>) { - let export_result = self.export().await; + let export_result = Self::export( + self.spans.split_off(0), + self.exporter.clone(), + self.runtime.clone(), + self.config.max_export_timeout, + ) + .await; let task = Box::pin(async move { if let Some(channel) = res_channel { // If a response channel is provided, attempt to send the export result through it. @@ -243,9 +254,15 @@ impl BatchSpanProcessorInternal { self.export_tasks.next().await; } - let export_result = self.export().await; + let batch = self.spans.split_off(0); + let exporter = self.exporter.clone(); + let runtime = self.runtime.clone(); + let max_export_timeout = self.config.max_export_timeout; + let task = async move { - if let Err(err) = export_result { + if let Err(err) = + Self::export(batch, exporter, runtime, max_export_timeout).await + { otel_error!( name: "BatchSpanProcessor.Export.Error", reason = format!("{}", err) @@ -254,6 +271,7 @@ impl BatchSpanProcessorInternal { Ok(()) }; + // Special case when not using concurrent exports if self.config.max_concurrent_exports == 1 { let _ = task.await; @@ -288,34 +306,39 @@ impl BatchSpanProcessorInternal { // Stream has terminated or processor is shutdown, return to finish execution. BatchMessage::Shutdown(ch) => { self.flush(Some(ch)).await; - let _ = self.exporter.shutdown(); + let _ = self.exporter.write().await.shutdown(); return false; } // propagate the resource BatchMessage::SetResource(resource) => { - self.exporter.set_resource(&resource); + self.exporter.write().await.set_resource(&resource); } } true } - async fn export(&mut self) -> OTelSdkResult { + async fn export( + batch: Vec, + exporter: Arc>, + runtime: R, + max_export_timeout: Duration, + ) -> OTelSdkResult { // Batch size check for flush / shutdown. Those methods may be called // when there's no work to do. 
- if self.spans.is_empty() { + if batch.is_empty() { return Ok(()); } - let export = self.exporter.export(self.spans.split_off(0)); - let timeout = self.runtime.delay(self.config.max_export_timeout); - let time_out = self.config.max_export_timeout; + let exporter_guard = exporter.read().await; + let export = exporter_guard.export(batch); + let timeout = runtime.delay(max_export_timeout); pin_mut!(export); pin_mut!(timeout); match future::select(export, timeout).await { Either::Left((export_res, _)) => export_res, - Either::Right((_, _)) => Err(OTelSdkError::Timeout(time_out)), + Either::Right((_, _)) => Err(OTelSdkError::Timeout(max_export_timeout)), } } @@ -368,7 +391,7 @@ impl BatchSpanProcessor { export_tasks: FuturesUnordered::new(), runtime: timeout_runtime, config, - exporter, + exporter: Arc::new(RwLock::new(exporter)), }; processor.run(messages).await @@ -435,6 +458,8 @@ mod tests { use crate::trace::{SpanData, SpanExporter}; use futures_util::Future; use std::fmt::Debug; + use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; + use std::sync::Arc; use std::time::Duration; struct BlockingExporter { @@ -463,6 +488,39 @@ mod tests { } } + /// Exporter that records whether two exports overlap in time. + struct TrackingExporter { + /// Artificial delay to keep each export alive for a while. + delay: Duration, + /// Current number of in-flight exports. + active: Arc, + /// Set to true the first time we see overlap. + concurrent_seen: Arc, + } + + impl Debug for TrackingExporter { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("tracking exporter") + } + } + + impl SpanExporter for TrackingExporter { + async fn export(&self, _batch: Vec) -> crate::error::OTelSdkResult { + // Increment in-flight counter and note any overlap. + let inflight = self.active.fetch_add(1, Ordering::SeqCst) + 1; + if inflight > 1 { + self.concurrent_seen.store(true, Ordering::SeqCst); + } + + // Keep the export "busy" for a bit. 
+ tokio::time::sleep(self.delay).await; + + // Decrement counter. + self.active.fetch_sub(1, Ordering::SeqCst); + Ok(()) + } + } + #[test] fn test_build_batch_span_processor_builder() { let mut env_vars = vec![ @@ -532,8 +590,8 @@ mod tests { ); } - // If the time_out is true, then the result suppose to ended with timeout. - // otherwise the exporter should be able to export within time out duration. + // If `time_out` is `true`, then the export should fail with a timeout. + // Else, the exporter should be able to export within the timeout duration. async fn timeout_test_tokio(time_out: bool) { let config = BatchConfig { max_export_timeout: Duration::from_millis(if time_out { 5 } else { 60 }), @@ -557,24 +615,92 @@ mod tests { assert!(shutdown_res.is_ok()); } - #[test] - fn test_timeout_tokio_timeout() { + #[tokio::test(flavor = "multi_thread")] + async fn test_timeout_tokio_timeout() { // If time_out is true, then we ask exporter to block for 60s and set timeout to 5s. // If time_out is false, then we ask the exporter to block for 5s and set timeout to 60s. // Either way, the test should be finished within 5s. - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .unwrap(); - runtime.block_on(timeout_test_tokio(true)); + timeout_test_tokio(true).await; } - #[test] - fn test_timeout_tokio_not_timeout() { - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .unwrap(); - runtime.block_on(timeout_test_tokio(false)); + #[tokio::test(flavor = "multi_thread")] + async fn test_timeout_tokio_not_timeout() { + timeout_test_tokio(false).await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_concurrent_exports_expected() { + // Shared state for the exporter. 
+ let active = Arc::new(AtomicUsize::new(0)); + let concurrent_seen = Arc::new(AtomicBool::new(false)); + + let exporter = TrackingExporter { + delay: Duration::from_millis(50), + active: active.clone(), + concurrent_seen: concurrent_seen.clone(), + }; + + // Intentionally tiny batch-size so every span forces an export. + let config = BatchConfig { + max_export_batch_size: 1, + max_queue_size: 16, + scheduled_delay: Duration::from_secs(3600), // effectively disabled + max_export_timeout: Duration::from_secs(5), + max_concurrent_exports: 2, // what we want to verify + }; + + // Spawn the processor. + let processor = BatchSpanProcessor::new(exporter, config, runtime::Tokio); + + // Finish three spans in rapid succession. + processor.on_end(new_test_export_span_data()); + processor.on_end(new_test_export_span_data()); + processor.on_end(new_test_export_span_data()); + + // Wait until everything has been exported. + processor.force_flush().expect("force flush failed"); + processor.shutdown().expect("shutdown failed"); + + // Expect at least one period with >1 export in flight. + assert!( + concurrent_seen.load(Ordering::SeqCst), + "exports never overlapped, processor is still serialising them" + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_exports_serial_when_max_concurrent_exports_1() { + let active = Arc::new(AtomicUsize::new(0)); + let concurrent_seen = Arc::new(AtomicBool::new(false)); + + let exporter = TrackingExporter { + delay: Duration::from_millis(50), + active: active.clone(), + concurrent_seen: concurrent_seen.clone(), + }; + + let config = BatchConfig { + max_export_batch_size: 1, + max_queue_size: 16, + scheduled_delay: Duration::from_secs(3600), + max_export_timeout: Duration::from_secs(5), + max_concurrent_exports: 1, // what we want to verify + }; + + let processor = BatchSpanProcessor::new(exporter, config, runtime::Tokio); + + // Finish several spans quickly. 
+ processor.on_end(new_test_export_span_data()); + processor.on_end(new_test_export_span_data()); + processor.on_end(new_test_export_span_data()); + + processor.force_flush().expect("force flush failed"); + processor.shutdown().expect("shutdown failed"); + + // There must never have been more than one export in flight. + assert!( + !concurrent_seen.load(Ordering::SeqCst), + "exports overlapped even though max_concurrent_exports was 1" + ); } } diff --git a/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh b/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh index 842c0eb4b7..d4e31fdca2 100755 --- a/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh +++ b/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh @@ -5,8 +5,8 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" CRATE_DIR="${SCRIPT_DIR}/../" # freeze the spec version and generator version to make generation reproducible -SPEC_VERSION=1.34.0 -WEAVER_VERSION=v0.15.2 +SPEC_VERSION=1.36.0 +WEAVER_VERSION=v0.16.1 cd "$CRATE_DIR" @@ -58,4 +58,9 @@ expression=' # TODO: This workaround should be removed once the upstream generator handles this correctly. "${SED[@]}" 's//`key`/g' src/attribute.rs +# Patch: rustdoc warns about bare URLs in doc comments. +# The following line wraps the specific Kubernetes ResourceRequirements URL with <...> +# as suggested by rustdoc warnings, so it becomes a clickable link and the warning goes away. 
+"${SED[@]}" -E 's|(/// See )(https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)( for details)|\1<\2>\3|g' src/metric.rs + cargo fmt diff --git a/opentelemetry-semantic-conventions/scripts/templates/registry/rust/weaver.yaml b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/weaver.yaml index 5fa1ed3903..7d9bc46a19 100644 --- a/opentelemetry-semantic-conventions/scripts/templates/registry/rust/weaver.yaml +++ b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/weaver.yaml @@ -14,7 +14,7 @@ comment_formats: default_comment_format: rust params: - schema_url: "https://opentelemetry.io/schemas/1.34.0" + schema_url: "https://opentelemetry.io/schemas/1.36.0" exclude_root_namespace: [] excluded_attributes: ["messaging.client_id"] diff --git a/opentelemetry-semantic-conventions/src/attribute.rs b/opentelemetry-semantic-conventions/src/attribute.rs index ec40d87cf2..4fcc1fc000 100644 --- a/opentelemetry-semantic-conventions/src/attribute.rs +++ b/opentelemetry-semantic-conventions/src/attribute.rs @@ -874,7 +874,7 @@ pub const AWS_STEP_FUNCTIONS_ACTIVITY_ARN: &str = "aws.step_functions.activity.a #[cfg(feature = "semconv_experimental")] pub const AWS_STEP_FUNCTIONS_STATE_MACHINE_ARN: &str = "aws.step_functions.state_machine.arn"; -/// [Azure Resource Provider Namespace](https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers) as recognized by the client. +/// Deprecated, use `azure.resource_provider.namespace` instead. 
/// /// ## Notes /// @@ -884,9 +884,12 @@ pub const AWS_STEP_FUNCTIONS_STATE_MACHINE_ARN: &str = "aws.step_functions.state /// - `"Microsoft.KeyVault"` /// - `"Microsoft.ServiceBus"` #[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "{note: Replaced by `azure.resource_provider.namespace`., reason: renamed, renamed_to: azure.resource_provider.namespace}" +)] pub const AZ_NAMESPACE: &str = "az.namespace"; -/// The unique identifier of the service request. It's generated by the Azure service and returned with the response. +/// Deprecated, use `azure.service.request.id` instead. /// /// ## Notes /// @@ -894,6 +897,9 @@ pub const AZ_NAMESPACE: &str = "az.namespace"; /// /// - `"00000000-0000-0000-0000-000000000000"` #[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "{note: Replaced by `azure.service.request.id`., reason: renamed, renamed_to: azure.service.request.id}" +)] pub const AZ_SERVICE_REQUEST_ID: &str = "az.service_request_id"; /// The unique identifier of the client instance. @@ -972,6 +978,28 @@ pub const AZURE_COSMOSDB_REQUEST_BODY_SIZE: &str = "azure.cosmosdb.request.body. #[cfg(feature = "semconv_experimental")] pub const AZURE_COSMOSDB_RESPONSE_SUB_STATUS_CODE: &str = "azure.cosmosdb.response.sub_status_code"; +/// [Azure Resource Provider Namespace](https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers) as recognized by the client. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Microsoft.Storage"` +/// - `"Microsoft.KeyVault"` +/// - `"Microsoft.ServiceBus"` +#[cfg(feature = "semconv_experimental")] +pub const AZURE_RESOURCE_PROVIDER_NAMESPACE: &str = "azure.resource_provider.namespace"; + +/// The unique identifier of the service request. It's generated by the Azure service and returned with the response. 
+/// +/// ## Notes +/// +/// # Examples +/// +/// - `"00000000-0000-0000-0000-000000000000"` +#[cfg(feature = "semconv_experimental")] +pub const AZURE_SERVICE_REQUEST_ID: &str = "azure.service.request.id"; + /// Array of brand name and version separated by a space /// /// ## Notes @@ -1355,7 +1383,7 @@ pub const CLOUD_REGION: &str = "cloud.region"; /// with the resolved function version, as the same runtime instance may be invocable with /// multiple different aliases. /// - **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) -/// - **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) of the invoked function, +/// - **Azure:** The [Fully Qualified Resource ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) of the invoked function, /// *not* the function app, having the form /// `/subscriptions/[SUBSCRIPTION_GUID]/resourceGroups/[RG]/providers/Microsoft.Web/sites/[FUNCAPP]/functions/[FUNC]`. /// This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share @@ -2749,6 +2777,19 @@ pub const DEVICE_MODEL_NAME: &str = "device.model.name"; #[cfg(feature = "semconv_experimental")] pub const DISK_IO_DIRECTION: &str = "disk.io.direction"; +/// The list of IPv4 or IPv6 addresses resolved during DNS lookup. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `[ +/// "10.0.0.1", +/// "2001:0db8:85a3:0000:0000:8a2e:0370:7334", +/// ]` +#[cfg(feature = "semconv_experimental")] +pub const DNS_ANSWERS: &str = "dns.answers"; + /// The name being queried. 
/// /// ## Notes @@ -4597,7 +4638,9 @@ pub const HTTP_RESPONSE_STATUS_CODE: &str = "http.response.status_code"; /// /// - `3495` #[cfg(feature = "semconv_experimental")] -#[deprecated(note = "{note: hp.response.header.content-length, reason: uncategorized}")] +#[deprecated( + note = "{note: Replaced by `http.response.header.content-length`., reason: uncategorized}" +)] pub const HTTP_RESPONSE_CONTENT_LENGTH: &str = "http.response_content_length"; /// Deprecated, use `http.response.body.size` instead. @@ -4921,6 +4964,36 @@ pub const K8S_CONTAINER_RESTART_COUNT: &str = "k8s.container.restart_count"; pub const K8S_CONTAINER_STATUS_LAST_TERMINATED_REASON: &str = "k8s.container.status.last_terminated_reason"; +/// The reason for the container state. Corresponds to the `reason` field of the: [K8s ContainerStateWaiting](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core) or [K8s ContainerStateTerminated](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core) +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"ContainerCreating"` +/// - `"CrashLoopBackOff"` +/// - `"CreateContainerConfigError"` +/// - `"ErrImagePull"` +/// - `"ImagePullBackOff"` +/// - `"OOMKilled"` +/// - `"Completed"` +/// - `"Error"` +/// - `"ContainerCannotRun"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_CONTAINER_STATUS_REASON: &str = "k8s.container.status.reason"; + +/// The state of the container. [K8s ContainerState](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core) +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"terminated"` +/// - `"running"` +/// - `"waiting"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_CONTAINER_STATUS_STATE: &str = "k8s.container.status.state"; + /// The cronjob annotation placed on the CronJob, the ``key`` being the annotation name, the value being the annotation value. 
/// /// ## Notes @@ -4977,29 +5050,39 @@ pub const K8S_CRONJOB_NAME: &str = "k8s.cronjob.name"; #[cfg(feature = "semconv_experimental")] pub const K8S_CRONJOB_UID: &str = "k8s.cronjob.uid"; -/// The annotation key-value pairs placed on the DaemonSet. +/// The annotation placed on the DaemonSet, the ``key`` being the annotation name, the value being the annotation value, even if the value is empty. /// /// ## Notes /// -/// The `[key]` being the annotation name, the value being the annotation value, even if the value is empty. +/// Examples: +/// +/// - A label `replicas` with value `1` SHOULD be recorded +/// as the `k8s.daemonset.annotation.replicas` attribute with value `"1"`. +/// - A label `data` with empty string value SHOULD be recorded as +/// the `k8s.daemonset.annotation.data` attribute with value `""`. /// /// # Examples /// -/// - `"k8s.daemonset.annotation.replicas=1"` -/// - `"k8s.daemonset.annotation.data="` +/// - `"1"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_DAEMONSET_ANNOTATION: &str = "k8s.daemonset.annotation"; -/// The label key-value pairs placed on the DaemonSet. +/// The label placed on the DaemonSet, the ``key`` being the label name, the value being the label value, even if the value is empty. /// /// ## Notes /// -/// The `[key]` being the label name, the value being the label value, even if the value is empty. +/// Examples: +/// +/// - A label `app` with value `guestbook` SHOULD be recorded +/// as the `k8s.daemonset.label.app` attribute with value `"guestbook"`. +/// - A label `data` with empty string value SHOULD be recorded as +/// the `k8s.daemonset.label.injected` attribute with value `""`. 
/// /// # Examples /// -/// - `"k8s.daemonset.label.app=guestbook"` -/// - `"k8s.daemonset.label.injected="` +/// - `"guestbook"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_DAEMONSET_LABEL: &str = "k8s.daemonset.label"; @@ -5023,29 +5106,39 @@ pub const K8S_DAEMONSET_NAME: &str = "k8s.daemonset.name"; #[cfg(feature = "semconv_experimental")] pub const K8S_DAEMONSET_UID: &str = "k8s.daemonset.uid"; -/// The annotation key-value pairs placed on the Deployment. +/// The annotation placed on the Deployment, the ``key`` being the annotation name, the value being the annotation value, even if the value is empty. /// /// ## Notes /// -/// The `[key]` being the annotation name, the value being the annotation value, even if the value is empty. +/// Examples: +/// +/// - A label `replicas` with value `1` SHOULD be recorded +/// as the `k8s.deployment.annotation.replicas` attribute with value `"1"`. +/// - A label `data` with empty string value SHOULD be recorded as +/// the `k8s.deployment.annotation.data` attribute with value `""`. /// /// # Examples /// -/// - `"k8s.deployment.annotation.replicas=1"` -/// - `"k8s.deployment.annotation.data="` +/// - `"1"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_DEPLOYMENT_ANNOTATION: &str = "k8s.deployment.annotation"; -/// The label key-value pairs placed on the Deployment. +/// The label placed on the Deployment, the ``key`` being the label name, the value being the label value, even if the value is empty. /// /// ## Notes /// -/// The `[key]` being the label name, the value being the label value, even if the value is empty. +/// Examples: +/// +/// - A label `replicas` with value `0` SHOULD be recorded +/// as the `k8s.deployment.label.app` attribute with value `"guestbook"`. +/// - A label `injected` with empty string value SHOULD be recorded as +/// the `k8s.deployment.label.injected` attribute with value `""`. 
/// /// # Examples /// -/// - `"k8s.deployment.label.app=guestbook"` -/// - `"k8s.deployment.label.injected="` +/// - `"guestbook"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_DEPLOYMENT_LABEL: &str = "k8s.deployment.label"; @@ -5069,6 +5162,19 @@ pub const K8S_DEPLOYMENT_NAME: &str = "k8s.deployment.name"; #[cfg(feature = "semconv_experimental")] pub const K8S_DEPLOYMENT_UID: &str = "k8s.deployment.uid"; +/// The type of metric source for the horizontal pod autoscaler. +/// +/// ## Notes +/// +/// This attribute reflects the `type` field of spec.metrics\[\] in the HPA. +/// +/// # Examples +/// +/// - `"Resource"` +/// - `"ContainerResource"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_HPA_METRIC_TYPE: &str = "k8s.hpa.metric.type"; + /// The name of the horizontal pod autoscaler. /// /// ## Notes @@ -5079,6 +5185,45 @@ pub const K8S_DEPLOYMENT_UID: &str = "k8s.deployment.uid"; #[cfg(feature = "semconv_experimental")] pub const K8S_HPA_NAME: &str = "k8s.hpa.name"; +/// The API version of the target resource to scale for the HorizontalPodAutoscaler. +/// +/// ## Notes +/// +/// This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA spec. +/// +/// # Examples +/// +/// - `"apps/v1"` +/// - `"autoscaling/v2"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_HPA_SCALETARGETREF_API_VERSION: &str = "k8s.hpa.scaletargetref.api_version"; + +/// The kind of the target resource to scale for the HorizontalPodAutoscaler. +/// +/// ## Notes +/// +/// This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. +/// +/// # Examples +/// +/// - `"Deployment"` +/// - `"StatefulSet"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_HPA_SCALETARGETREF_KIND: &str = "k8s.hpa.scaletargetref.kind"; + +/// The name of the target resource to scale for the HorizontalPodAutoscaler. +/// +/// ## Notes +/// +/// This maps to the `name` field in the `scaleTargetRef` of the HPA spec. 
+/// +/// # Examples +/// +/// - `"my-deployment"` +/// - `"my-statefulset"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_HPA_SCALETARGETREF_NAME: &str = "k8s.hpa.scaletargetref.name"; + /// The UID of the horizontal pod autoscaler. /// /// ## Notes @@ -5089,29 +5234,49 @@ pub const K8S_HPA_NAME: &str = "k8s.hpa.name"; #[cfg(feature = "semconv_experimental")] pub const K8S_HPA_UID: &str = "k8s.hpa.uid"; -/// The annotation key-value pairs placed on the Job. +/// The size (identifier) of the K8s huge page. /// /// ## Notes /// -/// The `[key]` being the annotation name, the value being the annotation value, even if the value is empty. +/// # Examples +/// +/// - `"2Mi"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_HUGEPAGE_SIZE: &str = "k8s.hugepage.size"; + +/// The annotation placed on the Job, the ``key`` being the annotation name, the value being the annotation value, even if the value is empty. +/// +/// ## Notes +/// +/// Examples: +/// +/// - A label `number` with value `1` SHOULD be recorded +/// as the `k8s.job.annotation.number` attribute with value `"1"`. +/// - A label `data` with empty string value SHOULD be recorded as +/// the `k8s.job.annotation.data` attribute with value `""`. /// /// # Examples /// -/// - `"k8s.job.annotation.number=1"` -/// - `"k8s.job.annotation.data="` +/// - `"1"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_JOB_ANNOTATION: &str = "k8s.job.annotation"; -/// The label key-value pairs placed on the Job. +/// The label placed on the Job, the ``key`` being the label name, the value being the label value, even if the value is empty. /// /// ## Notes /// -/// The `[key]` being the label name, the value being the label value, even if the value is empty. +/// Examples: +/// +/// - A label `jobtype` with value `ci` SHOULD be recorded +/// as the `k8s.job.label.jobtype` attribute with value `"ci"`. 
+/// - A label `data` with empty string value SHOULD be recorded as +/// the `k8s.job.label.automated` attribute with value `""`. /// /// # Examples /// -/// - `"k8s.job.label.jobtype=ci"` -/// - `"k8s.job.label.automated="` +/// - `"ci"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_JOB_LABEL: &str = "k8s.job.label"; @@ -5135,29 +5300,39 @@ pub const K8S_JOB_NAME: &str = "k8s.job.name"; #[cfg(feature = "semconv_experimental")] pub const K8S_JOB_UID: &str = "k8s.job.uid"; -/// The annotation key-value pairs placed on the Namespace. +/// The annotation placed on the Namespace, the ``key`` being the annotation name, the value being the annotation value, even if the value is empty. /// /// ## Notes /// -/// The `[key]` being the annotation name, the value being the annotation value, even if the value is empty. +/// Examples: +/// +/// - A label `ttl` with value `0` SHOULD be recorded +/// as the `k8s.namespace.annotation.ttl` attribute with value `"0"`. +/// - A label `data` with empty string value SHOULD be recorded as +/// the `k8s.namespace.annotation.data` attribute with value `""`. /// /// # Examples /// -/// - `"k8s.namespace.annotation.ttl=0"` -/// - `"k8s.namespace.annotation.data="` +/// - `"0"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_NAMESPACE_ANNOTATION: &str = "k8s.namespace.annotation"; -/// The label key-value pairs placed on the Namespace. +/// The label placed on the Namespace, the ``key`` being the label name, the value being the label value, even if the value is empty. /// /// ## Notes /// -/// The `[key]` being the label name, the value being the label value, even if the value is empty. +/// Examples: +/// +/// - A label `kubernetes.io/metadata.name` with value `default` SHOULD be recorded +/// as the `k8s.namespace.label.kubernetes.io/metadata.name` attribute with value `"default"`. 
+/// - A label `data` with empty string value SHOULD be recorded as +/// the `k8s.namespace.label.data` attribute with value `""`. /// /// # Examples /// -/// - `"k8s.namespace.label.kubernetes.io/metadata.name=default"` -/// - `"k8s.namespace.label.data="` +/// - `"default"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_NAMESPACE_LABEL: &str = "k8s.namespace.label"; @@ -5203,6 +5378,42 @@ pub const K8S_NAMESPACE_PHASE: &str = "k8s.namespace.phase"; #[cfg(feature = "semconv_experimental")] pub const K8S_NODE_ANNOTATION: &str = "k8s.node.annotation"; +/// The status of the condition, one of True, False, Unknown. +/// +/// ## Notes +/// +/// This attribute aligns with the `status` field of the +/// [NodeCondition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core) +/// +/// # Examples +/// +/// - `"true"` +/// - `"false"` +/// - `"unknown"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_CONDITION_STATUS: &str = "k8s.node.condition.status"; + +/// The condition type of a K8s Node. +/// +/// ## Notes +/// +/// K8s Node conditions as described +/// by [K8s documentation](https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition). +/// +/// This attribute aligns with the `type` field of the +/// [NodeCondition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core) +/// +/// The set of possible values is not limited to those listed here. Managed Kubernetes environments, +/// or custom controllers MAY introduce additional node condition types. +/// When this occurs, the exact value as reported by the Kubernetes API SHOULD be used. +/// +/// # Examples +/// +/// - `"Ready"` +/// - `"DiskPressure"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_CONDITION_TYPE: &str = "k8s.node.condition.type"; + /// The label placed on the Node, the ``key`` being the label name, the value being the label value, even if the value is empty. 
/// /// ## Notes @@ -5316,29 +5527,39 @@ pub const K8S_POD_NAME: &str = "k8s.pod.name"; #[cfg(feature = "semconv_experimental")] pub const K8S_POD_UID: &str = "k8s.pod.uid"; -/// The annotation key-value pairs placed on the ReplicaSet. +/// The annotation placed on the ReplicaSet, the ``key`` being the annotation name, the value being the annotation value, even if the value is empty. /// /// ## Notes /// -/// The `[key]` being the annotation name, the value being the annotation value, even if the value is empty. +/// Examples: +/// +/// - A label `replicas` with value `0` SHOULD be recorded +/// as the `k8s.replicaset.annotation.replicas` attribute with value `"0"`. +/// - A label `data` with empty string value SHOULD be recorded as +/// the `k8s.replicaset.annotation.data` attribute with value `""`. /// /// # Examples /// -/// - `"k8s.replicaset.annotation.replicas=0"` -/// - `"k8s.replicaset.annotation.data="` +/// - `"0"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_REPLICASET_ANNOTATION: &str = "k8s.replicaset.annotation"; -/// The label key-value pairs placed on the ReplicaSet. +/// The label placed on the ReplicaSet, the ``key`` being the label name, the value being the label value, even if the value is empty. /// /// ## Notes /// -/// The `[key]` being the label name, the value being the label value, even if the value is empty. +/// Examples: +/// +/// - A label `app` with value `guestbook` SHOULD be recorded +/// as the `k8s.replicaset.label.app` attribute with value `"guestbook"`. +/// - A label `injected` with empty string value SHOULD be recorded as +/// the `k8s.replicaset.label.injected` attribute with value `""`. 
/// /// # Examples /// -/// - `"k8s.replicaset.label.app=guestbook"` -/// - `"k8s.replicaset.label.injected="` +/// - `"guestbook"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_REPLICASET_LABEL: &str = "k8s.replicaset.label"; @@ -5392,6 +5613,18 @@ pub const K8S_REPLICATIONCONTROLLER_UID: &str = "k8s.replicationcontroller.uid"; #[cfg(feature = "semconv_experimental")] pub const K8S_RESOURCEQUOTA_NAME: &str = "k8s.resourcequota.name"; +/// The name of the K8s resource a resource quota defines. +/// +/// ## Notes +/// +/// The value for this attribute can be either the full `count/[resource][.[group]]` string (e.g., count/deployments.apps, count/pods), or, for certain core Kubernetes resources, just the resource name (e.g., pods, services, configmaps). Both forms are supported by Kubernetes for object count quotas. See [Kubernetes Resource Quotas documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota) for more details. +/// +/// # Examples +/// +/// - `"count/replicationcontrollers"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_RESOURCEQUOTA_RESOURCE_NAME: &str = "k8s.resourcequota.resource_name"; + /// The UID of the resource quota. /// /// ## Notes @@ -5402,29 +5635,39 @@ pub const K8S_RESOURCEQUOTA_NAME: &str = "k8s.resourcequota.name"; #[cfg(feature = "semconv_experimental")] pub const K8S_RESOURCEQUOTA_UID: &str = "k8s.resourcequota.uid"; -/// The annotation key-value pairs placed on the StatefulSet. +/// The annotation placed on the StatefulSet, the ``key`` being the annotation name, the value being the annotation value, even if the value is empty. /// /// ## Notes /// -/// The `[key]` being the annotation name, the value being the annotation value, even if the value is empty. +/// Examples: +/// +/// - A label `replicas` with value `1` SHOULD be recorded +/// as the `k8s.statefulset.annotation.replicas` attribute with value `"1"`. 
+/// - A label `data` with empty string value SHOULD be recorded as +/// the `k8s.statefulset.annotation.data` attribute with value `""`. /// /// # Examples /// -/// - `"k8s.statefulset.annotation.replicas=1"` -/// - `"k8s.statefulset.annotation.data="` +/// - `"1"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_STATEFULSET_ANNOTATION: &str = "k8s.statefulset.annotation"; -/// The label key-value pairs placed on the StatefulSet. +/// The label placed on the StatefulSet, the ``key`` being the label name, the value being the label value, even if the value is empty. /// /// ## Notes /// -/// The `[key]` being the label name, the value being the label value, even if the value is empty. +/// Examples: +/// +/// - A label `replicas` with value `0` SHOULD be recorded +/// as the `k8s.statefulset.label.app` attribute with value `"guestbook"`. +/// - A label `injected` with empty string value SHOULD be recorded as +/// the `k8s.statefulset.label.injected` attribute with value `""`. /// /// # Examples /// -/// - `"k8s.statefulset.label.app=guestbook"` -/// - `"k8s.statefulset.label.injected="` +/// - `"guestbook"` +/// - `""` #[cfg(feature = "semconv_experimental")] pub const K8S_STATEFULSET_LABEL: &str = "k8s.statefulset.label"; @@ -5448,6 +5691,16 @@ pub const K8S_STATEFULSET_NAME: &str = "k8s.statefulset.name"; #[cfg(feature = "semconv_experimental")] pub const K8S_STATEFULSET_UID: &str = "k8s.statefulset.uid"; +/// The name of K8s [StorageClass](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io) object. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"gold.storageclass.storage.k8s.io"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_STORAGECLASS_NAME: &str = "k8s.storageclass.name"; + /// The name of the K8s volume. 
/// /// ## Notes @@ -5552,6 +5805,16 @@ pub const LOG_RECORD_ORIGINAL: &str = "log.record.original"; #[cfg(feature = "semconv_experimental")] pub const LOG_RECORD_UID: &str = "log.record.uid"; +/// Name of the logical partition that hosts a systems with a mainframe operating system. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"LPAR01"` +#[cfg(feature = "semconv_experimental")] +pub const MAINFRAME_LPAR_NAME: &str = "mainframe.lpar.name"; + /// Deprecated, use `rpc.message.compressed_size` instead. /// /// ## Notes @@ -6612,6 +6875,12 @@ pub const OTEL_SCOPE_NAME: &str = "otel.scope.name"; /// - `"1.0.0"` pub const OTEL_SCOPE_VERSION: &str = "otel.scope.version"; +/// Determines whether the span has a parent span, and if so, [whether it is a remote parent](https://opentelemetry.io/docs/specs/otel/trace/api/#isremote) +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +pub const OTEL_SPAN_PARENT_ORIGIN: &str = "otel.span.parent.origin"; + /// The result value of the sampler for this span /// /// ## Notes @@ -6736,7 +7005,7 @@ pub const PROCESS_CPU_STATE: &str = "process.cpu.state"; #[cfg(feature = "semconv_experimental")] pub const PROCESS_CREATION_TIME: &str = "process.creation.time"; -/// Process environment variables, `key` being the environment variable name, the value being the environment variable value. +/// Process environment variables, ``key`` being the environment variable name, the value being the environment variable value. /// /// ## Notes /// @@ -8727,3 +8996,23 @@ pub const WEBENGINE_NAME: &str = "webengine.name"; /// - `"21.0.0"` #[cfg(feature = "semconv_experimental")] pub const WEBENGINE_VERSION: &str = "webengine.version"; + +/// The System Management Facility (SMF) Identifier uniquely identified a z/OS system within a SYSPLEX or mainframe environment and is used for system and performance analysis. 
+/// +/// ## Notes +/// +/// # Examples +/// +/// - `"SYS1"` +#[cfg(feature = "semconv_experimental")] +pub const ZOS_SMF_ID: &str = "zos.smf.id"; + +/// The name of the SYSPLEX to which the z/OS system belongs too. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"SYSPLEX1"` +#[cfg(feature = "semconv_experimental")] +pub const ZOS_SYSPLEX_NAME: &str = "zos.sysplex.name"; diff --git a/opentelemetry-semantic-conventions/src/lib.rs b/opentelemetry-semantic-conventions/src/lib.rs index c82d653f9f..b2ac9dc752 100644 --- a/opentelemetry-semantic-conventions/src/lib.rs +++ b/opentelemetry-semantic-conventions/src/lib.rs @@ -22,4 +22,4 @@ pub mod trace; /// The schema URL that matches the version of the semantic conventions that /// this crate defines. -pub const SCHEMA_URL: &str = "https://opentelemetry.io/schemas/1.34.0"; +pub const SCHEMA_URL: &str = "https://opentelemetry.io/schemas/1.36.0"; diff --git a/opentelemetry-semantic-conventions/src/metric.rs b/opentelemetry-semantic-conventions/src/metric.rs index 39f309e93b..9eb0bc7cde 100644 --- a/opentelemetry-semantic-conventions/src/metric.rs +++ b/opentelemetry-semantic-conventions/src/metric.rs @@ -450,24 +450,22 @@ pub const CONTAINER_UPTIME: &str = "container.uptime"; /// ## Description /// -/// Operating frequency of the logical CPU in Hertz +/// Deprecated. Use `system.cpu.frequency` instead /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `gauge` | -/// | Unit: | `Hz` | +/// | Unit: | `{Hz}` | /// | Status: | `Development` | -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -/// | [`crate::attribute::CPU_LOGICAL_NUMBER`] | `Recommended` #[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "{note: Replaced by `system.cpu.frequency`., reason: renamed, renamed_to: system.cpu.frequency}" +)] pub const CPU_FREQUENCY: &str = "cpu.frequency"; /// ## Description /// -/// Seconds each logical CPU spent on each mode +/// Deprecated. 
Use `system.cpu.time` instead /// ## Metadata /// | | | /// |:-|:- @@ -481,11 +479,14 @@ pub const CPU_FREQUENCY: &str = "cpu.frequency"; /// | [`crate::attribute::CPU_LOGICAL_NUMBER`] | `Recommended` /// | [`crate::attribute::CPU_MODE`] | `Recommended` #[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "{note: Replaced by `system.cpu.time`., reason: renamed, renamed_to: system.cpu.time}" +)] pub const CPU_TIME: &str = "cpu.time"; /// ## Description /// -/// For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time +/// Deprecated. Use `system.cpu.utilization` instead /// ## Metadata /// | | | /// |:-|:- @@ -499,6 +500,9 @@ pub const CPU_TIME: &str = "cpu.time"; /// | [`crate::attribute::CPU_LOGICAL_NUMBER`] | `Recommended` /// | [`crate::attribute::CPU_MODE`] | `Recommended` #[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "{note: Replaced by `system.cpu.utilization`., reason: renamed, renamed_to: system.cpu.utilization}" +)] pub const CPU_UTILIZATION: &str = "cpu.utilization"; /// ## Description @@ -2504,315 +2508,306 @@ pub const JVM_THREAD_COUNT: &str = "jvm.thread.count"; /// ## Description /// -/// The number of actively running jobs for a cronjob +/// Maximum CPU resource limit set for the container /// /// ## Notes /// -/// This metric aligns with the `active` field of the -/// [K8s CronJobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#cronjobstatus-v1-batch). 
-/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.cronjob`](../resource/k8s.md#cronjob) resource +/// See for details /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{job}` | +/// | Unit: | `{cpu}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_CRONJOB_ACTIVE_JOBS: &str = "k8s.cronjob.active_jobs"; +pub const K8S_CONTAINER_CPU_LIMIT: &str = "k8s.container.cpu.limit"; /// ## Description /// -/// Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod +/// CPU resource requested for the container /// /// ## Notes /// -/// This metric aligns with the `currentNumberScheduled` field of the -/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). -/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.daemonset`](../resource/k8s.md#daemonset) resource +/// See for details /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{node}` | +/// | Unit: | `{cpu}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_DAEMONSET_CURRENT_SCHEDULED_NODES: &str = "k8s.daemonset.current_scheduled_nodes"; +pub const K8S_CONTAINER_CPU_REQUEST: &str = "k8s.container.cpu.request"; /// ## Description /// -/// Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) +/// Maximum ephemeral storage resource limit set for the container /// /// ## Notes /// -/// This metric aligns with the `desiredNumberScheduled` field of the -/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). 
-/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.daemonset`](../resource/k8s.md#daemonset) resource +/// See for details /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{node}` | +/// | Unit: | `By` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_DAEMONSET_DESIRED_SCHEDULED_NODES: &str = "k8s.daemonset.desired_scheduled_nodes"; +pub const K8S_CONTAINER_EPHEMERAL_STORAGE_LIMIT: &str = "k8s.container.ephemeral_storage.limit"; /// ## Description /// -/// Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod +/// Ephemeral storage resource requested for the container /// /// ## Notes /// -/// This metric aligns with the `numberMisscheduled` field of the -/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). -/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.daemonset`](../resource/k8s.md#daemonset) resource +/// See for details /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{node}` | +/// | Unit: | `By` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_DAEMONSET_MISSCHEDULED_NODES: &str = "k8s.daemonset.misscheduled_nodes"; +pub const K8S_CONTAINER_EPHEMERAL_STORAGE_REQUEST: &str = "k8s.container.ephemeral_storage.request"; /// ## Description /// -/// Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready +/// Maximum memory resource limit set for the container /// /// ## Notes /// -/// This metric aligns with the `numberReady` field of the -/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). 
-/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.daemonset`](../resource/k8s.md#daemonset) resource +/// See for details /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{node}` | +/// | Unit: | `By` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_DAEMONSET_READY_NODES: &str = "k8s.daemonset.ready_nodes"; +pub const K8S_CONTAINER_MEMORY_LIMIT: &str = "k8s.container.memory.limit"; /// ## Description /// -/// Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment +/// Memory resource requested for the container /// /// ## Notes /// -/// This metric aligns with the `availableReplicas` field of the -/// [K8s DeploymentStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstatus-v1-apps). -/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.deployment`](../resource/k8s.md#deployment) resource +/// See for details /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `By` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_DEPLOYMENT_AVAILABLE_PODS: &str = "k8s.deployment.available_pods"; +pub const K8S_CONTAINER_MEMORY_REQUEST: &str = "k8s.container.memory.request"; /// ## Description /// -/// Number of desired replica pods in this deployment +/// Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready) /// /// ## Notes /// -/// This metric aligns with the `replicas` field of the -/// [K8s DeploymentSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentspec-v1-apps). 
-/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.deployment`](../resource/k8s.md#deployment) resource +/// This metric SHOULD reflect the value of the `ready` field in the +/// [K8s ContainerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatus-v1-core) /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{container}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_DEPLOYMENT_DESIRED_PODS: &str = "k8s.deployment.desired_pods"; +pub const K8S_CONTAINER_READY: &str = "k8s.container.ready"; /// ## Description /// -/// Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler +/// Describes how many times the container has restarted (since the last counter reset) /// /// ## Notes /// -/// This metric aligns with the `currentReplicas` field of the -/// [K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling) -/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource +/// This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 +/// at any time depending on how your kubelet is configured to prune dead containers. 
+/// It is best to not depend too much on the exact value but rather look at it as +/// either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case +/// you can conclude there were restarts in the recent past, and not try and analyze the value beyond that /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{restart}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_HPA_CURRENT_PODS: &str = "k8s.hpa.current_pods"; +pub const K8S_CONTAINER_RESTART_COUNT: &str = "k8s.container.restart.count"; /// ## Description /// -/// Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler +/// Describes the number of K8s containers that are currently in a state for a given reason /// /// ## Notes /// -/// This metric aligns with the `desiredReplicas` field of the -/// [K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling) -/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource +/// All possible container state reasons will be reported at each time interval to avoid missing metrics. 
+/// Only the value corresponding to the current state reason will be non-zero /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{container}` | /// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_CONTAINER_STATUS_REASON`] | `Required` #[cfg(feature = "semconv_experimental")] -pub const K8S_HPA_DESIRED_PODS: &str = "k8s.hpa.desired_pods"; +pub const K8S_CONTAINER_STATUS_REASON: &str = "k8s.container.status.reason"; /// ## Description /// -/// The upper limit for the number of replica pods to which the autoscaler can scale up +/// Describes the number of K8s containers that are currently in a given state /// /// ## Notes /// -/// This metric aligns with the `maxReplicas` field of the -/// [K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling) -/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource +/// All possible container states will be reported at each time interval to avoid missing metrics. 
+/// Only the value corresponding to the current state will be non-zero /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{container}` | /// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_CONTAINER_STATUS_STATE`] | `Required` #[cfg(feature = "semconv_experimental")] -pub const K8S_HPA_MAX_PODS: &str = "k8s.hpa.max_pods"; +pub const K8S_CONTAINER_STATUS_STATE: &str = "k8s.container.status.state"; /// ## Description /// -/// The lower limit for the number of replica pods to which the autoscaler can scale down +/// Maximum storage resource limit set for the container /// /// ## Notes /// -/// This metric aligns with the `minReplicas` field of the -/// [K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling) +/// See for details +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_CONTAINER_STORAGE_LIMIT: &str = "k8s.container.storage.limit"; + +/// ## Description +/// +/// Storage resource requested for the container /// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource +/// ## Notes +/// +/// See for details /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `By` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_HPA_MIN_PODS: &str = "k8s.hpa.min_pods"; +pub const K8S_CONTAINER_STORAGE_REQUEST: &str = "k8s.container.storage.request"; /// ## Description /// -/// The number of pending and actively running pods for a job +/// The number of actively running jobs for a cronjob /// /// ## Notes /// /// This metric aligns with 
the `active` field of the -/// [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). -/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.job`](../resource/k8s.md#job) resource +/// [K8s CronJobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#cronjobstatus-v1-batch) /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{job}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_JOB_ACTIVE_PODS: &str = "k8s.job.active_pods"; +pub const K8S_CRONJOB_ACTIVE_JOBS: &str = "k8s.cronjob.active_jobs"; /// ## Description /// -/// The desired number of successfully finished pods the job should be run with +/// Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod /// /// ## Notes /// -/// This metric aligns with the `completions` field of the -/// [K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch). 
-/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.job`](../resource/k8s.md#job) resource +/// This metric aligns with the `currentNumberScheduled` field of the +/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps) /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{node}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_JOB_DESIRED_SUCCESSFUL_PODS: &str = "k8s.job.desired_successful_pods"; +pub const K8S_DAEMONSET_CURRENT_SCHEDULED_NODES: &str = "k8s.daemonset.current_scheduled_nodes"; /// ## Description /// -/// The number of pods which reached phase Failed for a job +/// Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) /// /// ## Notes /// -/// This metric aligns with the `failed` field of the -/// [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). 
-/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.job`](../resource/k8s.md#job) resource +/// This metric aligns with the `desiredNumberScheduled` field of the +/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps) /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{node}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_JOB_FAILED_PODS: &str = "k8s.job.failed_pods"; +pub const K8S_DAEMONSET_DESIRED_SCHEDULED_NODES: &str = "k8s.daemonset.desired_scheduled_nodes"; /// ## Description /// -/// The max desired number of pods the job should run at any given time +/// Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod /// /// ## Notes /// -/// This metric aligns with the `parallelism` field of the -/// [K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch). 
-/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.job`](../resource/k8s.md#job) resource +/// This metric aligns with the `numberMisscheduled` field of the +/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps) /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{node}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_JOB_MAX_PARALLEL_PODS: &str = "k8s.job.max_parallel_pods"; +pub const K8S_DAEMONSET_MISSCHEDULED_NODES: &str = "k8s.daemonset.misscheduled_nodes"; /// ## Description /// -/// The number of pods which reached phase Succeeded for a job +/// Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready /// /// ## Notes /// -/// This metric aligns with the `succeeded` field of the -/// [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). 
+/// This metric aligns with the `numberReady` field of the +/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{node}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_DAEMONSET_READY_NODES: &str = "k8s.daemonset.ready_nodes"; + +/// ## Description +/// +/// Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment +/// +/// ## Notes /// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.job`](../resource/k8s.md#job) resource +/// This metric aligns with the `availableReplicas` field of the +/// [K8s DeploymentStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstatus-v1-apps) /// ## Metadata /// | | | /// |:-|:- @@ -2820,343 +2815,1062 @@ pub const K8S_JOB_MAX_PARALLEL_PODS: &str = "k8s.job.max_parallel_pods"; /// | Unit: | `{pod}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_JOB_SUCCESSFUL_PODS: &str = "k8s.job.successful_pods"; +pub const K8S_DEPLOYMENT_AVAILABLE_PODS: &str = "k8s.deployment.available_pods"; /// ## Description /// -/// Describes number of K8s namespaces that are currently in a given phase. 
+/// Number of desired replica pods in this deployment /// /// ## Notes /// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.namespace`](../resource/k8s.md#namespace) resource +/// This metric aligns with the `replicas` field of the +/// [K8s DeploymentSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentspec-v1-apps) /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{namespace}` | +/// | Unit: | `{pod}` | /// | Status: | `Development` | -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -/// | [`crate::attribute::K8S_NAMESPACE_PHASE`] | `Required` #[cfg(feature = "semconv_experimental")] -pub const K8S_NAMESPACE_PHASE: &str = "k8s.namespace.phase"; +pub const K8S_DEPLOYMENT_DESIRED_PODS: &str = "k8s.deployment.desired_pods"; /// ## Description /// -/// Total CPU time consumed +/// Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler /// /// ## Notes /// -/// Total CPU time consumed by the specific Node on all available CPU cores +/// This metric aligns with the `currentReplicas` field of the +/// [K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `counter` | -/// | Unit: | `s` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_NODE_CPU_TIME: &str = "k8s.node.cpu.time"; +pub const K8S_HPA_CURRENT_PODS: &str = "k8s.hpa.current_pods"; /// ## Description /// -/// Node's CPU usage, measured in cpus. 
Range from 0 to the number of allocatable CPUs +/// Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler /// /// ## Notes /// -/// CPU usage of the specific Node on all available CPU cores, averaged over the sample window +/// This metric aligns with the `desiredReplicas` field of the +/// [K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `gauge` | -/// | Unit: | `{cpu}` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_NODE_CPU_USAGE: &str = "k8s.node.cpu.usage"; +pub const K8S_HPA_DESIRED_PODS: &str = "k8s.hpa.desired_pods"; /// ## Description /// -/// Memory usage of the Node +/// The upper limit for the number of replica pods to which the autoscaler can scale up /// /// ## Notes /// -/// Total memory usage of the Node +/// This metric aligns with the `maxReplicas` field of the +/// [K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `gauge` | -/// | Unit: | `By` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_NODE_MEMORY_USAGE: &str = "k8s.node.memory.usage"; +pub const K8S_HPA_MAX_PODS: &str = "k8s.hpa.max_pods"; /// ## Description /// -/// Node network errors +/// Target average utilization, in percentage, for CPU resource in HPA config. +/// +/// ## Notes +/// +/// This metric aligns with the `averageUtilization` field of the +/// [K8s HPA MetricTarget](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling). 
+/// If the type of the metric is [`ContainerResource`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis), +/// the `k8s.container.name` attribute MUST be set to identify the specific container within the pod to which the metric applies /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `counter` | -/// | Unit: | `{error}` | +/// | Instrument: | `gauge` | +/// | Unit: | `1` | /// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` -/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +/// | [`crate::attribute::K8S_CONTAINER_NAME`] | `Conditionally_required`: if and only if k8s.hpa.metric.type is ContainerResource. +/// | [`crate::attribute::K8S_HPA_METRIC_TYPE`] | `Recommended` #[cfg(feature = "semconv_experimental")] -pub const K8S_NODE_NETWORK_ERRORS: &str = "k8s.node.network.errors"; +pub const K8S_HPA_METRIC_TARGET_CPU_AVERAGE_UTILIZATION: &str = + "k8s.hpa.metric.target.cpu.average_utilization"; /// ## Description /// -/// Network bytes for the Node +/// Target average value for CPU resource in HPA config. +/// +/// ## Notes +/// +/// This metric aligns with the `averageValue` field of the +/// [K8s HPA MetricTarget](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling). 
+/// If the type of the metric is [`ContainerResource`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis), +/// the `k8s.container.name` attribute MUST be set to identify the specific container within the pod to which the metric applies /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `counter` | -/// | Unit: | `By` | +/// | Instrument: | `gauge` | +/// | Unit: | `{cpu}` | /// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` -/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +/// | [`crate::attribute::K8S_CONTAINER_NAME`] | `Conditionally_required`: if and only if k8s.hpa.metric.type is ContainerResource +/// | [`crate::attribute::K8S_HPA_METRIC_TYPE`] | `Recommended` #[cfg(feature = "semconv_experimental")] -pub const K8S_NODE_NETWORK_IO: &str = "k8s.node.network.io"; +pub const K8S_HPA_METRIC_TARGET_CPU_AVERAGE_VALUE: &str = "k8s.hpa.metric.target.cpu.average_value"; /// ## Description /// -/// The time the Node has been running +/// Target value for CPU resource in HPA config. /// /// ## Notes /// -/// Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. -/// The actual accuracy would depend on the instrumentation and operating system +/// This metric aligns with the `value` field of the +/// [K8s HPA MetricTarget](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling). 
+/// If the type of the metric is [`ContainerResource`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis), +/// the `k8s.container.name` attribute MUST be set to identify the specific container within the pod to which the metric applies /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `gauge` | -/// | Unit: | `s` | +/// | Unit: | `{cpu}` | /// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_CONTAINER_NAME`] | `Conditionally_required`: if and only if k8s.hpa.metric.type is ContainerResource +/// | [`crate::attribute::K8S_HPA_METRIC_TYPE`] | `Recommended` #[cfg(feature = "semconv_experimental")] -pub const K8S_NODE_UPTIME: &str = "k8s.node.uptime"; +pub const K8S_HPA_METRIC_TARGET_CPU_VALUE: &str = "k8s.hpa.metric.target.cpu.value"; /// ## Description /// -/// Total CPU time consumed +/// The lower limit for the number of replica pods to which the autoscaler can scale down /// /// ## Notes /// -/// Total CPU time consumed by the specific Pod on all available CPU cores +/// This metric aligns with the `minReplicas` field of the +/// [K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `counter` | -/// | Unit: | `s` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_POD_CPU_TIME: &str = "k8s.pod.cpu.time"; +pub const K8S_HPA_MIN_PODS: &str = "k8s.hpa.min_pods"; /// ## Description /// -/// Pod's CPU usage, measured in cpus. 
Range from 0 to the number of allocatable CPUs +/// The number of pending and actively running pods for a job /// /// ## Notes /// -/// CPU usage of the specific Pod on all available CPU cores, averaged over the sample window +/// This metric aligns with the `active` field of the +/// [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `gauge` | -/// | Unit: | `{cpu}` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_POD_CPU_USAGE: &str = "k8s.pod.cpu.usage"; +pub const K8S_JOB_ACTIVE_PODS: &str = "k8s.job.active_pods"; /// ## Description /// -/// Memory usage of the Pod +/// The desired number of successfully finished pods the job should be run with /// /// ## Notes /// -/// Total memory usage of the Pod +/// This metric aligns with the `completions` field of the +/// [K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_JOB_DESIRED_SUCCESSFUL_PODS: &str = "k8s.job.desired_successful_pods"; + +/// ## Description +/// +/// The number of pods which reached phase Failed for a job +/// +/// ## Notes +/// +/// This metric aligns with the `failed` field of the +/// [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_JOB_FAILED_PODS: &str = "k8s.job.failed_pods"; + +/// ## Description +/// +/// The max desired number of pods the job should run at any given time +/// +/// ## Notes 
+/// +/// This metric aligns with the `parallelism` field of the +/// [K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_JOB_MAX_PARALLEL_PODS: &str = "k8s.job.max_parallel_pods"; + +/// ## Description +/// +/// The number of pods which reached phase Succeeded for a job +/// +/// ## Notes +/// +/// This metric aligns with the `succeeded` field of the +/// [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_JOB_SUCCESSFUL_PODS: &str = "k8s.job.successful_pods"; + +/// ## Description +/// +/// Describes number of K8s namespaces that are currently in a given phase +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{namespace}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_NAMESPACE_PHASE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const K8S_NAMESPACE_PHASE: &str = "k8s.namespace.phase"; + +/// ## Description +/// +/// Amount of cpu allocatable on the node +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{cpu}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_ALLOCATABLE_CPU: &str = "k8s.node.allocatable.cpu"; + +/// ## Description +/// +/// Amount of ephemeral-storage allocatable on the node +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Development` | +#[cfg(feature = 
"semconv_experimental")] +pub const K8S_NODE_ALLOCATABLE_EPHEMERAL_STORAGE: &str = "k8s.node.allocatable.ephemeral_storage"; + +/// ## Description +/// +/// Amount of memory allocatable on the node +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_ALLOCATABLE_MEMORY: &str = "k8s.node.allocatable.memory"; + +/// ## Description +/// +/// Amount of pods allocatable on the node +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_ALLOCATABLE_PODS: &str = "k8s.node.allocatable.pods"; + +/// ## Description +/// +/// Describes the condition of a particular Node. +/// +/// ## Notes +/// +/// All possible node condition pairs (type and status) will be reported at each time interval to avoid missing metrics. Condition pairs corresponding to the current conditions' statuses will be non-zero +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{node}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_NODE_CONDITION_STATUS`] | `Required` +/// | [`crate::attribute::K8S_NODE_CONDITION_TYPE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_CONDITION_STATUS: &str = "k8s.node.condition.status"; + +/// ## Description +/// +/// Total CPU time consumed +/// +/// ## Notes +/// +/// Total CPU time consumed by the specific Node on all available CPU cores +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `s` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_CPU_TIME: &str = "k8s.node.cpu.time"; + +/// ## Description +/// +/// Node's CPU usage, measured in cpus. 
Range from 0 to the number of allocatable CPUs +/// +/// ## Notes +/// +/// CPU usage of the specific Node on all available CPU cores, averaged over the sample window +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `{cpu}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_CPU_USAGE: &str = "k8s.node.cpu.usage"; + +/// ## Description +/// +/// Memory usage of the Node +/// +/// ## Notes +/// +/// Total memory usage of the Node +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `By` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_MEMORY_USAGE: &str = "k8s.node.memory.usage"; + +/// ## Description +/// +/// Node network errors +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{error}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` +/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_NETWORK_ERRORS: &str = "k8s.node.network.errors"; + +/// ## Description +/// +/// Network bytes for the Node +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `By` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` +/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_NETWORK_IO: &str = "k8s.node.network.io"; + +/// ## Description +/// +/// The time the Node has been running +/// +/// ## Notes +/// +/// Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. 
+/// The actual accuracy would depend on the instrumentation and operating system +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `s` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_UPTIME: &str = "k8s.node.uptime"; + +/// ## Description +/// +/// Total CPU time consumed +/// +/// ## Notes +/// +/// Total CPU time consumed by the specific Pod on all available CPU cores +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `s` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_CPU_TIME: &str = "k8s.pod.cpu.time"; + +/// ## Description +/// +/// Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs +/// +/// ## Notes +/// +/// CPU usage of the specific Pod on all available CPU cores, averaged over the sample window +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `{cpu}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_CPU_USAGE: &str = "k8s.pod.cpu.usage"; + +/// ## Description +/// +/// Memory usage of the Pod +/// +/// ## Notes +/// +/// Total memory usage of the Pod +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `By` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_MEMORY_USAGE: &str = "k8s.pod.memory.usage"; + +/// ## Description +/// +/// Pod network errors +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{error}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` +/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_NETWORK_ERRORS: &str = "k8s.pod.network.errors"; + +/// ## 
Description +/// +/// Network bytes for the Pod +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `By` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` +/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_NETWORK_IO: &str = "k8s.pod.network.io"; + +/// ## Description +/// +/// The time the Pod has been running +/// +/// ## Notes +/// +/// Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. +/// The actual accuracy would depend on the instrumentation and operating system +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `s` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_UPTIME: &str = "k8s.pod.uptime"; + +/// ## Description +/// +/// Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset +/// +/// ## Notes +/// +/// This metric aligns with the `availableReplicas` field of the +/// [K8s ReplicaSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetstatus-v1-apps) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_REPLICASET_AVAILABLE_PODS: &str = "k8s.replicaset.available_pods"; + +/// ## Description +/// +/// Number of desired replica pods in this replicaset +/// +/// ## Notes +/// +/// This metric aligns with the `replicas` field of the +/// [K8s ReplicaSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetspec-v1-apps) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// 
| Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_REPLICASET_DESIRED_PODS: &str = "k8s.replicaset.desired_pods"; + +/// ## Description +/// +/// Deprecated, use `k8s.replicationcontroller.available_pods` instead +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "{note: Replaced by `k8s.replicationcontroller.available_pods`., reason: renamed, renamed_to: k8s.replicationcontroller.available_pods}" +)] +pub const K8S_REPLICATION_CONTROLLER_AVAILABLE_PODS: &str = + "k8s.replication_controller.available_pods"; + +/// ## Description +/// +/// Deprecated, use `k8s.replicationcontroller.desired_pods` instead +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "{note: Replaced by `k8s.replicationcontroller.desired_pods`., reason: renamed, renamed_to: k8s.replicationcontroller.desired_pods}" +)] +pub const K8S_REPLICATION_CONTROLLER_DESIRED_PODS: &str = "k8s.replication_controller.desired_pods"; + +/// ## Description +/// +/// Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller +/// +/// ## Notes +/// +/// This metric aligns with the `availableReplicas` field of the +/// [K8s ReplicationControllerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerstatus-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS: &str = + "k8s.replicationcontroller.available_pods"; + +/// ## Description +/// +/// Number of desired replica pods in 
this replication controller +/// +/// ## Notes +/// +/// This metric aligns with the `replicas` field of the +/// [K8s ReplicationControllerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerspec-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_REPLICATIONCONTROLLER_DESIRED_PODS: &str = "k8s.replicationcontroller.desired_pods"; + +/// ## Description +/// +/// The CPU limits in a specific namespace. +/// The value represents the configured quota limit of the resource in the namespace. +/// +/// ## Notes +/// +/// This metric is retrieved from the `hard` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{cpu}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_RESOURCEQUOTA_CPU_LIMIT_HARD: &str = "k8s.resourcequota.cpu.limit.hard"; + +/// ## Description +/// +/// The CPU limits in a specific namespace. +/// The value represents the current observed total usage of the resource in the namespace. +/// +/// ## Notes +/// +/// This metric is retrieved from the `used` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{cpu}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_RESOURCEQUOTA_CPU_LIMIT_USED: &str = "k8s.resourcequota.cpu.limit.used"; + +/// ## Description +/// +/// The CPU requests in a specific namespace. +/// The value represents the configured quota limit of the resource in the namespace. 
+/// +/// ## Notes +/// +/// This metric is retrieved from the `hard` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{cpu}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_RESOURCEQUOTA_CPU_REQUEST_HARD: &str = "k8s.resourcequota.cpu.request.hard"; + +/// ## Description +/// +/// The CPU requests in a specific namespace. +/// The value represents the current observed total usage of the resource in the namespace. +/// +/// ## Notes +/// +/// This metric is retrieved from the `used` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{cpu}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_RESOURCEQUOTA_CPU_REQUEST_USED: &str = "k8s.resourcequota.cpu.request.used"; + +/// ## Description +/// +/// The sum of local ephemeral storage limits in the namespace. +/// The value represents the configured quota limit of the resource in the namespace. 
+/// +/// ## Notes +/// +/// This metric is retrieved from the `hard` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `gauge` | +/// | Instrument: | `updowncounter` | /// | Unit: | `By` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_POD_MEMORY_USAGE: &str = "k8s.pod.memory.usage"; +pub const K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_LIMIT_HARD: &str = + "k8s.resourcequota.ephemeral_storage.limit.hard"; /// ## Description /// -/// Pod network errors +/// The sum of local ephemeral storage limits in the namespace. +/// The value represents the current observed total usage of the resource in the namespace. +/// +/// ## Notes +/// +/// This metric is retrieved from the `used` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `counter` | -/// | Unit: | `{error}` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_LIMIT_USED: &str = + "k8s.resourcequota.ephemeral_storage.limit.used"; + +/// ## Description +/// +/// The sum of local ephemeral storage requests in the namespace. +/// The value represents the configured quota limit of the resource in the namespace. 
+/// +/// ## Notes +/// +/// This metric is retrieved from the `hard` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_REQUEST_HARD: &str = + "k8s.resourcequota.ephemeral_storage.request.hard"; + +/// ## Description +/// +/// The sum of local ephemeral storage requests in the namespace. +/// The value represents the current observed total usage of the resource in the namespace. +/// +/// ## Notes +/// +/// This metric is retrieved from the `used` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_REQUEST_USED: &str = + "k8s.resourcequota.ephemeral_storage.request.used"; + +/// ## Description +/// +/// The huge page requests in a specific namespace. +/// The value represents the configured quota limit of the resource in the namespace. 
+/// +/// ## Notes +/// +/// This metric is retrieved from the `hard` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{hugepage}` | /// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` -/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +/// | [`crate::attribute::K8S_HUGEPAGE_SIZE`] | `Required` #[cfg(feature = "semconv_experimental")] -pub const K8S_POD_NETWORK_ERRORS: &str = "k8s.pod.network.errors"; +pub const K8S_RESOURCEQUOTA_HUGEPAGE_COUNT_REQUEST_HARD: &str = + "k8s.resourcequota.hugepage_count.request.hard"; /// ## Description /// -/// Network bytes for the Pod +/// The huge page requests in a specific namespace. +/// The value represents the current observed total usage of the resource in the namespace. 
+/// +/// ## Notes +/// +/// This metric is retrieved from the `used` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `counter` | -/// | Unit: | `By` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `{hugepage}` | /// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` -/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +/// | [`crate::attribute::K8S_HUGEPAGE_SIZE`] | `Required` #[cfg(feature = "semconv_experimental")] -pub const K8S_POD_NETWORK_IO: &str = "k8s.pod.network.io"; +pub const K8S_RESOURCEQUOTA_HUGEPAGE_COUNT_REQUEST_USED: &str = + "k8s.resourcequota.hugepage_count.request.used"; /// ## Description /// -/// The time the Pod has been running +/// The memory limits in a specific namespace. +/// The value represents the configured quota limit of the resource in the namespace. /// /// ## Notes /// -/// Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. 
-/// The actual accuracy would depend on the instrumentation and operating system +/// This metric is retrieved from the `hard` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `gauge` | -/// | Unit: | `s` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_POD_UPTIME: &str = "k8s.pod.uptime"; +pub const K8S_RESOURCEQUOTA_MEMORY_LIMIT_HARD: &str = "k8s.resourcequota.memory.limit.hard"; /// ## Description /// -/// Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset +/// The memory limits in a specific namespace. +/// The value represents the current observed total usage of the resource in the namespace. /// /// ## Notes /// -/// This metric aligns with the `availableReplicas` field of the -/// [K8s ReplicaSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetstatus-v1-apps). +/// This metric is retrieved from the `used` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_RESOURCEQUOTA_MEMORY_LIMIT_USED: &str = "k8s.resourcequota.memory.limit.used"; + +/// ## Description +/// +/// The memory requests in a specific namespace. +/// The value represents the configured quota limit of the resource in the namespace. 
+/// +/// ## Notes /// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.replicaset`](../resource/k8s.md#replicaset) resource +/// This metric is retrieved from the `hard` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `By` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -pub const K8S_REPLICASET_AVAILABLE_PODS: &str = "k8s.replicaset.available_pods"; +pub const K8S_RESOURCEQUOTA_MEMORY_REQUEST_HARD: &str = "k8s.resourcequota.memory.request.hard"; /// ## Description /// -/// Number of desired replica pods in this replicaset +/// The memory requests in a specific namespace. +/// The value represents the current observed total usage of the resource in the namespace. /// /// ## Notes /// -/// This metric aligns with the `replicas` field of the -/// [K8s ReplicaSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetspec-v1-apps). +/// This metric is retrieved from the `used` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_RESOURCEQUOTA_MEMORY_REQUEST_USED: &str = "k8s.resourcequota.memory.request.used"; + +/// ## Description +/// +/// The object count limits in a specific namespace. +/// The value represents the configured quota limit of the resource in the namespace. 
/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.replicaset`](../resource/k8s.md#replicaset) resource +/// ## Notes +/// +/// This metric is retrieved from the `hard` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{object}` | /// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_RESOURCEQUOTA_RESOURCE_NAME`] | `Required` #[cfg(feature = "semconv_experimental")] -pub const K8S_REPLICASET_DESIRED_PODS: &str = "k8s.replicaset.desired_pods"; +pub const K8S_RESOURCEQUOTA_OBJECT_COUNT_HARD: &str = "k8s.resourcequota.object_count.hard"; /// ## Description /// -/// Deprecated, use `k8s.replicationcontroller.available_pods` instead +/// The object count limits in a specific namespace. +/// The value represents the current observed total usage of the resource in the namespace. 
+/// +/// ## Notes +/// +/// This metric is retrieved from the `used` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{object}` | /// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_RESOURCEQUOTA_RESOURCE_NAME`] | `Required` #[cfg(feature = "semconv_experimental")] -#[deprecated( - note = "{note: Replaced by `k8s.replicationcontroller.available_pods`., reason: renamed, renamed_to: k8s.replicationcontroller.available_pods}" -)] -pub const K8S_REPLICATION_CONTROLLER_AVAILABLE_PODS: &str = - "k8s.replication_controller.available_pods"; +pub const K8S_RESOURCEQUOTA_OBJECT_COUNT_USED: &str = "k8s.resourcequota.object_count.used"; /// ## Description /// -/// Deprecated, use `k8s.replicationcontroller.desired_pods` instead +/// The total number of PersistentVolumeClaims that can exist in the namespace. +/// The value represents the configured quota limit of the resource in the namespace. +/// +/// ## Notes +/// +/// This metric is retrieved from the `hard` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). +/// +/// The `k8s.storageclass.name` should be required when a resource quota is defined for a specific +/// storage class /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{persistentvolumeclaim}` | /// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_STORAGECLASS_NAME`] | `Conditionally_required`: The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. 
#[cfg(feature = "semconv_experimental")] -#[deprecated( - note = "{note: Replaced by `k8s.replicationcontroller.desired_pods`., reason: renamed, renamed_to: k8s.replicationcontroller.desired_pods}" -)] -pub const K8S_REPLICATION_CONTROLLER_DESIRED_PODS: &str = "k8s.replication_controller.desired_pods"; +pub const K8S_RESOURCEQUOTA_PERSISTENTVOLUMECLAIM_COUNT_HARD: &str = + "k8s.resourcequota.persistentvolumeclaim_count.hard"; /// ## Description /// -/// Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller +/// The total number of PersistentVolumeClaims that can exist in the namespace. +/// The value represents the current observed total usage of the resource in the namespace. /// /// ## Notes /// -/// This metric aligns with the `availableReplicas` field of the -/// [K8s ReplicationControllerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerstatus-v1-core) +/// This metric is retrieved from the `used` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). /// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.replicationcontroller`](../resource/k8s.md#replicationcontroller) resource +/// The `k8s.storageclass.name` should be required when a resource quota is defined for a specific +/// storage class /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `{persistentvolumeclaim}` | /// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_STORAGECLASS_NAME`] | `Conditionally_required`: The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. 
#[cfg(feature = "semconv_experimental")] -pub const K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS: &str = - "k8s.replicationcontroller.available_pods"; +pub const K8S_RESOURCEQUOTA_PERSISTENTVOLUMECLAIM_COUNT_USED: &str = + "k8s.resourcequota.persistentvolumeclaim_count.used"; /// ## Description /// -/// Number of desired replica pods in this replication controller +/// The storage requests in a specific namespace. +/// The value represents the configured quota limit of the resource in the namespace. /// /// ## Notes /// -/// This metric aligns with the `replicas` field of the -/// [K8s ReplicationControllerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerspec-v1-core) +/// This metric is retrieved from the `hard` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). /// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.replicationcontroller`](../resource/k8s.md#replicationcontroller) resource +/// The `k8s.storageclass.name` should be required when a resource quota is defined for a specific +/// storage class /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | -/// | Unit: | `{pod}` | +/// | Unit: | `By` | /// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_STORAGECLASS_NAME`] | `Conditionally_required`: The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. #[cfg(feature = "semconv_experimental")] -pub const K8S_REPLICATIONCONTROLLER_DESIRED_PODS: &str = "k8s.replicationcontroller.desired_pods"; +pub const K8S_RESOURCEQUOTA_STORAGE_REQUEST_HARD: &str = "k8s.resourcequota.storage.request.hard"; + +/// ## Description +/// +/// The storage requests in a specific namespace. +/// The value represents the current observed total usage of the resource in the namespace. 
+/// +/// ## Notes +/// +/// This metric is retrieved from the `used` field of the +/// [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). +/// +/// The `k8s.storageclass.name` should be required when a resource quota is defined for a specific +/// storage class +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_STORAGECLASS_NAME`] | `Conditionally_required`: The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. +#[cfg(feature = "semconv_experimental")] +pub const K8S_RESOURCEQUOTA_STORAGE_REQUEST_USED: &str = "k8s.resourcequota.storage.request.used"; /// ## Description /// @@ -3165,10 +3879,7 @@ pub const K8S_REPLICATIONCONTROLLER_DESIRED_PODS: &str = "k8s.replicationcontrol /// ## Notes /// /// This metric aligns with the `currentReplicas` field of the -/// [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). -/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.statefulset`](../resource/k8s.md#statefulset) resource +/// [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps) /// ## Metadata /// | | | /// |:-|:- @@ -3185,10 +3896,7 @@ pub const K8S_STATEFULSET_CURRENT_PODS: &str = "k8s.statefulset.current_pods"; /// ## Notes /// /// This metric aligns with the `replicas` field of the -/// [K8s StatefulSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetspec-v1-apps). 
-/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.statefulset`](../resource/k8s.md#statefulset) resource +/// [K8s StatefulSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetspec-v1-apps) /// ## Metadata /// | | | /// |:-|:- @@ -3205,10 +3913,7 @@ pub const K8S_STATEFULSET_DESIRED_PODS: &str = "k8s.statefulset.desired_pods"; /// ## Notes /// /// This metric aligns with the `readyReplicas` field of the -/// [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). -/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.statefulset`](../resource/k8s.md#statefulset) resource +/// [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps) /// ## Metadata /// | | | /// |:-|:- @@ -3225,10 +3930,7 @@ pub const K8S_STATEFULSET_READY_PODS: &str = "k8s.statefulset.ready_pods"; /// ## Notes /// /// This metric aligns with the `updatedReplicas` field of the -/// [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). -/// -/// This metric SHOULD, at a minimum, be reported against a -/// [`k8s.statefulset`](../resource/k8s.md#statefulset) resource +/// [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps) /// ## Metadata /// | | | /// |:-|:- @@ -4249,29 +4951,20 @@ pub const OTEL_SDK_PROCESSOR_SPAN_QUEUE_SIZE: &str = "otel.sdk.processor.span.qu /// ## Description /// -/// The number of created spans for which the end operation was called -/// -/// ## Notes -/// -/// For spans with `recording=true`: Implementations MUST record both `otel.sdk.span.live` and `otel.sdk.span.ended`. 
-/// For spans with `recording=false`: If implementations decide to record this metric, they MUST also record `otel.sdk.span.live` +/// Use `otel.sdk.span.started` minus `otel.sdk.span.live` to derive this value /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{span}` | /// | Status: | `Development` | -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -/// | [`crate::attribute::OTEL_SPAN_SAMPLING_RESULT`] | `Recommended` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "{note: Obsoleted., reason: obsoleted}")] pub const OTEL_SDK_SPAN_ENDED: &str = "otel.sdk.span.ended"; /// ## Description /// -/// Deprecated, use `otel.sdk.span.ended` instead +/// Use `otel.sdk.span.started` minus `otel.sdk.span.live` to derive this value /// ## Metadata /// | | | /// |:-|:- @@ -4279,19 +4972,12 @@ pub const OTEL_SDK_SPAN_ENDED: &str = "otel.sdk.span.ended"; /// | Unit: | `{span}` | /// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] -#[deprecated( - note = "{note: Replaced by `otel.sdk.span.ended`., reason: renamed, renamed_to: otel.sdk.span.ended}" -)] +#[deprecated(note = "{note: Obsoleted., reason: obsoleted}")] pub const OTEL_SDK_SPAN_ENDED_COUNT: &str = "otel.sdk.span.ended.count"; /// ## Description /// -/// The number of created spans for which the end operation has not been called yet -/// -/// ## Notes -/// -/// For spans with `recording=true`: Implementations MUST record both `otel.sdk.span.live` and `otel.sdk.span.ended`. 
-/// For spans with `recording=false`: If implementations decide to record this metric, they MUST also record `otel.sdk.span.ended` +/// The number of created spans with `recording=true` for which the end operation has not been called yet /// ## Metadata /// | | | /// |:-|:- @@ -4321,6 +5007,28 @@ pub const OTEL_SDK_SPAN_LIVE: &str = "otel.sdk.span.live"; )] pub const OTEL_SDK_SPAN_LIVE_COUNT: &str = "otel.sdk.span.live.count"; +/// ## Description +/// +/// The number of created spans +/// +/// ## Notes +/// +/// Implementations MUST record this metric for all spans, even for non-recording ones +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{span}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::OTEL_SPAN_PARENT_ORIGIN`] | `Recommended` +/// | [`crate::attribute::OTEL_SPAN_SAMPLING_RESULT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const OTEL_SDK_SPAN_STARTED: &str = "otel.sdk.span.started"; + /// ## Description /// /// Number of times the process has been context switched @@ -4706,17 +5414,19 @@ pub const SIGNALR_SERVER_CONNECTION_DURATION: &str = "signalr.server.connection. /// ## Description /// -/// Deprecated. 
Use `cpu.frequency` instead +/// Operating frequency of the logical CPU in Hertz /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `gauge` | -/// | Unit: | `{Hz}` | +/// | Unit: | `Hz` | /// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::CPU_LOGICAL_NUMBER`] | `Recommended` #[cfg(feature = "semconv_experimental")] -#[deprecated( - note = "{note: Replaced by `cpu.frequency`., reason: renamed, renamed_to: cpu.frequency}" -)] pub const SYSTEM_CPU_FREQUENCY: &str = "system.cpu.frequency"; /// ## Description @@ -4753,30 +5463,38 @@ pub const SYSTEM_CPU_PHYSICAL_COUNT: &str = "system.cpu.physical.count"; /// ## Description /// -/// Deprecated. Use `cpu.time` instead +/// Seconds each logical CPU spent on each mode /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | /// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::CPU_LOGICAL_NUMBER`] | `Recommended` +/// | [`crate::attribute::CPU_MODE`] | `Recommended` #[cfg(feature = "semconv_experimental")] -#[deprecated(note = "{note: Replaced by `cpu.time`., reason: renamed, renamed_to: cpu.time}")] pub const SYSTEM_CPU_TIME: &str = "system.cpu.time"; /// ## Description /// -/// Deprecated. 
Use `cpu.utilization` instead +/// For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `1` | /// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::CPU_LOGICAL_NUMBER`] | `Recommended` +/// | [`crate::attribute::CPU_MODE`] | `Recommended` #[cfg(feature = "semconv_experimental")] -#[deprecated( - note = "{note: Replaced by `cpu.utilization`., reason: renamed, renamed_to: cpu.utilization}" -)] pub const SYSTEM_CPU_UTILIZATION: &str = "system.cpu.utilization"; /// ## Description @@ -5088,6 +5806,28 @@ pub const SYSTEM_MEMORY_UTILIZATION: &str = "system.memory.utilization"; /// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` /// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended` #[cfg(feature = "semconv_experimental")] +pub const SYSTEM_NETWORK_CONNECTION_COUNT: &str = "system.network.connection.count"; + +/// ## Description +/// +/// Deprecated, use `system.network.connection.count` instead +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{connection}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::NETWORK_CONNECTION_STATE`] | `Recommended` +/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` +/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "{note: Replaced by `system.network.connection.count`., reason: renamed, renamed_to: system.network.connection.count}" +)] pub const SYSTEM_NETWORK_CONNECTIONS: &str = "system.network.connections"; /// ## Description diff --git a/opentelemetry-semantic-conventions/src/resource.rs b/opentelemetry-semantic-conventions/src/resource.rs index 
b4778d0255..cfc002da1b 100644 --- a/opentelemetry-semantic-conventions/src/resource.rs +++ b/opentelemetry-semantic-conventions/src/resource.rs @@ -350,6 +350,15 @@ pub use crate::attribute::K8S_DEPLOYMENT_UID; #[cfg(feature = "semconv_experimental")] pub use crate::attribute::K8S_HPA_NAME; +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::K8S_HPA_SCALETARGETREF_API_VERSION; + +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::K8S_HPA_SCALETARGETREF_KIND; + +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::K8S_HPA_SCALETARGETREF_NAME; + #[cfg(feature = "semconv_experimental")] pub use crate::attribute::K8S_HPA_UID; @@ -434,6 +443,9 @@ pub use crate::attribute::K8S_STATEFULSET_NAME; #[cfg(feature = "semconv_experimental")] pub use crate::attribute::K8S_STATEFULSET_UID; +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::MAINFRAME_LPAR_NAME; + #[cfg(feature = "semconv_experimental")] pub use crate::attribute::OCI_MANIFEST_DIGEST; @@ -539,3 +551,9 @@ pub use crate::attribute::WEBENGINE_NAME; #[cfg(feature = "semconv_experimental")] pub use crate::attribute::WEBENGINE_VERSION; + +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::ZOS_SMF_ID; + +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::ZOS_SYSPLEX_NAME; diff --git a/opentelemetry-semantic-conventions/src/trace.rs b/opentelemetry-semantic-conventions/src/trace.rs index d04978485f..559303fb5c 100644 --- a/opentelemetry-semantic-conventions/src/trace.rs +++ b/opentelemetry-semantic-conventions/src/trace.rs @@ -146,9 +146,7 @@ pub use crate::attribute::AWS_S3_PART_NUMBER; pub use crate::attribute::AWS_S3_UPLOAD_ID; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::AZ_NAMESPACE; - -#[cfg(feature = "semconv_experimental")] +#[allow(deprecated)] pub use crate::attribute::AZ_SERVICE_REQUEST_ID; #[cfg(feature = "semconv_experimental")] @@ -172,6 +170,12 @@ pub use 
crate::attribute::AZURE_COSMOSDB_REQUEST_BODY_SIZE; #[cfg(feature = "semconv_experimental")] pub use crate::attribute::AZURE_COSMOSDB_RESPONSE_SUB_STATUS_CODE; +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::AZURE_RESOURCE_PROVIDER_NAMESPACE; + +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::AZURE_SERVICE_REQUEST_ID; + #[cfg(feature = "semconv_experimental")] pub use crate::attribute::CASSANDRA_CONSISTENCY_LEVEL; @@ -245,6 +249,12 @@ pub use crate::attribute::DB_STORED_PROCEDURE_NAME; pub use crate::attribute::DB_SYSTEM_NAME; +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::DNS_ANSWERS; + +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::DNS_QUESTION_NAME; + #[cfg(feature = "semconv_experimental")] pub use crate::attribute::ELASTICSEARCH_NODE_NAME; @@ -509,6 +519,12 @@ pub use crate::attribute::SESSION_ID; #[cfg(feature = "semconv_experimental")] pub use crate::attribute::SESSION_PREVIOUS_ID; +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::TLS_PROTOCOL_NAME; + +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::TLS_PROTOCOL_VERSION; + pub use crate::attribute::URL_FULL; pub use crate::attribute::URL_PATH;