From 90f54d3c19aa9e5e6f53c5a1faf23cd2b7aee587 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Tue, 26 Aug 2025 09:54:41 +0200 Subject: [PATCH 1/2] semantic-conventions: bump to 1.37.0 --- .../_incubating/attributes/app_attributes.py | 23 +- .../_incubating/attributes/aws_attributes.py | 4 +- .../attributes/azure_attributes.py | 10 +- .../attributes/cassandra_attributes.py | 22 +- .../attributes/container_attributes.py | 15 + .../_incubating/attributes/cpu_attributes.py | 16 +- .../_incubating/attributes/db_attributes.py | 4 +- .../attributes/device_attributes.py | 2 +- .../attributes/enduser_attributes.py | 2 +- .../attributes/gen_ai_attributes.py | 159 ++++- .../_incubating/attributes/hw_attributes.py | 176 ++++- .../attributes/messaging_attributes.py | 2 + .../attributes/openai_attributes.py | 40 ++ .../_incubating/attributes/otel_attributes.py | 5 + .../attributes/system_attributes.py | 2 +- .../_incubating/metrics/azure_metrics.py | 4 +- .../_incubating/metrics/container_metrics.py | 72 +- .../semconv/_incubating/metrics/db_metrics.py | 18 +- .../_incubating/metrics/faas_metrics.py | 18 +- .../_incubating/metrics/gen_ai_metrics.py | 14 +- .../semconv/_incubating/metrics/hw_metrics.py | 656 +++++++++++++++++- .../_incubating/metrics/k8s_metrics.py | 364 ++++++++-- .../_incubating/metrics/otel_metrics.py | 30 +- .../_incubating/metrics/system_metrics.py | 133 ++-- .../_incubating/metrics/vcs_metrics.py | 8 +- .../src/opentelemetry/semconv/schemas.py | 5 + scripts/semconv/generate.sh | 4 +- .../templates/registry/semantic_attributes.j2 | 4 +- 28 files changed, 1589 insertions(+), 223 deletions(-) create mode 100644 opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/openai_attributes.py diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/app_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/app_attributes.py index 4ab7879d833..73456b671ea 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/app_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/app_attributes.py @@ -14,6 +14,11 @@ from typing import Final +APP_BUILD_ID: Final = "app.build_id" +""" +Unique identifier for a particular build or compilation of the application. +""" + APP_INSTALLATION_ID: Final = "app.installation.id" """ A unique identifier representing the installation of an application on a specific device. @@ -33,7 +38,23 @@ - [App set ID](https://developer.android.com/identity/app-set-id). - [`Settings.getString(Settings.Secure.ANDROID_ID)`](https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID). -More information about Android identifier best practices can be found [here](https://developer.android.com/training/articles/user-data-ids). +More information about Android identifier best practices can be found in the [Android user data IDs guide](https://developer.android.com/training/articles/user-data-ids). +""" + +APP_JANK_FRAME_COUNT: Final = "app.jank.frame_count" +""" +A number of frame renders that experienced jank. +Note: Depending on platform limitations, the value provided MAY be approximation. +""" + +APP_JANK_PERIOD: Final = "app.jank.period" +""" +The time period, in seconds, for which this jank is being reported. +""" + +APP_JANK_THRESHOLD: Final = "app.jank.threshold" +""" +The minimum rendering threshold for this jank, in seconds. 
""" APP_SCREEN_COORDINATE_X: Final = "app.screen.coordinate.x" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/aws_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/aws_attributes.py index b4a969fbbd8..da42769ae0c 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/aws_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/aws_attributes.py @@ -340,6 +340,6 @@ class AwsEcsLaunchtypeValues(Enum): EC2 = "ec2" - """ec2.""" + """Amazon EC2.""" FARGATE = "fargate" - """fargate.""" + """Amazon Fargate.""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/azure_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/azure_attributes.py index eb883d222c8..9db27f62989 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/azure_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/azure_attributes.py @@ -77,12 +77,12 @@ class AzureCosmosdbConnectionModeValues(Enum): class AzureCosmosdbConsistencyLevelValues(Enum): STRONG = "Strong" - """strong.""" + """Strong.""" BOUNDED_STALENESS = "BoundedStaleness" - """bounded_staleness.""" + """Bounded Staleness.""" SESSION = "Session" - """session.""" + """Session.""" EVENTUAL = "Eventual" - """eventual.""" + """Eventual.""" CONSISTENT_PREFIX = "ConsistentPrefix" - """consistent_prefix.""" + """Consistent Prefix.""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/cassandra_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/cassandra_attributes.py index 17fbd4ca224..96aae6dc144 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/cassandra_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/cassandra_attributes.py @@ -50,24 +50,24 @@ class CassandraConsistencyLevelValues(Enum): ALL = "all" - """all.""" + """All.""" EACH_QUORUM = "each_quorum" - """each_quorum.""" + """Each Quorum.""" QUORUM = "quorum" - """quorum.""" + """Quorum.""" LOCAL_QUORUM = "local_quorum" - """local_quorum.""" + """Local Quorum.""" ONE = "one" - """one.""" + """One.""" TWO = "two" - """two.""" + """Two.""" THREE = "three" - """three.""" + """Three.""" LOCAL_ONE = "local_one" - """local_one.""" + """Local One.""" ANY = "any" - """any.""" + """Any.""" SERIAL = "serial" - """serial.""" + """Serial.""" LOCAL_SERIAL = "local_serial" - """local_serial.""" + """Local Serial.""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/container_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/container_attributes.py index cd6eccb9cf6..ef04de504c0 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/container_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/container_attributes.py @@ -97,9 +97,24 @@ CONTAINER_RUNTIME: Final = "container.runtime" """ +Deprecated: Replaced by `container.runtime.name`. 
+""" + +CONTAINER_RUNTIME_DESCRIPTION: Final = "container.runtime.description" +""" +A description about the runtime which could include, for example details about the CRI/API version being used or other customisations. +""" + +CONTAINER_RUNTIME_NAME: Final = "container.runtime.name" +""" The container runtime managing this container. """ +CONTAINER_RUNTIME_VERSION: Final = "container.runtime.version" +""" +The version of the runtime of this process, as returned by the runtime without modification. +""" + @deprecated( "The attribute container.cpu.state is deprecated - Replaced by `cpu.mode`" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/cpu_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/cpu_attributes.py index e960e203ae2..e550c569eed 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/cpu_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/cpu_attributes.py @@ -28,18 +28,18 @@ class CpuModeValues(Enum): USER = "user" - """user.""" + """User.""" SYSTEM = "system" - """system.""" + """System.""" NICE = "nice" - """nice.""" + """Nice.""" IDLE = "idle" - """idle.""" + """Idle.""" IOWAIT = "iowait" - """iowait.""" + """IO Wait.""" INTERRUPT = "interrupt" - """interrupt.""" + """Interrupt.""" STEAL = "steal" - """steal.""" + """Steal.""" KERNEL = "kernel" - """kernel.""" + """Kernel.""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/db_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/db_attributes.py index 61ef5ff256b..8c40189eff6 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/db_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/db_attributes.py @@ -410,7 +410,7 @@ class DbSystemValues(Enum): COCKROACHDB = "cockroachdb" """CockroachDB.""" COLDFUSION = "coldfusion" - """Deprecated: Removed.""" + """Deprecated: Obsoleted.""" COSMOSDB = "cosmosdb" """Microsoft Azure Cosmos DB.""" COUCHBASE = "couchbase" @@ -466,7 +466,7 @@ class DbSystemValues(Enum): MSSQL = "mssql" """Microsoft SQL Server.""" MSSQLCOMPACT = "mssqlcompact" - """Deprecated: Removed, use `other_sql` instead.""" + """Deprecated: Replaced by `other_sql`.""" MYSQL = "mysql" """MySQL.""" NEO4J = "neo4j" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/device_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/device_attributes.py index b79d5ab0f30..4af3f95ba81 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/device_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/device_attributes.py @@ -21,7 +21,7 @@ However, it might be resettable by the user for all apps on a device. Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be used as values. -More information about Android identifier best practices can be found [here](https://developer.android.com/training/articles/user-data-ids). +More information about Android identifier best practices can be found in the [Android user data IDs guide](https://developer.android.com/training/articles/user-data-ids). 
> [!WARNING] > diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/enduser_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/enduser_attributes.py index d07132941f6..aec9804d7f6 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/enduser_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/enduser_attributes.py @@ -34,7 +34,7 @@ ENDUSER_ROLE: Final = "enduser.role" """ -Deprecated: Use `user.roles` attribute instead. +Deprecated: Replaced by `user.roles`. """ ENDUSER_SCOPE: Final = "enduser.scope" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gen_ai_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gen_ai_attributes.py index 67c91d988dc..0de1d391fbf 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gen_ai_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gen_ai_attributes.py @@ -48,6 +48,25 @@ Note: Data sources are used by AI agents and RAG applications to store grounding data. A data source may be an external database, object store, document collection, website, or any other storage system used by the GenAI agent or application. The `gen_ai.data_source.id` SHOULD match the identifier used by the GenAI system rather than a name specific to the external storage, such as a database or object store. Semantic conventions referencing `gen_ai.data_source.id` MAY also leverage additional attributes, such as `db.*`, to further identify and describe the data source. """ +GEN_AI_INPUT_MESSAGES: Final = "gen_ai.input.messages" +""" +The chat history provided to the model as an input. +Note: Instrumentations MUST follow [Input messages JSON schema](/docs/gen-ai/gen-ai-input-messages.json). +When the attribute is recorded on events, it MUST be recorded in structured +form. When recorded on spans, it MAY be recorded as a JSON string if structured +format is not supported and SHOULD be recorded in structured form otherwise. + +Messages MUST be provided in the order they were sent to the model. +Instrumentations MAY provide a way for users to filter or truncate +input messages. + +> [!Warning] +> This attribute is likely to contain sensitive information including user/PII data. + +See [Recording content on attributes](/docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes) +section for more details. +""" + GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: Final = ( "gen_ai.openai.request.response_format" ) @@ -64,21 +83,21 @@ "gen_ai.openai.request.service_tier" ) """ -The service tier requested. May be a specific tier, default, or auto. +Deprecated: Replaced by `openai.request.service_tier`. """ GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: Final = ( "gen_ai.openai.response.service_tier" ) """ -The service tier used for the response. +Deprecated: Replaced by `openai.response.service_tier`. """ GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT: Final = ( "gen_ai.openai.response.system_fingerprint" ) """ -A fingerprint to track any eventual change in the Generative AI environment. +Deprecated: Replaced by `openai.response.system_fingerprint`. 
""" GEN_AI_OPERATION_NAME: Final = "gen_ai.operation.name" @@ -87,6 +106,30 @@ Note: If one of the predefined values applies, but specific system uses a different name it's RECOMMENDED to document it in the semantic conventions for specific GenAI system and use system-specific name in the instrumentation. If a different name is not documented, instrumentation libraries SHOULD use applicable predefined value. """ +GEN_AI_OUTPUT_MESSAGES: Final = "gen_ai.output.messages" +""" +Messages returned by the model where each message represents a specific model response (choice, candidate). +Note: Instrumentations MUST follow [Output messages JSON schema](/docs/gen-ai/gen-ai-output-messages.json) + +Each message represents a single output choice/candidate generated by +the model. Each message corresponds to exactly one generation +(choice/candidate) and vice versa - one choice cannot be split across +multiple messages or one message cannot contain parts from multiple choices. + +When the attribute is recorded on events, it MUST be recorded in structured +form. When recorded on spans, it MAY be recorded as a JSON string if structured +format is not supported and SHOULD be recorded in structured form otherwise. + +Instrumentations MAY provide a way for users to filter or truncate +output messages. + +> [!Warning] +> This attribute is likely to contain sensitive information including user/PII data. + +See [Recording content on attributes](/docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes) +section for more details. +""" + GEN_AI_OUTPUT_TYPE: Final = "gen_ai.output.type" """ Represents the content type requested by the client. @@ -100,6 +143,29 @@ Deprecated: Removed, no replacement at this time. """ +GEN_AI_PROVIDER_NAME: Final = "gen_ai.provider.name" +""" +The Generative AI provider as identified by the client or server instrumentation. +Note: The attribute SHOULD be set based on the instrumentation's best +knowledge and may differ from the actual model provider. + +Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms +are accessible using the OpenAI REST API and corresponding client libraries, +but may proxy or host models from different providers. + +The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address` +attributes may help identify the actual system in use. + +The `gen_ai.provider.name` attribute acts as a discriminator that +identifies the GenAI telemetry format flavor specific to that provider +within GenAI semantic conventions. +It SHOULD be set consistently with provider-specific attributes and signals. +For example, GenAI spans, metrics, and events related to AWS Bedrock +should have the `gen_ai.provider.name` set to `aws.bedrock` and include +applicable `aws.bedrock.*` attributes and are not expected to include +`openai.*` attributes. +""" + GEN_AI_REQUEST_CHOICE_COUNT: Final = "gen_ai.request.choice.count" """ The target number of candidate completions to return. @@ -173,18 +239,32 @@ GEN_AI_SYSTEM: Final = "gen_ai.system" """ -The Generative AI product as identified by the client or server instrumentation. -Note: The `gen_ai.system` describes a family of GenAI models with specific model identified -by `gen_ai.request.model` and `gen_ai.response.model` attributes. +Deprecated: Replaced by `gen_ai.provider.name`. +""" + +GEN_AI_SYSTEM_INSTRUCTIONS: Final = "gen_ai.system_instructions" +""" +The system message or instructions provided to the GenAI model separately from the chat history. 
+Note: This attribute SHOULD be used when the corresponding provider or API +allows to provide system instructions or messages separately from the +chat history. + +Instructions that are part of the chat history SHOULD be recorded in +`gen_ai.input.messages` attribute instead. + +Instrumentations MUST follow [System instructions JSON schema](/docs/gen-ai/gen-ai-system-instructions.json). + +When recorded on spans, it MAY be recorded as a JSON string if structured +format is not supported and SHOULD be recorded in structured form otherwise. + +Instrumentations MAY provide a way for users to filter or truncate +system instructions. -The actual GenAI product may differ from the one identified by the client. -Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI client -libraries. In such cases, the `gen_ai.system` is set to `openai` based on the -instrumentation's best knowledge, instead of the actual system. The `server.address` -attribute may help identify the actual system in use for `openai`. +> [!Warning] +> This attribute may contain sensitive information. -For custom model, a custom friendly name SHOULD be used. -If none of these options apply, the `gen_ai.system` SHOULD be set to `_OTHER`. +See [Recording content on attributes](/docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes) +section for more details. """ GEN_AI_TOKEN_TYPE: Final = "gen_ai.token.type" @@ -250,6 +330,9 @@ class GenAiOpenaiRequestResponseFormatValues(Enum): """JSON schema response format.""" +@deprecated( + "The attribute gen_ai.openai.request.service_tier is deprecated - Replaced by `openai.request.service_tier`" +) class GenAiOpenaiRequestServiceTierValues(Enum): AUTO = "auto" """The system will utilize scale tier credits until they are exhausted.""" @@ -285,6 +368,42 @@ class GenAiOutputTypeValues(Enum): """Speech.""" +class GenAiProviderNameValues(Enum): + OPENAI = "openai" + """[OpenAI](https://openai.com/).""" + GCP_GEN_AI = "gcp.gen_ai" + """Any Google generative AI endpoint.""" + GCP_VERTEX_AI = "gcp.vertex_ai" + """[Vertex AI](https://cloud.google.com/vertex-ai).""" + GCP_GEMINI = "gcp.gemini" + """[Gemini](https://cloud.google.com/products/gemini).""" + ANTHROPIC = "anthropic" + """[Anthropic](https://www.anthropic.com/).""" + COHERE = "cohere" + """[Cohere](https://cohere.com/).""" + AZURE_AI_INFERENCE = "azure.ai.inference" + """Azure AI Inference.""" + AZURE_AI_OPENAI = "azure.ai.openai" + """[Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/).""" + IBM_WATSONX_AI = "ibm.watsonx.ai" + """[IBM Watsonx AI](https://www.ibm.com/products/watsonx-ai).""" + AWS_BEDROCK = "aws.bedrock" + """[AWS Bedrock](https://aws.amazon.com/bedrock).""" + PERPLEXITY = "perplexity" + """[Perplexity](https://www.perplexity.ai/).""" + X_AI = "x_ai" + """[xAI](https://x.ai/).""" + DEEPSEEK = "deepseek" + """[DeepSeek](https://www.deepseek.com/).""" + GROQ = "groq" + """[Groq](https://groq.com/).""" + MISTRAL_AI = "mistral_ai" + """[Mistral AI](https://mistral.ai/).""" + + +@deprecated( + "The attribute gen_ai.system is deprecated - Replaced by `gen_ai.provider.name`" +) class GenAiSystemValues(Enum): OPENAI = "openai" """OpenAI.""" @@ -295,21 +414,21 @@ class GenAiSystemValues(Enum): GCP_GEMINI = "gcp.gemini" """Gemini.""" VERTEX_AI = "vertex_ai" - """Deprecated: Use 'gcp.vertex_ai' instead.""" + """Deprecated: Replaced by `gcp.vertex_ai`.""" GEMINI = "gemini" - """Deprecated: Use 'gcp.gemini' instead.""" + """Deprecated: Replaced by `gcp.gemini`.""" ANTHROPIC = 
"anthropic" """Anthropic.""" COHERE = "cohere" """Cohere.""" + AZ_AI_INFERENCE = "az.ai.inference" + """Azure AI Inference.""" + AZ_AI_OPENAI = "az.ai.openai" + """Azure OpenAI.""" AZURE_AI_INFERENCE = "azure.ai.inference" """Azure AI Inference.""" AZURE_AI_OPENAI = "azure.ai.openai" """Azure OpenAI.""" - AZ_AI_INFERENCE = "az.ai.inference" - """Deprecated: Replaced by azure.ai.inference.""" - AZ_AI_OPENAI = "azure.ai.openai" - """Deprecated: Replaced by azure.ai.openai.""" IBM_WATSONX_AI = "ibm.watsonx.ai" """IBM Watsonx AI.""" AWS_BEDROCK = "aws.bedrock" @@ -317,7 +436,7 @@ class GenAiSystemValues(Enum): PERPLEXITY = "perplexity" """Perplexity.""" XAI = "xai" - """xAI.""" + """Deprecated: Replaced by `x_ai`.""" DEEPSEEK = "deepseek" """DeepSeek.""" GROQ = "groq" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/hw_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/hw_attributes.py index 510eb976491..d16f1579421 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/hw_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/hw_attributes.py @@ -15,40 +15,212 @@ from enum import Enum from typing import Final +HW_BATTERY_CAPACITY: Final = "hw.battery.capacity" +""" +Design capacity in Watts-hours or Amper-hours. +""" + +HW_BATTERY_CHEMISTRY: Final = "hw.battery.chemistry" +""" +Battery [chemistry](https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html), e.g. Lithium-Ion, Nickel-Cadmium, etc. +""" + +HW_BATTERY_STATE: Final = "hw.battery.state" +""" +The current state of the battery. +""" + +HW_BIOS_VERSION: Final = "hw.bios_version" +""" +BIOS version of the hardware component. +""" + +HW_DRIVER_VERSION: Final = "hw.driver_version" +""" +Driver version for the hardware component. +""" + +HW_ENCLOSURE_TYPE: Final = "hw.enclosure.type" +""" +Type of the enclosure (useful for modular systems). +""" + +HW_FIRMWARE_VERSION: Final = "hw.firmware_version" +""" +Firmware version of the hardware component. +""" + +HW_GPU_TASK: Final = "hw.gpu.task" +""" +Type of task the GPU is performing. +""" + HW_ID: Final = "hw.id" """ An identifier for the hardware component, unique within the monitored host. """ +HW_LIMIT_TYPE: Final = "hw.limit_type" +""" +Type of limit for hardware components. +""" + +HW_LOGICAL_DISK_RAID_LEVEL: Final = "hw.logical_disk.raid_level" +""" +RAID Level of the logical disk. +""" + +HW_LOGICAL_DISK_STATE: Final = "hw.logical_disk.state" +""" +State of the logical disk space usage. +""" + +HW_MEMORY_TYPE: Final = "hw.memory.type" +""" +Type of the memory module. +""" + +HW_MODEL: Final = "hw.model" +""" +Descriptive model name of the hardware component. +""" + HW_NAME: Final = "hw.name" """ An easily-recognizable name for the hardware component. """ +HW_NETWORK_LOGICAL_ADDRESSES: Final = "hw.network.logical_addresses" +""" +Logical addresses of the adapter (e.g. IP address, or WWPN). +""" + +HW_NETWORK_PHYSICAL_ADDRESS: Final = "hw.network.physical_address" +""" +Physical address of the adapter (e.g. MAC address, or WWNN). +""" + HW_PARENT: Final = "hw.parent" """ Unique identifier of the parent component (typically the `hw.id` attribute of the enclosure, or disk controller). """ +HW_PHYSICAL_DISK_SMART_ATTRIBUTE: Final = "hw.physical_disk.smart_attribute" +""" +[S.M.A.R.T.](https://wikipedia.org/wiki/S.M.A.R.T.) 
(Self-Monitoring, Analysis, and Reporting Technology) attribute of the physical disk. +""" + +HW_PHYSICAL_DISK_STATE: Final = "hw.physical_disk.state" +""" +State of the physical disk endurance utilization. +""" + +HW_PHYSICAL_DISK_TYPE: Final = "hw.physical_disk.type" +""" +Type of the physical disk. +""" + +HW_SENSOR_LOCATION: Final = "hw.sensor_location" +""" +Location of the sensor. +""" + +HW_SERIAL_NUMBER: Final = "hw.serial_number" +""" +Serial number of the hardware component. +""" + HW_STATE: Final = "hw.state" """ The current state of the component. """ +HW_TAPE_DRIVE_OPERATION_TYPE: Final = "hw.tape_drive.operation_type" +""" +Type of tape drive operation. +""" + HW_TYPE: Final = "hw.type" """ Type of the component. Note: Describes the category of the hardware component for which `hw.state` is being reported. For example, `hw.type=temperature` along with `hw.state=degraded` would indicate that the temperature of the hardware component has been reported as `degraded`. """ +HW_VENDOR: Final = "hw.vendor" +""" +Vendor name of the hardware component. +""" + + +class HwBatteryStateValues(Enum): + CHARGING = "charging" + """Charging.""" + DISCHARGING = "discharging" + """Discharging.""" + + +class HwGpuTaskValues(Enum): + DECODER = "decoder" + """Decoder.""" + ENCODER = "encoder" + """Encoder.""" + GENERAL = "general" + """General.""" + + +class HwLimitTypeValues(Enum): + CRITICAL = "critical" + """Critical.""" + DEGRADED = "degraded" + """Degraded.""" + HIGH_CRITICAL = "high.critical" + """High Critical.""" + HIGH_DEGRADED = "high.degraded" + """High Degraded.""" + LOW_CRITICAL = "low.critical" + """Low Critical.""" + LOW_DEGRADED = "low.degraded" + """Low Degraded.""" + MAX = "max" + """Maximum.""" + THROTTLED = "throttled" + """Throttled.""" + TURBO = "turbo" + """Turbo.""" + + +class HwLogicalDiskStateValues(Enum): + USED = "used" + """Used.""" + FREE = "free" + """Free.""" + + +class HwPhysicalDiskStateValues(Enum): + REMAINING = "remaining" + """Remaining.""" + class HwStateValues(Enum): - OK = "ok" - """Ok.""" DEGRADED = "degraded" """Degraded.""" FAILED = "failed" """Failed.""" + NEEDS_CLEANING = "needs_cleaning" + """Needs Cleaning.""" + OK = "ok" + """OK.""" + PREDICTED_FAILURE = "predicted_failure" + """Predicted Failure.""" + + +class HwTapeDriveOperationTypeValues(Enum): + MOUNT = "mount" + """Mount.""" + UNMOUNT = "unmount" + """Unmount.""" + CLEAN = "clean" + """Clean.""" class HwTypeValues(Enum): diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/messaging_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/messaging_attributes.py index 7756a0aba13..3deff31d15a 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/messaging_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/messaging_attributes.py @@ -348,6 +348,8 @@ class MessagingServicebusDispositionStatusValues(Enum): class MessagingSystemValues(Enum): ACTIVEMQ = "activemq" """Apache ActiveMQ.""" + AWS_SNS = "aws.sns" + """Amazon Simple Notification Service (SNS).""" AWS_SQS = "aws_sqs" """Amazon Simple Queue Service (SQS).""" EVENTGRID = "eventgrid" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/openai_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/openai_attributes.py new file mode 100644 index 00000000000..8c044436d1b --- 
/dev/null +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/openai_attributes.py @@ -0,0 +1,40 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +OPENAI_REQUEST_SERVICE_TIER: Final = "openai.request.service_tier" +""" +The service tier requested. May be a specific tier, default, or auto. +""" + +OPENAI_RESPONSE_SERVICE_TIER: Final = "openai.response.service_tier" +""" +The service tier used for the response. +""" + +OPENAI_RESPONSE_SYSTEM_FINGERPRINT: Final = ( + "openai.response.system_fingerprint" +) +""" +A fingerprint to track any eventual change in the Generative AI environment. +""" + + +class OpenaiRequestServiceTierValues(Enum): + AUTO = "auto" + """The system will utilize scale tier credits until they are exhausted.""" + DEFAULT = "default" + """The system will utilize the default scale tier.""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/otel_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/otel_attributes.py index 7f580842d78..fda3ca0ec1e 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/otel_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/otel_attributes.py @@ -57,6 +57,11 @@ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OTEL_SCOPE_NAME`. """ +OTEL_SCOPE_SCHEMA_URL: Final = "otel.scope.schema_url" +""" +The schema URL of the instrumentation scope. +""" + OTEL_SCOPE_VERSION: Final = "otel.scope.version" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OTEL_SCOPE_VERSION`. 
diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/system_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/system_attributes.py index 57a48b06dd3..5d011f4313b 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/system_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/system_attributes.py @@ -134,7 +134,7 @@ class SystemFilesystemTypeValues(Enum): class SystemMemoryStateValues(Enum): USED = "used" - """used.""" + """Actual used virtual memory in bytes.""" FREE = "free" """free.""" SHARED = "shared" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/azure_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/azure_metrics.py index 2e45a2cab72..2273ac80c02 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/azure_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/azure_metrics.py @@ -33,7 +33,7 @@ def create_azure_cosmosdb_client_active_instance_count( """Number of active client instances""" return meter.create_up_down_counter( name=AZURE_COSMOSDB_CLIENT_ACTIVE_INSTANCE_COUNT, - description="Number of active client instances", + description="Number of active client instances.", unit="{instance}", ) @@ -54,6 +54,6 @@ def create_azure_cosmosdb_client_operation_request_charge( """[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation""" return meter.create_histogram( name=AZURE_COSMOSDB_CLIENT_OPERATION_REQUEST_CHARGE, - description="[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation", + description="[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation.", unit="{request_unit}", ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/container_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/container_metrics.py index ca4a91317a0..f300e087133 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/container_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/container_metrics.py @@ -29,6 +29,7 @@ Meter, ObservableGauge, Observation, + UpDownCounter, ) # pylint: disable=invalid-name @@ -50,7 +51,7 @@ def create_container_cpu_time(meter: Meter) -> Counter: """Total CPU time consumed""" return meter.create_counter( name=CONTAINER_CPU_TIME, - description="Total CPU time consumed", + description="Total CPU time consumed.", unit="s", ) @@ -71,7 +72,7 @@ def create_container_cpu_usage( return meter.create_observable_gauge( name=CONTAINER_CPU_USAGE, callbacks=callbacks, - description="Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs", + description="Container's CPU usage, measured in cpus. 
Range from 0 to the number of allocatable CPUs.", unit="{cpu}", ) @@ -94,6 +95,71 @@ def create_container_disk_io(meter: Meter) -> Counter: ) +CONTAINER_FILESYSTEM_AVAILABLE: Final = "container.filesystem.available" +""" +Container filesystem available bytes +Instrument: updowncounter +Unit: By +Note: In K8s, this metric is derived from the +[FsStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field +of the [ContainerStats.Rootfs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats) +of the Kubelet's stats API. +""" + + +def create_container_filesystem_available(meter: Meter) -> UpDownCounter: + """Container filesystem available bytes""" + return meter.create_up_down_counter( + name=CONTAINER_FILESYSTEM_AVAILABLE, + description="Container filesystem available bytes.", + unit="By", + ) + + +CONTAINER_FILESYSTEM_CAPACITY: Final = "container.filesystem.capacity" +""" +Container filesystem capacity +Instrument: updowncounter +Unit: By +Note: In K8s, this metric is derived from the +[FsStats.CapacityBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field +of the [ContainerStats.Rootfs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats) +of the Kubelet's stats API. +""" + + +def create_container_filesystem_capacity(meter: Meter) -> UpDownCounter: + """Container filesystem capacity""" + return meter.create_up_down_counter( + name=CONTAINER_FILESYSTEM_CAPACITY, + description="Container filesystem capacity.", + unit="By", + ) + + +CONTAINER_FILESYSTEM_USAGE: Final = "container.filesystem.usage" +""" +Container filesystem usage +Instrument: updowncounter +Unit: By +Note: This may not equal capacity - available. + +In K8s, this metric is derived from the +[FsStats.UsedBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field +of the [ContainerStats.Rootfs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats) +of the Kubelet's stats API. 
+""" + + +def create_container_filesystem_usage(meter: Meter) -> UpDownCounter: + """Container filesystem usage""" + return meter.create_up_down_counter( + name=CONTAINER_FILESYSTEM_USAGE, + description="Container filesystem usage.", + unit="By", + ) + + CONTAINER_MEMORY_USAGE: Final = "container.memory.usage" """ Memory usage of the container @@ -147,6 +213,6 @@ def create_container_uptime( return meter.create_observable_gauge( name=CONTAINER_UPTIME, callbacks=callbacks, - description="The time the container has been running", + description="The time the container has been running.", unit="s", ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/db_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/db_metrics.py index e78dc6b246c..4df9d1e5720 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/db_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/db_metrics.py @@ -29,7 +29,7 @@ def create_db_client_connection_count(meter: Meter) -> UpDownCounter: """The number of connections that are currently in state described by the `state` attribute""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTION_COUNT, - description="The number of connections that are currently in state described by the `state` attribute", + description="The number of connections that are currently in state described by the `state` attribute.", unit="{connection}", ) @@ -46,7 +46,7 @@ def create_db_client_connection_create_time(meter: Meter) -> Histogram: """The time it took to create a new connection""" return meter.create_histogram( name=DB_CLIENT_CONNECTION_CREATE_TIME, - description="The time it took to create a new connection", + description="The time it took to create a new connection.", unit="s", ) @@ -63,7 +63,7 @@ def create_db_client_connection_idle_max(meter: Meter) -> UpDownCounter: """The maximum number of idle open connections allowed""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTION_IDLE_MAX, - description="The maximum number of idle open connections allowed", + description="The maximum number of idle open connections allowed.", unit="{connection}", ) @@ -80,7 +80,7 @@ def create_db_client_connection_idle_min(meter: Meter) -> UpDownCounter: """The minimum number of idle open connections allowed""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTION_IDLE_MIN, - description="The minimum number of idle open connections allowed", + description="The minimum number of idle open connections allowed.", unit="{connection}", ) @@ -97,7 +97,7 @@ def create_db_client_connection_max(meter: Meter) -> UpDownCounter: """The maximum number of open connections allowed""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTION_MAX, - description="The maximum number of open connections allowed", + description="The maximum number of open connections allowed.", unit="{connection}", ) @@ -118,7 +118,7 @@ def create_db_client_connection_pending_requests( """The number of current pending requests for an open connection""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTION_PENDING_REQUESTS, - description="The number of current pending requests for an open connection", + description="The number of current pending requests for an open connection.", unit="{request}", ) @@ -135,7 +135,7 @@ def create_db_client_connection_timeouts(meter: Meter) -> Counter: """The number of connection timeouts that have occurred 
trying to obtain a connection from the pool""" return meter.create_counter( name=DB_CLIENT_CONNECTION_TIMEOUTS, - description="The number of connection timeouts that have occurred trying to obtain a connection from the pool", + description="The number of connection timeouts that have occurred trying to obtain a connection from the pool.", unit="{timeout}", ) @@ -152,7 +152,7 @@ def create_db_client_connection_use_time(meter: Meter) -> Histogram: """The time between borrowing a connection and returning it to the pool""" return meter.create_histogram( name=DB_CLIENT_CONNECTION_USE_TIME, - description="The time between borrowing a connection and returning it to the pool", + description="The time between borrowing a connection and returning it to the pool.", unit="s", ) @@ -169,7 +169,7 @@ def create_db_client_connection_wait_time(meter: Meter) -> Histogram: """The time it took to obtain an open connection from the pool""" return meter.create_histogram( name=DB_CLIENT_CONNECTION_WAIT_TIME, - description="The time it took to obtain an open connection from the pool", + description="The time it took to obtain an open connection from the pool.", unit="s", ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/faas_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/faas_metrics.py index 5fd14149ab8..8d64c8227a4 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/faas_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/faas_metrics.py @@ -29,7 +29,7 @@ def create_faas_coldstarts(meter: Meter) -> Counter: """Number of invocation cold starts""" return meter.create_counter( name=FAAS_COLDSTARTS, - description="Number of invocation cold starts", + description="Number of invocation cold starts.", unit="{coldstart}", ) @@ -46,7 +46,7 @@ def create_faas_cpu_usage(meter: Meter) -> Histogram: """Distribution of CPU usage per invocation""" return meter.create_histogram( name=FAAS_CPU_USAGE, - description="Distribution of CPU usage per invocation", + description="Distribution of CPU usage per invocation.", unit="s", ) @@ -63,7 +63,7 @@ def create_faas_errors(meter: Meter) -> Counter: """Number of invocation errors""" return meter.create_counter( name=FAAS_ERRORS, - description="Number of invocation errors", + description="Number of invocation errors.", unit="{error}", ) @@ -80,7 +80,7 @@ def create_faas_init_duration(meter: Meter) -> Histogram: """Measures the duration of the function's initialization, such as a cold start""" return meter.create_histogram( name=FAAS_INIT_DURATION, - description="Measures the duration of the function's initialization, such as a cold start", + description="Measures the duration of the function's initialization, such as a cold start.", unit="s", ) @@ -97,7 +97,7 @@ def create_faas_invocations(meter: Meter) -> Counter: """Number of successful invocations""" return meter.create_counter( name=FAAS_INVOCATIONS, - description="Number of successful invocations", + description="Number of successful invocations.", unit="{invocation}", ) @@ -114,7 +114,7 @@ def create_faas_invoke_duration(meter: Meter) -> Histogram: """Measures the duration of the function's logic execution""" return meter.create_histogram( name=FAAS_INVOKE_DURATION, - description="Measures the duration of the function's logic execution", + description="Measures the duration of the function's logic execution.", unit="s", ) @@ -131,7 +131,7 @@ def 
create_faas_mem_usage(meter: Meter) -> Histogram: """Distribution of max memory usage per invocation""" return meter.create_histogram( name=FAAS_MEM_USAGE, - description="Distribution of max memory usage per invocation", + description="Distribution of max memory usage per invocation.", unit="By", ) @@ -148,7 +148,7 @@ def create_faas_net_io(meter: Meter) -> Histogram: """Distribution of net I/O usage per invocation""" return meter.create_histogram( name=FAAS_NET_IO, - description="Distribution of net I/O usage per invocation", + description="Distribution of net I/O usage per invocation.", unit="By", ) @@ -165,6 +165,6 @@ def create_faas_timeouts(meter: Meter) -> Counter: """Number of invocation timeouts""" return meter.create_counter( name=FAAS_TIMEOUTS, - description="Number of invocation timeouts", + description="Number of invocation timeouts.", unit="{timeout}", ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/gen_ai_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/gen_ai_metrics.py index 97d9dd00afc..7a7afa33888 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/gen_ai_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/gen_ai_metrics.py @@ -29,24 +29,24 @@ def create_gen_ai_client_operation_duration(meter: Meter) -> Histogram: """GenAI operation duration""" return meter.create_histogram( name=GEN_AI_CLIENT_OPERATION_DURATION, - description="GenAI operation duration", + description="GenAI operation duration.", unit="s", ) GEN_AI_CLIENT_TOKEN_USAGE: Final = "gen_ai.client.token.usage" """ -Measures number of input and output tokens used +Number of input and output tokens used Instrument: histogram Unit: {token} """ def create_gen_ai_client_token_usage(meter: Meter) -> Histogram: - """Measures number of input and output tokens used""" + """Number of input and output tokens used""" return meter.create_histogram( name=GEN_AI_CLIENT_TOKEN_USAGE, - description="Measures number of input and output tokens used", + description="Number of input and output tokens used.", unit="{token}", ) @@ -63,7 +63,7 @@ def create_gen_ai_server_request_duration(meter: Meter) -> Histogram: """Generative AI server request duration such as time-to-last byte or last output token""" return meter.create_histogram( name=GEN_AI_SERVER_REQUEST_DURATION, - description="Generative AI server request duration such as time-to-last byte or last output token", + description="Generative AI server request duration such as time-to-last byte or last output token.", unit="s", ) @@ -82,7 +82,7 @@ def create_gen_ai_server_time_per_output_token(meter: Meter) -> Histogram: """Time per output token generated after the first token for successful responses""" return meter.create_histogram( name=GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN, - description="Time per output token generated after the first token for successful responses", + description="Time per output token generated after the first token for successful responses.", unit="s", ) @@ -99,6 +99,6 @@ def create_gen_ai_server_time_to_first_token(meter: Meter) -> Histogram: """Time to generate first token for successful responses""" return meter.create_histogram( name=GEN_AI_SERVER_TIME_TO_FIRST_TOKEN, - description="Time to generate first token for successful responses", + description="Time to generate first token for successful responses.", unit="s", ) diff --git 
a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/hw_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/hw_metrics.py index d06890fd2f0..6e47186cbf3 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/hw_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/hw_metrics.py @@ -38,6 +38,106 @@ Generator[Iterable[Observation], CallbackOptions, None], ] +HW_BATTERY_CHARGE: Final = "hw.battery.charge" +""" +Remaining fraction of battery charge +Instrument: gauge +Unit: 1 +""" + + +def create_hw_battery_charge( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Remaining fraction of battery charge""" + return meter.create_observable_gauge( + name=HW_BATTERY_CHARGE, + callbacks=callbacks, + description="Remaining fraction of battery charge.", + unit="1", + ) + + +HW_BATTERY_CHARGE_LIMIT: Final = "hw.battery.charge.limit" +""" +Lower limit of battery charge fraction to ensure proper operation +Instrument: gauge +Unit: 1 +""" + + +def create_hw_battery_charge_limit( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Lower limit of battery charge fraction to ensure proper operation""" + return meter.create_observable_gauge( + name=HW_BATTERY_CHARGE_LIMIT, + callbacks=callbacks, + description="Lower limit of battery charge fraction to ensure proper operation.", + unit="1", + ) + + +HW_BATTERY_TIME_LEFT: Final = "hw.battery.time_left" +""" +Time left before battery is completely charged or discharged +Instrument: gauge +Unit: s +""" + + +def create_hw_battery_time_left( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Time left before battery is completely charged or discharged""" + return meter.create_observable_gauge( + name=HW_BATTERY_TIME_LEFT, + callbacks=callbacks, + description="Time left before battery is completely charged or discharged.", + unit="s", + ) + + +HW_CPU_SPEED: Final = "hw.cpu.speed" +""" +CPU current frequency +Instrument: gauge +Unit: Hz +""" + + +def create_hw_cpu_speed( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """CPU current frequency""" + return meter.create_observable_gauge( + name=HW_CPU_SPEED, + callbacks=callbacks, + description="CPU current frequency.", + unit="Hz", + ) + + +HW_CPU_SPEED_LIMIT: Final = "hw.cpu.speed.limit" +""" +CPU maximum frequency +Instrument: gauge +Unit: Hz +""" + + +def create_hw_cpu_speed_limit( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """CPU maximum frequency""" + return meter.create_observable_gauge( + name=HW_CPU_SPEED_LIMIT, + callbacks=callbacks, + description="CPU maximum frequency.", + unit="Hz", + ) + + HW_ENERGY: Final = "hw.energy" """ Energy consumed by the component @@ -50,7 +150,7 @@ def create_hw_energy(meter: Meter) -> Counter: """Energy consumed by the component""" return meter.create_counter( name=HW_ENERGY, - description="Energy consumed by the component", + description="Energy consumed by the component.", unit="J", ) @@ -67,11 +167,162 @@ def create_hw_errors(meter: Meter) -> Counter: """Number of errors encountered by the component""" return meter.create_counter( name=HW_ERRORS, - description="Number of errors encountered by the component", + description="Number of errors encountered by the component.", unit="{error}", ) +HW_FAN_SPEED: Final = "hw.fan.speed" +""" +Fan speed in 
revolutions per minute +Instrument: gauge +Unit: rpm +""" + + +def create_hw_fan_speed( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Fan speed in revolutions per minute""" + return meter.create_observable_gauge( + name=HW_FAN_SPEED, + callbacks=callbacks, + description="Fan speed in revolutions per minute.", + unit="rpm", + ) + + +HW_FAN_SPEED_LIMIT: Final = "hw.fan.speed.limit" +""" +Speed limit in rpm +Instrument: gauge +Unit: rpm +""" + + +def create_hw_fan_speed_limit( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Speed limit in rpm""" + return meter.create_observable_gauge( + name=HW_FAN_SPEED_LIMIT, + callbacks=callbacks, + description="Speed limit in rpm.", + unit="rpm", + ) + + +HW_FAN_SPEED_RATIO: Final = "hw.fan.speed_ratio" +""" +Fan speed expressed as a fraction of its maximum speed +Instrument: gauge +Unit: 1 +""" + + +def create_hw_fan_speed_ratio( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Fan speed expressed as a fraction of its maximum speed""" + return meter.create_observable_gauge( + name=HW_FAN_SPEED_RATIO, + callbacks=callbacks, + description="Fan speed expressed as a fraction of its maximum speed.", + unit="1", + ) + + +HW_GPU_IO: Final = "hw.gpu.io" +""" +Received and transmitted bytes by the GPU +Instrument: counter +Unit: By +""" + + +def create_hw_gpu_io(meter: Meter) -> Counter: + """Received and transmitted bytes by the GPU""" + return meter.create_counter( + name=HW_GPU_IO, + description="Received and transmitted bytes by the GPU.", + unit="By", + ) + + +HW_GPU_MEMORY_LIMIT: Final = "hw.gpu.memory.limit" +""" +Size of the GPU memory +Instrument: updowncounter +Unit: By +""" + + +def create_hw_gpu_memory_limit(meter: Meter) -> UpDownCounter: + """Size of the GPU memory""" + return meter.create_up_down_counter( + name=HW_GPU_MEMORY_LIMIT, + description="Size of the GPU memory.", + unit="By", + ) + + +HW_GPU_MEMORY_USAGE: Final = "hw.gpu.memory.usage" +""" +GPU memory used +Instrument: updowncounter +Unit: By +""" + + +def create_hw_gpu_memory_usage(meter: Meter) -> UpDownCounter: + """GPU memory used""" + return meter.create_up_down_counter( + name=HW_GPU_MEMORY_USAGE, + description="GPU memory used.", + unit="By", + ) + + +HW_GPU_MEMORY_UTILIZATION: Final = "hw.gpu.memory.utilization" +""" +Fraction of GPU memory used +Instrument: gauge +Unit: 1 +""" + + +def create_hw_gpu_memory_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Fraction of GPU memory used""" + return meter.create_observable_gauge( + name=HW_GPU_MEMORY_UTILIZATION, + callbacks=callbacks, + description="Fraction of GPU memory used.", + unit="1", + ) + + +HW_GPU_UTILIZATION: Final = "hw.gpu.utilization" +""" +Fraction of time spent in a specific task +Instrument: gauge +Unit: 1 +""" + + +def create_hw_gpu_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Fraction of time spent in a specific task""" + return meter.create_observable_gauge( + name=HW_GPU_UTILIZATION, + callbacks=callbacks, + description="Fraction of time spent in a specific task.", + unit="1", + ) + + HW_HOST_AMBIENT_TEMPERATURE: Final = "hw.host.ambient_temperature" """ Ambient (external) temperature of the physical host @@ -87,7 +338,7 @@ def create_hw_host_ambient_temperature( return meter.create_observable_gauge( name=HW_HOST_AMBIENT_TEMPERATURE, callbacks=callbacks, - description="Ambient (external) temperature of 
the physical host", + description="Ambient (external) temperature of the physical host.", unit="Cel", ) @@ -105,7 +356,7 @@ def create_hw_host_energy(meter: Meter) -> Counter: """Total energy consumed by the entire physical host, in joules""" return meter.create_counter( name=HW_HOST_ENERGY, - description="Total energy consumed by the entire physical host, in joules", + description="Total energy consumed by the entire physical host, in joules.", unit="J", ) @@ -125,7 +376,7 @@ def create_hw_host_heating_margin( return meter.create_observable_gauge( name=HW_HOST_HEATING_MARGIN, callbacks=callbacks, - description="By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors", + description="By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors.", unit="Cel", ) @@ -146,11 +397,229 @@ def create_hw_host_power( return meter.create_observable_gauge( name=HW_HOST_POWER, callbacks=callbacks, - description="Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred)", + description="Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred).", unit="W", ) +HW_LOGICAL_DISK_LIMIT: Final = "hw.logical_disk.limit" +""" +Size of the logical disk +Instrument: updowncounter +Unit: By +""" + + +def create_hw_logical_disk_limit(meter: Meter) -> UpDownCounter: + """Size of the logical disk""" + return meter.create_up_down_counter( + name=HW_LOGICAL_DISK_LIMIT, + description="Size of the logical disk.", + unit="By", + ) + + +HW_LOGICAL_DISK_USAGE: Final = "hw.logical_disk.usage" +""" +Logical disk space usage +Instrument: updowncounter +Unit: By +""" + + +def create_hw_logical_disk_usage(meter: Meter) -> UpDownCounter: + """Logical disk space usage""" + return meter.create_up_down_counter( + name=HW_LOGICAL_DISK_USAGE, + description="Logical disk space usage.", + unit="By", + ) + + +HW_LOGICAL_DISK_UTILIZATION: Final = "hw.logical_disk.utilization" +""" +Logical disk space utilization as a fraction +Instrument: gauge +Unit: 1 +""" + + +def create_hw_logical_disk_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Logical disk space utilization as a fraction""" + return meter.create_observable_gauge( + name=HW_LOGICAL_DISK_UTILIZATION, + callbacks=callbacks, + description="Logical disk space utilization as a fraction.", + unit="1", + ) + + +HW_MEMORY_SIZE: Final = "hw.memory.size" +""" +Size of the memory module +Instrument: updowncounter +Unit: By +""" + + +def create_hw_memory_size(meter: Meter) -> UpDownCounter: + """Size of the memory module""" + return meter.create_up_down_counter( + name=HW_MEMORY_SIZE, + description="Size of the memory module.", + unit="By", + ) + + +HW_NETWORK_BANDWIDTH_LIMIT: Final = "hw.network.bandwidth.limit" +""" +Link speed +Instrument: updowncounter +Unit: By/s +""" + + +def create_hw_network_bandwidth_limit(meter: Meter) -> UpDownCounter: + """Link speed""" + return meter.create_up_down_counter( + name=HW_NETWORK_BANDWIDTH_LIMIT, + description="Link speed.", + unit="By/s", + ) + + +HW_NETWORK_BANDWIDTH_UTILIZATION: Final = "hw.network.bandwidth.utilization" +""" +Utilization of the network bandwidth as a fraction +Instrument: gauge +Unit: 1 +""" + + +def create_hw_network_bandwidth_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + 
"""Utilization of the network bandwidth as a fraction""" + return meter.create_observable_gauge( + name=HW_NETWORK_BANDWIDTH_UTILIZATION, + callbacks=callbacks, + description="Utilization of the network bandwidth as a fraction.", + unit="1", + ) + + +HW_NETWORK_IO: Final = "hw.network.io" +""" +Received and transmitted network traffic in bytes +Instrument: counter +Unit: By +""" + + +def create_hw_network_io(meter: Meter) -> Counter: + """Received and transmitted network traffic in bytes""" + return meter.create_counter( + name=HW_NETWORK_IO, + description="Received and transmitted network traffic in bytes.", + unit="By", + ) + + +HW_NETWORK_PACKETS: Final = "hw.network.packets" +""" +Received and transmitted network traffic in packets (or frames) +Instrument: counter +Unit: {packet} +""" + + +def create_hw_network_packets(meter: Meter) -> Counter: + """Received and transmitted network traffic in packets (or frames)""" + return meter.create_counter( + name=HW_NETWORK_PACKETS, + description="Received and transmitted network traffic in packets (or frames).", + unit="{packet}", + ) + + +HW_NETWORK_UP: Final = "hw.network.up" +""" +Link status: `1` (up) or `0` (down) +Instrument: updowncounter +Unit: 1 +""" + + +def create_hw_network_up(meter: Meter) -> UpDownCounter: + """Link status: `1` (up) or `0` (down)""" + return meter.create_up_down_counter( + name=HW_NETWORK_UP, + description="Link status: `1` (up) or `0` (down).", + unit="1", + ) + + +HW_PHYSICAL_DISK_ENDURANCE_UTILIZATION: Final = ( + "hw.physical_disk.endurance_utilization" +) +""" +Endurance remaining for this SSD disk +Instrument: gauge +Unit: 1 +""" + + +def create_hw_physical_disk_endurance_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Endurance remaining for this SSD disk""" + return meter.create_observable_gauge( + name=HW_PHYSICAL_DISK_ENDURANCE_UTILIZATION, + callbacks=callbacks, + description="Endurance remaining for this SSD disk.", + unit="1", + ) + + +HW_PHYSICAL_DISK_SIZE: Final = "hw.physical_disk.size" +""" +Size of the disk +Instrument: updowncounter +Unit: By +""" + + +def create_hw_physical_disk_size(meter: Meter) -> UpDownCounter: + """Size of the disk""" + return meter.create_up_down_counter( + name=HW_PHYSICAL_DISK_SIZE, + description="Size of the disk.", + unit="By", + ) + + +HW_PHYSICAL_DISK_SMART: Final = "hw.physical_disk.smart" +""" +Value of the corresponding [S.M.A.R.T.](https://wikipedia.org/wiki/S.M.A.R.T.) (Self-Monitoring, Analysis, and Reporting Technology) attribute +Instrument: gauge +Unit: 1 +""" + + +def create_hw_physical_disk_smart( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Value of the corresponding [S.M.A.R.T.](https://wikipedia.org/wiki/S.M.A.R.T.) (Self-Monitoring, Analysis, and Reporting Technology) attribute""" + return meter.create_observable_gauge( + name=HW_PHYSICAL_DISK_SMART, + callbacks=callbacks, + description="Value of the corresponding [S.M.A.R.T.](https://wikipedia.org/wiki/S.M.A.R.T.) 
(Self-Monitoring, Analysis, and Reporting Technology) attribute.", + unit="1", + ) + + HW_POWER: Final = "hw.power" """ Instantaneous power consumed by the component @@ -167,11 +636,65 @@ def create_hw_power( return meter.create_observable_gauge( name=HW_POWER, callbacks=callbacks, - description="Instantaneous power consumed by the component", + description="Instantaneous power consumed by the component.", + unit="W", + ) + + +HW_POWER_SUPPLY_LIMIT: Final = "hw.power_supply.limit" +""" +Maximum power output of the power supply +Instrument: updowncounter +Unit: W +""" + + +def create_hw_power_supply_limit(meter: Meter) -> UpDownCounter: + """Maximum power output of the power supply""" + return meter.create_up_down_counter( + name=HW_POWER_SUPPLY_LIMIT, + description="Maximum power output of the power supply.", unit="W", ) +HW_POWER_SUPPLY_USAGE: Final = "hw.power_supply.usage" +""" +Current power output of the power supply +Instrument: updowncounter +Unit: W +""" + + +def create_hw_power_supply_usage(meter: Meter) -> UpDownCounter: + """Current power output of the power supply""" + return meter.create_up_down_counter( + name=HW_POWER_SUPPLY_USAGE, + description="Current power output of the power supply.", + unit="W", + ) + + +HW_POWER_SUPPLY_UTILIZATION: Final = "hw.power_supply.utilization" +""" +Utilization of the power supply as a fraction of its maximum output +Instrument: gauge +Unit: 1 +""" + + +def create_hw_power_supply_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Utilization of the power supply as a fraction of its maximum output""" + return meter.create_observable_gauge( + name=HW_POWER_SUPPLY_UTILIZATION, + callbacks=callbacks, + description="Utilization of the power supply as a fraction of its maximum output.", + unit="1", + ) + + HW_STATUS: Final = "hw.status" """ Operational status: `1` (true) or `0` (false) for each of the possible states @@ -185,6 +708,123 @@ def create_hw_status(meter: Meter) -> UpDownCounter: """Operational status: `1` (true) or `0` (false) for each of the possible states""" return meter.create_up_down_counter( name=HW_STATUS, - description="Operational status: `1` (true) or `0` (false) for each of the possible states", + description="Operational status: `1` (true) or `0` (false) for each of the possible states.", unit="1", ) + + +HW_TAPE_DRIVE_OPERATIONS: Final = "hw.tape_drive.operations" +""" +Operations performed by the tape drive +Instrument: counter +Unit: {operation} +""" + + +def create_hw_tape_drive_operations(meter: Meter) -> Counter: + """Operations performed by the tape drive""" + return meter.create_counter( + name=HW_TAPE_DRIVE_OPERATIONS, + description="Operations performed by the tape drive.", + unit="{operation}", + ) + + +HW_TEMPERATURE: Final = "hw.temperature" +""" +Temperature in degrees Celsius +Instrument: gauge +Unit: Cel +""" + + +def create_hw_temperature( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Temperature in degrees Celsius""" + return meter.create_observable_gauge( + name=HW_TEMPERATURE, + callbacks=callbacks, + description="Temperature in degrees Celsius.", + unit="Cel", + ) + + +HW_TEMPERATURE_LIMIT: Final = "hw.temperature.limit" +""" +Temperature limit in degrees Celsius +Instrument: gauge +Unit: Cel +""" + + +def create_hw_temperature_limit( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Temperature limit in degrees Celsius""" + return meter.create_observable_gauge( + 
name=HW_TEMPERATURE_LIMIT, + callbacks=callbacks, + description="Temperature limit in degrees Celsius.", + unit="Cel", + ) + + +HW_VOLTAGE: Final = "hw.voltage" +""" +Voltage measured by the sensor +Instrument: gauge +Unit: V +""" + + +def create_hw_voltage( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Voltage measured by the sensor""" + return meter.create_observable_gauge( + name=HW_VOLTAGE, + callbacks=callbacks, + description="Voltage measured by the sensor.", + unit="V", + ) + + +HW_VOLTAGE_LIMIT: Final = "hw.voltage.limit" +""" +Voltage limit in Volts +Instrument: gauge +Unit: V +""" + + +def create_hw_voltage_limit( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Voltage limit in Volts""" + return meter.create_observable_gauge( + name=HW_VOLTAGE_LIMIT, + callbacks=callbacks, + description="Voltage limit in Volts.", + unit="V", + ) + + +HW_VOLTAGE_NOMINAL: Final = "hw.voltage.nominal" +""" +Nominal (expected) voltage +Instrument: gauge +Unit: V +""" + + +def create_hw_voltage_nominal( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Nominal (expected) voltage""" + return meter.create_observable_gauge( + name=HW_VOLTAGE_NOMINAL, + callbacks=callbacks, + description="Nominal (expected) voltage.", + unit="V", + ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py index e88ea8254d0..aa14e94dd98 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py @@ -51,7 +51,7 @@ def create_k8s_container_cpu_limit(meter: Meter) -> UpDownCounter: """Maximum CPU resource limit set for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_CPU_LIMIT, - description="Maximum CPU resource limit set for the container", + description="Maximum CPU resource limit set for the container.", unit="{cpu}", ) @@ -69,7 +69,7 @@ def create_k8s_container_cpu_request(meter: Meter) -> UpDownCounter: """CPU resource requested for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_CPU_REQUEST, - description="CPU resource requested for the container", + description="CPU resource requested for the container.", unit="{cpu}", ) @@ -91,7 +91,7 @@ def create_k8s_container_ephemeral_storage_limit( """Maximum ephemeral storage resource limit set for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_EPHEMERAL_STORAGE_LIMIT, - description="Maximum ephemeral storage resource limit set for the container", + description="Maximum ephemeral storage resource limit set for the container.", unit="By", ) @@ -113,7 +113,7 @@ def create_k8s_container_ephemeral_storage_request( """Ephemeral storage resource requested for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_EPHEMERAL_STORAGE_REQUEST, - description="Ephemeral storage resource requested for the container", + description="Ephemeral storage resource requested for the container.", unit="By", ) @@ -131,7 +131,7 @@ def create_k8s_container_memory_limit(meter: Meter) -> UpDownCounter: """Maximum memory resource limit set for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_MEMORY_LIMIT, - description="Maximum memory resource limit set for the container", + 
description="Maximum memory resource limit set for the container.", unit="By", ) @@ -149,7 +149,7 @@ def create_k8s_container_memory_request(meter: Meter) -> UpDownCounter: """Memory resource requested for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_MEMORY_REQUEST, - description="Memory resource requested for the container", + description="Memory resource requested for the container.", unit="By", ) @@ -168,7 +168,7 @@ def create_k8s_container_ready(meter: Meter) -> UpDownCounter: """Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready)""" return meter.create_up_down_counter( name=K8S_CONTAINER_READY, - description="Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready)", + description="Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready).", unit="{container}", ) @@ -190,7 +190,7 @@ def create_k8s_container_restart_count(meter: Meter) -> UpDownCounter: """Describes how many times the container has restarted (since the last counter reset)""" return meter.create_up_down_counter( name=K8S_CONTAINER_RESTART_COUNT, - description="Describes how many times the container has restarted (since the last counter reset)", + description="Describes how many times the container has restarted (since the last counter reset).", unit="{restart}", ) @@ -209,7 +209,7 @@ def create_k8s_container_status_reason(meter: Meter) -> UpDownCounter: """Describes the number of K8s containers that are currently in a state for a given reason""" return meter.create_up_down_counter( name=K8S_CONTAINER_STATUS_REASON, - description="Describes the number of K8s containers that are currently in a state for a given reason", + description="Describes the number of K8s containers that are currently in a state for a given reason.", unit="{container}", ) @@ -228,7 +228,7 @@ def create_k8s_container_status_state(meter: Meter) -> UpDownCounter: """Describes the number of K8s containers that are currently in a given state""" return meter.create_up_down_counter( name=K8S_CONTAINER_STATUS_STATE, - description="Describes the number of K8s containers that are currently in a given state", + description="Describes the number of K8s containers that are currently in a given state.", unit="{container}", ) @@ -246,7 +246,7 @@ def create_k8s_container_storage_limit(meter: Meter) -> UpDownCounter: """Maximum storage resource limit set for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_STORAGE_LIMIT, - description="Maximum storage resource limit set for the container", + description="Maximum storage resource limit set for the container.", unit="By", ) @@ -264,7 +264,7 @@ def create_k8s_container_storage_request(meter: Meter) -> UpDownCounter: """Storage resource requested for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_STORAGE_REQUEST, - description="Storage resource requested for the container", + description="Storage resource requested for the container.", unit="By", ) @@ -283,7 +283,7 @@ def create_k8s_cronjob_active_jobs(meter: Meter) -> UpDownCounter: """The number of actively running jobs for a cronjob""" return meter.create_up_down_counter( name=K8S_CRONJOB_ACTIVE_JOBS, - description="The number of actively running jobs for a cronjob", + description="The number of actively running jobs for a cronjob.", unit="{job}", ) @@ 
-306,7 +306,7 @@ def create_k8s_daemonset_current_scheduled_nodes( """Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod""" return meter.create_up_down_counter( name=K8S_DAEMONSET_CURRENT_SCHEDULED_NODES, - description="Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod", + description="Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod.", unit="{node}", ) @@ -329,7 +329,7 @@ def create_k8s_daemonset_desired_scheduled_nodes( """Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)""" return meter.create_up_down_counter( name=K8S_DAEMONSET_DESIRED_SCHEDULED_NODES, - description="Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)", + description="Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod).", unit="{node}", ) @@ -348,7 +348,7 @@ def create_k8s_daemonset_misscheduled_nodes(meter: Meter) -> UpDownCounter: """Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod""" return meter.create_up_down_counter( name=K8S_DAEMONSET_MISSCHEDULED_NODES, - description="Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod", + description="Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod.", unit="{node}", ) @@ -367,7 +367,7 @@ def create_k8s_daemonset_ready_nodes(meter: Meter) -> UpDownCounter: """Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready""" return meter.create_up_down_counter( name=K8S_DAEMONSET_READY_NODES, - description="Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready", + description="Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", unit="{node}", ) @@ -386,7 +386,7 @@ def create_k8s_deployment_available_pods(meter: Meter) -> UpDownCounter: """Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment""" return meter.create_up_down_counter( name=K8S_DEPLOYMENT_AVAILABLE_PODS, - description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment", + description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment.", unit="{pod}", ) @@ -405,7 +405,7 @@ def create_k8s_deployment_desired_pods(meter: Meter) -> UpDownCounter: """Number of desired replica pods in this deployment""" return meter.create_up_down_counter( name=K8S_DEPLOYMENT_DESIRED_PODS, - description="Number of desired replica pods in this deployment", + description="Number of desired replica pods in this deployment.", unit="{pod}", ) @@ -424,7 +424,7 @@ def create_k8s_hpa_current_pods(meter: Meter) -> UpDownCounter: """Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler""" return meter.create_up_down_counter( name=K8S_HPA_CURRENT_PODS, - description="Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler", + description="Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler.", unit="{pod}", ) @@ -443,7 +443,7 @@ def create_k8s_hpa_desired_pods(meter: 
Meter) -> UpDownCounter: """Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler""" return meter.create_up_down_counter( name=K8S_HPA_DESIRED_PODS, - description="Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler", + description="Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler.", unit="{pod}", ) @@ -462,7 +462,7 @@ def create_k8s_hpa_max_pods(meter: Meter) -> UpDownCounter: """The upper limit for the number of replica pods to which the autoscaler can scale up""" return meter.create_up_down_counter( name=K8S_HPA_MAX_PODS, - description="The upper limit for the number of replica pods to which the autoscaler can scale up", + description="The upper limit for the number of replica pods to which the autoscaler can scale up.", unit="{pod}", ) @@ -557,7 +557,7 @@ def create_k8s_hpa_min_pods(meter: Meter) -> UpDownCounter: """The lower limit for the number of replica pods to which the autoscaler can scale down""" return meter.create_up_down_counter( name=K8S_HPA_MIN_PODS, - description="The lower limit for the number of replica pods to which the autoscaler can scale down", + description="The lower limit for the number of replica pods to which the autoscaler can scale down.", unit="{pod}", ) @@ -576,7 +576,7 @@ def create_k8s_job_active_pods(meter: Meter) -> UpDownCounter: """The number of pending and actively running pods for a job""" return meter.create_up_down_counter( name=K8S_JOB_ACTIVE_PODS, - description="The number of pending and actively running pods for a job", + description="The number of pending and actively running pods for a job.", unit="{pod}", ) @@ -595,7 +595,7 @@ def create_k8s_job_desired_successful_pods(meter: Meter) -> UpDownCounter: """The desired number of successfully finished pods the job should be run with""" return meter.create_up_down_counter( name=K8S_JOB_DESIRED_SUCCESSFUL_PODS, - description="The desired number of successfully finished pods the job should be run with", + description="The desired number of successfully finished pods the job should be run with.", unit="{pod}", ) @@ -614,7 +614,7 @@ def create_k8s_job_failed_pods(meter: Meter) -> UpDownCounter: """The number of pods which reached phase Failed for a job""" return meter.create_up_down_counter( name=K8S_JOB_FAILED_PODS, - description="The number of pods which reached phase Failed for a job", + description="The number of pods which reached phase Failed for a job.", unit="{pod}", ) @@ -633,7 +633,7 @@ def create_k8s_job_max_parallel_pods(meter: Meter) -> UpDownCounter: """The max desired number of pods the job should run at any given time""" return meter.create_up_down_counter( name=K8S_JOB_MAX_PARALLEL_PODS, - description="The max desired number of pods the job should run at any given time", + description="The max desired number of pods the job should run at any given time.", unit="{pod}", ) @@ -652,7 +652,7 @@ def create_k8s_job_successful_pods(meter: Meter) -> UpDownCounter: """The number of pods which reached phase Succeeded for a job""" return meter.create_up_down_counter( name=K8S_JOB_SUCCESSFUL_PODS, - description="The number of pods which reached phase Succeeded for a job", + description="The number of pods which reached phase Succeeded for a job.", unit="{pod}", ) @@ -686,7 +686,7 @@ def create_k8s_node_allocatable_cpu(meter: Meter) -> UpDownCounter: """Amount of cpu allocatable on the node""" return 
meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_CPU, - description="Amount of cpu allocatable on the node", + description="Amount of cpu allocatable on the node.", unit="{cpu}", ) @@ -707,7 +707,7 @@ def create_k8s_node_allocatable_ephemeral_storage( """Amount of ephemeral-storage allocatable on the node""" return meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_EPHEMERAL_STORAGE, - description="Amount of ephemeral-storage allocatable on the node", + description="Amount of ephemeral-storage allocatable on the node.", unit="By", ) @@ -724,7 +724,7 @@ def create_k8s_node_allocatable_memory(meter: Meter) -> UpDownCounter: """Amount of memory allocatable on the node""" return meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_MEMORY, - description="Amount of memory allocatable on the node", + description="Amount of memory allocatable on the node.", unit="By", ) @@ -741,7 +741,7 @@ def create_k8s_node_allocatable_pods(meter: Meter) -> UpDownCounter: """Amount of pods allocatable on the node""" return meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_PODS, - description="Amount of pods allocatable on the node", + description="Amount of pods allocatable on the node.", unit="{pod}", ) @@ -777,7 +777,7 @@ def create_k8s_node_cpu_time(meter: Meter) -> Counter: """Total CPU time consumed""" return meter.create_counter( name=K8S_NODE_CPU_TIME, - description="Total CPU time consumed", + description="Total CPU time consumed.", unit="s", ) @@ -798,11 +798,76 @@ def create_k8s_node_cpu_usage( return meter.create_observable_gauge( name=K8S_NODE_CPU_USAGE, callbacks=callbacks, - description="Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs", + description="Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs.", unit="{cpu}", ) +K8S_NODE_FILESYSTEM_AVAILABLE: Final = "k8s.node.filesystem.available" +""" +Node filesystem available bytes +Instrument: updowncounter +Unit: By +Note: This metric is derived from the +[FsStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field +of the [NodeStats.Fs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats) +of the Kubelet's stats API. +""" + + +def create_k8s_node_filesystem_available(meter: Meter) -> UpDownCounter: + """Node filesystem available bytes""" + return meter.create_up_down_counter( + name=K8S_NODE_FILESYSTEM_AVAILABLE, + description="Node filesystem available bytes.", + unit="By", + ) + + +K8S_NODE_FILESYSTEM_CAPACITY: Final = "k8s.node.filesystem.capacity" +""" +Node filesystem capacity +Instrument: updowncounter +Unit: By +Note: This metric is derived from the +[FsStats.CapacityBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field +of the [NodeStats.Fs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats) +of the Kubelet's stats API. +""" + + +def create_k8s_node_filesystem_capacity(meter: Meter) -> UpDownCounter: + """Node filesystem capacity""" + return meter.create_up_down_counter( + name=K8S_NODE_FILESYSTEM_CAPACITY, + description="Node filesystem capacity.", + unit="By", + ) + + +K8S_NODE_FILESYSTEM_USAGE: Final = "k8s.node.filesystem.usage" +""" +Node filesystem usage +Instrument: updowncounter +Unit: By +Note: This may not equal capacity - available. 
+ +This metric is derived from the +[FsStats.UsedBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field +of the [NodeStats.Fs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats) +of the Kubelet's stats API. +""" + + +def create_k8s_node_filesystem_usage(meter: Meter) -> UpDownCounter: + """Node filesystem usage""" + return meter.create_up_down_counter( + name=K8S_NODE_FILESYSTEM_USAGE, + description="Node filesystem usage.", + unit="By", + ) + + K8S_NODE_MEMORY_USAGE: Final = "k8s.node.memory.usage" """ Memory usage of the Node @@ -819,7 +884,7 @@ def create_k8s_node_memory_usage( return meter.create_observable_gauge( name=K8S_NODE_MEMORY_USAGE, callbacks=callbacks, - description="Memory usage of the Node", + description="Memory usage of the Node.", unit="By", ) @@ -836,7 +901,7 @@ def create_k8s_node_network_errors(meter: Meter) -> Counter: """Node network errors""" return meter.create_counter( name=K8S_NODE_NETWORK_ERRORS, - description="Node network errors", + description="Node network errors.", unit="{error}", ) @@ -853,7 +918,7 @@ def create_k8s_node_network_io(meter: Meter) -> Counter: """Network bytes for the Node""" return meter.create_counter( name=K8S_NODE_NETWORK_IO, - description="Network bytes for the Node", + description="Network bytes for the Node.", unit="By", ) @@ -875,7 +940,7 @@ def create_k8s_node_uptime( return meter.create_observable_gauge( name=K8S_NODE_UPTIME, callbacks=callbacks, - description="The time the Node has been running", + description="The time the Node has been running.", unit="s", ) @@ -893,7 +958,7 @@ def create_k8s_pod_cpu_time(meter: Meter) -> Counter: """Total CPU time consumed""" return meter.create_counter( name=K8S_POD_CPU_TIME, - description="Total CPU time consumed", + description="Total CPU time consumed.", unit="s", ) @@ -914,11 +979,76 @@ def create_k8s_pod_cpu_usage( return meter.create_observable_gauge( name=K8S_POD_CPU_USAGE, callbacks=callbacks, - description="Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs", + description="Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs.", unit="{cpu}", ) +K8S_POD_FILESYSTEM_AVAILABLE: Final = "k8s.pod.filesystem.available" +""" +Pod filesystem available bytes +Instrument: updowncounter +Unit: By +Note: This metric is derived from the +[FsStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field +of the [PodStats.EphemeralStorage](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) +of the Kubelet's stats API. +""" + + +def create_k8s_pod_filesystem_available(meter: Meter) -> UpDownCounter: + """Pod filesystem available bytes""" + return meter.create_up_down_counter( + name=K8S_POD_FILESYSTEM_AVAILABLE, + description="Pod filesystem available bytes.", + unit="By", + ) + + +K8S_POD_FILESYSTEM_CAPACITY: Final = "k8s.pod.filesystem.capacity" +""" +Pod filesystem capacity +Instrument: updowncounter +Unit: By +Note: This metric is derived from the +[FsStats.CapacityBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field +of the [PodStats.EphemeralStorage](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) +of the Kubelet's stats API. 
+""" + + +def create_k8s_pod_filesystem_capacity(meter: Meter) -> UpDownCounter: + """Pod filesystem capacity""" + return meter.create_up_down_counter( + name=K8S_POD_FILESYSTEM_CAPACITY, + description="Pod filesystem capacity.", + unit="By", + ) + + +K8S_POD_FILESYSTEM_USAGE: Final = "k8s.pod.filesystem.usage" +""" +Pod filesystem usage +Instrument: updowncounter +Unit: By +Note: This may not equal capacity - available. + +This metric is derived from the +[FsStats.UsedBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field +of the [PodStats.EphemeralStorage](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) +of the Kubelet's stats API. +""" + + +def create_k8s_pod_filesystem_usage(meter: Meter) -> UpDownCounter: + """Pod filesystem usage""" + return meter.create_up_down_counter( + name=K8S_POD_FILESYSTEM_USAGE, + description="Pod filesystem usage.", + unit="By", + ) + + K8S_POD_MEMORY_USAGE: Final = "k8s.pod.memory.usage" """ Memory usage of the Pod @@ -935,7 +1065,7 @@ def create_k8s_pod_memory_usage( return meter.create_observable_gauge( name=K8S_POD_MEMORY_USAGE, callbacks=callbacks, - description="Memory usage of the Pod", + description="Memory usage of the Pod.", unit="By", ) @@ -952,7 +1082,7 @@ def create_k8s_pod_network_errors(meter: Meter) -> Counter: """Pod network errors""" return meter.create_counter( name=K8S_POD_NETWORK_ERRORS, - description="Pod network errors", + description="Pod network errors.", unit="{error}", ) @@ -969,7 +1099,7 @@ def create_k8s_pod_network_io(meter: Meter) -> Counter: """Network bytes for the Pod""" return meter.create_counter( name=K8S_POD_NETWORK_IO, - description="Network bytes for the Pod", + description="Network bytes for the Pod.", unit="By", ) @@ -991,11 +1121,141 @@ def create_k8s_pod_uptime( return meter.create_observable_gauge( name=K8S_POD_UPTIME, callbacks=callbacks, - description="The time the Pod has been running", + description="The time the Pod has been running.", unit="s", ) +K8S_POD_VOLUME_AVAILABLE: Final = "k8s.pod.volume.available" +""" +Pod volume storage space available +Instrument: updowncounter +Unit: By +Note: This metric is derived from the +[VolumeStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field +of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the +Kubelet's stats API. +""" + + +def create_k8s_pod_volume_available(meter: Meter) -> UpDownCounter: + """Pod volume storage space available""" + return meter.create_up_down_counter( + name=K8S_POD_VOLUME_AVAILABLE, + description="Pod volume storage space available.", + unit="By", + ) + + +K8S_POD_VOLUME_CAPACITY: Final = "k8s.pod.volume.capacity" +""" +Pod volume total capacity +Instrument: updowncounter +Unit: By +Note: This metric is derived from the +[VolumeStats.CapacityBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field +of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the +Kubelet's stats API. 
+""" + + +def create_k8s_pod_volume_capacity(meter: Meter) -> UpDownCounter: + """Pod volume total capacity""" + return meter.create_up_down_counter( + name=K8S_POD_VOLUME_CAPACITY, + description="Pod volume total capacity.", + unit="By", + ) + + +K8S_POD_VOLUME_INODE_COUNT: Final = "k8s.pod.volume.inode.count" +""" +The total inodes in the filesystem of the Pod's volume +Instrument: updowncounter +Unit: {inode} +Note: This metric is derived from the +[VolumeStats.Inodes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field +of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the +Kubelet's stats API. +""" + + +def create_k8s_pod_volume_inode_count(meter: Meter) -> UpDownCounter: + """The total inodes in the filesystem of the Pod's volume""" + return meter.create_up_down_counter( + name=K8S_POD_VOLUME_INODE_COUNT, + description="The total inodes in the filesystem of the Pod's volume.", + unit="{inode}", + ) + + +K8S_POD_VOLUME_INODE_FREE: Final = "k8s.pod.volume.inode.free" +""" +The free inodes in the filesystem of the Pod's volume +Instrument: updowncounter +Unit: {inode} +Note: This metric is derived from the +[VolumeStats.InodesFree](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field +of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the +Kubelet's stats API. +""" + + +def create_k8s_pod_volume_inode_free(meter: Meter) -> UpDownCounter: + """The free inodes in the filesystem of the Pod's volume""" + return meter.create_up_down_counter( + name=K8S_POD_VOLUME_INODE_FREE, + description="The free inodes in the filesystem of the Pod's volume.", + unit="{inode}", + ) + + +K8S_POD_VOLUME_INODE_USED: Final = "k8s.pod.volume.inode.used" +""" +The inodes used by the filesystem of the Pod's volume +Instrument: updowncounter +Unit: {inode} +Note: This metric is derived from the +[VolumeStats.InodesUsed](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field +of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the +Kubelet's stats API. + +This may not be equal to `inodes - free` because filesystem may share inodes with other filesystems. +""" + + +def create_k8s_pod_volume_inode_used(meter: Meter) -> UpDownCounter: + """The inodes used by the filesystem of the Pod's volume""" + return meter.create_up_down_counter( + name=K8S_POD_VOLUME_INODE_USED, + description="The inodes used by the filesystem of the Pod's volume.", + unit="{inode}", + ) + + +K8S_POD_VOLUME_USAGE: Final = "k8s.pod.volume.usage" +""" +Pod volume usage +Instrument: updowncounter +Unit: By +Note: This may not equal capacity - available. + +This metric is derived from the +[VolumeStats.UsedBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field +of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the +Kubelet's stats API. 
+""" + + +def create_k8s_pod_volume_usage(meter: Meter) -> UpDownCounter: + """Pod volume usage""" + return meter.create_up_down_counter( + name=K8S_POD_VOLUME_USAGE, + description="Pod volume usage.", + unit="By", + ) + + K8S_REPLICASET_AVAILABLE_PODS: Final = "k8s.replicaset.available_pods" """ Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset @@ -1010,7 +1270,7 @@ def create_k8s_replicaset_available_pods(meter: Meter) -> UpDownCounter: """Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset""" return meter.create_up_down_counter( name=K8S_REPLICASET_AVAILABLE_PODS, - description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset", + description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset.", unit="{pod}", ) @@ -1029,7 +1289,7 @@ def create_k8s_replicaset_desired_pods(meter: Meter) -> UpDownCounter: """Number of desired replica pods in this replicaset""" return meter.create_up_down_counter( name=K8S_REPLICASET_DESIRED_PODS, - description="Number of desired replica pods in this replicaset", + description="Number of desired replica pods in this replicaset.", unit="{pod}", ) @@ -1090,7 +1350,7 @@ def create_k8s_replicationcontroller_available_pods( """Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller""" return meter.create_up_down_counter( name=K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS, - description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller", + description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller.", unit="{pod}", ) @@ -1113,7 +1373,7 @@ def create_k8s_replicationcontroller_desired_pods( """Number of desired replica pods in this replication controller""" return meter.create_up_down_counter( name=K8S_REPLICATIONCONTROLLER_DESIRED_PODS, - description="Number of desired replica pods in this replication controller", + description="Number of desired replica pods in this replication controller.", unit="{pod}", ) @@ -1624,7 +1884,7 @@ def create_k8s_statefulset_current_pods(meter: Meter) -> UpDownCounter: """The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision""" return meter.create_up_down_counter( name=K8S_STATEFULSET_CURRENT_PODS, - description="The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision", + description="The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision.", unit="{pod}", ) @@ -1643,7 +1903,7 @@ def create_k8s_statefulset_desired_pods(meter: Meter) -> UpDownCounter: """Number of desired replica pods in this statefulset""" return meter.create_up_down_counter( name=K8S_STATEFULSET_DESIRED_PODS, - description="Number of desired replica pods in this statefulset", + description="Number of desired replica pods in this statefulset.", unit="{pod}", ) @@ -1662,7 +1922,7 @@ def create_k8s_statefulset_ready_pods(meter: Meter) -> UpDownCounter: """The number of replica pods created for this statefulset with a Ready Condition""" return meter.create_up_down_counter( name=K8S_STATEFULSET_READY_PODS, - description="The number of replica pods created for this 
statefulset with a Ready Condition", + description="The number of replica pods created for this statefulset with a Ready Condition.", unit="{pod}", ) @@ -1681,6 +1941,6 @@ def create_k8s_statefulset_updated_pods(meter: Meter) -> UpDownCounter: """Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision""" return meter.create_up_down_counter( name=K8S_STATEFULSET_UPDATED_PODS, - description="Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision", + description="Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision.", unit="{pod}", ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/otel_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/otel_metrics.py index 8290065b8a9..a3f24d219f5 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/otel_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/otel_metrics.py @@ -32,7 +32,7 @@ def create_otel_sdk_exporter_log_exported(meter: Meter) -> Counter: """The number of log records for which the export has finished, either successful or failed""" return meter.create_counter( name=OTEL_SDK_EXPORTER_LOG_EXPORTED, - description="The number of log records for which the export has finished, either successful or failed", + description="The number of log records for which the export has finished, either successful or failed.", unit="{log_record}", ) @@ -50,7 +50,7 @@ def create_otel_sdk_exporter_log_inflight(meter: Meter) -> UpDownCounter: """The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)""" return meter.create_up_down_counter( name=OTEL_SDK_EXPORTER_LOG_INFLIGHT, - description="The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)", + description="The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed).", unit="{log_record}", ) @@ -74,7 +74,7 @@ def create_otel_sdk_exporter_metric_data_point_exported( """The number of metric data points for which the export has finished, either successful or failed""" return meter.create_counter( name=OTEL_SDK_EXPORTER_METRIC_DATA_POINT_EXPORTED, - description="The number of metric data points for which the export has finished, either successful or failed", + description="The number of metric data points for which the export has finished, either successful or failed.", unit="{data_point}", ) @@ -96,7 +96,7 @@ def create_otel_sdk_exporter_metric_data_point_inflight( """The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)""" return meter.create_up_down_counter( name=OTEL_SDK_EXPORTER_METRIC_DATA_POINT_INFLIGHT, - description="The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)", + description="The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed).", unit="{data_point}", ) @@ -138,7 +138,7 @@ def create_otel_sdk_exporter_span_exported(meter: Meter) -> Counter: """The number of spans for which the 
export has finished, either successful or failed""" return meter.create_counter( name=OTEL_SDK_EXPORTER_SPAN_EXPORTED, - description="The number of spans for which the export has finished, either successful or failed", + description="The number of spans for which the export has finished, either successful or failed.", unit="{span}", ) @@ -175,7 +175,7 @@ def create_otel_sdk_exporter_span_inflight(meter: Meter) -> UpDownCounter: """The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)""" return meter.create_up_down_counter( name=OTEL_SDK_EXPORTER_SPAN_INFLIGHT, - description="The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)", + description="The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed).", unit="{span}", ) @@ -211,7 +211,7 @@ def create_otel_sdk_log_created(meter: Meter) -> Counter: """The number of logs submitted to enabled SDK Loggers""" return meter.create_counter( name=OTEL_SDK_LOG_CREATED, - description="The number of logs submitted to enabled SDK Loggers", + description="The number of logs submitted to enabled SDK Loggers.", unit="{log_record}", ) @@ -254,7 +254,7 @@ def create_otel_sdk_processor_log_processed(meter: Meter) -> Counter: """The number of log records for which the processing has finished, either successful or failed""" return meter.create_counter( name=OTEL_SDK_PROCESSOR_LOG_PROCESSED, - description="The number of log records for which the processing has finished, either successful or failed", + description="The number of log records for which the processing has finished, either successful or failed.", unit="{log_record}", ) @@ -276,7 +276,7 @@ def create_otel_sdk_processor_log_queue_capacity( """The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold""" return meter.create_up_down_counter( name=OTEL_SDK_PROCESSOR_LOG_QUEUE_CAPACITY, - description="The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold", + description="The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold.", unit="{log_record}", ) @@ -294,7 +294,7 @@ def create_otel_sdk_processor_log_queue_size(meter: Meter) -> UpDownCounter: """The number of log records in the queue of a given instance of an SDK log processor""" return meter.create_up_down_counter( name=OTEL_SDK_PROCESSOR_LOG_QUEUE_SIZE, - description="The number of log records in the queue of a given instance of an SDK log processor", + description="The number of log records in the queue of a given instance of an SDK log processor.", unit="{log_record}", ) @@ -313,7 +313,7 @@ def create_otel_sdk_processor_span_processed(meter: Meter) -> Counter: """The number of spans for which the processing has finished, either successful or failed""" return meter.create_counter( name=OTEL_SDK_PROCESSOR_SPAN_PROCESSED, - description="The number of spans for which the processing has finished, either successful or failed", + description="The number of spans for which the processing has finished, either successful or failed.", unit="{span}", ) @@ -354,7 +354,7 @@ def create_otel_sdk_processor_span_queue_capacity( """The maximum number of spans the queue of a given instance of an SDK span processor can hold""" return meter.create_up_down_counter( name=OTEL_SDK_PROCESSOR_SPAN_QUEUE_CAPACITY, - description="The 
maximum number of spans the queue of a given instance of an SDK span processor can hold", + description="The maximum number of spans the queue of a given instance of an SDK span processor can hold.", unit="{span}", ) @@ -374,7 +374,7 @@ def create_otel_sdk_processor_span_queue_size(meter: Meter) -> UpDownCounter: """The number of spans in the queue of a given instance of an SDK span processor""" return meter.create_up_down_counter( name=OTEL_SDK_PROCESSOR_SPAN_QUEUE_SIZE, - description="The number of spans in the queue of a given instance of an SDK span processor", + description="The number of spans in the queue of a given instance of an SDK span processor.", unit="{span}", ) @@ -421,7 +421,7 @@ def create_otel_sdk_span_live(meter: Meter) -> UpDownCounter: """The number of created spans with `recording=true` for which the end operation has not been called yet""" return meter.create_up_down_counter( name=OTEL_SDK_SPAN_LIVE, - description="The number of created spans with `recording=true` for which the end operation has not been called yet", + description="The number of created spans with `recording=true` for which the end operation has not been called yet.", unit="{span}", ) @@ -454,6 +454,6 @@ def create_otel_sdk_span_started(meter: Meter) -> Counter: """The number of created spans""" return meter.create_counter( name=OTEL_SDK_SPAN_STARTED, - description="The number of created spans", + description="The number of created spans.", unit="{span}", ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/system_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/system_metrics.py index e0ec178a7b7..0bdec11b367 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/system_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/system_metrics.py @@ -71,7 +71,7 @@ def create_system_cpu_logical_count(meter: Meter) -> UpDownCounter: """Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking""" return meter.create_up_down_counter( name=SYSTEM_CPU_LOGICAL_COUNT, - description="Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking", + description="Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking.", unit="{cpu}", ) @@ -89,7 +89,7 @@ def create_system_cpu_physical_count(meter: Meter) -> UpDownCounter: """Reports the number of actual physical processor cores on the hardware""" return meter.create_up_down_counter( name=SYSTEM_CPU_PHYSICAL_COUNT, - description="Reports the number of actual physical processor cores on the hardware", + description="Reports the number of actual physical processor cores on the hardware.", unit="{cpu}", ) @@ -106,7 +106,7 @@ def create_system_cpu_time(meter: Meter) -> Counter: """Seconds each logical CPU spent on each mode""" return meter.create_counter( name=SYSTEM_CPU_TIME, - description="Seconds each logical CPU spent on each mode", + description="Seconds each logical CPU spent on each mode.", unit="s", ) @@ -133,15 +133,17 @@ def create_system_cpu_utilization( SYSTEM_DISK_IO: Final = "system.disk.io" """ +TODO Instrument: counter Unit: By """ def create_system_disk_io(meter: Meter) -> Counter: + """TODO""" return meter.create_counter( name=SYSTEM_DISK_IO, - description="", + description="TODO.", unit="By", ) @@ -164,7 +166,7 @@ def 
create_system_disk_io_time(meter: Meter) -> Counter: """Time disk spent activated""" return meter.create_counter( name=SYSTEM_DISK_IO_TIME, - description="Time disk spent activated", + description="Time disk spent activated.", unit="s", ) @@ -181,22 +183,24 @@ def create_system_disk_limit(meter: Meter) -> UpDownCounter: """The total storage capacity of the disk""" return meter.create_up_down_counter( name=SYSTEM_DISK_LIMIT, - description="The total storage capacity of the disk", + description="The total storage capacity of the disk.", unit="By", ) SYSTEM_DISK_MERGED: Final = "system.disk.merged" """ +TODO Instrument: counter Unit: {operation} """ def create_system_disk_merged(meter: Meter) -> Counter: + """TODO""" return meter.create_counter( name=SYSTEM_DISK_MERGED, - description="", + description="TODO.", unit="{operation}", ) @@ -217,22 +221,24 @@ def create_system_disk_operation_time(meter: Meter) -> Counter: """Sum of the time each operation took to complete""" return meter.create_counter( name=SYSTEM_DISK_OPERATION_TIME, - description="Sum of the time each operation took to complete", + description="Sum of the time each operation took to complete.", unit="s", ) SYSTEM_DISK_OPERATIONS: Final = "system.disk.operations" """ +TODO Instrument: counter Unit: {operation} """ def create_system_disk_operations(meter: Meter) -> Counter: + """TODO""" return meter.create_counter( name=SYSTEM_DISK_OPERATIONS, - description="", + description="TODO.", unit="{operation}", ) @@ -249,7 +255,7 @@ def create_system_filesystem_limit(meter: Meter) -> UpDownCounter: """The total storage capacity of the filesystem""" return meter.create_up_down_counter( name=SYSTEM_FILESYSTEM_LIMIT, - description="The total storage capacity of the filesystem", + description="The total storage capacity of the filesystem.", unit="By", ) @@ -275,6 +281,7 @@ def create_system_filesystem_usage(meter: Meter) -> UpDownCounter: SYSTEM_FILESYSTEM_UTILIZATION: Final = "system.filesystem.utilization" """ +TODO Instrument: gauge Unit: 1 """ @@ -283,10 +290,11 @@ def create_system_filesystem_usage(meter: Meter) -> UpDownCounter: def create_system_filesystem_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: + """TODO""" return meter.create_observable_gauge( name=SYSTEM_FILESYSTEM_UTILIZATION, callbacks=callbacks, - description="", + description="TODO.", unit="1", ) @@ -308,7 +316,7 @@ def create_system_linux_memory_available(meter: Meter) -> UpDownCounter: """An estimate of how much memory is available for starting new applications, without causing swapping""" return meter.create_up_down_counter( name=SYSTEM_LINUX_MEMORY_AVAILABLE, - description="An estimate of how much memory is available for starting new applications, without causing swapping", + description="An estimate of how much memory is available for starting new applications, without causing swapping.", unit="By", ) @@ -335,18 +343,17 @@ def create_system_linux_memory_slab_usage(meter: Meter) -> UpDownCounter: SYSTEM_MEMORY_LIMIT: Final = "system.memory.limit" """ -Total memory available in the system +Total virtual memory available in the system Instrument: updowncounter Unit: By -Note: Its value SHOULD equal the sum of `system.memory.state` over all states. 
""" def create_system_memory_limit(meter: Meter) -> UpDownCounter: - """Total memory available in the system""" + """Total virtual memory available in the system""" return meter.create_up_down_counter( name=SYSTEM_MEMORY_LIMIT, - description="Total memory available in the system.", + description="Total virtual memory available in the system.", unit="By", ) @@ -375,8 +382,6 @@ def create_system_memory_shared(meter: Meter) -> UpDownCounter: Reports memory in use by state Instrument: updowncounter Unit: By -Note: The sum over all `system.memory.state` values SHOULD equal the total memory -available on the system, that is `system.memory.limit`. """ @@ -391,6 +396,7 @@ def create_system_memory_usage(meter: Meter) -> UpDownCounter: SYSTEM_MEMORY_UTILIZATION: Final = "system.memory.utilization" """ +TODO Instrument: gauge Unit: 1 """ @@ -399,25 +405,28 @@ def create_system_memory_usage(meter: Meter) -> UpDownCounter: def create_system_memory_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: + """TODO""" return meter.create_observable_gauge( name=SYSTEM_MEMORY_UTILIZATION, callbacks=callbacks, - description="", + description="TODO.", unit="1", ) SYSTEM_NETWORK_CONNECTION_COUNT: Final = "system.network.connection.count" """ +TODO Instrument: updowncounter Unit: {connection} """ def create_system_network_connection_count(meter: Meter) -> UpDownCounter: + """TODO""" return meter.create_up_down_counter( name=SYSTEM_NETWORK_CONNECTION_COUNT, - description="", + description="TODO.", unit="{connection}", ) @@ -432,33 +441,11 @@ def create_system_network_connections(meter: Meter) -> UpDownCounter: """Deprecated, use `system.network.connection.count` instead""" return meter.create_up_down_counter( name=SYSTEM_NETWORK_CONNECTIONS, - description="Deprecated, use `system.network.connection.count` instead", + description="Deprecated, use `system.network.connection.count` instead.", unit="{connection}", ) -SYSTEM_NETWORK_DROPPED: Final = "system.network.dropped" -""" -Count of packets that are dropped or discarded even though there was no error -Instrument: counter -Unit: {packet} -Note: Measured as: - -- Linux: the `drop` column in `/proc/dev/net` ([source](https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html)) -- Windows: [`InDiscards`/`OutDiscards`](https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2) - from [`GetIfEntry2`](https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2). -""" - - -def create_system_network_dropped(meter: Meter) -> Counter: - """Count of packets that are dropped or discarded even though there was no error""" - return meter.create_counter( - name=SYSTEM_NETWORK_DROPPED, - description="Count of packets that are dropped or discarded even though there was no error", - unit="{packet}", - ) - - SYSTEM_NETWORK_ERRORS: Final = "system.network.errors" """ Count of network errors detected @@ -466,7 +453,7 @@ def create_system_network_dropped(meter: Meter) -> Counter: Unit: {error} Note: Measured as: -- Linux: the `errs` column in `/proc/dev/net` ([source](https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html)). +- Linux: the `errs` column in `/proc/net/dev` ([source](https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html)). 
- Windows: [`InErrors`/`OutErrors`](https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2) from [`GetIfEntry2`](https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2). """ @@ -476,67 +463,97 @@ def create_system_network_errors(meter: Meter) -> Counter: """Count of network errors detected""" return meter.create_counter( name=SYSTEM_NETWORK_ERRORS, - description="Count of network errors detected", + description="Count of network errors detected.", unit="{error}", ) SYSTEM_NETWORK_IO: Final = "system.network.io" """ +TODO Instrument: counter Unit: By """ def create_system_network_io(meter: Meter) -> Counter: + """TODO""" return meter.create_counter( name=SYSTEM_NETWORK_IO, - description="", + description="TODO.", unit="By", ) -SYSTEM_NETWORK_PACKETS: Final = "system.network.packets" +SYSTEM_NETWORK_PACKET_COUNT: Final = "system.network.packet.count" """ +TODO Instrument: counter Unit: {packet} """ -def create_system_network_packets(meter: Meter) -> Counter: +def create_system_network_packet_count(meter: Meter) -> Counter: + """TODO""" + return meter.create_counter( + name=SYSTEM_NETWORK_PACKET_COUNT, + description="TODO.", + unit="{packet}", + ) + + +SYSTEM_NETWORK_PACKET_DROPPED: Final = "system.network.packet.dropped" +""" +Count of packets that are dropped or discarded even though there was no error +Instrument: counter +Unit: {packet} +Note: Measured as: + +- Linux: the `drop` column in `/proc/net/dev` ([source](https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html)) +- Windows: [`InDiscards`/`OutDiscards`](https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2) + from [`GetIfEntry2`](https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2). 
+""" + + +def create_system_network_packet_dropped(meter: Meter) -> Counter: + """Count of packets that are dropped or discarded even though there was no error""" return meter.create_counter( - name=SYSTEM_NETWORK_PACKETS, - description="", + name=SYSTEM_NETWORK_PACKET_DROPPED, + description="Count of packets that are dropped or discarded even though there was no error.", unit="{packet}", ) SYSTEM_PAGING_FAULTS: Final = "system.paging.faults" """ +TODO Instrument: counter Unit: {fault} """ def create_system_paging_faults(meter: Meter) -> Counter: + """TODO""" return meter.create_counter( name=SYSTEM_PAGING_FAULTS, - description="", + description="TODO.", unit="{fault}", ) SYSTEM_PAGING_OPERATIONS: Final = "system.paging.operations" """ +TODO Instrument: counter Unit: {operation} """ def create_system_paging_operations(meter: Meter) -> Counter: + """TODO""" return meter.create_counter( name=SYSTEM_PAGING_OPERATIONS, - description="", + description="TODO.", unit="{operation}", ) @@ -553,13 +570,14 @@ def create_system_paging_usage(meter: Meter) -> UpDownCounter: """Unix swap or windows pagefile usage""" return meter.create_up_down_counter( name=SYSTEM_PAGING_USAGE, - description="Unix swap or windows pagefile usage", + description="Unix swap or windows pagefile usage.", unit="By", ) SYSTEM_PAGING_UTILIZATION: Final = "system.paging.utilization" """ +TODO Instrument: gauge Unit: 1 """ @@ -568,10 +586,11 @@ def create_system_paging_usage(meter: Meter) -> UpDownCounter: def create_system_paging_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: + """TODO""" return meter.create_observable_gauge( name=SYSTEM_PAGING_UTILIZATION, callbacks=callbacks, - description="", + description="TODO.", unit="1", ) @@ -588,7 +607,7 @@ def create_system_process_count(meter: Meter) -> UpDownCounter: """Total number of processes in each state""" return meter.create_up_down_counter( name=SYSTEM_PROCESS_COUNT, - description="Total number of processes in each state", + description="Total number of processes in each state.", unit="{process}", ) @@ -605,7 +624,7 @@ def create_system_process_created(meter: Meter) -> Counter: """Total number of processes created over uptime of the host""" return meter.create_counter( name=SYSTEM_PROCESS_CREATED, - description="Total number of processes created over uptime of the host", + description="Total number of processes created over uptime of the host.", unit="{process}", ) @@ -627,6 +646,6 @@ def create_system_uptime( return meter.create_observable_gauge( name=SYSTEM_UPTIME, callbacks=callbacks, - description="The time the system has been running", + description="The time the system has been running.", unit="s", ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/vcs_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/vcs_metrics.py index c232751c546..f3737ff287b 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/vcs_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/vcs_metrics.py @@ -49,7 +49,7 @@ def create_vcs_change_count(meter: Meter) -> UpDownCounter: """The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. 
open or merged)""" return meter.create_up_down_counter( name=VCS_CHANGE_COUNT, - description="The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)", + description="The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged).", unit="{change}", ) @@ -129,7 +129,7 @@ def create_vcs_contributor_count( return meter.create_observable_gauge( name=VCS_CONTRIBUTOR_COUNT, callbacks=callbacks, - description="The number of unique contributors to a repository", + description="The number of unique contributors to a repository.", unit="{contributor}", ) @@ -191,7 +191,7 @@ def create_vcs_ref_revisions_delta( return meter.create_observable_gauge( name=VCS_REF_REVISIONS_DELTA, callbacks=callbacks, - description="The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute", + description="The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute.", unit="{revision}", ) @@ -211,7 +211,7 @@ def create_vcs_ref_time( return meter.create_observable_gauge( name=VCS_REF_TIME, callbacks=callbacks, - description="Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`", + description="Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`.", unit="s", ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/schemas.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/schemas.py index 6258f869547..6adaa8c89a4 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/schemas.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/schemas.py @@ -80,5 +80,10 @@ class Schemas(Enum): The URL of the OpenTelemetry schema version 1.36.0. """ + V1_37_0 = "https://opentelemetry.io/schemas/1.37.0" + """ + The URL of the OpenTelemetry schema version 1.37.0. + """ + # when generating new semantic conventions, # make sure to add new versions version here. diff --git a/scripts/semconv/generate.sh b/scripts/semconv/generate.sh index 0fdaa6f81b1..cc5f7bfec38 100755 --- a/scripts/semconv/generate.sh +++ b/scripts/semconv/generate.sh @@ -5,9 +5,9 @@ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" ROOT_DIR="${SCRIPT_DIR}/../.." # freeze the spec version to make SemanticAttributes generation reproducible -SEMCONV_VERSION=1.36.0 +SEMCONV_VERSION=1.37.0 SEMCONV_VERSION_TAG=v$SEMCONV_VERSION -OTEL_WEAVER_IMG_VERSION=v0.16.1 +OTEL_WEAVER_IMG_VERSION=v0.17.0 INCUBATING_DIR=_incubating cd ${SCRIPT_DIR} diff --git a/scripts/semconv/templates/registry/semantic_attributes.j2 b/scripts/semconv/templates/registry/semantic_attributes.j2 index 9de036d75aa..e9ffeebe7af 100644 --- a/scripts/semconv/templates/registry/semantic_attributes.j2 +++ b/scripts/semconv/templates/registry/semantic_attributes.j2 @@ -34,7 +34,9 @@ from typing import Final {%- macro write_docstring(name, brief, note, deprecated_note, stability, multiline) -%} {%- if multiline %}""" {% endif %} - {%- if c.str_or_empty(deprecated_note)|length -%} + {%- if deprecated_note is mapping -%} +{{prefix}}Deprecated: {{c.comment_with_prefix(deprecated_note.note, "")}}. + {%- elif c.str_or_empty(deprecated_note)|length -%} {{prefix}}Deprecated: {{c.comment_with_prefix(deprecated_note, "")}}. 
{%- elif ctx.filter == "any" and stability == "stable" -%} {{prefix}}Deprecated in favor of stable :py:const:`{{stable_class_ref(name, '.')}}`. From b898cfe3476c7c5da0660a47feb85e6316e02383 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Tue, 26 Aug 2025 17:28:41 +0200 Subject: [PATCH 2/2] Add changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0bbae7ee0a..b7c72b78411 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#4695](https://github.com/open-telemetry/opentelemetry-python/pull/4695)). - docs: linked the examples with their github source code location and added Prometheus example ([#4728](https://github.com/open-telemetry/opentelemetry-python/pull/4728)) +- semantic-conventions: Bump to 1.37.0 + ([#4731](https://github.com/open-telemetry/opentelemetry-python/pull/4731)) ## Version 1.36.0/0.57b0 (2025-07-29)
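
A minimal usage sketch (not part of the patch) showing how the regenerated helpers pair with the new `Schemas.V1_37_0` entry. The module paths and helper names come from the files changed above; the meter name, attribute keys/values, and measurements are illustrative assumptions only.

```python
from opentelemetry import metrics

from opentelemetry.semconv.schemas import Schemas
from opentelemetry.semconv._incubating.metrics.hw_metrics import (
    create_hw_power_supply_usage,
)
from opentelemetry.semconv._incubating.metrics.k8s_metrics import (
    create_k8s_pod_filesystem_usage,
)

# Meter name is a placeholder; the schema URL comes from the new Schemas.V1_37_0 entry.
meter = metrics.get_meter_provider().get_meter(
    "semconv-1.37-example", schema_url=Schemas.V1_37_0.value
)

# UpDownCounters created via the regenerated semconv 1.37.0 helpers.
psu_usage = create_hw_power_supply_usage(meter)
pod_fs_usage = create_k8s_pod_filesystem_usage(meter)

# Example measurements; attribute values here are purely illustrative.
psu_usage.add(350, attributes={"hw.id": "psu-0", "hw.type": "power_supply"})
pod_fs_usage.add(10 * 1024 * 1024, attributes={"k8s.pod.name": "example-pod"})
```

Without an SDK configured this runs as a no-op, which is enough to confirm that the new helpers import cleanly and accept a standard `Meter`.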