diff --git a/Makefile b/Makefile
index 16b6768853700..828d6a6b644b8 100644
--- a/Makefile
+++ b/Makefile
@@ -70,6 +70,9 @@ build-cdocs:
 	@echo "Compiling .mdoc files to HTML";
 	@node ./local/bin/js/cdocs-build.js;
 
+llm-support-demo:
+	@node ./assets/scripts/llm-support-demo.js;
+
 # build .mdoc.md files, then watch for changes
 watch-cdocs:
 	@echo "Compiling .mdoc files to HTML";
diff --git a/assets/scripts/llm-support-demo.js b/assets/scripts/llm-support-demo.js
new file mode 100644
index 0000000000000..cf355275d94ff
--- /dev/null
+++ b/assets/scripts/llm-support-demo.js
@@ -0,0 +1,16 @@
const path = require('path');
const { htmlDirToMdoc } = require('html-to-mdoc');

const inDir = path.resolve(__dirname, '../../public/opentelemetry');
const outDir = path.resolve(__dirname, '../../opentelemetry-mdoc');
console.log(`Converting HTML files in ${inDir} to .mdoc files in ${outDir}`);

htmlDirToMdoc({
  inDir,
  outDir,
  options: {
    purgeOutDirBeforeProcessing: true
  }
}).then((result) => {
  console.log(result.stats);
});
diff --git a/opentelemetry-mdoc/compatibility/index.md b/opentelemetry-mdoc/compatibility/index.md
new file mode 100644
index 0000000000000..f488af5fb23f0
--- /dev/null
+++ b/opentelemetry-mdoc/compatibility/index.md
@@ -0,0 +1,77 @@
---
title: Datadog and OpenTelemetry Compatibility
description: Datadog, the leading service for cloud-scale monitoring.
breadcrumbs: Docs > OpenTelemetry in Datadog > Datadog and OpenTelemetry Compatibility
---

# Datadog and OpenTelemetry Compatibility

## Overview{% #overview %}

Datadog offers multiple setup options to accommodate various use cases, from full OpenTelemetry (OTel) implementations to hybrid setups using both OpenTelemetry and Datadog components. This page covers the compatibility between different setups and supported Datadog products and features, helping you choose the best configuration for your needs.

## Setups{% #setups %}

Datadog supports several configurations for using OpenTelemetry. The primary difference between these setups is the choice of SDK (OpenTelemetry or Datadog) and the collector used to process and forward telemetry data.
+ +| Setup Type | API | SDK | Collector/Agent | +| ------------------------------------------------------------------------------------------------ | ----------------------- | ----------- | --------------------------------------------- | +| [**Datadog SDK + DDOT (Recommended)**](http://localhost:1313/opentelemetry/setup/ddot_collector) | Datadog API or OTel API | Datadog SDK | Datadog Distribution of OTel Collector (DDOT) | +| [**OTel SDK + DDOT**](http://localhost:1313/opentelemetry/setup/ddot_collector) | OTel API | OTel SDK | Datadog Distribution of OTel Collector (DDOT) | +| [**OTel SDK + OSS Collector**](http://localhost:1313/opentelemetry/collector_exporter/) | OTel API | OTel SDK | OTel Collector (OSS) | +| [**Direct OTLP Ingest**](http://localhost:1313/opentelemetry/setup/agentless) | OTel API | OTel SDK | N/A (Direct to Datadog endpoint) | + +## Feature compatibility{% #feature-compatibility %} + +The following table shows feature compatibility across different setups: + +| Feature | Datadog SDK + DDOT (Recommended) | OTel SDK + DDOT | OTel SDK + OSS Collector | Direct OTLP Ingest | +| -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| [Cloud SIEM](http://localhost:1313/security/cloud_siem/) | yes | yes | yes | yes | +| [Correlated Traces, Metrics, Logs](http://localhost:1313/opentelemetry/correlate/) | yes | yes | yes | yes | +| [Distributed Tracing](http://localhost:1313/tracing/trace_collection/) | yes | yes | yes | yes | +| [Runtime Metrics](http://localhost:1313/tracing/metrics/runtime_metrics/) | yes | yes(Java, .NET, Go only) | yes(Java, .NET, Go only) | yes(Java, .NET, Go only) | +| [Span Links](http://localhost:1313/tracing/trace_collection/span_links/) | yes | yes | yes | yes | +| [Trace Metrics](http://localhost:1313/tracing/metrics/metrics_namespace/) | yes | yes | yes | yes(Sampled (Trace metrics are calculated on the backend based on ingested spans that have passed through sampling, not on 100% of local traces before sampling.)) | +| [Database Monitoring](http://localhost:1313/opentelemetry/correlate/dbm_and_traces/) (DBM) | yes | yes | yes | +| [Cloud Network Monitoring](http://localhost:1313/network_monitoring/performance/) (CNM) | yes | yes | +| [Live Container Monitoring/Kubernetes Explorer](http://localhost:1313/containers/) | yes | yes | +| [Live Processes](http://localhost:1313/infrastructure/process/) | yes | yes | +| [Universal Service Monitoring](http://localhost:1313/universal_service_monitoring/) (USM) | yes | yes | +| [App and API Protection](http://localhost:1313/security/application_security/) (AAP) | yes | +| [Continuous Profiler](http://localhost:1313/profiler/) | yes | +| [Data Jobs Monitoring](http://localhost:1313/data_jobs/) (DJM) | yes | +| [Data Streams Monitoring](http://localhost:1313/data_streams/) (DSM) | yes | N/A (OTel does not offer DSM functionality) | N/A (OTel does not offer DSM functionality) | +| [Real User Monitoring](http://localhost:1313/opentelemetry/correlate/rum_and_traces/?tab=browserrum#opentelemetry-support) (RUM) | yes | +| [Source code integration](http://localhost:1313/integrations/guide/source-code-integration/) | yes | + +## More details{% 
#more-details %} + +### Runtime metrics{% #runtime-metrics %} + +Setups using the OpenTelemetry SDK follow the [OpenTelemetry Runtime Metrics](http://localhost:1313/opentelemetry/integrations/runtime_metrics/) specification. + +### Real User Monitoring (RUM){% #real-user-monitoring-rum %} + +To enable full RUM functionality, you need to [inject supported headers](http://localhost:1313/real_user_monitoring/correlate_with_other_telemetry/apm/) to correlate RUM and traces. + +### Cloud Network Monitoring (CNM){% #cloud-network-monitoring-cnm %} + +Span-level or endpoint-level monitoring is **not** supported. + +For more information, see [Cloud Network Monitoring Setup](http://localhost:1313/network_monitoring/cloud_network_monitoring/setup/). + +### Source Code Integration{% #source-code-integration %} + +For unsupported languages in OpenTelemetry setups, [configure telemetry tagging](http://localhost:1313/integrations/guide/source-code-integration/?tab=go#configure-telemetry-tagging) to link data to a specific commit. + +## Best practices{% #best-practices %} + +When using Datadog and OpenTelemetry together, Datadog recommends the following best practices to ensure optimal performance and to avoid potential issues: + +- **Avoid mixed instrumentation**: Do not use both a Datadog SDK and an OpenTelemetry SDK to instrument the same application, as this leads to undefined behavior. +- **Avoid Agent and separate Collector on same host**: Do not run the Datadog Agent and a separate OpenTelemetry Collector on the same host, as this may cause issues. However, you can run Agents and Collectors on different hosts within the same fleet. + +## Further reading{% #further-reading %} + +- [OpenTelemetry Troubleshooting](http://localhost:1313/opentelemetry/troubleshooting/) diff --git a/opentelemetry-mdoc/config/collector_batch_memory/index.md b/opentelemetry-mdoc/config/collector_batch_memory/index.md new file mode 100644 index 0000000000000..b923cb69a76f1 --- /dev/null +++ b/opentelemetry-mdoc/config/collector_batch_memory/index.md @@ -0,0 +1,76 @@ +--- +title: Batch and Memory Settings +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > OpenTelemetry Configuration > Batch and + Memory Settings +--- + +# Batch and Memory Settings + +## Overview{% #overview %} + +To edit your OpenTelemetry Collector batch and memory settings, configure the [batch processor](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor/batchprocessor) in your Datadog Exporter. + +For more information, see the OpenTelemetry project documentation for the [batch processor](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor/batchprocessor). + +## Setup{% #setup %} + +{% tab title="Host" %} +Add the following lines to your Collector configuration: + +```yaml +processors: + batch: + # Datadog APM Intake limit is 3.2MB. + send_batch_max_size: 1000 + send_batch_size: 100 + timeout: 10s + memory_limiter: + check_interval: 1s + limit_mib: 1000 +``` + +{% /tab %} + +{% tab title="Kubernetes" %} +Add the following lines to `values.yaml`: + +```yaml +resources: + limits: + cpu: 512m + memory: 1Gi +``` + +Add the following in the Collector configuration: + +```yaml +processors: + batch: + # Datadog APM Intake limit is 3.2MB. + send_batch_max_size: 1000 + send_batch_size: 100 + timeout: 10s +``` + +{% /tab %} + +## Data collected{% #data-collected %} + +None. 
+ +## Full example configuration{% #full-example-configuration %} + +For a full working example configuration with the Datadog exporter, see [`batch-memory.yaml`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/batch-memory.yaml). + +## Example logging output{% #example-logging-output %} + +``` +2023-12-05T09:52:58.568Z warn memorylimiterprocessor@v0.90.1/memorylimiter.go:276 +Memory usage is above hard limit. Forcing a GC. +{"kind": "processor", "name": "memory_limiter", "pipeline": "traces", "cur_mem_mib": 44} +2023-12-05T09:52:58.590Z info memorylimiterprocessor@v0.90.1/memorylimiter.go:266 +Memory usage after GC. +{"kind": "processor", "name": "memory_limiter", "pipeline": "traces", "cur_mem_mib": 34} +``` diff --git a/opentelemetry-mdoc/config/environment_variable_support/index.md b/opentelemetry-mdoc/config/environment_variable_support/index.md new file mode 100644 index 0000000000000..641ce792fb16b --- /dev/null +++ b/opentelemetry-mdoc/config/environment_variable_support/index.md @@ -0,0 +1,186 @@ +--- +title: Using OpenTelemetry Environment Variables with Datadog SDKs +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > OpenTelemetry Configuration > Using + OpenTelemetry Environment Variables with Datadog SDKs +--- + +# Using OpenTelemetry Environment Variables with Datadog SDKs + +Datadog SDKs implement the OpenTelemetry Tracing APIs, allowing you to use OpenTelemetry environment variables to configure Datadog tracing for your applications. Replace the OpenTelemetry SDK with the Datadog SDK in your application to receive traces and additional Datadog telemetry with minimal changes to your existing configuration. This page describes the OpenTelemetry SDK options Datadog supports. + +{% alert level="info" %} +If both Datadog and OpenTelemetry environment variables are set, Datadog takes precedence. Datadog defaults also override OpenTelemetry defaults. See the relevant [SDK Configuration page](http://localhost:1313/tracing/trace_collection/library_config/) for default values and more information. +{% /alert %} + +## General SDK configuration{% #general-sdk-configuration %} + +Datadog SDKs support the following general OpenTelemetry SDK options. For more information, see the related [OpenTelemetry documentation](https://opentelemetry.io/docs/specs/otel/configuration/SDK-environment-variables/#general-SDK-configuration). + +{% dl %} + +{% dt %} +`OTEL_SERVICE_NAME` +{% /dt %} + +{% dd %} +****Datadog convention****: `DD_SERVICE`Sets the service name**Notes**: If `service.name` is also provided in `OTEL_RESOURCE_ATTRIBUTES`, then `OTEL_SERVICE_NAME` takes precedence +{% /dd %} + +{% dt %} +`OTEL_LOG_LEVEL` +{% /dt %} + +{% dd %} +****Datadog convention****: `DD_LOG_LEVEL`Log level used by the SDK logger**Notes**: A log level of debug also maps to `DD_TRACE_DEBUG=true`In the Node.js & PHP SDKs this maps to `DD_TRACE_LOG_LEVEL`In the Go SDK only mapped values between `OTEL_LOG_LEVEL` & `DD_TRACE_DEBUG` are supported: +- `info`|`false` +- `debug`|`true`**Not Supported In**: Python, .NET, Ruby, and Go SDKs + +{% /dd %} + +{% dt %} +`OTEL_PROPAGATORS` +{% /dt %} + +{% dd %} +****Datadog convention****: `DD_TRACE_PROPAGATION_STYLE`Propagators to be used as a comma-separated list**Notes**: The only supported values for most Datadog SDKs are `tracecontext`, `b3`, `b3multi`, `none`, `datadog`. 
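
Note that the processors shown above only take effect once they are referenced in a pipeline. The following is a minimal sketch of that wiring; the `otlp` receiver and `datadog` exporter names are assumptions and should match the component names defined in your own configuration, and `memory_limiter` applies only if you configured it as in the host example:

```yaml
service:
  pipelines:
    traces:
      receivers: [otlp]
      # Run memory_limiter before batch so memory checks happen
      # before data is buffered into batches.
      processors: [memory_limiter, batch]
      exporters: [datadog]
```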
`xray` is also supported for the Java SDK. Values MUST be deduplicated in order to register a `Propagator` only once.
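
For example, the following two settings are intended to configure the same propagation behavior; the values shown are illustrative:

```sh
# OpenTelemetry-style configuration read by the Datadog SDK
export OTEL_PROPAGATORS=tracecontext,datadog

# Equivalent Datadog-native configuration
export DD_TRACE_PROPAGATION_STYLE=tracecontext,datadog
```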
+ +{% dl %} + +{% dt %} +`OTEL_INSTRUMENTATION_COMMON_DEFAULT_ENABLED` +{% /dt %} + +{% dd %} +****Datadog convention****: `!DD_INTEGRATIONS_ENABLED`Set to `false` to disable all instrumentation in the agent**Notes**: Mapped values between `OTEL_INSTRUMENTATION_COMMON_DEFAULT_ENABLED` & `DD_INTEGRATIONS_ENABLED`: +- `true`|`false` +- `false`|`true` + +{% /dd %} + +{% dt %} +`OTEL_INSTRUMENTATION_[NAME]_ENABLED` +{% /dt %} + +{% dd %} +**Description**: Enables/disables the named OTel drop-in instrumentation +{% /dd %} + +{% dt %} +`OTEL_JAVAAGENT_CONFIGURATION_FILE` +{% /dt %} + +{% dd %} +****Datadog convention****: `DD_TRACE_CONFIG`Path to valid Java properties file which contains the agent configuration**Notes**: When OTEL_JAVAAGENT_CONFIGURATION_FILE and DD_TRACE_CONFIG are both set we apply the configuration from both files. This is an exception to the usual rule where the Datadog setting overrides the OTel one +{% /dd %} + +{% dt %} +`OTEL_INSTRUMENTATION_HTTP_CLIENT_CAPTURE_REQUEST_HEADERS` +{% /dt %} + +{% dd %} +****Datadog convention****: `DD_TRACE_REQUEST_HEADER_TAGS`A comma-separated list of HTTP header names. HTTP client instrumentations capture HTTP request header values for all configured header names**Notes**: Header tagging configured using OTel environment variables follows the OTel tag name convention of `http.request.header.` rather than the Datadog convention of `http.request.headers.` +{% /dd %} + +{% dt %} +`OTEL_INSTRUMENTATION_HTTP_CLIENT_CAPTURE_RESPONSE_HEADERS` +{% /dt %} + +{% dd %} +****Datadog convention****: `DD_TRACE_RESPONSE_HEADER_TAGS`A comma-separated list of HTTP header names. HTTP client instrumentations capture HTTP response header values for all configured header names**Notes**: Header tagging configured using OTel environment variables follows the OTel tag name convention of `http.response.header.` rather than the Datadog convention of `http.response.headers.` +{% /dd %} + +{% dt %} +`OTEL_INSTRUMENTATION_HTTP_SERVER_CAPTURE_REQUEST_HEADERS` +{% /dt %} + +{% dd %} +****Datadog convention****: `DD_TRACE_REQUEST_HEADER_TAGS`A comma-separated list of HTTP header names. HTTP server instrumentations capture HTTP request header values for all configured header names**Notes**: Header tagging configured using OTel environment variables follows the OTel tag name convention of `http.request.header.` rather than the Datadog convention of `http.request.headers.` +{% /dd %} + +{% dt %} +`OTEL_INSTRUMENTATION_HTTP_SERVER_CAPTURE_RESPONSE_HEADERS` +{% /dt %} + +{% dd %} +****Datadog convention****: `DD_TRACE_RESPONSE_HEADER_TAGS`A comma-separated list of HTTP header names. HTTP server instrumentations capture HTTP response header values for all configured header names**Notes**: Header tagging configured using OTel environment variables follows the OTel tag name convention of `http.response.header.` rather than the Datadog convention of `http.response.headers.` +{% /dd %} + +{% dt %} +`OTEL_JAVAAGENT_EXTENSIONS` +{% /dt %} + +{% dd %} +****Datadog convention****: `DD_TRACE_EXTENSIONS_PATH`A comma-separated list of paths to extension jar files, or folders containing jar files. If pointing to a folder, every jar file in that folder is treated as a separate, independent extension. 
+{% /dd %} + +{% /dl %} + +## Further Reading{% #further-reading %} + +- [.NET Core SDK Configuration](http://localhost:1313/tracing/trace_collection/library_config/dotnet-core) +- [.NET Framework SDK Configuration](http://localhost:1313/tracing/trace_collection/library_config/dotnet-framework) +- [Go SDK Configuration](http://localhost:1313/tracing/trace_collection/library_config/go) +- [Java SDK Configuration](http://localhost:1313/tracing/trace_collection/library_config/java) +- [Node.js SDK Configuration](http://localhost:1313/tracing/trace_collection/library_config/nodejs) +- [PHP SDK Configuration](http://localhost:1313/tracing/trace_collection/library_config/php) +- [Python SDK Configuration](http://localhost:1313/tracing/trace_collection/library_config/python) +- [Ruby SDK Configuration](http://localhost:1313/tracing/trace_collection/library_config/ruby) diff --git a/opentelemetry-mdoc/config/hostname_tagging/index.md b/opentelemetry-mdoc/config/hostname_tagging/index.md new file mode 100644 index 0000000000000..d96224eab38d2 --- /dev/null +++ b/opentelemetry-mdoc/config/hostname_tagging/index.md @@ -0,0 +1,495 @@ +--- +title: Hostname and Tagging +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > OpenTelemetry Configuration > Hostname and + Tagging +--- + +# Hostname and Tagging + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/hostname_tagging.7a4413449f667cb67c92e808cc795941.png?auto=format" + alt="Hostname information collected from OpenTelemetry" /%} + +## Overview{% #overview %} + +To extract the correct hostname and host tags, Datadog Exporter uses the [resource detection processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md) and the [Kubernetes attributes processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/k8sattributesprocessor/README.md). These processors allow for extracting information from hosts and containers in the form of [resource semantic conventions](https://opentelemetry.io/docs/specs/semconv/resource/), which is then used to build the hostname, host tags, and container tags. These tags enable automatic correlation among telemetry signals and tag-based navigation for filtering and grouping telemetry data within Datadog. + +For more information, see the OpenTelemetry project documentation for the [resource detection](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md) and [Kubernetes attributes](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/k8sattributesprocessor/README.md) processors. 
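
As an illustration, the following two variables point the Java agent at the same (hypothetical) extensions directory:

```sh
# OpenTelemetry-style configuration
export OTEL_JAVAAGENT_EXTENSIONS=/opt/tracer-extensions

# Equivalent Datadog convention
export DD_TRACE_EXTENSIONS_PATH=/opt/tracer-extensions
```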
+ +## Setup{% #setup %} + +{% tab title="Host" %} +Add the following lines to your Collector configuration: + +```yaml +processors: + resourcedetection: + # bare metal + detectors: [env, system] + system: + resource_attributes: + os.description: + enabled: true + host.arch: + enabled: true + host.cpu.vendor.id: + enabled: true + host.cpu.family: + enabled: true + host.cpu.model.id: + enabled: true + host.cpu.model.name: + enabled: true + host.cpu.stepping: + enabled: true + host.cpu.cache.l2.size: + enabled: true + # GCP + detectors: [env, gcp, system] + # AWS + detectors: [env, ecs, ec2, system] + # Azure + detectors: [env, azure, system] + timeout: 2s + override: false +``` + +{% /tab %} + +{% tab title="Kubernetes Daemonset" %} +Add the following lines to `values.yaml`: + +```yaml +presets: + kubernetesAttributes: + enabled: true +``` + +The Helm `kubernetesAttributes` preset sets up the service account necessary for the Kubernetes attributes processor to extract metadata from pods. Read [Important Components for Kubernetes](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor) for additional information about the required service account. + +Add the following in the Collector configuration: + +```yaml +processors: + k8sattributes: + passthrough: false + auth_type: "serviceAccount" + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.ip + extract: + metadata: + - k8s.pod.name + - k8s.pod.uid + - k8s.deployment.name + - k8s.node.name + - k8s.namespace.name + - k8s.pod.start_time + - k8s.replicaset.name + - k8s.replicaset.uid + - k8s.daemonset.name + - k8s.daemonset.uid + - k8s.job.name + - k8s.job.uid + - k8s.cronjob.name + - k8s.statefulset.name + - k8s.statefulset.uid + - container.image.name + - container.image.tag + - container.id + - k8s.container.name + - container.image.name + - container.image.tag + - container.id + labels: + - tag_name: kube_app_name + key: app.kubernetes.io/name + from: pod + - tag_name: kube_app_instance + key: app.kubernetes.io/instance + from: pod + - tag_name: kube_app_version + key: app.kubernetes.io/version + from: pod + - tag_name: kube_app_component + key: app.kubernetes.io/component + from: pod + - tag_name: kube_app_part_of + key: app.kubernetes.io/part-of + from: pod + - tag_name: kube_app_managed_by + key: app.kubernetes.io/managed-by + from: pod + resourcedetection: + # remove the ones that you do not use + detectors: [env, eks, ec2, aks, azure, gke, gce, system] + timeout: 2s + override: false +``` + +{% /tab %} + +{% tab title="Kubernetes DaemonSet -> Gateway" %} +Add the following lines to `values.yaml`: + +```yaml +presets: + kubernetesAttributes: + enabled: true +``` + +Use the Helm `k8sattributes` preset in both Daemonset and Gateway, to set up the service account necessary for `k8sattributesprocessor` to extract metadata from pods. Read [Important Components for Kubernetes](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor) for additional information about the required service account. + +DaemonSet: + +```yaml +processors: + k8sattributes: + passthrough: true + auth_type: "serviceAccount" + resourcedetection: + detectors: [env, , , , system] + timeout: 2s + override: false +``` + +Because the processor is in passthrough mode in the DaemonSet, it adds only the pod IP addresses. These addresses are then used by the Gateway processor to make Kubernetes API calls and extract metadata. 
+ +Gateway: + +```yaml +processors: + k8sattributes: + passthrough: false + auth_type: "serviceAccount" + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.ip + extract: + metadata: + - k8s.pod.name + - k8s.pod.uid + - k8s.deployment.name + - k8s.node.name + - k8s.namespace.name + - k8s.pod.start_time + - k8s.replicaset.name + - k8s.replicaset.uid + - k8s.daemonset.name + - k8s.daemonset.uid + - k8s.job.name + - k8s.job.uid + - k8s.cronjob.name + - k8s.statefulset.name + - k8s.statefulset.uid + - container.image.name + - container.image.tag + - container.id + - k8s.container.name + - container.image.name + - container.image.tag + - container.id + labels: + - tag_name: kube_app_name + key: app.kubernetes.io/name + from: pod + - tag_name: kube_app_instance + key: app.kubernetes.io/instance + from: pod + - tag_name: kube_app_version + key: app.kubernetes.io/version + from: pod + - tag_name: kube_app_component + key: app.kubernetes.io/component + from: pod + - tag_name: kube_app_part_of + key: app.kubernetes.io/part-of + from: pod + - tag_name: kube_app_managed_by + key: app.kubernetes.io/managed-by + from: pod +``` + +{% /tab %} + +{% tab title="Kubernetes Gateway" %} +Add the following lines to `values.yaml`: + +```yaml +presets: + kubernetesAttributes: + enabled: true +``` + +The Helm `kubernetesAttributes` preset sets up the service account necessary for the Kubernetes attributes processor to extract metadata from pods. Read [Important Components for Kubernetes](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor) for additional information about the required service account. + +Add the following in the Collector configuration: + +```yaml +processors: + k8sattributes: + passthrough: false + auth_type: "serviceAccount" + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.ip + extract: + metadata: + - k8s.pod.name + - k8s.pod.uid + - k8s.deployment.name + - k8s.node.name + - k8s.namespace.name + - k8s.pod.start_time + - k8s.replicaset.name + - k8s.replicaset.uid + - k8s.daemonset.name + - k8s.daemonset.uid + - k8s.job.name + - k8s.job.uid + - k8s.cronjob.name + - k8s.statefulset.name + - k8s.statefulset.uid + - container.image.name + - container.image.tag + - container.id + - k8s.container.name + - container.image.name + - container.image.tag + - container.id + labels: + - tag_name: kube_app_name + key: app.kubernetes.io/name + from: pod + - tag_name: kube_app_instance + key: app.kubernetes.io/instance + from: pod + - tag_name: kube_app_version + key: app.kubernetes.io/version + from: pod + - tag_name: kube_app_component + key: app.kubernetes.io/component + from: pod + - tag_name: kube_app_part_of + key: app.kubernetes.io/part-of + from: pod + - tag_name: kube_app_managed_by + key: app.kubernetes.io/managed-by + from: pod + resourcedetection: + detectors: [env, , , , system] + timeout: 2s + override: false +``` + +{% /tab %} + +## Data collected{% #data-collected %} + +| OpenTelemetry attribute | Datadog Tag | Processor | +| --------------------------- | --------------------------------------------------------- | ------------------------------------------------------- | +| `host.arch` | `resourcedetectionprocessor{system}` | +| `host.name` | `resourcedetectionprocessor{system,gcp,ec2,azure}` | +| `host.id` | `resourcedetectionprocessor{system,gcp,ec2,azure}` | +| `host.cpu.vendor.id` | `resourcedetectionprocessor{system}` | +| `host.cpu.family` | `resourcedetectionprocessor{system}` | +| 
`host.cpu.model.id` | `resourcedetectionprocessor{system}` | +| `host.cpu.model.name` | `resourcedetectionprocessor{system}` | +| `host.cpu.stepping` | `resourcedetectionprocessor{system}` | +| `host.cpu.cache.l2.size` | `resourcedetectionprocessor{system}` | +| `os.description` | `resourcedetectionprocessor{system}` | +| `os.type` | `resourcedetectionprocessor{system}` | +| `cloud.provider` | `cloud_provider` | `resourcedetectionprocessor{gcp,ec2,ecs,eks,azure,aks}` | +| `cloud.platform` | `"resourcedetectionprocessor{gcp,ec2,ecs,eks,azure,aks}"` | +| `cloud.account.id` | `"resourcedetectionprocessor{gcp,ec2,ecs,azure}"` | +| `cloud.region` | `region` | `resourcedetectionprocessor{gcp,ec2,ecs,azure}` | +| `cloud.availability_zone` | `zone` | `resourcedetectionprocessor{gcp,ec2,ecs}` | +| `host.type` | `"resourcedetectionprocessor{gcp,ec2}"` | +| `gcp.gce.instance.hostname` | `resourcedetectionprocessor{gcp}` | +| `gcp.gce.instance.name` | `resourcedetectionprocessor{gcp}` | +| `k8s.cluster.name` | `kube_cluster_name` | `resourcedetectionprocessor{gcp,eks}` | +| `host.image.id` | `resourcedetectionprocessor{ec2}` | +| `aws.ecs.cluster.arn` | `ecs_cluster_name` | `k8sattributes` | +| `aws.ecs.task.arn` | `task_arn` | `k8sattributes` | +| `aws.ecs.task.family` | `task_family` | `k8sattributes` | +| `aws.ecs.task.revision` | `task_version` | `k8sattributes` | +| `aws.ecs.launchtype` | `k8sattributes` | +| `aws.log.group.names` | `k8sattributes` | +| `aws.log.group.arns` | `k8sattributes` | +| `aws.log.stream.names` | `k8sattributes` | +| `aws.log.stream.arns` | `k8sattributes` | +| `azure.vm.name` | `k8sattributes` | +| `azure.vm.size` | `k8sattributes` | +| `azure.vm.scaleset.name` | `k8sattributes` | +| `azure.resourcegroup.name` | `k8sattributes` | +| `k8s.cluster.uid` | `k8sattributes` | +| `k8s.namespace.name` | `kube_namespace` | `k8sattributes` | +| `k8s.pod.name` | `pod_name` | `k8sattributes` | +| `k8s.pod.uid` | `k8sattributes` | +| `k8s.pod.start_time` | `k8sattributes` | +| `k8s.deployment.name` | `kube_deployment` | `k8sattributes` | +| `k8s.replicaset.name` | `kube_replica_set` | `k8sattributes` | +| `k8s.replicaset.uid` | `k8sattributes` | +| `k8s.daemonset.name` | `kube_daemon_set` | `k8sattributes` | +| `k8s.daemonset.uid` | `k8sattributes` | +| `k8s.statefulset.name` | `kube_stateful_set` | `k8sattributes` | +| `k8s.statefulset.uid` | `k8sattributes` | +| `k8s.container.name` | `kube_container_name` | `k8sattributes` | +| `k8s.job.name` | `kube_job` | `k8sattributes` | +| `k8s.job.uid` | `k8sattributes` | +| `k8s.cronjob.name` | `kube_cronjob` | `k8sattributes` | +| `k8s.node.name` | `k8sattributes` | +| `container.id` | `container_id` | `k8sattributes` | +| `container.image.name` | `image_name` | `k8sattributes` | +| `container.image.tag` | `image_tag` | `k8sattributes` | + +## Full example configuration{% #full-example-configuration %} + +For a full working example configuration with the Datadog exporter, see [`k8s-values.yaml`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/k8s-values.yaml). This example is for Amazon EKS. 
+ +## Example logging output{% #example-logging-output %} + +``` +ResourceSpans #0 +Resource SchemaURL: https://opentelemetry.io/schemas/1.6.1 +Resource attributes: + -> container.id: Str(0cb82a1bf21466b4189414cf326683d653114c0f61994c73f78d1750b9fcdf06) + -> service.name: Str(cartservice) + -> service.instance.id: Str(5f35cd94-1b9c-47ff-bf45-50ac4a998a6b) + -> service.namespace: Str(opentelemetry-demo) + -> k8s.namespace.name: Str(otel-gateway) + -> k8s.node.name: Str(ip-192-168-61-208.ec2.internal) + -> k8s.pod.name: Str(opentelemetry-demo-cartservice-567765cd64-cbmwz) + -> deployment.environment: Str(otel-gateway) + -> k8s.pod.ip: Str(192.168.45.90) + -> telemetry.sdk.name: Str(opentelemetry) + -> telemetry.sdk.language: Str(dotnet) + -> telemetry.sdk.version: Str(1.5.1) + -> cloud.provider: Str(aws) + -> cloud.platform: Str(aws_ec2) + -> cloud.region: Str(us-east-1) + -> cloud.account.id: Str(XXXXXXXXXX) + -> cloud.availability_zone: Str(us-east-1c) + -> host.id: Str(i-09e82186d7d8d7c95) + -> host.image.id: Str(ami-06f28e19c3ba73ef7) + -> host.type: Str(m5.large) + -> host.name: Str(ip-192-168-50-0.ec2.internal) + -> os.type: Str(linux) + -> k8s.deployment.name: Str(opentelemetry-demo-cartservice) + -> kube_app_name: Str(opentelemetry-demo-cartservice) + -> k8s.replicaset.uid: Str(ddb3d058-6d6d-4423-aca9-0437c3688217) + -> k8s.replicaset.name: Str(opentelemetry-demo-cartservice-567765cd64) + -> kube_app_instance: Str(opentelemetry-demo) + -> kube_app_component: Str(cartservice) + -> k8s.pod.start_time: Str(2023-11-13T15:03:46Z) + -> k8s.pod.uid: Str(5f35cd94-1b9c-47ff-bf45-50ac4a998a6b) + -> k8s.container.name: Str(cartservice) + -> container.image.name: Str(XXXXXXXXX.dkr.ecr.us-east-1.amazonaws.com/otel-demo) + -> container.image.tag: Str(v4615c8d7-cartservice) +ScopeSpans #0 +ScopeSpans SchemaURL: +InstrumentationScope Microsoft.AspNetCore +Span #0 + Trace ID : fc6794b53df7e44bab9dced42bdfbf7b + Parent ID : 2d3ba75ad6a6b1a0 + ID : f669b0fcd98365b9 + Name : oteldemo.CartService/AddItem + Kind : Server + Start time : 2023-11-20 13:37:11.2060978 +0000 UTC + End time : 2023-11-20 13:37:11.2084166 +0000 UTC + Status code : Unset + Status message : +Attributes: + -> net.host.name: Str(opentelemetry-demo-cartservice) + -> net.host.port: Int(8080) + -> http.method: Str(POST) + -> http.scheme: Str(http) + -> http.target: Str(/oteldemo.CartService/AddItem) + -> http.url: Str(http://opentelemetry-demo-cartservice:8080/oteldemo.CartService/AddItem) + -> http.flavor: Str(2.0) + -> http.user_agent: Str(grpc-node-js/1.8.14) + -> app.user.id: Str(e8521c8c-87a9-11ee-b20a-4eaeb9e6ddbc) + -> app.product.id: Str(LS4PSXUNUM) + -> app.product.quantity: Int(3) + -> http.status_code: Int(200) + -> rpc.system: Str(grpc) + -> net.peer.ip: Str(::ffff:192.168.36.112) + -> net.peer.port: Int(36654) + -> rpc.service: Str(oteldemo.CartService) + -> rpc.method: Str(AddItem) + -> rpc.grpc.status_code: Int(0) +``` + +## Custom tagging{% #custom-tagging %} + +### Custom host tags{% #custom-host-tags %} + +#### In the Datadog exporter{% #in-the-datadog-exporter %} + +Set custom hosts tags directly in the Datadog exporter: + +```gdscript3 + ## @param tags - list of strings - optional - default: empty list + ## List of host tags to be sent as part of the host metadata. + ## These tags will be attached to telemetry signals that have the host metadata hostname. + ## + ## To attach tags to telemetry signals regardless of the host, use a processor instead. 
+ # + tags: ["team:infra", ":"] +``` + +See all configurations options [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/collector.yaml). + +#### As OTLP resource attributes{% #as-otlp-resource-attributes %} + +Custom host tags can also be set as resource attributes that start with the namespace `datadog.host.tag`. + +This can be set as an env var `OTEL_RESOURCE_ATTRIBUTES=datadog.host.tag.=` in an [OTel SDK](https://opentelemetry.io/docs/languages/js/resources/). Or this can be set in a processor: + +``` +processors: + resource: + attributes: + - key: datadog.host.tag. + action: upsert + from_attribute: +``` + +**Note:** This is only supported if you have opted-in as described [here](https://docs.datadoghq.com/opentelemetry/schema_semantics/host_metadata/). + +### Host aliases{% #host-aliases %} + +You can set host aliases with the resource attribute `datadog.host.aliases`. + +Set this attribute in a processor: + +``` +processors: + transform: + trace_statements: &statements + - context: resource + statements: + - set(attributes["datadog.host.aliases"], ["alias1", "alias2", "alias3"]) +``` + +**Note:** This is only supported if you have opted-in as described [here](https://docs.datadoghq.com/opentelemetry/schema_semantics/host_metadata/). + +### Custom container tags{% #custom-container-tags %} + +Same as for custom host tags, custom containers tags can be set by prefixing resource attributes by `datadog.container.tag` in your OTEL instrumentation. + +This can be set as an env var `OTEL_RESOURCE_ATTRIBUTES=datadog.container.tag.=` in an [OTel SDK](https://opentelemetry.io/docs/languages/js/resources/). Or this can be set in a processor: + +``` +processors: + resource: + attributes: + - key: datadog.container.tag. + action: upsert + from_attribute: +``` diff --git a/opentelemetry-mdoc/config/index.md b/opentelemetry-mdoc/config/index.md new file mode 100644 index 0000000000000..4ecc6cc07fea1 --- /dev/null +++ b/opentelemetry-mdoc/config/index.md @@ -0,0 +1,21 @@ +--- +title: OpenTelemetry Configuration +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > OpenTelemetry Configuration +--- + +# OpenTelemetry Configuration + +## Overview{% #overview %} + +Configure your OpenTelemetry setup to optimize data collection and ensure proper integration with Datadog. 
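  ## Each tag is a "key:value" string, for example "env:prod" or "region:us1".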
Choose the configuration area you need to adjust: + +- [Edit batch and memory configuration](http://localhost:1313/opentelemetry/config/collector_batch_memory/) +- [Configure OpenTelemetry behavior using environment variables supported by Datadog](http://localhost:1313/opentelemetry/config/environment_variable_support/) +- [Set up host identification and tag attribution for your telemetry](http://localhost:1313/opentelemetry/config/hostname_tagging/) +- [Configure log collection and processing for OpenTelemetry data](http://localhost:1313/opentelemetry/config/log_collection/) +- [Configure the OTLP receiver in your Collector to collect metrics, logs, and traces](http://localhost:1313/opentelemetry/config/otlp_receiver/) + +## Further reading{% #further-reading %} + +- [Datadog's partnership with OpenTelemetry](https://www.datadoghq.com/blog/opentelemetry-instrumentation/) diff --git a/opentelemetry-mdoc/config/log_collection/index.md b/opentelemetry-mdoc/config/log_collection/index.md new file mode 100644 index 0000000000000..e73ddaef723b1 --- /dev/null +++ b/opentelemetry-mdoc/config/log_collection/index.md @@ -0,0 +1,152 @@ +--- +title: Log Collection +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > OpenTelemetry Configuration > Log Collection +--- + +# Log Collection + +{% alert level="info" %} +The Datadog Agent logs pipeline is enabled by default in the Datadog Exporter in v0.108.0. This may cause a breaking change if [`logs::dump_payloads`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/b52e760f184b77c6e1a9ccc5121ff7b88d2b8f75/exporter/datadogexporter/examples/collector.yaml#L456-L463) is in use while upgrading, since this option is invalid when the Datadog Agent logs pipeline is enabled. To avoid this issue, remove the `logs::dump_payloads` config option or temporarily disable the `exporter.datadogexporter.UseLogsAgentExporter` feature gate. +{% /alert %} + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/log_collection.fac82a9987836f4208e9959813078603.png?auto=format" + alt="An information log sent from OpenTelemetry" /%} + +To collect logs from files, configure the [filelog receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver) in your Datadog Exporter. + +For more information, see the OpenTelemetry project documentation for the [filelog receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). + +## Setup{% #setup %} + +{% tab title="Host" %} +For a collector deployed on the same host as the log files to be collected, specify the paths of the log files to collect in your Collector configuration: + +```yaml +receivers: + filelog: + include_file_path: true + poll_interval: 500ms + include: + - /var/log/*/app.log + operators: + - type: json_parser + # Layout must match log timestamp format. If this section is removed, timestamp will correspond to the time of log intake by Datadog. + - type: time_parser + parse_from: attributes.time + layout: '%Y-%m-%dT%H:%M:%S%z' +``` + +{% /tab %} + +{% tab title="Kubernetes" %} +Add the following lines to `values.yaml`: + +```yaml +presets: + logsCollection: + enabled: true + includeCollectorLogs: true +``` + +The filelog receiver needs access to the file paths. 
The preset mounts the necessary volumes to the collector container for `/var/log/pods` and collects all logs from `/var/log/pods/*/*/*.log`. See [Important components for Kubernetes](https://opentelemetry.io/docs/kubernetes/collector/components/#filelog-receiver) for a full list of settings set by the preset. + +Collector configuration sets up a list of operators to parse the logs based on different formats: + +```yaml +filelog: + include: + - /var/log/pods/*/*/*.log + exclude: + - /var/log/pods/abc/*.log + operators: + - type: json_parser + - type: trace_parser + trace_id: + parse_from: attributes.trace_id + span_id: + parse_from: attributes.span_id + trace_flags: + parse_from: attributes.trace_flags + - type: time_parser + parse_from: attributes.time + layout: '%Y-%m-%dT%H:%M:%S%z' +``` + +{% /tab %} + +### Custom tags{% #custom-tags %} + +In order to add custom Datadog tags to logs, set the `ddtags` attribute on the logs. For example, this can be done with the [transform processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/transformprocessor): + +```yaml +processors: + transform: + log_statements: + - context: log + statements: + - set(attributes["ddtags"], "first_custom:tag, second_custom:tag") +``` + +## Data collected{% #data-collected %} + +Logs from the configured files. + +## Full example configuration{% #full-example-configuration %} + +For a full working example configuration with the Datadog exporter, see [`logs.yaml`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/logs.yaml). + +## Example logging output{% #example-logging-output %} + +```gdscript3 +ResourceLog #0 +Resource SchemaURL: https://opentelemetry.io/schemas/1.6.1 +Resource attributes: + -> k8s.container.name: Str(loadgenerator) + -> k8s.namespace.name: Str(otel-staging) + -> k8s.pod.name: Str(opentelemetry-demo-loadgenerator-d8c4d699d-ztt98) + -> k8s.container.restart_count: Str(1) + -> k8s.pod.uid: Str(92bf09ed-0db9-4f69-a9d6-1dadf12e01aa) + -> k8s.pod.ip: Str(192.168.55.78) + -> cloud.provider: Str(aws) + -> cloud.platform: Str(aws_ec2) + -> cloud.region: Str(us-east-1) + -> cloud.account.id: Str(XXXXXXXXX) + -> cloud.availability_zone: Str(us-east-1c) + -> host.id: Str(i-0368add8e328c28f7) + -> host.image.id: Str(ami-08a2e6a8e82737230) + -> host.type: Str(m5.large) + -> host.name: Str(ip-192-168-53-115.ec2.internal) + -> os.type: Str(linux) + -> k8s.daemonset.uid: Str(6d6fef61-d4c7-4226-9b7b-7d6b893cb31d) + -> k8s.daemonset.name: Str(opentelemetry-collector-agent) + -> k8s.node.name: Str(ip-192-168-53-115.ec2.internal) + -> kube_app_name: Str(opentelemetry-collector) + -> kube_app_instance: Str(opentelemetry-collector) + -> k8s.pod.start_time: Str(2023-11-20T12:53:23Z) +ScopeLogs #0 +ScopeLogs SchemaURL: +InstrumentationScope +LogRecord #0 +ObservedTimestamp: 2023-11-20 13:02:04.332021519 +0000 UTC +Timestamp: 2023-11-20 13:01:46.095736502 +0000 UTC +SeverityText: +SeverityNumber: Unspecified(0) +Body: Str( return wrapped_send(self, request, **kwargs)) +Attributes: + -> log.file.path: Str(/var/log/pods/otel-staging_opentelemetry-demo-loadgenerator-d8c4d699d-ztt98_92bf09ed-0db9-4f69-a9d6-1dadf12e01aa/loadgenerator/1.log) + -> time: Str(2023-11-20T13:01:46.095736502Z) + -> logtag: Str(F) + -> log.iostream: Str(stderr) +Trace ID: +Span ID: +Flags: 0 +``` + +## Further reading{% #further-reading %} + +- [Setting Up the OpenTelemetry Collector](http://localhost:1313/opentelemetry/collector_exporter/) +- [Correlate 
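
If you manage the Collector configuration yourself instead of relying on the preset, the receiver must also be referenced in a logs pipeline. A minimal sketch, assuming `filelog`, `batch`, and `datadog` components are defined elsewhere in your configuration:

```yaml
service:
  pipelines:
    logs:
      receivers: [filelog]
      processors: [batch]
      exporters: [datadog]
```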
OpenTelemetry Traces and Logs](http://localhost:1313/opentelemetry/correlate/logs_and_traces/) diff --git a/opentelemetry-mdoc/config/otlp_receiver/index.md b/opentelemetry-mdoc/config/otlp_receiver/index.md new file mode 100644 index 0000000000000..cef018a335032 --- /dev/null +++ b/opentelemetry-mdoc/config/otlp_receiver/index.md @@ -0,0 +1,196 @@ +--- +title: OTLP Receiver +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > OpenTelemetry Configuration > OTLP Receiver +--- + +# OTLP Receiver + +## Overview{% #overview %} + +To collect OTLP metrics, logs, and traces, configure the [OTLP receiver](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/README.md) in your Collector. + +For more information, see the OpenTelemetry project documentation for the [OTLP receiver](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/README.md). + +## Setup{% #setup %} + +Add the following lines to your Collector configuration: + +{% alert level="warning" %} +The following examples use `0.0.0.0` as the endpoint address for convenience. This allows connections from any network interface. For enhanced security, especially in local deployments, consider using `localhost` instead. For more information on secure endpoint configuration, see the [OpenTelemetry security documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks). +{% /alert %} + +```yaml +receivers: + otlp: + protocols: + http: + endpoint: "0.0.0.0:4318" + grpc: + endpoint: "0.0.0.0:4317" +``` + +## Data collected{% #data-collected %} + +Traces, metrics, and logs. + +## Full example configuration{% #full-example-configuration %} + +For a full working example configuration with the Datadog exporter, see [`otlp.yaml`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/otlp.yaml). 
+ +## Example logging output{% #example-logging-output %} + +```gdscript3 +ResourceSpans #0 +Resource SchemaURL: https://opentelemetry.io/schemas/1.6.1 +Resource attributes: + -> k8s.node.name: Str(ip-192-168-61-208.ec2.internal) + -> process.command_args: Slice(["/app/shippingservice"]) + -> k8s.namespace.name: Str(otel-gateway) + -> process.pid: Int(1) + -> service.name: Str(shippingservice) + -> service.namespace: Str(opentelemetry-demo) + -> os.type: Str(linux) + -> k8s.pod.ip: Str(192.168.57.77) + -> deployment.environment: Str(otel-gateway) + -> service.instance.id: Str(82323d5f-0e47-4ae6-92b6-583dc1fa33a1) + -> k8s.pod.name: Str(opentelemetry-demo-shippingservice-7f9b565549-4p2pj) + -> cloud.provider: Str(aws) + -> cloud.platform: Str(aws_ec2) + -> cloud.region: Str(us-east-1) + -> cloud.account.id: Str(XXXXXXXXX) + -> cloud.availability_zone: Str(us-east-1c) + -> host.id: Str(i-0e0b580bbe11883dc) + -> host.image.id: Str(ami-06f28e19c3ba73ef7) + -> host.type: Str(m5.large) + -> host.name: Str(ip-192-168-61-208.ec2.internal) + -> k8s.pod.start_time: Str(2023-11-13T15:03:50Z) + -> k8s.replicaset.uid: Str(537d3c30-fe3d-4999-be1a-51227c90a5e4) + -> kube_app_name: Str(opentelemetry-demo-shippingservice) + -> kube_app_instance: Str(opentelemetry-demo) + -> k8s.pod.uid: Str(82323d5f-0e47-4ae6-92b6-583dc1fa33a1) + -> k8s.replicaset.name: Str(opentelemetry-demo-shippingservice-7f9b565549) + -> kube_app_component: Str(shippingservice) +ScopeSpans #0 +ScopeSpans SchemaURL: +InstrumentationScope opentelemetry-otlp 0.11.0 +Span #0 + Trace ID : c4f6d4a8831a5d7b95727da5443ad8a4 + Parent ID : c7e98372030f17b1 + ID : f226428843050d12 + Name : reqwest-http-client + Kind : Client + Start time : 2023-11-20 12:56:26.401728438 +0000 UTC + End time : 2023-11-20 12:56:26.403518138 +0000 UTC + Status code : Unset + Status message : +Attributes: + -> http.host: Str(opentelemetry-demo-quoteservice) + -> http.url: Str(http://opentelemetry-demo-quoteservice:8080/getquote) + -> http.status_code: Int(200) + -> thread.name: Str(tokio-runtime-worker) + -> busy_ns: Int(199789) + -> code.namespace: Str(reqwest_tracing::reqwest_otel_span_builder) + -> code.lineno: Int(128) + -> thread.id: Int(3) + -> http.method: Str(POST) + -> http.user_agent: Str() + -> idle_ns: Int(1588692) + -> code.filepath: Str(/usr/local/cargo/registry/src/github.com-1ecc6299db9ec823/reqwest-tracing-0.4.0/src/reqwest_otel_span_builder.rs) + -> net.host.port: Str(8080) + -> http.scheme: Str(http) +ResourceMetrics #0 +Resource SchemaURL: https://opentelemetry.io/schemas/1.6.1 +Resource attributes: + -> service.name: Str(opentelemetry-collector) + -> net.host.name: Str(192.168.38.72) + -> service.instance.id: Str(192.168.38.72:8888) + -> net.host.port: Str(8888) + -> http.scheme: Str(http) + -> k8s.pod.ip: Str(192.168.38.72) + -> cloud.provider: Str(aws) + -> cloud.platform: Str(aws_ec2) + -> cloud.region: Str(us-east-1) + -> cloud.account.id: Str(XXXXXXXXX) + -> cloud.availability_zone: Str(us-east-1c) + -> host.id: Str(i-0fb30793f89bd81ab) + -> host.image.id: Str(ami-0cbbb5a8c6f670bb6) + -> host.type: Str(m5.large) + -> host.name: Str(ip-192-168-37-51.ec2.internal) + -> os.type: Str(linux) + -> k8s.pod.uid: Str(01f039fa-abf3-4ab1-8683-923dcf94b391) + -> k8s.namespace.name: Str(otel-ds-gateway) + -> k8s.pod.start_time: Str(2023-11-20T12:53:40Z) + -> k8s.daemonset.uid: Str(694b994d-3488-418a-9f94-284792f1f8da) + -> k8s.daemonset.name: Str(opentelemetry-collector-agent) + -> k8s.node.name: Str(ip-192-168-37-51.ec2.internal) + -> kube_app_name: 
Str(opentelemetry-collector) + -> kube_app_instance: Str(opentelemetry-collector) + -> k8s.pod.name: Str(opentelemetry-collector-agent-4dm92) +ScopeMetrics #0 +ScopeMetrics SchemaURL: +InstrumentationScope otelcol/prometheusreceiver 0.88.0-dev +Metric #0 +Descriptor: + -> Name: otelcol_exporter_queue_capacity + -> Description: Fixed capacity of the retry queue (in batches) + -> Unit: + -> DataType: Gauge +NumberDataPoints #0 +Data point attributes: + -> exporter: Str(otlp) + -> service_instance_id: Str(ab73126a-0eff-4678-922d-a96b1fdabcdc) + -> service_name: Str(otelcontribcol) + -> service_version: Str(0.88.0-dev) +StartTimestamp: 1970-01-01 00:00:00 +0000 UTC +Timestamp: 2023-11-20 13:01:26.074 +0000 UTC +Value: 1000.000000 + +ResourceLog #0 +Resource SchemaURL: https://opentelemetry.io/schemas/1.6.1 +Resource attributes: + -> k8s.container.name: Str(loadgenerator) + -> k8s.namespace.name: Str(otel-staging) + -> k8s.pod.name: Str(opentelemetry-demo-loadgenerator-d8c4d699d-ztt98) + -> k8s.container.restart_count: Str(1) + -> k8s.pod.uid: Str(92bf09ed-0db9-4f69-a9d6-1dadf12e01aa) + -> k8s.pod.ip: Str(192.168.55.78) + -> cloud.provider: Str(aws) + -> cloud.platform: Str(aws_ec2) + -> cloud.region: Str(us-east-1) + -> cloud.account.id: Str(XXXXXXXXX) + -> cloud.availability_zone: Str(us-east-1c) + -> host.id: Str(i-0368add8e328c28f7) + -> host.image.id: Str(ami-08a2e6a8e82737230) + -> host.type: Str(m5.large) + -> host.name: Str(ip-192-168-53-115.ec2.internal) + -> os.type: Str(linux) + -> k8s.daemonset.uid: Str(6d6fef61-d4c7-4226-9b7b-7d6b893cb31d) + -> k8s.daemonset.name: Str(opentelemetry-collector-agent) + -> k8s.node.name: Str(ip-192-168-53-115.ec2.internal) + -> kube_app_name: Str(opentelemetry-collector) + -> kube_app_instance: Str(opentelemetry-collector) + -> k8s.pod.start_time: Str(2023-11-20T12:53:23Z) +ScopeLogs #0 +ScopeLogs SchemaURL: +InstrumentationScope +LogRecord #0 +ObservedTimestamp: 2023-11-20 13:02:04.332021519 +0000 UTC +Timestamp: 2023-11-20 13:01:46.095736502 +0000 UTC +SeverityText: +SeverityNumber: Unspecified(0) +Body: Str( return wrapped_send(self, request, **kwargs)) +Attributes: + -> log.file.path: Str(/var/log/pods/otel-staging_opentelemetry-demo-loadgenerator-d8c4d699d-ztt98_92bf09ed-0db9-4f69-a9d6-1dadf12e01aa/loadgenerator/1.log) + -> time: Str(2023-11-20T13:01:46.095736502Z) + -> logtag: Str(F) + -> log.iostream: Str(stderr) +Trace ID: +Span ID: +Flags: 0 +``` + +## Further reading{% #further-reading %} + +- [Setting Up the OpenTelemetry Collector](http://localhost:1313/opentelemetry/collector_exporter/) +- [OTLP Ingestion by the Datadog Agent](http://localhost:1313/opentelemetry/otlp_ingest_in_the_agent/) diff --git a/opentelemetry-mdoc/correlate/dbm_and_traces/index.md b/opentelemetry-mdoc/correlate/dbm_and_traces/index.md new file mode 100644 index 0000000000000..81abd33e71e70 --- /dev/null +++ b/opentelemetry-mdoc/correlate/dbm_and_traces/index.md @@ -0,0 +1,161 @@ +--- +title: Correlate OpenTelemetry Traces and DBM +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Correlate OpenTelemetry Data > Correlate + OpenTelemetry Traces and DBM +--- + +# Correlate OpenTelemetry Traces and DBM + +## Overview{% #overview %} + +Datadog Database Monitoring (DBM) correlates backend traces from your OpenTelemetry-instrumented application with detailed database performance data. 
This allows you to link spans from your application to related query metrics and execution plans, helping you identify the exact queries that are slowing down your services. + +## Requirements{% #requirements %} + +Before you begin, ensure you have configured [unified service tagging](http://localhost:1313/opentelemetry/correlate/#prerequisite-unified-service-tagging). This is required for all data correlation in Datadog. + +## Setup{% #setup %} + +To correlate traces and metrics, you must: + +1. **Instrument database spans**: Add specific OpenTelemetry attributes to your database spans to enable correlation with DBM. + +1. **Configure trace ingestion path**: Enable the correct feature gate on your Collector or Agent to ensure database spans are properly processed for DBM. + +### Step 1: Instrument your database spans{% #step-1-instrument-your-database-spans %} + +For DBM correlation to work, your database spans must include the following attributes. + +| Attribute | Required? | Description | Example | +| -------------- | --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------- | +| `db.system` | Yes | The database technology. | `postgres`, `mysql`, `sqlserver` | +| `db.statement` | Yes | The raw SQL query text. Datadog uses this to set the span's resource name after obfuscation and normalization. | `SELECT * FROM users WHERE id = ?` | +| `span.type` | Yes* | **(Datadog-specific)** Identifies database spans. *Derived automatically from other OpenTelemetry attributes according to the [span type mapping rules](http://localhost:1313/opentelemetry/mapping/semantic_mapping/#span-type-mapping). Only set it manually when creating spans directly with an SDK. | `sql`, `postgres`, `mysql`, `sql.query` | +| `db.name` | No | The logical database or schema name being queried. | `user_accounts` | + +{% alert level="info" %} +The `span.type` attribute is a Datadog-specific convention for identifying and processing database spans. When using OpenTelemetry auto-instrumentation or the Datadog Agent, this attribute is set automatically. Only add it manually if you are instrumenting spans directly with the SDK. +{% /alert %} + +#### Using auto instrumentation{% #using-auto-instrumentation %} + +To get started, instrument your application using the appropriate OpenTelemetry auto-instrumentation library for your language. For setup instructions, see the official [OpenTelemetry instrumentation documentation](https://opentelemetry.io/docs/languages/). + +These libraries automatically add the required `db.system` and `db.statement` attributes. The Datadog Agent or SDK then derives `span.type` automatically, so no manual attribute configuration is needed. + +{% collapsible-section %} +#### Set attributes manually (advanced) + +If your environment involves a custom database client or spans not recognized by the library, you can enrich them using the OpenTelemetry Collector's `attributes` processor. 
+ +For example, you can add `span.type: sql` to any span that has the `db.system` attribute: + +```yaml +processors: + attributes/add_span_type: + actions: + - key: span.type + value: "sql" + action: insert + # Apply this action only to spans that have the db.system attribute + from_context: span + when: + - span.attributes["db.system"] != nil + +service: + pipelines: + traces: + # Add the processor to your traces pipeline + processors: [..., attributes/add_span_type, ...] +``` + +{% /collapsible-section %} + +#### Using manual instrumentation{% #using-manual-instrumentation %} + +If you are manually creating spans with the OpenTelemetry SDK, you can set the attributes directly in your code. For more information, see the [OpenTelemetry documentation](https://opentelemetry.io/docs/languages/). + +The following is a conceptual example of manual instrumentation using Python's OpenTelemetry SDK: + +```python +from opentelemetry import trace + +tracer = trace.get_tracer("my-app.instrumentation") + +# When making a database call, create a span and set attributes +with tracer.start_as_current_span("postgres.query") as span: + # Set attributes required for DBM correlation + span.set_attribute("span.type", "sql") + span.set_attribute("db.system", "postgres") + span.set_attribute("db.statement", "SELECT * FROM users WHERE id = ?") + span.set_attribute("db.name", "user_accounts") + + # Your actual database call would go here + # db_cursor.execute("SELECT * FROM users WHERE id = %s", (user_id,)) +``` + +### Step 2: Configure your ingest path{% #step-2-configure-your-ingest-path %} + +Depending on how you send traces to Datadog, you may need to enable specific feature gates to ensure database spans are processed correctly. + +{% tab title="Datadog Agent (DDOT Collector)" %} +If you are using the Datadog Helm chart (v3.107.0 or later), set the feature gate in your `values.yaml`: + +```yaml +datadog: + otelCollector: + featureGates: "datadog.EnableReceiveResourceSpansV2,datadog.EnableOperationAndResourceNameV2" +``` + +{% /tab %} + +{% tab title="OTel Collector" %} +When starting the Collector, you must enable the correct feature gate for your version. + +#### Collector v0.124.0 and later{% #collector-v01240-and-later %} + +For recent versions of the Collector, enable the `datadog.EnableOperationAndResourceNameV2` feature gate: + +```sh +otelcontribcol --config=config.yaml \ +--feature-gates=datadog.EnableOperationAndResourceNameV2 +``` + +#### Collector v0.118.0 - v0.123.0{% #collector-v01180---v01230 %} + +For older versions of the Collector, both of the following feature gates are required: + +```sh +otelcontribcol --config=config.yaml \ +--feature-gates=datadog.EnableReceiveResourceSpansV2,datadog.EnableOperationAndResourceNameV2 +``` + +{% /tab %} + +{% tab title="Datadog Agent (OTLP Ingest)" %} +In your Datadog Agent configuration, ensure the `DD_APM_FEATURES` environment variable includes `enable_operation_and_resource_name_logic_v2`. +{% /tab %} + +### View correlated data in Datadog{% #view-correlated-data-in-datadog %} + +After your application is sending traces, you can see the correlation in the APM Trace View: + +1. Navigate to [**APM** > **Traces**](https://app.datadoghq.com/apm/traces). +1. Find and click on a trace from your instrumented service. +1. In the trace's flame graph, select a database span (for example, a span with `span.type: sql`) +1. In the details panel, click the **SQL Queries** tab. You should see performance metrics and execution plans for the query. 
+ +## Troubleshooting{% #troubleshooting %} + +If you don't see the expected correlation between your APM traces and DBM, it's typically due to a missing or incorrect configuration. Check the following common causes: + +- **Missing attributes**: The database span must contain `db.system` and `db.statement`. The `span.type` attribute is also required but is typically derived automatically by Datadog. +- **Incorrect unified service tagging**: The `service` tag on your database spans must be set. Verify that [unified service tagging](http://localhost:1313/opentelemetry/correlate/#prerequisite-unified-service-tagging) is configured correctly. +- **The SQL query may not be parsable**: The correlation relies on Datadog's ability to parse the SQL query from the `db.statement` attribute. If the query uses non-standard or complex syntax, parsing may fail. If you suspect this is the case, [contact Datadog support](http://localhost:1313/help) for assistance. +- **The correct feature gates must be enabled** for your specific trace ingestion path as described in the setup steps. + +## Further reading{% #further-reading %} + +- [Send OpenTelemetry Traces to Datadog](http://localhost:1313/opentelemetry/otel_tracing/) diff --git a/opentelemetry-mdoc/correlate/index.md b/opentelemetry-mdoc/correlate/index.md new file mode 100644 index 0000000000000..98832d8bee837 --- /dev/null +++ b/opentelemetry-mdoc/correlate/index.md @@ -0,0 +1,91 @@ +--- +title: Correlate OpenTelemetry Data +description: >- + Learn how to correlate your OpenTelemetry traces, metrics, logs, and other + telemetry in Datadog to get a unified view of your application's performance. +breadcrumbs: Docs > OpenTelemetry in Datadog > Correlate OpenTelemetry Data +--- + +# Correlate OpenTelemetry Data + +## Overview{% #overview %} + +Getting a unified view of your application's performance requires connecting its traces, metrics, logs, user interactions, and more. By correlating your OpenTelemetry data in Datadog, you can navigate between all related telemetry in a single view, allowing you to diagnose and resolve issues faster. + +## Prerequisite: Unified service tagging{% #prerequisite-unified-service-tagging %} + +Datadog uses three standard tags to link telemetry together: `env`, `service`, and `version`. + +To ensure your OpenTelemetry data is properly correlated, you must configure your application or system to use these tags by setting a standard set of OpenTelemetry resource attributes. Datadog automatically maps these attributes to the correct tags. + +| OpenTelemetry Resource Attribute | Datadog Tag | Notes | +| -------------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | +| `deployment.environment.name` | `env` | **Recommended**. Supported in Agent v7.58.0+ and Collector Exporter v0.110.0+. | +| `deployment.environment` | `env` | Use instead of `deployment.environment.name` if you are running an Agent version older than v7.58.0 or a Collector Exporter older than v0.110.0. | +| `service.name` | `service` | +| `service.version` | `version` | + +You can set these attributes in your application's environment variables, SDK, or in the OpenTelemetry Collector. 
+ +{% tab title="Environment Variables" %} +Set the `OTEL_RESOURCE_ATTRIBUTES` environment variable with your service's information: + +```sh +export OTEL_SERVICE_NAME="my-service" +export OTEL_RESOURCE_ATTRIBUTES="deployment.environment.name=production,service.version=1.2.3" +``` + +{% /tab %} + +{% tab title="SDK" %} +Create a Resource with the required attributes and associate it with your TracerProvider in your application code. + +Here's an example using the OpenTelemetry SDK for Python: + +```python +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.trace import TracerProvider + +resource = Resource(attributes={ + "service.name": "", + "deployment.environment.name": "", + "service.version": "" +}) +tracer_provider = TracerProvider(resource=resource) +``` + +{% /tab %} + +{% tab title="Collector" %} +Use the `resource` processor in your Collector configuration to set the resource attributes on your telemetry data: + +```yaml +processors: + resource: + attributes: + - key: service.name + value: "my-service" + action: upsert + - key: deployment.environment.name + value: "production" + action: upsert + - key: service.version + value: "1.2.3" + action: upsert +... +``` + +{% /tab %} + +## Correlate telemetry{% #correlate-telemetry %} + +After unified service tagging is configured, you can connect your various telemetry streams. Select a guide below for platform-specific instructions. + +- [Correlate logs and traces](http://localhost:1313/opentelemetry/correlate/logs_and_traces) +- [Correlate metrics and traces](http://localhost:1313/opentelemetry/correlate/metrics_and_traces) +- [Correlate RUM and traces](http://localhost:1313/opentelemetry/correlate/rum_and_traces) +- [Correlate DBM and traces](http://localhost:1313/opentelemetry/correlate/dbm_and_traces) + +## Further reading{% #further-reading %} + +- [Datadog's partnership with OpenTelemetry](https://www.datadoghq.com/blog/opentelemetry-instrumentation/) diff --git a/opentelemetry-mdoc/correlate/logs_and_traces/index.md b/opentelemetry-mdoc/correlate/logs_and_traces/index.md new file mode 100644 index 0000000000000..b868903c6a860 --- /dev/null +++ b/opentelemetry-mdoc/correlate/logs_and_traces/index.md @@ -0,0 +1,228 @@ +--- +title: Correlate OpenTelemetry Traces and Logs +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Correlate OpenTelemetry Data > Correlate + OpenTelemetry Traces and Logs +--- + +# Correlate OpenTelemetry Traces and Logs + +## Overview{% #overview %} + +Correlating traces and logs allows you to investigate issues by navigating from a specific span in a trace directly to the logs that were generated during that operation. This makes debugging faster and more intuitive by providing the exact context needed to understand an error or performance problem. + +## Requirements{% #requirements %} + +Before you begin, ensure you have configured [unified service tagging](http://localhost:1313/opentelemetry/correlate/#prerequisite-unified-service-tagging). This is required for all data correlation in Datadog. + +## Setup{% #setup %} + +To correlate OpenTelemetry traces and logs in Datadog, you must: + +- **Inject Trace Context**: Your application's logger must be configured to enrich your logs with the `trace_id` and `span_id` from the active trace. The recommended approach is to use an OpenTelemetry-aware logging library or appender. 
These tools automatically capture the active trace context and embed the `trace_id` and `span_id` as top-level fields in your log records, which is the standard method for correlation.
+
+- **Send Logs to Datadog**: Your logs, enriched with trace context, must be collected and sent to Datadog.
+
+#### 1. Inject trace context into your logs
+
+The following examples for Go and Java use logging bridges. These bridges intercept logs from common logging libraries (such as `zap` and `Logback`), convert them into the OpenTelemetry log data model, and forward them to the OpenTelemetry SDK. This process automatically enriches the logs with the active trace context.
+
+For complete, working applications, see the [Datadog OpenTelemetry Examples repository](https://github.com/DataDog/opentelemetry-examples).
+
+{% tab title="Go" %}
+First, ensure you have an initialized OpenTelemetry `LoggerProvider`. Then, use it to create your `zap` logger instance:
+
+```go
+import (
+    "go.uber.org/zap"
+
+    "go.opentelemetry.io/contrib/bridges/otelzap"
+)
+
+// Replace your standard zap logger with the otelzap-backed one
+logger := zap.New(otelzap.NewCore(
+    "my-service-name",
+    otelzap.WithLoggerProvider(loggerProvider),
+))
+
+// Now, logs written with this logger are automatically correlated
+logger.Info("Processing user request")
+```
+
+To see how the `LoggerProvider` is configured in a complete application, see the [full Go example in the examples repository](https://github.com/DataDog/opentelemetry-examples/blob/main/apps/rest-services/golang/calendar/main.go).
+{% /tab %}
+
+{% tab title="Java" %}
+To inject the trace context in Java, you can use the OpenTelemetry Logback Appender. Add the `io.opentelemetry.instrumentation:opentelemetry-logback-appender-1.0` dependency to your project and configure an appender similar to the following in your `logback.xml`:
+
+```xml
+<appender name="OpenTelemetry"
+          class="io.opentelemetry.instrumentation.logback.appender.v1_0.OpenTelemetryAppender">
+  <captureExperimentalAttributes>true</captureExperimentalAttributes>
+</appender>
+
+<root level="INFO">
+  <appender-ref ref="OpenTelemetry"/>
+</root>
+```
+
+For a complete, working example configuration, see the [full Java example in the examples repository](https://github.com/DataDog/opentelemetry-examples/blob/main/apps/rest-services/java/calendar/src/main/resources/logback.xml).
+{% /tab %}
+
+### 2. Choose your log pipeline
+
+Once your logs are instrumented with trace context, you need to send them to Datadog. The simplest approach is to send them directly from your application to the OpenTelemetry Collector using OTLP. However, you can also scrape logs from files or collect logs using the Datadog Agent.
+
+#### Send logs using OTLP{% #send-logs-using-otlp %}
+
+This is the simplest and most direct method. Your application sends logs directly to an OTLP endpoint, avoiding the complexity of writing to and parsing local files.
+
+The OpenTelemetry Collector and the Datadog Agent can both receive OTLP logs.
+
+1. **Configure your Application to Export Logs using OTLP**: In your OpenTelemetry SDK setup, configure a `LogRecordProcessor` to use an `OTLPLogExporter`. The following example shows how to do this in Python:
+   ```python
+   # In your OTel SDK setup for Python
+   import logging
+   from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
+   from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
+   from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter
+
+   # Configure the OTLP Log Exporter to send to your Collector
+   # Note: The endpoint should point to your OpenTelemetry Collector.
+   # The default port is 4317 for gRPC and 4318 for HTTP.
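+   # The hardcoded endpoint below assumes the Collector is reachable on localhost.
+   # If you prefer environment-based configuration, you can typically omit the
+   # endpoint argument and set the standard OTEL_EXPORTER_OTLP_ENDPOINT (or
+   # OTEL_EXPORTER_OTLP_LOGS_ENDPOINT) environment variable instead.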
+ exporter = OTLPLogExporter(endpoint="localhost:4317", insecure=True) + + log_provider = LoggerProvider() + log_provider.add_log_record_processor(BatchLogRecordProcessor(exporter)) + + # Attach to the root logger + handler = LoggingHandler(logger_provider=log_provider) + logging.getLogger().addHandler(handler) + ``` +1. **Configure the Collector to Receive OTLP Logs**: In your Collector's `config.yaml`, enable the `otlp` receiver and add it to your `logs` pipeline: + ```yaml + receivers: + otlp: + protocols: + grpc: + http: + + exporters: + datadog: + # ... your datadog exporter config + + service: + pipelines: + logs: + receivers: [otlp] + processors: [batch] + exporters: [datadog] + ``` + +#### Scrape logs from files{% #scrape-logs-from-files %} + +This approach is useful if you have a requirement to keep local log files for compliance or other tooling. + +For Datadog to correlate your logs and traces, your JSON log files must contain specific fields formatted correctly: + +- `trace_id`: The ID of the trace. It must be a 32-character lowercase hexadecimal string. +- `span_id`: The ID of the span. It must be a 16-character lowercase hexadecimal string. + +The OpenTelemetry SDK typically provides these in a raw format (such as an integer or byte array), which must be formatted into hexadecimal strings without any `0x` prefix. + +1. **Configure your Application to Output JSON Logs**: Use a standard logging library to write logs as JSON to a file or `stdout`. The following Python example uses the standard `logging` library. + +1. **Manually Inject Trace Context**: In your application code, retrieve the current span context and add the `trace_id` and `span_id` to your log records. The following Python example shows how to create a custom logging.Filter to do this automatically: + + ```python + import logging + import sys + from opentelemetry import trace + from pythonjsonlogger import jsonlogger + + # 1. Create a filter to inject trace context + class TraceContextFilter(logging.Filter): + def filter(self, record): + span = trace.get_current_span() + if span.is_recording(): + span_context = span.get_span_context() + record.trace_id = f'{span_context.trace_id:032x}' + record.span_id = f'{span_context.span_id:016x}' + return True + + # 2. Configure a JSON logger + logger = logging.getLogger("my-json-logger") + logger.setLevel(logging.DEBUG) + + # 3. Add the filter to the logger + logger.addFilter(TraceContextFilter()) + + handler = logging.StreamHandler(sys.stdout) + formatter = jsonlogger.JsonFormatter( + '%(asctime)s %(name)s %(levelname)s %(message)s %(trace_id)s %(span_id)s' + ) + handler.setFormatter(formatter) + logger.addHandler(handler) + + # Logs will now contain the trace_id and span_id + logger.info("Processing user request with trace context.") + ``` + +1. **Configure the Collector to Scrape Log Files**: In your Collector's `config.yaml`, enable the `filelog` receiver. Configure it to find your log files and parse them as JSON. + + ```yaml + receivers: + filelog: + include: [ /var/log/my-app/*.log ] # Path to your log files + operators: + - type: json_parser + # The timestamp and severity fields should match your JSON output + timestamp: + parse_from: attributes.asctime + layout: '%Y-%m-%d %H:%M:%S,%f' + severity: + parse_from: attributes.levelname + # ... your logs pipeline ... + ``` + +This manual approach gives you full control over the log format, ensuring it is clean and easily parsable by the Collector or Datadog Agent. 
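+
+A common pitfall is emitting IDs that are not zero-padded, lowercase hexadecimal. As a quick sanity check, the following minimal sketch (using made-up example IDs rather than values from a live trace) shows the formatting Datadog expects for these fields:
+
+```python
+# Illustrative only: format raw OpenTelemetry IDs as the hex strings Datadog expects.
+raw_trace_id = 0x8448EB211C80319C  # example 64-bit trace ID as an integer
+raw_span_id = 0xB7AD6B7169203331   # example 64-bit span ID as an integer
+
+trace_id = f"{raw_trace_id:032x}"  # 32-character lowercase hex, zero-padded
+span_id = f"{raw_span_id:016x}"    # 16-character lowercase hex
+
+print(trace_id)  # 00000000000000008448eb211c80319c
+print(span_id)   # b7ad6b7169203331
+```
+
+If either field is missing, has the wrong length, or includes a `0x` prefix, the log may not be matched to its trace.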
+ +#### Collect logs using the Datadog Agent{% #collect-logs-using-the-datadog-agent %} + +If you collect logs directly with the Datadog Agent (without sending them through the OpenTelemetry Collector), you must ensure the trace IDs in your logs use the Datadog format. + +- **Trace ID format**: The Datadog Agent requires the trace ID to be in the `dd.trace_id` field. + + - If you are using **Datadog's tracing libraries** (like `dd-trace-py`), this is handled for you automatically. + - If you are generating logs with OpenTelemetry `trace_id` and `span_id` (as shown in the file-scraping example), you must use a [Log Processing Rule](http://localhost:1313/logs/log_configuration/processors) in Datadog to remap your `trace_id` attribute to `dd.trace_id`. + +- **Attribute Mapping**: The Datadog Agent does not automatically convert OTel resource attributes (for example, `service.name`) to Datadog's standard tags. You may need to manually remap these attributes in your log processing pipeline to maintain unified service tagging. + +## View correlated data in Datadog{% #view-correlated-data-in-datadog %} + +After your application is sending traces, you can navigate between them in Datadog. + +### From a trace to logs{% #from-a-trace-to-logs %} + +1. Navigate to [**APM** > **Traces**](https://app.datadoghq.com/apm/traces). +1. Find and click on a trace from your instrumented service. +1. Select any span in the flame graph to view its details. +1. Click the **Logs** tab. + +Here, you can see all the logs generated during the execution of that specific span. + +### From a log to a trace{% #from-a-log-to-a-trace %} + +1. Navigate to [**Logs** > **Explorer**](https://app.datadoghq.com/logs). +1. Find and click a log entry from your instrumented service. +1. Click the **Trace** tab. + +Here, you can see a flame graph of the associated trace, with the span that generated the log. + +Click **View Trace in APM** to pivot directly to the full APM trace associated with that log event, allowing you to see the context of the entire request. + +## Further reading{% #further-reading %} + +- [Send OpenTelemetry Traces to Datadog](http://localhost:1313/opentelemetry/otel_tracing/) +- [Collector documentation](https://opentelemetry.io/docs/collector/) +- [Datadog's partnership with OpenTelemetry](https://www.datadoghq.com/blog/opentelemetry-instrumentation/) +- [Ease troubleshooting with cross product correlation.](http://localhost:1313/logs/guide/ease-troubleshooting-with-cross-product-correlation/) diff --git a/opentelemetry-mdoc/correlate/metrics_and_traces/index.md b/opentelemetry-mdoc/correlate/metrics_and_traces/index.md new file mode 100644 index 0000000000000..e08dfd57342ee --- /dev/null +++ b/opentelemetry-mdoc/correlate/metrics_and_traces/index.md @@ -0,0 +1,75 @@ +--- +title: Correlate OpenTelemetry Traces and Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Correlate OpenTelemetry Data > Correlate + OpenTelemetry Traces and Metrics +--- + +# Correlate OpenTelemetry Traces and Metrics + +## Overview{% #overview %} + +Correlating traces with host metrics allows you to pivot from a slow request directly to the CPU and memory metrics of the host or container it ran on. This helps you determine if resource contention was the root cause of a performance issue. + +Correlation between traces and metrics relies on the following resource attributes: + +- `host.name`: For correlating with host metrics (CPU, memory, disk). 
+- `container.id`: For correlating with container metrics. + +## Requirements{% #requirements %} + +Before you begin, ensure you have configured [unified service tagging](http://localhost:1313/opentelemetry/correlate/#prerequisite-unified-service-tagging). This is required for all data correlation in Datadog. + +## Setup{% #setup %} + +To correlate traces and metrics, you must: + +1. **Collect Host Metrics**: You must have the OpenTelemetry Collector configured to collect and send host metrics to Datadog. + +1. **Ensure Consistent Tagging**: Your traces and metrics must share a consistent `host.name` (for hosts) or `container.id` (for containers) attribute for Datadog to link them. + +### 1. Collect host metrics + +To collect system-level metrics from your infrastructure, enable the `hostmetrics` receiver in your OpenTelemetry Collector configuration. This receiver gathers metrics like CPU, memory, disk, and network usage. + +Add the `hostmetrics` receiver to the `receivers` section of your Collector configuration and enable it in your `metrics` pipeline: + +```yaml +receivers: + hostmetrics: + collection_interval: 10s + scrapers: + cpu: + memory: + disk: + ... + +service: + pipelines: + metrics: + receivers: [hostmetrics, ...] + processors: [...] + exporters: [...] +``` + +For the complete, working configuration, including Kubernetes-specific setup, see the [Host Metrics](http://localhost:1313/opentelemetry/integrations/host_metrics) documentation. + +### 2. Ensure consistent host and container tagging + +For correlation to work, the `host.name` (or `container.id`) attribute on your traces must match the corresponding attribute on the metrics collected by the `hostmetrics` receiver. + +## View correlated data in Datadog{% #view-correlated-data-in-datadog %} + +After your application is sending traces and the Collector is sending host metrics, you can see the correlation in the APM Trace View. + +1. Navigate to [**APM** > **Traces**](https://app.datadoghq.com/apm/traces). +1. Find and click on a trace from your instrumented service. +1. In the trace's flame graph, select a span that ran on the instrumented host. +1. In the details panel, click the **Infrastructure** tab. You should see the host metrics, like CPU and memory utilization, from the host that executed that part of the request. + +This allows you to immediately determine if a spike in host metrics corresponds with the performance of a specific request. + +## Further reading{% #further-reading %} + +- [Send OpenTelemetry Traces to Datadog](http://localhost:1313/opentelemetry/otel_tracing/) diff --git a/opentelemetry-mdoc/correlate/rum_and_traces/index.md b/opentelemetry-mdoc/correlate/rum_and_traces/index.md new file mode 100644 index 0000000000000..c6454b24cb5ff --- /dev/null +++ b/opentelemetry-mdoc/correlate/rum_and_traces/index.md @@ -0,0 +1,841 @@ +--- +title: Correlate RUM and Traces +description: Learn how to integrate Real User Monitoring with APM. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Correlate OpenTelemetry Data > Correlate RUM + and Traces +--- + +# Correlate RUM and Traces + +{% image + source="http://localhost:1313/images/real_user_monitoring/connect_rum_and_traces/rum-trace-tab.a48c0837c522ae9e5f26dd251b797a5e.png?auto=format" + alt="RUM and Traces" /%} + +## Overview{% #overview %} + +The APM integration with Real User Monitoring allows you to link requests from your web and mobile applications to their corresponding backend traces. 
This combination enables you to see your full frontend and backend data through one lens. + +Use frontend data from RUM, as well as backend, infrastructure, and log information from trace ID injection to pinpoint issues anywhere in your stack and understand what your users are experiencing. + +To start sending just your iOS application's traces to Datadog, see [iOS Trace Collection](http://localhost:1313/tracing/trace_collection/dd_libraries/ios/?tab=swiftpackagemanagerspm). + +## Usage{% #usage %} + +### Prerequisites{% #prerequisites %} + +- You have set up [APM tracing](http://localhost:1313/tracing) on the services targeted by your RUM applications. +- Your services use an HTTP server. +- Your HTTP servers are using a library that supports distributed tracing. +- You have the following set up based on your SDK: + - With the **Browser SDK**, you have added the XMLHttpRequest (XHR) or Fetch resources on the RUM Explorer to your `allowedTracingUrls`. + - With the **Mobile SDK**, you have added the Native or XMLHttpRequest (XHR) to your `firstPartyHosts`. +- You have a corresponding trace for requests to `allowedTracingUrls` or `firstPartyHosts`. + +### Setup RUM{% #setup-rum %} + +**Note:** Configuring RUM and Traces makes use of APM paid data in RUM, which may impact your APM billing. + +{% tab title="Browser RUM" %} + +1. Set up [RUM Browser Monitoring](http://localhost:1313/real_user_monitoring/browser/). + +1. Initialize the RUM SDK. Configure the `allowedTracingUrls` initialization parameter with the list of internal, first-party origins called by your browser application. + +For **npm install**: + + ```javascript + import { datadogRum } from '@datadog/browser-rum' + + datadogRum.init({ + clientToken: '', + applicationId: '', + site: 'datadoghq.com', + // service: 'my-web-application', + // env: 'production', + // version: '1.0.0', + allowedTracingUrls: [ + "https://api.example.com", + // Matches any subdomain of my-api-domain.com, such as https://foo.my-api-domain.com + /^https:\/\/[^\/]+\.my-api-domain\.com/, + // You can also use a function for advanced matching: + (url) => url.startsWith("https://api.example.com") + ], + sessionSampleRate: 100, + sessionReplaySampleRate: 100, // if not specified, defaults to 100 + trackResources: true, + trackLongTasks: true, + trackUserInteractions: true, + }) + ``` + +For **CDN install**: + + ```javascript + window.DD_RUM.init({ + clientToken: '', + applicationId: '', + site: 'datadoghq.com', + // service: 'my-web-application', + // env: 'production', + // version: '1.0.0', + allowedTracingUrls: [ + "https://api.example.com", + // Matches any subdomain of my-api-domain.com, such as https://foo.my-api-domain.com + /^https:\/\/[^\/]+\.my-api-domain\.com/, + // You can also use a function for advanced matching: + (url) => url.startsWith("https://api.example.com") + ], + sessionSampleRate: 100, + sessionReplaySampleRate: 100, // if not included, the default is 100 + trackResources: true, + trackLongTasks: true, + trackUserInteractions: true, + }) + ``` + +To connect RUM to Traces, you need to specify your browser application in the `service` field. + +`allowedTracingUrls` matches the full URL (`://[:]/[?][#]`). It accepts the following types: + + - `string`: matches any URL that starts with the value, so `https://api.example.com` matches `https://api.example.com/v1/resource`. + - `RegExp`: matches if any substring of the URL matches the provided RegExp. 
For example, `/^https:\/\/[^\/]+\.my-api-domain\.com/` matches URLs like `https://foo.my-api-domain.com/path`, but not `https://notintended.com/?from=guess.my-api-domain.com`. **Note:** The RegExp is not anchored to the start of the URL unless you use `^`. Be careful, as overly broad patterns can unintentionally match unwanted URLs and cause CORS errors. + - `function`: evaluates with the URL as parameter. Returning a `boolean` set to `true` indicates a match. + +{% alert level="warning" %} +When using RegExp, the pattern is tested against the entire URL as a substring, not just the prefix. To avoid unintended matches, anchor your RegExp with `^` and be as specific as possible. +{% /alert %} + +*(Optional)* Configure the `traceSampleRate` initialization parameter to keep a defined percentage of the backend traces. If not set, 100% of the traces coming from browser requests are sent to Datadog. To keep 20% of backend traces, for example: + +```javascript +import { datadogRum } from '@datadog/browser-rum' + +datadogRum.init({ + ...otherConfig, + traceSampleRate: 20 +}) +``` + +**Note**: `traceSampleRate` **does not** impact RUM sessions sampling. Only backend traces are sampled out. + +*(Optional)* If you set a `traceSampleRate`, to ensure backend services' sampling decisions are still applied, configure the `traceContextInjection` initialization parameter to `sampled` (set to `sampled` by default). + +For example, if you set the `traceSampleRate` to 20% in the Browser SDK: + +- When `traceContextInjection` is set to `all`, **20%** of backend traces are kept and **80%** of backend traces are dropped. + +{% image + source="http://localhost:1313/images/real_user_monitoring/connect_rum_and_traces/traceContextInjection_all-2.e5e60848ccc43280ba4d010674d73d9d.png?auto=format" + alt="traceContextInjection set to all" /%} + +- When `traceContextInjection` is set to `sampled`, **20%** of backend traces are kept. For the remaining **80%**, the browser SDK **does not inject** a sampling decision. The decision is made on the server side and is based on the tracing library head-based sampling [configuration](http://localhost:1313/tracing/trace_pipeline/ingestion_mechanisms/#head-based-sampling). In the example below, the backend sample rate is set to 40%, and therefore 32% of the remaining backend traces are kept. + + {% image + source="http://localhost:1313/images/real_user_monitoring/connect_rum_and_traces/traceContextInjection_sampled-3.39694a559199dae82b3c8888cdc18548.png?auto=format" + alt="traceContextInjection set to sampled" /%} + +{% alert level="info" %} +End-to-end tracing is available for requests fired after the Browser SDK is initialized. End-to-end tracing of the initial HTML document and early browser requests is not supported. +{% /alert %} + +{% /tab %} + +{% tab title="Android RUM" %} + +1. Set up [RUM Android Monitoring](http://localhost:1313/real_user_monitoring/android/). + +1. Set up [Android Trace Collection](http://localhost:1313/tracing/trace_collection/dd_libraries/android/?tab=kotlin). + +1. Add the Gradle dependency to the `dd-sdk-android-okhttp` library in the module-level `build.gradle` file: + + ```groovy + dependencies { + implementation "com.datadoghq:dd-sdk-android-okhttp:x.x.x" + } + ``` + +1. Configure the `OkHttpClient` interceptor with the list of internal, first-party origins called by your Android application. 
+ + ```kotlin + val tracedHosts = listOf("example.com", "example.eu") + + val okHttpClient = OkHttpClient.Builder() + .addInterceptor(DatadogInterceptor.Builder(tracedHosts).build()) + .addNetworkInterceptor(TracingInterceptor.Builder(tracedHosts).build()) + .eventListenerFactory(DatadogEventListener.Factory()) + .build() + ``` + +By default, all subdomains of listed hosts are traced. For instance, if you add `example.com`, you also enable the tracing for `api.example.com` and `foo.example.com`. + +1. *(Optional)* Configure the `traceSampler` parameter to keep a defined percentage of the backend traces. If not set, 20% of the traces coming from application requests are sent to Datadog. To keep 100% of backend traces: + + ```kotlin + val tracedHosts = listOf("example.com") + + val okHttpClient = OkHttpClient.Builder() + .addInterceptor( + DatadogInterceptor.Builder(tracedHosts) + .setTraceSampler(RateBasedSampler(100f)) + .build() + ) + .build() + ``` + +**Note**: + +- `traceSampler` **does not** impact RUM sessions sampling. Only backend traces are sampled out. +- If you define custom tracing header types in the Datadog configuration and are using a tracer registered with `GlobalTracer`, make sure the same tracing header types are set for the tracer in use. + +{% /tab %} + +{% tab title="iOS RUM" %} + +1. Set up [RUM iOS Monitoring](http://localhost:1313/real_user_monitoring/ios/). + +1. Enable `RUM` with the `urlSessionTracking` option and `firstPartyHostsTracing` parameter: + + ```swift + RUM.enable( + with: RUM.Configuration( + applicationID: "", + urlSessionTracking: .init( + firstPartyHostsTracing: .trace( + hosts: [ + "example.com", + "api.yourdomain.com" + ] + ) + ) + ) + ) + ``` + +1. Enable URLSession instrumentation for your `SessionDelegate` type, which conforms to `URLSessionDataDelegate` protocol: + + ```swift + URLSessionInstrumentation.enable( + with: .init( + delegateClass: .self + ) + ) + ``` + +1. Initialize URLSession as stated in [Setup](http://localhost:1313/real_user_monitoring/ios/): + + ```swift + let session = URLSession( + configuration: ..., + delegate: (), + delegateQueue: ... + ) + ``` + +By default, all subdomains of listed hosts are traced. For instance, if you add `example.com`, you also enable tracing for `api.example.com` and `foo.example.com`. + +Trace ID injection works when you are providing a `URLRequest` to the `URLSession`. Distributed tracing does not work when you are using a `URL` object. + +1. *(Optional)* Set the `sampleRate` parameter to keep a defined percentage of the backend traces. If not set, 20% of the traces coming from application requests are sent to Datadog. + +To keep 100% of backend traces: + + ```swift + RUM.enable( + with: RUM.Configuration( + applicationID: "", + urlSessionTracking: .init( + firstPartyHostsTracing: .trace( + hosts: [ + "example.com", + "api.yourdomain.com" + ], + sampleRate: 100 + ) + ) + ) + ) + ``` + +**Note**: `sampleRate` **does not** impact RUM sessions sampling. Only backend traces are sampled out. +{% /tab %} + +{% tab title="React Native RUM" %} + +1. Set up [RUM React Native Monitoring](http://localhost:1313/real_user_monitoring/reactnative/). + +1. Set the `firstPartyHosts` initialization parameter to define the list of internal, first-party origins called by your React Native application: + + ```javascript + const config = new DatadogProviderConfiguration( + // ... + ); + config.firstPartyHosts = ["example.com", "api.yourdomain.com"]; + ``` + +By default, all subdomains of listed hosts are traced. 
For instance, if you add `example.com`, you also enable tracing for `api.example.com` and `foo.example.com`. + +1. *(Optional)* Set the `resourceTracingSamplingRate` initialization parameter to keep a defined percentage of the backend traces. If not set, 20% of the traces coming from application requests are sent to Datadog. + +To keep 100% of backend traces: + + ```javascript + const config = new DatadogProviderConfiguration( + // ... + ); + config.resourceTracingSamplingRate = 100; + ``` + +**Note**: `resourceTracingSamplingRate` **does not** impact RUM sessions sampling. Only backend traces are sampled out. + +{% /tab %} + +{% tab title="Flutter RUM" %} + +1. Set up [RUM Flutter Monitoring](http://localhost:1313/real_user_monitoring/application_monitoring/flutter/setup/). + +1. Follow the instructions under [Automatically track resources](http://localhost:1313/real_user_monitoring/application_monitoring/flutter/advanced_configuration#automatically-track-resources) to include the Datadog Tracking HTTP Client package and enable HTTP tracking. This includes the following changes to your initialization to add a list of internal, first-party origins called by your Flutter application: + + ```dart + final configuration = DatadogConfiguration( + // ... + // added configuration + firstPartyHosts: ['example.com', 'api.yourdomain.com'], + )..enableHttpTracking() + ``` + +{% /tab %} + +{% tab title="Roku RUM" %} + +{% callout %} +# Important note for users on the following Datadog sites: app.ddog-gov.com + + + +{% alert level="warning" %} +RUM for Roku is not available on the US1-FED Datadog site. +{% /alert %} + + +{% /callout %} + +1. Set up [RUM Roku Monitoring](http://localhost:1313/real_user_monitoring/application_monitoring/roku/setup/). + +1. Use the `datadogroku_DdUrlTransfer` component to perform your network requests. + + ``` + ddUrlTransfer = datadogroku_DdUrlTransfer(m.global.datadogRumAgent) + ddUrlTransfer.SetUrl(url) + ddUrlTransfer.EnablePeerVerification(false) + ddUrlTransfer.EnableHostVerification(false) + result = ddUrlTransfer.GetToString() + ``` + +{% /tab %} + +{% tab title="Kotlin Multiplatform RUM" %} + +1. Set up [RUM Kotlin Multiplatform Monitoring](http://localhost:1313/real_user_monitoring/application_monitoring/kotlin_multiplatform/setup). + +1. Set up [Ktor instrumentation](http://localhost:1313/real_user_monitoring/application_monitoring/kotlin_multiplatform/setup?tab=rum#initialize-the-rum-ktor-plugin-to-track-network-events-made-with-ktor). + +1. Set the `tracedHosts` initialization parameter in the Datadog Ktor Plugin configuration to define the list of internal, first-party origins called by your Kotlin Multiplatform application: + + ```kotlin + val ktorClient = HttpClient { + install( + datadogKtorPlugin( + tracedHosts = mapOf( + "example.com" to setOf(TracingHeaderType.DATADOG), + "example.eu" to setOf(TracingHeaderType.DATADOG) + ), + traceSampleRate = 100f + ) + ) + } + ``` + +By default, all subdomains of listed hosts are traced. For instance, if you add `example.com`, you also enable tracing for `api.example.com` and `foo.example.com`. + +1. *(Optional)* Set the `traceSampleRate` initialization parameter to keep a defined percentage of the backend traces. If not set, 20% of the traces coming from application requests are sent to Datadog. 
+ +To keep 100% of backend traces: + + ```kotlin + val ktorClient = HttpClient { + install( + datadogKtorPlugin( + tracedHosts = mapOf( + "example.com" to setOf(TracingHeaderType.DATADOG), + "example.eu" to setOf(TracingHeaderType.DATADOG) + ), + traceSampleRate = 100f + ) + ) + } + ``` + +**Note**: `traceSampleRate` **does not** impact RUM sessions sampling. Only backend traces are sampled out. + +{% /tab %} + +### Verifying setup{% #verifying-setup %} + +To verify you've configured the APM integration with RUM, follow the steps below based on the SDK you installed RUM with. + +{% tab title="Browser" %} + +1. Visit a page in your application. +1. In your browser's developer tools, go to the **Network** tab. +1. Check the request headers for a resource request that you expect to be correlated contains the [correlation headers from Datadog](http://localhost:1313/real_user_monitoring/correlate_with_other_telemetry/apm?tab=browserrum#how-rum-resources-are-linked-to-traces). + +{% /tab %} + +{% tab title="Android" %} + +1. Run your application from Android Studio. +1. Visit a screen in your application. +1. Open Android Studio's [Network Inspector](https://developer.android.com/studio/debug/network-profiler#network-inspector-overview). +1. Check the request headers for a RUM resource and verify that the [required headers are set by the SDK](http://localhost:1313/real_user_monitoring/correlate_with_other_telemetry/apm?tab=androidrum#how-rum-resources-are-linked-to-traces). + +{% /tab %} + +{% tab title="iOS" %} + +1. Run your application from Xcode. +1. Visit a screen in your application. +1. Open Xcode's [Network Connections and HTTP Traffic instrument](https://developer.apple.com/documentation/foundation/url_loading_system/analyzing_http_traffic_with_instruments). +1. Check the request headers for a RUM resource and verify that the [required headers are set by the SDK](http://localhost:1313/real_user_monitoring/correlate_with_other_telemetry/apm/?tab=iosrum#how-rum-resources-are-linked-to-traces). + +{% /tab %} + +{% tab title="React Native" %} + +1. Run your application from Xcode (iOS) or Android Studio (Android). +1. Visit a screen in your application. +1. Open Xcode's [Network Connections and HTTP Traffic instrument](https://developer.apple.com/documentation/foundation/url_loading_system/analyzing_http_traffic_with_instruments) or Android Studio's [Network Inspector](https://developer.android.com/studio/debug/network-profiler#network-inspector-overview). +1. Check the request headers for a RUM resource and verify that the [required headers are set by the SDK](http://localhost:1313/real_user_monitoring/correlate_with_other_telemetry/apm/?tab=reactnativerum#how-rum-resources-are-linked-to-traces). + +{% /tab %} + +{% tab title="Flutter" %} + +1. Run your application using your preferred IDE or `flutter run`. +1. Visit a screen in your application. +1. Open Flutter's [Dev Tools](https://docs.flutter.dev/tools/devtools/overview) and navigate to [Network View](https://docs.flutter.dev/tools/devtools/network). +1. Check the request headers for a RUM resource and verify that the [required headers are set by the SDK](http://localhost:1313/real_user_monitoring/correlate_with_other_telemetry/apm/?tab=reactnativerum#how-rum-resources-are-linked-to-traces). + +{% /tab %} + +{% tab title="Kotlin Multiplatform" %} + +1. Run your application from Xcode (iOS) or Android Studio (Android). +1. Visit a screen in your application. +1. 
Open Xcode's [Network Connections and HTTP Traffic instrument](https://developer.apple.com/documentation/foundation/url_loading_system/analyzing_http_traffic_with_instruments) or Android Studio's [Network Inspector](https://developer.android.com/studio/debug/network-profiler#network-inspector-overview). +1. Check the request headers for a RUM resource and verify that the [required headers are set by the SDK](http://localhost:1313/real_user_monitoring/correlate_with_other_telemetry/apm/?tab=kotlinmultiplatformrum#how-rum-resources-are-linked-to-traces). + +{% /tab %} + +## RUM Explorer to Traces{% #rum-explorer-to-traces %} + +{% image + source="http://localhost:1313/images/real_user_monitoring/connect_rum_and_traces/rum-trace-apm-link.34b60fa384659e9bd5230f03794afd0a.png?auto=format" + alt="RUM and Traces" /%} + +To view traces from the RUM Explorer: + +1. Navigate to your [list of sessions](https://app.datadoghq.com/rum/explorer) and click on a session that has traces available. You can also query for sessions with traces by using`@_dd.trace_id:*`. + +When you select a session, the session panel appears with a request duration breakdown, a flame graph for each span, and a **View Trace in APM** link. + +## Traces to RUM Explorer{% #traces-to-rum-explorer %} + +{% image + source="http://localhost:1313/images/real_user_monitoring/connect_rum_and_traces/rum-traces-to-rum.5866e11511b33bb0f2b3d4dd2cad7936.png?auto=format" + alt="RUM and Traces" /%} + +To view the RUM event from Traces: + +1. Within a trace view, click **VIEW** to see all traces created during the view's lifespan, or **RESOURCE** to see traces associated with the specific resource from the Overview tab. +1. Click **See View in RUM** or **See Resource in RUM** to open the corresponding event in the RUM Explorer. + +## Supported libraries{% #supported-libraries %} + +Below is a list of the supported backend libraries that need to be on the services receiving the network requests. + +| Library | Minimum Version | +| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------- | +| [Python](http://localhost:1313/tracing/trace_collection/dd_libraries/python/) | [0.22.0](https://github.com/DataDog/dd-trace-py/releases/tag/v0.22.0) | +| [Go](http://localhost:1313/tracing/trace_collection/dd_libraries/go/) | [1.10.0](https://github.com/DataDog/dd-trace-go/releases/tag/v1.10.0) | +| [Java](http://localhost:1313/tracing/trace_collection/dd_libraries/java/) | [0.24.1](https://github.com/DataDog/dd-trace-java/releases/tag/v0.24.1) | +| [Ruby](http://localhost:1313/tracing/trace_collection/dd_libraries/ruby/) | [0.20.0](https://github.com/DataDog/dd-trace-rb/releases/tag/v0.20.0) | +| [JavaScript](http://localhost:1313/tracing/trace_collection/dd_libraries/nodejs/) | [0.10.0](https://github.com/DataDog/dd-trace-js/releases/tag/v0.10.0) | +| [PHP](http://localhost:1313/tracing/trace_collection/dd_libraries/php/) | [0.33.0](https://github.com/DataDog/dd-trace-php/releases/tag/0.33.0) | +| [.NET](http://localhost:1313/tracing/trace_collection/dd_libraries/dotnet-core/) | [1.18.2](https://github.com/DataDog/dd-trace-dotnet/releases/tag/v1.18.2) | + +## OpenTelemetry support{% #opentelemetry-support %} + +RUM supports several propagator types to connect resources with backends that are instrumented with OpenTelemetry libraries. + +The default injection style is `tracecontext`, `Datadog`. 
+ +{% tab title="Browser RUM" %} +**Note**: If you are using a backend framework such as Next.js/Vercel that uses OpenTelemetry, follow these steps. + +1. Set up RUM to connect with APM as described above. + +1. Modify `allowedTracingUrls` as follows: + + ```javascript + import { datadogRum } from '@datadog/browser-rum' + + datadogRum.init({ + ...otherConfig, + allowedTracingUrls: [ + { match: "https://api.example.com", propagatorTypes: ["tracecontext"]} + ] + }) + ``` + +`match` accepts the same parameter types (`string`, `RegExp` or `function`) as when used in its simple form, described above. + +`propagatorTypes` accepts a list of strings for desired propagators: + + - `datadog`: Datadog's propagator (`x-datadog-*`) + - `tracecontext`: [W3C Trace Context](https://www.w3.org/TR/trace-context/) (`traceparent`, `tracestate`) + - `b3`: [B3 single header](https://github.com/openzipkin/b3-propagation#single-header) (`b3`) + - `b3multi`: [B3 multiple headers](https://github.com/openzipkin/b3-propagation#multiple-headers) (`X-B3-*`) + +{% /tab %} + +{% tab title="iOS RUM" %} + +1. Set up RUM to connect with APM as described above. + +1. Use `.traceWithHeaders(hostsWithHeaders:sampleRate:)` instead of `.trace(hosts:sampleRate:)` as follows: + + ```swift + RUM.enable( + with: RUM.Configuration( + applicationID: "", + urlSessionTracking: .init( + firstPartyHostsTracing: .traceWithHeaders( + hostsWithHeaders: [ + "api.example.com": [.tracecontext] + ], + sampleRate: 100 + ) + ) + ) + ) + ``` + +`.traceWithHeaders(hostsWithHeaders:sampleRate:)` takes `Dictionary>` as a parameter, where the key is a host and the value is a list of supported tracing header types. + +`TracingHeaderType` in an enum representing the following tracing header types: + + - `.datadog`: Datadog's propagator (`x-datadog-*`) + - `.tracecontext`: [W3C Trace Context](https://www.w3.org/TR/trace-context/) (`traceparent`) + - `.b3`: [B3 single header](https://github.com/openzipkin/b3-propagation#single-header) (`b3`) + - `.b3multi`: [B3 multiple headers](https://github.com/openzipkin/b3-propagation#multiple-headers) (`X-B3-*`) + +{% /tab %} + +{% tab title="Android RUM" %} + +1. Set up RUM to connect with APM as described above. + +1. Configure the `OkHttpClient` interceptor with the list of internal, first-party origins and the tracing header type to use as follows: + + ```kotlin + val tracedHosts = mapOf("example.com" to setOf(TracingHeaderType.TRACECONTEXT), + "example.eu" to setOf(TracingHeaderType.DATADOG)) + + val okHttpClient = OkHttpClient.Builder() + .addInterceptor(DatadogInterceptor.Builder(tracedHosts).build()) + .addNetworkInterceptor(TracingInterceptor.Builder(tracedHosts).build()) + .eventListenerFactory(DatadogEventListener.Factory()) + .build() + ``` + +`TracingHeaderType` is an enum representing the following tracing header types: + + - `.DATADOG`: Datadog's propagator (`x-datadog-*`) + - `.TRACECONTEXT`: [W3C Trace Context](https://www.w3.org/TR/trace-context/) (`traceparent`) + - `.B3`: [B3 single header](https://github.com/openzipkin/b3-propagation#single-header) (`b3`) + - `.B3MULTI`: [B3 multiple headers](https://github.com/openzipkin/b3-propagation#multiple-headers) (`X-B3-*`) + +{% /tab %} + +{% tab title="React Native RUM" %} + +1. Set up RUM to connect with APM. + +1. Configure the RUM SDK with the list of internal, first-party origins and the tracing header type to use as follows: + + ```javascript + const config = new DatadogProviderConfiguration( + // ... 
+ ); + config.firstPartyHosts = [{ + match: "example.com", + propagatorTypes: [ + PropagatorType.TRACECONTEXT, + PropagatorType.DATADOG + ] + }]; + ``` + +`PropagatorType` is an enum representing the following tracing header types: + + - `PropagatorType.DATADOG`: Datadog's propagator (`x-datadog-*`) + - `PropagatorType.TRACECONTEXT`: [W3C Trace Context](https://www.w3.org/TR/trace-context/) (`traceparent`) + - `PropagatorType.B3`: [B3 single header](https://github.com/openzipkin/b3-propagation#single-header) (`b3`) + - `PropagatorType.B3MULTI`: [B3 multiple headers](https://github.com/openzipkin/b3-propagation#multiple-headers) (`X-B3-*`) + +{% /tab %} + +{% tab title="Flutter RUM" %} + +1. Set up RUM to connect with APM as described above. + +1. Use `firstPartyHostsWithTracingHeaders` instead of `firstPartyHosts` as follows: + + ```dart + final configuration = DatadogConfiguration( + // ... + // added configuration + firstPartyHostsWithTracingHeaders: { + 'example.com': { TracingHeaderType.tracecontext }, + }, + )..enableHttpTracking() + ``` + +`firstPartyHostsWithTracingHeaders` takes `Map>` as a parameter, where the key is a host and the value is a list of supported tracing header types. + +`TracingHeaderType` in an enum representing the following tracing header types: + + - `TracingHeaderType.datadog`: Datadog's propagator (`x-datadog-*`) + - `TracingHeaderType.tracecontext`: [W3C Trace Context](https://www.w3.org/TR/trace-context/) (`traceparent`) + - `TracingHeaderType.b3`: [B3 single header](https://github.com/openzipkin/b3-propagation#single-header) (`b3`) + - `TracingHeaderType.b3multi`: [B3 multiple headers](https://github.com/openzipkin/b3-propagation#multiple-headers) (`X-B3-*`) + +{% /tab %} + +{% tab title="Kotlin Multiplatform RUM" %} + +1. Set up RUM to connect with APM. + +1. Configure the RUM SDK with the list of internal, first-party origins and the tracing header type to use as follows: + + ```kotlin + val ktorClient = HttpClient { + install( + datadogKtorPlugin( + tracedHosts = mapOf( + "example.com" to setOf(TracingHeaderType.DATADOG), + "example.eu" to setOf(TracingHeaderType.DATADOG) + ), + traceSampleRate = 100f + ) + ) + } + ``` + +`TracingHeaderType` is an enum representing the following tracing header types: + + - `TracingHeaderType.DATADOG`: Datadog's propagator (`x-datadog-*`) + - `TracingHeaderType.TRACECONTEXT`: [W3C Trace Context](https://www.w3.org/TR/trace-context/) (`traceparent`) + - `TracingHeaderType.B3`: [B3 single header](https://github.com/openzipkin/b3-propagation#single-header) (`b3`) + - `TracingHeaderType.B3MULTI`: [B3 multiple headers](https://github.com/openzipkin/b3-propagation#multiple-headers) (`X-B3-*`) + +{% /tab %} + +## How RUM resources are linked to traces{% #how-rum-resources-are-linked-to-traces %} + +Datadog uses the distributed tracing protocol and sets up the HTTP headers below. By default, both trace context and Datadog-specific headers are used. + +{% tab title="Datadog" %} + +{% dl %} + +{% dt %} +`x-datadog-trace-id` +{% /dt %} + +{% dd %} +Generated from the Real User Monitoring SDK. Allows Datadog to link the trace with the RUM resource. +{% /dd %} + +{% dt %} +`x-datadog-parent-id` +{% /dt %} + +{% dd %} +Generated from the Real User Monitoring SDK. Allows Datadog to generate the first span from the trace. +{% /dd %} + +{% dt %} +`x-datadog-origin: rum` +{% /dt %} + +{% dd %} +To make sure the generated traces from Real User Monitoring don't affect your APM Index Spans counts. 
+{% /dd %} + +{% dt %} +`x-datadog-sampling-priority` +{% /dt %} + +{% dd %} +Set to `1` by the Real User Monitoring SDK if the trace was sampled, or `0` if it was not. +{% /dd %} + +{% /dl %} + +{% /tab %} + +{% tab title="W3C Trace Context" %} + +{% dl %} + +{% dt %} +`traceparent: [version]-[trace id]-[parent id]-[trace flags]` +{% /dt %} + +{% dd %} +`version`: The current specification assumes version is set to `00`. +{% /dd %} + +{% dd %} +`trace id`: 128 bits trace ID, hexadecimal on 32 characters. The source trace ID is 64 bits to keep compatibility with APM. +{% /dd %} + +{% dd %} +`parent id`: 64 bits span ID, hexadecimal on 16 characters. +{% /dd %} + +{% dd %} +`trace flags`: Sampled (`01`) or not sampled (`00`) +{% /dd %} + +{% /dl %} + +**Trace ID Conversion**: The 128-bit W3C trace ID is created by padding the original 64-bit source trace ID with leading zeros. This ensures compatibility with APM while conforming to the W3C Trace Context specification. The original 64-bit trace ID becomes the lower 64 bits of the 128-bit W3C trace ID. + +{% dl %} + +{% dt %} +`tracestate: dd=s:[sampling priority];o:[origin]` +{% /dt %} + +{% dd %} +`dd`: Datadog's vendor prefix. +{% /dd %} + +{% dd %} +`sampling priority`: Set to `1` if the trace was sampled, or `0` if it was not. +{% /dd %} + +{% dd %} +`origin`: Always set to `rum` to make sure the generated traces from Real User Monitoring don't affect your APM Index Spans counts. +{% /dd %} + +{% /dl %} + +**Examples**: + +Source trace ID (64-bit): `8448eb211c80319c` + +W3C Trace Context (128-bit): `00000000000000008448eb211c80319c` + +The relationship shows that the original 64-bit trace ID `8448eb211c80319c` is padded with 16 leading zeros (`0000000000000000`) to create the 128-bit W3C trace ID. + +{% dl %} + +{% dt %} +Complete traceparent example: +{% /dt %} + +{% dd %} +`traceparent: 00-00000000000000008448eb211c80319c-b7ad6b7169203331-01` +{% /dd %} + +{% dd %} +`tracestate: dd=s:1;o:rum` +{% /dd %} + +{% /dl %} + +{% /tab %} + +{% tab title="b3 / b3 Multiple Headers" %} + +{% dl %} + +{% dt %} +`b3: [trace id]-[span id]-[sampled]` +{% /dt %} + +{% dd %} +`trace id`: 64 bits trace ID, hexadecimal on 16 characters. +{% /dd %} + +{% dd %} +`span id`: 64 bits span ID, hexadecimal on 16 characters. +{% /dd %} + +{% dd %} +`sampled`: True (`1`) or False (`0`) +{% /dd %} + +{% dt %} +Example for b3 single header: +{% /dt %} + +{% dd %} +`b3: 8448eb211c80319c-b7ad6b7169203331-1` +{% /dd %} + +{% dt %} +Example for b3 multiple headers: +{% /dt %} + +{% dd %} +`X-B3-TraceId: 8448eb211c80319c` +{% /dd %} + +{% dd %} +`X-B3-SpanId: b7ad6b7169203331` +{% /dd %} + +{% dd %} +`X-B3-Sampled: 1` +{% /dd %} + +{% /dl %} + +{% /tab %} + + + +These HTTP headers are not CORS-safelisted, so you need to [configure Access-Control-Allow-Headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers) on your server handling requests that the SDK is set up to monitor. The server must also accept [preflight requests](https://developer.mozilla.org/en-US/docs/Glossary/Preflight_request) (OPTIONS requests), which are made by the browser prior to every request when tracing is allowed on cross-site URLs. + +## Effect on APM quotas{% #effect-on-apm-quotas %} + +Connecting RUM and traces may significantly increase the APM-ingested volumes. Use the initialization parameter `traceSampleRate` to keep a share of the backend traces starting from browser and mobile requests. 
+ +## Trace retention{% #trace-retention %} + +These traces are available for 15 minutes in the [Live Search](http://localhost:1313/tracing/trace_explorer/#live-search-for-15-minutes) explorer. To retain the traces for a longer period of time, create [retention filters](http://localhost:1313/tracing/trace_pipeline/trace_retention/#retention-filters). Scope these retention filters on any span tag to retain traces for critical pages and user actions. + +## Further Reading{% #further-reading %} + +- [Real User Monitoring](https://www.datadoghq.com/blog/real-user-monitoring-with-datadog/) +- [Start monitoring single-page applications](https://www.datadoghq.com/blog/modern-frontend-monitoring/) +- [Ease troubleshooting with cross-product correlation](http://localhost:1313/logs/guide/ease-troubleshooting-with-cross-product-correlation/) +- [APM and Distributed Tracing](http://localhost:1313/tracing/) +- [RUM & Session Replay](http://localhost:1313/real_user_monitoring) +- [Troubleshoot with Session Replay browser dev tools](https://www.datadoghq.com/blog/troubleshoot-with-session-replay-developer-tools/) +- [Correlate Datadog RUM events with traces from OpenTelemetry-instrumented applications](https://www.datadoghq.com/blog/correlate-traces-datadog-rum-otel/) diff --git a/opentelemetry-mdoc/getting_started/datadog_example/index.md b/opentelemetry-mdoc/getting_started/datadog_example/index.md new file mode 100644 index 0000000000000..b20226d69b049 --- /dev/null +++ b/opentelemetry-mdoc/getting_started/datadog_example/index.md @@ -0,0 +1,358 @@ +--- +title: Getting Started with OpenTelemetry at Datadog +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Getting Started with OpenTelemetry at + Datadog > Getting Started with OpenTelemetry at Datadog +--- + +# Getting Started with OpenTelemetry at Datadog + +{% callout %} +##### Try "Understanding OpenTelemetry" in the Learning Center + +Learn the fundamentals of OpenTelemetry, including its capabilities and benefits, key components, and how OTel and Datadog work together. + +[ENROLL NOW](https://learn.datadoghq.com/courses/understanding-opentelemetry) +{% /callout %} + +## Overview{% #overview %} + +[OpenTelemetry](https://opentelemetry.io/) is an open source observability framework that provides IT teams with standardized protocols and tools for collecting and routing observability data from software applications. OpenTelemetry provides a consistent format for instrumenting (Instrumentation is the process of adding code to your application to capture and report observability data to Datadog, such as traces, metrics, and logs.), generating, gathering, and exporting application observability data—namely metrics, logs, and traces—to monitoring platforms for analysis and insight. + +This guide demonstrates how to configure [a sample OpenTelemetry application](https://github.com/DataDog/opentelemetry-examples/tree/main/apps/rest-services/java/calendar) to send observability data to Datadog using the OpenTelemetry SDK, OpenTelemetry Collector, and [Datadog Exporter](http://localhost:1313/opentelemetry/collector_exporter/). This guide also shows you how to explore this data in the Datadog UI. + +Follow this guide to: + +1. Instrument the application with the OpenTelemetry API. +1. Configure the application to send observability data to Datadog. +1. Correlate observability data with unified service tagging. +1. Run the application to generate observability data. +1. 
Explore observability data in the Datadog UI. + +## Prerequisites{% #prerequisites %} + +To complete this guide, you need the following: + +1. [Create a Datadog account](https://www.datadoghq.com/free-datadog-trial/) if you haven't yet. +1. Set up your Datadog API key: + 1. Find or create your [Datadog API key](https://app.datadoghq.com/organization-settings/api-keys/). + 1. Export your Datadog API key to an environment variable: + ```sh + export DD_API_KEY= +``` +1. Get the sample [Calendar](https://github.com/DataDog/opentelemetry-examples/tree/main/apps/rest-services/java/calendar) application. + 1. Clone the `opentelemetry-examples` repository to your device: + ```sh + git clone https://github.com/DataDog/opentelemetry-examples.git +``` + 1. Navigate to the `/calendar` directory: + ```sh + cd opentelemetry-examples/apps/rest-services/java/calendar +``` +1. Install [Docker Compose](https://docs.docker.com/compose/install/). +1. (Optional) Use Linux to send infrastructure metrics. + +The Calendar application uses OpenTelemetry tools to generate and collect metrics, logs, and traces. The following steps explain how to get this observability data into Datadog. + +## Instrumenting the application{% #instrumenting-the-application %} + +The Calendar sample application is already partially [instrumented](http://localhost:1313/tracing/trace_collection/custom_instrumentation/otel_instrumentation/): + +1. Go to the main `CalendarService.java` file located at: `./src/main/java/com/otel/service/CalendarService.java`. + +1. The following code instruments `getDate()` using the OpenTelemetry annotations and API: + +In the `CalendarService.java` file: + + ```java + @WithSpan(kind = SpanKind.CLIENT) + public String getDate() { + Span span = Span.current(); + span.setAttribute("peer.service", "random-date-service"); + ... + } +``` + +When the Calendar application runs, the `getDate()` call generates [traces](http://localhost:1313/tracing/glossary/#trace) and spans. + +## Configuring the application{% #configuring-the-application %} + +### OTLP Receiver{% #otlp-receiver %} + +The Calendar application is already configured to send data from the OpenTelemetry SDK to the [OpenTelemetry Protocol (OTLP) receiver](http://localhost:1313/opentelemetry/collector_exporter/otlp_receiver/) in the OpenTelemetry Collector. + +1. Go to the Collector configuration file located at: `./src/main/resources/otelcol-config.yaml`. + +1. The following lines configure the OTLP Receiver to receive metrics, traces, and logs: + +In the `otelcol-config.yaml` file: + + ```yaml + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + ... + service: + pipelines: + traces: + receivers: [otlp] + metrics: + receivers: [otlp] + logs: + receivers: [otlp] +``` + +### Datadog Exporter{% #datadog-exporter %} + +The Datadog Exporter sends data collected by the OTLP Receiver to the Datadog backend. + +1. Go to the `otelcol-config.yaml` file. + +1. 
The following lines configure the Datadog Exporter to send observability data to Datadog: + +In the `otelcol-config.yaml` file: + + ```yaml + exporters: + datadog: + traces: + compute_stats_by_span_kind: true + trace_buffer: 500 + hostname: "otelcol-docker" + api: + key: ${DD_API_KEY} + site: datadoghq.com + + connectors: + datadog/connector: + traces: + compute_stats_by_span_kind: true + + service: + pipelines: + metrics: + receivers: [otlp, datadog/connector] # <- update this line + exporters: [datadog] + traces: + exporters: [datadog, datadog/connector] + logs: + exporters: [datadog] +``` + +1. Set `exporters.datadog.api.site` to your [Datadog site](http://localhost:1313/getting_started/site/). Otherwise, it defaults to US1. + +This configuration allows the Datadog Exporter to send runtime metrics, traces, and logs to Datadog. However, sending infrastructure metrics requires additional configuration. + +### OpenTelemetry Collector{% #opentelemetry-collector %} + +In this example, configure your OpenTelemetry Collector to send infrastructure metrics. + +{% alert level="info" %} +To send infrastructure metrics from the OpenTelemetry Collector to Datadog, you must use Linux. This is a limitation of the Docker Stats receiver. +{% /alert %} + +To collect container metrics, configure the [Docker stats receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/dockerstatsreceiver/) in your Datadog Exporter: + +1. Add a `docker_stats` block to the `receivers` section of `otel-config.yaml`: + +In the `otelcol-config.yaml` file: + + ```yaml + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + # add the following block + docker_stats: + endpoint: unix:///var/run/docker.sock # default; if this is not the Docker socket path, update to the correct path + metrics: + container.network.io.usage.rx_packets: + enabled: true + container.network.io.usage.tx_packets: + enabled: true + container.cpu.usage.system: + enabled: true + container.memory.rss: + enabled: true + container.blockio.io_serviced_recursive: + enabled: true + container.uptime: + enabled: true + container.memory.hierarchical_memory_limit: + enabled: true +``` + +1. Update `service.pipelines.metrics.receivers` to include `docker_stats`: + +In the `otelcol-config.yaml` file: + + ```yaml + service: + pipelines: + metrics: + receivers: [otlp, datadog/connector, docker_stats] # <- update this line +``` + +This configuration allows the Calendar application to send container metrics to Datadog for you to explore in Datadog. + +### Sending observability data with OTLP{% #sending-observability-data-with-otlp %} + +The Calendar application uses the OpenTelemetry logging exporter in its Logback configuration to send logs with OpenTelemetry Layer Processor (OTLP). + +1. Go to the Calendar application's Logback XML configuration file at `/src/main/resources/logback.xml`. + +1. The following lines define the `OpenTelemetry` appender: + +In the `logback.xml` file: + + ```xml + + true + true + true + +``` + +1. The `` line references the `OpenTelemetry` appender in the root level configuration: + +In the `logback.xml` file: + + ```xml + + + + +``` + +Additionally, environment variables configure the OpenTelemetry environment to export logs, metrics, and traces: + +1. Go to the Calendar application's Docker Compose file at `./deploys/docker/docker-compose-otelcol.yml`. +1. 
The `OTEL_EXPORTER_OTLP_ENDPOINT=http://otelcol:4317` configuration allows the metrics, traces, and logs to be sent with OTLP. + +## Correlating observability data{% #correlating-observability-data %} + +[Unified service tagging](http://localhost:1313/getting_started/tagging/unified_service_tagging/) ties observability data together in Datadog so you can navigate across metrics, traces, and logs with consistent tags. + +The Calendar application is already configured with unified service tagging: + +1. Go to the Calendar application's Docker Compose file at `./deploys/docker/docker-compose-otelcol.yml`. + +1. The following lines enable the correlation between application traces and other observability data: + +In the `docker-compose-otelcol.yml` file: + + ```yaml + environment: + - OTEL_SERVICE_NAME=calendar-otel + - OTEL_RESOURCE_ATTRIBUTES=deployment.environment=docker,host.name=otelcol-docker,service.version= +``` + +## Running the application{% #running-the-application %} + +To start generating and forwarding observability data to Datadog, you need to run the Calendar application with the OpenTelemetry SDK: + +1. Run the application from the `calendar/` folder: + + + + ```sh + docker compose -f deploys/docker/docker-compose-otelcol.yml up +``` +This command creates a Docker container with the OpenTelemetry Collector and the Calendar service. + + +1. To test that the Calendar application is running correctly, execute the following command from another terminal window: + + ```sh + curl localhost:9090/calendar +``` + +1. Verify that you receive a response like: + + ```sh + {"date":"2022-12-30"} +``` + +1. Run the `curl` command several times to ensure at least one trace exports to the Datadog backend. +Important alert (level: info): The Calendar application uses the probabilistic sampler processor, so only 30% of traces sent through the application reach the target backend. + +Each call to the Calendar application results in metrics, traces, and logs being forwarded to the OpenTelemetry Collector, then to the Datadog Exporter, and finally to the Datadog backend. + +## Exploring observability data in Datadog{% #exploring-observability-data-in-datadog %} + +Use the Datadog UI to explore the Calendar application's observability data. + +**Note**: It may take a few minutes for your trace data to appear. + +### Runtime and infrastructure metrics{% #runtime-and-infrastructure-metrics %} + +View runtime and infrastructure metrics to visualize, monitor, and measure the performance of your applications, hosts, containers, and processes. + +1. Go to **APM** > **Software Catalog**. + +1. Hover over the `calendar-otel` service and select **Full Page**. + +1. Scroll to the bottom panel and select: + + - **Infrastructure Metrics** to see your Docker container metrics, such as CPU and memory usage. + - **JVM Metrics** to see runtime metrics, such as heap usage and thread count. + + {% image + source="http://localhost:1313/images/getting_started/opentelemetry/infra_and_jvm2.ee035ce972f22cc98ad390910586b971.png?auto=format" + alt="View Infrastructure metrics and JVM Runtime metrics for the Calendar application" /%} + +### Logs{% #logs %} + +View logs to monitor and troubleshoot application and system operations. + +1. Go to **Logs**. +1. If you have other logs in the list, add `@service.name:calendar-otel` to the **Search for** field to only see logs from the Calendar application. +1. Select a log from the list to see more details. 
+ +{% image + source="http://localhost:1313/images/getting_started/opentelemetry/logs2.43c742c87041b0969a2624681f040143.png?auto=format" + alt="View Logs for the Calendar application" /%} + +### Traces{% #traces %} + +View traces and spans to observe the status and performance of requests processed by your application. + +1. Go to **APM** > **Traces**. + +1. Find the **Service** section in the filter menu, and select the `calendar-otel` facet to display all `calendar-otel` traces: + + {% image + source="http://localhost:1313/images/getting_started/opentelemetry/traces2.b1f59d9e3f89f11e0a4747c1d26056ba.png?auto=format" + alt="View Traces for the Calendar application" /%} + +1. [Explore your `calendar-otel` traces](http://localhost:1313/tracing/glossary/#trace). + +To start, click on a trace to open the trace side panel and find more details about the trace and its spans. For example, the [Flame Graph](https://www.datadoghq.com/knowledge-center/distributed-tracing/flame-graph/) captures how much time was spent in each component of the Calendar execution path: + + {% image + source="http://localhost:1313/images/getting_started/opentelemetry/flame_graph2.d5056f017bfdb484c4c45b7360e86541.png?auto=format" + alt="View the Flame Graph for a Calendar application trace" /%} + +1. Notice that you can select **Infrastructure**, **Metrics**, or **Logs** in the bottom panel to correlate your trace with other observability data. + + {% image + source="http://localhost:1313/images/getting_started/opentelemetry/trace_logs_correlation.3365e01c45c01ddc449d9e8556c9e7ba.png?auto=format" + alt="Correlate a Calendar application trace with logs" /%} + +## Further reading{% #further-reading %} + +- [OpenTelemetry Documentation](https://opentelemetry.io/docs/) +- [Datadog OpenTelemetry Documentation](http://localhost:1313/opentelemetry) +- [Custom Instrumentation with OpenTelemetry](http://localhost:1313/tracing/trace_collection/custom_instrumentation/otel_instrumentation) +- [Monitor runtime metrics from OTel-instrumented apps in Datadog APM](https://www.datadoghq.com/blog/opentelemetry-runtime-metrics-datadog) +- [Correlate Datadog RUM events with traces from OTel-instrumented applications](https://www.datadoghq.com/blog/correlate-traces-datadog-rum-otel/) +- [Send metrics and traces from OpenTelemetry Collector to Datadog via Datadog Exporter](https://www.datadoghq.com/blog/ingest-opentelemetry-traces-metrics-with-datadog-exporter) +- [Forward logs from the OpenTelemetry Collector with the Datadog Exporter](https://www.datadoghq.com/blog/opentelemetry-logs-datadog-exporter) diff --git a/opentelemetry-mdoc/getting_started/index.md b/opentelemetry-mdoc/getting_started/index.md new file mode 100644 index 0000000000000..99fefd990fff1 --- /dev/null +++ b/opentelemetry-mdoc/getting_started/index.md @@ -0,0 +1,38 @@ +--- +title: Getting Started with OpenTelemetry at Datadog +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Getting Started with OpenTelemetry at + Datadog +--- + +# Getting Started with OpenTelemetry at Datadog + +{% callout %} +##### Try "Understanding OpenTelemetry" in the Learning Center + +Learn the fundamentals of OpenTelemetry, including its capabilities and benefits, key components, and how OTel and Datadog work together. 
+ +[ENROLL NOW](https://learn.datadoghq.com/courses/understanding-opentelemetry) +{% /callout %} + +## Overview{% #overview %} + +OpenTelemetry is an open source framework that provides standardized tools for collecting observability data from your applications. Datadog fully supports OpenTelemetry, allowing you to send your metrics, traces, and logs for powerful analysis and monitoring. + +These guides provide two hands-on ways to learn how to send OpenTelemetry data to Datadog. Choose the tutorial that best fits your learning goal. + +## Getting started tutorials{% #getting-started-tutorials %} + +- [ + ### Explore OTel with a Datadog Example +Follow a step-by-step guide using a simple, Datadog-provided Java application. This is the quickest way to see how the OTLP receiver and Datadog Exporter work together.](http://localhost:1313/opentelemetry/getting_started/datadog_example) +- [ + ### Set up the Official OpenTelemetry Demo +Deploy the community-maintained microservices demo application. This is a more comprehensive example that showcases a realistic, multi-service environment with traces, metrics, and logs.](http://localhost:1313/opentelemetry/getting_started/otel_demo_to_datadog) + +## Further reading{% #further-reading %} + +- [Instrument Your Applications](http://localhost:1313/opentelemetry/instrument/) +- [Send Data to Datadog](http://localhost:1313/opentelemetry/setup/) +- [OpenTelemetry Guides](http://localhost:1313/opentelemetry/guide) diff --git a/opentelemetry-mdoc/getting_started/otel_demo_to_datadog/index.md b/opentelemetry-mdoc/getting_started/otel_demo_to_datadog/index.md new file mode 100644 index 0000000000000..3bc831267bfb4 --- /dev/null +++ b/opentelemetry-mdoc/getting_started/otel_demo_to_datadog/index.md @@ -0,0 +1,345 @@ +--- +title: Sending Data from the OpenTelemetry Demo to Datadog +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Getting Started with OpenTelemetry at + Datadog > Sending Data from the OpenTelemetry Demo to Datadog +--- + +# Sending Data from the OpenTelemetry Demo to Datadog + +## Overview{% #overview %} + +The [OpenTelemetry Demo](https://github.com/open-telemetry/opentelemetry-demo) is a microservices demo application developed by the community to demonstrate OpenTelemetry (OTel) instrumentation and its observability capabilities. It is an e-commerce web page composed of multiple microservices communicating with each other through HTTP and gRPC. All services are instrumented with OpenTelemetry and produce traces, metrics, and logs. + +This page guides you through the steps required to deploy the OpenTelemetry Demo and send its data to Datadog. + +## Prerequisites{% #prerequisites %} + +To complete this guide, ensure you have the following: + +1. [Create a Datadog account](https://www.datadoghq.com/free-datadog-trial/) if you haven't yet. +1. Find or create your [Datadog API key](https://app.datadoghq.com/organization-settings/api-keys/). +1. 6 GB of free RAM for the application. + +You can deploy the demo using Docker or Kubernetes (with Helm). 
Choose your preferred deployment method and make sure you have the necessary tools installed: + +{% tab title="Docker" %} + +- Docker +- Docker Compose v2.0.0+ +- Make (optional) + +{% /tab %} + +{% tab title="Kubernetes" %} + +- Kubernetes 1.24+ +- Helm 3.9+ +- An active Kubernetes cluster with kubectl configured to connect to it + +{% /tab %} + +## Configuring and deploying the demo{% #configuring-and-deploying-the-demo %} + +### Cloning the repository{% #cloning-the-repository %} + +Clone the `opentelemetry-demo` repository to your device: + +```shell +git clone https://github.com/open-telemetry/opentelemetry-demo.git +``` + +### Configuring the OpenTelemetry Collector{% #configuring-the-opentelemetry-collector %} + +To send the demo's telemetry data to Datadog you need to add three components to the OpenTelemetry Collector configuration: + +- `Resource Processor` is an `optional` component which is recommended, used to set the `env` tag for Datadog. +- `Datadog Connector` is responsible for computing Datadog APM Trace Metrics. +- `Datadog Exporter` is responsible for exporting Traces, Metrics and Logs to Datadog. +- `Datadog Extension` is an `optional` component which allows you to view OpenTelemetry Collector configuration within infrastructure monitoring. (Read more at [Datadog Extension](http://localhost:1313/opentelemetry/integrations/datadog_extension/)). + +Complete the following steps to configure these three components. + +{% tab title="Docker" %} + +1. Open the demo repository. Create a file called `docker-compose.override.yml` in the root folder. + +1. Open the created file. Paste the following content and set the [Datadog site](http://localhost:1313/getting_started/site/) and [Datadog API key](https://app.datadoghq.com/organization-settings/api-keys/) environment variables: + + ```yaml + services: + otel-collector: + command: + - "--config=/etc/otelcol-config.yml" + - "--config=/etc/otelcol-config-extras.yml" + - "--feature-gates=datadog.EnableOperationAndResourceNameV2" + environment: + - DD_SITE_PARAMETER= + - DD_API_KEY= + ``` + +1. 
To configure the OpenTelemetry Collector, open `src/otel-collector/otelcol-config-extras.yml` and add the following to the file: + + ```yaml + extensions: + datadog/extension: + api: + site: ${env:DD_SITE_PARAMETER} + key: ${env:DD_API_KEY} + http: + endpoint: "localhost:9875" + path: "/metadata" + + exporters: + datadog: + traces: + compute_stats_by_span_kind: true + trace_buffer: 500 + hostname: "otel-collector-docker" + api: + site: ${env:DD_SITE_PARAMETER} + key: ${env:DD_API_KEY} + + processors: + resource: + attributes: + - key: deployment.environment + value: "otel" + action: upsert + + connectors: + datadog/connector: + traces: + compute_stats_by_span_kind: true + + service: + extensions: [datadog/extension] + pipelines: + traces: + processors: [resource, resourcedetection, memory_limiter, transform, batch] + exporters: [otlp, debug, spanmetrics, datadog, datadog/connector] + metrics: + receivers: [datadog/connector, docker_stats, httpcheck/frontend-proxy, hostmetrics, nginx, otlp, postgresql, redis, spanmetrics] + processors: [resource, resourcedetection, memory_limiter, transform, batch] + exporters: [otlphttp/prometheus, debug, datadog] + logs: + processors: [resource, resourcedetection, memory_limiter, transform, batch] + exporters: [opensearch, debug, datadog] + ``` + +By default, the collector in the demo application merges the configuration from two files: + + - `src/otel-collector/otelcol-config.yml`: contains the default configuration for the collector. + - `src/otel-collector/otelcol-config-extras.yml`: used to add extra configuration to the collector. +Important alert (level: info): When merging YAML values, objects are merged and arrays are replaced. That's why there are more components specified in the pipelines than actually configured. The previous configuration does not replace the values configured in the main `otelcol-config` file. + +{% /tab %} + +{% tab title="Kubernetes" %} + +1. Create a secret named `dd-secrets` to store Datadog Site and API Key secrets: + + ```shell + kubectl create secret generic dd-secrets --from-literal="DD_SITE_PARAMETER=" --from-literal="DD_API_KEY=" + ``` + +1. Add the OpenTelemetry [Helm chart](https://opentelemetry.io/docs/demo/kubernetes-deployment/) to your repo to manage and deploy the OpenTelemetry Demo: + + ```shell + helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts + ``` + +1. 
Create a file named `my-values-file.yml` with the following content: + + ```yaml + opentelemetry-collector: + extraEnvsFrom: + - secretRef: + name: dd-secrets + config: + extensions: + datadog/extension: + api: + site: ${env:DD_SITE_PARAMETER} + key: ${env:DD_API_KEY} + http: + endpoint: "localhost:9875" + path: "/metadata" + exporters: + datadog: + traces: + compute_stats_by_span_kind: true + trace_buffer: 500 + hostname: "otelcol-helm" + api: + site: ${env:DD_SITE_PARAMETER} + key: ${env:DD_API_KEY} + + processors: + resource: + attributes: + - key: deployment.environment + value: "otel" + action: upsert + + connectors: + datadog/connector: + traces: + compute_stats_by_span_kind: true + + service: + extensions: [datadog/extension] + pipelines: + traces: + processors: [resource, resourcedetection, memory_limiter, transform, batch] + exporters: [otlp, debug, spanmetrics, datadog, datadog/connector] + metrics: + receivers: [datadog/connector, docker_stats, httpcheck/frontend-proxy, hostmetrics, nginx, otlp, postgresql, redis, spanmetrics] + processors: [resource, resourcedetection, memory_limiter, transform, batch] + exporters: [otlphttp/prometheus, debug, datadog] + logs: + processors: [resource, resourcedetection, memory_limiter, transform, batch] + exporters: [opensearch, debug, datadog] + ``` +Important alert (level: info): When merging YAML values, objects are merged and arrays are replaced. That's why there are more components specified in the pipelines than actually configured. The previous configuration does not replace the values configured in the main `otelcol-config` file. + +{% /tab %} + +### Running the demo{% #running-the-demo %} + +{% tab title="Docker" %} +If you have make installed, you can use the following command to start the demo: + +```shell +make start +``` + +If you don't have `make` installed, you can use the `docker compose` command directly: + +```shell +docker compose up --force-recreate --remove-orphans --detach +``` + +{% /tab %} + +{% tab title="Kubernetes" %} +To deploy the demo application on Kubernetes using Helm, run the following command: + +```shell +helm install my-otel-demo open-telemetry/opentelemetry-demo --values my-values-file.yml +``` + +{% /tab %} + +## Navigating the application{% #navigating-the-application %} + +You can access the Astronomy Shop web UI to explore the application and observe how the telemetry data is generated. + +{% tab title="Docker" %} +Go to [http://localhost:8080](http://localhost:8080). +{% /tab %} + +{% tab title="Kubernetes" %} + +1. If you are running a local cluster, you need to port forward the frontend proxy: + + ```shell + kubectl port-forward svc/my-otel-demo-frontendproxy 8080:8080 + ``` + +1. Go to [http://localhost:8080](http://localhost:8080). + +{% /tab %} + +## Telemetry data correlation{% #telemetry-data-correlation %} + +The instrumentation steps used in all services from the Demo can be found on the main OpenTelemetry documentation. + +You can find the language in which each service was implemented as well as its documentation in the [language feature reference table](https://opentelemetry.io/docs/demo/#language-feature-reference). + +## Exploring OpenTelemetry data in Datadog{% #exploring-opentelemetry-data-in-datadog %} + +When the OTel Demo is running, the built-in load generator simulates traffic in the application. After a couple of seconds you can see data arriving in Datadog. + +### Software Catalog{% #software-catalog %} + +View all services that are part of the OTel Demo: + +1. 
Go to [**APM** > **Software Catalog**](https://app.datadoghq.com/services). + +{% image + source="http://localhost:1313/images/getting_started/opentelemetry/otel_demo/software_catalog.e36240fcf4cd93b039d6674c979a63dd.png?auto=format" + alt="View Software Catalog page with list of services from OpenTelemetry demo application" /%} +Select **Map** to see how the services are connected. Change the **Map layout** to **Cluster** or **Flow** to view the map in different modes. +{% image + source="http://localhost:1313/images/getting_started/opentelemetry/otel_demo/software_catalog_flow.d8cec924d55bd504f247a3f4429844ed.png?auto=format" + alt="View Service Map Flow with all services connected" /%} +Select the **Catalog** view, then select a service to view a performance summary in the side panel. +{% image + source="http://localhost:1313/images/getting_started/opentelemetry/otel_demo/software_catalog_service.9e535c06677130006059fc60b91be65b.png?auto=format" + alt="View summary of performance and setup guidance from specific service" /%} + +### Trace Explorer{% #trace-explorer %} + +Explore traces received from the OTel Demo: + +1. From **Performance** > **Setup Guidance**, click **View Traces** to open the Trace Explorer, with the selected service applied as a filter. + +{% image + source="http://localhost:1313/images/getting_started/opentelemetry/otel_demo/traces_view.b525daf355b649b91b0650a7c8d72e1e.png?auto=format" + alt="Traces view with all indexed spans for checkout service" /%} +Select an indexed span to view the full trace details for this transaction. +{% image + source="http://localhost:1313/images/getting_started/opentelemetry/otel_demo/trace_waterfall.e1b3a6d28322156488acc866ed381508.png?auto=format" + alt="Trace view with all spans belonging to that specific transaction" /%} +Navigate through the tabs to view additional details: +- Infrastructure metrics for the services reporting Host Metrics. +- Runtime metrics for the services that have already been implemented. +- Log entries correlated with this trace. +- Span links linked to this trace. + +### Trace Queries{% #trace-queries %} + +Datadog allows you to filter and group the received OpenTelemetry data. For example, to find all transactions from a specific user, you can use Trace Queries. + +The OTel Demo sends `user.id` as span tags, so you can use this to filter all transactions triggered by the user: + +1. From **Info** in the side panel, hover over the line with the user ID, click the **cog** icon, and select **filter by @app.user.id:**. + +1. Remove any previous filters, leaving only **@app.user.id** applied to view all transactions containing spans with the specified user ID. + +{% image + source="http://localhost:1313/images/getting_started/opentelemetry/otel_demo/trace_query.da7009353f9f418dd29156665954e35f.png?auto=format" + alt="Trace query filtering all spans that contain a specific app.user.id" /%} + +### Error Tracking{% #error-tracking %} + +The OpenTelemetry Demo includes a feature flag engine for simulating error scenarios. + +1. Navigate to [http://localhost:8080/feature](http://localhost:8080/feature) to manage the available scenarios. See the [OpenTelemetry Demo documentation](https://opentelemetry.io/docs/demo/feature-flags/) for more details. +1. After the demo starts producing errors, you can visualize and track down the affected services in Datadog. 
+ +{% image + source="http://localhost:1313/images/getting_started/opentelemetry/otel_demo/error_tracking.58c4cc6954bd7f4a78aa6478009919af.png?auto=format" + alt="Error tracking view showing error PaymentService Fail Feature Flag Enabled" /%} + +### OpenTelemetry Collector Configuration{% #opentelemetry-collector-configuration %} + +The Datadog Extension allows you to view OpenTelemetry Collector configuration within Datadog on either one of the following pages: + +- [Infrastructure List](https://app.datadoghq.com/infrastructure). +- [Resource Catalog](https://app.datadoghq.com/infrastructure/catalog). + +When selecting the hostname where the Collector is running, you can visualize its full configuration: + +{% image + source="http://localhost:1313/images/getting_started/opentelemetry/otel_demo/collector_full_config.1724cf7145230f9b30bfd82b600accd7.png?auto=format" + alt="OpenTelemetry Collector configuration rendered within Datadog" /%} + +## Further Reading{% #further-reading %} + +- [Software Catalog](http://localhost:1313/software_catalog/) +- [Trace Explorer](http://localhost:1313/tracing/trace_explorer/) +- [Trace Queries](http://localhost:1313/tracing/trace_explorer/trace_queries/) +- [Error Tracking](http://localhost:1313/error_tracking/) diff --git a/opentelemetry-mdoc/guide/combining_otel_and_datadog_metrics/index.md b/opentelemetry-mdoc/guide/combining_otel_and_datadog_metrics/index.md new file mode 100644 index 0000000000000..7121b6572376f --- /dev/null +++ b/opentelemetry-mdoc/guide/combining_otel_and_datadog_metrics/index.md @@ -0,0 +1,84 @@ +--- +isPrivate: true +title: Combining OpenTelemetry and Datadog Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Guides > Combining OpenTelemetry and Datadog + Metrics +--- + +# Combining OpenTelemetry and Datadog Metrics + +## Overview{% #overview %} + +{% callout %} +##### Join the Preview! + +The `equiv_otel()` function is in Preview. If you have feedback related to this feature, reach out to your account team to provide input. +{% /callout %} + +Datadog and OpenTelemetry (OTel) use different naming conventions for integration metrics. This guide explains how to combine metrics from both systems in a single query using Datadog's `equiv_otel` function. + +{% alert level="info" %} +To query across Datadog and OpenTelemetry metrics in the Datadog UI, read the [Query OpenTelemetry Metrics](http://localhost:1313/metrics/open_telemetry/query_metrics) documentation. +{% /alert %} + +## Challenges when combining metrics{% #challenges-when-combining-metrics %} + +When working with both Datadog and OTel metrics, two main challenges arise. Let's examine these using NGINX connection monitoring as an example: + +### Different naming conventions{% #different-naming-conventions %} + +Datadog and OTel handle the same measurements differently: + +- Datadog: `nginx.net.connections` (a specific metric for active connections) +- OTel: `nginx.connections_current` (captures all connection states in a single metric) + - Requires filtering with `state:active` to match Datadog's active connections metric + +### Aggregation limitations{% #aggregation-limitations %} + +Simply combining separate metric queries can lead to incorrect results. For example, if you try to combine these queries: + +``` +avg:nginx.net.connections +avg:nginx.connections_current{state:active} +``` + +You get an average of averages, not the true average across all timeseries. 
This happens because traditional [metrics functions](http://localhost:1313/dashboards/functions) combine the results of separate queries rather than treating the data as a single metric. + +## Combining metrics with the equiv_otel function{% #combining-metrics-with-the-equiv_otel-function %} + +The `equiv_otel` function automatically combines equivalent Datadog and OTel metrics in a single query. It: + +- Automatically handles metric name translation +- Properly aggregates all timeseries as a single metric +- Works bidirectionally (Datadog to OTel or OTel to Datadog) +- Preserves query aggregation semantics + +### Converting from Datadog to OTel{% #converting-from-datadog-to-otel %} + +To include the equivalent OTel metrics in your query, wrap your Datadog query in `equiv_otel`: + +``` +equiv_otel(avg:nginx.net.connections) +``` + +This query: + +1. Identifies the equivalent OTel metric (`nginx.connections_current{state:active}`) +1. Combines timeseries from both metrics +1. Applies the aggregation (`avg`) across all datapoints + +### Converting from OTel to Datadog{% #converting-from-otel-to-datadog %} + +The same works for including Datadog metrics in an OTel query: + +``` +equiv_otel(avg:nginx.connections_current{state:active}) +``` + +The function works the same way in reverse, automatically including the equivalent Datadog metric (`nginx.net.connections`). + +## Further reading{% #further-reading %} + +- [Metrics Documentation](http://localhost:1313/metrics/) diff --git a/opentelemetry-mdoc/guide/index.md b/opentelemetry-mdoc/guide/index.md new file mode 100644 index 0000000000000..b325558d40738 --- /dev/null +++ b/opentelemetry-mdoc/guide/index.md @@ -0,0 +1,34 @@ +--- +isPrivate: true +title: Guides +description: Datadog, the leading service for cloud-scale monitoring. 
+breadcrumbs: Docs > OpenTelemetry in Datadog > Guides +--- + +# Guides + +## Guides{% #guides %} + +- [Getting Started with OpenTelemetry at Datadog](http://localhost:1313/opentelemetry/getting_started/datadog_example) +- [Producing Delta Temporality Metrics](http://localhost:1313/opentelemetry/guide/otlp_delta_temporality/) +- [Sending Data from OpenTelemetry Demo to Datadog](http://localhost:1313/opentelemetry/guide/otel_demo_to_datadog/) +- [Visualize Histograms as Heatmaps](http://localhost:1313/opentelemetry/guide/otlp_histogram_heatmaps/) + +## Migration guides{% #migration-guides %} + +- [Migrate to OpenTelemetry Collector version 0.120.0+](http://localhost:1313/opentelemetry/migrate/collector_0_120_0) +- [Migrate to OpenTelemetry Collector Version 0.95.0+](http://localhost:1313/opentelemetry/migrate/collector_0_95_0) +- [Migrate to New Operation Name Mappings](http://localhost:1313/opentelemetry/migrate/migrate_operation_names) +- [Migrate to the Datadog Distribution of OTel Collector](http://localhost:1313/opentelemetry/migrate/ddot_collector) + +## Read more on the blog{% #read-more-on-the-blog %} + +- [Datadog's partnership with OpenTelemetry](https://www.datadoghq.com/blog/opentelemetry-instrumentation/) +- [Monitor OpenTelemetry-instrumented apps with support for W3C Trace Context](https://www.datadoghq.com/blog/monitor-otel-with-w3c-trace-context/) +- [Send metrics and traces from OpenTelemetry Collector to Datadog via Datadog Exporter](https://www.datadoghq.com/blog/ingest-opentelemetry-traces-metrics-with-datadog-exporter/) +- [Forward logs from the OpenTelemetry Collector with the Datadog Exporter](https://www.datadoghq.com/blog/opentelemetry-logs-datadog-exporter/) +- [OTLP ingestion in the Agent](https://www.datadoghq.com/about/latest-news/press-releases/datadog-announces-opentelemetry-protocol-support/) +- [Learn more about AWS's managed Lambda Layer for OpenTelemetry](https://www.datadoghq.com/blog/aws-opentelemetry-lambda-layer-datadog/) +- [Correlate Datadog RUM events with traces from OpenTelemetry-instrumented applications](https://www.datadoghq.com/blog/correlate-traces-datadog-rum-otel/) +- [Monitor runtime metrics from OTel-instrumented apps with Datadog APM](https://www.datadoghq.com/blog/opentelemetry-runtime-metrics-datadog/) +- [Unify OpenTelemetry and Datadog with the DDOT Collector](https://www.datadoghq.com/blog/datadog-distribution-otel-collector/) diff --git a/opentelemetry-mdoc/guide/otlp_delta_temporality/index.md b/opentelemetry-mdoc/guide/otlp_delta_temporality/index.md new file mode 100644 index 0000000000000..e7f77b5915078 --- /dev/null +++ b/opentelemetry-mdoc/guide/otlp_delta_temporality/index.md @@ -0,0 +1,284 @@ +--- +title: Producing Delta Temporality Metrics with OpenTelemetry +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Guides > Producing Delta Temporality Metrics + with OpenTelemetry +--- + +# Producing Delta Temporality Metrics with OpenTelemetry + +## Overview{% #overview %} + +The OpenTelemetry protocol (OTLP) sends [several metric types](http://localhost:1313/metrics/open_telemetry/otlp_metric_types), some of which can have either *delta* or *cumulative* [aggregation temporality](https://opentelemetry.io/docs/reference/specification/metrics/data-model/#sums). Datadog works best with delta aggregation temporality for monotonic sums, histograms, and exponential histograms. 
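+
+As a quick illustration of the difference (independent of any particular SDK), consider a counter that records 5, 3, and 4 new requests across three consecutive export intervals; the values below only show what each temporality reports:
+
+```ts
+// Illustration only: the same observations reported under each aggregation temporality.
+const perInterval = [5, 3, 4]        // new requests observed in three consecutive intervals
+const deltaPoints = [5, 3, 4]        // delta: each point is the change since the previous export
+const cumulativePoints = [5, 8, 12]  // cumulative: each point is the running total since the start
+```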
+
+This guide describes the implications of using cumulative aggregation temporality instead, and how to select which aggregation temporality your metrics are exported with, either in the OpenTelemetry SDK or by using the [OpenTelemetry Collector `cumulativetodelta` processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/cumulativetodeltaprocessor).
+
+## Implications of using cumulative aggregation temporality{% #implications-of-using-cumulative-aggregation-temporality %}
+
+If you opt to send OTLP monotonic sums, histograms, or exponential histograms with cumulative aggregation temporality, Datadog takes the difference between consecutive points on a timeseries. This means that:
+
+- Your deployment is stateful, so you need to send all points on a timeseries to the same Datadog Agent or Datadog Exporter. This affects how you scale your OpenTelemetry Collector deployments.
+- Datadog might not send the first point it receives from a given timeseries if it cannot ensure that this point is the true start of the timeseries. This may lead to missing points after restarts.
+- The minimum and maximum cannot be recovered for cumulative OTLP Histograms; they may be missing or approximated depending on the configured histograms export mode.
+
+## Configuring your OpenTelemetry SDK{% #configuring-your-opentelemetry-sdk %}
+
+If you produce OTLP metrics from an OpenTelemetry SDK, you can configure your OTLP exporter to produce these metric types with delta aggregation temporality. In some languages, you can use the recommended configuration by setting the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable to `Delta` (case-insensitive). For a list of languages that support this environment variable, read [the specification compliance matrix](https://github.com/open-telemetry/opentelemetry-specification/blob/main/spec-compliance-matrix.md#environment-variables).
+
+If your SDK does not support this environment variable, you can configure delta temporality in code. The following example configures an OTLP HTTP exporter and adds `1` to a counter every two seconds for a total of five minutes.
+
+**Note**: These examples are intended to help you get started. You shouldn't apply patterns like using console or stdout exporters in production scenarios.
+ +{% tab title="Python" %} + +```python +import time + +from opentelemetry.exporter.otlp.proto.http.metric_exporter import ( + OTLPMetricExporter, ) +from opentelemetry.sdk.metrics import ( + Counter, + Histogram, + MeterProvider, + ObservableCounter, + ObservableGauge, + ObservableUpDownCounter, + UpDownCounter, +) +from opentelemetry.sdk.metrics.export import ( + AggregationTemporality, + ConsoleMetricExporter, + PeriodicExportingMetricReader, +) + +deltaTemporality = { + Counter: AggregationTemporality.DELTA, + UpDownCounter: AggregationTemporality.CUMULATIVE, + Histogram: AggregationTemporality.DELTA, + ObservableCounter: AggregationTemporality.DELTA, + ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, + ObservableGauge: AggregationTemporality.CUMULATIVE, +} + +exporter = OTLPMetricExporter(preferred_temporality=deltaTemporality) +reader = PeriodicExportingMetricReader(exporter, export_interval_millis=5_000) +provider = MeterProvider(metric_readers=[reader]) + +consoleReader = PeriodicExportingMetricReader( + ConsoleMetricExporter(preferred_temporality=deltaTemporality), export_interval_millis=5_000) +consoleProvider = MeterProvider(metric_readers=[consoleReader]) + +meter = provider.get_meter("my-meter") +counter = meter.create_counter("example.counter") + +consoleMeter = consoleProvider.get_meter("my-meter-console") +consoleCounter = consoleMeter.create_counter("example.counter.console") + +for i in range(150): + counter.add(1) + consoleCounter.add(1) + time.sleep(2) +``` + +{% /tab %} + +{% tab title="Go" %} + +```go +package main + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +func deltaSelector(kind metric.InstrumentKind) metricdata.Temporality { + switch kind { + case metric.InstrumentKindCounter, + metric.InstrumentKindGauge, + metric.InstrumentKindHistogram, + metric.InstrumentKindObservableGauge, + metric.InstrumentKindObservableCounter: + return metricdata.DeltaTemporality + case metric.InstrumentKindUpDownCounter, + metric.InstrumentKindObservableUpDownCounter: + return metricdata.CumulativeTemporality + } + panic("unknown instrument kind") +} + +func main() { + ctx := context.Background() + exporter, err := otlpmetrichttp.New(ctx, + otlpmetrichttp.WithTemporalitySelector(deltaSelector), + ) + consoleExporter, consoleErr := stdoutmetric.New( + stdoutmetric.WithTemporalitySelector(deltaSelector), + ) + if err != nil || consoleErr != nil { + panic(err) + } + + reader := metric.NewPeriodicReader(exporter, + metric.WithInterval(5*time.Second), + ) + provider := metric.NewMeterProvider(metric.WithReader(reader)) + + consoleReader := metric.NewPeriodicReader(consoleExporter, + metric.WithInterval(5*time.Second), + ) + consoleProvider := metric.NewMeterProvider(metric.WithReader(consoleReader)) + + defer func() { + err := provider.Shutdown(ctx) + consoleErr := consoleProvider.Shutdown(ctx) + if err != nil || consoleErr != nil { + panic(err) + } + }() + + meter := provider.Meter("my-meter") + counter, err := meter.Int64Counter("example.counter") + + consoleMeter := consoleProvider.Meter("my-meter-console") + consoleCounter, consoleErr := consoleMeter.Int64Counter("example.counter.console") + + if err != nil || consoleErr != nil { + panic(err) + } + + for i := 0; i < 150; i++ { + counter.Add(ctx, 1) + consoleCounter.Add(ctx, 1) + time.Sleep(2 * time.Second) + } +} 
+``` + +{% /tab %} + +{% tab title="Java" %} + +```java +package io.opentelemetry.example.delta; + +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.MeterProvider; +import io.opentelemetry.exporter.otlp.http.metrics.OtlpHttpMetricExporter; +import io.opentelemetry.sdk.metrics.export.AggregationTemporalitySelector; +import io.opentelemetry.sdk.metrics.export.MetricReader; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; + +public final class Main { + public static void main(String[] args) throws InterruptedException { + OtlpHttpMetricExporter exporter = + OtlpHttpMetricExporter.builder() + .setAggregationTemporalitySelector( + AggregationTemporalitySelector.deltaPreferred()) + .build(); + + MetricReader reader = + PeriodicMetricReader.builder(exporter).build(); + + MeterProvider provider = SdkMeterProvider.builder() + .registerMetricReader(reader) + .build(); + + Meter meter = provider.get("my-meter"); + + LongCounter counter = + meter.counterBuilder("example.counter").build(); + + for (int i = 0; i < 150; i++) { + counter.add(1); + Thread.sleep(2000); + } + } +} +``` + +{% /tab %} + +{% tab title=".NET" %} + +```c# +// Requires: $ dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol + +using System.Diagnostics; +using System.Diagnostics.Metrics; +using OpenTelemetry; +using OpenTelemetry.Exporter; +using OpenTelemetry.Metrics; +using OpenTelemetry.Resources; +using System.Threading; +using System; +using System.Threading.Tasks; + +namespace GettingStarted; + +public class Program +{ + public static void Main() + { + using var meter = new Meter("my-meter"); + var providerBuilder = Sdk.CreateMeterProviderBuilder().AddMeter(meter.Name); + providerBuilder + .AddConsoleExporter((exporterOptions, metricReaderOptions) => + { + metricReaderOptions.PeriodicExportingMetricReaderOptions = new PeriodicExportingMetricReaderOptions + { + ExportIntervalMilliseconds = Convert.ToInt32("5000"), + }; + metricReaderOptions.TemporalityPreference = MetricReaderTemporalityPreference.Delta; + }) + .AddOtlpExporter((exporterOptions, metricReaderOptions) => + { + metricReaderOptions.PeriodicExportingMetricReaderOptions = new PeriodicExportingMetricReaderOptions + { + ExportIntervalMilliseconds = Convert.ToInt32("5000"), + }; + exporterOptions.Protocol = OtlpExportProtocol.HttpProtobuf; + metricReaderOptions.TemporalityPreference = MetricReaderTemporalityPreference.Delta; + }); + using var provider = providerBuilder.Build(); + + Counter counter = meter.CreateCounter("example.counter", "1", "Example counter"); + for (int i = 0; i < 150; i++) { + counter?.Add(1); + Task.Delay(2000).Wait(); + } + } +} +``` + +{% /tab %} + +You can configure OTLP gRPC exporters in a similar fashion. + +## Converting to delta temporality on the Collector{% #converting-to-delta-temporality-on-the-collector %} + +When your metrics do not come from an OpenTelemetry language library, it may be infeasible to configure them to use delta aggregation temporality. This may be the case, for example, when producing metrics with other open source libraries such as Prometheus. In this situation, you can use the [cumulative to delta processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/cumulativetodeltaprocessor) to map your metrics to delta aggregation temporality. 
Your deployment is still stateful, so if your deployment has multiple Collectors, you need to use the processor on a first layer of stateful Collectors to ensure that all points of a metric are sent to the same Collector instance. + +To enable the cumulative-to-delta processor so that it applies to all your metrics, define it with an empty configuration on the `processors` section: + +```yaml +processors: + cumulativetodelta: +``` + +Finally, add it to the `processors` list on your metrics pipelines. + +**Note**: The cumulative-to-delta processor does not support exponential histograms. Also, some fields, such as the minimum and maximum, can't be recovered with this approach. Instead, use the OpenTelemetry SDK approach whenever possible. + +## Further reading{% #further-reading %} + +- [OTLP Metric Types](http://localhost:1313/metrics/open_telemetry/otlp_metric_types) +- [OpenTelemetry Support in Datadog](http://localhost:1313/opentelemetry/) diff --git a/opentelemetry-mdoc/guide/otlp_histogram_heatmaps/index.md b/opentelemetry-mdoc/guide/otlp_histogram_heatmaps/index.md new file mode 100644 index 0000000000000..566ca9ed678fb --- /dev/null +++ b/opentelemetry-mdoc/guide/otlp_histogram_heatmaps/index.md @@ -0,0 +1,81 @@ +--- +title: Visualize OTLP Histograms as Heatmaps +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Guides > Visualize OTLP Histograms as + Heatmaps +--- + +# Visualize OTLP Histograms as Heatmaps + +## Overview{% #overview %} + +The OpenTelemetry Protocol (OTLP) supports sending OTLP Histograms, a type of metric that compresses information about a set of measurements by providing aggregate statistics, like sum, count, min, and max. OTLP Histograms also count how many of these measurements fall into user-configurable buckets. + +You can visualize this datatype as a [heatmap](http://localhost:1313/dashboards/widgets/heatmap) in Datadog by following the steps on this page. + +**Note**: The related OTLP Exponential Histogram type can also be displayed as a heatmap, since it is converted to a distribution. Read more about distributions on the [dedicated Distributions page](http://localhost:1313/metrics/distributions). + +## Setup{% #setup %} + +This guide assumes you already have a [functioning setup for sending OpenTelemetry metrics to Datadog](http://localhost:1313/opentelemetry/otel_metrics). + +### OpenTelemetry SDK configuration{% #opentelemetry-sdk-configuration %} + +If you are producing metrics from an OpenTelemetry SDK, take the following steps to configure them: + +1. [Configure the SDK you are sending OTLP Histograms with delta temporality](http://localhost:1313/opentelemetry/guide/otlp_delta_temporality). This makes the minimum and maximum available for the heatmap widget. +1. Check if you want to override the [default bucket boundaries](https://opentelemetry.io/docs/reference/specification/metrics/sdk/#explicit-bucket-histogram-aggregation) from your aggregation. **Note**: Each additional bucket is considered a separate custom metric. + +For metrics coming from other sources, ensure if possible that these come as delta OTLP Histograms with the minimum and maximum fields set. + +### Datadog Exporter or Datadog Agent configuration{% #datadog-exporter-or-datadog-agent-configuration %} + +Set the histogram mode and enable aggregation metrics on your Datadog Exporter or Datadog Agent. 
+ +{% tab title="Datadog Exporter (OpenTelemetry Collector)" %} +In the `collector.yaml` file for the Datadog Exporter, configure the histogram mode to `counters` and enable aggregation metrics with the `send_aggregation_metrics` flag. + +```yaml +exporters: + datadog: + metrics: + histograms: + mode: counters + send_aggregation_metrics: true +``` + +**Note**: `send_aggregation_metrics` is available starting with Datadog Exporter v0.75.0. If you are using an earlier version, use the `send_count_sum_metrics` flag instead. The minimum and maximum are missing in earlier versions. +{% /tab %} + +{% tab title="Datadog Agent" %} +On the `otlp_config` section, configure the histogram mode to `counters` and enable aggregation metrics with the `send_aggregation_metrics` flag. + +```yaml +otlp_config: + metrics: + histograms: + mode: counters + send_aggregation_metrics: true +``` + +**Note**: `send_aggregation_metrics` is available starting on the Datadog Agent v6.45.0/v7.45.0. If you are using an earlier version, use the `send_count_sum_metrics` flag instead. The minimum and maximum are missing in earlier versions. +{% /tab %} + +### Heatmap widget configuration{% #heatmap-widget-configuration %} + +The [heatmap widget](http://localhost:1313/dashboards/widgets/heatmap) uses the set of `.bucket` metrics generated by the Datadog Exporter or Datadog Agent, each corresponding to a different histogram bucket. To visualize your histogram as a heatmap: + +1. Select `.bucket` as the metric to visualize. +1. Choose the `pre-binned data` option on the `distributions of` menu. + +You can now see your OTLP Histogram as a heatmap widget. + +## OpenMetrics compatibility{% #openmetrics-compatibility %} + +The [Datadog Agent OpenMetrics check](http://localhost:1313/integrations/openmetrics) is also compatible with the pre-binned data heatmap widget option. If you want to send metrics to the OpenMetrics check directly without converting to OpenTelemetry, enable the `collect_histogram_buckets` and `non_cumulative_histogram_buckets` flags on your instance to ensure the data is sent in a compatible way to Datadog. + +## Further reading{% #further-reading %} + +- [OTLP Metric Types](http://localhost:1313/metrics/open_telemetry/otlp_metric_types) +- [OpenTelemetry Support in Datadog](http://localhost:1313/opentelemetry/) diff --git a/opentelemetry-mdoc/index.md b/opentelemetry-mdoc/index.md new file mode 100644 index 0000000000000..2079273def77b --- /dev/null +++ b/opentelemetry-mdoc/index.md @@ -0,0 +1,114 @@ +--- +title: OpenTelemetry in Datadog +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog +--- + +# OpenTelemetry in Datadog + +{% callout %} +##### Try "Introduction to OTel with Datadog" in the Learning Center + +Learn how to configure OpenTelemetry to export metrics, traces, and logs to Datadog, and explore the collected data in the platform. + +[ENROLL NOW](https://learn.datadoghq.com/courses/otel-with-datadog) +{% /callout %} + +## Overview{% #overview %} + +[OpenTelemetry](https://opentelemetry.io/) (OTel) provides standardized protocols for collecting and routing telemetry data. Datadog supports multiple ways to collect and analyze telemetry data from OpenTelemetry-instrumented applications, whether you're using existing Datadog infrastructure or prefer a vendor-neutral setup. 
+ +### Why OpenTelemetry with Datadog?{% #why-opentelemetry-with-datadog %} + +Datadog provides advanced observability for all your application telemetry, regardless of its source. By supporting OpenTelemetry, Datadog offers: + +- **Flexibility and choice**: Use standardized instrumentation while maintaining freedom to adapt as your technology needs evolve. +- **Comprehensive language support**: Consistently monitor applications across your entire tech stack. +- **Unified instrumentation**: Maintain a single approach to instrumentation across your systems. +- **Powerful analytics**: Combine OpenTelemetry's standardization with Datadog's robust analysis, visualization, and alerting capabilities. + +Whether you're already using OpenTelemetry or considering adoption, Datadog provides flexible options to meet your needs. + +### Key decisions{% #key-decisions %} + +There are two key decisions to make when using OpenTelemetry with Datadog: + +- How to instrument your applications +- How to send your data to Datadog + +The features available to you depend on these choices. For example, using the OpenTelemetry API with the Datadog SDK provides access to more Datadog features than using the OpenTelemetry SDK alone. + +For more information, read [Feature Compatibility](http://localhost:1313/opentelemetry/compatibility/). + +## Instrument your applications{% #instrument-your-applications %} + +There are several ways to instrument your applications with OpenTelemetry and Datadog. Each approach provides different features and levels of vendor neutrality. + +- **Full OpenTelemetry**: Use the OpenTelemetry SDK and API for a vendor-neutral setup. +- **OpenTelemetry API**: Use the OpenTelemetry API with Datadog's SDK implementation. +- **OpenTelemetry instrumentation libraries**: Extend Datadog's observability to additional frameworks and technologies. + +For more information, see [Instrument Your Applications](http://localhost:1313/opentelemetry/instrument/). + +## Send OpenTelemetry data to Datadog{% #send-opentelemetry-data-to-datadog %} + +If your applications and services are instrumented with OpenTelemetry libraries, you can choose how to get traces, metrics, and logs data into Datadog. + +{% alert level="info" %} +**Not sure which setup is right for you?**See the [Feature Compatibility](http://localhost:1313/opentelemetry/compatibility/) table to understand which Datadog features are supported. +{% /alert %} + +### Option 1: Use the Datadog Agent with DDOT Collector (Recommended){% #option-1-use-the-datadog-agent-with-ddot-collector-recommended %} + +{% image + source="http://localhost:1313/images/opentelemetry/setup/ddot-collector-2.48e827fe0ea4d62cd26a81521e9fa584.png?auto=format" + alt="Architecture overview for DDOT Collector, which is embedded in the Datadog Agent." /%} + +**Best for**: Existing Datadog users or teams requiring Agent-based features such as: + +- Fleet Automation +- Live Container Monitoring +- Kubernetes Explorer +- Live Processes +- Cloud Network Monitoring +- Universal Service Monitoring +- 1,000+ Datadog integrations + +{% alert level="info" %} +For a complete list of Agent-based features, see **OTel to Datadog Agent (OTLP)** in [Feature Compatibility](http://localhost:1313/opentelemetry/compatibility/). 
+{% /alert %} + +- [Learn more about using the Datadog Agent with DDOT Collector](http://localhost:1313/opentelemetry/setup/ddot_collector/) + +### Option 2: Use the OpenTelemetry Collector{% #option-2-use-the-opentelemetry-collector %} + +{% image + source="http://localhost:1313/images/opentelemetry/setup/otel-collector.0480e3141dece4beac1203109a2cbf8a.png?auto=format" + alt="Diagram: OpenTelemetry SDK in code sends data through OTLP to host running OpenTelemetry Collector with Datadog Exporter, which forwards to Datadog's Observability Platform." /%} + +**Best for**: New or existing OTel users wanting a completely vendor-neutral setup. + +- Complete vendor neutrality for sending OpenTelemetry data to Datadog +- Flexible configuration options like tail-based sampling and data transformations + +- [Learn more about using the OTel Collector](http://localhost:1313/opentelemetry/setup/collector_exporter/) + +### Additional setup options{% #additional-setup-options %} + +For other setup options, including direct OTLP ingestion, see [Send Data to Datadog](http://localhost:1313/opentelemetry/setup). + +## Further reading{% #further-reading %} + +- [Feature Compatibility](http://localhost:1313/opentelemetry/compatibility/) +- [Instrument Your Applications](http://localhost:1313/opentelemetry/instrument/) +- [Send Data to Datadog](http://localhost:1313/opentelemetry/setup/) +- [Datadog's partnership with OpenTelemetry](https://www.datadoghq.com/blog/opentelemetry-instrumentation/) +- [Monitor OpenTelemetry-instrumented apps with support for W3C Trace Context](https://www.datadoghq.com/blog/monitor-otel-with-w3c-trace-context/) +- [Send metrics and traces from OpenTelemetry Collector to Datadog via Datadog Exporter](https://www.datadoghq.com/blog/ingest-opentelemetry-traces-metrics-with-datadog-exporter/) +- [Forward logs from the OpenTelemetry Collector with the Datadog Exporter](https://www.datadoghq.com/blog/opentelemetry-logs-datadog-exporter/) +- [OTLP ingestion in the Agent](https://www.datadoghq.com/about/latest-news/press-releases/datadog-announces-opentelemetry-protocol-support/) +- [Learn more about AWS's managed Lambda Layer for OpenTelemetry](https://www.datadoghq.com/blog/aws-opentelemetry-lambda-layer-datadog/) +- [Correlate Datadog RUM events with traces from OpenTelemetry-instrumented applications](https://www.datadoghq.com/blog/correlate-traces-datadog-rum-otel/) +- [Monitor runtime metrics from OTel-instrumented apps with Datadog APM](https://www.datadoghq.com/blog/opentelemetry-runtime-metrics-datadog/) +- [How to select your OpenTelemetry deployment](https://www.datadoghq.com/blog/otel-deployments/) +- [Introduction to OpenTelemetry with Datadog](https://learn.datadoghq.com/courses/otel-with-datadog) diff --git a/opentelemetry-mdoc/ingestion_sampling/index.md b/opentelemetry-mdoc/ingestion_sampling/index.md new file mode 100644 index 0000000000000..47f51ad85e66a --- /dev/null +++ b/opentelemetry-mdoc/ingestion_sampling/index.md @@ -0,0 +1,138 @@ +--- +title: Ingestion Sampling with OpenTelemetry +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Ingestion Sampling with OpenTelemetry +--- + +# Ingestion Sampling with OpenTelemetry + +## Overview{% #overview %} + +OpenTelemetry SDKs and the OpenTelemetry Collector provide sampling capabilities, as ingesting 100% of traces is often unnecessary to gain visibility into the health of your applications. 
Configure sampling rates before sending traces to Datadog to ingest data that is most relevant to your business and observability goals, while controlling and managing overall costs. + +This document demonstrates two primary methods for sending traces to Datadog with OpenTelemetry: + +- Send traces to the **[OpenTelemetry Collector](http://localhost:1313/opentelemetry/otel_collector_datadog_exporter)**, and use the Datadog Exporter to forward them to Datadog. +- Send traces to the **[Datadog Agent OTLP ingest](http://localhost:1313/opentelemetry/otlp_ingest_in_the_agent)**, which forwards them to Datadog. + +**Note**: Datadog doesn't support running the OpenTelemetry Collector and the Datadog Agent on the same host. + +### Using the OpenTelemetry Collector{% #using-the-opentelemetry-collector %} + +With this method, the OpenTelemetry Collector receives traces from OpenTelemetry SDKs and exports them to Datadog using the Datadog Exporter. In this scenario, [APM trace metrics](http://localhost:1313/tracing/metrics/metrics_namespace/) are computed by the Datadog Connector: + +{% image + source="http://localhost:1313/images/opentelemetry/guide/ingestion_otel/otel_apm_metrics_computation_collector.4ccd16a89a25e916cd1780314f483910.png?auto=format" + alt="OpenTelemetry APM Metrics computation using the Collector" /%} + +Choose this method if you require the advanced processing capabilities of the OpenTelemetry Collector, such as tail-based sampling. To configure the Collector to receive traces, follow the instructions on [OpenTelemetry Collector and Datadog Exporter](http://localhost:1313/opentelemetry/otel_collector_datadog_exporter). + +### Using Datadog Agent OTLP ingestion{% #using-datadog-agent-otlp-ingestion %} + +With this method, the Datadog Agent receives traces directly from OpenTelemetry SDKs using the OTLP protocol. This allows you to send traces to Datadog without running a separate OpenTelemetry Collector service. In this scenario, APM trace metrics are computed by the Agent: + +{% image + source="http://localhost:1313/images/opentelemetry/guide/ingestion_otel/otel_apm_metrics_computation_agent.6db7edab7cfa0c7b8127c643df9c529a.png?auto=format" + alt="OpenTelemetry APM Metrics computation using the Datadog Agent" /%} + +Choose this method if you prefer a simpler setup without the need for a separate OpenTelemetry Collector service. To configure the Datadog Agent to receive traces using OTLP, follow the instructions on [OTLP Ingestion by the Datadog Agent](http://localhost:1313/opentelemetry/interoperability/otlp_ingest_in_the_agent/?tab=host). + +## Reducing ingestion volume{% #reducing-ingestion-volume %} + +With OpenTelemetry, you can configure sampling both in the OpenTelemetry libraries and in the OpenTelemetry Collector: + +- **Head-based sampling** in the OpenTelemetry SDKs +- **Tail-based sampling** in the OpenTelemetry Collector +- **Probabilistic sampling** in the Datadog Agent + +### Head-based sampling{% #head-based-sampling %} + +At the SDK level, you can implement *head-based sampling*. This is when the sampling decision is made at the beginning of the trace. This type of sampling is particularly useful for high-throughput applications, where you have a clear understanding of which traces are most important to ingest and want to make sampling decisions early in the tracing process. 
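+
+For illustration, head-based sampling can typically be enabled through the standard OpenTelemetry SDK environment variables rather than code changes. The following sketch keeps roughly 25% of new traces; the rate is an arbitrary placeholder, and the samplers involved are described under Configuring below:
+
+```shell
+# Keep ~25% of new traces, and follow the upstream service's sampling
+# decision for traces that already have a parent span.
+export OTEL_TRACES_SAMPLER=parentbased_traceidratio
+export OTEL_TRACES_SAMPLER_ARG=0.25
+```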
+ +#### Configuring{% #configuring %} + +To configure head-based sampling, use the [TraceIdRatioBased](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#traceidratiobased) or [ParentBased](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#parentbased) samplers provided by the OpenTelemetry SDKs. These allow you to implement deterministic head-based sampling based on the `trace_id` at the SDK level. + +#### Considerations{% #considerations %} + +Head-based sampling affects the computation of APM metrics. Only sampled traces are sent to the OpenTelemetry Collector or Datadog Agent, which perform metrics computation. + +To approximate unsampled metrics from sampled metrics, use [formulas and functions](http://localhost:1313/dashboards/functions/#add-a-function) with the sampling rate configured in the SDK. + +Use the [ingestion volume control guide](http://localhost:1313/tracing/guide/trace_ingestion_volume_control/#effects-of-reducing-trace-ingestion-volume) to read more about the implications of setting up trace sampling on trace analytics monitors and metrics from spans. + +### Tail-based sampling{% #tail-based-sampling %} + +At the OpenTelemetry Collector level, you can do *tail-based sampling*, which allows you to define more advanced rules to maintain visibility over traces with errors or high latency. + +#### Configuring{% #configuring-1 %} + +To configure tail-based sampling, use the [Tail Sampling Processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/tailsamplingprocessor/README.md) or [Probabilistic Sampling Processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/probabilisticsamplerprocessor/README.md) to sample traces based on a set of rules at the collector level. + +#### Considerations{% #considerations-1 %} + +A limitation of tail-based sampling is that all spans for a given trace must be received by the same collector instance for effective sampling decisions. If a trace is distributed across multiple collector instances, and tail-based sampling is used, some parts of that trace may not be sent to Datadog. + +To ensure that APM metrics are computed based on 100% of the applications' traffic while using collector-level tail-based sampling, use the [Datadog Connector](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/connector/datadogconnector#datadog-connector). + +{% alert level="info" %} +The Datadog Connector is available starting v0.83.0. Read [Switch from Datadog Processor to Datadog Connector for OpenTelemetry APM Metrics](http://localhost:1313/opentelemetry/guide/switch_from_processor_to_connector) if migrating from an older version. +{% /alert %} + +See the [ingestion volume control guide](http://localhost:1313/tracing/guide/trace_ingestion_volume_control/#effects-of-reducing-trace-ingestion-volume) for information about the implications of setting up trace sampling on trace analytics monitors and metrics from spans. + +### Probabilistic sampling{% #probabilistic-sampling %} + +When using Datadog Agent OTLP ingest, a probabilistic sampler is available starting with Agent v7.54.0. + +#### Configuring{% #configuring-2 %} + +To configure probabilistic sampling, do one of the following: + +- Set `DD_APM_PROBABILISTIC_SAMPLER_ENABLED` to `true` and `DD_APM_PROBABILISTIC_SAMPLER_SAMPLING_PERCENTAGE` to the percentage of traces you'd like to sample (between `0` and `100`). 
+ +- Add the following YAML to your Agent's configuration file: + + ```yaml + apm_config: + # ... + probabilistic_sampler: + enabled: true + sampling_percentage: 51 #In this example, 51% of traces are captured. + hash_seed: 22 #A seed used for the hash algorithm. This must match other agents and OTel + ``` + +**If you use a mixed setup of Datadog tracing libraries and OTel SDKs**: + +- Probabilistic sampling will apply to spans originating from both Datadog and OTel tracing libraries. +- If you send spans both to the Datadog Agent **and** OTel collector instances, set the same seed between Datadog Agent (`DD_APM_PROBABILISTIC_SAMPLER_HASH_SEED`) and OTel collector (`hash_seed`) to ensure consistent sampling. + +{% alert level="warning" %} +`DD_OTLP_CONFIG_TRACES_PROBABILISTIC_SAMPLER_SAMPLING_PERCENTAGE` is deprecated and has been replaced by `DD_APM_PROBABILISTIC_SAMPLER_SAMPLING_PERCENTAGE`. +{% /alert %} + +#### Considerations{% #considerations-2 %} + +- The probabilistic sampler will ignore the sampling priority of spans that are set at the tracing library level. As a result, probabilistic sampling is **incompatible with [head-based sampling](http://localhost:1313/tracing/trace_pipeline/ingestion_mechanisms#head-based-sampling)**. This means that head-based sampled traces might still be dropped by probabilistic sampling. +- Spans not captured by the probabilistic sampler may still be captured by the Datadog Agent's [error and rare samplers](http://localhost:1313/tracing/trace_pipeline/ingestion_mechanisms/#error-and-rare-traces). +- For consistent sampling all tracers must support [128-bit trace IDs](http://localhost:1313/opentelemetry/interoperability/otel_api_tracing_interoperability/#128-bit-trace-ids). + +## Monitoring ingested volumes in Datadog{% #monitoring-ingested-volumes-in-datadog %} + +Use the [APM Estimated Usage dashboard](https://app.datadoghq.com/dash/integration/apm_estimated_usage) and the `datadog.estimated_usage.apm.ingested_bytes` metric to get visibility into your ingested volumes over a specific time period. Filter the dashboard to specific environments and services to see which services are responsible for the largest shares of the ingested volume. + +If the ingestion volume is higher than expected, consider adjusting your sampling rates. + +## Unified service tagging{% #unified-service-tagging %} + +When sending data from OpenTelemetry to Datadog, it's important to tie trace data together with unified service tagging. + +Setting unified service tags ensures that traces are accurately linked to their corresponding services and environments. This prevents hosts from being misattributed, which can lead to unexpected increases in usage and costs. + +For more information, see [Unified Service Tagging](http://localhost:1313/getting_started/tagging/unified_service_tagging/#opentelemetry). 
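+
+For example, when your services are instrumented with OpenTelemetry SDKs, unified service tags are typically supplied as OTel resource attributes, which Datadog maps to `service`, `env`, and `version`. A minimal sketch, where the service name, environment, and version values are placeholders:
+
+```shell
+export OTEL_SERVICE_NAME="shopping-cart"
+export OTEL_RESOURCE_ATTRIBUTES="deployment.environment=prod,service.version=1.2.3"
+```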
+ +## Further reading{% #further-reading %} + +- [Trace Ingestion Volume Control](http://localhost:1313/tracing/guide/trace_ingestion_volume_control) +- [Ingestion Controls](http://localhost:1313/tracing/trace_pipeline/ingestion_controls) +- [OpenTelemetry Support in Datadog](http://localhost:1313/opentelemetry/) diff --git a/opentelemetry-mdoc/instrument/api_support/dotnet/index.md b/opentelemetry-mdoc/instrument/api_support/dotnet/index.md new file mode 100644 index 0000000000000..4d8a9c21f6b7b --- /dev/null +++ b/opentelemetry-mdoc/instrument/api_support/dotnet/index.md @@ -0,0 +1,163 @@ +--- +title: .NET Custom Instrumentation using the OpenTelemetry API +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Instrument Your Applications > OpenTelemetry + API Support > .NET Custom Instrumentation using the OpenTelemetry API +--- + +# .NET Custom Instrumentation using the OpenTelemetry API + +{% alert level="info" %} +Unsure when to use OpenTelemetry with Datadog? Start with [Custom Instrumentation with the OpenTelemetry API](http://localhost:1313/tracing/trace_collection/custom_instrumentation/otel_instrumentation/) to learn more. +{% /alert %} + +## Overview{% #overview %} + +There are a few reasons to manually instrument your applications with the OpenTelemetry API: + +- You are not using Datadog [supported library instrumentation](http://localhost:1313/tracing/trace_collection/compatibility/). +- You want to extend the `ddtrace` library's functionality. +- You need finer control over instrumenting your applications. + +The `ddtrace` library provides several techniques to help you achieve these goals. The following sections demonstrate how to use the OpenTelemetry API for custom instrumentation to use with Datadog. + +## Setup{% #setup %} + +To configure OpenTelemetry to use the Datadog trace provider: + +1. Add your desired manual OpenTelemetry instrumentation to your .NET code following the [OpenTelemetry .NET Manual Instrumentation documentation](https://opentelemetry.io/docs/instrumentation/net/manual/). **Note**: Where those instructions indicate that your code should call the OpenTelemetry SDK, call the Datadog tracing library instead. + +1. Install the Datadog .NET tracing library and enable the tracer for your [.NET Framework service](http://localhost:1313/tracing/trace_collection/dd_libraries/dotnet-framework/#installation-and-getting-started) or your [.NET Core (and .NET 5+) service](http://localhost:1313/tracing/trace_collection/dd_libraries/dotnet-core/#installation-and-getting-started). **Preview**: You can optionally do this with [Single Step APM Instrumentation](http://localhost:1313/tracing/trace_collection/single-step-apm/). + +1. Set `DD_TRACE_OTEL_ENABLED` environment variable to `true`. + +1. Run your application. + +Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. It also supports [OpenTelemetry instrumentation libraries](https://opentelemetry.io/docs/instrumentation/net/libraries/). + +## Creating custom spans{% #creating-custom-spans %} + +To manually create spans that start a new, independent trace: + +```csharp +using OpenTelemetry.Resources; +using OpenTelemetry.Trace; + +// Start a new span +using (Activity? 
activity = Telemetry.ActivitySource.StartActivity(""))
+{
+    activity?.SetTag("operation.name", "custom-operation");
+    // Do something
+}
+```
+
+## Creating spans{% #creating-spans %}
+
+To create custom spans within an existing trace context:
+
+```csharp
+using OpenTelemetry.Resources;
+using OpenTelemetry.Trace;
+
+using (Activity? parentScope = Telemetry.ActivitySource.StartActivity(""))
+{
+    parentScope?.SetTag("operation.name", "manual.sortorders");
+    using (Activity? childScope = Telemetry.ActivitySource.StartActivity(""))
+    {
+        // Nest using statements around the code to trace
+        childScope?.SetTag("operation.name", "manual.sortorders.child");
+        SortOrders();
+    }
+}
+```
+
+## Adding span tags{% #adding-span-tags %}
+
+Add custom tags to your spans to provide additional context:
+
+```csharp
+using OpenTelemetry.Resources;
+using OpenTelemetry.Trace;
+
+public class ShoppingCartController : Controller
+{
+    private IShoppingCartRepository _shoppingCartRepository;
+
+    [HttpGet]
+    public IActionResult Index(int customerId)
+    {
+        Activity? activity =
+            Telemetry.ActivitySource.StartActivity("");
+
+        // Add a tag to the span for use in the Datadog web UI
+        activity?.SetTag("customer.id", customerId.ToString());
+
+        var cart = _shoppingCartRepository.Get(customerId);
+
+        return View(cart);
+    }
+}
+```
+
+## Setting errors on spans{% #setting-errors-on-spans %}
+
+Set error information on a span when an error occurs during its execution.
+
+```csharp
+try
+{
+    // do work that can throw an exception
+}
+catch (Exception exception)
+{
+    activity?.SetTag("error", 1);
+    activity?.SetTag("error.message", exception.Message);
+    activity?.SetTag("error.stack", exception.ToString());
+    activity?.SetTag("error.type", exception.GetType().ToString());
+}
+```
+
+## Adding span events{% #adding-span-events %}
+
+{% alert level="info" %}
+Adding span events requires SDK version 2.53.0 or higher.
+{% /alert %}
+
+You can add span events using the `AddEvent` API. This method requires an `ActivityEvent` constructed with the `name` parameter and optionally accepts `attributes` and `timestamp` parameters. The method creates a new span event with the specified properties and associates it with the corresponding span.
+
+- **Name** [*required*]: A string representing the event's name.
+- **Timestamp** [*optional*]: A UNIX timestamp representing the event's occurrence time. Expects a `DateTimeOffset` object.
+- **Attributes** [*optional*]: Zero or more key-value pairs with the following properties:
+  - The key must be a non-empty string.
+  - The value can be either:
+    - A primitive type: string, Boolean, or number.
+    - A homogeneous array of primitive type values (for example, an array of strings).
+    - Nested arrays and arrays containing elements of different data types are not allowed.
+
+The following examples demonstrate different ways to add events to a span:
+
+```csharp
+var eventTags = new ActivityTagsCollection
+{
+    { "int_val", 1 },
+    { "string_val", "two" },
+    { "int_array", new int[] { 3, 4 } },
+    { "string_array", new string[] { "5", "6" } },
+    { "bool_array", new bool[] { true, false } }
+};
+
+activity.AddEvent(new ActivityEvent("Event With No Attributes"));
+activity.AddEvent(new ActivityEvent("Event With Some Attributes", DateTimeOffset.Now, eventTags));
+```
+
+Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#add-events) specification for more information.
+ +## Propagating context with headers extraction and injection{% #propagating-context-with-headers-extraction-and-injection %} + +You can configure the propagation of context for distributed traces by injecting and extracting headers. Read [Trace Context Propagation](http://localhost:1313/tracing/trace_collection/trace_context_propagation/) for information. + +## Further Reading{% #further-reading %} + +- [Explore your services, resources, and traces](http://localhost:1313/tracing/glossary/) +- [Interoperability of OpenTelemetry API and Datadog instrumented traces](http://localhost:1313/opentelemetry/guide/otel_api_tracing_interoperability) diff --git a/opentelemetry-mdoc/instrument/api_support/go/index.md b/opentelemetry-mdoc/instrument/api_support/go/index.md new file mode 100644 index 0000000000000..94e292d34f37f --- /dev/null +++ b/opentelemetry-mdoc/instrument/api_support/go/index.md @@ -0,0 +1,195 @@ +--- +title: Go Custom Instrumentation using the OpenTelemetry API +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Instrument Your Applications > OpenTelemetry + API Support > Go Custom Instrumentation using the OpenTelemetry API +--- + +# Go Custom Instrumentation using the OpenTelemetry API + +{% alert level="info" %} +Unsure when to use OpenTelemetry with Datadog? Start with [Custom Instrumentation with the OpenTelemetry API](http://localhost:1313/tracing/trace_collection/custom_instrumentation/otel_instrumentation/) to learn more. +{% /alert %} + +## Overview{% #overview %} + +There are a few reasons to manually instrument your applications with the OpenTelemetry API: + +- You are not using Datadog [supported library instrumentation](http://localhost:1313/tracing/trace_collection/compatibility/). +- You want to extend the `ddtrace` library's functionality. +- You need finer control over instrumenting your applications. + +The `ddtrace` library provides several techniques to help you achieve these goals. The following sections demonstrate how to use the OpenTelemetry API for custom instrumentation to use with Datadog. + +## Imports{% #imports %} + +Import the following packages to setup the Datadog trace provider and use cases demonstrated below. **Note**: This documentation uses v2 of the Go tracer, which Datadog recommends for all users. If you are using v1, see the [migration guide](http://localhost:1313/tracing/trace_collection/compatibility/) to upgrade to v2. + +```go +import ( + "context" + "log" + "os" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/ddtrace/opentelemetry" + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" +) +``` + +## Setup{% #setup %} + +To configure OpenTelemetry to use the Datadog trace provider: + +1. Add your desired manual OpenTelemetry instrumentation to your Go code following the [OpenTelemetry Go Manual Instrumentation documentation](https://opentelemetry.io/docs/instrumentation/go/manual/). **Important!** Where those instructions indicate that your code should call the OpenTelemetry SDK, call the Datadog tracing library instead. + +1. Install the OpenTelemetry package `go.opentelemetry.io/otel` using the command: + + ```shell + go get go.opentelemetry.io/otel + ``` + +1. Install the Datadog OpenTelemetry wrapper package using the command: + + ```shell + go get github.com/DataDog/dd-trace-go/v2/ddtrace/opentelemetry + ``` + +1. 
Import packages in the code: + + ```go + import ( + "go.opentelemetry.io/otel" + ddotel "github.com/DataDog/dd-trace-go/v2/ddtrace/opentelemetry" + ) + ``` + +1. Create a TracerProvider, optionally providing a set of options, that are specific to Datadog APM, and defer the Shutdown method, which stops the tracer: + + ```go + provider := ddotel.NewTracerProvider() + defer provider.Shutdown() + ``` + +1. Use the Tracer Provider instance with the OpenTelemetry API to set the global TracerProvider: + + ```go + otel.SetTracerProvider(provider) + ``` + +1. Run your application. + +Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. + +## Adding span tags{% #adding-span-tags %} + +Add custom tags to your spans to attach additional metadata and context to your traces. + +```go +// Can only be done after the setup steps, such as initialising the tracer. + +// Start a span. +ctx, span := t.Start(ctx, "read.file") +// Set an attribute, or a tag in Datadog terminology, on a span. +span.SetAttributes(attribute.String(ext.ResourceName, "test.json")) +``` + +### Adding tags globally to all spans{% #adding-tags-globally-to-all-spans %} + +Add tags to all spans by configuring the tracer with the `WithGlobalTag` option: + +```go +// Here we can leverage the Datadog tracer options by passing them into the +// NewTracerProvider function. +provider := ddotel.NewTracerProvider( + ddtracer.WithGlobalTag("datacenter", "us-1"), + ddtracer.WithGlobalTag("env", "dev"), +) +defer provider.Shutdown() + +// Use it with the OpenTelemetry API to set the global TracerProvider. +otel.SetTracerProvider(provider) + +// Start the Tracer with the OpenTelemetry API. +t := otel.Tracer("") +``` + +### Setting errors on a span{% #setting-errors-on-a-span %} + +To set an error on a span, use the otel SetAttributes or ddtrace WithError options. + +```go +// Start a span. +ctx, span := t.Start(context.Background(), "spanName") // where `t` refers to otel/trace + +... +// Set an error on a span with 'span.SetAttributes'. +span.SetAttributes(attribute.String(ext.ErrorMsg, "errorMsg")) + +// ALternatively, it is possible to set an error on a span via end span options. +EndOptions(span, tracer.WithError(errors.New("myErr"))) // where `tracer` refers to ddtrace/tracer +span.End() +``` + +## Adding spans{% #adding-spans %} + +Unlike other Datadog tracing libraries, when tracing Go applications, Datadog recommends that you explicitly manage and pass the Go context of your spans. This approach ensures accurate span relationships and meaningful tracing. For more information, see the [Go context library documentation](https://pkg.go.dev/context) or documentation for any third-party libraries integrated with your application. + +```go +// Can only be done after the setup steps. + +// Here we can leverage context.Context to pass in Datadog-specifc start span options, +// like 'ddtracer.Measured()' +ctx, span := t.Start( + ddotel.ContextWithStartOptions(context.Background(), ddtracer.Measured()), "span_name") + +span.End() +``` + +## Adding span events{% #adding-span-events %} + +{% alert level="info" %} +Adding span events requires SDK version 1.67.0 or higher. +{% /alert %} + +You can add span events using the `AddEvent` API. This method requires a `name` parameter and optionally accepts `attributes` and `timestamp` parameters. The method creates a new span event with the specified properties and associates it with the corresponding span. 
+
+- **Name** [*required*]: A string representing the event's name.
+- **Attributes** [*optional*]: Zero or more key-value pairs with the following properties:
+  - The key must be a non-empty string.
+  - The value can be either:
+    - A primitive type: string, Boolean, or number.
+    - A homogeneous array of primitive type values (for example, an array of strings).
+    - Nested arrays and arrays containing elements of different data types are not allowed.
+- **Timestamp** [*optional*]: A UNIX timestamp representing the event's occurrence time. Expects a `Time` object.
+
+In the following example, `oteltrace` is an alias for the `go.opentelemetry.io/otel/trace` package and `attribute` refers to the `go.opentelemetry.io/otel/attribute` package. These packages must be imported in order to use this example.
+
+```go
+// Start a span with the OpenTelemetry tracer (`t`) created during setup.
+_, span := t.Start(context.Background(), "span_name")
+span.AddEvent("Event With No Attributes")
+span.AddEvent("Event With Some Attributes", oteltrace.WithAttributes(attribute.Int("int_val", 1), attribute.String("string_val", "two"), attribute.Int64Slice("int_array", []int64{3, 4}), attribute.StringSlice("string_array", []string{"5", "6"}), attribute.BoolSlice("bool_array", []bool{false, true})))
+span.End()
+```
+
+Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#add-events) specification for more information.
+
+## Trace client and Agent configuration{% #trace-client-and-agent-configuration %}
+
+There are additional configurations to consider for both the tracing client and Datadog Agent:
+
+- Context propagation with B3 headers.
+- Excluding specific resources from sending traces to Datadog, if you do not want those traces included in calculated metrics, such as health checks.
+
+### Propagating context with headers extraction and injection{% #propagating-context-with-headers-extraction-and-injection %}
+
+You can configure the propagation of context for distributed traces by injecting and extracting headers. Read [Trace Context Propagation](http://localhost:1313/tracing/trace_collection/trace_context_propagation/) for information.
+
+### Resource filtering{% #resource-filtering %}
+
+Traces can be excluded based on their resource name, to remove synthetic traffic such as health checks from reporting traces to Datadog. This and other security and fine-tuning configurations can be found on the [Security](http://localhost:1313/tracing/security) page.
diff --git a/opentelemetry-mdoc/instrument/api_support/index.md b/opentelemetry-mdoc/instrument/api_support/index.md
new file mode 100644
index 0000000000000..474823a31642b
--- /dev/null
+++ b/opentelemetry-mdoc/instrument/api_support/index.md
@@ -0,0 +1,45 @@
+---
+title: OpenTelemetry API Support
+description: Datadog, the leading service for cloud-scale monitoring.
+breadcrumbs: >-
+  Docs > OpenTelemetry in Datadog > Instrument Your Applications > OpenTelemetry
+  API Support
+---
+
+# OpenTelemetry API Support
+
+Datadog tracing libraries provide an implementation of the [OpenTelemetry API](https://opentelemetry.io/docs/specs/otel/trace/api/) for instrumenting your code. This means you can maintain vendor-neutral instrumentation of your services, while still taking advantage of Datadog's native implementation, features, and products.
+ +{% image + source="http://localhost:1313/images/opentelemetry/setup/otel-api-dd-sdk.18116c7846ecc6686abb7940964c4c5b.png?auto=format" + alt="Diagram: OpenTelemetry API with DD tracing libraries sends data through OTLP protocol to the Datadog Agent, which forwards to Datadog's platform." /%} + +**Note:** You can also send your OpenTelemetry API instrumented traces to Datadog using the [OTel Collector](http://localhost:1313/opentelemetry/setup/collector_exporter/). + +By [instrumenting your code with OpenTelemetry APIs](http://localhost:1313/tracing/trace_collection/otel_instrumentation/), your code: + +- Remains free of vendor-specific API calls. +- Does not depend on Datadog tracing libraries at compile time (only runtime). + +Replace the OpenTelemetry SDK with the Datadog tracing library in the instrumented application, and the traces produced by your running code can be processed, analyzed, and monitored alongside Datadog traces and in Datadog proprietary products such as [Continuous Profiler](http://localhost:1313/profiler/), [Data Streams Monitoring](http://localhost:1313/data_streams/), [App and API Protection](http://localhost:1313/security/application_security/), and [Live Processes](http://localhost:1313/infrastructure/process). + +To learn more, follow the link for your language: + +- [Java](http://localhost:1313/opentelemetry/instrument/api_support/java) +- [Python](http://localhost:1313/opentelemetry/instrument/api_support/python) +- [Ruby](http://localhost:1313/opentelemetry/instrument/api_support/ruby) +- [go](http://localhost:1313/opentelemetry/instrument/api_support/go) +- [Node.js](http://localhost:1313/opentelemetry/instrument/api_support/nodejs) +- [PHP](http://localhost:1313/opentelemetry/instrument/api_support/php) +- [.Net](http://localhost:1313/opentelemetry/instrument/api_support/dotnet) + +{% alert level="info" %} +To see which Datadog features are supported with this setup, see the [feature compatibility table](http://localhost:1313/opentelemetry/compatibility/) under OTel API with Datadog SDK and Agent. +{% /alert %} + +## Further reading{% #further-reading %} + +- [Instrument a custom method to get deep visibility into your business logic](http://localhost:1313/tracing/guide/instrument_custom_method) +- [Connect your Logs and Traces together](http://localhost:1313/tracing/connect_logs_and_traces) +- [Explore your services, resources, and traces](http://localhost:1313/tracing/visualization/) +- [Learn More about Datadog and the OpenTelemetry initiative](https://www.datadoghq.com/blog/opentelemetry-instrumentation/) diff --git a/opentelemetry-mdoc/instrument/api_support/java/index.md b/opentelemetry-mdoc/instrument/api_support/java/index.md new file mode 100644 index 0000000000000..3280a321fbc14 --- /dev/null +++ b/opentelemetry-mdoc/instrument/api_support/java/index.md @@ -0,0 +1,252 @@ +--- +title: Java Custom Instrumentation using the OpenTelemetry API +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Instrument Your Applications > OpenTelemetry + API Support > Java Custom Instrumentation using the OpenTelemetry API +--- + +# Java Custom Instrumentation using the OpenTelemetry API + +{% alert level="info" %} +Unsure when to use OpenTelemetry with Datadog? Start with [Custom Instrumentation with the OpenTelemetry API](http://localhost:1313/tracing/trace_collection/custom_instrumentation/otel_instrumentation/) to learn more. 
+{% /alert %} + +## Overview{% #overview %} + +There are a few reasons to manually instrument your applications with the OpenTelemetry API: + +- You are not using Datadog [supported library instrumentation](http://localhost:1313/tracing/trace_collection/compatibility/). +- You want to extend the `ddtrace` library's functionality. +- You need finer control over instrumenting your applications. + +The `ddtrace` library provides several techniques to help you achieve these goals. The following sections demonstrate how to use the OpenTelemetry API for custom instrumentation to use with Datadog. + +## Setup{% #setup %} + +{% alert level="info" %} +OpenTelemetry is supported in Java after version 1.24.0. +{% /alert %} + +To configure OpenTelemetry to use the Datadog trace provider: + +1. If you have not yet read the instructions for auto-instrumentation and setup, start with the [Java Setup Instructions](http://localhost:1313/tracing/setup/java/). + +1. Make sure you only depend on the OpenTelemetry API (and not the OpenTelemetry SDK). + +1. Set the `dd.trace.otel.enabled` system property or the `DD_TRACE_OTEL_ENABLED` environment variable to `true`. + +## Adding span tags{% #adding-span-tags %} + +### Add custom span tags{% #add-custom-span-tags %} + +Add custom tags to your spans corresponding to any dynamic value within your application code such as `customer.id`. + +```java +import io.opentelemetry.api.trace.Span; + +public void doSomething() { + Span span = Span.current(); + span.setAttribute("user-name", "Some User"); +} +``` + +### Adding tags globally to all spans{% #adding-tags-globally-to-all-spans %} + +The `dd.tags` property allows you to set tags across all generated spans for an application. This is useful for grouping stats for your applications, data centers, or any other tags you would like to see in Datadog. 
+ +```shell +java -javaagent:.jar \ + -Ddd.tags=datacenter:njc,: \ + -jar .jar +``` + +### Setting errors on span{% #setting-errors-on-span %} + +To set an error on a span, you can use the `setStatus` method on the span like this: + +```java +import static io.opentelemetry.api.trace.StatusCode.ERROR; +import io.opentelemetry.api.trace.Span; + +public void doSomething() { + Span span = Span.current(); + span.setStatus(ERROR, "Some error details..."); +} +``` + +### Setting tags and errors on a root span from a child span{% #setting-tags-and-errors-on-a-root-span-from-a-child-span %} + +This example demonstrates how to set tags and errors on a root span from a child span: + +```java +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Context; +import io.opentelemetry.context.ContextKey; +import io.opentelemetry.context.Scope; +import io.opentelemetry.exporter.otlp.trace.OtlpGrpcSpanExporter; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; +import io.opentelemetry.semconv.ResourceAttributes; +import java.util.concurrent.TimeUnit; + +public class Example { + + private final static ContextKey CONTEXT_KEY = + ContextKey.named("opentelemetry-traces-local-root-span"); + + public void begin() { + tracer = GlobalOpenTelemetry.getTracer("my-scope", "0.1.0"); + Span parentSpan = tracer.spanBuilder("begin").startSpan(); + try (Scope scope = parentSpan.makeCurrent()) { + createChildSpan(); + } finally { + parentSpan.end(); + } + } + + private void createChildSpan() { + Span childSpan = tracer.spanBuilder("child-span").startSpan(); + try { + Span rootSpan = Context.current().get(CONTEXT_KEY); + if (null != rootSpan) { + rootSpan.setAttribute("my-attribute", "my-attribute-value"); + rootSpan.setStatus(StatusCode.ERROR, "Some error details..."); + } + } finally { + childSpan.end(); + } + } + +} +``` + +## Adding spans{% #adding-spans %} + +If you aren't using a [supported framework instrumentation](http://localhost:1313/tracing/trace_collection/automatic_instrumentation/dd_libraries/java/?tab=wget#compatibility), or you would like additional depth in your application's [traces](http://localhost:1313/tracing/glossary/#trace), you may want to add custom instrumentation to your code for complete flame graphs or to measure execution times for pieces of code. + +If modifying application code is not possible, use the environment variable dd.trace.methods to detail these methods. + +If you have existing @Trace or similar annotations, or prefer to use annotations to complete any incomplete traces within Datadog, use Trace Annotations. + +Traces may also be created using the OpenTelemetry `@WithSpan` annotation as described in Trace annotations. + +### Trace annotations{% #trace-annotations %} + +Add `@WithSpan` to methods to have them be traced when running OpenTelemetry and the `dd-java-agent.jar`. If the Agent is not attached, this annotation has no effect on your application. OpenTelemetry's `@WithSpan` annotation is provided by the `opentelemetry-instrumentation-annotations` dependency. 
+ +```java +import io.opentelemetry.instrumentation.annotations.WithSpan; + +public class SessionManager { + + @WithSpan + public static void saveSession() { + // your method implementation here + } +} +``` + +### Manually creating a new span{% #manually-creating-a-new-span %} + +To manually create new spans within the current trace context: + +```java +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Scope; +import io.opentelemetry.exporter.otlp.trace.OtlpGrpcSpanExporter; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; +import io.opentelemetry.semconv.ResourceAttributes; +import java.util.concurrent.TimeUnit; + +public class Example { + + public void doSomething() { + Tracer tracer = GlobalOpenTelemetry.getTracer("my-scope", "0.1.0"); + Span span = tracer.spanBuilder("my-resource").startSpan(); + try (Scope scope = span.makeCurrent()) { + // do some work + } catch (Throwable t) { + span.recordException(t); + throw t; + } finally { + span.end(); + } + } + +} +``` + +## Adding span events{% #adding-span-events %} + +{% alert level="info" %} +Adding span events requires SDK version 1.40.0 or higher. +{% /alert %} + +You can add span events using the `addEvent` API. This method requires a `name` parameter and optionally accepts `attributes` and `timestamp` parameters. The method creates a new span event with the specified properties and associates it with the corresponding span. + +- **Name** [*required*]: A string representing the event's name. +- **Attributes** [*optional*]: Zero or more key-value pairs with the following properties: + - The key must be a non-empty string. + - The value can be either: + - A primitive type: string, Boolean, or number. + - A homogeneous array of primitive type values (for example, an array of strings). + - Nested arrays and arrays containing elements of different data types are not allowed. +- **Timestamp** [*optional*]: A UNIX timestamp representing the event's occurrence time. Expects an `Instant` object. + +The following examples demonstrate different ways to add events to a span: + +```java +Attributes eventAttributes = Attributes.builder() + .put(AttributeKey.longKey("int_val"), 1L) + .put(AttributeKey.stringKey("string_val"), "two") + .put(AttributeKey.longArrayKey("int_array"), Arrays.asList(3L, 4L)) + .put(AttributeKey.stringArrayKey("string_array"), Arrays.asList("5", "6")) + .put(AttributeKey.booleanArrayKey("bool_array"), Arrays.asList(true, false)) + .build(); + +span.addEvent("Event With No Attributes"); +span.addEvent("Event With Some Attributes", eventAttributes); +``` + +Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#add-events) specification for more information. + +### Recording exceptions{% #recording-exceptions %} + +To record exceptions, use the `recordException` API. This method requires an `exception` parameter and optionally accepts a UNIX `timestamp` parameter. It creates a new span event that includes standardized exception attributes and associates it with the corresponding span. 
+ +The following examples demonstrate different ways to record exceptions: + +```java +span.recordException(new Exception("Error Message")); +span.recordException(new Exception("Error Message"), + Attributes.builder().put(AttributeKey.stringKey("status"), "failed").build()); +``` + +Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#record-exception) specification for more information. + +## Trace client and Agent configuration{% #trace-client-and-agent-configuration %} + +Both the tracing client and Datadog Agent offer additional configuration options for context propagation. You can also exclude specific resources from sending traces to Datadog if you don't want those traces to be included in calculated metrics, such as traces related to health checks. + +### Propagating context with headers extraction and injection{% #propagating-context-with-headers-extraction-and-injection %} + +You can configure the propagation of context for distributed traces by injecting and extracting headers. Read [Trace Context Propagation](http://localhost:1313/tracing/trace_collection/trace_context_propagation/) for information. + +### Resource filtering{% #resource-filtering %} + +Traces can be excluded based on their resource name, to remove synthetic traffic such as health checks from reporting traces to Datadog. This and other security and fine-tuning configurations can be found on the [Security](http://localhost:1313/tracing/security) page or in [Ignoring Unwanted Resources](http://localhost:1313/tracing/guide/ignoring_apm_resources/). + +## Further Reading{% #further-reading %} + +- [Explore your services, resources, and traces](http://localhost:1313/tracing/glossary/) +- [Interoperability of OpenTelemetry API and Datadog instrumented traces](http://localhost:1313/opentelemetry/guide/otel_api_tracing_interoperability) diff --git a/opentelemetry-mdoc/instrument/api_support/nodejs/index.md b/opentelemetry-mdoc/instrument/api_support/nodejs/index.md new file mode 100644 index 0000000000000..ac47fd6668a22 --- /dev/null +++ b/opentelemetry-mdoc/instrument/api_support/nodejs/index.md @@ -0,0 +1,173 @@ +--- +title: Node.js Custom Instrumentation using the OpenTelemetry API +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Instrument Your Applications > OpenTelemetry + API Support > Node.js Custom Instrumentation using the OpenTelemetry API +--- + +# Node.js Custom Instrumentation using the OpenTelemetry API + +{% alert level="info" %} +Unsure when to use OpenTelemetry with Datadog? Start with [Custom Instrumentation with the OpenTelemetry API](http://localhost:1313/tracing/trace_collection/custom_instrumentation/otel_instrumentation/) to learn more. +{% /alert %} + +## Overview{% #overview %} + +There are a few reasons to manually instrument your applications with the OpenTelemetry API: + +- You are not using Datadog [supported library instrumentation](http://localhost:1313/tracing/trace_collection/compatibility/). +- You want to extend the `ddtrace` library's functionality. +- You need finer control over instrumenting your applications. + +The `ddtrace` library provides several techniques to help you achieve these goals. The following sections demonstrate how to use the OpenTelemetry API for custom instrumentation to use with Datadog. + +## Setup{% #setup %} + +To configure OpenTelemetry to use the Datadog trace provider: + +1. 
Add your desired manual OpenTelemetry instrumentation to your Node.js code following the [OpenTelemetry Node.js Manual Instrumentation documentation](https://opentelemetry.io/docs/instrumentation/js/instrumentation/). **Note**: Where those instructions indicate that your code should call the OpenTelemetry SDK, call the Datadog tracing library instead. + +1. Add the `dd-trace` module to your package.json: + + ```sh + npm install dd-trace + ``` + +1. Initialize the `dd-trace` module in your application: + + ```js + const tracer = require('dd-trace').init({ + // ... + }) + ``` + +1. Get `TracerProvider` from `tracer`: + + ```js + const { TracerProvider } = tracer + ``` + +1. Construct and register a `TracerProvider`: + + ```js + const provider = new TracerProvider() + provider.register() + ``` + +1. Import the OpenTelemetry API and create an OpenTelemetry tracer instance: + + ```js + const ot = require('@opentelemetry/api') + const otelTracer = ot.trace.getTracer( + 'my-service' + ) + ``` + +1. Run your application. + +Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. It also supports [integration instrumentation](http://localhost:1313/tracing/trace_collection/dd_libraries/nodejs#integration-instrumentation) and [OpenTelemetry automatic instrumentation](https://opentelemetry.io/docs/instrumentation/js/automatic/). + +## Adding span tags{% #adding-span-tags %} + +Add custom attributes to your spans to provide additional context: + +```js +function processData(i, param1, param2) { + return otelTracer.startActiveSpan(`processData:${i}`, (span) => { + const result = someOperation(param1, param2); + + // Add an attribute to the span + span.setAttribute('app.processedData', result.toString()); + span.end(); + return result; + }); +} +``` + +## Creating spans{% #creating-spans %} + +To create a new span and properly close it, use the `startActiveSpan` method: + +```js +function performTask(iterations, param1, param2) { + // Create a span. A span must be closed. + return otelTracer.startActiveSpan('performTask', (span) => { + const results = []; + for (let i = 0; i < iterations; i++) { + results.push(processData(i, param1, param2)); + } + // Be sure to end the span! + span.end(); + return results; + }); +} +``` + +## Adding span events{% #adding-span-events %} + +{% alert level="info" %} +Adding span events requires SDK version 5.17.0/4.41.0 or higher. +{% /alert %} + +You can add span events using the `addEvent` API. This method requires a `name` parameter and optionally accepts `attributes` and `timestamp` parameters. The method creates a new span event with the specified properties and associates it with the corresponding span. + +- **Name** [*required*]: A string representing the event's name. +- **Attributes** [*optional*]: Zero or more key-value pairs with the following properties: + - The key must be a non-empty string. + - The value can be either: + - A primitive type: string, Boolean, or number. + - A homogeneous array of primitive type values (for example, an array of strings). + - Nested arrays and arrays containing elements of different data types are not allowed. +- **Timestamp** [*optional*]: A UNIX timestamp representing the event's occurrence time. Expects a `TimeInput` object. 
+ +The following examples demonstrate different ways to add events to a span: + +```js +span.addEvent('Event With No Attributes') +span.addEvent('Event With Some Attributes', {"int_val": 1, "string_val": "two", "int_array": [3, 4], "string_array": ["5", "6"], "bool_array": [true, false]}) +``` + +Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#add-events) specification for more information. + +### Recording exceptions{% #recording-exceptions %} + +To record exceptions, use the `recordException` API. This method requires an `exception` parameter and optionally accepts a UNIX `timestamp` parameter. It creates a new span event that includes standardized exception attributes and associates it with the corresponding span. + +The following examples demonstrate different ways to record exceptions: + +```js +span.recordException(new TestError()) +``` + +Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#record-exception) specification for more information. + +## Filtering requests{% #filtering-requests %} + +In some cases, you may want to exclude certain requests from being instrumented, such as health checks or synthetic traffic. You can use the `blocklist` or `allowlist` option on the `http` plugin to ignore these requests. + +To exclude requests at the application level, add the following after initializing the tracer: + +```javascript +// at the top of the entry point right after tracer.init() +tracer.use('http', { + blocklist: ['/health', '/ping'] +}) +``` + +You can also split the configuration between client and server if needed: + +```javascript +tracer.use('http', { + server: { + blocklist: ['/ping'] + } +}) +``` + +Additionally, you can exclude traces based on their resource name to prevent the Agent from sending them to Datadog. For more information on security and fine-tuning Agent configurations, read the [Security](http://localhost:1313/tracing/security) or [Ignoring Unwanted Resources](http://localhost:1313/tracing/guide/ignoring_apm_resources/). + +## Further Reading{% #further-reading %} + +- [Explore your services, resources, and traces](http://localhost:1313/tracing/glossary/) +- [Interoperability of OpenTelemetry API and Datadog instrumented traces](http://localhost:1313/opentelemetry/guide/otel_api_tracing_interoperability) diff --git a/opentelemetry-mdoc/instrument/api_support/php/index.md b/opentelemetry-mdoc/instrument/api_support/php/index.md new file mode 100644 index 0000000000000..3621bcc3dfba1 --- /dev/null +++ b/opentelemetry-mdoc/instrument/api_support/php/index.md @@ -0,0 +1,168 @@ +--- +title: PHP Custom Instrumentation using the OpenTelemetry API +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Instrument Your Applications > OpenTelemetry + API Support > PHP Custom Instrumentation using the OpenTelemetry API +--- + +# PHP Custom Instrumentation using the OpenTelemetry API + +{% alert level="info" %} +Unsure when to use OpenTelemetry with Datadog? Start with [Custom Instrumentation with the OpenTelemetry API](http://localhost:1313/tracing/trace_collection/custom_instrumentation/otel_instrumentation/) to learn more. +{% /alert %} + +## Overview{% #overview %} + +There are a few reasons to manually instrument your applications with the OpenTelemetry API: + +- You are not using Datadog [supported library instrumentation](http://localhost:1313/tracing/trace_collection/compatibility/). 
+- You want to extend the `ddtrace` library's functionality.
+- You need finer control over instrumenting your applications.
+
+The `ddtrace` library provides several techniques to help you achieve these goals. The following sections demonstrate how to use the OpenTelemetry API for custom instrumentation to use with Datadog.
+
+## Setup{% #setup %}
+
+To configure OpenTelemetry to use the Datadog trace provider:
+
+1. Install the [OpenTelemetry API packages](https://opentelemetry.io/docs/languages/php/instrumentation/#instrumentation-setup):
+
+   ```shell
+   composer require open-telemetry/sdk
+   ```
+
+1. Add your desired manual OpenTelemetry instrumentation to your PHP code following the [OpenTelemetry PHP Manual Instrumentation documentation](https://opentelemetry.io/docs/instrumentation/php/manual/).
+
+1. Install the [Datadog PHP tracing library](http://localhost:1313/tracing/trace_collection/dd_libraries/php#getting-started).
+
+1. Set `DD_TRACE_OTEL_ENABLED` to `true`.
+
+Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application.
+
+## Adding span tags{% #adding-span-tags %}
+
+You can add attributes at the moment you start the span:
+
+```php
+$span = $tracer->spanBuilder('mySpan')
+    ->setAttribute('key', 'value')
+    ->startSpan();
+```
+
+Or while the span is active:
+
+```php
+$activeSpan = OpenTelemetry\API\Trace\Span::getCurrent();
+
+$activeSpan->setAttribute('key', 'value');
+```
+
+## Setting errors on a span{% #setting-errors-on-a-span %}
+
+Exception information is captured and attached to a span if one is active when the exception is raised.
+
+```php
+// Create a span
+$span = $tracer->spanBuilder('mySpan')->startSpan();
+
+throw new \Exception('Oops!');
+
+// 'mySpan' will be flagged as erroneous and have
+// the stack trace and exception message attached as tags
+```
+
+Flagging a trace as erroneous can also be done manually:
+
+```php
+use OpenTelemetry\API\Trace\Span;
+use OpenTelemetry\Context\Context;
+
+// Can only be done after the setup steps, such as initializing the tracer.
+
+try {
+    throw new \Exception('Oops!');
+} catch (\Exception $e) {
+    $rootSpan = Span::fromContext(Context::getRoot());
+    $rootSpan->recordException($e);
+}
+```
+
+## Adding spans{% #adding-spans %}
+
+To add a span:
+
+```php
+// Get a tracer or use an existing one
+$tracerProvider = \OpenTelemetry\API\Globals::tracerProvider();
+$tracer = $tracerProvider->getTracer('datadog');
+
+// Create a span
+$span = $tracer->spanBuilder('mySpan')->startSpan();
+
+// ... do stuff
+
+// Close the span
+$span->end();
+```
+
+## Adding span events{% #adding-span-events %}
+
+{% alert level="info" %}
+Adding span events requires SDK version 1.3.0 or higher.
+{% /alert %}
+
+You can add span events using the `addEvent` API. This method requires a `name` parameter and optionally accepts `attributes` and `timestamp` parameters. The method creates a new span event with the specified properties and associates it with the corresponding span.
+
+- **Name** [*required*]: A string representing the event's name.
+- **Attributes** [*optional*]: Zero or more key-value pairs with the following properties:
+  - The key must be a non-empty string.
+  - The value can be either:
+    - A primitive type: string, Boolean, or number.
+    - A homogeneous array of primitive type values (for example, an array of strings).
+    - Nested arrays and arrays containing elements of different data types are not allowed.
+- **Timestamp** [*optional*]: A UNIX timestamp representing the event's occurrence time. Expects `nanoseconds`. + +The following examples demonstrate different ways to add events to a span: + +```php +$span->addEvent("Event With No Attributes"); +$span->addEvent( + "Event With Some Attributes", + [ + 'int_val' => 1, + 'string_val' => "two", + 'int_array' => [3, 4], + 'string_array' => ["5", "6"], + 'bool_array' => [true, false] + ] +); +``` + +Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#add-events) specification for more information. + +### Recording exceptions{% #recording-exceptions %} + +To record exceptions, use the `recordException` API. This method requires an `exception` parameter and optionally accepts a UNIX `timestamp` parameter. It creates a new span event that includes standardized exception attributes and associates it with the corresponding span. + +The following examples demonstrate different ways to record exceptions: + +```php +$span->recordException(new \Exception("Error Message")); +$span->recordException(new \Exception("Error Message"), [ "status" => "failed" ]); +``` + +Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#record-exception) specification for more information. + +## Accessing active spans{% #accessing-active-spans %} + +To access the currently active span: + +```php +$span = OpenTelemetry\API\Trace\Span::getCurrent(); +``` + +## Further Reading{% #further-reading %} + +- [Explore your services, resources, and traces](http://localhost:1313/tracing/glossary/) +- [Interoperability of OpenTelemetry API and Datadog instrumented traces](http://localhost:1313/opentelemetry/guide/otel_api_tracing_interoperability) diff --git a/opentelemetry-mdoc/instrument/api_support/python/index.md b/opentelemetry-mdoc/instrument/api_support/python/index.md new file mode 100644 index 0000000000000..66a41ae36ef69 --- /dev/null +++ b/opentelemetry-mdoc/instrument/api_support/python/index.md @@ -0,0 +1,116 @@ +--- +title: Python Custom Instrumentation using the OpenTelemetry API +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Instrument Your Applications > OpenTelemetry + API Support > Python Custom Instrumentation using the OpenTelemetry API +--- + +# Python Custom Instrumentation using the OpenTelemetry API + +{% alert level="info" %} +Unsure when to use OpenTelemetry with Datadog? Start with [Custom Instrumentation with the OpenTelemetry API](http://localhost:1313/tracing/trace_collection/custom_instrumentation/otel_instrumentation/) to learn more. +{% /alert %} + +## Overview{% #overview %} + +There are a few reasons to manually instrument your applications with the OpenTelemetry API: + +- You are not using Datadog [supported library instrumentation](http://localhost:1313/tracing/trace_collection/compatibility/). +- You want to extend the `ddtrace` library's functionality. +- You need finer control over instrumenting your applications. + +The `ddtrace` library provides several techniques to help you achieve these goals. The following sections demonstrate how to use the OpenTelemetry API for custom instrumentation to use with Datadog. + +## Setup{% #setup %} + +To configure OpenTelemetry to use the Datadog trace provider: + +1. If you have not yet read the instructions for auto-instrumentation and setup, start with the [Python Setup Instructions](http://localhost:1313/tracing/setup/python/). + +1. 
Set `DD_TRACE_OTEL_ENABLED` environment variable to `true`. + +### Creating custom spans{% #creating-custom-spans %} + +To create custom spans within an existing trace context: + +```python +from opentelemetry import trace + +tracer = trace.get_tracer(__name__) + +def do_work(): + with tracer.start_as_current_span("operation_name") as span: + # Perform the work that you want to track with the span + print("Doing work...") + # When the 'with' block ends, the span is automatically closed +``` + +## Accessing active spans{% #accessing-active-spans %} + +To access the currently active span, use the `get_current_span()` function: + +```python +from opentelemetry import trace + +current_span = trace.get_current_span() +# enrich 'current_span' with information +``` + +## Adding span tags{% #adding-span-tags %} + +Add attributes to a span to provide additional context or metadata. + +Here's an example of how to add attributes to the current span: + +```python +from opentelemetry import trace + +current_span = trace.get_current_span() + +current_span.set_attribute("attribute_key1", 1) +``` + +## Adding span events{% #adding-span-events %} + +{% alert level="info" %} +Adding span events requires SDK version 2.9.0 or higher. +{% /alert %} + +You can add span events using the `add_event` API. This method requires a `name` parameter and optionally accepts `attributes` and `timestamp` parameters. The method creates a new span event with the specified properties and associates it with the corresponding span. + +- **Name** [*required*]: A string representing the event's name. +- **Attributes** [*optional*]: Zero or more key-value pairs with the following properties: + - The key must be a non-empty string. + - The value can be either: + - A primitive type: string, Boolean, or number. + - A homogeneous array of primitive type values (for example, an array of strings). + - Nested arrays and arrays containing elements of different data types are not allowed. +- **Timestamp** [*optional*]: A UNIX timestamp representing the event's occurrence time. Expects `microseconds`. + +The following examples demonstrate different ways to add events to a span: + +```python +span.add_event("Event With No Attributes") +span.add_event("Event With Some Attributes", {"int_val": 1, "string_val": "two", "int_array": [3, 4], "string_array": ["5", "6"], "bool_array": [True, False]}) +``` + +Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#add-events) specification for more information. + +### Recording exceptions{% #recording-exceptions %} + +To record exceptions, use the `record_exception` API. This method requires an `exception` parameter and optionally accepts a UNIX `timestamp` parameter. It creates a new span event that includes standardized exception attributes and associates it with the corresponding span. + +The following examples demonstrate different ways to record exceptions: + +```python +span.record_exception(Exception("Error Message")) +span.record_exception(Exception("Error Message"), {"status": "failed"}) +``` + +Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#record-exception) specification for more information. 
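+
+As a usage illustration, the following sketch combines `record_exception` with an explicit error status inside an exception handler; the function name and error are placeholders, and it assumes the standard OpenTelemetry span API behaves as in the examples above:
+
+```python
+from opentelemetry import trace
+from opentelemetry.trace import StatusCode
+
+tracer = trace.get_tracer(__name__)
+
+def charge_customer():
+    with tracer.start_as_current_span("charge_customer") as span:
+        try:
+            raise ValueError("card declined")  # placeholder failure
+        except ValueError as exc:
+            # Attach the exception as a span event and mark the span as errored.
+            span.record_exception(exc, {"status": "failed"})
+            span.set_status(StatusCode.ERROR, str(exc))
+            raise
+```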
+ +## Further reading{% #further-reading %} + +- [Explore your services, resources, and traces](http://localhost:1313/tracing/glossary/) +- [Interoperability of OpenTelemetry API and Datadog instrumented traces](http://localhost:1313/opentelemetry/guide/otel_api_tracing_interoperability) diff --git a/opentelemetry-mdoc/instrument/api_support/ruby/index.md b/opentelemetry-mdoc/instrument/api_support/ruby/index.md new file mode 100644 index 0000000000000..b068c5f135b07 --- /dev/null +++ b/opentelemetry-mdoc/instrument/api_support/ruby/index.md @@ -0,0 +1,128 @@ +--- +title: Ruby Custom Instrumentation using the OpenTelemetry API +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Instrument Your Applications > OpenTelemetry + API Support > Ruby Custom Instrumentation using the OpenTelemetry API +--- + +# Ruby Custom Instrumentation using the OpenTelemetry API + +## Overview{% #overview %} + +Datadog tracing libraries provide an implementation of the [OpenTelemetry API](https://opentelemetry.io/docs/reference/specification/trace/api) for instrumenting your code. This means you can maintain vendor-neutral instrumentation of all your services, while still taking advantage of Datadog's native implementation, features, and products. You can configure it to generate Datadog-style spans and traces to be processed by the Datadog tracing library for your language, and send those to Datadog. + +By instrumenting your code with OpenTelemetry API: + +- Your code remains free of vendor-specific API calls. +- Your code does not depend on Datadog tracing libraries at compile time (only runtime). + +Replace the OpenTelemetry SDK with the Datadog tracing library in the instrumented application, and the traces produced by your running code can be processed, analyzed, and monitored alongside Datadog traces and in Datadog proprietary products. + +The Datadog tracing library, when configured as described here, accepts the spans and traces generated by OpenTelemetry-instrumented code, processes the telemetry, and sends it to Datadog. You can use this approach, for example, if your code has already been instrumented with the OpenTelemetry API, or if you want to instrument using the OpenTelemetry API, and you want to gain the benefits of using the Datadog tracing libraries without changing your code. + +If you're looking for a way to instrument your code with OpenTelemetry and then send span data to Datadog *without going through the Datadog tracing library*, see [OpenTelemetry in Datadog](http://localhost:1313/opentelemetry/). + +## Requirements and limitations{% #requirements-and-limitations %} + +- Datadog Ruby tracing library `dd-trace-rb` version 1.9.0 or greater. +- Gem version support 1.1.0 or greater. + +The following OpenTelemetry features implemented in the Datadog library as noted: + +| Feature | Support notes | +| ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [OpenTelemetry Context propagation](https://opentelemetry.io/docs/instrumentation/ruby/manual/#context-propagation) | [Datadog and W3C Trace Context header formats](http://localhost:1313/tracing/trace_collection/trace_context_propagation/) are enabled by default. 
| +| [Span processors](https://opentelemetry.io/docs/reference/specification/trace/sdk/#span-processor) | Unsupported | +| [Span Exporters](https://opentelemetry.io/docs/reference/specification/trace/sdk/#span-exporter) | Unsupported | +| `OpenTelemetry.logger` | `OpenTelemetry.logger` is set to the same object as `Datadog.logger`. Configure through [custom logging](http://localhost:1313/tracing/trace_collection/dd_libraries/ruby/#custom-logging). | +| Trace/span [ID generators](https://opentelemetry.io/docs/reference/specification/trace/sdk/#id-generators) | ID generation is performed by the tracing library, with support for [128-bit trace IDs](http://localhost:1313/opentelemetry/guide/otel_api_tracing_interoperability/). | + +## Configuring OpenTelemetry to use the Datadog tracing library{% #configuring-opentelemetry-to-use-the-datadog-tracing-library %} + +1. Add your desired manual OpenTelemetry instrumentation to your Ruby code following the [OpenTelemetry Ruby Manual Instrumentation documentation](https://opentelemetry.io/docs/instrumentation/ruby/manual/). **Important!** Where those instructions indicate that your code should call the OpenTelemetry SDK, call the Datadog tracing library instead. + +1. Add the `datadog` gem to your Gemfile: + + ```ruby + source 'https://rubygems.org' + gem 'datadog' # For dd-trace-rb v1.x, use the `ddtrace` gem. + ``` + +1. Install the gem by running `bundle install`. + +1. Add the following lines to your OpenTelemetry configuration file: + + ```ruby + require 'opentelemetry/sdk' + require 'datadog/opentelemetry' + ``` + +1. Add a configuration block to your application where you can activate integrations and change tracer settings. Without additional configuration here, only code you have instrumented with OpenTelemetry is traced: + + ```ruby + Datadog.configure do |c| + ... + end + ``` + +Using this block you can: + + - [Add additional Datadog configuration settings](http://localhost:1313/tracing/trace_collection/dd_libraries/ruby/#additional-configuration) + - [Activate or reconfigure Datadog instrumentation](http://localhost:1313/tracing/trace_collection/dd_libraries/ruby#integration-instrumentation) + +OpenTelemetry configuration can be changed separately, using the [`OpenTelemetry::SDK.configure` block](https://opentelemetry.io/docs/languages/ruby/getting-started/#instrumentation). + +Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. It supports [integration instrumentation](http://localhost:1313/tracing/trace_collection/dd_libraries/ruby#integration-instrumentation) and [OpenTelemetry Automatic instrumentation](https://opentelemetry.io/docs/languages/ruby/libraries/) also. + +## Adding span events{% #adding-span-events %} + +{% alert level="info" %} +Adding span events requires SDK version 2.3.0 or higher. +{% /alert %} + +You can add span events using the `add_event` API. This method requires a `name` parameter and optionally accepts `attributes` and `timestamp` parameters. The method creates a new span event with the specified properties and associates it with the corresponding span. + +- **Name** [*required*]: A string representing the event's name. +- **Attributes** [*optional*]: Zero or more key-value pairs with the following properties: + - The key must be a non-empty string. + - The value can be either: + - A primitive type: string, Boolean, or number. + - A homogeneous array of primitive type values (for example, an array of strings). 
+ - Nested arrays and arrays containing elements of different data types are not allowed. +- **Timestamp** [*optional*]: A UNIX timestamp representing the event's occurrence time. Expects `seconds(Float)`. + +The following examples demonstrate different ways to add events to a span: + +```ruby +span.add_event('Event With No Attributes') +span.add_event( + 'Event With All Attributes', + attributes: { 'int_val' => 1, 'string_val' => 'two', 'int_array' => [3, 4], 'string_array' => ['5', '6'], 'bool_array' => [false, true]} +) +``` + +Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#add-events) specification for more information. + +### Recording exceptions{% #recording-exceptions %} + +To record exceptions, use the `record_exception` API. This method requires an `exception` parameter and optionally accepts a UNIX `timestamp` parameter. It creates a new span event that includes standardized exception attributes and associates it with the corresponding span. + +The following examples demonstrate different ways to record exceptions: + +```ruby +span.record_exception( + StandardError.new('Error Message') +) +span.record_exception( + StandardError.new('Error Message'), + attributes: { 'status' => 'failed' } +) +``` + +Read the [OpenTelemetry](https://opentelemetry.io/docs/specs/otel/trace/api/#record-exception) specification for more information. + +## Further Reading{% #further-reading %} + +- [Explore your services, resources, and traces](http://localhost:1313/tracing/glossary/) +- [Interoperability of OpenTelemetry API and Datadog instrumented traces](http://localhost:1313/opentelemetry/guide/otel_api_tracing_interoperability) diff --git a/opentelemetry-mdoc/instrument/index.md b/opentelemetry-mdoc/instrument/index.md new file mode 100644 index 0000000000000..7ceacd293d304 --- /dev/null +++ b/opentelemetry-mdoc/instrument/index.md @@ -0,0 +1,42 @@ +--- +title: Instrument Your Applications +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Instrument Your Applications +--- + +# Instrument Your Applications + +## Overview{% #overview %} + +Datadog supports several approaches for instrumenting your applications with OpenTelemetry. Choose the method that best fits your needs: + +### OpenTelemetry SDK{% #opentelemetry-sdk %} + +You can fully instrument your applications with OpenTelemetry SDKs. These SDKs provide complete implementations for creating traces, metrics, and logs in the OpenTelemetry format, which can then be sent to Datadog. + +{% alert level="info" %} +If you instrument your applications fully with OTel, some Datadog features are not supported. For more information, see the [Feature Compatibility](http://localhost:1313/opentelemetry/compatibility/) table. +{% /alert %} + +- [Use OpenTelemetry SDKs](https://opentelemetry.io/docs/languages/) + +### OpenTelemetry API and Datadog SDK{% #opentelemetry-api-and-datadog-sdk %} + +Integrating Datadog with OpenTelemetry allows you to use Datadog's comprehensive observability platform while leveraging OpenTelemetry's vendor-agnostic instrumentation. This allows you to collect, visualize, and analyze traces from your applications and infrastructure. + +Use the OpenTelemetry Tracing APIs with Datadog's SDK to maintain vendor-neutral instrumentation while accessing Datadog's full feature set. 
+ +- [Use the Datadog SDK with OpenTelemetry API](http://localhost:1313/opentelemetry/instrument/api_support) +- [Configure the Datadog SDK with OpenTelemetry SDK environment variables](http://localhost:1313/opentelemetry/config/environment_variable_support/) + +### OpenTelemetry instrumentation libraries{% #opentelemetry-instrumentation-libraries %} + +Extend your observability with OpenTelemetry [instrumentation libraries](https://opentelemetry.io/docs/specs/otel/overview/#instrumentation-libraries) alongside Datadog's SDK. + +Datadog supports OpenTelemetry-compatible instrumentation libraries which provide observability for frameworks and technologies not covered by Datadog's native SDKs. This allows you to instrument additional frameworks and libraries while still sending data to Datadog's backend. + +- [Use OpenTelemetry Instrumentation Libraries with the Datadog SDK](http://localhost:1313/opentelemetry/instrument/instrumentation_libraries/) + +## Further reading{% #further-reading %} + +- [OpenTelemetry Instrumentation](https://opentelemetry.io/docs/concepts/instrumentation/) diff --git a/opentelemetry-mdoc/instrument/instrumentation_libraries/index.md b/opentelemetry-mdoc/instrument/instrumentation_libraries/index.md new file mode 100644 index 0000000000000..59bb36a377abf --- /dev/null +++ b/opentelemetry-mdoc/instrument/instrumentation_libraries/index.md @@ -0,0 +1,397 @@ +--- +title: Using OpenTelemetry Instrumentation Libraries with Datadog SDKs +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Instrument Your Applications > Using + OpenTelemetry Instrumentation Libraries with Datadog SDKs +--- + +# Using OpenTelemetry Instrumentation Libraries with Datadog SDKs + +Datadog supports OpenTelemetry-compatible instrumentations which provides observability for libraries not covered by Datadog SDKs without changing SDKs. You can extend Datadog's tracing capabilities to these frameworks. + +## Prerequisites{% #prerequisites %} + +1. **Enable OpenTelemetry support**: Set the `DD_TRACE_OTEL_ENABLED` environment variable to `true`. This step isn't required for the Datadog Go and Ruby APM SDKs. + +1. **Run the Datadog Agent**: Datadog SDKs provide an implementation of the OpenTelemetry API and submit spans to a Datadog Agent. Ensure the Datadog Agent is [running](http://localhost:1313/getting_started/tracing/#set-up-datadog-apm) to use OpenTelemetry instrumentation with Datadog SDKs. + +1. **Disable duplicate instrumentation**: When replacing a Datadog instrumentation with its OpenTelemetry equivalent, disable the Datadog instrumentation to prevent duplicate spans from appearing in the trace. + +## Configuration{% #configuration %} + +You can configure Datadog SDKs by setting the same [environment variables supported by OpenTelemetry](http://localhost:1313/opentelemetry/interoperability/environment_variable_support). + +## Language support{% #language-support %} + +Datadog SDKs implement the OpenTelemetry API by overriding the default implementations in the OpenTelemetry SDK. However, note the following limitations: + +Operations specific to the OpenTelemetry SDK are not supported (for example, SpanProcessors or OTLP Trace Exporters). Datadog SDKs do not support OpenTelemetry Metrics and Logs APIs. To use OpenTelemetry Logs and Metrics APIs, use OTLP Ingest. 
+ +| Language | Minimum version | +| -------- | --------------- | +| Java | 1.35.0 | +| Python | 2.10.0 | +| Ruby | 2.1.0 | +| Go | 1.67.0 | +| Node.js | 4.3.0 | +| PHP | 0.94.0 | +| .NET | 2.53.0 | + +{% tab title="Java" %} +## Compatibility requirements{% #compatibility-requirements %} + +1. The Datadog Java SDK supports library instrumentations using OpenTelemetry's [instrumentation API](https://github.com/open-telemetry/opentelemetry-java-instrumentation/tree/main/instrumentation-api/src/main/java/io/opentelemetry/instrumentation/api/instrumenter/) and `javaagent` [extension API](https://github.com/open-telemetry/opentelemetry-java-instrumentation/tree/main/javaagent-extension-api/src/main/java/io/opentelemetry/javaagent/extension/instrumentation/). +1. Each instrumentation must be packaged as an OpenTelemetry [extension](https://opentelemetry.io/docs/zero-code/java/agent/extensions/) in its own JAR. +1. OpenTelemetry provides an [example extension project](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/examples/extension/README.md) that registers a custom [instrumentation for Servlet 3 classes](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/examples/extension/src/main/java/com/example/javaagent/instrumentation/DemoServlet3InstrumentationModule.java). +1. The Datadog SDK for Java also accepts select individual instrumentation JARs produced by OpenTelemetry's [opentelemetry-java-instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation/tree/main/) build, for example the [R2DBC instrumentation JAR](https://search.maven.org/search?q=a:opentelemetry-javaagent-r2dbc-1.0). + +{% alert level="warning" %} +OpenTelemetry incubator APIs are not supported. +{% /alert %} + +## Setup{% #setup %} + +To use an OpenTelemetry instrumentation with the Datadog Java SDK: + +1. Set the `dd.trace.otel.enabled` system property or the `DD_TRACE_OTEL_ENABLED` environment variable to `true`. +1. Copy the OpenTelemetry extension JAR containing the instrumentation to the same container as the application. +1. Set the `otel.javaagent.extensions` system property or the `OTEL_JAVAAGENT_EXTENSIONS` environment variable to the extension JAR path. + +## Example{% #example %} + +Here's a step-by-step example using R2DBC in Java to illustrate how you can add OpenTelemetry instrumentation into your service and begin sending data to Datadog, ensuring you capture all the missing spans. + +```sh +git clone https://github.com/eugenp/tutorials +cd tutorials/spring-reactive-modules/spring-reactive-data + +curl -Lo dd-java-agent.jar 'https://dtdg.co/latest-java-tracer' +``` + +Download the OpenTelemetry R2DBC agent and run your Spring Boot application with both the Datadog Java agent and the OpenTelemetry R2DBC agent. + +```sh +curl -Lo opentelemetry-javaagent-r2dbc.jar \ + 'https://repo1.maven.org/maven2/io/opentelemetry/javaagent/instrumentation/opentelemetry-javaagent-r2dbc-1.0/2.5.0-alpha/opentelemetry-javaagent-r2dbc-1.0-2.5.0-alpha.jar' + +mvn spring-boot:run -Dstart-class=com.baeldung.pagination.PaginationApplication \ + -Dspring-boot.run.jvmArguments='-javaagent:dd-java-agent.jar -Ddd.trace.otel.enabled=true -Dotel.javaagent.extensions=opentelemetry-javaagent-r2dbc.jar -Ddd.trace.split-by-tags=db.name,db.sql.table -Ddd.trace.debug=true' +``` + +Open `http://127.0.0.1:8080/products` to exercise the product query. With this setup, you are using OpenTelemetry's instrumentation to ensure full observability for R2DBC queries. 
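If you run the same application in a container, the setup steps above can also be expressed as environment variables instead of system properties. A minimal sketch, with illustrative paths and entry point:

```sh
# Equivalent configuration via environment variables (paths are illustrative)
export DD_TRACE_OTEL_ENABLED=true
export OTEL_JAVAAGENT_EXTENSIONS=/app/opentelemetry-javaagent-r2dbc.jar

java -javaagent:/app/dd-java-agent.jar -jar app.jar
```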
+ +{% alert level="warning" %} +Versions 2.6.0-alpha and later of these OpenTelemetry instrumentations are not supported by the Datadog Java SDK. +{% /alert %} + +## Verified OpenTelemetry extensions{% #verified-opentelemetry-extensions %} + +| Framework | Versions | OpenTelemetry Extension | Instrumentation Names | +| ------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| Apache CXF (Jax-WS) | 3.0+ | [opentelemetry-javaagent-jaxws-2.0-cxf-3.0](https://search.maven.org/search?q=a:opentelemetry-javaagent-jaxws-2.0-cxf-3.0) | `otel.cxf` | +| R2DBC | 1.0+ | [opentelemetry-javaagent-r2dbc-1.0](https://search.maven.org/search?q=a:opentelemetry-javaagent-r2dbc-1.0) | `otel.r2dbc` | + +{% /tab %} + +{% tab title="Python" %} +## Compatibility requirements{% #compatibility-requirements %} + +1. The Datadog Python SDK supports library [instrumentations](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation#readme) using the OpenTelemetry Python Trace API. +1. OpenTelemetry provides an [example](https://opentelemetry.io/docs/zero-code/python/example/) for instrumenting a sample application. + +## Setup{% #setup %} + +To use OpenTelemetry instrumentations with the Datadog Python SDK, perform the following steps: + +1. Follow the instructions in the [OpenTelemetry API](https://ddtrace.readthedocs.io/en/stable/api.html?highlight=opentelemetry%20api#module-ddtrace.opentelemetry) section in the Datadog Python library docs. +1. Follow the steps for instrumenting your service with your chosen `opentelemetry-python-contrib` library. + +## Example{% #example %} + +The following is an example instrumenting the OpenTelemetry's kafka-python library with the Datadog Python SDK: + +```python +from kafka import KafkaProducer, KafkaConsumer +from opentelemetry.instrumentation.kafka import KafkaInstrumentor +from opentelemetry import trace + +# Instrument Kafka with OpenTelemetry +KafkaInstrumentor().instrument() + +# Kafka configuration +KAFKA_TOPIC = 'demo-topic0' +KAFKA_BROKER = 'localhost:9092' + +def produce_message(): + producer = KafkaProducer(bootstrap_servers=KAFKA_BROKER) + message = b'Hello, OpenTelemetry!' + + # No manual span creation, relying on automatic instrumentation + producer.send(KAFKA_TOPIC, message) + producer.flush() + + print(f"Produced message: {message}") + +def consume_message(): + consumer = KafkaConsumer(KAFKA_TOPIC, bootstrap_servers=KAFKA_BROKER, auto_offset_reset='earliest', group_id='demo-group') + + # No manual span creation, relying on automatic instrumentation + for message in consumer: + print(f"Consumed message: {message.value}") + break # For simplicity, consume just one message + +if __name__ == "__main__": + # manual span here + tracer = trace.get_tracer(__name__) + with tracer.start_as_current_span("Span") as parent_span: + parent_span.set_attribute("Hello", "World") + produce_message() + consume_message() +``` + +{% /tab %} + +{% tab title="Go" %} +## Compatibility requirements{% #compatibility-requirements %} + +The Datadog SDK for Go supports library instrumentations written using the [OpenTelemetry-Go Trace API](https://github.com/open-telemetry/opentelemetry-go/tree/main/trace), including the [`opentelemetry-go-contrib/instrumentation`](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation) libraries. 
+ +**Note**: This documentation uses v2 of the Go tracer, which Datadog recommends for all users. If you are using v1, see the [migration guide](http://localhost:1313/tracing/trace_collection/custom_instrumentation/go/migration) to upgrade to v2. + +OpenTelemetry support has not changed between v1 and v2 of the Go Tracer. + +## Setup{% #setup %} + +To use OpenTelemetry integrations with the Datadog Go SDK, perform the following steps: + +1. Follow the instructions in the Imports and Setup sections of the [Go Custom Instrumentation using OpenTelemetry API](https://docs.datadoghq.com/tracing/trace_collection/custom_instrumentation/go/otel/#imports) page. +1. Follow the steps for instrumenting your service with your chosen `opentelemetry-go-contrib` library. + +## Example{% #example %} + +The following is an example instrumenting the `net/http` library with the Datadog Tracer and OpenTelemetry's `net/http` integration: + +```go +import ( + "fmt" + "log" + "net/http" + + ddotel "github.com/DataDog/dd-trace-go/v2/ddtrace/opentelemetry" + ddtracer "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel" +) + +func main() { + // register tracer + provider := ddotel.NewTracerProvider(ddtracer.WithDebugMode(true)) + defer provider.Shutdown() + otel.SetTracerProvider(provider) + + // configure the server with otelhttp instrumentation as you normally would using opentelemetry: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp + var mux http.ServeMux + mux.Handle("/hello", http.HandlerFunc(hello)) + http.HandleFunc("/hello", hello) + log.Fatal(http.ListenAndServe(":8080", otelhttp.NewHandler(&mux, "server"))) +} + +func hello(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "hello\n") +} +``` + +{% image + source="http://localhost:1313/images/opentelemetry/interoperability/go-otel-dropin-support.97e01dcae7d6f8fec2a5d60ede3226e0.png?auto=format" + alt="go-dd-otelhttp" /%} + +{% /tab %} + +{% tab title="Node.js" %} +## Compatibility requirements{% #compatibility-requirements %} + +The Datadog Node.js SDK supports library [instrumentations](https://github.com/open-telemetry/opentelemetry-js-contrib/tree/main/metapackages/auto-instrumentations-node#supported-instrumentations) using the OpenTelemetry Node.js Trace API. + +## Setup{% #setup %} + +To use OpenTelemetry instrumentations with the Datadog Node.js SDK, perform the following steps: + +1. Follow the Setup instructions in [Node.js Custom Instrumentation using OpenTelemetry API](http://localhost:1313/tracing/trace_collection/custom_instrumentation/otel_instrumentation/nodejs/#setup). +1. Follow the steps for instrumenting your service with your chosen `opentelemetry-js-contrib` library. 
+ +## Example{% #example %} + +The following example demonstrates how to instrument the `http` and `express` OpenTelemetry integrations with the Datadog Node.js SDK: + +```js +const tracer = require('dd-trace').init() +const { TracerProvider } = tracer +const provider = new TracerProvider() +provider.register() + +const { registerInstrumentations } = require('@opentelemetry/instrumentation') +const { HttpInstrumentation } = require('@opentelemetry/instrumentation-http') +const { ExpressInstrumentation } = require('@opentelemetry/instrumentation-express') + +// Register the instrumentation with the Datadog trace provider +// and the OpenTelemetry instrumentation of your choice +registerInstrumentations({ + instrumentations: [ + new HttpInstrumentation({ + ignoreIncomingRequestHook (req) { + // Ignore spans created from requests to the agent + return req.path === '/v0.4/traces' || req.path === '/v0.7/config' || + req.path === '/telemetry/proxy/api/v2/apmtelemetry' + }, + ignoreOutgoingRequestHook (req) { + // Ignore spans created from requests to the agent + return req.path === '/v0.4/traces' || req.path === '/v0.7/config' || + req.path === '/telemetry/proxy/api/v2/apmtelemetry' + } + }), + new ExpressInstrumentation() + ], + tracerProvider: provider +}) + +const express = require('express') +const http = require('http') + +// app code below .... +``` + +## Configuration{% #configuration %} + +To avoid duplicate spans, disable the corresponding Datadog instrumentations. + +Set the `DD_TRACE_DISABLED_INSTRUMENTATIONS` environment variable to a comma-separated list of integration names to disable. For example, to disable Datadog instrumentations for the libraries used in the Setup example, set the following: + +```sh +DD_TRACE_DISABLED_INSTRUMENTATIONS=http,dns,express,net +``` + +{% /tab %} + +{% tab title="PHP" %} +## Compatibility requirements{% #compatibility-requirements %} + +The Datadog PHP SDK supports library [instrumentation](https://github.com/open-telemetry/opentelemetry-php-contrib/tree/main/src/Instrumentation) using the `stable` OpenTelemetry PHP Trace API. OpenTelemetry provides an [example](https://opentelemetry.io/docs/zero-code/php/) for instrumenting a sample PHP application. + +## Setup{% #setup %} + +To use OpenTelemetry integrations with the Datadog PHP SDK: + +1. Follow the instructions in [configuring OpenTelemetry](http://localhost:1313/tracing/trace_collection/custom_instrumentation/php/otel/#setup) in the Datadog PHP SDK documentation. +1. Follow the steps for instrumenting your service with your chosen `opentelemetry-php-contrib` library. + +## Example{% #example %} + +You can find a sample [PHP application](https://github.com/DataDog/trace-examples/tree/master/php/Slim4OtelDropIn) with OpenTelemetry and Datadog auto instrumentations in the `DataDog/trace-examples` GitHub repository. + +## Configuration{% #configuration %} + +To avoid duplicate spans, you can disable the corresponding Datadog integrations. Set the `DD_TRACE__ENABLED` environment variable to `0` or `false` to disable an integration(see [Integration names](http://localhost:1313/tracing/trace_collection/library_config/php/#integration-names)). + +Use the integration name when setting integration-specific configuration for example: Laravel is `DD_TRACE_LARAVEL_ENABLED`. 
+ +```sh +DD_TRACE_LARAVEL_ENABLED=false +``` + +{% /tab %} + +{% tab title=".NET" %} +## Compatibility requirements{% #compatibility-requirements %} + +The Datadog .NET SDK supports library instrumentations that come with [built-in OpenTelemetry support](https://opentelemetry.io/docs/languages/net/libraries/#use-natively-instrumented-libraries). + +## Setup{% #setup %} + +To use OpenTelemetry instrumentation libraries with the Datadog .NET SDK: + +1. Set the `DD_TRACE_OTEL_ENABLED` environment variable to `true`. +1. Follow the steps to configure each library, if any, to generate OpenTelemetry-compatible instrumentation via `ActivitySource` + +## Example{% #example %} + +The following example demonstrates how to instrument the `Hangfire` OpenTelemetry integrations with the Datadog .NET SDK: + +```csharp +using System; +using Hangfire; +using Hangfire.MemoryStorage; +using OpenTelemetry.Trace; +using OpenTelemetry.Resources; +using OpenTelemetry.Instrumentation.Hangfire; +using OpenTelemetry; + +class Program +{ + static void Main(string[] args) + { + // Create an OpenTelemetry TracerProvider to initialize the OpenTelemetry Hangfire instrumentation and build the configuration + var openTelemetry = Sdk.CreateTracerProviderBuilder() + .SetResourceBuilder(ResourceBuilder.CreateDefault().AddService("hangfire-demo2")) + .AddHangfireInstrumentation() // This line generates the OpenTelemetry spans + .Build(); + + // Configure Hangfire to use memory storage + GlobalConfiguration.Configuration.UseMemoryStorage(); + + // Create a new Hangfire server + using (var server = new BackgroundJobServer()) + { + // Enqueue a background job + BackgroundJob.Enqueue(() => RunBackgroundJob()); + + Console.WriteLine("Hangfire Server started. Press any key to exit..."); + Console.ReadKey(); + } + + // Dispose OpenTelemetry resources + openTelemetry?.Dispose(); + } + + // Define the background job method + public static void RunBackgroundJob() + { + Console.WriteLine("Hello from Hangfire!"); + } +} +``` + +## Verified OpenTelemetry Instrumentation Libraries{% #verified-opentelemetry-instrumentation-libraries %} + +| Library | Versions | NuGet package | Integration Name | Setup instructions | +| ----------------- | -------- | --------------------------------------------------------------------------------------- | ----------------- | ----------------------------- | +| Azure Service Bus | 7.14.0+ | [Azure.Messaging.ServiceBus](https://www.nuget.org/packages/Azure.Messaging.ServiceBus) | `AzureServiceBus` | See `Azure SDK` section below | + +### Azure SDK{% #azure-sdk %} + +The Azure SDK provides built-in OpenTelemetry support. Enable it by setting the `AZURE_EXPERIMENTAL_ENABLE_ACTIVITY_SOURCE` environment variable to `true` or by setting the `Azure.Experimental.EnableActivitySource` context switch to `true` in your application code. See [Azure SDK documentation](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/samples/Diagnostics.md#enabling-experimental-tracing-features) for more details. +{% /tab %} + +{% tab title="Ruby" %} +## Compatibility requirements{% #compatibility-requirements %} + +The Datadog Ruby SDK supports library [instrumentation](https://github.com/open-telemetry/opentelemetry-ruby-contrib/tree/main/instrumentation#opentelemetry-instrumentation-libraries) using the OpenTelemetry Ruby Trace API. + +OpenTelemetry provides an [example](https://github.com/open-telemetry/opentelemetry-ruby-contrib/tree/main/instrumentation/faraday/example) for instrumenting a sample application. 
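As a point of reference for the Setup steps below, a typical Gemfile for this combination pairs the Datadog SDK with the contrib instrumentation gem you want to use (the Faraday gem here is only an illustration):

```ruby
source 'https://rubygems.org'

gem 'datadog'                               # Datadog Ruby SDK (dd-trace-rb)
gem 'opentelemetry-instrumentation-faraday' # OpenTelemetry contrib instrumentation (illustrative)
```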
+ +## Setup{% #setup %} + +To use OpenTelemetry integrations with the Datadog Ruby SDK, perform the following steps: + +1. Follow the instructions in [configuring OpenTelemetry](http://localhost:1313/tracing/trace_collection/custom_instrumentation/ruby/otel/#configuring-opentelemetry-to-use-the-datadog-tracing-library) in the Datadog Ruby SDK documentation. +1. Follow the steps for instrumenting your service with your chosen `opentelemetry-ruby-contrib` library. + +{% /tab %} + +## Further reading{% #further-reading %} + +- [Instrument a custom method to get deep visibility into your business logic](http://localhost:1313/tracing/guide/instrument_custom_method) +- [Connect your Logs and Traces together](http://localhost:1313/tracing/connect_logs_and_traces) +- [Explore your services, resources, and traces](http://localhost:1313/tracing/visualization/) +- [Learn More about Datadog and the OpenTelemetry initiative](https://www.datadoghq.com/blog/opentelemetry-instrumentation/) diff --git a/opentelemetry-mdoc/instrument/otel_sdks/index.md b/opentelemetry-mdoc/instrument/otel_sdks/index.md new file mode 100644 index 0000000000000..c3e11ac4c52b7 --- /dev/null +++ b/opentelemetry-mdoc/instrument/otel_sdks/index.md @@ -0,0 +1,21 @@ +--- +title: OpenTelemetry SDKs +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Instrument Your Applications > OpenTelemetry + SDKs +--- + +# OpenTelemetry SDKs + +You can fully instrument your applications with OpenTelemetry SDKs. These SDKs provide complete implementations for creating traces, metrics, and logs in the OpenTelemetry format, which can then be sent to Datadog. + +{% alert level="info" %} +If you instrument your applications fully with OTel, some Datadog features are not supported. For more information, see the [Feature Compatibility](http://localhost:1313/opentelemetry/compatibility/) table. +{% /alert %} + +- [Use OpenTelemetry SDKs](https://opentelemetry.io/docs/languages/) + +## Further reading{% #further-reading %} + +- [Send Data to Datadog](http://localhost:1313/opentelemetry/setup/) diff --git a/opentelemetry-mdoc/integrations/apache_metrics/index.md b/opentelemetry-mdoc/integrations/apache_metrics/index.md new file mode 100644 index 0000000000000..177b6116cb1f5 --- /dev/null +++ b/opentelemetry-mdoc/integrations/apache_metrics/index.md @@ -0,0 +1,34 @@ +--- +title: Apache Web Server Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > Apache Web Server Metrics +--- + +# Apache Web Server Metrics + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/apache_metrics.db7ea9fb7871a15a917bababd06bcd18.png?auto=format" + alt="OpenTelemetry Apache metrics in an Apache dashboard" /%} + +The [Apache receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/apachereceiver) allows for collection of Apache Web Server metrics. Configure the receiver according to the specifications of the latest version of the `apachereceiver`. + +For more information, see the OpenTelemetry project documentation for the [Apache receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/apachereceiver). + +## Setup{% #setup %} + +To collect Apache Web Server metrics with OpenTelemetry for use with Datadog: + +1. 
Configure the [Apache receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/apachereceiver) in your OpenTelemetry Collector configuration. +1. Ensure the OpenTelemetry Collector is [configured to export to Datadog](http://localhost:1313/opentelemetry/setup/collector_exporter/). + +See the [Apache receiver documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/apachereceiver) for detailed configuration options and requirements. + +## Data collected{% #data-collected %} + +See [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/guide/metrics_mapping/) for more information. + +## Further reading{% #further-reading %} + +- [Setting Up the OpenTelemetry Collector](http://localhost:1313/opentelemetry/collector_exporter/) diff --git a/opentelemetry-mdoc/integrations/collector_health_metrics/index.md b/opentelemetry-mdoc/integrations/collector_health_metrics/index.md new file mode 100644 index 0000000000000..e2bf9174f29d7 --- /dev/null +++ b/opentelemetry-mdoc/integrations/collector_health_metrics/index.md @@ -0,0 +1,127 @@ +--- +title: Health Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > Health Metrics +--- + +# Health Metrics + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/collector_health_metrics.ffbf001b09f35ad7a4dfd6af804a1cfa.png?auto=format" + alt="OpenTelemetry Collector health metrics dashboard" /%} + +To collect health metrics from the OpenTelemetry Collector itself, configure the [Prometheus receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/prometheusreceiver) in your Datadog Exporter. + +For more information, see the OpenTelemetry project documentation for the [Prometheus receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/prometheusreceiver). 
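The snippet in the Setup section below defines only the receiver. For the scraped metrics to actually reach Datadog, the receiver must also be wired into a metrics pipeline that ends in the Datadog exporter; a minimal sketch (the exporter and processor settings shown here are assumptions for illustration):

```yaml
processors:
  batch:

exporters:
  datadog:
    api:
      key: ${env:DD_API_KEY}

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [datadog]
```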
+ +## Setup{% #setup %} + +Add the following lines to your Collector configuration: + +```yaml +receivers: + prometheus: + config: + scrape_configs: + - job_name: 'otelcol' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] +``` + +## Data collected{% #data-collected %} + +| OpenTelemetry Metric | Description | +| ------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `otelcol_process_uptime` | Uptime of the process | +| `otelcol_process_memory_rss` | Total physical memory (resident set size) | +| `otelcol_exporter_queue_size` | Current size of the retry queue (in batches) | +| `otelcol_exporter_sent_spans` | Number of spans successfully sent to destination | +| `otelcol_exporter_send_failed_metric_points` | Number of metric points in failed attempts to send to destination | +| `otelcol_exporter_send_failed_spans` | Number of spans in failed attempts to send to destination | +| `otelcol_process_cpu_seconds` | Total CPU user and system time in seconds | +| `otelcol_receiver_refused_spans` | Number of spans that could not be pushed into the pipeline | +| `otelcol_exporter_queue_capacity` | Fixed capacity of the retry queue (in batches) | +| `otelcol_receiver_accepted_spans` | Number of spans successfully pushed into the pipeline | +| `otelcol_exporter_sent_metric_points` | Number of metric points successfully sent to destination | +| `otelcol_exporter_enqueue_failed_spans` | Number of spans failed to be added to the sending queue | +| `otelcol_scraper_errored_metric_points` | Number of metric points that were unable to be scraped | +| `otelcol_scraper_scraped_metric_points` | Number of metric points successfully scraped | +| `otelcol_receiver_refused_metric_points` | Number of metric points that could not be pushed into the pipeline | +| `otelcol_receiver_accepted_metric_points` | Number of metric points successfully pushed into the pipeline | +| `otelcol_process_runtime_heap_alloc_bytes` | Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') | +| `otelcol_process_runtime_total_alloc_bytes` | Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') | +| `otelcol_exporter_enqueue_failed_log_records` | Number of log records failed to be added to the sending queue | +| `otelcol_processor_batch_timeout_trigger_send` | Number of times the batch was sent due to a timeout trigger | +| `otelcol_exporter_enqueue_failed_metric_points` | Number of metric points failed to be added to the sending queue | +| `otelcol_process_runtime_total_sys_memory_bytes` | Total bytes of memory obtained from the OS (see [the Go docs for `runtime.MemStats.Sys`](https://pkg.go.dev/runtime#MemStats.Sys)) | +| `otelcol_processor_batch_batch_size_trigger_send` | Number of times the batch was sent due to a size trigger | +| `otelcol_exporter_sent_log_records` | Number of log records successfully sent to destination | +| `otelcol_receiver_refused_log_records` | Number of log records that could not be pushed into the pipeline | +| `otelcol_receiver_accepted_log_records` | Number of log records successfully pushed into the pipeline | + +## Full example configuration{% #full-example-configuration %} + +For a full working example configuration with the Datadog exporter, see 
[`collector-metrics.yaml`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/collector-metrics.yaml). + +## Example logging output{% #example-logging-output %} + +``` +ResourceMetrics #0 +Resource SchemaURL: https://opentelemetry.io/schemas/1.6.1 +Resource attributes: + -> service.name: Str(opentelemetry-collector) + -> net.host.name: Str(192.168.55.78) + -> service.instance.id: Str(192.168.55.78:8888) + -> net.host.port: Str(8888) + -> http.scheme: Str(http) + -> k8s.pod.ip: Str(192.168.55.78) + -> cloud.provider: Str(aws) + -> cloud.platform: Str(aws_ec2) + -> cloud.region: Str(us-east-1) + -> cloud.account.id: Str(XXXXXXXXX) + -> cloud.availability_zone: Str(us-east-1c) + -> host.id: Str(i-0368add8e328c28f7) + -> host.image.id: Str(ami-08a2e6a8e82737230) + -> host.type: Str(m5.large) + -> host.name: Str(ip-192-168-53-115.ec2.internal) + -> os.type: Str(linux) + -> k8s.pod.name: Str(opentelemetry-collector-agent-gqwm8) + -> k8s.daemonset.name: Str(opentelemetry-collector-agent) + -> k8s.daemonset.uid: Str(6d6fef61-d4c7-4226-9b7b-7d6b893cb31d) + -> k8s.node.name: Str(ip-192-168-53-115.ec2.internal) + -> kube_app_name: Str(opentelemetry-collector) + -> kube_app_instance: Str(opentelemetry-collector) + -> k8s.namespace.name: Str(otel-staging) + -> k8s.pod.start_time: Str(2023-11-20T12:53:23Z) + -> k8s.pod.uid: Str(988d1bdc-5baf-4e98-942f-ab026a371daf) +ScopeMetrics #0 +ScopeMetrics SchemaURL: +InstrumentationScope otelcol/prometheusreceiver 0.88.0-dev +Metric #0 +Descriptor: + -> Name: otelcol_otelsvc_k8s_namespace_added + -> Description: Number of namespace add events received + -> Unit: + -> DataType: Sum + -> IsMonotonic: true + -> AggregationTemporality: Cumulative +NumberDataPoints #0 +Data point attributes: + -> service_instance_id: Str(d80d11f9-aa84-4e16-818d-3e7d868c0cfe) + -> service_name: Str(otelcontribcol) + -> service_version: Str(0.88.0-dev) +StartTimestamp: 1970-01-01 00:00:00 +0000 UTC +Timestamp: 2023-11-20 13:17:36.881 +0000 UTC +Value: 194151496.000000 +Metric #9 +Descriptor: + -> Name: otelcol_receiver_accepted_spans + -> Description: Number of spans successfully pushed into the pipeline. + -> Unit: + -> DataType: Sum + -> IsMonotonic: true + -> AggregationTemporality: Cumulative +``` diff --git a/opentelemetry-mdoc/integrations/datadog_extension/index.md b/opentelemetry-mdoc/integrations/datadog_extension/index.md new file mode 100644 index 0000000000000..31bd6a421e289 --- /dev/null +++ b/opentelemetry-mdoc/integrations/datadog_extension/index.md @@ -0,0 +1,183 @@ +--- +title: Datadog Extension +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > Datadog Extension +--- + +# Datadog Extension + +## Overview{% #overview %} + +{% alert level="info" %} +The Datadog Extension is in Preview. +{% /alert %} + +The Datadog Extension allows you to view OpenTelemetry Collector configuration and build information directly within Datadog on the [Infrastructure List](https://app.datadoghq.com/infrastructure) and [Resource Catalog](https://app.datadoghq.com/infrastructure/catalog). When used with the [Datadog Exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/datadogexporter), this extension gives you visibility into your Collector fleet without leaving the Datadog UI. 
+ +{% image + source="http://localhost:1313/images/opentelemetry/integrations/datadog_extension_hostlist.d1af05f71adbc1917e9357ec3f7333b0.png?auto=format" + alt="OpenTelemetry Collector configuration shown in Datadog Host List" /%} + +## Key features{% #key-features %} + +- **Collector Configuration Visibility**: View the complete configuration for any Collector in your infrastructure. +- **Build Information**: See Collector version, build details, and component information. +- **Fleet Management**: Monitor and manage your OpenTelemetry Collector fleet from the Datadog UI. +- **Local Inspection Endpoint**: Use an HTTP endpoint for local debugging and configuration verification. + +## Setup{% #setup %} + +### 1. Add the Datadog Extension to your Collector configuration + +Configure the Datadog Extension in your OpenTelemetry Collector configuration file: + +```yaml +extensions: + datadog: + api: + key: ${env:DD_API_KEY} + site: + # hostname: "my-collector-host" # Optional: must match Datadog Exporter hostname if set + +service: + extensions: [datadog] +``` + +### 2. Configure the Datadog Exporter + +This feature requires the Datadog Exporter to be configured and enabled in an active pipeline (`traces` or `metrics`). The extension uses the exporter's telemetry to associate the Collector's configuration with a specific host in Datadog. + +```yaml +exporters: + datadog/exporter: + api: + key: ${env:DD_API_KEY} + site: + # hostname: "my-collector-host" # Optional: must match Datadog Extension hostname if set +``` + +### 3. Enable the extension in your service configuration + +Add the Datadog Extension to your service extensions: + +```yaml +service: + extensions: [datadog] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [datadog/exporter] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [datadog/exporter] +``` + +## Configuration options{% #configuration-options %} + +| Parameter | Description | Default | +| -------------------------- | ----------------------------------------------- | ---------------- | +| `api.key` | Datadog API key (required) | - | +| `api.site` | Datadog site (for example, `us5.datadoghq.com`) | `datadoghq.com` | +| `hostname` | Custom hostname for the Collector | Auto-detected | +| `http.endpoint` | Local HTTP server endpoint | `localhost:9875` | +| `http.path` | HTTP server path for metadata | `/metadata` | +| `proxy_url` | HTTP proxy URL for outbound requests | - | +| `timeout` | Timeout for HTTP requests | `30s` | +| `tls.insecure_skip_verify` | Skip TLS certificate verification | `false` | + +{% alert level="warning" %} +**Hostname Matching**: If you specify a custom `hostname` in the Datadog Extension, it **must** match the `hostname` value in the Datadog Exporter configuration. The Datadog Extension does not have access to pipeline telemetry and cannot infer hostnames from incoming spans. It only obtains hostnames from system/cloud provider APIs or manual configuration. If telemetry has different [hostname attributes](http://localhost:1313/opentelemetry/config/hostname_tagging/?tab=host) than the hostname reported by the extension, the telemetry will not be correlated to the correct host, and you may see duplicate hosts in Datadog. 
+{% /alert %} + +### Complete configuration example{% #complete-configuration-example %} + +```yaml +extensions: + datadog: + api: + key: ${env:DD_API_KEY} + site: + hostname: "my-collector-host" + http: + endpoint: "localhost:9875" + path: "/metadata" + proxy_url: "http://proxy.example.com:8080" + timeout: 30s + tls: + insecure_skip_verify: false + +exporters: + datadog/exporter: + api: + key: ${env:DD_API_KEY} + site: + hostname: "my-collector-host" + +service: + extensions: [datadog] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [datadog/exporter] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [datadog/exporter] +``` + +## Viewing Collector configuration{% #viewing-collector-configuration %} + +Once configured, you can view your OpenTelemetry Collector configuration and build information in two locations: + +### Infrastructure List (Host List){% #infrastructure-list-host-list %} + +1. Navigate to **[Infrastructure > Hosts](https://app.datadoghq.com/infrastructure)** in your Datadog account. +1. Click on any host running the OpenTelemetry Collector (**Note**: Filter by `field:apps:otel` to only show Collector instances). +1. In the host details panel, select the **OTel Collector** tab to see build info and full Collector configuration. + +### Resource Catalog{% #resource-catalog %} + +1. Navigate to **[Infrastructure > Resource Catalog](https://app.datadoghq.com/infrastructure/catalog)** in your Datadog account +1. Filter for hosts or search for your Collector instances. +1. Click on any host running the OpenTelemetry Collector. +1. Scroll down to **Collector** to see build info and full Collector configuration. + +## Local HTTP server{% #local-http-server %} + +The Datadog Extension includes a local HTTP server for debugging and inspection: + +```bash +# Access collector metadata locally +curl http://localhost:9875/metadata +``` + +This endpoint provides: + +- Collector configuration (scrubbed of sensitive information) +- Build information and version details +- Active component list +- Extension status + +## Troubleshooting{% #troubleshooting %} + +### Configuration not appearing in Datadog{% #configuration-not-appearing-in-datadog %} + +1. **Check hostname matching**: Ensure hostnames match between the Datadog Extension and Datadog Exporter. +1. **Verify API key**: Confirm the API key is valid and has appropriate permissions. +1. **Check Collector logs**: Look for extension initialization and data submission logs. +1. **Confirm extension is enabled**: Verify the extension is listed in the service configuration. + +### HTTP server issues{% #http-server-issues %} + +1. **Port conflicts**: Ensure port 9875 is available or configure a different port. +1. **Network access**: Verify the HTTP server is accessible from your debug location. +1. **Check logs**: Review extension logs for HTTP server startup issues. 
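For example, if another process already owns port 9875, you can move the local metadata server to a free port through the extension's `http` settings (the port value is illustrative):

```yaml
extensions:
  datadog:
    api:
      key: ${env:DD_API_KEY}
    http:
      endpoint: "localhost:9876"  # any free local port
      path: "/metadata"
```

After restarting the Collector, query the new endpoint with `curl http://localhost:9876/metadata`.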
+ +## Further reading{% #further-reading %} + +- [Setting Up the OpenTelemetry Collector](http://localhost:1313/opentelemetry/setup/collector_exporter/) +- [Infrastructure List](http://localhost:1313/infrastructure/list/) +- [Resource Catalog](http://localhost:1313/infrastructure/resource_catalog/) diff --git a/opentelemetry-mdoc/integrations/docker_metrics/index.md b/opentelemetry-mdoc/integrations/docker_metrics/index.md new file mode 100644 index 0000000000000..90d3ba83a690d --- /dev/null +++ b/opentelemetry-mdoc/integrations/docker_metrics/index.md @@ -0,0 +1,177 @@ +--- +title: Docker Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > Docker Metrics +--- + +# Docker Metrics + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/docker_metrics.8dd22fdd0cea9ad4d525b98b61f9c646.png?auto=format" + alt="OpenTelemetry Docker metrics in a Containers dashboard" /%} + +To collect container metrics, configure the [Docker stats receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/dockerstatsreceiver) in your OpenTelemetry Collector and send the data using the Datadog Exporter. + +For more information, see the OpenTelemetry project documentation for [the Docker stats receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/dockerstatsreceiver). + +## Setup{% #setup %} + +{% tab title="Host" %} +The Docker stats receiver needs access to the Docker socket. By default, the receiver looks for the Docker socket at `unix:///var/run/docker.sock`. If this is not the Docker socket path, specify the path in the `endpoint` configuration line. + +Add the following lines to your Collector configuration: + +```yaml +receivers: + docker_stats: + endpoint: unix:///var/run/docker.sock # (default) + metrics: + container.network.io.usage.rx_packets: + enabled: true + container.network.io.usage.tx_packets: + enabled: true + container.cpu.usage.system: + enabled: true + container.memory.rss: + enabled: true + container.blockio.io_serviced_recursive: + enabled: true + container.uptime: + enabled: true + container.memory.hierarchical_memory_limit: + enabled: true +``` + +**Note**: If you are using the collector image, you may need to [configure additional permissions for the collector to have access to the Docker socket](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/11791). +{% /tab %} + +{% tab title="Kubernetes" %} +The Docker stats receiver needs access to the Docker socket. 
In Kubernetes, if you are running Docker as a runtime, mount the Docker socket: + +Add the following lines to `values.yaml`: + +```yaml +extraVolumes: + - name: docker-sock + hostPath: + path: /var/run/docker.sock +extraVolumeMounts: + - name: docker-sock + mountPath: /var/run/docker.sock +``` + +Add the following in the Collector configuration: + +```yaml +receivers: + docker_stats: + endpoint: unix:///var/run/docker.sock # default + metrics: + container.network.io.usage.rx_packets: + enabled: true + container.network.io.usage.tx_packets: + enabled: true + container.cpu.usage.system: + enabled: true + container.memory.rss: + enabled: true + container.blockio.io_serviced_recursive: + enabled: true + container.uptime: + enabled: true + container.memory.hierarchical_memory_limit: + enabled: true +``` + +{% /tab %} + +## Correlate traces with container metrics{% #correlate-traces-with-container-metrics %} + +To correlate traces with container metrics, both telemetry types must share common resource attributes. These attributes provide the necessary context for correlation. + +1. Configure [Unified Service Tagging](http://localhost:1313/opentelemetry/mapping/semantic_mapping#unified-service-tagging) attributes. +1. Configure the following attributes on both your traces and metrics: + +| Attribute | Value | Description | +| ---------------------------------------- | -------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `container.id` (**Required**) | The Docker container ID. | Uniquely identifies the container. Essential for correlating spans with container metrics. Without this attribute on traces, container metric views are not shown in APM. | +| `container.name` or `k8s.container.name` | The human‑readable container name (for example, `redis-otel`). | Used as the display name in Datadog. | +| `k8s.pod.name` | The pod name (for example, `redis-otel-59c9b5c9d5-s9t2r`). | Enables navigation between pod and container context views in Kubernetes environments. | + +### Traces{% #traces %} + +To populate these resource attributes on **traces**: + +- You can use a `resourcedetectionprocessor` in your Collector config: + + ```yaml + processors: + resourcedetection: + detectors: ["env", "container", "k8s"] + service: + pipelines: + traces: + processors: [resourcedetection] + ``` + +- You can add a container resource detector in your application code.For example, using Go: + + ```go + // resource.WithContainer() adds container.id attribute to the trace's resource + res, err := resource.New( + ctx, + resource.WithContainer(), + resource.WithFromEnv(), + semconv.ServiceNameKey.String("calendar"), + ) + ``` + +See the complete example in [opentelemetry-examples](https://github.com/DataDog/opentelemetry-examples/blob/main/apps/rest-services/golang/calendar/main.go). + +### Metrics{% #metrics %} + +To populate these resource attributes on **metrics**, the `docker_stats` receiver automatically detects and adds these attributes on container metrics it emits. + +## Data collected{% #data-collected %} + +The Docker Stats receiver generates container metrics for the OpenTelemetry Collector. 
The Datadog Exporter translates container metrics to their Datadog counterparts for use in the following views: + +- [Containers Overview default dashboard](http://localhost:1313/opentelemetry/otel_collector_datadog_exporter/?tab=onahost#containers-overview-dashboard) +- [APM Trace view](http://localhost:1313/tracing/trace_explorer/trace_view/) with container metrics + +Learn more about [mapping between OpenTelemetry and Datadog semantic conventions for resource attributes](http://localhost:1313/opentelemetry/guide/semantic_mapping/). + +The following table shows the Datadog container metric names that correspond to OpenTelemetry container metric names: + +See [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/guide/metrics_mapping/) for more information. + +## Full example configuration{% #full-example-configuration %} + +For a full working example configuration with the Datadog exporter, see [`docker-stats.yaml`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/docker-stats.yaml). + +## Example logging output{% #example-logging-output %} + +``` +Resource SchemaURL: https://opentelemetry.io/schemas/1.6.1 +Resource attributes: + -> container.runtime: Str(docker) + -> container.hostname: Str(be51776e036e) + -> container.id: Str(be51776e036e04461169fce2847d4e77be3d83856b474ad544143afc3d48e9e5) + -> container.image.name: Str(sha256:9bdff337981de15f8cdf9e73b24af64a03e2e6dd1f156a274a15c1d8db98ab79) + -> container.name: Str(redis-otel) +ScopeMetrics #0 +ScopeMetrics SchemaURL: +InstrumentationScope otelcol/dockerstatsreceiver 0.89.0-dev +Metric #6 +Descriptor: + -> Name: container.cpu.utilization + -> Description: Percent of CPU used by the container. + -> Unit: 1 + -> DataType: Gauge +NumberDataPoints #0 +StartTimestamp: 2023-11-20 14:58:17.522765 +0000 UTC +Timestamp: 2023-11-20 14:58:19.550208 +0000 UTC +Value: 0.170933 +``` diff --git a/opentelemetry-mdoc/integrations/haproxy_metrics/index.md b/opentelemetry-mdoc/integrations/haproxy_metrics/index.md new file mode 100644 index 0000000000000..68c16665aa3ec --- /dev/null +++ b/opentelemetry-mdoc/integrations/haproxy_metrics/index.md @@ -0,0 +1,34 @@ +--- +title: HAProxy Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > HAProxy Metrics +--- + +# HAProxy Metrics + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/haproxy_metrics.25ddb1705d2eb0ad0d935071577d3cf4.png?auto=format" + alt="OpenTelemetry HAProxy metrics in an HAProxy dashboard" /%} + +The [HAProxy receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/haproxyreceiver) allows for collection of HAProxy metrics and access to the [HAProxy Overview](https://app.datadoghq.com/dash/integration/28/haproxy---overview) dashboard. Configure the receiver according to the specifications of the latest version of the `haproxyreceiver`. + +For more information, see the OpenTelemetry project documentation for the [HAProxy receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/haproxyreceiver). + +## Setup{% #setup %} + +To collect HAProxy metrics with OpenTelemetry for use with Datadog: + +1. Configure the [HAProxy receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/haproxyreceiver) in your OpenTelemetry Collector configuration. +1. 
Ensure the OpenTelemetry Collector is [configured to export to Datadog](http://localhost:1313/opentelemetry/setup/collector_exporter/). + +See the [HAProxy receiver documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/haproxyreceiver) for detailed configuration options and requirements. + +## Data collected{% #data-collected %} + +See [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/guide/metrics_mapping/) for more information. + +## Further reading{% #further-reading %} + +- [Setting Up the OpenTelemetry Collector](http://localhost:1313/opentelemetry/collector_exporter/) diff --git a/opentelemetry-mdoc/integrations/host_metrics/index.md b/opentelemetry-mdoc/integrations/host_metrics/index.md new file mode 100644 index 0000000000000..763bd236b0831 --- /dev/null +++ b/opentelemetry-mdoc/integrations/host_metrics/index.md @@ -0,0 +1,163 @@ +--- +title: Host Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > Host Metrics +--- + +# Host Metrics + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/host_metrics.5bdacef6580156ca03c655b79b88c340.png?auto=format" + alt="OpenTelemetry host metrics dashboard" /%} + +To collect system metrics such as CPU, disk, and memory usage, enable the [host metrics receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/hostmetricsreceiver/README.md) in your Collector. + +For more information, including supported operating systems, see the OpenTelemetry project documentation for the [host metrics receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/hostmetricsreceiver/README.md). + +## Setup{% #setup %} + +{% tab title="Host" %} +Add the following lines to your Collector configuration: + +```yaml +processors: + resourcedetection: + detectors: [system] + system: + hostname_sources: [os] + +receivers: + hostmetrics: + collection_interval: 10s + scrapers: + paging: + metrics: + system.paging.utilization: + enabled: true + cpu: + metrics: + system.cpu.utilization: + enabled: true + disk: + filesystem: + metrics: + system.filesystem.utilization: + enabled: true + load: + memory: + network: + processes: + +service: + pipelines: + metrics: + receivers: [hostmetrics] + processors: [resourcedetection] + exporters: [datadog] +``` + +{% /tab %} + +{% tab title="Kubernetes" %} +Set up the host metrics receiver on each node from which metrics need to be collected. To collect host metrics from every node in your cluster, deploy the host metrics receiver as a DaemonSet collector. Add the following in the Collector configuration: + +```yaml +receivers: + hostmetrics: + collection_interval: 10s + scrapers: + paging: + metrics: + system.paging.utilization: + enabled: true + cpu: + metrics: + system.cpu.utilization: + enabled: true + system.cpu.physical.count: + enabled: true + system.cpu.logical.count: + enabled: true + system.cpu.frequency: + enabled: true + disk: + filesystem: + metrics: + system.filesystem.utilization: + enabled: true + load: + memory: + network: + processes: +``` + +{% /tab %} + +## Data collected{% #data-collected %} + +Host Metrics are collected by the [host metrics receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/hostmetricsreceiver). 
For information about setting up the receiver, see [OpenTelemetry Collector Datadog Exporter](http://localhost:1313/opentelemetry/otel_collector_datadog_exporter/). + +The metrics, mapped to Datadog metrics, are used in the following views: + +- [Infrastructure Host Map](https://app.datadoghq.com/infrastructure/map?fillby=avg%3Acpuutilization&groupby=availability-zone) +- [Infrastructure List](https://app.datadoghq.com/infrastructure) +- [Host default dashboards](http://localhost:1313/opentelemetry/collector_exporter/#out-of-the-box-dashboards) +- [APM Trace view Host info](http://localhost:1313/tracing/trace_explorer/trace_view/?tab=hostinfo) + +**Note**: To correlate trace and host metrics, configure [Unified Service Tagging attributes](http://localhost:1313/opentelemetry/correlate/#prerequisite-unified-service-tagging) for each service, and set the `host.name` resource attribute to the corresponding underlying host for both service and collector instances. + +The following table shows which Datadog host metric names are associated with corresponding OpenTelemetry host metric names, and, if applicable, what math is applied to the OTel host metric to transform it to Datadog units during the mapping. + +See [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/guide/metrics_mapping/#host-metrics) for more information. + +## Full example configuration{% #full-example-configuration %} + +For a full working example configuration with the Datadog exporter, see [`host-metrics.yaml`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/host-metrics.yaml). + +## Example logging output{% #example-logging-output %} + +``` +ResourceMetrics #1 +Resource SchemaURL: https://opentelemetry.io/schemas/1.9.0 +Resource attributes: + -> k8s.pod.ip: Str(192.168.63.232) + -> cloud.provider: Str(aws) + -> cloud.platform: Str(aws_ec2) + -> cloud.region: Str(us-east-1) + -> cloud.account.id: Str(XXXXXXXXX) + -> cloud.availability_zone: Str(us-east-1c) + -> host.id: Str(i-07e7d48cedbec9e86) + -> host.image.id: Str(ami-0cbbb5a8c6f670bb6) + -> host.type: Str(m5.large) + -> host.name: Str(ip-192-168-49-157.ec2.internal) + -> os.type: Str(linux) + -> kube_app_instance: Str(opentelemetry-collector-gateway) + -> k8s.pod.name: Str(opentelemetry-collector-gateway-688585b95-l2lds) + -> k8s.pod.uid: Str(d8063a97-f48f-4e9e-b180-8c78a56d0a37) + -> k8s.replicaset.uid: Str(9e2d5331-f763-43a3-b0be-9d89c0eaf0cd) + -> k8s.replicaset.name: Str(opentelemetry-collector-gateway-688585b95) + -> k8s.deployment.name: Str(opentelemetry-collector-gateway) + -> kube_app_name: Str(opentelemetry-collector) + -> k8s.namespace.name: Str(otel-ds-gateway) + -> k8s.pod.start_time: Str(2023-11-20T12:53:08Z) + -> k8s.node.name: Str(ip-192-168-49-157.ec2.internal) +ScopeMetrics #0 +ScopeMetrics SchemaURL: +InstrumentationScope otelcol/hostmetricsreceiver/memory 0.88.0-dev +Metric #0 +Descriptor: + -> Name: system.memory.usage + -> Description: Bytes of memory in use. 
+ -> Unit: By + -> DataType: Sum + -> IsMonotonic: false + -> AggregationTemporality: Cumulative +NumberDataPoints #0 +Data point attributes: + -> state: Str(used) +StartTimestamp: 2023-08-21 13:45:37 +0000 UTC +Timestamp: 2023-11-20 13:04:19.489045896 +0000 UTC +Value: 1153183744 +``` diff --git a/opentelemetry-mdoc/integrations/iis_metrics/index.md b/opentelemetry-mdoc/integrations/iis_metrics/index.md new file mode 100644 index 0000000000000..75c4ea4a89c1d --- /dev/null +++ b/opentelemetry-mdoc/integrations/iis_metrics/index.md @@ -0,0 +1,34 @@ +--- +title: IIS Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > IIS Metrics +--- + +# IIS Metrics + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/iis_metrics.d8732538a3b969024afa3b90834abcd4.png?auto=format" + alt="OpenTelemetry IIS metrics in an IIS dashboard" /%} + +The [IIS receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/iisreceiver) allows for collection of IIS (Internet Information Services) metrics and access to the [IIS Overview](https://app.datadoghq.com/screen/integration/243/iis---overview) dashboard. Configure the receiver according to the specifications of the latest version of the `iisreceiver`. + +For more information, see the OpenTelemetry project documentation for the [IIS receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/iisreceiver). + +## Setup{% #setup %} + +To collect IIS metrics with OpenTelemetry for use with Datadog: + +1. Configure the [IIS receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/iisreceiver) in your OpenTelemetry Collector configuration. +1. Ensure the OpenTelemetry Collector is [configured to export to Datadog](http://localhost:1313/opentelemetry/setup/collector_exporter/). + +See the [IIS receiver documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/iisreceiver) for detailed configuration options and requirements. + +## Data collected{% #data-collected %} + +See [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/guide/metrics_mapping/) for more information. + +## Further reading{% #further-reading %} + +- [Setting Up the OpenTelemetry Collector](http://localhost:1313/opentelemetry/collector_exporter/) diff --git a/opentelemetry-mdoc/integrations/index.md b/opentelemetry-mdoc/integrations/index.md new file mode 100644 index 0000000000000..41eaeb87f5a51 --- /dev/null +++ b/opentelemetry-mdoc/integrations/index.md @@ -0,0 +1,81 @@ +--- +title: Integrations +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations +--- + +# Integrations + +This page covers Datadog-supported OpenTelemetry (OTel) integrations. These integrations allow you to collect and monitor your observability data using OpenTelemetry in Datadog. + +## Overview{% #overview %} + +OpenTelemetry (OTel) integrations are components that enable the collection of observability data (metrics, traces, and logs) from various sources using the OpenTelemetry standard. These integrations are designed to work with the OpenTelemetry Collector, which receives, processes, and exports telemetry data to observability backends like Datadog. 
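As an illustration of this flow, the following is a minimal sketch of a Collector configuration that receives OTLP data and exports it to Datadog; the Datadog site value and the `DD_API_KEY` environment variable are assumptions about your environment:

```yaml
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317

exporters:
  datadog:
    api:
      site: datadoghq.com        # assumption: set to your Datadog site
      key: ${env:DD_API_KEY}     # assumption: API key supplied through an environment variable

service:
  pipelines:
    metrics:
      receivers: [otlp]
      exporters: [datadog]
    traces:
      receivers: [otlp]
      exporters: [datadog]
```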
+ +For a comprehensive list of all OpenTelemetry integrations, see the [OpenTelemetry Registry](https://opentelemetry.io/ecosystem/registry/). This registry provides information on receivers, exporters, and other components in the OpenTelemetry ecosystem. + +## Metric pricing{% #metric-pricing %} + +Datadog collects metrics from supported OpenTelemetry receivers at no extra cost. These no-cost metrics are: + +- Defined in the `metadata.yaml` file for each receiver. +- Listed in the [Metrics Mappings](http://localhost:1313/opentelemetry/mapping/metrics_mapping/#metrics-mappings) table. + +For example, the [`dockerstatsreceiver`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/dockerstatsreceiver/metadata.yaml) `metadata.yaml` file lists metrics that you can collect at no extra cost. + +{% alert level="warning" %} +Ensure that you configure receivers according to OpenTelemetry receiver documentation. Incorrectly configured receivers may cause metrics to be classified as custom, resulting in additional charges. +{% /alert %} + +## Datadog-supported OpenTelemetry integrations{% #datadog-supported-opentelemetry-integrations %} + +Datadog supports the following OpenTelemetry integrations: + +### APM (Application Performance Monitoring){% #apm-application-performance-monitoring %} + +Monitor and optimize your application's performance: + +- [Trace Metrics](http://localhost:1313/opentelemetry/integrations/trace_metrics) - Generate APM stats such as hits, errors, and duration +- [Runtime Metrics](http://localhost:1313/opentelemetry/integrations/runtime_metrics/) - Collect runtime metrics for Java, .NET, and Go applications + +### Collector{% #collector %} + +Monitor the health and performance of your OpenTelemetry Collector: + +- [Collector Health Metrics](http://localhost:1313/opentelemetry/integrations/collector_health_metrics/) - Track the performance of your OpenTelemetry Collector +- [Datadog Extension](http://localhost:1313/opentelemetry/integrations/datadog_extension/) - View Collector configuration and build information in Datadog Infrastructure Monitoring + +### Containers and hosts{% #containers-and-hosts %} + +Gain insights into your containerized environments and host systems: + +- [Docker Metrics](http://localhost:1313/opentelemetry/integrations/docker_metrics/) - Monitor Docker container performance +- [Host Metrics](http://localhost:1313/opentelemetry/integrations/host_metrics/) - Track system metrics such as CPU, disk, and memory usage +- [Kubernetes Metrics](http://localhost:1313/opentelemetry/integrations/kubernetes_metrics/) - Monitor Kubernetes cluster health and performance +- [Podman Metrics](http://localhost:1313/opentelemetry/integrations/podman_metrics/) - Monitor Podman container performance + +### Web servers and proxies{% #web-servers-and-proxies %} + +Monitor web servers and proxy technologies: + +- [Apache Web Server Metrics](http://localhost:1313/opentelemetry/integrations/apache_metrics/) - Collect metrics from Apache HTTP Server +- [NGINX Metrics](http://localhost:1313/opentelemetry/integrations/nginx_metrics/) - Monitor NGINX web server performance +- [IIS Metrics](http://localhost:1313/opentelemetry/integrations/iis_metrics/) - Track Internet Information Services (IIS) metrics +- [HAProxy Metrics](http://localhost:1313/opentelemetry/integrations/haproxy_metrics/) - Monitor HAProxy load balancer performance + +### Databases and messaging{% #databases-and-messaging %} + +Monitor database and messaging systems: + +- [MySQL 
Metrics](http://localhost:1313/opentelemetry/integrations/mysql_metrics/) - Track MySQL database performance +- [Kafka Metrics](http://localhost:1313/opentelemetry/integrations/kafka_metrics/) - Monitor Apache Kafka messaging platform + +### Big data and processing{% #big-data-and-processing %} + +Monitor big data processing frameworks: + +- [Apache Spark Metrics](http://localhost:1313/opentelemetry/integrations/spark_metrics/) - Track Apache Spark performance metrics + +## Further reading{% #further-reading %} + +- [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/schema_semantics/metrics_mapping/) diff --git a/opentelemetry-mdoc/integrations/kafka_metrics/index.md b/opentelemetry-mdoc/integrations/kafka_metrics/index.md new file mode 100644 index 0000000000000..7fb872679339b --- /dev/null +++ b/opentelemetry-mdoc/integrations/kafka_metrics/index.md @@ -0,0 +1,281 @@ +--- +title: Kafka Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > Kafka Metrics +--- + +# Kafka Metrics + +{% alert level="warning" %} +OTel Kafka Metrics Remapping is in public alpha. It is available in versions >= 0.93.0 of the collector. If you have feedback related to this, reach out to your account team to provide your input. +{% /alert %} + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/kafka_metrics.34bf4de835ecb56a140a92077ecf86cd.png?auto=format" + alt="OpenTelemetry Kafka metrics in OOTB Kafka dashboard" /%} + +The [Kafka metrics receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/kafkametricsreceiver), [JMX Receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/jmxreceiver), and [JMX Metrics Gatherer](https://github.com/open-telemetry/opentelemetry-java-contrib/blob/main/jmx-metrics) allow you to collect Kafka metrics and access the out-of-the-box [Kafka Dashboard](https://app.datadoghq.com/dash/integration/50/kafka-zookeeper-and-kafka-consumer-overview), "Kafka, Zookeeper and Kafka Consumer Overview". + +**Note**: The [JMX Receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/jmxreceiver) and the [JMX Metrics Gatherer](https://github.com/open-telemetry/opentelemetry-java-contrib/blob/main/jmx-metrics) should be considered alternatives to each other. They collect the same set of metrics (the [JMX Receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/jmxreceiver) launches the [JMX Metrics Gatherer](https://github.com/open-telemetry/opentelemetry-java-contrib/blob/main/jmx-metrics)). + +## Kafka metrics receiver{% #kafka-metrics-receiver %} + +{% tab title="Host" %} + +```yaml +receivers: + kafkametrics: + brokers: "${env:KAFKA_BROKER_ADDRESS}" + protocol_version: 2.0.0 + scrapers: + - brokers + - topics + - consumers +``` + +{% /tab %} + +{% tab title="Kubernetes" %} +The Kafka metrics receiver needs to be used in a collector in `deployment` mode with a single replica. This ensures that the same metric is not collected multiple times. The collector in deployment mode can then leverage the Datadog Exporter to export the metrics directly to Datadog, or leverage the OTLP exporter to forward the metrics to another collector instance. 
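For reference, the export side of such a deployment-mode collector could be wired as in the following minimal sketch; the `datadog` exporter and the `DD_API_KEY` environment variable are assumptions, and you could instead use an `otlp` exporter pointed at a downstream gateway collector:

```yaml
exporters:
  datadog:
    api:
      key: ${env:DD_API_KEY}   # assumption: API key supplied through an environment variable

service:
  pipelines:
    metrics:
      receivers: [kafkametrics]
      exporters: [datadog]
```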
+ +Add the following lines to `values.yaml`: + +```yaml +mode: deployment +``` + +Add the following in the Collector configuration: + +```yaml +receivers: + kafkametrics: + brokers: ${env:KAFKA_BROKER_ADDRESS} + protocol_version: 2.0.0 + scrapers: + - brokers + - topics + - consumers +``` + +{% /tab %} + +## JMX receiver{% #jmx-receiver %} + +{% tab title="Host" %} +The JMX Receiver has the following requirements: + +- JRE is available on the host where you are running the collector. +- The JMX Metric Gatherer JAR is available on the host where you are running the collector. You can download the most recent release of the JMX Metric Gatherer JAR from the [opentelemetry-java-contrib releases page](https://github.com/open-telemetry/opentelemetry-java-contrib/releases). + +Add the following in the Collector configuration: + +```yaml +receivers: + jmx: + jar_path: /path/to/opentelemetry-jmx-metrics.jar + endpoint: ${env:KAFKA_BROKER_JMX_ADDRESS} + target_system: kafka,jvm + jmx/consumer: + jar_path: /path/to/opentelemetry-jmx-metrics.jar + endpoint: ${env:KAFKA_CONSUMER_JMX_ADDRESS} + target_system: kafka-consumer + jmx/producer: + jar_path: /path/to/opentelemetry-jmx-metrics.jar + endpoint: ${env:KAFKA_PRODUCER_JMX_ADDRESS} + target_system: kafka-producer +``` + +{% /tab %} + +{% tab title="Kubernetes" %} +The JMX receiver needs to be used in a collector in `deployment` mode with a single replica. This ensures that the same metric is not collected multiple times. The collector in deployment mode can then leverage the Datadog Exporter to export the metrics directly to Datadog, or leverage the OTLP exporter to forward the metrics to another collector instance. + +The JMX Receiver has the following requirements: + +- JRE is available on the host in which you are running the collector. +- The JMX Metric Gatherer JAR is available on the host in which you are running the collector. You can download the most recent release of the JMX Metric Gatherer JAR [here](https://github.com/open-telemetry/opentelemetry-java-contrib/releases). + +Because the OTel collector default image does not meet the requirements above, a custom image needs to be built. See the Dockerfile below for an example image that contains the collector binary, JRE, and JMX Metrics Gatherer Jar. 
+ +Dockerfile: + +```Dockerfile +FROM alpine:latest as prep + +# OpenTelemetry Collector Binary +ARG OTEL_VERSION=0.92.0 +ARG TARGETARCH=linux_amd64 +ADD "https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v${OTEL_VERSION}/otelcol-contrib_${OTEL_VERSION}_${TARGETARCH}.tar.gz" /otelcontribcol +RUN tar -zxvf /otelcontribcol + +# JMX Metrics Gatherer Jar +ARG JMX_GATHERER_JAR_VERSION=1.27.0 +ADD https://github.com/open-telemetry/opentelemetry-java-contrib/releases/download/v${JMX_GATHERER_JAR_VERSION}/opentelemetry-jmx-metrics.jar /opt/opentelemetry-jmx-metrics.jar +# nonroot user id (https://groups.google.com/g/distroless-users/c/-DpzCr7xRDY/m/eQqJmJroCgAJ) +ARG USER_UID=65532 +RUN chown ${USER_UID} /opt/opentelemetry-jmx-metrics.jar + + +FROM gcr.io/distroless/java17-debian11:nonroot + +COPY --from=prep /opt/opentelemetry-jmx-metrics.jar /opt/opentelemetry-jmx-metrics.jar +COPY --from=prep /otelcol-contrib /otelcol-contrib + +EXPOSE 4317 55680 55679 +ENTRYPOINT ["/otelcol-contrib"] +CMD ["--config", "/etc/otelcol-contrib/config.yaml"] +``` + +Add the following lines to `values.yaml`: + +```yaml +mode: deployment +``` + +Add the following in the Collector configuration: + +```yaml +receivers: + jmx: + jar_path: /path/to/opentelemetry-jmx-metrics.jar + endpoint: ${env:KAFKA_BROKER_JMX_ADDRESS} + target_system: kafka,jvm + jmx/consumer: + jar_path: /path/to/opentelemetry-jmx-metrics.jar + endpoint: ${env:KAFKA_CONSUMER_JMX_ADDRESS} + target_system: kafka-consumer + jmx/producer: + jar_path: /path/to/opentelemetry-jmx-metrics.jar + endpoint: ${env:KAFKA_PRODUCER_JMX_ADDRESS} + target_system: kafka-producer +``` + +{% /tab %} + +## JMX Metrics Gatherer{% #jmx-metrics-gatherer %} + +{% tab title="Host" %} +The JMX Metric Gatherer is intended to be run as an uber jar and configured with properties from the command line. + +Please make sure that JRE is available on the host in which you are running the collector. If not, please make sure to download it, e.g. + +``` +apt-get update && \ +apt-get -y install default-jre-headless +``` + +Once you have done this, download the most recent release of the JMX Metric Gatherer JAR [here](https://github.com/open-telemetry/opentelemetry-java-contrib/releases) and run: + +```gdscript3 +// Kafka Broker +java -jar -Dotel.jmx.service.url=service:jmx:rmi:///jndi/rmi://{KAFKA_BROKER_JMX_ADDRESS}/jmxrmi \ -Dotel.jmx.target.system=kafka,jvm \ +-Dotel.metrics.exporter=otlp \ +-Dotel.exporter.otlp.endpoint=http://localhost:4317 \ +-jar /path/to/opentelemetry-jmx-metrics.jar + +// Kafka Producer +java -jar -Dotel.jmx.service.url=service:jmx:rmi:///jndi/rmi://{KAFKA_PRODUCER_JMX_ADDRESS}/jmxrmi \ -Dotel.jmx.target.system=kafka-producer \ +-Dotel.metrics.exporter=otlp \ +-Dotel.exporter.otlp.endpoint=http://localhost:4317 \ +-jar /path/to/opentelemetry-jmx-metrics.jar + +// Kafka Consumer +java -jar -Dotel.jmx.service.url=service:jmx:rmi:///jndi/rmi://{KAFKA_CONSUMER_JMX_ADDRESS}/jmxrmi \ -Dotel.jmx.target.system=kafka-consumer \ +-Dotel.metrics.exporter=otlp \ +-Dotel.exporter.otlp.endpoint=http://localhost:4317 \ +-jar /path/to/opentelemetry-jmx-metrics.jar +``` + +{% /tab %} + +{% tab title="Kubernetes" %} +The JMX Metric Gatherer is intended to be run as an uber jar and configured with properties from the command line. + +In order to deploy this in Kubernetes, you need to build an image which contains JRE and the JMX Metrics Gatherer Jar. 
Please see the Dockerfile below for an example image that contains JRE and JMX Metrics Gatherer Jar. + +Dockerfile: + +```Dockerfile +FROM alpine:latest as prep + +# JMX Metrics Gatherer Jar +ARG JMX_GATHERER_JAR_VERSION=1.27.0 +ADD https://github.com/open-telemetry/opentelemetry-java-contrib/releases/download/v${JMX_GATHERER_JAR_VERSION}/opentelemetry-jmx-metrics.jar /opt/opentelemetry-jmx-metrics.jar +# nonroot user id (https://groups.google.com/g/distroless-users/c/-DpzCr7xRDY/m/eQqJmJroCgAJ) +ARG USER_UID=65532 +RUN chown ${USER_UID} /opt/opentelemetry-jmx-metrics.jar + +FROM gcr.io/distroless/java17-debian11:nonroot + +COPY --from=prep /opt/opentelemetry-jmx-metrics.jar /opt/opentelemetry-jmx-metrics.jar + +EXPOSE 4317 55680 55679 +ENTRYPOINT ["java"] +CMD ["-Dotel.jmx.service.url=service:jmx:rmi:///jndi/rmi://kafka:1099/jmxrmi", \ +"-Dotel.jmx.target.system=kafka,jvm", \ +"-Dotel.metrics.exporter=otlp", \ +"-Dotel.exporter.otlp.endpoint=http://otelcol:4317", \ +"-jar", \ +"/opt/opentelemetry-jmx-metrics.jar"] +``` + +{% /tab %} + +## Log collection{% #log-collection %} + +See [Log Collection](http://localhost:1313/opentelemetry/collector_exporter/log_collection) for instructions on how to collect logs using the OpenTelemetry Collector. + +To appear in the out-of-the-box Kafka Dashboard, the Kafka logs need to be tagged with `source:kafka`. To do this, use an attributes processor: + +```yaml +processors: + attributes: + actions: + - key: ddtags + value: "source:kafka" + action: insert +``` + +In order to ensure this attribute only gets added to your Kafka logs, use [include/exclude filtering](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/attributesprocessor/README.md#includeexclude-filtering) of the attributes processor. + +## Data collected{% #data-collected %} + +**Note:** In Datadog `-` gets translated to `_`. For the metrics prepended by `otel.`, this means that the OTel metric name and the Datadog metric name are the same (for example, `kafka.producer.request-rate` and `kafka.producer.request_rate`). In order to avoid double counting for these metrics, the OTel metric is then prepended with `otel.`. + +See [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/guide/metrics_mapping/#kafka-metrics) for more information. + +## Full example configuration{% #full-example-configuration %} + +For a full working example configuration with the Datadog exporter, see [`kafka.yaml`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/kafka.yaml). 
+ +## Example logging output{% #example-logging-output %} + +``` +Resource SchemaURL: https://opentelemetry.io/schemas/1.20.0 +Resource attributes: + -> service.name: Str(unknown_service:java) + -> telemetry.sdk.language: Str(java) + -> telemetry.sdk.name: Str(opentelemetry) + -> telemetry.sdk.version: Str(1.27.0) +ScopeMetrics #0 +ScopeMetrics SchemaURL: +InstrumentationScope io.opentelemetry.contrib.jmxmetrics 1.27.0-alpha +Metric #0 +Descriptor: + -> Name: kafka.message.count + -> Description: The number of messages received by the broker + -> Unit: {messages} + -> DataType: Sum + -> IsMonotonic: true + -> AggregationTemporality: Cumulative +NumberDataPoints #0 +StartTimestamp: 2024-01-22 15:50:24.212 +0000 UTC +Timestamp: 2024-01-22 15:51:24.218 +0000 UTC +Value: 25 +``` + +## Example app{% #example-app %} + +Please see the following [example application](https://github.com/DataDog/opentelemetry-examples/tree/main/apps/kafka-metrics) which demonstrates the configurations discussed in this documentation. This example application is comprised of a producer, consumer, broker and zookeeper instance. It demonstrates using the Kafka metrics receiver, JMX Receiver and/or JMX Metrics Gatherer. diff --git a/opentelemetry-mdoc/integrations/kubernetes_metrics/index.md b/opentelemetry-mdoc/integrations/kubernetes_metrics/index.md new file mode 100644 index 0000000000000..fb1523f3abaa3 --- /dev/null +++ b/opentelemetry-mdoc/integrations/kubernetes_metrics/index.md @@ -0,0 +1,201 @@ +--- +title: Kubernetes Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > Kubernetes Metrics +--- + +# Kubernetes Metrics + +{% alert level="info" %} +The OpenTelemetry Kubernetes integration is in Preview. To request access, contact your Datadog account team. +{% /alert %} + +## Overview{% #overview %} + +Collect Kubernetes metrics using the OpenTelemetry Collector to gain comprehensive insights into your cluster's health and performance. This integration uses a combination of OpenTelemetry receivers to gather data, which populates the [Kubernetes - Overview](https://app.datadoghq.com/dash/integration/86/kubernetes---overview) dashboard. + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/kubernetes_metrics.3f85284d72929c386b5ba4d3f15df3ca.png?auto=format" + alt="The 'Kubernetes - Overview' dashboard, showing metrics for containers, including status and resource usage of your cluster and its containers." /%} + +This integration requires the [`kube-state-metrics`](https://github.com/kubernetes/kube-state-metrics) service and uses a two-collector architecture to gather data. + +The `kube-state-metrics` service is a required component that generates detailed metrics about the state of Kubernetes objects like deployments, nodes, and pods. This architecture uses two separate OpenTelemetry Collectors: + +- A Cluster Collector, deployed as a Kubernetes Deployment, gathers cluster-wide metrics (for example, the total number of deployments). +- A Node Collector, deployed as a Kubernetes DaemonSet, runs on each node to collect node-specific metrics (for example, CPU and memory usage per node). + +This approach ensures that cluster-level metrics are collected only once, preventing data duplication, while node-level metrics are gathered from every node in the cluster. 
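Conceptually, the split of receivers between the two Collectors looks like the following sketch. The authoritative configuration lives in the `cluster-collector.yaml` and `daemonset-collector.yaml` files referenced in the setup steps below, so treat this only as an illustration of which receiver runs where:

```yaml
# Cluster Collector (Deployment, single replica): cluster-wide data
receivers:
  k8s_cluster: {}              # object counts and status for nodes, pods, and deployments
  prometheus:                  # scrapes kube-state-metrics
    config:
      scrape_configs:
        - job_name: kube-state-metrics
          static_configs:
            - targets: ["kube-state-metrics:8080"]   # assumption: default service name and port

# Node Collector (DaemonSet): per-node data, typically
#   kubeletstats - pod and container CPU, memory, and network usage
#   hostmetrics  - node-level system metrics
```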
+ +## Setup{% #setup %} + +To collect Kubernetes metrics with OpenTelemetry, you need to deploy `kube-state-metrics` and configure both of the above OpenTelemetry Collectors in your cluster. + +### Prerequisites{% #prerequisites %} + +- **Helm**: The setup uses Helm to deploy resources. To install Helm, see the [official Helm documentation](https://helm.sh/docs/intro/install/). +- **Collector Image**: This guide uses the `otel/opentelemetry-collector-contrib:0.130.0` image or newer. + +### Installation{% #installation %} + +#### 1. Install kube-state-metrics + +Run the following commands to add the `prometheus-community` Helm repository and install `kube-state-metrics`: + +```sh +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +helm install kube-state-metrics prometheus-community/kube-state-metrics +``` + +#### 2. Create a Datadog API Key Secret + +Create a Kubernetes secret to store your Datadog API key securely. + +```sh +export DD_API_KEY="" +kubectl create secret generic datadog-secret --from-literal api-key=$DD_API_KEY +``` + +#### 3. Install the OpenTelemetry Collectors + +1. Add the OpenTelemetry Helm chart repository: + + ```sh + helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts + helm repo update + ``` + +1. Download the configuration files for the two Collectors: + + - [cluster-collector.yaml](https://github.com/DataDog/opentelemetry-examples/blob/main/guides/kubernetes/configuration/cluster-collector.yaml) + - [daemonset-collector.yaml](https://github.com/DataDog/opentelemetry-examples/blob/main/guides/kubernetes/configuration/daemonset-collector.yaml) + +1. Set your cluster name as an environment variable and use Helm to deploy both the Cluster and Node Collectors. Make sure the paths to the YAML files are correct. + + ```bash + # Set your cluster name + export K8S_CLUSTER_NAME="" + + # Install the Node Collector (DaemonSet) + helm install otel-daemon-collector open-telemetry/opentelemetry-collector \ + -f daemonset-collector.yaml \ + --set image.repository=otel/opentelemetry-collector-contrib \ + --set image.tag=0.130.0 \ + --set-string "config.processors.resource.attributes[0].key=k8s.cluster.name" \ + --set-string "config.processors.resource.attributes[0].value=${K8S_CLUSTER_NAME}" + + # Install the Cluster Collector (Deployment) + helm install otel-cluster-collector open-telemetry/opentelemetry-collector \ + -f cluster-collector.yaml \ + --set image.repository=otel/opentelemetry-collector-contrib \ + --set image.tag=0.130.0 \ + --set-string "config.processors.resource.attributes[0].key=k8s.cluster.name" \ + --set-string "config.processors.resource.attributes[0].value=${K8S_CLUSTER_NAME}" + ``` + +## Metric metadata configuration{% #metric-metadata-configuration %} + +Some metrics require manual metadata updates in Datadog to ensure they are interpreted and displayed correctly. + +To edit a metric's metadata: + +1. Go to **[Metrics > Summary](https://app.datadoghq.com/metric/summary)**. +1. Select the metric you want to edit. +1. Click **Edit** in the side panel. +1. Edit the metadata as needed. +1. Click **Save**. 
+ +Repeat this process for each of the metrics listed in the following table: + +| Metric Name | Metric Type | Unit | Denominator | +| ------------------------ | ----------- | ----------------------------- | --------------------- | +| `k8s.pod.cpu.usage` | `Gauge` | `Cpu` > `core` | +| `k8s.pod.memory.usage` | `Gauge` | `Bytes (binary)` > `byte (B)` | +| `k8s.pod.network.io` | `Gauge` | `Bytes (binary)` > `byte (B)` | `Time` > `second (s)` | +| `k8s.pod.network.errors` | `Gauge` | `Bytes (binary)` > `byte (B)` | `Time` > `second (s)` | + +**Note**: Click the plus (**+**) icon beside the **Unit** to add the **Denominator**. + +## Correlating traces with infrastructure metrics{% #correlating-traces-with-infrastructure-metrics %} + +To correlate your APM traces with Kubernetes infrastructure metrics, Datadog uses [unified service tagging](http://localhost:1313/getting_started/tagging/unified_service_tagging/?tab=kubernetes#opentelemetry). This requires setting three standard resource attributes on telemetry from both your application and your infrastructure. Datadog automatically maps these OpenTelemetry attributes to the standard Datadog tags (`env`, `service`, and `version`) used for correlation. + +The required OpenTelemetry attributes are: + +- `service.name` +- `service.version` +- `deployment.environment.name` (formerly `deployment.environment`) + +This ensures that telemetry from your application is consistently tagged, allowing Datadog to link traces, metrics, and logs to the same service. + +### Application configuration{% #application-configuration %} + +Set the following environment variables in your application's container specification to tag outgoing telemetry: + +```yaml +spec: + containers: + - name: my-container + env: + - name: OTEL_SERVICE_NAME + value: "" + - name: OTEL_SERVICE_VERSION + value: "" + - name: OTEL_ENVIRONMENT + value: "" + - name: OTEL_RESOURCE_ATTRIBUTES + value: "service.name=$(OTEL_SERVICE_NAME),service.version=$(OTEL_SERVICE_VERSION),deployment.environment.name=$(OTEL_ENVIRONMENT)" +``` + +### Infrastructure configuration{% #infrastructure-configuration %} + +Add the corresponding annotations to your Kubernetes `Deployment` metadata. The `k8sattributes` processor in the Collector uses these annotations to enrich infrastructure metrics with service context. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-app + annotations: + # Use resource.opentelemetry.io/ for the k8sattributes processor + resource.opentelemetry.io/service.name: "" + resource.opentelemetry.io/service.version: "" + resource.opentelemetry.io/deployment.environment.name: "" +spec: + template: + metadata: + annotations: + resource.opentelemetry.io/service.name: "" + resource.opentelemetry.io/service.version: "" + resource.opentelemetry.io/deployment.environment.name: "" +# ... rest of the manifest +``` + +## Data collected{% #data-collected %} + +This integration collects metrics using several OpenTelemetry receivers. + +### kube-state-metrics (using Prometheus receiver){% #kube-state-metrics-using-prometheus-receiver %} + +Metrics scraped from the `kube-state-metrics` endpoint provide information about the state of Kubernetes API objects. + +### Kubelet stats receiver{% #kubelet-stats-receiver %} + +The `kubeletstatsreceiver` collects metrics from the Kubelet on each node, focusing on pod, container, and volume resource usage. 
+ +### Kubernetes cluster receiver{% #kubernetes-cluster-receiver %} + +The `k8sclusterreceiver` collects cluster-level metrics, such as the status and count of nodes, pods, and other objects. + +### Host metrics receiver{% #host-metrics-receiver %} + +The `hostmetricsreceiver` gathers system-level metrics from each node in the cluster. + +See [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/schema_semantics/metrics_mapping/) for more information. + +## Further reading{% #further-reading %} + +- [Send OpenTelemetry Data to Datadog](http://localhost:1313/opentelemetry/setup/) +- [Unified Service Tagging](https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging/) +- [Example Collector Configurations](https://github.com/DataDog/opentelemetry-examples/tree/main/guides/kubernetes) diff --git a/opentelemetry-mdoc/integrations/mysql_metrics/index.md b/opentelemetry-mdoc/integrations/mysql_metrics/index.md new file mode 100644 index 0000000000000..2b83e24b5d7f0 --- /dev/null +++ b/opentelemetry-mdoc/integrations/mysql_metrics/index.md @@ -0,0 +1,137 @@ +--- +title: MySQL Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > MySQL Metrics +--- + +# MySQL Metrics + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/mysql_metrics.c85e1516ba5fafcd2a34f7f47a25f728.png?auto=format" + alt="OpenTelemetry MySQL metrics in a MySQL dashboard" /%} + +The [MySQL receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/mysqlreceiver) allows for collection of MySQL metrics and access to the [MySQL Overview](https://app.datadoghq.com/dash/integration/12/mysql---overview) dashboard. Configure the receiver according to the specifications of the latest version of the `mysqlreceiver`. + +For more information, see the OpenTelemetry project documentation for the [MySQL receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/mysqlreceiver). + +## Setup{% #setup %} + +To collect MySQL metrics with OpenTelemetry for use with Datadog: + +1. Configure the [MySQL receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/mysqlreceiver) in your OpenTelemetry Collector configuration. +1. Optionally, configure the [host metrics receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/hostmetricsreceiver) if your OpenTelemetry Collector is running on the same server as your MySQL database. +1. Optionally, configure the [file log receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver) if your OpenTelemetry Collector is running on the same server as your MySQL database. +1. Configure service pipelines. +1. Ensure the OpenTelemetry Collector is [configured to export to Datadog](http://localhost:1313/opentelemetry/setup/collector_exporter/). 
+ +### MySQL receiver{% #mysql-receiver %} + +``` +receivers: + mysql/mysql-host-1: + endpoint: ":" + username: "" + password: "" + collection_interval: 10s + metrics: + mysql.connection.count: + enabled: true + mysql.connection.errors: + enabled: true + mysql.commands: + enabled: true + mysql.query.slow.count: + enabled: true + mysql.max_used_connections: + enabled: true + +processors: + resource/mysql-host-1: + attributes: + - action: insert + key: datadog.host.name + value: + transform/mysql-host-1: + metric_statements: + - convert_sum_to_gauge() where metric.name == "mysql.locks" + cumulativetodelta: {} + deltatorate: + metrics: + - mysql.connection.count + - mysql.commands + - mysql.operations + - mysql.query.slow.count + - mysql.connection.errors + - mysql.log_operations + - system.network.io +``` + +See the [MySQL receiver documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/mysqlreceiver) for detailed configuration options and requirements. + +### Host metrics receiver{% #host-metrics-receiver %} + +```yaml +receivers: + hostmetrics: + scrapers: + load: + cpu: + metrics: + system.cpu.utilization: + enabled: true + memory: + network: +``` + +### File log receiver{% #file-log-receiver %} + +```yaml +receivers: + filelog: + include: + - + - + operators: + - type: json_parser + parse_from: body + timestamp: + parse_from: attributes.timestamp + layout: "%Y-%m-%dT%H:%M:%SZ" + +processors: + transform/logs: + log_statements: + - context: resource + statements: + - set(attributes["datadog.host.name"], "") + - set(attributes["datadog.log.source"], "mysql") + + batch: {} +``` + +### Service pipelines{% #service-pipelines %} + +```yaml +service: + pipelines: + metrics/mysql-host-1: + receivers: [mysql/mysql-host-1] + exporters: [datadog/exporter] + processors: [resource/mysql-host-1,cumulativetodelta,deltatorate,transform/mysql-host-1] +``` + +Add `hostmetrics` and `filelog` receiver if you configured them, for example: + +```yaml + receivers: [mysql/mysql-host-1,hostmetrics,filelog] +``` + +## Data collected{% #data-collected %} + +See [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/guide/metrics_mapping/) for more information. + +## Further reading{% #further-reading %} + +- [Setting Up the OpenTelemetry Collector](http://localhost:1313/opentelemetry/collector_exporter/) diff --git a/opentelemetry-mdoc/integrations/nginx_metrics/index.md b/opentelemetry-mdoc/integrations/nginx_metrics/index.md new file mode 100644 index 0000000000000..f14921e9933da --- /dev/null +++ b/opentelemetry-mdoc/integrations/nginx_metrics/index.md @@ -0,0 +1,34 @@ +--- +title: NGINX Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > NGINX Metrics +--- + +# NGINX Metrics + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/nginx_metrics.e1b54fb86a275e25a45e1ad7d912282c.png?auto=format" + alt="OpenTelemetry NGINX metrics in a NGINX dashboard" /%} + +The [NGINX receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/nginxreceiver) allows for collection of NGINX metrics and access to the [NGINX Overview](https://app.datadoghq.com/dash/integration/21/nginx---overview) dashboard. Configure the receiver according to the specifications of the latest version of the `nginxreceiver`. 
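For example, a minimal receiver configuration might look like the following sketch, assuming NGINX serves its `stub_status` page at `/status` on the local host:

```yaml
receivers:
  nginx:
    endpoint: "http://localhost:80/status"   # assumption: stub_status location
    collection_interval: 10s
```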
+ +For more information, see the OpenTelemetry project documentation for the [NGINX receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/nginxreceiver). + +## Setup{% #setup %} + +To collect NGINX metrics with OpenTelemetry for use with Datadog: + +1. Configure the [NGINX receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/nginxreceiver) in your OpenTelemetry Collector configuration. +1. Ensure the OpenTelemetry Collector is [configured to export to Datadog](http://localhost:1313/opentelemetry/setup/collector_exporter/). + +See the [NGINX receiver documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/nginxreceiver) for detailed configuration options and requirements. + +## Data collected{% #data-collected %} + +See [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/guide/metrics_mapping/) for more information. + +## Further reading{% #further-reading %} + +- [Setting Up the OpenTelemetry Collector](http://localhost:1313/opentelemetry/collector_exporter/) diff --git a/opentelemetry-mdoc/integrations/podman_metrics/index.md b/opentelemetry-mdoc/integrations/podman_metrics/index.md new file mode 100644 index 0000000000000..16eedf83607bd --- /dev/null +++ b/opentelemetry-mdoc/integrations/podman_metrics/index.md @@ -0,0 +1,36 @@ +--- +title: Podman Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > Podman Metrics +--- + +# Podman Metrics + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/podman_metrics.5f82e3ff16828462daa54f058b200ad7.png?auto=format" + alt="The 'Containers - Overview' dashboard, showing metrics for CPU and Memory usage." /%} + +The [Podman receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/podmanreceiver) collects metrics that populate the [Containers - Overview](https://app.datadoghq.com/dash/integration/30657/containers---overview) dashboard. Configure the receiver according to the specifications of the latest version of the `podmanreceiver`. + +This dashboard displays metrics from all container runtimes. To view your Podman data, use the **runtime** template variable at the top of the dashboard to select `podman`. + +For more information, see the OpenTelemetry project documentation for the [Podman receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/podmanreceiver). + +## Setup{% #setup %} + +To collect Podman metrics with OpenTelemetry for use with Datadog: + +1. Configure the [Podman receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/podmanreceiver) in your OpenTelemetry Collector configuration. +1. Ensure the OpenTelemetry Collector is [configured to export to Datadog](http://localhost:1313/opentelemetry/setup/collector_exporter/). + +See the [Podman receiver documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/podmanreceiver) for detailed configuration options and requirements. + +## Data collected{% #data-collected %} + +See [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/guide/metrics_mapping/) for more information. 
+ +## Further reading{% #further-reading %} + +- [Send OpenTelemetry Data to Datadog](http://localhost:1313/opentelemetry/setup/) diff --git a/opentelemetry-mdoc/integrations/runtime_metrics/index.md b/opentelemetry-mdoc/integrations/runtime_metrics/index.md new file mode 100644 index 0000000000000..d1dd06f7a38cb --- /dev/null +++ b/opentelemetry-mdoc/integrations/runtime_metrics/index.md @@ -0,0 +1,221 @@ +--- +title: OpenTelemetry Runtime Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > OpenTelemetry Runtime Metrics +--- + +# OpenTelemetry Runtime Metrics + +## Overview{% #overview %} + +Runtime metrics provide insights into application performance, including memory usage, garbage collection, and parallelization. Datadog tracing libraries offer [runtime metrics collection](http://localhost:1313/tracing/metrics/runtime_metrics/) for each supported language, and OpenTelemetry (OTel) also collects compatible runtime metrics that can be sent to Datadog through the OpenTelemetry SDKs. + +## Compatibility{% #compatibility %} + +Datadog supports OpenTelemetry runtime metrics for the following languages: + +- Java +- .NET +- Go + +For details about host and container metrics mapping, see [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/mapping/metrics_mapping/). + +## Setup instructions{% #setup-instructions %} + +### 1. Prerequisites + +- You have successfully [configured OpenTelemetry metrics to send to Datadog](http://localhost:1313/opentelemetry/setup/). +- You have installed the [corresponding language integration in Datadog](https://app.datadoghq.com/integrations). + +### 2. Configure your application + +Select your language to see instructions for configuring the OpenTelemetry SDK to send runtime metrics: + +{% tab title="Java" %} +#### Automatic instrumentation{% #automatic-instrumentation %} + +If you use [OpenTelemetry automatic instrumentation](https://opentelemetry.io/docs/instrumentation/java/automatic/) for Java applications, runtime metrics are enabled by default. + +#### Manual instrumentation{% #manual-instrumentation %} + +If you use [OpenTelemetry manual instrumentation](https://opentelemetry.io/docs/instrumentation/java/manual/), follow the guides for your Java version: + +- [Java 8](https://github.com/open-telemetry/opentelemetry-java-instrumentation/tree/main/instrumentation/runtime-telemetry/runtime-telemetry-java8/library) +- [Java 17](https://github.com/open-telemetry/opentelemetry-java-instrumentation/tree/main/instrumentation/runtime-telemetry/runtime-telemetry-java17/library) + +{% /tab %} + +{% tab title="Go" %} +OpenTelemetry Go applications are [instrumented manually](https://opentelemetry.io/docs/instrumentation/go/manual/). To enable runtime metrics, see the documentation for the [runtime package](https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/runtime). +{% /tab %} + +{% tab title=".NET" %} + +{% alert level="warning" %} +The minimum supported version of the .NET OpenTelemetry SDK is [1.5.0](https://github.com/open-telemetry/opentelemetry-dotnet/releases/tag/core-1.5.0) +{% /alert %} + +#### Automatic instrumentation{% #automatic-instrumentation %} + +If you use [OpenTelemetry automatic instrumentation](https://opentelemetry.io/docs/instrumentation/net/automatic/) for .NET applications, runtime metrics are enabled by default. 
+ +#### Manual instrumentation{% #manual-instrumentation %} + +If you use [OpenTelemetry manual instrumentation](https://opentelemetry.io/docs/instrumentation/net/manual/), see the documentation for the [OpenTelemetry.Instrumentation.Runtime library](https://github.com/open-telemetry/opentelemetry-dotnet-contrib/tree/main/src/OpenTelemetry.Instrumentation.Runtime). + +#### Metric export interval{% #metric-export-interval %} + +The default metric export interval for the .NET OTel SDK differs from the default for the Datadog .NET SDK. Datadog recommends setting the [OTEL_METRIC_EXPORT_INTERVAL](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#periodic-exporting-metricreader) environment variable on your .NET service to match the default Datadog metric export interval: + +``` +OTEL_METRIC_EXPORT_INTERVAL=10000 +``` + +{% /tab %} + +## View runtime metric dashboards{% #view-runtime-metric-dashboards %} + +After setup is complete, you can view runtime metrics in: + +- The service's details page (see Java example below) +- The flame graph metrics tab +- Default [runtime dashboards](https://app.datadoghq.com/dash/integration/256/jvm-metrics) + +{% image + source="http://localhost:1313/images/opentelemetry/otel_runtime_metrics_service_page.684ee9c34c52d62f70b570d67b94e32b.png?auto=format" + alt="Service page showing OpenTelemetry runtime metrics on the JVM Metrics tab" /%} + +## Data collected{% #data-collected %} + +When using OpenTelemetry runtime metrics with Datadog, you receive both: + +- Original OpenTelemetry runtime metrics +- Mapped Datadog runtime metrics for equivalent metrics + +The OpenTelemetry runtime metrics have the following prefixes based on their source: + +| Source | Prefix | +| ------------------------------------------------------------------------------------------------ | ------------------------ | +| [OTel Collector Datadog Exporter](http://localhost:1313/opentelemetry/setup/collector_exporter/) | `otel.process.runtime.*` | +| [Datadog Agent OTLP Ingest](http://localhost:1313/opentelemetry/setup/otlp_ingest_in_the_agent) | `process.runtime.*` | + +The following tables list the Datadog runtime metrics that are supported through OpenTelemetry mapping. "N/A" indicates that there is no OpenTelemetry equivalent metric available. + +{% alert level="warning" %} +OpenTelemetry runtime metrics are mapped to Datadog by metric name. Do not rename host metrics for OpenTelemetry runtime metrics as this breaks the mapping. +{% /alert %} + +{% tab title="Java" %} + +| Datadog metric | Description | OpenTelemetry metric | +| ------------------------------- | ---------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------ | +| `jvm.heap_memory` | The total Java heap memory used. | `process.runtime.jvm.memory.usage``jvm.memory.used` | +| `jvm.heap_memory_committed` | The total Java heap memory committed to be used. | `process.runtime.jvm.memory.committed``jvm.memory.committed` | +| `jvm.heap_memory_init` | The initial Java heap memory allocated. | `process.runtime.jvm.memory.init``jvm.memory.init` | +| `jvm.heap_memory_max` | The maximum Java heap memory available. | `process.runtime.jvm.memory.limit``jvm.memory.limit` | +| `jvm.non_heap_memory` | The total Java non-heap memory used. Non-heap memory is: `Metaspace + CompressedClassSpace + CodeCache`. 
| `process.runtime.jvm.memory.usage``jvm.memory.used` | +| `jvm.non_heap_memory_committed` | The total Java non-heap memory committed to be used. | `process.runtime.jvm.memory.committed``jvm.memory.committed` | +| `jvm.non_heap_memory_init` | The initial Java non-heap memory allocated. | `process.runtime.jvm.memory.init``jvm.memory.init` | +| `jvm.non_heap_memory_max` | The maximum Java non-heap memory available. | `process.runtime.jvm.memory.limit``jvm.memory.limit` | +| `jvm.gc.old_gen_size` | The current Java heap memory usage of the Old Generation memory pool. | `process.runtime.jvm.memory.usage``jvm.memory.used` | +| `jvm.gc.eden_size` | The current Java heap memory usage of the Eden memory pool. | `process.runtime.jvm.memory.usage``jvm.memory.used` | +| `jvm.gc.survivor_size` | The current Java heap memory usage of the Survivor memory pool. | `process.runtime.jvm.memory.usage``jvm.memory.used` | +| `jvm.gc.metaspace_size` | The current Java non-heap memory usage of the Metaspace memory pool. | `process.runtime.jvm.memory.usage``jvm.memory.used` | +| `jvm.thread_count` | The number of live threads. | `process.runtime.jvm.threads.count``jvm.thread.count` | +| `jvm.loaded_classes` | Number of classes currently loaded. | `process.runtime.jvm.classes.current_loaded``jvm.class.count` | +| `jvm.cpu_load.system` | Recent CPU utilization for the whole system. | `process.runtime.jvm.system.cpu.utilization``jvm.system.cpu.utilization` | +| `jvm.cpu_load.process` | Recent CPU utilization for the process. | `process.runtime.jvm.cpu.utilization``jvm.cpu.recent_utilization` | +| `jvm.buffer_pool.direct.used` | Measure of memory used by direct buffers. | `process.runtime.jvm.buffer.usage``jvm.buffer.memory.usage` | +| `jvm.buffer_pool.direct.count` | Number of direct buffers in the pool. | `process.runtime.jvm.buffer.count``jvm.buffer.count` | +| `jvm.buffer_pool.direct.limit` | Measure of total memory capacity of direct buffers. | `process.runtime.jvm.buffer.limit``jvm.buffer.memory.limit` | +| `jvm.buffer_pool.mapped.used` | Measure of memory used by mapped buffers. | `process.runtime.jvm.buffer.usage``jvm.buffer.memory.usage` | +| `jvm.buffer_pool.mapped.count` | Number of mapped buffers in the pool. | `process.runtime.jvm.buffer.count``jvm.buffer.count` | +| `jvm.buffer_pool.mapped.limit` | Measure of total memory capacity of mapped buffers. | `process.runtime.jvm.buffer.limit``jvm.buffer.memory.limit` | +| `jvm.gc.parnew.time` | The approximate accumulated garbage collection time elapsed. | N/A | +| `jvm.gc.cms.count` | The total number of garbage collections that have occurred. | N/A | +| `jvm.gc.major_collection_count` | The rate of major garbage collections. Set `new_gc_metrics: true` to receive this metric. | N/A | +| `jvm.gc.minor_collection_count` | The rate of minor garbage collections. Set `new_gc_metrics: true` to receive this metric. | N/A | +| `jvm.gc.major_collection_time` | The fraction of time spent in major garbage collection. Set `new_gc_metrics: true` to receive this metric. | N/A | +| `jvm.gc.minor_collection_time` | The fraction of time spent in minor garbage collection. Set `new_gc_metrics: true` to receive this metric. | N/A | +| `jvm.os.open_file_descriptors` | The number of open file descriptors. 
| N/A | + +{% /tab %} + +{% tab title="Go" %} + +| Datadog metric | Description | OpenTelemetry metric | +| ----------------------------------------- | --------------------------------------------------------------------------------------- | -------------------------------------- | +| `runtime.go.num_goroutine` | Number of goroutines spawned. | `process.runtime.go.goroutines` | +| `runtime.go.num_cgo_call` | Number of CGO calls made. | `process.runtime.go.cgo.calls` | +| `runtime.go.mem_stats.lookups` | Number of pointer lookups performed by the runtime. | `process.runtime.go.mem.lookups` | +| `runtime.go.mem_stats.heap_alloc` | Bytes of allocated heap objects. | `process.runtime.go.mem.heap_alloc` | +| `runtime.go.mem_stats.heap_sys` | Bytes of heap memory obtained from the operating system. | `process.runtime.go.mem.heap_sys` | +| `runtime.go.mem_stats.heap_idle` | Bytes in idle (unused) spans. | `process.runtime.go.mem.heap_idle` | +| `runtime.go.mem_stats.heap_inuse` | Bytes in in-use spans. | `process.runtime.go.mem.heap_inuse` | +| `runtime.go.mem_stats.heap_released` | Bytes of physical memory returned to the operating system. | `process.runtime.go.mem.heap_released` | +| `runtime.go.mem_stats.heap_objects` | Number of allocated heap objects. | `process.runtime.go.mem.heap_objects` | +| `runtime.go.mem_stats.pause_total_ns` | Cumulative nanoseconds in garbage collection (GC). | `process.runtime.go.gc.pause_total_ns` | +| `runtime.go.mem_stats.num_gc` | Number of completed GC cycles. | `process.runtime.go.gc.count` | +| `runtime.go.num_cpu` | Number of CPUs detected by the runtime. | N/A | +| `runtime.go.mem_stats.alloc` | Bytes of allocated heap objects. | N/A | +| `runtime.go.mem_stats.total_alloc` | Cumulative bytes allocated for heap objects. | N/A | +| `runtime.go.mem_stats.sys` | Total bytes of memory obtained from the operating system. | N/A | +| `runtime.go.mem_stats.mallocs` | Cumulative count of heap objects allocated. | N/A | +| `runtime.go.mem_stats.frees` | Cumulative count of heap objects freed. | N/A | +| `runtime.go.mem_stats.stack_inuse` | Bytes in stack spans. | N/A | +| `runtime.go.mem_stats.stack_sys` | Bytes of stack memory obtained from the operating system. | N/A | +| `runtime.go.mem_stats.m_span_inuse` | Bytes of allocated mspan structures. | N/A | +| `runtime.go.mem_stats.m_span_sys` | Bytes of memory obtained from the operating system for mspan structures. | N/A | +| `runtime.go.mem_stats.m_cache_inuse` | Bytes of allocated mcache structures. | N/A | +| `runtime.go.mem_stats.m_cache_sys` | Bytes of memory obtained from the operating system for mcache structures. | N/A | +| `runtime.go.mem_stats.buck_hash_sys` | Bytes of memory in profiling bucket hash tables. | N/A | +| `runtime.go.mem_stats.gc_sys` | Bytes of memory in garbage collection metadata. | N/A | +| `runtime.go.mem_stats.other_sys` | Bytes of memory in miscellaneous off-heap. | N/A | +| `runtime.go.mem_stats.next_gc` | Target heap size of the next GC cycle. | N/A | +| `runtime.go.mem_stats.last_gc` | Last garbage collection finished, as nanoseconds since the UNIX epoch. | N/A | +| `runtime.go.mem_stats.num_forced_gc` | Number of GC cycles that were forced by the application calling the GC function. | N/A | +| `runtime.go.mem_stats.gc_cpu_fraction` | Fraction of this program's available CPU time used by the GC since the program started. | N/A | +| `runtime.go.gc_stats.pause_quantiles.min` | Distribution of GC pause times: minimum values. 
| N/A | +| `runtime.go.gc_stats.pause_quantiles.25p` | Distribution of GC pause times: 25th percentile. | N/A | +| `runtime.go.gc_stats.pause_quantiles.50p` | Distribution of GC pause times: 50th percentile. | N/A | +| `runtime.go.gc_stats.pause_quantiles.75p` | Distribution of GC pause times: 75th percentile. | N/A | +| `runtime.go.gc_stats.pause_quantiles.max` | Distribution of GC pause times: maximum values. | N/A | + +{% /tab %} + +{% tab title=".NET" %} + +| Datadog metric | Description | OpenTelemetry metric | +| ------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------- | +| `runtime.dotnet.threads.contention_count` | The number of times a thread stopped to wait on a lock. | `process.runtime.dotnet.``monitor.lock_contention.count` | +| `runtime.dotnet.exceptions.count` | The number of first-chance exceptions. | `process.runtime.dotnet.``exceptions.count` | +| `runtime.dotnet.gc.size.gen0` | The size of the gen 0 heap. | `process.runtime.dotnet.``gc.heap.size` | +| `runtime.dotnet.gc.size.gen1` | The size of the gen 1 heap. | `process.runtime.dotnet.``gc.heap.size` | +| `runtime.dotnet.gc.size.gen2` | The size of the gen 2 heap. | `process.runtime.dotnet.``gc.heap.size` | +| `runtime.dotnet.gc.size.loh` | The size of the large object heap. | `process.runtime.dotnet.``gc.heap.size` | +| `runtime.dotnet.gc.count.gen0` | The number of gen 0 garbage collections. | `process.runtime.dotnet.``gc.collections.count` | +| `runtime.dotnet.gc.count.gen1` | The number of gen 1 garbage collections. | `process.runtime.dotnet.``gc.collections.count` | +| `runtime.dotnet.gc.count.gen2` | The number of gen 2 garbage collections. | `process.runtime.dotnet.``gc.collections.count` | +| `runtime.dotnet.cpu.system` | The number of milliseconds executing in the kernel. | N/A | +| `runtime.dotnet.cpu.user` | The number of milliseconds executing outside the kernel. | N/A | +| `runtime.dotnet.cpu.percent` | The percentage of total CPU used by the application. | N/A | +| `runtime.dotnet.mem.committed` | Memory usage. | N/A | +| `runtime.dotnet.threads.count` | The number of threads. | N/A | +| `runtime.dotnet.threads.workers_count` | The number of workers in the threadpool. (.NET Core only) | N/A | +| `runtime.dotnet.threads.contention_time` | The cumulated time spent by threads waiting on a lock. (.NET Core only) | N/A | +| `runtime.dotnet.gc.memory_load` | The percentage of the total memory used by the process. The garbage collection (GC) changes its behavior when this value gets above 85. (.NET Core only) | N/A | +| `runtime.dotnet.gc.pause_time` | The amount of time the GC paused the application threads. (.NET Core only) | N/A | +| `runtime.dotnet.aspnetcore.``requests.total` | The total number of HTTP requests received by the server. (.NET Core only) | N/A | +| `runtime.dotnet.aspnetcore.``requests.failed` | The number of failed HTTP requests received by the server. (.NET Core only) | N/A | +| `runtime.dotnet.aspnetcore.``requests.current` | The total number of HTTP requests that have started but not yet stopped. (.NET Core only) | N/A | +| `runtime.dotnet.aspnetcore.``requests.queue_length` | The current length of the server HTTP request queue. (.NET Core only) | N/A | +| `runtime.dotnet.aspnetcore.``connections.total` | The total number of HTTP connections established to the server. 
(.NET Core only) | N/A | +| `runtime.dotnet.aspnetcore.``connections.current` | The current number of active HTTP connections to the server. (.NET Core only) | N/A | +| `runtime.dotnet.aspnetcore.``connections.queue_length` | The current length of the HTTP server connection queue. (.NET Core only) | N/A | + +{% /tab %} + +## Further reading{% #further-reading %} + +- [APM Runtime Metrics](http://localhost:1313/tracing/metrics/runtime_metrics/) +- [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/mapping/metrics_mapping/) diff --git a/opentelemetry-mdoc/integrations/spark_metrics/index.md b/opentelemetry-mdoc/integrations/spark_metrics/index.md new file mode 100644 index 0000000000000..20163221b699a --- /dev/null +++ b/opentelemetry-mdoc/integrations/spark_metrics/index.md @@ -0,0 +1,34 @@ +--- +title: Apache Spark Metrics +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > Apache Spark Metrics +--- + +# Apache Spark Metrics + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/spark_metrics.530d0f8550d8342ab5cb0073967ff151.png?auto=format" + alt="OpenTelemetry Apache Spark metrics in a Spark dashboard" /%} + +The [Apache Spark receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/apachesparkreceiver) allows for collection of Apache Spark metrics and access to the [Spark Overview](https://app.datadoghq.com/screen/integration/95/spark---overview) dashboard. Configure the receiver according to the specifications of the latest version of the `apachesparkreceiver`. + +For more information, see the OpenTelemetry project documentation for the [Apache Spark receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/apachesparkreceiver). + +## Setup{% #setup %} + +To collect Apache Spark metrics with OpenTelemetry for use with Datadog: + +1. Configure the [Apache Spark receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/apachesparkreceiver) in your OpenTelemetry Collector configuration. +1. Ensure the OpenTelemetry Collector is [configured to export to Datadog](http://localhost:1313/opentelemetry/setup/collector_exporter/). + +See the [Apache Spark receiver documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/apachesparkreceiver) for detailed configuration options and requirements. + +## Data collected{% #data-collected %} + +See [OpenTelemetry Metrics Mapping](http://localhost:1313/opentelemetry/guide/metrics_mapping/) for more information. + +## Further reading{% #further-reading %} + +- [Setting Up the OpenTelemetry Collector](http://localhost:1313/opentelemetry/setup/collector_exporter/) diff --git a/opentelemetry-mdoc/integrations/trace_metrics/index.md b/opentelemetry-mdoc/integrations/trace_metrics/index.md new file mode 100644 index 0000000000000..2f66444f7923d --- /dev/null +++ b/opentelemetry-mdoc/integrations/trace_metrics/index.md @@ -0,0 +1,61 @@ +--- +title: Trace Metrics +description: Datadog, the leading service for cloud-scale monitoring. 
+breadcrumbs: Docs > OpenTelemetry in Datadog > Integrations > Trace Metrics +--- + +# Trace Metrics + +## Overview{% #overview %} + +{% image + source="http://localhost:1313/images/opentelemetry/collector_exporter/trace_metrics.50651fc46c1bdd28ef8786711c1cc577.png?auto=format" + alt="APM metrics from OpenTelemetry" /%} + +To send APM stats such as hits, errors, and duration, set up the [Datadog Connector](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/connector/datadogconnector). + +For more information, see the OpenTelemetry project documentation for the [Datadog Connector](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/connector/datadogconnector). + +## Setup{% #setup %} + +Add the following lines to your Collector configuration: + +```yaml +processors: + probabilistic_sampler: + sampling_percentage: 20 +connectors: + # add the "datadog" connector definition and further configurations + datadog/connector: +exporters: + datadog: + api: + key: ${env:DD_API_KEY} +service: + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [datadog/connector] + traces/2: + receivers: [datadog/connector] + processors: [batch, probabilistic_sampler] + exporters: [datadog] + metrics: + receivers: [datadog/connector] + processors: [batch] + exporters: [datadog] +``` + +## Data collected{% #data-collected %} + +See [Trace Metrics](http://localhost:1313/tracing/metrics/metrics_namespace/). + +## Full example configuration{% #full-example-configuration %} + +For a full working example configuration with the Datadog exporter, see [`trace-metrics.yaml`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/trace-metrics.yaml). + +## Further reading{% #further-reading %} + +- [Getting Started with Collector](http://localhost:1313/opentelemetry/collector_exporter/) +- [Mapping OpenTelemetry Semantic Conventions to Service-entry Spans](http://localhost:1313/opentelemetry/guide/service_entry_spans_mapping/) diff --git a/opentelemetry-mdoc/mapping/host_metadata/index.md b/opentelemetry-mdoc/mapping/host_metadata/index.md new file mode 100644 index 0000000000000..a3cf326381a81 --- /dev/null +++ b/opentelemetry-mdoc/mapping/host_metadata/index.md @@ -0,0 +1,92 @@ +--- +title: Infrastructure List Host Information +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Semantic Mapping > Infrastructure List Host + Information +--- + +# Infrastructure List Host Information + +{% alert level="info" %} +This feature is in Preview. If you have any feedback, contact [Datadog support](http://localhost:1313/help/). +{% /alert %} + +## Overview{% #overview %} + +The Datadog exporter supports sending system information about your hosts to Datadog, which you can see in the [Infrastructure List](http://localhost:1313/infrastructure/list/). You can send this information in OTLP through the ['Resource' field](https://opentelemetry.io/docs/concepts/glossary/#resource) as part of any of the existing signals. This is supported under any [deployment pattern](https://opentelemetry.io/docs/collector/deployment/) including gateway deploys. + +Datadog uses [OpenTelemetry semantic conventions](https://opentelemetry.io/docs/concepts/semantic-conventions/) to recognize system information about your hosts. 
Follow the instructions for [setting up for host metrics](http://localhost:1313/opentelemetry/collector_exporter/host_metrics) to send the necessary metrics and resource attributes to Datadog. Alternatively, you can manually send this information in the way that best fits your infrastructure. + +## Opting in to the feature{% #opting-in-to-the-feature %} + +To use this feature, set the `datadog.host.use_as_metadata` resource attribute to `true` in all OTLP payloads that contain information about hosts. + +Resources populate the infrastructure list information if they have a [host-identifying attribute](http://localhost:1313/opentelemetry/schema_semantics/hostname/) and the `datadog.host.use_as_metadata` attribute set to `true`. + +To explicitly declare what resources to use for metadata, add the Boolean resource attribute `datadog.host.use_as_metadata` to all resources that have relevant host information. + +For example, to set this for all resources in metrics, traces, and logs, use the [transform processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/transformprocessor#transform-processor) with the following configuration: + +```yaml +processors: + transform: + metric_statements: + - context: resource + statements: + - set(attributes["datadog.host.use_as_metadata"], true) + trace_statements: + - context: resource + statements: + - set(attributes["datadog.host.use_as_metadata"], true) + log_statements: + - context: resource + statements: + - set(attributes["datadog.host.use_as_metadata"], true) +``` + +Add this processor to the `processors` list of all your pipelines. + +You must explicitly tag all your resources with a host-identifying attribute. This is done by default by the [recommended setup for host metrics](http://localhost:1313/opentelemetry/collector_exporter/host_metrics). + +## Supported conventions{% #supported-conventions %} + +The Datadog exporter supports both resource attribute-level semantic conventions and system metrics-level semantic conventions. Supported resource attribute semantic conventions are mainly under [the `host.` namespace](https://opentelemetry.io/docs/specs/semconv/resource/host/) and [the `os.` namespace](https://opentelemetry.io/docs/specs/semconv/resource/os/). All supported system metrics-level semantic conventions are under [the `system.` namespace](https://opentelemetry.io/docs/specs/semconv/system/system-metrics/). 
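As an illustration, these conventions typically come from the host metrics setup referenced above. The following is a minimal sketch of a Collector configuration that emits some of them, assuming the `hostmetrics` receiver and the `resourcedetection` processor's `system` detector; the specific scrapers and opt-in attributes shown are examples, not requirements:

```yaml
receivers:
  hostmetrics:
    collection_interval: 10s
    scrapers:
      # Emit system.* metrics such as CPU, memory, and network usage
      cpu:
      memory:
      network:

processors:
  resourcedetection:
    detectors: [system]
    system:
      # Opt in to the host.* and os.* resource attributes used by the Infrastructure List
      resource_attributes:
        os.description:
          enabled: true
        host.cpu.model.name:
          enabled: true
        host.ip:
          enabled: true
        host.mac:
          enabled: true
```

The tables below show how the resulting resource attributes and system metrics map to in-app fields.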
+ +### General system conventions{% #general-system-conventions %} + +| Semantic convention | Type | In-app field | +| ------------------------------------------------------------------------------------------------------- | ------------------ | ------------ | +| [*Various host-identifying attributes*](http://localhost:1313/opentelemetry/schema_semantics/hostname/) | Resource attribute | Hostname | +| `os.description` | Resource attribute | OS | + +### CPU conventions{% #cpu-conventions %} + +| Semantic convention | Type | In-app field | +| --------------------------- | ------------------ | ------------------ | +| `host.cpu.vendor.id` | Resource attribute | Vendor ID | +| `host.cpu.model.name` | Resource attribute | Model Name | +| `host.cpu.cache.l2.size` | Resource attribute | Cache Size | +| `host.cpu.family` | Resource attribute | Family | +| `host.cpu.model.id` | Resource attribute | Model | +| `host.cpu.stepping` | Resource attribute | Stepping | +| `system.cpu.logical.count` | System metric | Logical Processors | +| `system.cpu.physical.count` | System metric | Cores | +| `system.cpu.frequency` | System metric | MHz | + +### Network conventions{% #network-conventions %} + +| Semantic convention | Type | In-app field | +| ------------------- | ------------------ | ------------------------- | +| `host.ip` | Resource attribute | IP Address & IPv6 Address | +| `host.mac` | Resource attribute | Mac Address | + +### Collecting these conventions with the OpenTelemetry Collector{% #collecting-these-conventions-with-the-opentelemetry-collector %} + +To collect these conventions with the OpenTelemetry Collector, set up the [recommended setup for host metrics](http://localhost:1313/opentelemetry/collector_exporter/host_metrics). The host metrics receiver collects all the relevant metrics, while the resource detection processor collects all relevant resource attributes. + +**Note:** You need to add these processors and receivers in the Collector running on the host that you want to monitor. A gateway host does not collect this information from remote hosts. + +## Further reading{% #further-reading %} + +- [OpenTelemetry Support in Datadog](http://localhost:1313/opentelemetry/) diff --git a/opentelemetry-mdoc/mapping/hostname/index.md b/opentelemetry-mdoc/mapping/hostname/index.md new file mode 100644 index 0000000000000..9cf9514b53ec1 --- /dev/null +++ b/opentelemetry-mdoc/mapping/hostname/index.md @@ -0,0 +1,137 @@ +--- +title: Mapping OpenTelemetry Semantic Conventions to Hostnames +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Semantic Mapping > Mapping OpenTelemetry + Semantic Conventions to Hostnames +--- + +# Mapping OpenTelemetry Semantic Conventions to Hostnames + +## Overview{% #overview %} + +OpenTelemetry defines certain semantic conventions for resource attributes related to hostnames. If an OpenTelemetry Protocol (OTLP) payload for any signal type has known hostname resource attributes, Datadog honors these conventions and tries to use its value as a hostname. The default hostname resolution algorithm is built with compatibility with the rest of Datadog products in mind, but you can override it if needed. 
+ +This algorithm is used in the [Datadog exporter](http://localhost:1313/opentelemetry/setup/collector_exporter/) as well as the [OTLP ingest pipeline in the Datadog Agent](http://localhost:1313/opentelemetry/interoperability/otlp_ingest_in_the_agent) and [DDOT Collector](http://localhost:1313/opentelemetry/migrate/ddot_collector/). When using the [recommended configuration](http://localhost:1313/opentelemetry/config/hostname_tagging/) for the Datadog exporter, the [resource detection processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#resource-detection-processor) adds the necessary resource attributes to the payload to ensure accurate hostname resolution. + +## Conventions used to determine the hostname{% #conventions-used-to-determine-the-hostname %} + +Conventions are checked in resource attributes the following order, and the first valid hostname is used. If no valid conventions are present, the fallback hostname logic is used. This fallback logic varies by product. + +1. Check Datadog-specific conventions: `host` and `datadog.host.name`. +1. Check cloud provider-specific conventions for AWS, Azure and GCP. +1. Check Kubernetes-specific conventions. +1. If no specific conventions are found, fall back to `host.id` and `host.name`. + +The following sections explain each set of conventions in more detail. + +### General hostname semantic conventions{% #general-hostname-semantic-conventions %} + +The `host` and `datadog.host.name` conventions are Datadog-specific conventions. They are considered first and can be used to override the hostname detected using the usual OpenTelemetry semantic conventions. `host` is checked first and then `datadog.host.name` is checked if `host` was not set. + +Prefer using the `datadog.host.name` convention since it is namespaced, and it is less likely to conflict with other vendor-specific behavior. + +When using the OpenTelemetry Collector, you can use the `transform` processor to set the `datadog.host.name` convention in your pipelines. For example, to set the hostname as `my-custom-hostname` in all metrics, traces, and logs on a given pipeline, use the following configuration: + +```yaml +transform: + metric_statements: &statements + - context: resource + statements: + - set(attributes["datadog.host.name"], "my-custom-hostname") + trace_statements: *statements # Use the same statements as in metrics + log_statements: *statements # Use the same statements as in metrics +``` + +Don't forget to add the `transform` processor to your pipelines. + +Due to how the backend processes deduplicate hostnames, you may occasionally see an alias for your host. If this causes issues for you, please contact support. + +### Cloud provider-specific conventions{% #cloud-provider-specific-conventions %} + +The `cloud.provider` resource attribute is used to determine the cloud provider. Further resource attributes are used to determine the hostname for each specific platform. If `cloud.provider` or any of the expected resource attributes are missing, the next set of conventions is checked. + +#### Amazon Web Services{% #amazon-web-services %} + +If `cloud.provider` has the value `aws`, the following conventions are checked: + +1. Check `aws.ecs.launchtype` to determine if the payload comes from an ECS Fargate task. If so, use `aws.ecs.task.arn` as the identifier with tag name `task_arn`. +1. Otherwise, use `host.id` as the hostname. This matches the EC2 instance id. 
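These attributes are generally populated automatically when the Collector runs on AWS. For example, a minimal sketch of the resource detection processor with its `ecs` and `ec2` detectors enabled (the detector list and options here are illustrative):

```yaml
processors:
  resourcedetection:
    # On EC2, this typically sets cloud.provider and host.id (the instance ID);
    # on ECS, it typically sets aws.ecs.task.arn and aws.ecs.launchtype.
    detectors: [env, ecs, ec2]
    timeout: 2s
    override: false
```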
+ +#### Google Cloud{% #google-cloud %} + +If `cloud.provider` has the value `gcp`, the following conventions are checked: + +1. Check that both `host.name` and `cloud.account.id` are available and have the expected format, remove the prefix from `host.name`, and merge both into a hostname. + +#### Azure{% #azure %} + +If `cloud.provider` has the value `azure`, the following conventions are checked: + +1. Use `host.id` as the hostname if it is available and has the expected format. +1. Otherwise, fall back to `host.name`. + +### Kubernetes-specific conventions{% #kubernetes-specific-conventions %} + +If `k8s.node.name` and the cluster name are available, the hostname is set to `-`. If only `k8s.node.name` is available, the hostname is set to the node name. + +To get the cluster name, the following conventions are checked: + +1. Check `k8s.cluster.name` and use it if present. +1. If `cloud.provider` is set to `azure`, extract the cluster name from `azure.resourcegroup.name`. +1. If `cloud.provider` is set to `aws`, extract the cluster name from the first resource attribute starting with `ec2.tag.kubernetes.io/cluster/`. + +### `host.id` and `host.name`{% #hostid-and-hostname %} + +If none of the above conventions are present, the `host.id` and `host.name` resource attributes are used as-is to determine the hostname. `host.id` is checked first and then `host.name` is checked if `host.id` was not set. + +**Note:** The OpenTelemetry specification allows `host.id` and `host.name` to have values that may not match those used by other Datadog products in a given environment. If using multiple Datadog products to monitor the same host, you may have to override the hostname using `datadog.host.name` to ensure consistency. + +## Infra attributes processor{% #infra-attributes-processor %} + +The [infra attributes processor](https://github.com/DataDog/datadog-agent/tree/main/comp/otelcol/otlp/components/processor/infraattributesprocessor) automates the extraction of Kubernetes tags based on labels or annotations and assigns these tags as resource attributes on traces, metrics, and logs. The infra attributes processor requires the following [attributes](https://github.com/DataDog/datadog-agent/tree/main/comp/otelcol/otlp/components/processor/infraattributesprocessor#expected-attributes) (such as `container.id`) to be set to extract the correct attributes and hostname. + +The infra attributes processor can also be configured to override the hostname extracted from attributes by the Agent hostname: + +``` +processors: + infraattributes: + allow_hostname_override: true +``` + +**Note**: This setting is only available for the DDOT Collector. + +## Fallback hostname logic{% #fallback-hostname-logic %} + +If no valid host names are found in the resource attributes, the behavior varies depending on the ingestion path. + +{% tab title="Datadog Exporter" %} +The fallback hostname logic is used. This logic generates a hostname for the machine where the Datadog Exporter is running, which is compatible with the rest of Datadog products, by checking the following sources: + +1. The `hostname` field in the Datadog Exporter configuration. +1. Cloud provider API. +1. Kubernetes host name. +1. Fully qualified domain name. +1. Operating system host name. + +This may lead to incorrect hostnames in [gateway deployments](https://opentelemetry.io/docs/collector/deployment/gateway/). To avoid this, use the `resource detection` processor in your pipelines to ensure accurate hostname resolution. 
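For example, in the Collectors running on each monitored host (rather than in the gateway itself), the processor can be added along these lines. This is a sketch: the detector list and the `otlp/gateway` exporter that forwards data to your gateway are assumptions to adapt to your environment:

```yaml
processors:
  resourcedetection:
    # Detect host-identifying attributes on the machine where this Collector runs
    detectors: [system, env, ec2, gcp, azure]
  batch:

exporters:
  otlp/gateway: # assumed name for the exporter pointing at your gateway Collector
    endpoint: gateway-collector:4317

service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [resourcedetection, batch]
      exporters: [otlp/gateway]
```

With host attributes attached at the edge, the Datadog Exporter in the gateway resolves hostnames from the payload instead of falling back to the gateway machine's own hostname.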
+{% /tab %} + +{% tab title="OTLP ingest pipeline in the Datadog Agent" %} +The Datadog Agent hostname is used. See [How does Datadog determine the Agent hostname?](http://localhost:1313/agent/faq/how-datadog-agent-determines-the-hostname/) for more information. +{% /tab %} + +## Invalid hostnames{% #invalid-hostnames %} + +The following host names are deemed invalid and discarded: + +- `0.0.0.0` +- `127.0.0.1` +- `localhost` +- `localhost.localdomain` +- `localhost6.localdomain6` +- `ip6-localhost` + +## Further reading{% #further-reading %} + +- [OpenTelemetry Support in Datadog](http://localhost:1313/opentelemetry/) diff --git a/opentelemetry-mdoc/mapping/index.md b/opentelemetry-mdoc/mapping/index.md new file mode 100644 index 0000000000000..79cfe12f40256 --- /dev/null +++ b/opentelemetry-mdoc/mapping/index.md @@ -0,0 +1,21 @@ +--- +title: Semantic Mapping +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Semantic Mapping +--- + +# Semantic Mapping + +OpenTelemetry uses semantic conventions to standardize names for different types of telemetry data, ensuring consistency across systems. + +The following documentation describes how OpenTelemetry and Datadog conventions map to one another. + +- [Resource Attribute Mapping](http://localhost:1313/opentelemetry/mapping/semantic_mapping/) +- [Metrics Mapping](http://localhost:1313/opentelemetry/mapping/metrics_mapping/) +- [Infrastructure Host Mapping](http://localhost:1313/opentelemetry/mapping/host_metadata/) +- [Hostname Mapping](http://localhost:1313/opentelemetry/mapping/hostname/) +- [Service-entry Spans Mapping](http://localhost:1313/opentelemetry/mapping/service_entry_spans/) + +## Further reading{% #further-reading %} + +- [Correlate Data](http://localhost:1313/opentelemetry/correlate/) diff --git a/opentelemetry-mdoc/mapping/metrics_mapping/index.md b/opentelemetry-mdoc/mapping/metrics_mapping/index.md new file mode 100644 index 0000000000000..3a5dffebe965b --- /dev/null +++ b/opentelemetry-mdoc/mapping/metrics_mapping/index.md @@ -0,0 +1,39 @@ +--- +title: OpenTelemetry Metrics Mapping +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Semantic Mapping > OpenTelemetry Metrics + Mapping +--- + +# OpenTelemetry Metrics Mapping + +## Overview{% #overview %} + +Datadog products and visualizations are built on metrics and tags that follow specific naming patterns. Therefore, Datadog maps incoming OpenTelemetry metrics to the appropriate Datadog metric format. This mapping process may create additional metrics, but these do not affect Datadog billing. + +{% alert level="info" %} +**Want to unify OpenTelemetry and Datadog metrics in your queries?** Learn how to [query across Datadog and OpenTelemetry metrics](http://localhost:1313/metrics/open_telemetry/query_metrics) from the Metrics Query Editor. +{% /alert %} + +## How OpenTelemetry metrics appear in Datadog{% #how-opentelemetry-metrics-appear-in-datadog %} + +To differentiate metrics from the OpenTelemetry Collector's [hostmetrics](http://localhost:1313/opentelemetry/integrations/host_metrics/) receiver and the Datadog Agent, Datadog prepends `otel.` to any received metric that starts with `system.` or `process.`. 
Datadog does not recommend monitoring the same infrastructure with both the Datadog Agent and the OpenTelemetry Collector + +{% alert level="info" %} +Datadog is evaluating ways to improve the OTLP metric experience, including potentially deprecating this `otel` prefix. +{% /alert %} + +## Metrics mappings{% #metrics-mappings %} + +The following table shows the metric mappings for various integrations. Use the search and filter controls to find the mappings for a specific integration. + +For more information, see [OpenTelemetry integrations](http://localhost:1313/opentelemetry/integrations/). + +| | +| | + +## Further reading{% #further-reading %} + +- [OTLP Metric Types](http://localhost:1313/metrics/open_telemetry/otlp_metric_types) +- [Resource attribute mapping from OpenTelemetry to Datadog](http://localhost:1313/opentelemetry/guide/semantic_mapping) diff --git a/opentelemetry-mdoc/mapping/semantic_mapping/index.md b/opentelemetry-mdoc/mapping/semantic_mapping/index.md new file mode 100644 index 0000000000000..88a626dc1b878 --- /dev/null +++ b/opentelemetry-mdoc/mapping/semantic_mapping/index.md @@ -0,0 +1,173 @@ +--- +title: OpenTelemetry Semantic Conventions and Datadog Conventions +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Semantic Mapping > OpenTelemetry Semantic + Conventions and Datadog Conventions +--- + +# OpenTelemetry Semantic Conventions and Datadog Conventions + +OpenTelemetry makes use of [semantic conventions](https://opentelemetry.io/docs/concepts/semantic-conventions/) that specify names for different types of data. This page lists mappings for OpenTelemetry semantic conventions to Datadog's semantic conventions. + +| OpenTelemetry convention | Datadog convention | Type | +| ------------------------------------ | ------------------------------------- | ----------------------- | +| `deployment.environment.name` \* | `env` | Unified service tagging | +| `service.name` | `service` | Unified service tagging | +| `service.version` | `version` | Unified service tagging | +| `container.id` | `container_id` | Containers | +| `container.name` | `container_name` | Containers | +| `container.image.name` | `image_name` | Containers | +| `container.image.tag` | `image_tag` | Containers | +| `cloud.provider` | `cloud_provider` | Cloud | +| `cloud.region` | `region` | Cloud | +| `cloud.availability_zone` | `zone` | Cloud | +| `aws.ecs.task.family` | `task_family` | ECS | +| `aws.ecs.task.arn` | `task_arn` | ECS | +| `aws.ecs.cluster.arn` | `ecs_cluster_name` | ECS | +| `aws.ecs.task.revision` | `task_version` | ECS | +| `aws.ecs.container.arn` | `ecs_container_name` | ECS | +| `k8s.container.name` | `kube_container_name` | Kubernetes | +| `k8s.cluster.name` | `kube_cluster_name` | Kubernetes | +| `k8s.deployment.name` | `kube_deployment` | Kubernetes | +| `k8s.replicaset.name` | `kube_replica_set` | Kubernetes | +| `k8s.statefulset.name` | `kube_stateful_set` | Kubernetes | +| `k8s.daemonset.name` | `kube_daemon_set` | Kubernetes | +| `k8s.job.name` | `kube_job` | Kubernetes | +| `k8s.cronjob.name` | `kube_cronjob` | Kubernetes | +| `k8s.namespace.name` | `kube_namespace` | Kubernetes | +| `k8s.pod.name` | `pod_name` | Kubernetes | +| `app.kubernetes.io/name` | `kube_app_name` | Kubernetes labels | +| `app.kubernetes.io/instance` | `kube_app_instance` | Kubernetes labels | +| `app.kubernetes.io/version` | `kube_app_version` | Kubernetes labels | +| `app.kuberenetes.io/component` | `kube_app_component` | 
Kubernetes labels | +| `app.kubernetes.io/part-of` | `kube_app_part_of` | Kubernetes labels | +| `app.kubernetes.io/managed-by` | `kube_app_managed_by` | Kubernetes labels | +| `client.address` | `http.client_ip` | HTTP | +| `http.response.body.size` | `http.response.content_length` | HTTP | +| `http.response.header.` | `http.response.headers.` | HTTP | +| `http.response.status_code` | `http.status_code` | HTTP | +| `http.request.body.size` | `http.request.content_length` | HTTP | +| `http.request.header.referrer` | `http.referrer` | HTTP | +| `http.request.header.` | `http.request.headers.` | HTTP | +| `http.request.method` | `http.method` | HTTP | +| `http.route` | `http.route` | HTTP | +| `network.protocol.version` | `http.version` | HTTP | +| `server.address` | `http.server_name` | HTTP | +| `url.full` | `http.url` | HTTP | +| `user_agent.original` | `http.useragent` | HTTP | + +\*Replaces the deprecated `deployment.environment` convention. Requires Datadog Agent 7.58.0+ and Datadog Exporter v0.110.0+. + +## Span type mapping{% #span-type-mapping %} + +Datadog has a vendor-specific convention of "span type" represented by the `span.type` attribute. + +Based on the attributes included in your span, the Datadog Agent and Datadog OpenTelemetry components attempt to infer the appropriate span type for better compatibility with other Datadog services. You may also explicitly set the `span.type` attribute on any given span to override this logic using an [attributes](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/attributesprocessor) or a [transform](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/transformprocessor) processor, as well as by setting appropriate configuration values in OpenTelemetry SDKs. + +### Map OpenTelemetry span attribute to Datadog span type{% #map-opentelemetry-span-attribute-to-datadog-span-type %} + +The following table shows the span type mapping logic that is used if the feature flag `enable_receive_resource_spans_v2` is set in the Datadog Agent or both the Datadog Exporter and Connector, if using the OpenTelemetry Collector. The chart lists mappings in order of precedence. + +| \# | Span Attribute | Datadog span.type | +| -- | --------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| 1 | `span.type` | `span.type` attribute value | +| 2 | [Span kind server](https://opentelemetry.io/docs/concepts/signals/traces/#server) | `web` | +| 3 | [Span kind client](https://opentelemetry.io/docs/concepts/signals/traces/#client) | see 3a/b | +| 3a | Client span kind, `db.system` attribute not found | `http` | +| 3b | Client span kind, `db.system` attribute found | See the table below Mapping OpenTelemetry database system type to Datadog span type | +| 4 | None of above conditions were fulfilled | `custom` | + +### Mapping OpenTelemetry database system type to Datadog span type{% #mapping-opentelemetry-database-system-type-to-datadog-span-type %} + +In the table above, if a span is a "client" kind and contains [`db.system` attribute](https://opentelemetry.io/docs/specs/semconv/attributes-registry/db/#db-system), the following mapping applies for the span type in Datadog. Setting a `span.type` attribute on your span overrides this logic. 
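For example, a transform processor statement that forces a specific span type might look like the following; the span type `db` and the service name in the condition are illustrations only:

```yaml
processors:
  transform:
    trace_statements:
      - context: span
        statements:
          # Override the inferred span type for spans from one service
          - set(attributes["span.type"], "db") where resource.attributes["service.name"] == "my-database-proxy"
```

When no such override is set, the mapping in the following table applies.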
+ +| `db.system` | Datadog span.type | +| -------------------------------------- | ----------------- | +| SQL Type DBMS (listed below) | `sql` | +| `adabas` | `sql` | +| `cache` | `sql` | +| `clickhouse` | `sql` | +| `cloudscape` | `sql` | +| `cockroachdb` | `sql` | +| `coldfusion` | `sql` | +| `db2` | `sql` | +| `derby` | `sql` | +| `edb` | `sql` | +| `firebird` | `sql` | +| `firstsql` | `sql` | +| `filemaker` | `sql` | +| `hanadb` | `sql` | +| `h2` | `sql` | +| `hsqldb` | `sql` | +| `informix` | `sql` | +| `ingres` | `sql` | +| `instantdb` | `sql` | +| `interbase` | `sql` | +| `mariadb` | `sql` | +| `maxdb` | `sql` | +| `mssql` | `sql` | +| `mysql` | `sql` | +| `netezza` | `sql` | +| `oracle` | `sql` | +| `other_sql` | `sql` | +| `pervasive` | `sql` | +| `pointbase` | `sql` | +| `postgresql` | `sql` | +| `progress` | `sql` | +| `redshift` | `sql` | +| `sqlite` | `sql` | +| `sybase` | `sql` | +| `teradata` | `sql` | +| `vertica` | `sql` | +| Other DB types | see below | +| `cassandra` | `cassandra` | +| `couchbase` | `db` | +| `couchdb` | `db` | +| `cosmosdb` | `db` | +| `dynamodb` | `db` | +| `elasticsearch` | `elasticsearch` | +| `geode` | `db` | +| `hive` | `db` | +| `memcached` | `memcached` | +| `mongodb` | `mongodb` | +| `opensearch` | `opensearch` | +| `redis` | `redis` | +| any `db.system` value not listed above | `db` | + +## Metrics attribute mapping{% #metrics-attribute-mapping %} + +For metrics, by default, Datadog only maps the OpenTelemetry resource attributes listed in the previous sections to Datadog metric tags. To map all resource attributes to tags, enable the `metrics::resource_attributes_as_tags` setting: + +{% tab title="Datadog exporter" %} + +```yaml +exporters: + datadog: + # Other configuration goes here... + metrics: + # Add all resource attributes as tags for metrics + resource_attributes_as_tags: true +``` + +{% /tab %} + +{% tab title="Datadog Agent" %} + +```yaml +otlp_config: + # Other configuration goes here... + metrics: + # Add all resource attributes as tags for metrics + resource_attributes_as_tags: true +``` + +{% /tab %} + +Enabling this option adds both the OpenTelemetry resource attributes and the Datadog semantic conventions to the metric tags. + +## Further reading{% #further-reading %} + +- [Metrics mapping from OpenTelemetry to Datadog](http://localhost:1313/opentelemetry/guide/metrics_mapping) +- [OpenTelemetry metric types](http://localhost:1313/metrics/open_telemetry/otlp_metric_types) +- [Implementation code for these mappings](https://github.com/DataDog/opentelemetry-mapping-go/blob/main/pkg/otlp/attributes/attributes.go) diff --git a/opentelemetry-mdoc/mapping/service_entry_spans/index.md b/opentelemetry-mdoc/mapping/service_entry_spans/index.md new file mode 100644 index 0000000000000..22253c2fef99f --- /dev/null +++ b/opentelemetry-mdoc/mapping/service_entry_spans/index.md @@ -0,0 +1,63 @@ +--- +title: Mapping OpenTelemetry Semantic Conventions to Service-entry Spans +description: Datadog, the leading service for cloud-scale monitoring. 
+breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Semantic Mapping > Mapping OpenTelemetry + Semantic Conventions to Service-entry Spans +--- + +# Mapping OpenTelemetry Semantic Conventions to Service-entry Spans + +## Overview{% #overview %} + +Datadog uses [service-entry spans](https://docs.datadoghq.com/glossary/#service-entry-span) throughout the platform for features such as [Trace Metrics](https://docs.datadoghq.com/opentelemetry/integrations/trace_metrics/) and the [APM Trace Explorer](https://docs.datadoghq.com/tracing/trace_explorer). This convention is unique to Datadog, but can be mapped from the [`SpanKind`](https://opentelemetry.io/docs/specs/otel/trace/api/#spankind) attribute in OpenTelemetry by following the opt-in guide below. + +## Requirements{% #requirements %} + +- OTel Collector Contrib v0.100.0 or greater +- Datadog Agent v7.53.0 or greater + +## Setup{% #setup %} + +Enable the config option based on your ingestion path: + +{% tab title="OTel Collector and Datadog Exporter" %} +The new service-entry span identification logic can be enabled by setting the `traces::compute_top_level_by_span_kind` config option to true in the [Datadog exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.100.0/exporter/datadogexporter/examples/collector.yaml#L365-L370) and [Datadog connector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.100.0/connector/datadogconnector/examples/config.yaml#L48-L53). This config option needs to be enabled in both the exporter and connector if both components are being used. +{% /tab %} + +{% tab title="OTLP ingest pipeline in the Datadog Agent" %} +The new service-entry span identification logic can be enabled by adding `"enable_otlp_compute_top_level_by_span_kind"` to [apm_config.features](https://github.com/DataDog/datadog-agent/blob/7.53.0/pkg/config/config_template.yaml#L1585-L1591) in the Datadog Agent config. +{% /tab %} + +## Supported conventions{% #supported-conventions %} + +[Trace Metrics](https://docs.datadoghq.com/opentelemetry/integrations/trace_metrics/) are generated for service entry spans and measured spans. These span conventions are unique to Datadog, so OpenTelemetry spans are identified with the following mapping: + +| OpenTelemetry Convention | Datadog Convention | +| ------------------------------------- | -------------------------- | +| Root span | Service entry span | +| Server span (`span.kind: server`) | Service entry span | +| Consumer span (`span.kind: consumer`) | Service entry span | +| Client span (`span.kind: client`) | Measured span | +| Producer span (`span.kind: producer`) | Measured span | +| Internal span (`span.kind: internal`) | No trace metrics generated | + +## Migration{% #migration %} + +This new service-entry span identification logic may increase the number of spans that generate trace metrics, which may affect existing monitors that are based on trace metrics. Users who only have internal spans will see a decrease in trace metrics. + +If you have existing monitors based on trace metrics, you can update them after upgrading since this change introduces more consistency in trace metrics. If you only have internal spans, update your instrumentation according to the above table to receive trace metrics and service-entry spans. 
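For reference, the `compute_top_level_by_span_kind` option described in the Setup section is set under `traces` in both the Datadog connector and the Datadog exporter. A minimal sketch (see the linked example configurations for the authoritative format):

```yaml
connectors:
  datadog/connector:
    traces:
      # Identify service entry spans and measured spans by span kind
      compute_top_level_by_span_kind: true

exporters:
  datadog:
    api:
      key: ${env:DD_API_KEY}
    traces:
      compute_top_level_by_span_kind: true
```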
+ +[`SpanKind`](https://opentelemetry.io/docs/specs/otel/trace/api/#spankind) is typically set when a span is created, but can also be updated by using the [transform processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md) in the OpenTelemetry Collector to control the mapping above. For example, if trace metrics are desired for an internal span, the following configuration transforms an internal span with `http.path: "/health"` into a client span: + +```yaml + transform: + trace_statements: + - context: span + statements: + - set(kind.string, "Client") where kind.string == "Internal" and attributes["http.path"] == "/health" +``` + +## Further reading{% #further-reading %} + +- [OpenTelemetry Trace Metrics](http://localhost:1313/opentelemetry/integrations/trace_metrics) diff --git a/opentelemetry-mdoc/migrate/collector_0_120_0/index.md b/opentelemetry-mdoc/migrate/collector_0_120_0/index.md new file mode 100644 index 0000000000000..788e02444222f --- /dev/null +++ b/opentelemetry-mdoc/migrate/collector_0_120_0/index.md @@ -0,0 +1,96 @@ +--- +title: Migrate to OpenTelemetry Collector version 0.120.0+ +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > OpenTelemetry Migration Guides > Migrate to + OpenTelemetry Collector version 0.120.0+ +--- + +# Migrate to OpenTelemetry Collector version 0.120.0+ + +[OTel Collector Contrib version 0.120.0](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases/tag/v0.120.0) introduced breaking changes to metric names as part of the upgrade to Prometheus 3.0. After upgrading to this version of the OpenTelemetry Collector, you may notice differences in metric values displayed in Datadog. + +{% alert level="info" %} +These breaking changes are not introduced by or directly related to Datadog. They impact all OpenTelemetry users who use Prometheus. For a complete list of changes, see the [update](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/36873) to the Prometheus receiver in the Collector or the Prometheus 3.0 [migration guide](https://prometheus.io/docs/prometheus/latest/migration/). +{% /alert %} + +## Changes to Collector internal metric names{% #changes-to-collector-internal-metric-names %} + +[Collector Internal Metrics](https://opentelemetry.io/docs/collector/internal-telemetry/) sent using the latest Collector version have the following changes: + +- Dots (`.`) in internal collector metrics and resource attributes scraped by Prometheus are no longer replaced with underscores (`_`) by default. +- The `otelcol_` prefix is no longer added to metric names. 
+ +For example: + +| Before 0.120.0 | After 0.120.0 | +| ------------------------------------------------- | ----------------------------------------- | +| `otelcol_datadog_trace_agent_otlp_traces` | `datadog.trace_agent.otlp.traces` | +| `otelcol_datadog_trace_agent_otlp_spans` | `datadog.trace_agent.otlp.spans` | +| `otelcol_datadog_trace_agent_otlp_payload` | `datadog.trace_agent.otlp.payload` | +| `otelcol_datadog_trace_agent_trace_writer_events` | `datadog.trace_agent.trace_writer.events` | + +As a result, Datadog has updated two out-of-the-box dashboards affected by this upgrade: + +- OpenTelemetry Collector Health Dashboard +- APM Datadog Trace Agent Dashboard + +### OpenTelemetry Collector health dashboard{% #opentelemetry-collector-health-dashboard %} + +Queries on the [OpenTelemetry Collector health dashboard](http://localhost:1313/opentelemetry/integrations/collector_health_metrics/) were modified to be compatible with metric names sent from both older (< 0.120.0) and newer (0.120.0+) versions of the Collector. + +If you are using a cloned version of this dashboard or have monitors that query metric names from older Collector versions, you may need to manually update them using the [equiv_otel() function](http://localhost:1313/opentelemetry/guide/combining_otel_and_datadog_metrics/). + +{% image + source="http://localhost:1313/images/opentelemetry/guide/migration/collector_health.1985e5affef618f5cadcf25312872a54.png?auto=format" + alt="OpenTelemetry Collector Health Dashboard showing compatible queries" /%} + +### APM Datadog Trace Agent dashboard{% #apm-datadog-trace-agent-dashboard %} + +Queries on the [APM Datadog Trace Agent dashboard](http://localhost:1313/tracing/troubleshooting/agent_apm_metrics/) were updated with filters to exclude sources `datadogexporter` and `datadogconnector` to prevent metric collisions with OpenTelemetry sources that emit the same metric names. This dashboard is designed to show only Trace Agent data, and the update ensures that data from these sources doesn't mix with OpenTelemetry data. + +Only the out-of-the-box dashboard template was updated. If you are using a cloned version of this dashboard, you may need to manually update queries on custom dashboards to exclude sources `datadogexporter` and `datadogconnector` using: + +```text +source NOT IN (datadogexporter, datadogconnector) +``` + +## Changes to Prometheus Server reader defaults{% #changes-to-prometheus-server-reader-defaults %} + +{% alert level="info" %} +If you use default configurations for your OpenTelemetry Collector's telemetry settings, you will not be impacted by these changes. +{% /alert %} + +You are only impacted if you have explicitly configured the Prometheus reader with custom settings, such as: + +```yaml +service: + telemetry: + metrics: + level: detailed + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8888 +``` + +If you are affected by these changes, you may see differences in metric names, such as suffix changes and unit additions. + +To revert to the previous behavior, add these three parameters to your existing Prometheus reader configuration: + +```yaml +without_scope_info: true +without_type_suffix: true +without_units: true +``` + +For questions or assistance, contact [Datadog support](http://localhost:1313/help/). 
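For reference, combining the custom reader example above with these three options might look like the following; the placement under the `prometheus` exporter block is an assumption to verify against your Collector version:

```yaml
service:
  telemetry:
    metrics:
      level: detailed
      readers:
        - pull:
            exporter:
              prometheus:
                host: localhost
                port: 8888
                # Restore the pre-0.120.0 naming behavior
                without_scope_info: true
                without_type_suffix: true
                without_units: true
```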
+ +## Further reading{% #further-reading %} + +- [Migrate to OpenTelemetry Collector version 0.95.0+](http://localhost:1313/opentelemetry/guide/switch_from_processor_to_connector) +- [OpenTelemetry Collector Health Dashboard](http://localhost:1313/opentelemetry/integrations/collector_health_metrics) +- [APM Datadog Trace Agent Dashboard](http://localhost:1313/tracing/troubleshooting/agent_apm_metrics) +- [Prometheus 3.0 Migration Guide](https://prometheus.io/docs/prometheus/latest/migration/) diff --git a/opentelemetry-mdoc/migrate/collector_0_95_0/index.md b/opentelemetry-mdoc/migrate/collector_0_95_0/index.md new file mode 100644 index 0000000000000..86218a99f3a2c --- /dev/null +++ b/opentelemetry-mdoc/migrate/collector_0_95_0/index.md @@ -0,0 +1,105 @@ +--- +title: Migrate to OpenTelemetry Collector version 0.95.0+ +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > OpenTelemetry Migration Guides > Migrate to + OpenTelemetry Collector version 0.95.0+ +--- + +# Migrate to OpenTelemetry Collector version 0.95.0+ + +[OTel Collector Contrib version 0.95.0](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases/tag/v0.95.0) disables Trace Metrics computation in the Datadog Exporter by default. + +In versions 0.95.0 and later, the calculation of Trace Metrics is handled by the Datadog Connector. This change ensures that: + +- Trace Metrics are consistently calculated on 100% of the trace data, even when sampling is applied. +- Moving calculation to the Datadog Connector better aligns with the OpenTelemetry recommended architecture. + +To continue receiving Trace Metrics, configure the Datadog Connector in the OpenTelemetry Collector. + +## Migrate to OpenTelemetry Collector version 0.95.0+{% #migrate-to-opentelemetry-collector-version-0950 %} + +{% alert level="warning" %} +To continue receiving Trace Metrics, you must configure the Datadog Connector as a part of your upgrade to OpenTelemetry Collector version 0.95.0+. Upgrading without configuring the Datadog Connector might also result in difficulties viewing the APM Traces page within the application. Monitors and dashboards based on the affected metrics might also be impacted. +{% /alert %} + +Before proceeding with the upgrade to the OTel Collector versions 0.95.0+: + +- Review the [release notes](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases/tag/v0.95.0) to understand the nature of the changes. +- Assess how these changes may affect your current configuration and deployment. +- Consider reaching out to the [Datadog support team](https://docs.datadoghq.com/help/) for guidance and assistance in planning your upgrade strategy. + +To upgrade: + +1. Include `datadog/connector` in the list of configured connectors. +1. Include `datadog/connector` and `datadog/exporter` in the list of the configured exporters in your OpenTelemetry `traces` pipeline. +1. Include `datadog/connector` in the list of the configured receivers in your OpenTelemetry `metrics` pipeline. +1. Include `datadog/exporter` in the list of the configured exporters in your OpenTelemetry `metrics` pipeline. + +## Example configuration{% #example-configuration %} + +Below is an example of an OpenTelemetry configuration before and after migration. 
+
+Before migration:
+
+```yaml
+# Legacy default configuration before v0.95.0
+receivers:
+  otlp:
+    protocols:
+      http:
+      grpc:
+processors:
+  batch:
+exporters:
+  datadog:
+    api:
+      key:
+service:
+  pipelines:
+    traces:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [datadog]
+```
+
+After migration:
+
+```yaml
+# New default configuration after v0.95.0
+receivers:
+  otlp:
+    protocols:
+      http:
+        endpoint: 0.0.0.0:4318
+      grpc:
+        endpoint: 0.0.0.0:4317
+processors:
+  batch:
+connectors:
+  datadog/connector:
+exporters:
+  datadog/exporter:
+    api:
+      key:
+service:
+  pipelines:
+    traces:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [datadog/connector, datadog/exporter]
+    metrics:
+      receivers: [datadog/connector, otlp] # The connector provides the metrics to your metrics pipeline
+      processors: [batch]
+      exporters: [datadog/exporter]
+```
+
+## Vendor-specific OpenTelemetry distributions{% #vendor-specific-open-telemetry-distributions %}
+
+If you're running a vendor-specific OpenTelemetry distribution that does not include the Datadog Connector, revert to the previous Trace Connector behavior by disabling the `exporter.datadogexporter.DisableAPMStats` feature gate.
+
+```sh
+otelcol --config=config.yaml --feature-gates=-exporter.datadogexporter.DisableAPMStats
+```
+
+For questions or assistance, contact [Datadog support](https://docs.datadoghq.com/help/). 
diff --git a/opentelemetry-mdoc/migrate/ddot_collector/index.md b/opentelemetry-mdoc/migrate/ddot_collector/index.md 
new file mode 100644 
index 0000000000000..08309f3f86889 
--- /dev/null 
+++ b/opentelemetry-mdoc/migrate/ddot_collector/index.md 
@@ -0,0 +1,363 @@ 
+---
+title: Migrate to the Datadog Distribution of OTel Collector
+description: Datadog, the leading service for cloud-scale monitoring.
+breadcrumbs: >-
+  Docs > OpenTelemetry in Datadog > OpenTelemetry Migration Guides > Migrate to
+  the Datadog Distribution of OTel Collector
+---
+
+# Migrate to the Datadog Distribution of OTel Collector
+
+If you are already using a standalone OpenTelemetry (OTel) Collector for your OTel-instrumented applications, you can migrate to the Datadog Distribution of OpenTelemetry (DDOT) Collector. The DDOT Collector allows you to leverage Datadog's enhanced capabilities, including optimized configurations, seamless integrations, and additional features tailored for the Datadog ecosystem.
+
+To migrate to the DDOT Collector, you need to install the Datadog Agent and configure your applications to report the telemetry data.
+
+{% alert level="warning" %}
+The DDOT Collector only supports deployment as a DaemonSet (following the [agent deployment pattern](https://opentelemetry.io/docs/collector/deployment/agent/)), not as a [gateway](https://opentelemetry.io/docs/collector/deployment/gateway/). If you have an existing gateway architecture, you can use the DDOT Collector with the [loadbalancingexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/loadbalancingexporter) to connect to your existing gateway layer. 
+{% /alert %} + +## Prerequisites{% #prerequisites %} + +Before starting the migration process, ensure you have: + +- A valid Datadog account +- An OpenTelemetry-instrumented application ready to send telemetry data +- Access to your current OpenTelemetry Collector configurations +- Administrative access to your Kubernetes cluster (Kubernetes v1.29+ is required) + - **Note**: EKS Fargate environments are not supported +- Helm v3+ + +## Review existing configuration{% #review-existing-configuration %} + +Before you begin, review your configuration to see if your existing config is supported by default: + +1. Examine your existing OpenTelemetry Collector configuration file (`otel-config.yaml`). +1. Compare it to the [list of components](http://localhost:1313/opentelemetry/setup/ddot_collector/#included-components) included in the Datadog Agent by default. +1. If your setup uses components not included in the Agent by default, follow [Use Custom OpenTelemetry Components with Datadog Agent](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components). +1. If your configuration uses `span_name_as_resource_name` or `span_name_remappings`, review the [New Operation Name Mappings guide](http://localhost:1313/opentelemetry/guide/migrate/migrate_operation_names/). The DDOT Collector enables these new mappings by default. + +{% alert level="info" %} +The default configuration settings in Datadog's embedded collector may differ from the standard OpenTelemetry Collector configuration defaults. This can affect behavior of components like the `filelogreceiver`. Review the configuration closely when migrating from a standalone collector. +{% /alert %} + +### Example configuration{% #example-configuration %} + +Here are two example Collector configuration files: + +{% tab title="Custom Collector components" %} +This example uses a custom `metricstransform` component: + +In the `collector-config.yaml` file: + +```yaml +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 +exporters: + datadog: + api: + key: ${env:DD_API_KEY} + site: ${env:DD_SITE} +processors: + infraattributes: + cardinality: 2 + batch: + timeout: 10s + metricstransform: + transforms: + - include: system.cpu.usage + action: insert + new_name: host.cpu.utilization +connectors: + datadog/connector: + traces: +service: + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [datadog/connector, datadog] + metrics: + receivers: [otlp, datadog/connector] + processors: [metricstransform, infraattributes, batch] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [datadog] +``` + +In this case, you need to follow [Use Custom OpenTelemetry Components with Datadog Agent](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components). 
+{% /tab %} + +{% tab title="Default Agent components" %} +This example only uses components included in the Datadog Agent by default: + +In the `collector-config.yaml` file: + +```yaml +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 +exporters: + datadog: + api: + key: ${env:DD_API_KEY} + site: ${env:DD_SITE} +processors: + infraattributes: + cardinality: 2 + batch: + timeout: 10s +connectors: + datadog/connector: + traces: +service: + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [datadog/connector, datadog] + metrics: + receivers: [otlp, datadog/connector] + processors: [infraattributes, batch] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [datadog] +``` + +In this case, you can proceed to installing the DDOT Collector. +{% /tab %} + +## Install the Agent with OpenTelemetry Collector{% #install-the-agent-with-opentelemetry-collector %} + +Follow these steps to install the DDOT Collector. + +### Add the Datadog Helm Repository{% #add-the-datadog-helm-repository %} + +To add the Datadog repository to your Helm repositories: + +```shell +helm repo add datadog https://helm.datadoghq.com +helm repo update +``` + +### Set up Datadog API key{% #set-up-datadog-api-key %} + +1. Get the Datadog [API key](https://app.datadoghq.com/organization-settings/api-keys/). +1. Store the API key as a Kubernetes secret: + ```shell + kubectl create secret generic datadog-secret \ + --from-literal api-key= + ``` +Replace `` with your actual Datadog API key. + +### Configure the Datadog Agent{% #configure-the-datadog-agent %} + +Use a YAML file to specify the Helm chart parameters for the [Datadog Agent chart](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components). + +1. Create an empty `datadog-values.yaml` file: + + ```shell + touch datadog-values.yaml + ``` +Important alert (level: info): Unspecified parameters use defaults from [values.yaml](https://github.com/DataDog/helm-charts/blob/main/charts/datadog/values.yaml). +1. Configure the Datadog API key secret: + +In the `datadog-values.yaml` file: + + ```yaml + datadog: + site: + apiKeyExistingSecret: datadog-secret + +``` +Set `` to your [Datadog site](http://localhost:1313/getting_started/site/). Otherwise, it defaults to `datadoghq.com`, the US1 site. + + +1. Enable the OpenTelemetry Collector and configure the essential ports: + +In the `datadog-values.yaml` file: + + ```yaml + datadog: + ... + otelCollector: + enabled: true + ports: + - containerPort: "4317" # default port for OpenTelemetry gRPC receiver. + hostPort: "4317" + name: otel-grpc + - containerPort: "4318" # default port for OpenTelemetry HTTP receiver + hostPort: "4318" + name: otel-http + +``` +It is required to set the `hostPort` in order for the container port to be exposed to the external network. This enables configuring the OTLP exporter to point to the IP address of the node to which the Datadog Agent is assigned. + + +If you don't want to expose the port, you can use the Agent service instead: + + 1. Remove the `hostPort` entries from your `datadog-values.yaml` file. + 1. In your application's deployment file (`deployment.yaml`), configure the OTLP exporter to use the Agent service: + ```sh + env: + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: 'http://..svc.cluster.local' + - name: OTEL_EXPORTER_OTLP_PROTOCOL + value: 'grpc' + ``` + +1. 
+1. (Optional) Enable additional Datadog features:
+
+   {% alert level="danger" %}
+   Enabling these features may incur additional charges. Review the [pricing page](https://www.datadoghq.com/pricing/) and talk to your Customer Success Manager before proceeding.
+   {% /alert %}
+
+   In the `datadog-values.yaml` file:
+
+   ```yaml
+   datadog:
+     ...
+     apm:
+       portEnabled: true
+       peer_tags_aggregation: true
+       compute_stats_by_span_kind: true
+       peer_service_aggregation: true
+     orchestratorExplorer:
+       enabled: true
+     processAgent:
+       enabled: true
+       processCollection: true
+   ```
+
+1. (Optional) Collect pod labels and use them as tags to attach to metrics, traces, and logs:
+
+   {% alert level="danger" %}
+   Custom metrics may impact billing. See the [custom metrics billing page](https://docs.datadoghq.com/account_management/billing/custom_metrics) for more information.
+   {% /alert %}
+
+   In the `datadog-values.yaml` file:
+
+   ```yaml
+   datadog:
+     ...
+     podLabelsAsTags:
+       app: kube_app
+       release: helm_release
+   ```
+
+## Deploy the Agent with OpenTelemetry Collector{% #deploy-the-agent-with-opentelemetry-collector %}
+
+1. Install or upgrade the Datadog Agent with OpenTelemetry Collector in your Kubernetes environment:
+   ```sh
+   helm upgrade -i <RELEASE_NAME> datadog/datadog \
+     -f datadog-values.yaml \
+     --set-file datadog.otelCollector.config=collector-config.yaml
+   ```
+   `<RELEASE_NAME>` is the name you choose for the Helm release (for example, `datadog`).
+1. Navigate to **Integrations** > **Fleet Automation**.
+1. Select the **OTel Collector Version** facet.
+1. Select an Agent and inspect its configuration to verify that the new Agent with OpenTelemetry Collector is installed successfully.
+
+## Configure your application{% #configure-your-application %}
+
+To configure your existing application to use the Datadog Agent instead of the standalone Collector, make sure it targets the correct OTLP endpoint hostname. The Datadog Agent with the DDOT Collector is deployed as a DaemonSet, so each application must target the Agent on its local node.
+
+1. Go to your application's Deployment manifest file (`deployment.yaml`).
+1. Add the following environment variables to configure the OTLP endpoint:
+
+   In the `deployment.yaml` file:
+
+   ```yaml
+   env:
+     ...
+     - name: HOST_IP
+       valueFrom:
+         fieldRef:
+           fieldPath: status.hostIP
+     - name: OTLP_GRPC_PORT
+       value: "4317"
+     - name: OTEL_EXPORTER_OTLP_ENDPOINT
+       value: 'http://$(HOST_IP):$(OTLP_GRPC_PORT)'
+     - name: OTEL_EXPORTER_OTLP_PROTOCOL
+       value: 'grpc'
+   ```
+
+### Operation name mapping differences{% #operation-name-mapping-differences %}
+
+If you previously used the `span_name_as_resource_name` or `span_name_remappings` configurations in your standalone Collector, you need to adapt your configuration:
+
+1. Remove these configurations from your Datadog Exporter and Connector settings.
+1. Enable the `enable_operation_and_resource_name_logic_v2` feature flag in your Agent configuration.
+
+For detailed instructions on migrating to the new operation name mappings, see [Migrate to New Operation Name Mappings](http://localhost:1313/opentelemetry/guide/migrate/migrate_operation_names/).
+
+## Correlate observability data{% #correlate-observability-data %}
+
+[Unified service tagging](http://localhost:1313/getting_started/tagging/unified_service_tagging) ties observability data together in Datadog so you can navigate across metrics, traces, and logs with consistent tags.
+
+To configure your application with unified service tagging, set the `OTEL_RESOURCE_ATTRIBUTES` environment variable:
+
+1. Go to your application's Deployment manifest file.
+1. Add the following lines to enable the correlation between application traces and other observability data:
+
+   In the `deployment.yaml` file:
+
+   ```yaml
+   env:
+     ...
+     - name: OTEL_SERVICE_NAME
+       value: {{ include "calendar.fullname" . }}
+     - name: OTEL_K8S_NAMESPACE
+       valueFrom:
+         fieldRef:
+           apiVersion: v1
+           fieldPath: metadata.namespace
+     - name: OTEL_K8S_NODE_NAME
+       valueFrom:
+         fieldRef:
+           apiVersion: v1
+           fieldPath: spec.nodeName
+     - name: OTEL_K8S_POD_NAME
+       valueFrom:
+         fieldRef:
+           apiVersion: v1
+           fieldPath: metadata.name
+     - name: OTEL_EXPORTER_OTLP_PROTOCOL
+       value: 'grpc'
+     - name: OTEL_RESOURCE_ATTRIBUTES
+       value: >-
+         service.name=$(OTEL_SERVICE_NAME),
+         k8s.namespace.name=$(OTEL_K8S_NAMESPACE),
+         k8s.node.name=$(OTEL_K8S_NODE_NAME),
+         k8s.pod.name=$(OTEL_K8S_POD_NAME),
+         k8s.container.name={{ .Chart.Name }},
+         host.name=$(OTEL_K8S_NODE_NAME),
+         deployment.environment=$(OTEL_K8S_NAMESPACE)
+   ```
+
+## Verify data flow{% #verify-data-flow %}
+
+After configuring your application, verify that data is flowing correctly to Datadog:
+
+1. Apply the configuration changes by redeploying your applications:
+   ```sh
+   kubectl apply -f deployment.yaml
+   ```
+1. Confirm that telemetry data is being received in your Datadog account. Check logs, traces, and metrics to ensure correct data collection and correlation.
+
+## Uninstall standalone Collector{% #uninstall-standalone-collector %}
+
+After you've confirmed that all data is being collected correctly in Datadog, you can remove the standalone OpenTelemetry Collector:
+
+1. Ensure all required data is being collected and displayed in Datadog.
+1. Uninstall the open source OpenTelemetry Collector from your environment:
+   ```sh
+   kubectl delete deployment old-otel-collector
+   ```
+
+## Further reading{% #further-reading %}
+
+- [Use Custom OpenTelemetry Components with Datadog Agent](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components)
+- [Install the Datadog Distribution of OTel Collector](http://localhost:1313/opentelemetry/setup/ddot_collector/install/)
diff --git a/opentelemetry-mdoc/migrate/index.md b/opentelemetry-mdoc/migrate/index.md
new file mode 100644
index 0000000000000..e11e09ee2b637
--- /dev/null
+++ b/opentelemetry-mdoc/migrate/index.md
@@ -0,0 +1,12 @@
+---
+title: OpenTelemetry Migration Guides
+description: Datadog, the leading service for cloud-scale monitoring.
+breadcrumbs: Docs > OpenTelemetry in Datadog > OpenTelemetry Migration Guides
+---
+
+# OpenTelemetry Migration Guides
+
+- [Migrate to OpenTelemetry Collector version 0.120.0+](http://localhost:1313/opentelemetry/migrate/collector_0_120_0)
+- [Migrate to OpenTelemetry Collector version 0.95.0+](http://localhost:1313/opentelemetry/migrate/collector_0_95_0)
+- [Migrate to New Operation Name Mappings](http://localhost:1313/opentelemetry/migrate/migrate_operation_names)
+- [Migrate to the Datadog Distribution of OpenTelemetry (DDOT) Collector](http://localhost:1313/opentelemetry/migrate/ddot_collector)
diff --git a/opentelemetry-mdoc/migrate/migrate_operation_names/index.md b/opentelemetry-mdoc/migrate/migrate_operation_names/index.md
new file mode 100644
index 0000000000000..a38401f317283
--- /dev/null
+++ b/opentelemetry-mdoc/migrate/migrate_operation_names/index.md
@@ -0,0 +1,249 @@
+---
+title: Migrate to New Operation Name Mappings
+description: Datadog, the leading service for cloud-scale monitoring.
+breadcrumbs: >-
+  Docs > OpenTelemetry in Datadog > OpenTelemetry Migration Guides > Migrate to
+  New Operation Name Mappings
+---
+
+# Migrate to New Operation Name Mappings
+
+## Overview{% #overview %}
+
+When using OpenTelemetry with Datadog, you might see unclear or lengthy operation names in your traces, and some traces might not appear in your service pages. This happens because of missing mappings between OpenTelemetry SDK information and Datadog operation names, which are span attributes that classify [entry points into a service](http://localhost:1313/tracing/guide/configuring-primary-operation/#primary-operations).
+
+Datadog has introduced new logic for generating operation names for OpenTelemetry traces, controlled by the `enable_operation_and_resource_name_logic_v2` feature flag. This new logic improves trace visibility in service pages and standardizes operation naming according to the rules outlined below.
+
+{% alert level="danger" %}
+**Breaking Change:** When this new logic is active (either by opting in or by future default), it is a breaking change for monitors or dashboards that reference operation names based on the old conventions. You must update your monitors and dashboards to use the new naming conventions described in [New mapping logic](#new-mapping-logic). If you cannot update them yet, you can [opt out](#disabling-the-new-logic-opt-out).
+{% /alert %}
+
+## Default rollout schedule{% #default-rollout-schedule %}
+
+The `enable_operation_and_resource_name_logic_v2` feature flag controls this new logic. It is enabled by default starting in the following versions:
+
+- **Datadog Distribution of OpenTelemetry (DDOT) Collector**: Datadog Agent v7.65+
+- **OpenTelemetry Collector**: OTel Collector v0.126.0+
+- **OTel to Datadog Agent (OTLP)**: Datadog Agent v7.66+
+
+If you are using an earlier version and want the new logic without upgrading, you can enable it explicitly.
+
+## New mapping logic{% #new-mapping-logic %}
+
+When the `enable_operation_and_resource_name_logic_v2` flag is active, the following table shows how operation names are determined based on span attributes and span kind. The system processes conditions from top to bottom and uses the first matching rule.
+
+For example, with the new logic active, a span previously named `go.opentelemetry.io_contrib_instrumentation_net_http_otelhttp.server` is now named `http.server.request`.
+
+| Condition on Span Attributes | Span Kind | Resulting Operation Name |
+| ---------------------------- | --------- | ------------------------ |
+| `operation.name` is set | Any | Value of `operation.name` |
+| `http.request.method` or `http.method` is set | Server | `http.server.request` |
+| `http.request.method` or `http.method` is set | Client | `http.client.request` |
+| `db.system` is set | Client | `<db.system>.query` |
+| `messaging.system` and `messaging.operation` are set | Client/Server/Consumer/Producer | `<messaging.system>.<messaging.operation>` |
+| `rpc.system == "aws-api"` and `rpc.service` are set | Client | `aws.<rpc.service>.request` |
+| `rpc.system == "aws-api"` | Client | `aws.client.request` |
+| `rpc.system` is set | Client | `<rpc.system>.client.request` |
+| `rpc.system` is set | Server | `<rpc.system>.server.request` |
+| `faas.invoked_provider` and `faas.invoked_name` are set | Client | `<faas.invoked_provider>.<faas.invoked_name>.invoke` |
+| `faas.trigger` is set | Server | `<faas.trigger>.invoke` |
+| `graphql.operation.type` is set | Any | `graphql.server.request` |
+| `network.protocol.name` is set | Server | `<network.protocol.name>.server.request` |
+| `network.protocol.name` is set | Client | `<network.protocol.name>.client.request` |
+| No matching attributes | Server | `server.request` |
+| No matching attributes | Client | `client.request` |
+| No matching attributes | Any non-unspecified kind | The span kind's `.String()` value |
+| No matching attributes | Unspecified | `internal` |
+
+**Note**: Placeholders in angle brackets, such as `<db.system>`, represent the value of the corresponding span attribute.
+
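+As a worked example of the table above, consider a hypothetical database client span (the attribute values are illustrative, not required settings):
+
+```yaml
+# Hypothetical OTLP span, shown as YAML for readability
+span:
+  kind: SPAN_KIND_CLIENT
+  attributes:
+    db.system: postgresql
+    db.statement: SELECT * FROM orders
+# Matching rule: "db.system is set" with span kind Client
+# Resulting Datadog operation name: postgresql.query
+```
+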
+## Prerequisites{% #prerequisites %}
+
+Before enabling the new logic (either by opting in or by upgrading to a version where it is the default), remove any existing legacy span name configurations that might conflict:
+
+{% tab title="OpenTelemetry Collector" %}
+Remove `span_name_as_resource_name` and `span_name_remappings` from your Datadog exporter and connector configurations:
+
+```yaml
+# Remove the highlighted lines if they exist in your configuration
+exporters:
+  datadog:
+    traces:
+      span_name_as_resource_name: true
+      span_name_remappings:
+        "old_name1": "new_name"
+
+connectors:
+  datadog/connector:
+    traces:
+      span_name_as_resource_name: true
+```
+
+{% /tab %}
+
+{% tab title="Datadog Agent" %}
+
+1. Remove `span_name_as_resource_name` and `span_name_remappings` from your Agent's OTLP ingest configuration (`otlp_config` in `datadog.yaml`):
+
+   ```yaml
+   # Remove the highlighted lines if they exist in your configuration
+   otlp_config:
+     traces:
+       span_name_as_resource_name: true
+       span_name_remappings:
+         "old_name1": "new_name"
+   ```
+1. Remove these environment variables if previously set for the Agent:
+   - `DD_OTLP_CONFIG_TRACES_SPAN_NAME_AS_RESOURCE_NAME`
+   - `DD_OTLP_CONFIG_TRACES_SPAN_NAME_REMAPPINGS`
+
+{% /tab %}
+
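+If you removed these options from a standalone Collector configuration, you can check that the file still parses before restarting the Collector. A minimal sketch, assuming your Collector build provides the `validate` subcommand (availability depends on your Collector version and distribution):
+
+```shell
+# Dry-run check of the Collector configuration after removing the legacy options
+otelcol validate --config=config.yaml
+```
+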
+## Enabling the new logic (opt-in){% #enabling-the-new-logic-opt-in %}
+
+If you are using a version of the Datadog Agent or OpenTelemetry Collector earlier than the versions listed in the Default rollout schedule, you can enable the new logic using the following methods. Datadog strongly recommends enabling this logic and adapting your monitors and dashboards.
+
+{% tab title="OpenTelemetry Collector" %}
+Launch the OpenTelemetry Collector with the feature gate (requires Collector v0.98.0+):
+
+```shell
+otelcol --config=config.yaml --feature-gates=datadog.EnableOperationAndResourceNameV2
+```
+
+{% /tab %}
+
+{% tab title="Datadog Agent" %}
+Enable the feature for OTLP ingest using one of these methods:
+
+- Add the feature flag to your Agent configuration (`datadog.yaml`):
+
+  ```yaml
+  # datadog.yaml
+  apm_config:
+    features: ["enable_operation_and_resource_name_logic_v2"]
+  ```
+
+- Set the environment variable for the Agent process:
+
+  ```shell
+  export DD_APM_FEATURES="enable_operation_and_resource_name_logic_v2"
+  ```
+
+**Note:** If appending to existing features, use a comma-separated list, for example: `export DD_APM_FEATURES="existing_feature:true,enable_operation_and_resource_name_logic_v2"`
+
+{% /tab %}
+
+## Disabling the new logic (opt-out){% #disabling-the-new-logic-opt-out %}
+
+If you are using a version where this logic is enabled by default (see the Default rollout schedule), or if you have manually opted in, you can disable it and retain the old operation name behavior using the following methods:
+
+{% tab title="OpenTelemetry Collector" %}
+Launch the OpenTelemetry Collector with the feature gate explicitly disabled using a minus sign (`-`):
+
+```shell
+otelcol --config=config.yaml --feature-gates=-datadog.EnableOperationAndResourceNameV2
+```
+
+{% /tab %}
+
+{% tab title="Datadog Agent" %}
+Disable the feature for OTLP ingest using one of these methods:
+
+- Add the disable flag to your Agent configuration (`datadog.yaml`):
+
+  ```yaml
+  # datadog.yaml
+  apm_config:
+    features: ["disable_operation_and_resource_name_logic_v2"]
+  ```
+
+- Set the environment variable for the Agent process:
+
+  ```shell
+  export DD_APM_FEATURES="disable_operation_and_resource_name_logic_v2"
+  ```
+
+**Note**: If you already have features configured with this variable, use a comma-separated list, ensuring you disable the correct flag:
+
+```shell
+export DD_APM_FEATURES="existing_feature:true,disable_operation_and_resource_name_logic_v2"
+```
+
+{% /tab %}
+
+{% tab title="DDOT Collector" %}
+Because DDOT enables this logic by default, you may need to disable it explicitly:
+
+### Helm{% #helm %}
+
+Pass the feature gate flag to the embedded DDOT Collector using Helm values:
+
+```shell
+helm upgrade -i <RELEASE_NAME> datadog/datadog \
+  -f datadog-values.yaml \
+  --set datadog.otelCollector.featureGates="-datadog.EnableOperationAndResourceNameV2"
+```
+
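+If you prefer to keep this setting in your values file instead of passing it on the command line, the same value can be set in `datadog-values.yaml` (a sketch equivalent to the `--set` flag above; verify the key against your chart version):
+
+```yaml
+# datadog-values.yaml
+datadog:
+  otelCollector:
+    featureGates: "-datadog.EnableOperationAndResourceNameV2"
+```
+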
+### Datadog Operator{% #datadog-operator %}
+
+If you are using the Datadog Operator, use the `DD_APM_FEATURES` environment variable override in `datadog-agent.yaml`:
+
+```yaml
+# datadog-agent.yaml
+apiVersion: datadoghq.com/v2alpha1
+kind: DatadogAgent
+metadata:
+  name: datadog
+spec:
+  override:
+    nodeAgent: # Or clusterAgent depending on where DDOT runs
+      env:
+        - name: DD_APM_FEATURES
+          value: "disable_operation_and_resource_name_logic_v2" # Add comma-separated existing features if needed
+```
+
+{% /tab %}
+
+## Adapting monitors, dashboards, and custom configuration{% #adapting-monitors-dashboards-and-custom-configuration %}
+
+After the new naming logic is active, take the following steps:
+
+1. **(Required) Update monitors and dashboards:**
+   - Review the [New mapping logic](#new-mapping-logic) table to predict how your operation names will change.
+   - Update any monitors or dashboards that query, filter, or group by `operation_name` to use the new expected names.
+   - Update any metric monitors or dashboards that observe metrics derived from traces and that might change because of the new operation names (for example, metrics starting with `trace.*` that are tagged by operation name).
+1. **(Optional) Replicate previous customizations:** If you previously used the configurations removed in [Prerequisites](#prerequisites) (`span_name_as_resource_name` or `span_name_remappings`) and need equivalent functionality, you must now use different methods:
+
+{% tab title="OpenTelemetry Collector" %}
+Use processors in your Collector pipeline:
+
+- **To replicate `span_name_as_resource_name`**: Use a `transform` processor to explicitly set `operation.name` from the span name. This is generally **not recommended** because it overrides the improved standard logic, but it can be used if specific span names are required as operation names.
+  ```yaml
+  processors:
+    transform:
+      trace_statements:
+        - context: span
+          statements:
+            - set(attributes["operation.name"], name)
+  ```
+- **To replicate `span_name_remappings`**: Use a `transform` processor to conditionally set `operation.name`. This allows targeted overrides while letting the default logic handle other cases.
+  ```yaml
+  processors:
+    transform:
+      trace_statements:
+        - context: span
+          statements:
+            # Example: Rename spans named "old_name" to "new_name"
+            - set(attributes["operation.name"], "new_name") where name == "old_name"
+            # Add more specific renaming rules as needed
+  ```
+
+{% /tab %}
+
+{% tab title="Datadog Agent" %}
+To replicate `span_name_as_resource_name` or `span_name_remappings`, set the `operation.name` span attribute directly within your instrumented application code before the span is exported. This gives you the most control if you need specific overrides. Consult the OpenTelemetry SDK documentation for your language on how to modify span attributes.
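+
+For example, a minimal sketch with the OpenTelemetry Python SDK (the tracer name, span name, and operation name shown here are illustrative):
+
+```python
+from opentelemetry import trace
+
+tracer = trace.get_tracer("my-instrumentation")
+
+with tracer.start_as_current_span("process-order") as span:
+    # Explicitly set the Datadog operation name for this span,
+    # replicating what a span name remapping used to do for it.
+    span.set_attribute("operation.name", "orders.process")
+```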
+{% /tab %} + +## Further reading{% #further-reading %} + +- [Mapping Datadog and OpenTelemetry Conventions](http://localhost:1313/opentelemetry/schema_semantics/) diff --git a/opentelemetry-mdoc/pages.json b/opentelemetry-mdoc/pages.json new file mode 100644 index 0000000000000..afcfa2e8309e0 --- /dev/null +++ b/opentelemetry-mdoc/pages.json @@ -0,0 +1,1160 @@ +{ + "compatibility/index.md": { + "metadata": { + "title": "Datadog and OpenTelemetry Compatibility", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Datadog and OpenTelemetry Compatibility" + ] + }, + "mdocHash": "0e70a698507bf8319dde86a4f62b23f1", + "htmlHash": "0ab093d509dd4ebadf865756a2da2924", + "astIsValid": true + }, + "config/collector_batch_memory/index.md": { + "metadata": { + "title": "Batch and Memory Settings", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "OpenTelemetry Configuration", + "Batch and Memory Settings" + ] + }, + "mdocHash": "b57bef9b56e0bf93d71bdd408e3eafb9", + "htmlHash": "d641ec27cb07f77641378e40bf795742", + "astIsValid": true + }, + "config/environment_variable_support/index.md": { + "metadata": { + "title": "Using OpenTelemetry Environment Variables with Datadog SDKs", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "OpenTelemetry Configuration", + "Using OpenTelemetry Environment Variables with Datadog SDKs" + ] + }, + "mdocHash": "0408c75c2a3c19621bdff50b999d1445", + "htmlHash": "383ff2eaecce9a574497d1a77dc3fd6e", + "astIsValid": false + }, + "config/hostname_tagging/index.md": { + "metadata": { + "title": "Hostname and Tagging", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "OpenTelemetry Configuration", + "Hostname and Tagging" + ] + }, + "mdocHash": "72e3b468437ed4c0b3f29217716ee606", + "htmlHash": "f23651f28b6fcf5cc4b87fcda7740a37", + "astIsValid": true + }, + "config/index.md": { + "metadata": { + "title": "OpenTelemetry Configuration", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "OpenTelemetry Configuration" + ] + }, + "mdocHash": "2db4d62a8741c85dd7a2492677d67353", + "htmlHash": "d8adcaff8e7c024d715be8311f4d1c1b", + "astIsValid": true + }, + "config/log_collection/index.md": { + "metadata": { + "title": "Log Collection", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "OpenTelemetry Configuration", + "Log Collection" + ] + }, + "mdocHash": "d1a79968f87101681c4ddd75e00a9f60", + "htmlHash": "5ee68a88e5be99a6dc3ef78ced40388f", + "astIsValid": true + }, + "config/otlp_receiver/index.md": { + "metadata": { + "title": "OTLP Receiver", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "OpenTelemetry Configuration", + "OTLP Receiver" + ] + }, + "mdocHash": "17fed0e33d9c23675f8c68dcf5a31180", + "htmlHash": "f1a009363fa9b8c26ac4e162dec9e4d6", + "astIsValid": true + }, + "correlate/dbm_and_traces/index.md": { + "metadata": { + "title": "Correlate OpenTelemetry Traces and DBM", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + 
"Docs", + "OpenTelemetry in Datadog", + "Correlate OpenTelemetry Data", + "Correlate OpenTelemetry Traces and DBM" + ] + }, + "mdocHash": "bafa0cac3bc3e45215cfdc9f6aefe320", + "htmlHash": "24386b128a1454541e10baa654497173", + "astIsValid": true + }, + "correlate/index.md": { + "metadata": { + "title": "Correlate OpenTelemetry Data", + "description": "Learn how to correlate your OpenTelemetry traces, metrics, logs, and other telemetry in Datadog to get a unified view of your application's performance.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Correlate OpenTelemetry Data" + ] + }, + "mdocHash": "e6617b4590d3246d011c58d36a2d09b6", + "htmlHash": "f8b91fe3c6f6c356a08035ebe3e99666", + "astIsValid": true + }, + "correlate/logs_and_traces/index.md": { + "metadata": { + "title": "Correlate OpenTelemetry Traces and Logs", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Correlate OpenTelemetry Data", + "Correlate OpenTelemetry Traces and Logs" + ] + }, + "mdocHash": "503a7995490794fd99f7c804a3a2746c", + "htmlHash": "9f7fda7b7052c4fb55b24532f1fb03e1", + "astIsValid": true + }, + "correlate/metrics_and_traces/index.md": { + "metadata": { + "title": "Correlate OpenTelemetry Traces and Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Correlate OpenTelemetry Data", + "Correlate OpenTelemetry Traces and Metrics" + ] + }, + "mdocHash": "73c814e538e1b266627293738f4b7523", + "htmlHash": "bde96ab1c857645140e7d7797a07ff1f", + "astIsValid": true + }, + "correlate/rum_and_traces/index.md": { + "metadata": { + "title": "Correlate RUM and Traces", + "description": "Learn how to integrate Real User Monitoring with APM.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Correlate OpenTelemetry Data", + "Correlate RUM and Traces" + ] + }, + "mdocHash": "f33ad7f167d738a5c4201082790d634a", + "htmlHash": "d7389ff9949e5363666d088f6e7eb86e", + "astIsValid": true + }, + "getting_started/datadog_example/index.md": { + "metadata": { + "title": "Getting Started with OpenTelemetry at Datadog", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Getting Started with OpenTelemetry at Datadog", + "Getting Started with OpenTelemetry at Datadog" + ] + }, + "mdocHash": "599f2b09de851b4d12061f01b81e366c", + "htmlHash": "a7b3ce91738dddfa7152d80e269cc8c5", + "astIsValid": false + }, + "getting_started/index.md": { + "metadata": { + "title": "Getting Started with OpenTelemetry at Datadog", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Getting Started with OpenTelemetry at Datadog" + ] + }, + "mdocHash": "a9e20e2db6bce8590be7295380df602f", + "htmlHash": "5731a83dd5c436d3b1d7b6c32f6996d7", + "astIsValid": true + }, + "getting_started/otel_demo_to_datadog/index.md": { + "metadata": { + "title": "Sending Data from the OpenTelemetry Demo to Datadog", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Getting Started with OpenTelemetry at Datadog", + "Sending Data from the OpenTelemetry Demo to Datadog" + ] + }, + "mdocHash": "5dd46b1b51e0fbfce27edaafb959fb9b", + "htmlHash": "35f61d2581cb3e0103569188af7663b6", + "astIsValid": true + }, + 
"guide/otlp_delta_temporality/index.md": { + "metadata": { + "title": "Producing Delta Temporality Metrics with OpenTelemetry", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Guides", + "Producing Delta Temporality Metrics with OpenTelemetry" + ] + }, + "mdocHash": "882f0b9eb0c14c413929cdcdc00a15d6", + "htmlHash": "f97bf2e7b3d5baadd48a3d5cb2aef95b", + "astIsValid": true + }, + "guide/otlp_histogram_heatmaps/index.md": { + "metadata": { + "title": "Visualize OTLP Histograms as Heatmaps", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Guides", + "Visualize OTLP Histograms as Heatmaps" + ] + }, + "mdocHash": "d41210875b20b31fb07466279995987b", + "htmlHash": "d8b683d42eef4e988040ec77e605361b", + "astIsValid": true + }, + "index.md": { + "metadata": { + "title": "OpenTelemetry in Datadog", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog" + ] + }, + "mdocHash": "f8131c32d62031e497e83fb00e1bc2ee", + "htmlHash": "b35f2691bcbb594ae7633e22283bb8d2", + "astIsValid": true + }, + "ingestion_sampling/index.md": { + "metadata": { + "title": "Ingestion Sampling with OpenTelemetry", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Ingestion Sampling with OpenTelemetry" + ] + }, + "mdocHash": "30fbec506508598765980170280e1b67", + "htmlHash": "953c29ff68b1032c992f0f714399edae", + "astIsValid": true + }, + "instrument/api_support/dotnet/index.md": { + "metadata": { + "title": ".NET Custom Instrumentation using the OpenTelemetry API", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Instrument Your Applications", + "OpenTelemetry API Support", + ".NET Custom Instrumentation using the OpenTelemetry API" + ] + }, + "mdocHash": "0b308830e6ceb0c4d6fe8fca505a05c8", + "htmlHash": "b2950f2fa120fdd2c9db902968bdcbdb", + "astIsValid": true + }, + "instrument/api_support/go/index.md": { + "metadata": { + "title": "Go Custom Instrumentation using the OpenTelemetry API", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Instrument Your Applications", + "OpenTelemetry API Support", + "Go Custom Instrumentation using the OpenTelemetry API" + ] + }, + "mdocHash": "0e1128b93bcce03f0e0cab31b8338ea1", + "htmlHash": "b77e37df6bfb5ecda80435a61e9d131e", + "astIsValid": true + }, + "instrument/api_support/index.md": { + "metadata": { + "title": "OpenTelemetry API Support", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Instrument Your Applications", + "OpenTelemetry API Support" + ] + }, + "mdocHash": "cb51341cf6d9021992649a7541d9b720", + "htmlHash": "662dfe9d398afc4b463b9453606a8d94", + "astIsValid": true + }, + "instrument/api_support/java/index.md": { + "metadata": { + "title": "Java Custom Instrumentation using the OpenTelemetry API", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Instrument Your Applications", + "OpenTelemetry API Support", + "Java Custom Instrumentation using the OpenTelemetry API" + ] + }, + 
"mdocHash": "b0ddcb1b7b33e17033557d419a50d5b7", + "htmlHash": "6af08a27bef40e78514a51819e597b79", + "astIsValid": true + }, + "instrument/api_support/nodejs/index.md": { + "metadata": { + "title": "Node.js Custom Instrumentation using the OpenTelemetry API", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Instrument Your Applications", + "OpenTelemetry API Support", + "Node.js Custom Instrumentation using the OpenTelemetry API" + ] + }, + "mdocHash": "55cc7935831b01a6ba6ed488106ee4cc", + "htmlHash": "2c1b5f50eaea13d6561bf07786970873", + "astIsValid": true + }, + "instrument/api_support/php/index.md": { + "metadata": { + "title": "PHP Custom Instrumentation using the OpenTelemetry API", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Instrument Your Applications", + "OpenTelemetry API Support", + "PHP Custom Instrumentation using the OpenTelemetry API" + ] + }, + "mdocHash": "4ce64345f8d45f485f54184cc2b40549", + "htmlHash": "f2f78d7810b67faeb76382425105ff65", + "astIsValid": true + }, + "instrument/api_support/python/index.md": { + "metadata": { + "title": "Python Custom Instrumentation using the OpenTelemetry API", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Instrument Your Applications", + "OpenTelemetry API Support", + "Python Custom Instrumentation using the OpenTelemetry API" + ] + }, + "mdocHash": "4576f043c640acf58d15b781a8d41f30", + "htmlHash": "96b51b939decdc73bf289898c145fe34", + "astIsValid": true + }, + "instrument/api_support/ruby/index.md": { + "metadata": { + "title": "Ruby Custom Instrumentation using the OpenTelemetry API", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Instrument Your Applications", + "OpenTelemetry API Support", + "Ruby Custom Instrumentation using the OpenTelemetry API" + ] + }, + "mdocHash": "11cec7a3b83eeb90d6ad7dd47e23f7cc", + "htmlHash": "188fcddf73a667adbff6c42b7dcd322f", + "astIsValid": true + }, + "instrument/index.md": { + "metadata": { + "title": "Instrument Your Applications", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Instrument Your Applications" + ] + }, + "mdocHash": "6e0c4b721608fed98c33fa4b528c310a", + "htmlHash": "52067615764a2413927794c36ec14de4", + "astIsValid": true + }, + "instrument/instrumentation_libraries/index.md": { + "metadata": { + "title": "Using OpenTelemetry Instrumentation Libraries with Datadog SDKs", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Instrument Your Applications", + "Using OpenTelemetry Instrumentation Libraries with Datadog SDKs" + ] + }, + "mdocHash": "c5c1afbfd6d206c4c7683e7d2dcc7573", + "htmlHash": "661ec38527769ba1cb75717d03b62aef", + "astIsValid": true + }, + "instrument/otel_sdks/index.md": { + "metadata": { + "title": "OpenTelemetry SDKs", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Instrument Your Applications", + "OpenTelemetry SDKs" + ] + }, + "mdocHash": "ad4183da882dd9ec57933d28ecbecd77", + "htmlHash": "a93801b6aecccc2f9910a19fcf8841e9", + "astIsValid": 
true + }, + "integrations/apache_metrics/index.md": { + "metadata": { + "title": "Apache Web Server Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "Apache Web Server Metrics" + ] + }, + "mdocHash": "84ad151141b8930c2ccc62354e4134e5", + "htmlHash": "fb9b0c7861b816c6cc3cf4f4f216af00", + "astIsValid": true + }, + "integrations/collector_health_metrics/index.md": { + "metadata": { + "title": "Health Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "Health Metrics" + ] + }, + "mdocHash": "00fb4ba4ebda0e055251a9413114a5e2", + "htmlHash": "510bcf48043bc6c7460f64ff2a415df8", + "astIsValid": true + }, + "integrations/datadog_extension/index.md": { + "metadata": { + "title": "Datadog Extension", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "Datadog Extension" + ] + }, + "mdocHash": "b04368b5e18b61b9d4d61a54796df651", + "htmlHash": "d6daa5b1c1ed6aaf43e741bd23e54481", + "astIsValid": true + }, + "integrations/docker_metrics/index.md": { + "metadata": { + "title": "Docker Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "Docker Metrics" + ] + }, + "mdocHash": "6d7b2dfb6ea7e1e2fcc39e5f8d17e745", + "htmlHash": "849d6a479824d7e96d019228d3d09395", + "astIsValid": true + }, + "integrations/haproxy_metrics/index.md": { + "metadata": { + "title": "HAProxy Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "HAProxy Metrics" + ] + }, + "mdocHash": "2de1fa8a7d1e4eb06b7ae5e680374cd9", + "htmlHash": "ef6767ff8f7966c55e9cdb958ce4d1ce", + "astIsValid": true + }, + "integrations/host_metrics/index.md": { + "metadata": { + "title": "Host Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "Host Metrics" + ] + }, + "mdocHash": "62e4bee24a433236e07c20cbd5009895", + "htmlHash": "125387f8ef502dc6a6de42d3869fce00", + "astIsValid": true + }, + "integrations/iis_metrics/index.md": { + "metadata": { + "title": "IIS Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "IIS Metrics" + ] + }, + "mdocHash": "5be04860a5052ca0717160cf4b41f770", + "htmlHash": "1e6c0ac24c03af24d76f8a6e01e52085", + "astIsValid": true + }, + "integrations/index.md": { + "metadata": { + "title": "Integrations", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations" + ] + }, + "mdocHash": "c7774216f488f5c950b6424bcbcfefda", + "htmlHash": "2536103579ad85c5a0ae5d626f53b764", + "astIsValid": true + }, + "integrations/kafka_metrics/index.md": { + "metadata": { + "title": "Kafka Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "Kafka Metrics" + ] + }, + "mdocHash": "1ec5e296526653a236a157a49871d739", + "htmlHash": "77ef8e56b871a98912d66d4042f8497d", + 
"astIsValid": true + }, + "integrations/kubernetes_metrics/index.md": { + "metadata": { + "title": "Kubernetes Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "Kubernetes Metrics" + ] + }, + "mdocHash": "94a261c7a16c07c614070a713d650e22", + "htmlHash": "2eb18722a0dd4facad177d426713d54a", + "astIsValid": true + }, + "integrations/mysql_metrics/index.md": { + "metadata": { + "title": "MySQL Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "MySQL Metrics" + ] + }, + "mdocHash": "bd44e0450e015664d0f629b46873c672", + "htmlHash": "bed6ced2461b086bb3657ef81fed3479", + "astIsValid": true + }, + "integrations/nginx_metrics/index.md": { + "metadata": { + "title": "NGINX Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "NGINX Metrics" + ] + }, + "mdocHash": "003a0d69e06b73e3b5c42dbb3b0fccca", + "htmlHash": "5135975f40ab1c05e8649a067b82d0df", + "astIsValid": true + }, + "integrations/podman_metrics/index.md": { + "metadata": { + "title": "Podman Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "Podman Metrics" + ] + }, + "mdocHash": "bbda422aec9ae1beba2dfc26eb049ad6", + "htmlHash": "ad4dba3ea21ca4b9951d293eb6524867", + "astIsValid": true + }, + "integrations/runtime_metrics/index.md": { + "metadata": { + "title": "OpenTelemetry Runtime Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "OpenTelemetry Runtime Metrics" + ] + }, + "mdocHash": "9a8a0ef0bc425bcbea965e0b96c17880", + "htmlHash": "c01dff4e0b72b11a195604adf3bbeac7", + "astIsValid": true + }, + "integrations/spark_metrics/index.md": { + "metadata": { + "title": "Apache Spark Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "Apache Spark Metrics" + ] + }, + "mdocHash": "7a897c3864467f6a8b06a493a7f38cd8", + "htmlHash": "29e21d7f1070d3ec6c46493c6949bd51", + "astIsValid": true + }, + "integrations/trace_metrics/index.md": { + "metadata": { + "title": "Trace Metrics", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Integrations", + "Trace Metrics" + ] + }, + "mdocHash": "fc6d622acb83dbcb896e3319fd73e2f8", + "htmlHash": "fa462ecc675da445581e7a602d1f9432", + "astIsValid": true + }, + "mapping/host_metadata/index.md": { + "metadata": { + "title": "Infrastructure List Host Information", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Semantic Mapping", + "Infrastructure List Host Information" + ] + }, + "mdocHash": "146da9e067087c913c5b8156e6ff4535", + "htmlHash": "cd5c340d11e3a19338c63a89a1a3695b", + "astIsValid": true + }, + "mapping/hostname/index.md": { + "metadata": { + "title": "Mapping OpenTelemetry Semantic Conventions to Hostnames", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Semantic 
Mapping", + "Mapping OpenTelemetry Semantic Conventions to Hostnames" + ] + }, + "mdocHash": "6d23182f500ddef22014eddfcd527942", + "htmlHash": "7a00090582738dd2972381f6d05def64", + "astIsValid": true + }, + "mapping/index.md": { + "metadata": { + "title": "Semantic Mapping", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Semantic Mapping" + ] + }, + "mdocHash": "31d65e9138d2fb3058ba3e9bd90669e3", + "htmlHash": "bb5a3fcf146ffec743d8959aa5f27e11", + "astIsValid": true + }, + "mapping/metrics_mapping/index.md": { + "metadata": { + "title": "OpenTelemetry Metrics Mapping", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Semantic Mapping", + "OpenTelemetry Metrics Mapping" + ] + }, + "mdocHash": "3b5c751929441421ac6f9f2a32d71632", + "htmlHash": "90e93d5227eb3511b12c61df1176f3b1", + "astIsValid": true + }, + "mapping/semantic_mapping/index.md": { + "metadata": { + "title": "OpenTelemetry Semantic Conventions and Datadog Conventions", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Semantic Mapping", + "OpenTelemetry Semantic Conventions and Datadog Conventions" + ] + }, + "mdocHash": "e6e8e0c210b2ecde456fa869d8e26c1e", + "htmlHash": "011d1936b721f8ca0a83dcfecd1a5e6e", + "astIsValid": true + }, + "mapping/service_entry_spans/index.md": { + "metadata": { + "title": "Mapping OpenTelemetry Semantic Conventions to Service-entry Spans", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Semantic Mapping", + "Mapping OpenTelemetry Semantic Conventions to Service-entry Spans" + ] + }, + "mdocHash": "f606bcfe53bf25132d77377d147a2b96", + "htmlHash": "d409692b0e13e2d4211955fa6381e665", + "astIsValid": true + }, + "migrate/collector_0_120_0/index.md": { + "metadata": { + "title": "Migrate to OpenTelemetry Collector version 0.120.0+", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "OpenTelemetry Migration Guides", + "Migrate to OpenTelemetry Collector version 0.120.0+" + ] + }, + "mdocHash": "72fc46a731e6f3dccd393d4b68e03390", + "htmlHash": "ba9c9f225463b4871880e17faa329f64", + "astIsValid": true + }, + "migrate/collector_0_95_0/index.md": { + "metadata": { + "title": "Migrate to OpenTelemetry Collector version 0.95.0+", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "OpenTelemetry Migration Guides", + "Migrate to OpenTelemetry Collector version 0.95.0+" + ] + }, + "mdocHash": "60d58325b924c766812b5fe2217c6a86", + "htmlHash": "eb254f6c8f38fa70e813dc908c31bbdf", + "astIsValid": true + }, + "migrate/ddot_collector/index.md": { + "metadata": { + "title": "Migrate to the Datadog Distribution of OTel Collector", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "OpenTelemetry Migration Guides", + "Migrate to the Datadog Distribution of OTel Collector" + ] + }, + "mdocHash": "e223d7a62ae5c5d281f24ebcc979b938", + "htmlHash": "1a8257cde54cf9cc2efa878e3dd6bc64", + "astIsValid": false + }, + "migrate/index.md": { + "metadata": { + "title": "OpenTelemetry Migration Guides", + "description": 
"Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "OpenTelemetry Migration Guides" + ] + }, + "mdocHash": "fc08a5e814616e0b00c48904fda17433", + "htmlHash": "09e1e922faccc7084f3523b16ec95b78", + "astIsValid": true + }, + "migrate/migrate_operation_names/index.md": { + "metadata": { + "title": "Migrate to New Operation Name Mappings", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "OpenTelemetry Migration Guides", + "Migrate to New Operation Name Mappings" + ] + }, + "mdocHash": "e801cb8a1a6bca37fdb1d0008c279133", + "htmlHash": "caf8ccc2bb5b9d2552c0e1a522b2ec7f", + "astIsValid": true + }, + "reference/concepts/index.md": { + "metadata": { + "title": "OpenTelemetry Terms and Concepts", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Reference", + "OpenTelemetry Terms and Concepts" + ] + }, + "mdocHash": "0d0d18ac39f2e1af5c01dfcf94f252fc", + "htmlHash": "3448c52f722ac803cb12c94591f24c19", + "astIsValid": true + }, + "reference/index.md": { + "metadata": { + "title": "Reference", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Reference" + ] + }, + "mdocHash": "fc2a83b14c541a60d1cc8d5e5f2a916c", + "htmlHash": "e7b018d34615a689a80905dbd3630625", + "astIsValid": true + }, + "reference/otlp_metric_types/index.md": { + "metadata": { + "title": "OTLP Metrics Types", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Reference", + "OTLP Metrics Types" + ] + }, + "mdocHash": "18c330a218fe3f7ed8e7838d07a4a42f", + "htmlHash": "9f187ed458e64e40541827b82fdc1bcb", + "astIsValid": true + }, + "reference/trace_context_propagation/index.md": { + "metadata": { + "title": "Trace Context Propagation", + "description": "Extract and inject Datadog, B3, and W3C Trace Context headers to propagate the context of a distributed trace.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Reference", + "Trace Context Propagation" + ] + }, + "mdocHash": "2d1ccac70dfdaa79d89362c0cd895c1d", + "htmlHash": "ff55ebb3fcbe41a895adc3708810f219", + "astIsValid": true + }, + "reference/trace_ids/index.md": { + "metadata": { + "title": "Trace IDs", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Reference", + "Trace IDs" + ] + }, + "mdocHash": "9d7a23d5ae43d4c046c2c0c8d4b37870", + "htmlHash": "9f04604a8f753db6ee05a1119f234733", + "astIsValid": true + }, + "setup/agent/index.md": { + "metadata": { + "title": "Datadog Agent", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Datadog Agent" + ] + }, + "mdocHash": "1657309fb405ca770c7e27ab5caaaac2", + "htmlHash": "5c158199cff959ba9e211cef76be5abf", + "astIsValid": true + }, + "setup/collector_exporter/deploy/index.md": { + "metadata": { + "title": "Deploy the OpenTelemetry Collector", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Install and Configure the OpenTelemetry Collector", + "Deploy the 
OpenTelemetry Collector" + ] + }, + "mdocHash": "de70ecee87b7ff0456c909fbc6890644", + "htmlHash": "c4b07fc2fa098d583fa80d38579be83b", + "astIsValid": true + }, + "setup/collector_exporter/index.md": { + "metadata": { + "title": "Install and Configure the OpenTelemetry Collector", + "description": "Send OpenTelemetry data to the OpenTelemetry Collector and Datadog Exporter", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Install and Configure the OpenTelemetry Collector" + ] + }, + "mdocHash": "20d22b6697778f4dbcbb5a21db1d9c04", + "htmlHash": "1f19710a97e86d506a16fd584d75b655", + "astIsValid": true + }, + "setup/collector_exporter/install/index.md": { + "metadata": { + "title": "Set Up the OpenTelemetry Collector", + "description": "Send OpenTelemetry data to the OpenTelemetry Collector and Datadog Exporter", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Install and Configure the OpenTelemetry Collector", + "Set Up the OpenTelemetry Collector" + ] + }, + "mdocHash": "2e8210157d137e9139cf63b26787b386", + "htmlHash": "37dbf0344ed4110112a3c7c1967874e9", + "astIsValid": true + }, + "setup/ddot_collector/custom_components/index.md": { + "metadata": { + "title": "Use Custom OpenTelemetry Components with Datadog Distribution of OpenTelemetry (DDOT) Collector", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Datadog Distribution of OpenTelemetry Collector", + "Use Custom OpenTelemetry Components with Datadog Distribution of OpenTelemetry (DDOT) Collector" + ] + }, + "mdocHash": "0e131f1ef3ae7feff474498ecf857a9c", + "htmlHash": "25d928843f164ea6481f8b701ad37e91", + "astIsValid": false + }, + "setup/ddot_collector/index.md": { + "metadata": { + "title": "Datadog Distribution of OpenTelemetry Collector", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Datadog Distribution of OpenTelemetry Collector" + ] + }, + "mdocHash": "0342af3899601d7955c7a250be783881", + "htmlHash": "fbe4722270fcddaf359c26500676f1ed", + "astIsValid": true + }, + "setup/ddot_collector/install/index.md": { + "metadata": { + "title": "Install", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Datadog Distribution of OpenTelemetry Collector", + "Install" + ] + }, + "mdocHash": "7925987cd448cb28023efd4769ae9736", + "htmlHash": "d7da73d4aa47885e2075aa054745387a", + "astIsValid": true + }, + "setup/ddot_collector/install/kubernetes/index.md": { + "metadata": { + "title": "Install the Datadog Distribution of OTel Collector on Kubernetes", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Datadog Distribution of OpenTelemetry Collector", + "Install", + "Install the Datadog Distribution of OTel Collector on Kubernetes" + ] + }, + "mdocHash": "f89f1170b0bdb0feb660d1c4fe7e1686", + "htmlHash": "addf64986f14902021f08004a0ec7998", + "astIsValid": true + }, + "setup/ddot_collector/install/linux/index.md": { + "metadata": { + "title": "Install the Datadog Distribution of OTel Collector on Linux", + "description": "Datadog, the 
leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Datadog Distribution of OpenTelemetry Collector", + "Install", + "Install the Datadog Distribution of OTel Collector on Linux" + ] + }, + "mdocHash": "264cf1bf4959cf51a03049b7039846f0", + "htmlHash": "573e17520bcbf389218e576399f0d188", + "astIsValid": true + }, + "setup/index.md": { + "metadata": { + "title": "Send OpenTelemetry Data to Datadog", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog" + ] + }, + "mdocHash": "c71f25343913d2e1db1e3267d3b89095", + "htmlHash": "2e6d0744447701521e0cbad4ec9a22fc", + "astIsValid": true + }, + "setup/otlp_ingest_in_the_agent/index.md": { + "metadata": { + "title": "OTLP Ingestion by the Datadog Agent", + "description": "Ingest OTLP trace data through the Datadog Agent", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "OTLP Ingestion by the Datadog Agent" + ] + }, + "mdocHash": "b8af864d940b2c2b5dc0b3cca8952d4a", + "htmlHash": "b3ea13fb5e88450ff6c97811ba516702", + "astIsValid": true + }, + "setup/otlp_ingest/index.md": { + "metadata": { + "title": "Datadog OTLP Intake Endpoint", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Datadog OTLP Intake Endpoint" + ] + }, + "mdocHash": "caf45c74a832de507a0f4c279c06e068", + "htmlHash": "59e659d55615fd1dfcd2d2edcf82bd7c", + "astIsValid": true + }, + "setup/otlp_ingest/logs/index.md": { + "metadata": { + "title": "Datadog OTLP Logs Intake Endpoint", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Datadog OTLP Intake Endpoint", + "Datadog OTLP Logs Intake Endpoint" + ] + }, + "mdocHash": "58236fb8a763d38462f1e55d9bec23f1", + "htmlHash": "7a61206f2e882562a66a770cd80530ce", + "astIsValid": true + }, + "setup/otlp_ingest/metrics/index.md": { + "metadata": { + "title": "Datadog OTLP Metrics Intake Endpoint", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Send OpenTelemetry Data to Datadog", + "Datadog OTLP Intake Endpoint", + "Datadog OTLP Metrics Intake Endpoint" + ] + }, + "mdocHash": "47aabcbb942c947dc08571b8d2da7237", + "htmlHash": "66ed629a49d25cd332311bca61e37c7c", + "astIsValid": true + }, + "troubleshooting/index.md": { + "metadata": { + "title": "Troubleshooting", + "description": "Datadog, the leading service for cloud-scale monitoring.", + "breadcrumbs": [ + "Docs", + "OpenTelemetry in Datadog", + "Troubleshooting" + ] + }, + "mdocHash": "5e26e7df618d55819cea57c3b00791c5", + "htmlHash": "861556379531772e10bdb8f14894fd4b", + "astIsValid": true + } +} \ No newline at end of file diff --git a/opentelemetry-mdoc/reference/concepts/index.md b/opentelemetry-mdoc/reference/concepts/index.md new file mode 100644 index 0000000000000..033af3494a47f --- /dev/null +++ b/opentelemetry-mdoc/reference/concepts/index.md @@ -0,0 +1,24 @@ +--- +title: OpenTelemetry Terms and Concepts +description: Datadog, the leading service for cloud-scale monitoring. 
+breadcrumbs: Docs > OpenTelemetry in Datadog > Reference > OpenTelemetry Terms and Concepts +--- + +# OpenTelemetry Terms and Concepts + +This page describes essential terms and concepts for OpenTelemetry and Datadog. For additional definitions and descriptions, see the [OpenTelemetry Glossary](https://opentelemetry.io/docs/concepts/glossary/). + +| Concept | Description | +| ----------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Telemetry | The collection of metrics, logs, traces, and profiles that provide observations about the behaviors and performance of applications and infrastructure. | +| [OpenTelemetry Collector](http://localhost:1313/opentelemetry/collector_exporter/) | A vendor-agnostic implementation for collecting and exporting telemetry data emitted by many processes. It can be configured to receive, process, and export telemetry to one or multiple destinations including storage backends and analysis tools. | +| [Datadog Exporter](http://localhost:1313/opentelemetry/collector_exporter/otel_collector_datadog_exporter/) | An OTel Collector component that lets you forward trace, metric, and logs data from OpenTelemetry SDKs to Datadog. | +| [OTLP Receiver](http://localhost:1313/opentelemetry/collector_exporter/otlp_receiver/) | A component within the OpenTelemetry Collector responsible for accepting telemetry data in the OpenTelemetry Protocol (OTLP) format. OTLP is the native protocol for OpenTelemetry, designed for transferring telemetry data between the SDKs and the Collector. | +| [Context Propagation](http://localhost:1313/opentelemetry/reference/trace_context_propagation/) | The mechanism used in distributed tracing to maintain trace context across different services. | +| [Semantic Conventions](http://localhost:1313/opentelemetry/schema_semantics/semantic_mapping/) | Standardized naming patterns and attribute definitions that establish consistent terminology for telemetry data across different systems and implementations. These conventions ensure that data collected from diverse sources can be uniformly processed and analyzed. | + +## Further reading{% #further-reading %} + +- [OpenTelemetry Concepts](https://opentelemetry.io/docs/concepts/) +- [OpenTelemetry Glossary](https://opentelemetry.io/docs/concepts/glossary/) +- [Datadog Glossary](https://docs.datadoghq.com/glossary/) diff --git a/opentelemetry-mdoc/reference/index.md b/opentelemetry-mdoc/reference/index.md new file mode 100644 index 0000000000000..7f92ad5ef6de0 --- /dev/null +++ b/opentelemetry-mdoc/reference/index.md @@ -0,0 +1,17 @@ +--- +title: Reference +description: Datadog, the leading service for cloud-scale monitoring. 
+breadcrumbs: Docs > OpenTelemetry in Datadog > Reference +--- + +# Reference + +Reference and conceptual documentation for OpenTelemetry and Datadog: + +- [Terms and concepts](http://localhost:1313/opentelemetry/reference/concepts) +- [Trace context propagation](http://localhost:1313/opentelemetry/reference/trace_context_propagation) +- [OTLP metric types](http://localhost:1313/opentelemetry/reference/otlp_metric_types) + +## Further reading{% #further-reading %} + +- [OpenTelemetry Guides](http://localhost:1313/opentelemetry/guide) diff --git a/opentelemetry-mdoc/reference/otlp_metric_types/index.md b/opentelemetry-mdoc/reference/otlp_metric_types/index.md new file mode 100644 index 0000000000000..11e127e04632d --- /dev/null +++ b/opentelemetry-mdoc/reference/otlp_metric_types/index.md @@ -0,0 +1,241 @@ +--- +title: OTLP Metrics Types +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Reference > OTLP Metrics Types +--- + +# OTLP Metrics Types + +## Overview{% #overview %} + +The Datadog Agent and the OpenTelemetry Collector Datadog exporter can ingest metrics in the OpenTelemetry format (OTLP), which can be produced by OpenTelemetry-instrumented applications. + +The following OTLP metric types can be ingested by the Datadog Agent and the OpenTelemetry Collector Datadog exporter: + +- Sums +- Gauges +- Histograms +- Summaries + +These OTLP metric types are mapped to Datadog metric types: + +- COUNT +- GAUGE +- DISTRIBUTION + +A single OTLP metric may be mapped to several Datadog metrics with a suffix indicating their meaning. + +**Note**: OpenTelemetry provides metric API instruments (`Gauge`, `Counter`, `UpDownCounter`, `Histogram`, and so on), whose measurements can be exported as OTLP metrics (Sum, Gauge, Histogram). Other sources for OTLP metrics are possible. Applications and libraries may provide customization into the OTLP metrics they produce. Read the documentation of your OpenTelemetry SDK or OTLP-producing application to understand the OTLP metrics produced and how to customize them. + +**Note**: OpenTelemetry protocol supports two ways of representing metrics in time: [Cumulative and Delta temporality](https://opentelemetry.io/docs/reference/specification/metrics/data-model/#temporality), affecting the metrics described below. Set the temporality preference of the OpenTelemetry implementation to **DELTA**, because setting it to CUMULATIVE may discard some data points during application (or collector) startup. For more information, read [Producing Delta Temporality Metrics with OpenTelemetry](http://localhost:1313/opentelemetry/guide/otlp_delta_temporality/). + +## Metric types{% #metric-types %} + +### Mapping{% #mapping %} + +{% tab title="Sum" %} +An OTLP Sum represents a sum of reported measurements over a time window. For example, a Sum can be used to track the total number of connections made to a database or the total number of requests to an endpoint. Sums have two features that influence the mapping: + +- *Aggregation temporality*, which can be cumulative or delta. Delta metrics have no overlap in their time windows, while cumulative metrics represent a time window from a fixed start point in time. +- *Monotonicity*. Monotonic sums never decrease and only support adding to the underlying count. + +The default mapping is as follows: + +1. For cumulative monotonic sums, the delta between consecutive points is calculated and reported to Datadog as a count. The first point is stored but omitted. 
To recover the value in the OTLP payload, use the [`cumsum` arithmetic function](http://localhost:1313/dashboards/functions/arithmetic/#cumulative-sum). +1. Cumulative non-monotonic sums are exported as Datadog gauges. +1. Delta sums are exported as Datadog counts. + +{% /tab %} + +{% tab title="Gauge" %} +An OTLP Gauge represents a sampled value at a given time. Only the last value on a given time window is included in the OTLP metrics. + +OTLP Gauges are mapped to Datadog Gauges, since they do not provide an aggregation semantic. Both integer and floating-point Gauge data points are mapped to floating point numbers in the Datadog format. +{% /tab %} + +{% tab title="Histogram" %} +An OTLP Histogram represents the statistical distribution of a set of values on a given time window, by storing certain aggregation metrics such as the population sum or count together with a series of bucket counts. Histograms have one feature that influences the mapping: + +- *Aggregation temporality*, which can be cumulative or delta. Delta metrics have no overlap in their time windows, while cumulative metrics represent a time window from a fixed start point in time. + +The default mapping is as follows: + +1. Delta histograms are reported as Datadog distributions. [Read more about distributions](http://localhost:1313/metrics/distributions) to understand the available aggregations. Histograms with a count of 0 are dropped. +1. For cumulative histograms, the delta between consecutive points is calculated and reported to Datadog as a distribution. Deltas with a count of 0 are not reported. You may use the [`cumsum` arithmetic function](http://localhost:1313/dashboards/functions/arithmetic/#cumulative-sum) on individual aggregations to recover the value in the OTLP payload. + +**Note**: Histogram metrics in OTLP are mapped by default to Distribution metrics. Because of how OTLP sends this data, percentile aggregations and the max and min (if not available on the original OTLP data) are approximations, not accurate calculations. + +The Datadog Agent and the OpenTelemetry Collector Datadog exporter allow changing the Histogram export in the `histogram` subsection. + +- If the `mode` is set to `counters`, the following metrics are produced: + +{% dl %} + +{% dt %} +`.bucket`, tagged by `lower_bound` and `upper_bound` +{% /dt %} + +{% dd %} +Bucket count in the time window for the bucket with the specified lower and upper bounds.**Datadog In-App Type**: COUNT +{% /dd %} + +{% /dl %} + +- If the `send_aggregation_metrics` flag is enabled, the following metrics are produced: + +{% dl %} + +{% dt %} +`.sum` +{% /dt %} + +{% dd %} +Sum of the values submitted during the time window.**Datadog In-App Type**: COUNT +{% /dd %} + +{% dt %} +`.count` +{% /dt %} + +{% dd %} +Number of values submitted during the time window.**Datadog In-App Type**: COUNT +{% /dd %} + +{% dt %} +`.min` +{% /dt %} + +{% dd %} +Minimum of values submitted during the time window. Only available for delta OTLP Histograms. Available since: Datadog exporter v0.75.0 and Datadog Agent v6.45.0 and v7.45.0.**Datadog In-App Type**: GAUGE +{% /dd %} + +{% dt %} +`.max` +{% /dt %} + +{% dd %} +Maximum of values submitted during the time window. Only available for delta OTLP Histograms. Available since: Datadog exporter v0.75.0 and Datadog Agent v6.45.0 and v7.45.0.**Datadog In-App Type**: GAUGE +{% /dd %} + +{% /dl %} + +**Note**: `send_aggregation_metrics` is useful only when not using the distributions mode. 
Before the Datadog exporter v0.75.0 and the Datadog Agent v6.45.0 and v7.45.0 use `send_count_sum_metrics` instead. +{% /tab %} + +{% tab title="Summary" %} +An OTLP Summary is a legacy type that conveys quantile information about a population over a time window. OTLP Summary types are not produced by OpenTelemetry SDKs but may be produced by other components for backwards compatibility. + +{% dl %} + +{% dt %} +`.sum` +{% /dt %} + +{% dd %} +Sum of the values since the application started producing the metric.**Datadog In-App Type**: COUNT +{% /dd %} + +{% dt %} +`.count` +{% /dt %} + +{% dd %} +Number of values in the population .**Datadog In-App Type**: COUNT +{% /dd %} + +{% dt %} +`.quantile`, tagged by `quantile` +{% /dt %} + +{% dd %} +Value of a given quantile.**Datadog In-App Type**: GAUGE +{% /dd %} + +{% /dl %} + +{% /tab %} + +### Attribute mapping{% #attribute-mapping %} + +OTLP supports two kinds of attributes: datapoint-level attributes and resource attributes. These attributes may follow OpenTelemetry semantic conventions and have well-known semantics. + +The Datadog Agent and the OpenTelemetry Collector Datadog exporter map the datapoints-level attributes as tags. Resource attributes following OpenTelemetry semantic conventions are mapped to the equivalent Datadog conventions if they exist. + +You may add all resource attributes as tags by using the `resource_attributes_as_tags` flag. + +### Example{% #example %} + +{% tab title="Sum" %} +Suppose you are using an OpenTelemetry Counter instrument from a single application, which, by default, exports metrics of a cumulative **monotonic** Sum type. The following table summarizes Datadog behavior: + +| Collection period | Counter values | OTLP Sum value | Value reported to Datadog | Datadog In-App Type | Notes | +| ----------------- | ----------------- | -------------- | ------------------------- | ------------------- | ---------------------------------------------- | +| \#1 | [1,1,1,2,2,2,3,3] | 15 | None | COUNT | First collection period value is not reported. | +| \#2 | [3,4,1,2] | 25 | 10 | COUNT | The difference between values is reported. | +| \#3 | [] | 25 | 0 | COUNT | No new values were reported in this period. | + +Suppose you are using an OpenTelemetry UpDownCounter instrument from a single application, which, by default, exports metrics of a cumulative Sum type. The following table summarizes Datadog behavior: + +| Collection period | UpDownCounter values | OTLP Sum value | Value reported to Datadog | Datadog In-App Type | +| ----------------- | -------------------- | -------------- | ------------------------- | ------------------- | +| \#1 | [1,1,1,2,2,2,3,3] | 15 | 15 | GAUGE | +| \#2 | [3,-4,1,2] | 17 | 17 | GAUGE | +| \#3 | [-1] | 16 | 16 | GAUGE | + +{% /tab %} + +{% tab title="Gauge" %} +Suppose you are using an OpenTelemetry Gauge instrument, `temperature`, from a single application. The following table summarizes Datadog behavior: + +| Collection period | Gauge instrument | OTLP Gauge value | Value reported to Datadog | Datadog In-App Type | +| ----------------- | ---------------- | ---------------- | ------------------------- | ------------------- | +| \#1 | 71.5 | 71.5 | 71.5 | GAUGE | +| \#2 | 72 | 72 | 72 | GAUGE | +| \#3 | 70 | 70 | 70 | GAUGE | + +{% /tab %} + +{% tab title="Histogram" %} +Suppose you are using an OpenTelemetry Histogram instrument, `request.response_time.histogram`, from two web servers: `webserver:web_1` and `webserver:web_2`. 
Suppose in a given collection period, `webserver:web_1` reports the metric with the values `[1,1,1,2,2,2,3,3]`, and `webserver:web_2` reports the same metric with the values `[1,1,2]`. Over this collection period, the following five aggregations represent the global statistical distribution of all values collected from both web servers: + +| Metric Name | Value | Datadog In-App Type | +| ------------------------------------------ | ------ | ------------------- | +| `avg:request.response_time.distribution` | `1.73` | GAUGE | +| `count:request.response_time.distribution` | `11` | COUNT | +| `max:request.response_time.distribution` | `3` | GAUGE | +| `min:request.response_time.distribution` | `1` | GAUGE | +| `sum:request.response_time.distribution` | `19` | COUNT | + +[Read more about distributions](http://localhost:1313/metrics/distributions) to understand how to configure further aggregations. + +Alternatively, if you are using the `counters` mode, the `send_aggregation_metrics` flag is enabled, and the histogram bucket boundaries are set to `[-inf, 2, inf]`, the following metrics are reported: + +| Metric Name | Value | Tags | Datadog In-App Type | +| ------------------------------------------- | ----- | ----------------------------------- | ------------------- | +| `request.response_time.distribution.count` | `8` | n/a | COUNT | +| `request.response_time.distribution.sum` | `15` | n/a | COUNT | +| `request.response_time.distribution.max` | `3` | n/a | GAUGE | +| `request.response_time.distribution.min` | `1` | n/a | GAUGE | +| `request.response_time.distribution.bucket` | `6` | `lower_bound:-inf`, `upper_bound:2` | GAUGE | +| `request.response_time.distribution.bucket` | `2` | `lower_bound:2`, `upper_bound:inf` | GAUGE | + +{% /tab %} + +{% tab title="Summary" %} +Suppose you are submitting a legacy OTLP Summary metric, `request.response_time.summary`, from one web server. Suppose in a given collection period, the web server reports the metric with the values `[1,1,1,2,2,2,3,3]`. The following metrics would be reported, if min, max, and median quantiles are enabled: + +| Metric Name | Value | Tags | Datadog In-App Type | +| --------------------------------------------- | ----- | -------------- | ------------------- | +| `request.response_time.distribution.count` | `8` | n/a | COUNT | +| `request.response_time.distribution.sum` | `15` | n/a | COUNT | +| `request.response_time.distribution.quantile` | `1` | `quantile:0` | GAUGE | +| `request.response_time.distribution.quantile` | `2` | `quantile:0.5` | GAUGE | +| `request.response_time.distribution.quantile` | `3` | `quantile:1.0` | GAUGE | + +{% /tab %} + +## Further reading{% #further-reading %} + +- [Learn more about distributions](http://localhost:1313/metrics/distributions) +- [Learn more about OpenTelemetry](http://localhost:1313/opentelemetry/) +- [Producing delta temporality metrics with OpenTelemetry](http://localhost:1313/opentelemetry/guide/otlp_delta_temporality/) diff --git a/opentelemetry-mdoc/reference/trace_context_propagation/index.md b/opentelemetry-mdoc/reference/trace_context_propagation/index.md new file mode 100644 index 0000000000000..4a9247160ac8c --- /dev/null +++ b/opentelemetry-mdoc/reference/trace_context_propagation/index.md @@ -0,0 +1,653 @@ +--- +title: Trace Context Propagation +description: >- + Extract and inject Datadog, B3, and W3C Trace Context headers to propagate the + context of a distributed trace. 
+breadcrumbs: Docs > OpenTelemetry in Datadog > Reference > Trace Context Propagation +--- + +# Trace Context Propagation + +Trace Context propagation is the mechanism of passing tracing information like Trace ID, Span ID, and sampling decisions from one part of a distributed application to another. This enables all traces (and additional telemetry) in a request to be correlated. When automatic instrumentation is enabled, trace context propagation is handled automatically by the APM SDK. + +By default, the Datadog SDK extracts and injects distributed tracing headers using the following formats: + +- Datadog (takes higher precedence when extracting headers) +- [W3C Trace Context](https://www.w3.org/TR/trace-context/) +- [Baggage](https://www.w3.org/TR/baggage/) + +This default configuration maximizes compatibility with older Datadog SDK versions and products while allowing interoperability with other distributed tracing systems like OpenTelemetry. + +## Customize trace context propagation{% #customize-trace-context-propagation %} + +You may need to customize the trace context propagation configuration if your applications: + +- Communicate distributed tracing information in a different supported format +- Need to prevent extracting or injecting distributed tracing headers + +Use the following environment variables to configure formats for reading and writing distributed tracing headers. Refer to the Language support section for language-specific configuration values. + +{% dl %} + +{% dt %} +`DD_TRACE_PROPAGATION_STYLE` +{% /dt %} + +{% dd %} +Specifies trace context propagation formats for extraction and injection in a comma-separated list. May be overridden by extract-specific or inject-specific configurations.**Default**: `datadog,tracecontext,baggage`**Note**: With multiple trace context formats, extraction follows the specified order (for example, `datadog,tracecontext` checks Datadog headers first). The first valid context continues the trace; additional valid contexts become span links. When `baggage` is included, it is added as baggage to the existing context. +{% /dd %} + +{% dt %} +`OTEL_PROPAGATORS` +{% /dt %} + +{% dd %} +Specifies trace context propagation formats for both extraction and injection (comma-separated list). Lowest precedence; ignored if any other Datadog trace context propagation environment variable is set.**Note**: Only use this configuration when migrating an application from the OpenTelemetry SDK to the Datadog SDK. For more information on this configuration and other OpenTelemetry environment variables, see [Using OpenTelemetry Environment Variables with Datadog SDKs](http://localhost:1313/opentelemetry/interoperability/environment_variable_support). +{% /dd %} + +{% dt %} +`DD_TRACE_PROPAGATION_BEHAVIOR_EXTRACT` +{% /dt %} + +{% dd %} +Specifies how incoming distributed tracing headers should be handled at a service level. Accepted values are:`continue`: The SDK will continue the distributed trace if the incoming distributed tracing headers represent a valid trace context.`restart`: The SDK will always start a new trace. If the incoming distributed tracing headers represent a valid trace context, that trace context will be represented as a span link on service entry spans (as opposed to the parent span in the `continue` configuration).`ignore`: The SDK will always start a new trace and all incoming distributed tracing headers are ignored.**Default**: `continue`**Note**: This is only implemented in the .NET, Node.js, Python, and Java libraries. 
+{% /dd %}
+
+{% /dl %}
+
+### Advanced configuration{% #advanced-configuration %}
+
+Most services send and receive trace context headers using the same format. However, if your service needs to accept trace context headers in one format and send them in another, use these configurations:
+
+{% dl %}
+
+{% dt %}
+`DD_TRACE_PROPAGATION_STYLE_EXTRACT`
+{% /dt %}
+
+{% dd %}
+Specifies trace context propagation formats for extraction only in a comma-separated list. Highest precedence for configuring extraction propagators.
+{% /dd %}
+
+{% dt %}
+`DD_TRACE_PROPAGATION_STYLE_INJECT`
+{% /dt %}
+
+{% dd %}
+Specifies trace context propagation formats for injection only in a comma-separated list. Highest precedence for configuring injection propagators.
+{% /dd %}
+
+{% /dl %}
+
+## Supported formats{% #supported-formats %}
+
+The Datadog SDK supports the following trace context formats:
+
+| Format                                                                     | Configuration Value        |
+| -------------------------------------------------------------------------- | -------------------------- |
+| Datadog                                                                    | `datadog`                  |
+| [W3C Trace Context](https://www.w3.org/TR/trace-context/)                  | `tracecontext`             |
+| [B3 Single](https://github.com/openzipkin/b3-propagation#single-header)    | *Language Dependent Value* |
+| [B3 Multi](https://github.com/openzipkin/b3-propagation#multiple-headers)  | `b3multi`                  |
+| [Baggage](https://www.w3.org/TR/baggage/)                                  | `baggage`                  |
+| None                                                                       | `none`                     |
+
+## Language support{% #language-support %}
+
+{% tab title="Java" %}
+### Supported formats{% #supported-formats %}
+
+The Datadog Java SDK supports the following trace context formats, including deprecated configuration values:
+
+| Format                                                                                                        | Configuration Value             |
+| -------------------------------------------------------------------------------------------------------------- | ------------------------------- |
+| Datadog                                                                                                       | `datadog`                       |
+| [W3C Trace Context](https://www.w3.org/TR/trace-context/)                                                     | `tracecontext`                  |
+| [B3 Single](https://github.com/openzipkin/b3-propagation#single-header)                                       | `b3 single header`, `b3single`  |
+| [B3 Multi](https://github.com/openzipkin/b3-propagation#multiple-headers)                                     | `b3multi`, `b3` (deprecated)    |
+| [Baggage](https://www.w3.org/TR/baggage/)                                                                     | `baggage`                       |
+| [AWS X-Ray](https://docs.aws.amazon.com/xray/latest/devguide/xray-concepts.html#xray-concepts-tracingheader)  | `xray`                          |
+| None                                                                                                          | `none`                          |
+
+### Additional configuration{% #additional-configuration %}
+
+In addition to the environment variable configuration, you can also update the propagators using System Property configuration:
+
+- `-Ddd.trace.propagation.style=datadog,b3multi`
+- `-Dotel.propagators=datadog,b3multi`
+- `-Ddd.trace.propagation.style.inject=datadog,b3multi`
+- `-Ddd.trace.propagation.style.extract=datadog,b3multi`
+
+{% /tab %}
+
+{% tab title="Python" %}
+### Supported formats{% #supported-formats %}
+
+The Datadog Python SDK supports the following trace context formats, including deprecated configuration values:
+
+| Format                                                                     | Configuration Value                    |
+| -------------------------------------------------------------------------- | -------------------------------------- |
+| Datadog                                                                    | `datadog`                              |
+| [W3C Trace Context](https://www.w3.org/TR/trace-context/)                  | `tracecontext`                         |
+| [Baggage](https://www.w3.org/TR/baggage/)                                  | `baggage`                              |
+| [B3 Single](https://github.com/openzipkin/b3-propagation#single-header)    | `b3`, `b3 single header` (deprecated)  |
+| [B3 Multi](https://github.com/openzipkin/b3-propagation#multiple-headers)  | `b3multi`                              |
+| None                                                                       | `none`                                 |
+
+{% /tab %}
+
+{% tab title="Ruby" %}
+### Supported formats{% #supported-formats %}
+
+The Datadog Ruby SDK supports the following trace context formats, including deprecated configuration values:
+
+| Format                                                                     | Configuration Value |
+| -------------------------------------------------------------------------- | ------------------- |
+| Datadog                                                                    | `datadog`           |
+| [W3C Trace Context](https://www.w3.org/TR/trace-context/)                  | `tracecontext`      |
+| [Baggage](https://www.w3.org/TR/baggage/)                                  | `baggage`           |
+| [B3 Single](https://github.com/openzipkin/b3-propagation#single-header)    | `b3`                |
+| [B3 Multi](https://github.com/openzipkin/b3-propagation#multiple-headers)  | `b3multi`           |
+| None                                                                       | `none`              |
+
+### Additional configuration{% #additional-configuration %}
+
+In addition to the environment variable configuration, you can also update the propagators in code by using `Datadog.configure`:
+
+```ruby
+Datadog.configure do |c|
+  # List of header formats that should be extracted
+  c.tracing.propagation_extract_style = [ 'tracecontext', 'datadog', 'b3' ]
+
+  # List of header formats that should be injected
+  c.tracing.propagation_inject_style = [ 'tracecontext', 'datadog' ]
+end
+```
+
+{% /tab %}
+
+{% tab title="Go" %}
+### Supported formats{% #supported-formats %}
+
+The Datadog Go SDK supports the following trace context formats, including deprecated configuration values:
+
+| Format                                                                     | Configuration Value           |
+| -------------------------------------------------------------------------- | ----------------------------- |
+| Datadog                                                                    | `datadog`                     |
+| [W3C Trace Context](https://www.w3.org/TR/trace-context/)                  | `tracecontext`                |
+| [Baggage](https://www.w3.org/TR/baggage/)                                  | `baggage`                     |
+| [B3 Single](https://github.com/openzipkin/b3-propagation#single-header)    | `B3 single header`            |
+| [B3 Multi](https://github.com/openzipkin/b3-propagation#multiple-headers)  | `b3multi`, `b3` (deprecated)  |
+| None                                                                       | `none`                        |
+
+{% /tab %}
+
+{% tab title="Node.js" %}
+### Supported formats{% #supported-formats %}
+
+The Datadog Node.js SDK supports the following trace context formats, including deprecated configuration values:
+
+| Format                                                                     | Configuration Value           |
+| -------------------------------------------------------------------------- | ----------------------------- |
+| Datadog                                                                    | `datadog`                     |
+| [W3C Trace Context](https://www.w3.org/TR/trace-context/)                  | `tracecontext`                |
+| [Baggage](https://www.w3.org/TR/baggage/)                                  | `baggage`                     |
+| [B3 Single](https://github.com/openzipkin/b3-propagation#single-header)    | `B3 single header`            |
+| [B3 Multi](https://github.com/openzipkin/b3-propagation#multiple-headers)  | `b3multi`, `B3` (deprecated)  |
+| None                                                                       | `none`                        |
+
+{% /tab %}
+
+{% tab title="PHP" %}
+### Supported formats{% #supported-formats %}
+
+The Datadog PHP SDK supports the following trace context formats, including deprecated configuration values:
+
+| Format                                                                     | Configuration Value           |
+| -------------------------------------------------------------------------- | ----------------------------- |
+| Datadog                                                                    | `datadog`                     |
+| [W3C Trace Context](https://www.w3.org/TR/trace-context/)                  | `tracecontext`                |
+| [Baggage](https://www.w3.org/TR/baggage/)                                  | `baggage`                     |
+| [B3 Single](https://github.com/openzipkin/b3-propagation#single-header)    | `B3 single header`            |
+| [B3 Multi](https://github.com/openzipkin/b3-propagation#multiple-headers)  | `b3multi`, `B3` (deprecated)  |
+| None                                                                       | `none`                        |
+
+### Additional use cases{% #additional-use-cases %}
+
+The following use cases are specific to the Datadog PHP SDK:
+
+{% collapsible-section %}
+#### Distributed tracing on PHP script launch
+
+When a new PHP script is launched, the Datadog SDK automatically checks for the presence of Datadog headers for distributed tracing:
+
+- `x-datadog-trace-id` (environment variable: `HTTP_X_DATADOG_TRACE_ID`)
+- `x-datadog-parent-id` (environment variable: `HTTP_X_DATADOG_PARENT_ID`)
+- `x-datadog-origin` (environment variable: `HTTP_X_DATADOG_ORIGIN`)
+- `x-datadog-tags` (environment variable: `HTTP_X_DATADOG_TAGS`)
+
+{% /collapsible-section %}
+
+{% collapsible-section %}
+#### Manually setting the distributed tracing context
+
+To manually set tracing information in a CLI script for new or existing traces, use the `DDTrace\set_distributed_tracing_context(string $trace_id, string $parent_id, ?string $origin = null, ?array $tags = null)` function.
+
+```php
+<?php
+
+// Example: $message carries the trace context received from an upstream
+// service, for instance as part of a message consumed from a queue.
+\DDTrace\set_distributed_tracing_context($message->trace_id, $message->parent_id);
+```
+
+For version **0.87.0** and later, if the raw headers are available, use the `DDTrace\consume_distributed_tracing_headers(array|callable $headersOrCallback)` function. **Note**: The header names must be in lowercase.
+
+```php
+$headers = [
+    "x-datadog-trace-id" => "1234567890",
+    "x-datadog-parent-id" => "987654321",
+];
+
+\DDTrace\consume_distributed_tracing_headers($headers);
+```
+
+To extract the trace context directly as headers, use the `DDTrace\generate_distributed_tracing_headers(?array $inject = null): array` function.
+
+```php
+$headers = DDTrace\generate_distributed_tracing_headers();
+// Store headers somewhere, inject them in an outbound request, ...
+// These $headers can also be read back by \DDTrace\consume_distributed_tracing_headers from another process.
+```
+
+This function's optional argument accepts an array of injection style names. It defaults to the configured injection style.
+{% /collapsible-section %}
+
+{% collapsible-section %}
+#### RabbitMQ
+
+The PHP APM SDK supports automatic tracing of the `php-amqplib/php-amqplib` library (version 0.87.0+). However, in some cases, your distributed trace may be disconnected. For example, when reading messages from a distributed queue using the `basic_get` method outside an existing trace, you need to add a custom trace around the `basic_get` call and corresponding message processing:
+
+```php
+// Create a surrounding trace
+$newTrace = \DDTrace\start_trace_span();
+$newTrace->name = 'basic_get.process';
+$newTrace->service = 'amqp';
+
+// basic_get call(s) + message(s) processing
+$msg = $channel->basic_get($queue);
+if ($msg) {
+    $messageProcessing($msg);
+}
+
+// Once done, close the span
+\DDTrace\close_span();
+```
+
+Wrapping your consume-and-process logic in this surrounding trace ensures observability of your distributed queue.
+{% /collapsible-section %}
+
+{% /tab %}
+
+{% tab title="C++" %}
+### Supported formats{% #supported-formats %}
+
+The Datadog C++ SDK supports the following trace context formats, including deprecated configuration values:
+
+| Format                                                                     | Configuration Value |
+| -------------------------------------------------------------------------- | ------------------- |
+| Datadog                                                                    | `datadog`           |
+| [W3C Trace Context](https://www.w3.org/TR/trace-context/)                  | `tracecontext`      |
+| [Baggage](https://www.w3.org/TR/baggage/)                                  | `baggage`           |
+| [B3 Multi](https://github.com/openzipkin/b3-propagation#multiple-headers)  | `b3`, `b3multi`     |
+| None                                                                       | `none`              |
+
+### Additional configuration{% #additional-configuration %}
+
+In addition to the environment variable configuration, you can also update the propagators in code:
+
+```cpp
+#include
+#include
+
+namespace dd = datadog::tracing;
+int main() {
+  dd::TracerConfig config;
+  config.service = "my-service";
+
+  // `injection_styles` indicates with which tracing systems trace propagation
+  // will be compatible when injecting (sending) trace context.
+  // All styles indicated by `injection_styles` are used for injection.
+  // `injection_styles` is overridden by the `DD_TRACE_PROPAGATION_STYLE_INJECT`
+  // and `DD_TRACE_PROPAGATION_STYLE` environment variables.
+  config.injection_styles = {dd::PropagationStyle::DATADOG, dd::PropagationStyle::B3};
+
+  // `extraction_styles` indicates with which tracing systems trace propagation
+  // will be compatible when extracting (receiving) trace context.
+  // Extraction styles are applied in the order in which they appear in
+  // `extraction_styles`. The first style that produces trace context or
+  // produces an error determines the result of extraction.
+  // `extraction_styles` is overridden by the
+  // `DD_TRACE_PROPAGATION_STYLE_EXTRACT` and `DD_TRACE_PROPAGATION_STYLE`
+  // environment variables.
+  config.extraction_styles = {dd::PropagationStyle::W3C};
+
+  ...
+}
+```
+
+### Additional use cases{% #additional-use-cases %}
+
+{% collapsible-section %}
+#### Manually extract propagated context
+
+To extract propagation context, implement a custom `DictReader` interface and call `Tracer::extract_span` or `Tracer::extract_or_create_span`.
+
+Here is an example of extracting propagation context from HTTP headers:
+
+```cpp
+#include
+#include
+#include
+
+#include
+
+namespace dd = datadog::tracing;
+
+class HTTPHeadersReader : public datadog::tracing::DictReader {
+  std::unordered_map headers_;
+
+public:
+  HTTPHeadersReader(std::unordered_map headers)
+      : headers_(std::move(headers)) {}
+
+  ~HTTPHeadersReader() override = default;
+
+  // Return the value at the specified `key`, or return `nullopt` if there
+  // is no value at `key`.
+  dd::Optional lookup(dd::StringView key) const override {
+    auto found = headers_.find(key);
+    if (found == headers_.cend()) return dd::nullopt;
+
+    return found->second;
+  }
+
+  // Invoke the specified `visitor` once for each key/value pair in this object.
+  void visit(
+      const std::function& visitor)
+      const override {
+    for (const auto& [key, value] : headers_) {
+      visitor(key, value);
+    }
+  };
+};
+
+// Usage example:
+void handle_http_request(const Request& request, datadog::tracing::Tracer& tracer) {
+  HTTPHeadersReader reader{request.headers};
+  auto maybe_span = tracer.extract_span(reader);
+  ..
+}
+```
+
+{% /collapsible-section %}
+
+{% collapsible-section %}
+#### Manually inject context for distributed tracing
+
+To inject propagation context, implement the `DictWriter` interface and call `Span::inject` on a span instance:
+
+```cpp
+#include
+#include
+
+#include
+#include
+
+namespace dd = datadog::tracing;
+
+class HTTPHeaderWriter : public dd::DictWriter {
+  std::unordered_map& headers_;
+
+public:
+  explicit HTTPHeaderWriter(std::unordered_map& headers) : headers_(headers) {}
+
+  ~HTTPHeaderWriter() override = default;
+
+  void set(dd::StringView key, dd::StringView value) override {
+    headers_.emplace(key, value);
+  }
+};
+
+// Usage example:
+void handle_http_request(const Request& request, dd::Tracer& tracer) {
+  auto span = tracer.create_span();
+
+  HTTPHeaderWriter writer(request.headers);
+  span.inject(writer);
+  // `request.headers` now populated with the headers needed to propagate the span.
+  ..
+}
+```
+
+{% /collapsible-section %}
+
+{% /tab %}
+
+{% tab title=".NET" %}
+### Supported formats{% #supported-formats %}
+
+The Datadog .NET SDK supports the following trace context formats, including deprecated configuration values:
+
+| Format                                                                     | Configuration Value                                |
+| -------------------------------------------------------------------------- | -------------------------------------------------- |
+| Datadog                                                                    | `datadog`                                          |
+| [W3C Trace Context](https://www.w3.org/TR/trace-context/)                  | `tracecontext`, `W3C` (deprecated)                 |
+| [Baggage](https://www.w3.org/TR/baggage/)                                  | `baggage`                                          |
+| [B3 Single](https://github.com/openzipkin/b3-propagation#single-header)    | `B3 single header`, `B3SingleHeader` (deprecated)  |
+| [B3 Multi](https://github.com/openzipkin/b3-propagation#multiple-headers)  | `b3multi`, `B3` (deprecated)                       |
+| None                                                                       | `none`                                             |
+
+### Additional use cases{% #additional-use-cases %}
+
+{% collapsible-section %}
+#### Prior configuration defaults
+
+- As of version 2.48.0, the default propagation style is `datadog, tracecontext`. This means Datadog headers are used first, followed by W3C Trace Context.
+- Prior to version 2.48.0, the order was `tracecontext, Datadog` for both extraction and injection propagation.
+- Prior to version 2.22.0, only the `Datadog` injection style was enabled.
+- As of version 2.42.0, when multiple extractors are specified, the `DD_TRACE_PROPAGATION_EXTRACT_FIRST=true` configuration specifies whether context extraction should exit immediately upon detecting the first valid `tracecontext`. The default value is `false`.
+
+{% /collapsible-section %}
+
+{% collapsible-section %}
+#### Distributed tracing with message queues
+
+In most cases, header extraction and injection are automatic. However, there are some known cases where your distributed trace can be disconnected. For instance, when reading messages from a distributed queue, some libraries may lose the span context. This can also happen if you set `DD_TRACE_KAFKA_CREATE_CONSUMER_SCOPE_ENABLED` to `false` when consuming Kafka messages. In these cases, you can add a custom trace using the following code:
+
+```csharp
+var spanContextExtractor = new SpanContextExtractor();
+var parentContext = spanContextExtractor.Extract(headers, (headers, key) => GetHeaderValues(headers, key));
+var spanCreationSettings = new SpanCreationSettings() { Parent = parentContext };
+using var scope = Tracer.Instance.StartActive("operation", spanCreationSettings);
+```
+
+Provide the `GetHeaderValues` method. The way this method is implemented depends on the structure that carries `SpanContext`.
+ +Here are some examples: + +```csharp +// Confluent.Kafka +IEnumerable GetHeaderValues(Headers headers, string name) +{ + if (headers.TryGetLastBytes(name, out var bytes)) + { + try + { + return new[] { Encoding.UTF8.GetString(bytes) }; + } + catch (Exception) + { + // ignored + } + } + + return Enumerable.Empty(); +} + +// RabbitMQ +IEnumerable GetHeaderValues(IDictionary headers, string name) +{ + if (headers.TryGetValue(name, out object value) && value is byte[] bytes) + { + return new[] { Encoding.UTF8.GetString(bytes) }; + } + + return Enumerable.Empty(); +} + +// SQS +public static IEnumerable GetHeaderValues(IDictionary headers, string name) +{ + // For SQS, there are a maximum of 10 message attribute headers, + // so the Datadog headers are combined into one header with the following properties: + // - Key: "_datadog" + // - Value: MessageAttributeValue object + // - DataType: "String" + // - StringValue: + if (headers.TryGetValue("_datadog", out var messageAttributeValue) + && messageAttributeValue.StringValue is string jsonString) + { + var datadogDictionary = JsonConvert.DeserializeObject>(jsonString); + if (datadogDictionary.TryGetValue(name, out string value)) + { + return new[] { value }; + } + } + return Enumerable.Empty(); +} +``` + +When using the `SpanContextExtractor` API to trace Kafka consumer spans, set `DD_TRACE_KAFKA_CREATE_CONSUMER_SCOPE_ENABLED` to `false`. This ensures the consumer span is correctly closed immediately after the message is consumed from the topic, and the metadata (such as `partition` and `offset`) is recorded correctly. Spans created from Kafka messages using the `SpanContextExtractor` API are children of the producer span, and siblings of the consumer span. + +If you need to propagate trace context manually (for libraries that are not instrumented automatically, like the WCF client), you can use the `SpanContextInjection` API. Here is an example for WCF where `this` is the WCF client: + +```csharp + +using (OperationContextScope ocs = new OperationContextScope(this.InnerChannel)) +{ + var spanContextInjector = new SpanContextInjector(); + spanContextInjector.Inject(OperationContext.Current.OutgoingMessageHeaders, SetHeaderValues, Tracer.Instance.ActiveScope?.Span?.Context); +} + + +void SetHeaderValues(MessageHeaders headers, string name, string value) +{ + MessageHeader header = MessageHeader.CreateHeader(name, "datadog", value); + headers.Add(header); +} +``` + +{% /collapsible-section %} + +{% /tab %} + +## Custom header formats{% #custom-header-formats %} + +### Datadog format{% #datadog-format %} + +When the Datadog SDK is configured with the Datadog format for extraction or injection (possibly both), the Datadog SDK interacts with the following request headers: + +{% dl %} + +{% dt %} +`x-datadog-trace-id` +{% /dt %} + +{% dd %} +Specifies the lower 64-bits of the 128-bit trace-id, in decimal format. +{% /dd %} + +{% dt %} +`x-datadog-parent-id` +{% /dt %} + +{% dd %} +Specifies the 64-bits span-id of the current span, in decimal format. +{% /dd %} + +{% dt %} +`x-datadog-origin` +{% /dt %} + +{% dd %} +Specifies the Datadog product that initiated the trace, such as [Real User Monitoring](http://localhost:1313/real_user_monitoring/correlate_with_other_telemetry/apm) or [Synthetic Monitoring](http://localhost:1313/synthetics/platform/apm). If this header is present, the value is expected to be one of: `rum`, `synthetics`, `synthetics-browser`. 
+{% /dd %} + +{% dt %} +`x-datadog-sampling-priority` +{% /dt %} + +{% dd %} +Specifies the sampling decision made for the represented span as an integer, in decimal format. +{% /dd %} + +{% dt %} +`x-datadog-tags` +{% /dt %} + +{% dd %} +Specifies supplemental Datadog trace state information, including but not limited to the higher 64-bits of the 128-bit trace-id (in hexadecimal format). +{% /dd %} + +{% /dl %} + +### None format{% #none-format %} + +When the Datadog SDK is configured with the None format for extraction or injection (possibly both), the Datadog SDK does *not* interact with request headers, meaning that the corresponding context propagation operation does nothing. + +### Baggage{% #baggage %} + +By default, Baggage is automatically propagated through a distributed request using OpenTelemetry's [W3C-compatible headers](https://www.w3.org/TR/baggage/). To disable baggage, set DD_TRACE_PROPAGATION_STYLE to `datadog,tracecontext`. + +#### Adding baggage as span tags{% #adding-baggage-as-span-tags %} + +By default, `user.id,session.id,account.id` baggage keys are added as span tags. To customize this configuration, see [Context Propagation Configuration](http://localhost:1313/tracing/trace_collection/library_config#context-propagation). Specified baggage keys are automatically added as span tags `baggage.` (for example, `baggage.user.id`). + +Support for baggage as span tags was introduced in the following releases: + +| Language | Minimum SDK version | +| --------- | ------------------- | +| Java | 1.52.0 | +| Python | 3.7.0 | +| Ruby | 2.20.0 | +| Go | 2.2.2 | +| .NET | 3.23.0 | +| Node | 5.54.0 | +| PHP | 1.10.0 | +| C++/Proxy | Not yet supported | + +## Further reading{% #further-reading %} + +- [Understand APM terminology](http://localhost:1313/tracing/glossary/) +- [Monitor OpenTelemetry-instrumented apps with support for W3C Trace Context](https://www.datadoghq.com/blog/monitor-otel-with-w3c-trace-context/) +- [Interoperability of OpenTelemetry API and Datadog instrumented traces](http://localhost:1313/opentelemetry/guide/otel_api_tracing_interoperability) diff --git a/opentelemetry-mdoc/reference/trace_ids/index.md b/opentelemetry-mdoc/reference/trace_ids/index.md new file mode 100644 index 0000000000000..05e5de6e5dea0 --- /dev/null +++ b/opentelemetry-mdoc/reference/trace_ids/index.md @@ -0,0 +1,19 @@ +--- +title: Trace IDs +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Reference > Trace IDs +--- + +# Trace IDs + +W3C traces implicitly contain 128-bit trace IDs, rather than the 64-bit trace IDs that Datadog traces have historically used. The latest default configuration for Datadog tracing libraries uses the setting `DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED=True` so that Datadog also produces trace data with 128-bit trace IDs. + +Following the [W3C Trace Context recommendations](https://www.w3.org/TR/trace-context/#handling-trace-id-for-compliant-platforms-with-shorter-internal-identifiers), Datadog 128-bit trace IDs have randomness in the lower-order 64 bits. This restriction provides backward compatibility for systems that intermix libraries that generate 64-bit trace IDs with newer ones that support 128-bit IDs. In such systems, spans with the full 128-bit trace ID and spans with the truncated lower-order 64-bit trace ID can arrive at the backend and be treated as matching and part of the same trace. 
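+
+To make the relationship concrete, here is a small illustrative sketch; the trace ID below is invented, and the snippet assumes a bash shell. It splits a 128-bit trace ID into the higher-order 64 bits and the randomized lower-order 64 bits that a 64-bit-only component keeps:
+
+```shell
+#!/usr/bin/env bash
+
+# Hypothetical 128-bit trace ID, written as 32 hex characters (not a real trace).
+full_trace_id="4bf92f3577b34da672ce929d0e0e4736"
+
+high64="${full_trace_id:0:16}"   # higher-order 64 bits
+low64="${full_trace_id:16:16}"   # lower-order 64 bits: the randomized part
+
+# A 64-bit-only tracer keeps just the lower half; Datadog matches it back to the full ID.
+printf 'higher 64 bits (hex)      : %s\n' "$high64"
+printf 'lower 64 bits (hex)       : %s\n' "$low64"
+# The decimal form of the lower half is what the x-datadog-trace-id header carries.
+printf 'lower 64 bits (decimal)   : %u\n' "0x$low64"
+```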
+ +{% image + source="http://localhost:1313/images/opentelemetry/guide/otel_api_tracing_interop/128-62-bit-trace-ids.1a290a3a113157c46cb1900e684fab65.png?auto=format" + alt="128-bit Trace IDs can be passed with trace context to code whose tracing library generates 64-bit trace IDs, and Datadog successfully correlate them in the backend." /%} + +## Further reading{% #further-reading %} + +- [Trace Context Propagation](http://localhost:1313/opentelemetry/reference/trace_context_propagation) diff --git a/opentelemetry-mdoc/setup/agent/index.md b/opentelemetry-mdoc/setup/agent/index.md new file mode 100644 index 0000000000000..90a466b092037 --- /dev/null +++ b/opentelemetry-mdoc/setup/agent/index.md @@ -0,0 +1,65 @@ +--- +title: Datadog Agent +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Datadog + Agent +--- + +# Datadog Agent + +## Overview{% #overview %} + +Sending data to Datadog using the Datadog Agent is a great option for existing Datadog users or teams requiring Agent-based features. + +**Key benefits**: + +- Access [1,000+ Datadog integrations](http://localhost:1313/integrations/), [Live Container Monitoring](http://localhost:1313/containers/), [Cloud Network Monitoring](http://localhost:1313/network_monitoring/cloud_network_monitoring/), [Universal Service Monitoring](http://localhost:1313/universal_service_monitoring/) (with eBPF), and more +- Leverage OpenTelemetry community-contributed integrations to collect telemetry in OTLP native format +- Benefit from Datadog's robust security practices, including regular vulnerability scans and analysis +- Access Datadog's global support team for assistance with onboarding and troubleshooting + +The Datadog Agent provides two ways to ingest OpenTelemetry data: + +- **Datadog Distribution of OpenTelemetry (DDOT) Collector**: Use the DDOT Collector embedded in the Datadog Agent. +- **OTLP Ingest in the Agent**: Send telemetry data to the Datadog Agent using OpenTelemetry Protocol (OTLP). + +## Datadog Distribution of OpenTelemetry (DDOT) Collector{% #datadog-distribution-of-opentelemetry-ddot-collector %} + +The DDOT Collector combines the Datadog Agent with a built-in OpenTelemetry Collector. This option is best suited for Kubernetes users who want to take full advantage of Collector capabilities, such as advanced data processing and exporting OTLP data to multiple destinations. + +{% image + source="http://localhost:1313/images/opentelemetry/setup/ddot-collector-2.48e827fe0ea4d62cd26a81521e9fa584.png?auto=format" + alt="Architecture overview for DDOT Collector, which is embedded in the Datadog Agent." /%} + +**Use the DDOT Collector if**: + +- You want full control over OpenTelemetry pipelines, including processors and exporters +- You plan to forward OTLP data to multiple backends beyond Datadog +- You're running in a Kubernetes Linux environment + +- [Learn more about the DDOT Collector](http://localhost:1313/opentelemetry/agent/) + +## OTLP Ingest in the Agent{% #otlp-ingest-in-the-agent %} + +OTLP Ingest in the Agent is a way to send telemetry data directly from applications instrumented with OpenTelemetry SDKs to the Datadog Agent. 
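+
+As a minimal sketch of what this looks like from the application side (assuming OTLP ingest has already been enabled in the Agent and the Agent is reachable on `localhost` with the default OTLP ports; the service name and launch command below are placeholders), the application only needs its standard OpenTelemetry exporter settings pointed at the Agent:
+
+```shell
+# Point the OpenTelemetry SDK's OTLP exporter at the local Datadog Agent.
+# 4317 is the default OTLP gRPC port; use 4318 instead for OTLP over HTTP.
+export OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4317"
+export OTEL_SERVICE_NAME="my-service"   # placeholder service name
+
+# Start the instrumented application as usual (placeholder command).
+./run-my-app
+```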
+ +{% image + source="http://localhost:1313/images/opentelemetry/setup/dd-agent-otlp-ingest.5c618e65990e9be5954c60e908ab5f09.png?auto=format" + alt="OpenTelemetry data flowing through the Datadog Agent" /%} + +**Use OTLP Ingest in the Agent if**: + +- You plan to send all OTLP telemetry data directly to Datadog without needing custom processing or multiple destinations +- You prefer an approach with minimal configuration that doesn't require managing OpenTelemetry pipelines +- You're running on platforms other than Kubernetes Linux, such as Windows, bare-metal EC2, VM environments, or [other supported platforms](http://localhost:1313/agent/basic_agent_usage/?tab=Linux#supported-platforms) + +- [Learn more about OTLP Ingest in the Agent](http://localhost:1313/opentelemetry/setup/otlp_ingest_in_the_agent) + +## Further reading{% #further-reading %} + +- [Datadog Agent with DDOT Collector](http://localhost:1313/opentelemetry/agent/) +- [Install the Datadog Agent with DDOT OpenTelemetry Collector](http://localhost:1313/opentelemetry/setup/ddot_collector/install/) +- [Use Custom OpenTelemetry Components with Datadog Agent](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components) +- [Migrate to the Datadog Agent with DDOT OpenTelemetry Collector](http://localhost:1313/opentelemetry/guide/migrate/ddot_collector) +- [OTLP Ingest in the Agent](http://localhost:1313/opentelemetry/setup/otlp_ingest_in_the_agent) diff --git a/opentelemetry-mdoc/setup/collector_exporter/deploy/index.md b/opentelemetry-mdoc/setup/collector_exporter/deploy/index.md new file mode 100644 index 0000000000000..fca75215a4a83 --- /dev/null +++ b/opentelemetry-mdoc/setup/collector_exporter/deploy/index.md @@ -0,0 +1,343 @@ +--- +title: Deploy the OpenTelemetry Collector +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Install + and Configure the OpenTelemetry Collector > Deploy the OpenTelemetry Collector +--- + +# Deploy the OpenTelemetry Collector + +This page guides you through various deployment options for the OpenTelemetry Collector with the Datadog Exporter, allowing you to send traces, metrics, and logs to Datadog. + +## Deploy the Collector{% #deploy-the-collector %} + +The OpenTelemetry Collector can be deployed in various environments to suit different infrastructure needs. This section covers the following deployment options: + +- On a host +- Docker +- Kubernetes + +It's important to note that certain features and capabilities may vary depending on the deployment method. For a detailed overview of these differences, see the Deployment-based limitations. + +Choose the deployment option that best fits your infrastructure and complete the following instructions. + +### On a host{% #on-a-host %} + +Run the Collector, specifying the configuration file using the `--config` parameter: + +```shell +otelcontribcol_linux_amd64 --config collector.yaml +``` + +### Docker{% #docker %} + +{% tab title="localhost" %} +To run the OpenTelemetry Collector as a Docker image and receive traces from the same host: + +1. Choose a published Docker image such as [`otel/opentelemetry-collector-contrib`](https://hub.docker.com/r/otel/opentelemetry-collector-contrib/tags). + +1. Determine which ports to open on your container so that OpenTelemetry traces are sent to the OpenTelemetry Collector. By default, traces are sent over gRPC on port 4317. If you don't use gRPC, use port 4318. + +1. 
Run the container and expose the necessary port, using the `collector.yaml` file. For example, if you are using port 4317: + + ``` + $ docker run \ + -p 4317:4317 \ + --hostname $(hostname) \ + -v $(pwd)/otel_collector_config.yaml:/etc/otelcol-contrib/config.yaml \ + otel/opentelemetry-collector-contrib + ``` + +{% /tab %} + +{% tab title="Other containers" %} +To run the OpenTelemetry Collector as a Docker image and receive traces from other containers: + +1. Create a Docker network: + + ``` + docker network create + ``` + +1. Run the OpenTelemetry Collector and application containers as part of the same network. + + ``` + # Run the OpenTelemetry Collector + docker run -d --name opentelemetry-collector \ + --network \ + --hostname $(hostname) \ + -v $(pwd)/otel_collector_config.yaml:/etc/otelcol-contrib/config.yaml \ + otel/opentelemetry-collector-contrib + ``` + +When running the application container, ensure that the environment variable `OTEL_EXPORTER_OTLP_ENDPOINT` is configured to use the appropriate hostname for the OpenTelemetry Collector. In the example below, this is `opentelemetry-collector`. + + ``` + # Run the application container + docker run -d --name app \ + --network \ + --hostname $(hostname) \ + -e OTEL_EXPORTER_OTLP_ENDPOINT=http://opentelemetry-collector:4317 \ + company/app:latest + ``` + +{% /tab %} + +### Kubernetes{% #kubernetes %} + +{% tab title="DaemonSet" %} +Using a DaemonSet is the most common and recommended way to configure OpenTelemetry collection in a Kubernetes environment. To deploy the OpenTelemetry Collector and Datadog Exporter in a Kubernetes infrastructure: + +1. Use this [example configuration](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart), including the application configuration, to set up the OpenTelemetry Collector with the Datadog Exporter as a DaemonSet. + +1. Ensure that essential ports for the DaemonSet are exposed and accessible to your application. The following configuration options [from the example](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/daemonset.yaml#L33-L38) define these ports: + + ```yaml + # ... + ports: + - containerPort: 4318 # default port for OpenTelemetry HTTP receiver. + hostPort: 4318 + - containerPort: 4317 # default port for OpenTelemetry gRPC receiver. + hostPort: 4317 + - containerPort: 8888 # Default endpoint for querying Collector observability metrics. + # ... + ``` +Important alert (level: info): If your application doesn't require both HTTP and gRPC, remove unused ports from the configuration. +1. To collect valuable Kubernetes attributes, which are used for Datadog container tagging, report the Pod IP as a resource attribute, [as shown in the example](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/daemonset.yaml#L48-L57): + + ```yaml + # ... + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + # The k8s.pod.ip is used to associate pods for k8sattributes + - name: OTEL_RESOURCE_ATTRIBUTES + value: "k8s.pod.ip=$(POD_IP)" + # ... 
+ ``` + +This ensures that [Kubernetes Attributes Processor](https://pkg.go.dev/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor#section-readme), which is used in [the config map](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/configmap.yaml), is able to extract the necessary metadata to attach to traces. There are additional [roles](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/roles.yaml) that need to be set to allow access to this metadata. [The example](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart) is complete, ready to use, and has the correct roles set up. + +1. Configure your [application container](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/deployment.yaml#L21-L22) to use the correct OTLP endpoint hostname. Since the OpenTelemetry Collector runs as a DaemonSet, the current host needs to be targeted. Set your application container's `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable accordingly, as in the [example chart](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/deployment.yaml#L32-L39): + + ```yaml + # ... + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + # The application SDK must use this environment variable in order to successfully + # connect to the DaemonSet's collector. + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: "http://$(HOST_IP):4318" + # ... + ``` + +1. Configure host metadata collection to ensure accurate host information. Set up your DaemonSet to collect and forward host metadata: + + ```yaml + processors: + resourcedetection: + detectors: [system, env] + k8sattributes: + # existing k8sattributes config + transform: + trace_statements: + - context: resource + statements: + - set(attributes["datadog.host.use_as_metadata"], true) + ... + service: + pipelines: + traces: + receivers: [otlp] + processors: [resourcedetection, k8sattributes, transform, batch] + exporters: [datadog] + ``` + +This configuration collects host metadata using the `resourcedetection` processor, adds Kubernetes metadata with the `k8sattributes` processor, and sets the `datadog.host.use_as_metadata` attribute to `true`. For more information, see [Mapping OpenTelemetry Semantic Conventions to Infrastructure List Host Information](http://localhost:1313/opentelemetry/schema_semantics/host_metadata/). +{% /tab %} + +{% tab title="Gateway" %} +To deploy the OpenTelemetry Collector and Datadog Exporter in a Kubernetes Gateway deployment: + +1. Use this [example configuration](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart), including the application configuration, to set up the OpenTelemetry Collector with the Datadog Exporter as a DaemonSet. + +1. Ensure that essential ports for the DaemonSet are exposed and accessible to your application. The following configuration options [from the example](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/daemonset.yaml#L33-L38) define these ports: + + ```yaml + # ... + ports: + - containerPort: 4318 # default port for OpenTelemetry HTTP receiver. 
+ hostPort: 4318 + - containerPort: 4317 # default port for OpenTelemetry gRPC receiver. + hostPort: 4317 + - containerPort: 8888 # Default endpoint for querying Collector observability metrics. + # ... + ``` +Important alert (level: info): If your application doesn't require both HTTP and gRPC, remove unused ports from the configuration. +1. To collect valuable Kubernetes attributes, which are used for Datadog container tagging, report the Pod IP as a resource attribute, [as shown in the example](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/daemonset.yaml#L48-L57): + + ```yaml + # ... + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + # The k8s.pod.ip is used to associate pods for k8sattributes + - name: OTEL_RESOURCE_ATTRIBUTES + value: "k8s.pod.ip=$(POD_IP)" + # ... + ``` + +This ensures that [Kubernetes Attributes Processor](https://pkg.go.dev/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor#section-readme), which is used in [the config map](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/configmap.yaml), is able to extract the necessary metadata to attach to traces. There are additional [roles](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/roles.yaml) that need to be set to allow access to this metadata. [The example](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart) is complete, ready to use, and has the correct roles set up. + +1. Configure your [application container](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/deployment.yaml#L21-L22) to use the correct OTLP endpoint hostname. Since the OpenTelemetry Collector runs as a DaemonSet, the current host needs to be targeted. Set your application container's `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable accordingly, as in the [example chart](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/deployment.yaml#L32-L39): + + ```yaml + # ... + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + # The application SDK must use this environment variable in order to successfully + # connect to the DaemonSet's collector. + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: "http://$(HOST_IP):4318" + # ... + ``` + +1. Change the DaemonSet to include an [OTLP exporter](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/otlpexporter/README.md#otlp-grpc-exporter) instead of the Datadog Exporter [currently in place](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/configmap.yaml#L56-L59): + + ```yaml + # ... + exporters: + otlp: + endpoint: ":4317" + # ... + ``` + +1. Make sure that the service pipelines use this exporter, instead of the Datadog one that [is in place in the example](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/configmap.yaml#L136-L148): + + ```yaml + # ... 
+ service: + pipelines: + metrics: + receivers: [hostmetrics, otlp] + processors: [resourcedetection, k8sattributes, batch] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [resourcedetection, k8sattributes, batch] + exporters: [otlp] + # ... + ``` + +This ensures that each Agent forwards its data through the OTLP protocol to the Collector Gateway. + +1. Replace `` with the address of your OpenTelemetry Collector Gateway. + +1. Configure the [`k8sattributes` processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/configmap.yaml#L69) to forward the Pod IP to the Gateway Collector so that it can obtain the metadata: + + ```yaml + # ... + k8sattributes: + passthrough: true + # ... + ``` + +For more information about the `passthrough` option, read [its documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/k8sattributesprocessor#as-a-gateway). + +1. Make sure that the Gateway Collector's configuration uses the same Datadog Exporter settings that have been replaced by the OTLP exporter in the Agents. For example (where `` is your site, ): + + ```yaml + # ... + exporters: + datadog: + api: + site: + key: ${env:DD_API_KEY} + # ... + ``` + +1. Configure host metadata collection: In a gateway deployment, you need to ensure that host metadata is collected by the agent collectors and preserved by the gateway collector. This ensures that host metadata is collected by the agents and properly forwarded through the gateway to Datadog.For more information, see [Mapping OpenTelemetry Semantic Conventions to Infrastructure List Host Information](http://localhost:1313/opentelemetry/schema_semantics/host_metadata/). + +**Agent collector configuration**: + + ```yaml + processors: + resourcedetection: + detectors: [system, env] + k8sattributes: + passthrough: true + + exporters: + otlp: + endpoint: ":4317" + + service: + pipelines: + traces: + receivers: [otlp] + processors: [resourcedetection, k8sattributes, transform, batch] + exporters: [otlp] + ``` + +**Gateway collector configuration**: + + ```yaml + processors: + k8sattributes: + extract: + metadata: [node.name, k8s.node.name] + + exporters: + datadog: + api: + key: ${DD_API_KEY} + hostname_source: resource_attribute + + service: + pipelines: + traces: + receivers: [otlp] + processors: [k8sattributes, batch] + exporters: [datadog] + ``` + +{% /tab %} + +{% tab title="Operator" %} +To use the OpenTelemetry Operator, follow the [official documentation for deploying the OpenTelemetry Operator](https://github.com/open-telemetry/opentelemetry-operator#readme). As described there, deploy the certificate manager in addition to the Operator. + +Configure the Operator using one of the OpenTelemetry Collector standard Kubernetes configurations: + +- [DaemonSet deployment](http://localhost:1313/opentelemetry/collector_exporter/deployment/?tab=daemonset#kubernetes) - Use the DaemonSet deployment if you want to ensure you receive host metrics. +- [Gateway deployment](http://localhost:1313/opentelemetry/collector_exporter/deployment/?tab=gateway#kubernetes) + +{% /tab %} + +## Hostname resolution{% #hostname-resolution %} + +See [Mapping OpenTelemetry Semantic Conventions to Hostnames](http://localhost:1313/opentelemetry/schema_semantics/hostname/) to understand how the hostname is resolved. 
+
+## Deployment-based limitations{% #deployment-based-limitations %}
+
+The OpenTelemetry Collector has [two primary deployment methods](https://opentelemetry.io/docs/collector/deployment/): Agent and Gateway. Depending on your deployment method, the following components are available:
+
+| Deployment mode | Host metrics | Kubernetes orchestration metrics | Traces | Logs auto-ingestion |
+| --------------- | ------------ | -------------------------------- | ------ | ------------------- |
+| as Gateway | yes | yes |
+| as Agent | yes | yes | yes | yes |
+
+## Further reading{% #further-reading %}
+
+- [Configuring the OpenTelemetry Collector](http://localhost:1313/opentelemetry/setup/collector_exporter/)
+- [OpenTelemetry Collector Deployment](https://opentelemetry.io/docs/collector/deployment/)
diff --git a/opentelemetry-mdoc/setup/collector_exporter/index.md b/opentelemetry-mdoc/setup/collector_exporter/index.md
new file mode 100644
index 0000000000000..c17a27c8e1ef1
--- /dev/null
+++ b/opentelemetry-mdoc/setup/collector_exporter/index.md
@@ -0,0 +1,48 @@
+---
+title: Install and Configure the OpenTelemetry Collector
+description: Send OpenTelemetry data to the OpenTelemetry Collector and Datadog Exporter
+breadcrumbs: >-
+  Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Install
+  and Configure the OpenTelemetry Collector
+---
+
+# Install and Configure the OpenTelemetry Collector
+
+## Overview{% #overview %}
+
+This page provides guides for installing and configuring a standalone OpenTelemetry Collector to send telemetry data to Datadog.
+
+This method is best for users who prefer to use OTel Collector distributions from the OpenTelemetry open source community or require advanced processing capabilities not available in other setups. For most use cases, the [Datadog Distribution of OTel Collector (DDOT)](http://localhost:1313/opentelemetry/setup/ddot_collector/install/) is the recommended approach.
+
+## Setup{% #setup %}
+
+To begin, install the OpenTelemetry Collector and configure it with the Datadog Exporter. This guide walks you through the initial setup required before proceeding to more specific configuration topics.
+
+- [**Install and Configure the Collector**](http://localhost:1313/opentelemetry/setup/collector_exporter/install): Follow the initial setup steps to get a Collector running with the Datadog Exporter.
+
+## Configuration{% #configuration %}
+
+After your Collector is running, use these guides to configure specific receivers and processors to collect and enrich your telemetry data.
+
+- [**Deploy the Collector**](http://localhost:1313/opentelemetry/setup/collector_exporter/deploy): Learn how to run the Collector in various environments, including on a host, in Docker, or as a DaemonSet or Gateway in Kubernetes.
+- [**Configure Hostname and Tagging**](http://localhost:1313/opentelemetry/config/hostname_tagging): Use resource detection and Kubernetes attributes processors to ensure proper hostname resolution and apply critical tags for correlating telemetry in Datadog.
+- [**Set up Log Collection**](http://localhost:1313/opentelemetry/config/log_collection): Configure the filelog receiver to collect logs from files and forward them to Datadog, enabling unified logs, metrics, and traces.
+- [**Enable the OTLP Receiver**](http://localhost:1313/opentelemetry/config/otlp_receiver): Configure the OTLP receiver to accept traces, metrics, and logs from your OpenTelemetry-instrumented applications over gRPC or HTTP.
+- [**Tune Batch and Memory Settings**](http://localhost:1313/opentelemetry/config/collector_batch_memory): Optimize your Collector's performance and resource consumption by configuring the batch processor and memory limiter.
+
+## Further Reading{% #further-reading %}
+
+- [Install the DDOT Collector (Recommended)](http://localhost:1313/opentelemetry/setup/ddot_collector/install/)
+- [Feature Compatibility](http://localhost:1313/opentelemetry/compatibility/)
diff --git a/opentelemetry-mdoc/setup/collector_exporter/install/index.md b/opentelemetry-mdoc/setup/collector_exporter/install/index.md
new file mode 100644
index 0000000000000..9da4718657774
--- /dev/null
+++ b/opentelemetry-mdoc/setup/collector_exporter/install/index.md
@@ -0,0 +1,275 @@
+---
+title: Set Up the OpenTelemetry Collector
+description: Send OpenTelemetry data to the OpenTelemetry Collector and Datadog Exporter
+breadcrumbs: >-
+  Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Install
+  and Configure the OpenTelemetry Collector > Set Up the OpenTelemetry Collector
+---
+
+# Set Up the OpenTelemetry Collector
+
+## Overview{% #overview %}
+
+The OpenTelemetry Collector enables you to collect, process, and export telemetry data from your applications in a vendor-neutral way. When configured with the [Datadog Exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/datadogexporter) and [Datadog Connector](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/connector/datadogconnector), you can send your traces, logs, and metrics to Datadog without the Datadog Agent.
+
+- **Datadog Exporter**: Forwards trace, metric, and log data from OpenTelemetry SDKs to Datadog (without the Datadog Agent)
+- **Datadog Connector**: Calculates Trace Metrics from collected span data
+
+{% image
+  source="http://localhost:1313/images/opentelemetry/setup/otel-collector.0480e3141dece4beac1203109a2cbf8a.png?auto=format"
+  alt="Diagram: OpenTelemetry SDK in code sends data through OTLP to host running OpenTelemetry Collector with Datadog Exporter, which forwards to Datadog's Observability Platform." /%}
+
+{% alert level="info" %}
+To see which Datadog features are supported with this setup, see the [feature compatibility table](http://localhost:1313/opentelemetry/compatibility/) under Full OTel.
+{% /alert %} + +## Install and configure{% #install-and-configure %} + +### 1 - Download the OpenTelemetry Collector + +Download the latest release of the OpenTelemetry Collector Contrib distribution, from [the project's repository](https://github.com/open-telemetry/opentelemetry-collector-releases/releases/latest). + +### 2 - Configure the Datadog Exporter and Connector + +To use the Datadog Exporter and Datadog Connector, configure them in your [OpenTelemetry Collector configuration](https://opentelemetry.io/docs/collector/configuration/): + +1. Create a configuration file named `collector.yaml`. +1. Use the following example file to get started. +1. Set your Datadog API key as the `DD_API_KEY` environment variable. + +{% alert level="warning" %} +The following examples use `0.0.0.0` as the endpoint address for convenience. This allows connections from any network interface. For enhanced security, especially in local deployments, consider using `localhost` instead. For more information on secure endpoint configuration, see the [OpenTelemetry security documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks). +{% /alert %} + +```yaml +receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + grpc: + endpoint: 0.0.0.0:4317 + # The hostmetrics receiver is required to get correct infrastructure metrics in Datadog. + hostmetrics: + collection_interval: 10s + scrapers: + paging: + metrics: + system.paging.utilization: + enabled: true + cpu: + metrics: + system.cpu.utilization: + enabled: true + disk: + filesystem: + metrics: + system.filesystem.utilization: + enabled: true + load: + memory: + network: + processes: + # The prometheus receiver scrapes metrics needed for the OpenTelemetry Collector Dashboard. + prometheus: + config: + scrape_configs: + - job_name: 'otelcol' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + + filelog: + include_file_path: true + poll_interval: 500ms + include: + - /var/log/**/*example*/*.log + +processors: + batch: + send_batch_max_size: 100 + send_batch_size: 10 + timeout: 10s + +connectors: + datadog/connector: + +exporters: + datadog/exporter: + api: + site: + key: ${env:DD_API_KEY} + +service: + pipelines: + metrics: + receivers: [hostmetrics, prometheus, otlp, datadog/connector] + processors: [batch] + exporters: [datadog/exporter] + traces: + receivers: [otlp] + processors: [batch] + exporters: [datadog/connector, datadog/exporter] + logs: + receivers: [otlp, filelog] + processors: [batch] + exporters: [datadog/exporter] +``` + +This basic configuration enables the receiving of OTLP data over HTTP and gRPC, and sets up a [batch processor](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md). + +For a complete list of configuration options for the Datadog Exporter, see the [fully documented example configuration file](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/collector.yaml). Additional options like `api::site` and `host_metadata` settings may be relevant depending on your deployment. + +#### Batch processor configuration{% #batch-processor-configuration %} + +The batch processor is required for non-development environments. The exact configuration depends on your specific workload and signal types. 
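+
+For example, a minimal sketch pairing the batch processor with a memory limiter (the values shown are assumptions to tune for your own workload, not Datadog recommendations):
+
+```yaml
+processors:
+  # Drop data instead of exhausting memory when the Collector is overloaded.
+  memory_limiter:
+    check_interval: 1s
+    limit_mib: 1000
+  # Group telemetry into batches that stay under Datadog's intake limits.
+  batch:
+    send_batch_max_size: 100
+    send_batch_size: 10
+    timeout: 10s
+```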
+
+Configure the batch processor based on Datadog's intake limits:
+
+- Trace intake: 3.2MB
+- Log intake: [5MB uncompressed](http://localhost:1313/api/latest/logs/)
+- Metrics V2 intake: [500KB or 5MB after decompression](http://localhost:1313/api/latest/metrics/#submit-metrics)
+
+You may get `413 - Request Entity Too Large` errors if you batch too much telemetry data in the batch processor.
+
+### 3 - Configure your application
+
+To get better metadata for traces and for smooth integration with Datadog:
+
+- **Use resource detectors**: If they are provided by the language SDK, attach container information as resource attributes. For example, in Go, use the [`WithContainer()`](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#WithContainer) resource option.
+
+- **Apply [Unified Service Tagging](http://localhost:1313/getting_started/tagging/unified_service_tagging/)**: Make sure you've configured your application with the appropriate resource attributes for unified service tagging. This ties Datadog telemetry together with tags for service name, deployment environment, and service version. The application should set these tags using the OpenTelemetry semantic conventions: `service.name`, `deployment.environment`, and `service.version`.
+
+### 4 - Configure the logger for your application
+
+{% image
+  source="http://localhost:1313/images/logs/log_collection/otel_collector_logs.9199b033a83b71b3720ad97a95fddcdc.png?auto=format"
+  alt="A diagram showing the host, container, or application sending data to the filelog receiver in the collector and the Datadog Exporter in the collector sending the data to the Datadog backend" /%}
+
+Since the OpenTelemetry SDKs' logging functionality is not fully supported (see your specific language in the [OpenTelemetry documentation](https://opentelemetry.io/docs/instrumentation/) for more information), Datadog recommends using a standard logging library for your application. Follow the language-specific [Log Collection documentation](http://localhost:1313/logs/log_collection/?tab=host) to set up the appropriate logger in your application. Datadog strongly encourages setting up your logging library to output your logs in JSON to avoid the need for [custom parsing rules](http://localhost:1313/logs/log_configuration/parsing/).
+
+#### Configure the filelog receiver{% #configure-the-filelog-receiver %}
+
+Configure the filelog receiver using [operators](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/stanza/docs/operators). For example, if there is a service `checkoutservice` that is writing logs to `/var/log/pods/services/checkout/0.log`, a sample log might look like this:
+
+```
+{"level":"info","message":"order confirmation email sent to \"jack@example.com\"","service":"checkoutservice","span_id":"197492ff2b4e1c65","timestamp":"2022-10-10T22:17:14.841359661Z","trace_id":"e12c408e028299900d48a9dd29b0dc4c"}
+```
+
+Example filelog configuration:
+
+```yaml
+filelog:
+  include:
+    - /var/log/pods/**/*checkout*/*.log
+  start_at: end
+  poll_interval: 500ms
+  operators:
+    - id: parse_log
+      type: json_parser
+      parse_from: body
+    - id: trace
+      type: trace_parser
+      trace_id:
+        parse_from: attributes.trace_id
+      span_id:
+        parse_from: attributes.span_id
+  attributes:
+    ddtags: env:staging
+```

+- `include`: The list of files the receiver tails
+- `start_at: end`: Signals to read newly written content
+- `poll_interval`: Sets the poll frequency
+- Operators:
+  - `json_parser`: Parses JSON logs.
 By default, the filelog receiver converts each log line into a log record, which is the `body` of the logs' [data model](https://opentelemetry.io/docs/reference/specification/logs/data-model/). Then, the `json_parser` converts the JSON body into attributes in the data model.
+  - `trace_parser`: Extracts the `trace_id` and `span_id` from the log to correlate logs and traces in Datadog.
+
+#### Remap OTel's `service.name` attribute to `service` for logs{% #remap-otels-servicename-attribute-to-service-for-logs %}
+
+For Datadog Exporter versions 0.83.0 and later, the `service` field of OTel logs is populated as [OTel semantic convention](https://opentelemetry.io/docs/specs/semconv/resource/#service) `service.name`. However, `service.name` is not one of the default [service attributes](http://localhost:1313/logs/log_configuration/pipelines/?tab=service#service-attribute) in Datadog's log preprocessing.
+
+To get the `service` field correctly populated in your logs, you can specify `service.name` to be the source of a log's service by setting a [log service remapper processor](http://localhost:1313/logs/log_configuration/processors/?tab=ui#service-remapper).
+
+{% collapsible-section %}
+#### Optional: Using Kubernetes
+
+There are multiple ways to deploy the OpenTelemetry Collector and Datadog Exporter in a Kubernetes infrastructure. For the filelog receiver to work, the [Agent/DaemonSet deployment](https://opentelemetry.io/docs/collector/deployment/#agent) is the recommended deployment method.
+
+In containerized environments, applications write logs to `stdout` or `stderr`. Kubernetes collects the logs and writes them to a standard location. You need to mount the location on the host node into the Collector for the filelog receiver. Below is an [extension example](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/daemonset.yaml) with the mounts required for sending logs.
+
+```yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: otel-agent
+  labels:
+    app: opentelemetry
+    component: otel-collector
+spec:
+  template:
+    metadata:
+      labels:
+        app: opentelemetry
+        component: otel-collector
+    spec:
+      containers:
+      - name: collector
+        command:
+          - "/otelcol-contrib"
+          - "--config=/conf/otel-agent-config.yaml"
+        image: otel/opentelemetry-collector-contrib:0.71.0
+        env:
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        # The k8s.pod.ip is used to associate pods for k8sattributes
+        - name: OTEL_RESOURCE_ATTRIBUTES
+          value: "k8s.pod.ip=$(POD_IP)"
+        ports:
+        - containerPort: 4318 # default port for OpenTelemetry HTTP receiver.
+          hostPort: 4318
+        - containerPort: 4317 # default port for OpenTelemetry gRPC receiver.
+          hostPort: 4317
+        - containerPort: 8888 # Default endpoint for querying metrics.
+        volumeMounts:
+        - name: otel-agent-config-vol
+          mountPath: /conf
+        - name: varlogpods
+          mountPath: /var/log/pods
+          readOnly: true
+        - name: varlibdockercontainers
+          mountPath: /var/lib/docker/containers
+          readOnly: true
+      volumes:
+      - name: otel-agent-config-vol
+        configMap:
+          name: otel-agent-conf
+          items:
+            - key: otel-agent-config
+              path: otel-agent-config.yaml
+      # Mount nodes log file location.
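+      # These hostPath volumes must match the volumeMounts above so the filelog receiver can read pod logs from the node.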
+ - name: varlogpods + hostPath: + path: /var/log/pods + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers +``` + +{% /collapsible-section %} + +## Out-of-the-box Datadog Exporter configuration{% #out-of-the-box-datadog-exporter-configuration %} + +You can find working examples of out-of-the-box configuration for Datadog Exporter in the [`exporter/datadogexporter/examples` folder](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/) in the OpenTelemetry Collector Contrib project. See the full configuration example file, [`ootb-ec2.yaml`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/ootb-ec2.yaml). **Note**: This example is for applications running directly on an EC2 host. For containerized applications, see the [deployment documentation](http://localhost:1313/opentelemetry/collector_exporter/deployment). + +Configure each of the following components to suit your needs: + +- [OTLP Receiver](http://localhost:1313/opentelemetry/collector_exporter/otlp_receiver/) +- [Hostname and Tags](http://localhost:1313/opentelemetry/collector_exporter/hostname_tagging/) +- [Batch and Memory Settings](http://localhost:1313/opentelemetry/collector_exporter/collector_batch_memory/) + +## Further reading{% #further-reading %} + +- [Collector documentation](https://opentelemetry.io/docs/collector/) +- [Send metrics, traces, and logs from OpenTelemetry Collector to Datadog using Datadog Exporter](https://www.datadoghq.com/blog/ingest-opentelemetry-traces-metrics-with-datadog-exporter/) diff --git a/opentelemetry-mdoc/setup/ddot_collector/custom_components/index.md b/opentelemetry-mdoc/setup/ddot_collector/custom_components/index.md new file mode 100644 index 0000000000000..c75c7ad1c138b --- /dev/null +++ b/opentelemetry-mdoc/setup/ddot_collector/custom_components/index.md @@ -0,0 +1,277 @@ +--- +title: >- + Use Custom OpenTelemetry Components with Datadog Distribution of OpenTelemetry + (DDOT) Collector +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Datadog + Distribution of OpenTelemetry Collector > Use Custom OpenTelemetry Components + with Datadog Distribution of OpenTelemetry (DDOT) Collector +--- + +# Use Custom OpenTelemetry Components with Datadog Distribution of OpenTelemetry (DDOT) Collector + +{% callout %} +# Important note for users on the following Datadog sites: app.ddog-gov.com + + + +{% alert level="danger" %} +FedRAMP customers should not enable or use the embedded OpenTelemetry Collector. +{% /alert %} + + +{% /callout %} + +This guide explains how to build a DDOT Collector image with additional OpenTelemetry components not included in the default DDOT Collector. To see a list of components already included in the DDOT Collector by default, see [Included components](http://localhost:1313/opentelemetry/agent/#included-components). + +## Prerequisites{% #prerequisites %} + +To complete this guide, you need the following: + +- [Docker](https://docs.docker.com/engine/install/) +- GitHub and access to the [Datadog Agent](https://github.com/DataDog/datadog-agent) source code that contains DDOT Collector. +- The OpenTelemetry components you plan to include in the Agent source code must be compatible with DDOT Collector version. 
+ +**Recommended**: + +- Familiarity with [building a custom collector](https://opentelemetry.io/docs/collector/custom-collector/) and [OpenTelemetry Collector Builder](https://github.com/open-telemetry/opentelemetry-collector/blob/main/cmd/builder/README.md) (OCB). +- Basic understanding of the [Go](https://go.dev/) compilation process and [Go modules](https://go.dev/blog/using-go-modules). + +## Download the Dockerfile{% #download-the-dockerfile %} + +Download the Dockerfile template: + +1. Go to your preferred file location in a terminal. Run the following commands to create a new folder (for example, named `agent-ddot`) and cd into it. + ```shell + mkdir -p agent-ddot + cd agent-ddot + ``` +1. Download the Dockerfile + ```shell + curl -o Dockerfile https://raw.githubusercontent.com/DataDog/datadog-agent/refs/tags/7.67.1/Dockerfiles/agent-ddot/Dockerfile.agent-otel + ``` + +The Dockerfile: + +- Creates a [multi-stage build](https://docs.docker.com/build/building/multi-stage/) with Ubuntu 24.04 and `datadog/agent:7.67.1-full`. +- Installs Go, Python, and necessary dependencies. +- Downloads and unpacks the DDOT Collector source code. +- Creates a virtual environment and installs required Python packages. +- Builds the DDOT Collector (also known as OTel Agent) and copies the resulting binary to the final image. + +{% alert level="info" %} +The `main` branch has the most up-to-date version of the [Dockerfile](https://github.com/DataDog/datadog-agent/blob/main/Dockerfiles/agent-ddot/Dockerfile.agent-otel). However, it is a development branch that is subject to frequent changes and is less stable than the release tags. For production and other stable use cases, use the tagged versions as listed in this guide. +{% /alert %} + +## Create an OpenTelemetry Collector Builder manifest{% #create-an-opentelemetry-collector-builder-manifest %} + +Create and customize an OpenTelemetry Collector Builder (OCB) manifest file, which defines the components to be included in your custom Datadog Agent. + +1. Download the Datadog default manifest: + ```shell + curl -o manifest.yaml https://raw.githubusercontent.com/DataDog/datadog-agent/refs/tags/7.67.1/comp/otelcol/collector-contrib/impl/manifest.yaml + ``` +1. Open the `manifest.yaml` file and add the additional OpenTelemetry components to the corresponding sections (extensions, exporters, processors, receivers, or connectors). 
The `processors` entry in this example adds a [metrics transform processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/metricstransformprocessor/README.md):
+   ```yaml
+   dist:
+     module: github.com/DataDog/comp/otelcol/collector-contrib
+     name: otelcol-contrib
+     description: Datadog OpenTelemetry Collector
+     version: 0.125.0
+     output_path: ./comp/otelcol/collector-contrib/impl
+     otelcol_version: 0.125.0
+
+   extensions:
+     # You will see a list of extensions already included by Datadog
+     # Add your desired extensions here
+
+   exporters:
+     # You will see a list of exporters already included by Datadog
+     # Add your desired exporters here
+
+   processors:
+     # adding metrics transform processor to modify metrics
+     - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.125.0
+
+   receivers:
+     - gomod: go.opentelemetry.io/collector/receiver/nopreceiver v0.125.0
+     - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.125.0
+     - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.125.0
+     - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.125.0
+     - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.125.0
+     - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.125.0
+     - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.125.0
+     - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.125.0
+     - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.125.0
+
+   connectors:
+     # You will see a list of connectors already included by Datadog
+     # Add your desired connectors here
+   ```
+1. Save your changes to the manifest file.
+
+## Build and push the DDOT Collector (Agent) image{% #build-and-push-the-ddot-collector-agent-image %}
+
+The custom DDOT Collector (Agent) image you build needs to be stored in your organization's private container registry for your clusters to access it. Additionally, this build process must be repeated each time you update the Agent version to maintain compatibility with new Agent releases.
+
+Build your custom Datadog Agent image and push it to a container registry.
+
+1. Build the image with Docker:
+
+   ```shell
+   docker build . -t agent-ddot --no-cache \
+     --build-arg AGENT_REPO="datadog/agent" \
+     --build-arg AGENT_VERSION="7.67.1-full" \
+     --build-arg AGENT_BRANCH="7.67.x"
+   ```
+
+1. Tag and push the image:
+
+   ```shell
+   docker tag agent-ddot datadog/agent:
+   docker push datadog/agent:
+   ```
+
+Ensure your custom image name is `datadog/agent` to guarantee that all platform features work correctly. If the target repository is not Docker Hub, you need to include the repository name:
+
+   ```shell
+   docker push /datadog/agent:
+   ```
+
+1. For a Helm chart installation, set the image tag in your values file. In the `datadog-values.yaml` file:
+
+   ```yaml
+   agents:
+     image:
+       repository:
+       tag:
+       doNotCheckTag: true
+   ```
+
+Replace the `repository` and `tag` values with your repository name and desired image tag.
+
+## Test and validate{% #test-and-validate %}
+
+Create a sample configuration file and run your custom DDOT Collector (Agent) to ensure everything is working correctly.
+
+1. Create a sample OpenTelemetry configuration file with the additional components.
The following example configures an additional [metrics transform processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/metricstransformprocessor/README.md): + ```yaml + receivers: + otlp: + protocols: + http: + endpoint: "0.0.0.0:4318" + grpc: + endpoint: "0.0.0.0:4317" + + processors: + batch: + send_batch_max_size: 1000 + send_batch_size: 100 + timeout: 10s + # Rename system.cpu.usage to system.cpu.usage_time + metricstransform: + transforms: + - include: system.cpu.usage + action: update + new_name: system.cpu.usage_time + + exporters: + datadog: + api: + site: ${env:DD_SITE} + key: ${env:DD_API_KEY} + + connectors: + datadog/connector: + traces: + + service: + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [datadog, datadog/connector] + metrics: + receivers: [otlp, datadog/connector, prometheus] + processors: [metricstransform, infraattributes, batch] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [datadog] + ``` +1. Run the DDOT Collector (Agent) using the following Docker command. + ```shell + docker run -it \ + -e DD_API_KEY=XX \ + -e DD_SITE=datadoghq.com \ + -e DD_HOSTNAME=datadog \ + -v $(pwd)/config.yaml:/config.yaml \ + -p 4317:4317 \ + -p 4318:4318 \ + --entrypoint otel-agent \ + agent-ddot --config /config.yaml + ``` +1. If the DDOT Collector (Agent) starts, then the build process was successful. + +You can now use this new image to install the DDOT Collector. This enables Datadog monitoring capabilities along with the additional OpenTelemetry components you've added. + +For detailed instructions on installing and configuring the DDOT Collector with added OpenTelemetry components, see the [Install the Datadog Distribution of OTel Collector](http://localhost:1313/opentelemetry/setup/ddot_collector/install/) guide. + +## Troubleshooting{% #troubleshooting %} + +This section discusses some common issues you might encounter while building and running your custom DDOT Collector, along with their solutions: + +### Compatibility issues with `awscontainerinsightreceiver`{% #compatibility-issues-with-awscontainerinsightreceiver %} + +**Problem**: You may encounter errors related to `awscontainerinsightreceiver` during the build: + +```text +#0 0.879 go: downloading github.com/tidwall/gjson v1.17.1 +#0 0.889 go: downloading code.cloudfoundry.org/go-diodes v0.0.0-20240604201846-c756bfed2ed3 +#0 0.916 go: downloading github.com/hashicorp/go-retryablehttp v0.7.5 +#0 0.940 go: downloading github.com/tidwall/pretty v1.2.1 +#0 88.24 # github.com/opencontainers/runc/libcontainer/cgroups/ebpf +#0 88.24 /go/pkg/mod/github.com/opencontainers/runc@v1.1.12/libcontainer/cgroups/ebpf/ebpf_linux.go:190:3: unknown field Replace in struct literal of type link.RawAttachProgramOptions +#0 89.14 # github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/k8sapiserver +#0 89.14 /go/pkg/mod/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver@v0.115.0/internal/k8sapiserver/k8sapiserver.go:47:68: undefined: record.EventRecorderLogger +------ +``` + +**Solution**: Remove `awscontainerinsightreceiver` from the `manifest.yaml` file. This receiver has incompatible libraries and cannot be included in the build. + +### Build process failures{% #build-process-failures %} + +**Problem**: You receive the following error: + +```text +ERROR: failed to solve: process "/bin/sh -c . 
venv/bin/activate && invoke otel-agent.build" did not complete successfully: chown /var/lib/docker/overlay2/r75bx8o94uz6t7yr3ae6gop0b/work/work: no such file or directory
+```
+
+**Solution**: Run the build command again:
+
+```shell
+docker build . -t agent-ddot --no-cache
+```
+
+### Insufficient disk space{% #insufficient-disk-space %}
+
+**Problem**: You may encounter errors related to insufficient disk space, such as:
+
+```text
+no space left on device
+```
+
+**Solution**: Clear up Docker space:
+
+```shell
+docker system prune -a
+```
+
+## Further reading{% #further-reading %}
+
+- [Use Custom OpenTelemetry Components with DDOT Collector](http://localhost:1313/opentelemetry/setup/ddot_collector/install/)
diff --git a/opentelemetry-mdoc/setup/ddot_collector/index.md b/opentelemetry-mdoc/setup/ddot_collector/index.md
new file mode 100644
index 0000000000000..b6e640af04f39
--- /dev/null
+++ b/opentelemetry-mdoc/setup/ddot_collector/index.md
@@ -0,0 +1,215 @@
+---
+title: Datadog Distribution of OpenTelemetry Collector
+description: Datadog, the leading service for cloud-scale monitoring.
+breadcrumbs: >-
+  Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Datadog
+  Distribution of OpenTelemetry Collector
+---
+
+# Datadog Distribution of OpenTelemetry Collector
+
+{% callout %}
+# Important note for users on the following Datadog sites: app.ddog-gov.com
+
+{% alert level="danger" %}
+**The Datadog Distribution of OpenTelemetry Collector (DDOT) is not yet FedRAMP/FIPS compliant.**
+
+- If you require a FedRAMP or FIPS-compliant data collection pipeline, use the [FIPS-enabled Datadog Agent](http://localhost:1313/agent/configuration/fips-compliance/?tab=linux).
+- If you are a GovCloud customer whose only requirement is data residency in the GovCloud (US1-FED) data center, you **may** use the DDOT Collector.
+{% /alert %}
+
+{% /callout %}
+
+{% callout %}
+##### Join the Preview!
+
+The DDOT Collector for Kubernetes is **Generally Available**. You can get started by following the instructions below. Deploying the DDOT Collector on Linux-based bare-metal hosts and virtual machines is **in Preview**. To get started, follow the [Linux documentation](http://localhost:1313/opentelemetry/setup/ddot_collector/install/linux).
+{% /callout %}
+
+## Overview{% #overview %}
+
+The Datadog distribution of OpenTelemetry (DDOT) Collector is an open source solution that combines the flexibility of OpenTelemetry (OTel) with the comprehensive observability capabilities of Datadog. This integrated solution includes:
+
+- A curated set of OpenTelemetry components optimized for performance and reliability with Datadog, with the ability to add additional components of your choosing
+- Full data collection and processing capabilities of the Datadog Agent for seamless integration and robust monitoring, including [Datadog Fleet Automation](http://localhost:1313/agent/fleet_automation/) support for the DDOT Collector (see Key benefits)
+- Custom Datadog components designed to deliver the best onboarding experience
+
+{% image
+  source="http://localhost:1313/images/opentelemetry/setup/ddot-collector-2.48e827fe0ea4d62cd26a81521e9fa584.png?auto=format"
+  alt="Architecture overview for DDOT Collector, which is embedded in the Datadog Agent."
/%} + +## Key benefits{% #key-benefits %} + +The DDOT Collector offers: + +### Comprehensive observability{% #comprehensive-observability %} + +- Access 1,000 Datadog integrations, [Live Container Monitoring](http://localhost:1313/containers/), [Cloud Network Monitoring](http://localhost:1313/network_monitoring/cloud_network_monitoring/), and [Universal Service Monitoring](http://localhost:1313/universal_service_monitoring/) (with eBPF) and more +- Leverage OpenTelemetry community-contributed integrations to collect telemetry in OpenTelemetry Protocol (OTLP) native format +- Control your OTLP data with the Collector's processing and routing capabilities + +### Simplified fleet management{% #simplified-fleet-management %} + +- Remotely manage fleets of DDOT Collectors with [Datadog Fleet Automation](http://localhost:1313/agent/fleet_automation/) +- Gain visibility into your entire configuration, dependencies, and runtime environment +- Onboard faster with out-of-the-box tagging enrichment for OTLP data, automatically enabling [unified service tagging](http://localhost:1313/getting_started/tagging/unified_service_tagging/) + +### Enterprise reliability and resources{% #enterprise-reliability-and-resources %} + +- Benefit from Datadog's robust security practices, including regular vulnerability scans and analysis +- Access Datadog's global support team for assistance with onboarding and troubleshooting + +## Included components{% #included-components %} + +{% alert level="info" %} +**Need additional OpenTelemetry components?** If you need components beyond those included in the default package, follow [Use Custom OpenTelemetry Components](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components) to extend the Datadog Agent's capabilities. For a list of components included by default, see the following OpenTelemetry Collector components section. +{% /alert %} + +### Support levels{% #support-levels %} + +Datadog provides different levels of support depending on the type of component: + +- **Datadog Supported Components**: Datadog-owned components such as the [Datadog Connector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/connector/datadogconnector/README.md), [Datadog Exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/README.md), and [Infra Attribute Processor](https://github.com/DataDog/datadog-agent/tree/main/comp/otelcol/otlp/components/processor/infraattributesprocessor#readme). These components are maintained by Datadog, receive regular updates, and are prioritized for bug fixes and feature enhancements within OpenTelemetry community guidelines. + +- **Community Supported Components**: OpenTelemetry components included with the Agent by default. Datadog ensures these components are secure, stable, and compatible with the Agent. + +- **Custom Components**: OpenTelemetry components that are not included with the Agent by default and are added through the [custom components process](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components). Datadog provides guidance on the integration process but does not provide direct support for these components' functionality. For issues with custom components, Datadog recommends engaging with the OpenTelemetry community or the component maintainers. + +### OpenTelemetry Collector components{% #opentelemetry-collector-components %} + +By default, the DDOT Collector ships with the following Collector components. 
You can also see the list in [YAML format](https://github.com/DataDog/datadog-agent/blob/main/comp/otelcol/collector-contrib/impl/manifest.yaml). + +{% collapsible-section %} +Receivers +- [filelogreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/filelogreceiver/README.md) + +- [fluentforwardreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/fluentforwardreceiver/README.md) + +- [hostmetricsreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/hostmetricsreceiver/README.md) + +- [jaegerreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/jaegerreceiver/README.md) + +- [otlpreceiver](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/README.md) + +- [prometheusreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/prometheusreceiver/README.md) + +- [receivercreator](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/receivercreator/README.md) + +- [zipkinreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/zipkinreceiver/README.md) + +- [nopreceiver](https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver/nopreceiver#readme) + +{% /collapsible-section %} + +{% collapsible-section %} +Processors +- [attributesprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/attributesprocessor/README.md) + +- [batchprocessor](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md) + +- [cumulativetodeltaprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/cumulativetodeltaprocessor/README.md) + +- [filterprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/filterprocessor/README.md) + +- [groupbyattributeprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/groupbyattrsprocessor/README.md) + +- [k8sattributesprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/k8sattributesprocessor/README.md) + +- [memorylimiterprocessor](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiterprocessor/README.md) + +- [probabilisticsamplerprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/probabilisticsamplerprocessor/README.md) + +- [resourcedetectionprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md) + +- [resourceprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourceprocessor/README.md) + +- routingprocessor (deprecated and removed in v7.71.0; use the [routingconnector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/connector/routingconnector/README.md) instead) + +- [tailsamplingprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/tailsamplingprocessor/README.md) + +- [transformprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md) + +{% /collapsible-section %} + +{% collapsible-section %} +Exporters +- 
[datadogexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/README.md) + +- [debugexporter](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/debugexporter/README.md) + +- [loadbalancingexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/loadbalancingexporter/README.md) + +- [otlpexporter](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/otlpexporter/README.md) + +- [otlphttpexporter](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/otlphttpexporter/README.md) + +- [sapmexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/sapmexporter/README.md) + +- [nopexporter](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/nopexporter/README.md) + +{% /collapsible-section %} + +{% collapsible-section %} +Connectors +- [datadogconnector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/connector/datadogconnector/README.md) + +- [routingconnector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/connector/routingconnector/README.md) (available since version 7.68.0) + +- [spanmetricsconnector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/connector/spanmetricsconnector/README.md) + +{% /collapsible-section %} + +{% collapsible-section %} +Extensions +- [healthcheckextension](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/extension/healthcheckextension/README.md) + +- [observer](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/extension/observer/README.md) + +- [pprofextension](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/extension/pprofextension/README.md) + +- [zpagesextension](https://github.com/open-telemetry/opentelemetry-collector/blob/main/extension/zpagesextension/README.md) + +{% /collapsible-section %} + +### Custom Datadog components{% #custom-datadog-components %} + +In addition to standard OpenTelemetry components, Datadog provides and maintains the following custom components: + +{% collapsible-section %} +Datadog components +- [Infrastructure Attribute Processor](https://github.com/DataDog/datadog-agent/tree/main/comp/otelcol/otlp/components/processor/infraattributesprocessor#readme): An OpenTelemetry processor component that automatically assigns [Kubernetes tags](http://localhost:1313/containers/kubernetes/tag/?tab=datadogoperator#out-of-the-box-tags) to OTLP telemetry (metrics, traces, and logs) emitted by a pod or an individual container within a pod. This component enables [unified service tagging](http://localhost:1313/getting_started/tagging/unified_service_tagging/?tab=kubernetes) and telemetry correlation for monitoring Kubernetes environments. + +- [Converter](https://github.com/DataDog/datadog-agent/tree/main/comp/otelcol/converter#readme): An OpenTelemetry converter component that enhances user-provided configurations. It offers an API to return both the original and enhanced configurations, automatically checking for known misconfigurations to reduce errors. This ensures seamless integration of existing OpenTelemetry Collector configurations with the Agent. 
+ +- [DD Flare Extension](https://github.com/DataDog/datadog-agent/tree/main/comp/otelcol/ddflareextension#readme): An OpenTelemetry extension component for generating Agent Flare, which contains diagnostic information from both the DDOT Collector and the Agent for troubleshooting purposes. + +{% /collapsible-section %} + +## Get started{% #get-started %} + +Whether you're new to Datadog or already familiar with OpenTelemetry, the following guides help you get started according to your specific situation. + +### Quick start with the default Agent package{% #quick-start-with-the-default-agent-package %} + +The default Datadog Agent package includes a DDOT Collector with a curated set of included OpenTelemetry components designed to meet most needs out of the box. This guide is suitable if you're: + +- Setting up monitoring from scratch without needing OpenTelemetry components outside the included components +- Using the Datadog Agent and want to test OpenTelemetry functionality with included components +- Transitioning from OpenTelemetry Collector to Datadog Agent without requiring components beyond those included by default +- (Optional) If you need OpenTelemetry components beyond what's provided in the default package, follow [Use Custom OpenTelemetry Components](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components) to extend the Datadog Agent's capabilities. + +- [Quick start with the default Agent package](http://localhost:1313/opentelemetry/setup/ddot_collector/install/kubernetes) + +### Migrate from OpenTelemetry Collector to Datadog Agent{% #migrate-from-opentelemetry-collector-to-datadog-agent %} + +This guide helps you migrate from an existing OpenTelemetry Collector setup to the Datadog Agent, including scenarios where you need additional OpenTelemetry components. This guide is suitable if you're: + +- Transitioning from OpenTelemetry Collector while preserving your existing setup +- Migrating your existing OpenTelemetry configurations to maintain continuity +- (Optional) If you need OpenTelemetry components beyond what's provided in the default package, follow [Use Custom OpenTelemetry Components](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components) to extend the Datadog Agent's capabilities + +- [Migrate from OpenTelemetry Collector to Datadog Agent](http://localhost:1313/opentelemetry/guide/migrate/ddot_collector) + +## Further reading{% #further-reading %} + +- [Unify OpenTelemetry and Datadog with the DDOT Collector](https://www.datadoghq.com/blog/datadog-distribution-otel-collector/) diff --git a/opentelemetry-mdoc/setup/ddot_collector/install/index.md b/opentelemetry-mdoc/setup/ddot_collector/install/index.md new file mode 100644 index 0000000000000..0e164ba6ec5ba --- /dev/null +++ b/opentelemetry-mdoc/setup/ddot_collector/install/index.md @@ -0,0 +1,9 @@ +--- +title: Install +description: Datadog, the leading service for cloud-scale monitoring. 
+breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Datadog + Distribution of OpenTelemetry Collector > Install +--- + +# Install diff --git a/opentelemetry-mdoc/setup/ddot_collector/install/kubernetes/index.md b/opentelemetry-mdoc/setup/ddot_collector/install/kubernetes/index.md new file mode 100644 index 0000000000000..7d3fd869e2e5d --- /dev/null +++ b/opentelemetry-mdoc/setup/ddot_collector/install/kubernetes/index.md @@ -0,0 +1,977 @@ +--- +title: Install the Datadog Distribution of OTel Collector on Kubernetes +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Datadog + Distribution of OpenTelemetry Collector > Install > Install the Datadog + Distribution of OTel Collector on Kubernetes +--- + +# Install the Datadog Distribution of OTel Collector on Kubernetes + +{% callout %} +# Important note for users on the following Datadog sites: app.ddog-gov.com + + + +{% alert level="danger" %} +FedRAMP customers should not enable or use the embedded OpenTelemetry Collector. +{% /alert %} + + +{% /callout %} + +## Overview{% #overview %} + +Follow this guide to install the Datadog Distribution of OpenTelemetry (DDOT) Collector using Helm or the Datadog Operator. + +{% alert level="info" %} +**Need additional OpenTelemetry components?** If you need components beyond those included in the default package, follow [Use Custom OpenTelemetry Components](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components) to extend the Datadog Agent's capabilities. For a list of components included by default, see [OpenTelemetry Collector components](http://localhost:1313/opentelemetry/agent/#opentelemetry-collector-components). +{% /alert %} + +## Requirements{% #requirements %} + +To complete this guide, you need the following: + +**Datadog account**: + +1. [Create a Datadog account](https://www.datadoghq.com/free-datadog-trial/) if you don't have one. +1. Find or create your [Datadog API key](https://app.datadoghq.com/organization-settings/api-keys/). + +**Software**: Install and set up the following on your machine: + +- A Kubernetes cluster (v1.29+) + - **Note**: EKS Fargate and GKE Autopilot environments are not supported +- [Helm (v3+)](https://helm.sh) +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) + +{% callout %} +##### Join the Preview! + +Support for deploying the DDOT Collector on Linux-based bare-metal hosts and virtual machines is in Preview. To get started, follow the [Linux documentation](http://localhost:1313/opentelemetry/setup/ddot_collector/install/linux). +{% /callout %} + +## Install the Datadog Agent with OpenTelemetry Collector{% #install-the-datadog-agent-with-opentelemetry-collector %} + +### Select installation method{% #select-installation-method %} + +Choose one of the following installation methods: + +- [Datadog Operator](http://localhost:1313/containers/datadog_operator): A [Kubernetes-native](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) approach that automatically reconciles and maintains your Datadog setup. It reports deployment status, health, and errors in its Custom Resource status, and it limits the risk of misconfiguration thanks to higher-level configuration options. +- [Helm chart](https://github.com/DataDog/helm-charts/blob/main/charts/datadog/README.md): A straightforward way to deploy Datadog Agent. 
It provides versioning, rollback, and templating capabilities, making deployments consistent and easier to replicate. + +{% tab title="Datadog Operator" %} +### Install the Datadog Operator{% #install-the-datadog-operator %} + +You can install the Datadog Operator in your cluster using the [Datadog Operator Helm chart](https://github.com/DataDog/helm-charts/blob/main/charts/datadog-operator/README.md): + +```shell +helm repo add datadog https://helm.datadoghq.com +helm repo update +helm install datadog-operator datadog/datadog-operator +``` + +{% /tab %} + +{% tab title="Helm" %} +### Add the Datadog Helm Repository{% #add-the-datadog-helm-repository %} + +To add the Datadog repository to your Helm repositories: + +```shell +helm repo add datadog https://helm.datadoghq.com +helm repo update +``` + +{% /tab %} + +### Set up Datadog API key{% #set-up-datadog-api-key %} + +1. Get the Datadog [API key](https://app.datadoghq.com/organization-settings/api-keys/). +1. Store the API key as a Kubernetes secret: + ```shell + kubectl create secret generic datadog-secret \ + --from-literal api-key= + ``` +Replace `` with your actual Datadog API key. + +### Configure the Datadog Agent{% #configure-the-datadog-agent %} + +{% tab title="Datadog Operator" %} +After deploying the Datadog Operator, create the `DatadogAgent` resource that triggers the deployment of the Datadog Agent, Cluster Agent and Cluster Checks Runners (if used) in your Kubernetes cluster. The Datadog Agent deploys as a DaemonSet, running a pod on every node of your cluster. + +1. Use the `datadog-agent.yaml` file to specify your `DatadogAgent` deployment configuration. + +In the `datadog-agent.yaml` file: + +```yaml + apiVersion: datadoghq.com/v2alpha1 + kind: DatadogAgent + metadata: + name: datadog + spec: + global: + clusterName: + site: + credentials: + apiSecret: + secretName: datadog-secret + keyName: api-key +``` + +- Replace `` with a name for your cluster. +- Replace `` with your [Datadog site](http://localhost:1313/getting_started/site). Your site is . (Ensure the correct **DATADOG SITE** is selected on the right.) +Enable the OpenTelemetry Collector: +In the `datadog-agent.yaml` file: + +```yaml + # Enable Features + features: + otelCollector: + enabled: true +``` + +The Datadog Operator automatically binds the OpenTelemetry Collector to ports `4317` (named `otel-grpc`) and `4318` (named `otel-http`) by default. +(Optional) Enable additional Datadog features: +{% alert level="danger" %} +Enabling these features may incur additional charges. Review the [pricing page](https://www.datadoghq.com/pricing/) and talk to your Customer Success Manager before proceeding. +{% /alert %} + +In the `datadog-agent.yaml` file: + +```yaml + # Enable Features + features: + ... + apm: + enabled: true + orchestratorExplorer: + enabled: true + processDiscovery: + enabled: true + liveProcessCollection: + enabled: true + usm: + enabled: true + clusterChecks: + enabled: true +``` + +When enabling additional Datadog features, always use the Datadog or OpenTelemetry Collector configuration files instead of relying on Datadog environment variables. +{% /tab %} + +{% tab title="Helm" %} +Use a YAML file to specify the Helm chart parameters for the [Datadog Agent chart](https://github.com/DataDog/helm-charts/blob/main/charts/datadog/README.md). + +1. 
Create an empty `datadog-values.yaml` file: + +```shell +touch datadog-values.yaml +``` + +{% alert level="info" %} +Unspecified parameters use defaults from [values.yaml](https://github.com/DataDog/helm-charts/blob/main/charts/datadog/values.yaml). +{% /alert %} +Configure the Datadog API key secret: +In the `datadog-values.yaml` file: + +```yaml +datadog: + site: + apiKeyExistingSecret: datadog-secret +``` + +Set `` to your [Datadog site](http://localhost:1313/getting_started/site/). Otherwise, it defaults to `datadoghq.com`, the US1 site. +Enable the OpenTelemetry Collector and configure the essential ports: +In the `datadog-values.yaml` file: + +```yaml +datadog: + ... + otelCollector: + enabled: true + ports: + - containerPort: "4317" # default port for OpenTelemetry gRPC receiver. + hostPort: "4317" + name: otel-grpc + - containerPort: "4318" # default port for OpenTelemetry HTTP receiver + hostPort: "4318" + name: otel-http +``` + +Set the `hostPort` to expose the container port to the external network. This enables configuring the OTLP exporter to point to the IP address of the node where the Datadog Agent is assigned. + +If you don't want to expose the port, you can use the Agent service instead: + +- Remove the `hostPort` entries from your `datadog-values.yaml` file. +- In your application's deployment file (`deployment.yaml`), configure the OTLP exporter to use the Agent service: + ```yaml + env: + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: 'http://..svc.cluster.local' + - name: OTEL_EXPORTER_OTLP_PROTOCOL + value: 'grpc' + ``` +(Optional) Enable additional Datadog features: +{% alert level="danger" %} +Enabling these features may incur additional charges. Review the [pricing page](https://www.datadoghq.com/pricing/) and talk to your Customer Success Manager before proceeding. +{% /alert %} + +In the `datadog-values.yaml` file: + +```yaml +datadog: + ... + apm: + portEnabled: true + peer_tags_aggregation: true + compute_stats_by_span_kind: true + peer_service_aggregation: true + orchestratorExplorer: + enabled: true + processAgent: + enabled: true + processCollection: true +``` + +When enabling additional Datadog features, always use the Datadog or OpenTelemetry Collector configuration files instead of relying on Datadog environment variables. +(Optional) Collect pod labels and use them as tags to attach to metrics, traces, and logs: +{% alert level="danger" %} +Custom metrics may impact billing. See the [custom metrics billing page](https://docs.datadoghq.com/account_management/billing/custom_metrics) for more information. +{% /alert %} + +In the `datadog-values.yaml` file: + +```yaml +datadog: + ... 
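+  # Map the 'app' and 'release' pod labels to the kube_app and helm_release tags on collected telemetry.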
+ podLabelsAsTags: + app: kube_app + release: helm_release +``` + +{% collapsible-section %} +Completed datadog-values.yaml file +Your `datadog-values.yaml` file should look something like this: + +In the `datadog-values.yaml` file: + +```yaml +datadog: + site: datadoghq.com + apiKeyExistingSecret: datadog-secret + + otelCollector: + enabled: true + ports: + - containerPort: "4317" + hostPort: "4317" + name: otel-grpc + - containerPort: "4318" + hostPort: "4318" + name: otel-http + apm: + portEnabled: true + peer_tags_aggregation: true + compute_stats_by_span_kind: true + peer_service_aggregation: true + orchestratorExplorer: + enabled: true + processAgent: + enabled: true + processCollection: true + + podLabelsAsTags: + app: kube_app + release: helm_release + +``` + +{% /collapsible-section %} + +{% /tab %} + +### Configure the OpenTelemetry Collector{% #configure-the-opentelemetry-collector %} + +{% tab title="Datadog Operator" %} +The Datadog Operator provides a sample OpenTelemetry Collector configuration that you can use as a starting point. If you need to modify this configuration, the Datadog Operator supports two ways of providing a custom Collector configuration: + +- **Inline configuration**: Add your custom Collector configuration directly in the `features.otelCollector.conf.configData` field. +- **ConfigMap-based configuration**: Store your Collector configuration in a ConfigMap and reference it in the `features.otelCollector.conf.configMap` field. This approach allows you to keep Collector configuration decoupled from the `DatadogAgent` resource. + +#### Inline Collector configuration{% #inline-collector-configuration %} + +In the snippet below, the Collector configuration is placed directly under the `features.otelCollector.conf.configData` parameter: + +In the `datadog-agent.yaml` file: + +```yaml + ... + # Enable Features + features: + otelCollector: + enabled: true + ports: + - containerPort: 4317 + hostPort: 4317 + name: otel-grpc + - containerPort: 4318 + hostPort: 4318 + name: otel-http + conf: + configData: |- + receivers: + prometheus: + config: + scrape_configs: + - job_name: "otelcol" + scrape_interval: 10s + static_configs: + - targets: + - 0.0.0.0:8888 + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + exporters: + debug: + verbosity: detailed + datadog: + api: + key: ${env:DD_API_KEY} + site: ${env:DD_SITE} + processors: + infraattributes: + cardinality: 2 + batch: + timeout: 10s + connectors: + datadog/connector: + traces: + service: + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [debug, datadog, datadog/connector] + metrics: + receivers: [otlp, datadog/connector, prometheus] + processors: [infraattributes, batch] + exporters: [debug, datadog] + logs: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [debug, datadog] +``` + +When you apply the `datadog-agent.yaml` file containing this `DatadogAgent` resource, the Operator automatically mounts the Collector configuration into the Agent DaemonSet. 
+ +{% collapsible-section %} +Completed datadog-agent.yaml file with inlined Collector config +Completed `datadog-agent.yaml` with inline Collector configuration should look something like this: + +In the `datadog-agent.yaml` file: + +```yaml +apiVersion: datadoghq.com/v2alpha1 +kind: DatadogAgent +metadata: + name: datadog +spec: + global: + clusterName: + site: + credentials: + apiSecret: + secretName: datadog-secret + keyName: api-key + + # Enable Features + features: + apm: + enabled: true + orchestratorExplorer: + enabled: true + processDiscovery: + enabled: true + liveProcessCollection: + enabled: true + usm: + enabled: true + clusterChecks: + enabled: true + otelCollector: + enabled: true + ports: + - containerPort: 4317 + hostPort: 4317 + name: otel-grpc + - containerPort: 4318 + hostPort: 4318 + name: otel-http + conf: + configData: |- + receivers: + prometheus: + config: + scrape_configs: + - job_name: "datadog-agent" + scrape_interval: 10s + static_configs: + - targets: + - 0.0.0.0:8888 + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + exporters: + debug: + verbosity: detailed + datadog: + api: + key: ${env:DD_API_KEY} + site: ${env:DD_SITE} + processors: + infraattributes: + cardinality: 2 + batch: + timeout: 10s + connectors: + datadog/connector: + traces: + service: + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [debug, datadog, datadog/connector] + metrics: + receivers: [otlp, datadog/connector, prometheus] + processors: [infraattributes, batch] + exporters: [debug, datadog] + logs: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [debug, datadog] +``` + +{% /collapsible-section %} + +#### ConfigMap-based Collector Configuration{% #configmap-based-collector-configuration %} + +For more complex or frequently updated configurations, storing Collector configuration in a ConfigMap can simplify version control. + +1. Create a ConfigMap that contains your Collector configuration: + +In the `configmap.yaml` file: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: otel-agent-config-map + namespace: system +data: + # must be named otel-config.yaml + otel-config.yaml: |- + receivers: + prometheus: + config: + scrape_configs: + - job_name: "datadog-agent" + scrape_interval: 10s + static_configs: + - targets: + - 0.0.0.0:8888 + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + exporters: + debug: + verbosity: detailed + datadog: + api: + key: ${env:DD_API_KEY} + site: ${env:DD_SITE} + processors: + infraattributes: + cardinality: 2 + batch: + timeout: 10s + connectors: + datadog/connector: + traces: + service: + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [debug, datadog, datadog/connector] + metrics: + receivers: [otlp, datadog/connector, prometheus] + processors: [infraattributes, batch] + exporters: [debug, datadog] + logs: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [debug, datadog] +``` + +{% alert level="warning" %} +The field for Collector config in the ConfigMap must be called `otel-config.yaml`. +{% /alert %} +Reference the `otel-agent-config-map` ConfigMap in your `DatadogAgent` resource using `features.otelCollector.conf.configMap` parameter: +In the `datadog-agent.yaml` file: + +```yaml + ... 
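  # Unlike the inline example above, the Collector configuration here is read from the
  # otel-agent-config-map ConfigMap referenced under features.otelCollector.conf.configMap.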
+ # Enable Features + features: + otelCollector: + enabled: true + ports: + - containerPort: 4317 + hostPort: 4317 + name: otel-grpc + - containerPort: 4318 + hostPort: 4318 + name: otel-http + conf: + configMap: + name: otel-agent-config-map +``` + +The Operator automatically mounts `otel-config.yaml` from the ConfigMap into the Agent's OpenTelemetry Collector DaemonSet. + +{% collapsible-section %} +Completed datadog-agent.yaml file with Collector config in the ConfigMap +Completed `datadog-agent.yaml` with Collector configuration defined as ConfigMap should look something like this: + +In the `datadog-agent.yaml` file: + +```yaml +apiVersion: datadoghq.com/v2alpha1 +kind: DatadogAgent +metadata: + name: datadog +spec: + global: + clusterName: + site: + credentials: + apiSecret: + secretName: datadog-secret + keyName: api-key + + # Enable Features + features: + apm: + enabled: true + orchestratorExplorer: + enabled: true + processDiscovery: + enabled: true + liveProcessCollection: + enabled: true + usm: + enabled: true + clusterChecks: + enabled: true + otelCollector: + enabled: true + ports: + - containerPort: 4317 + hostPort: 4317 + name: otel-grpc + - containerPort: 4318 + hostPort: 4318 + name: otel-http + conf: + configMap: + name: otel-agent-config-map +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: otel-agent-config-map + namespace: system +data: + # must be named otel-config.yaml + otel-config.yaml: |- + receivers: + prometheus: + config: + scrape_configs: + - job_name: "datadog-agent" + scrape_interval: 10s + static_configs: + - targets: + - 0.0.0.0:8888 + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + exporters: + debug: + verbosity: detailed + datadog: + api: + key: ${env:DD_API_KEY} + site: ${env:DD_SITE} + processors: + infraattributes: + cardinality: 2 + batch: + timeout: 10s + connectors: + datadog/connector: + traces: + service: + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [debug, datadog, datadog/connector] + metrics: + receivers: [otlp, datadog/connector, prometheus] + processors: [infraattributes, batch] + exporters: [debug, datadog] + logs: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [debug, datadog] +``` + +{% /collapsible-section %} + +{% /tab %} + +{% tab title="Helm" %} +The Datadog Helm chart provides a sample OpenTelemetry Collector configuration that you can use as a starting point. This section walks you through the predefined pipelines and included OpenTelemetry components. 
+ +This is the full OpenTelemetry Collector configuration in `otel-config.yaml`: + +In the `otel-config.yaml` file: + +```yaml +receivers: + prometheus: + config: + scrape_configs: + - job_name: "otelcol" + scrape_interval: 10s + static_configs: + - targets: ["0.0.0.0:8888"] + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 +exporters: + debug: + verbosity: detailed + datadog: + api: + key: ${env:DD_API_KEY} + site: ${env:DD_SITE} +processors: + infraattributes: + cardinality: 2 + batch: + timeout: 10s +connectors: + datadog/connector: + traces: +service: + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [datadog, datadog/connector] + metrics: + receivers: [otlp, datadog/connector, prometheus] + processors: [infraattributes, batch] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [datadog] +``` + +{% /tab %} + +#### Key components{% #key-components %} + +To send telemetry data to Datadog, the following components are defined in the configuration: + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/components-2.74385f27545b6fc024ea25bc6cd7353f.png?auto=format" + alt="Diagram depicting the Agent deployment pattern" /%} + +##### Datadog connector{% #datadog-connector %} + +The [Datadog connector](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/connector/datadogconnector) computes Datadog APM trace metrics. + +In the `otel-config.yaml` file: + +```yaml +connectors: + datadog/connector: + traces: +``` + +##### Datadog exporter{% #datadog-exporter %} + +The [Datadog exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/datadogexporter) exports traces, metrics, and logs to Datadog. + +In the `otel-config.yaml` file: + +```yaml +exporters: + datadog: + api: + key: ${env:DD_API_KEY} + site: ${env:DD_SITE} +``` + +**Note**: If `key` is not specified or set to a secret, or if `site` is not specified, the system uses values from the core Agent configuration. By default, the core Agent sets site to `datadoghq.com` (US1). + +##### Prometheus receiver{% #prometheus-receiver %} + +The [Prometheus receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/prometheusreceiver) collects health metrics from the OpenTelemetry Collector for the metrics pipeline. + +In the `otel-config.yaml` file: + +```yaml +receivers: + prometheus: + config: + scrape_configs: + - job_name: "otelcol" + scrape_interval: 10s + static_configs: + - targets: ["0.0.0.0:8888"] +``` + +For more information, see the [Collector Health Metrics](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/prometheusreceiver) documentation. + +### Deploy the Agent with the OpenTelemetry Collector{% #deploy-the-agent-with-the-opentelemetry-collector %} + +{% tab title="Datadog Operator" %} +Deploy the Datadog Agent with the configuration file: + +```shell +kubectl apply -f datadog-agent.yaml +``` + +This deploys the Datadog Agent as a DaemonSet with the DDOT OpenTelemetry Collector. The Collector runs on the same host as your application, following the [Agent deployment pattern](https://opentelemetry.io/docs/collector/deployment/agent/). The [Gateway deployment pattern](https://opentelemetry.io/docs/collector/deployment/gateway/) is not supported. 
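To confirm the rollout, check that the Agent pods are running and inspect the Agent status from inside one of them. This is a sketch only; the namespace, pod names, and exact status output depend on your deployment:

```shell
# List the Agent pods created for the DaemonSet (adjust the namespace if needed)
kubectl get pods -n <namespace> | grep datadog

# Check the OTel Agent section of the Agent status from inside one of the Agent pods
kubectl exec -it <datadog-agent-pod> -n <namespace> -- agent status
```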
+{% /tab %} + +{% tab title="Helm" %} +To install or upgrade the Datadog Agent with OpenTelemetry Collector in your Kubernetes environment, use one of the following Helm commands: + +- For default OpenTelemetry Collector configuration: + + ```shell + helm upgrade -i datadog/datadog -f datadog-values.yaml + ``` + +- For custom OpenTelemetry Collector configuration: + + ```shell + helm upgrade -i datadog/datadog \ + -f datadog-values.yaml \ + --set-file datadog.otelCollector.config=otel-config.yaml + ``` + +This command allows you to specify your own `otel-config.yaml` file. + +Replace `` with the Helm release name you are using. + +{% alert level="info" %} +You may see warnings during the deployment process. These warnings can be ignored. +{% /alert %} + +This Helm chart deploys the Datadog Agent with OpenTelemetry Collector as a DaemonSet. The Collector is deployed on the same host as your application, following the [Agent deployment pattern](https://opentelemetry.io/docs/collector/deployment/agent/). The [Gateway deployment pattern](https://opentelemetry.io/docs/collector/deployment/gateway/) is not supported. +{% /tab %} + +{% collapsible-section %} +Deployment diagram +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/deployment-2.01ec8236124c329cdfadd56354cb0242.png?auto=format" + alt="Diagram depicting the Agent deployment pattern" /%} + +{% /collapsible-section %} + +## Send your telemetry to Datadog{% #send-your-telemetry-to-datadog %} + +To send your telemetry data to Datadog: + +1. Instrument your application +1. Configure the application +1. Correlate observability data +1. Run your application + +### Instrument the application{% #instrument-the-application %} + +Instrument your application [using the OpenTelemetry API](http://localhost:1313/tracing/trace_collection/custom_instrumentation/otel_instrumentation/). + +{% collapsible-section %} +Example application instrumented with the OpenTelemetry API +As an example, you can use the [Calendar sample application](https://github.com/DataDog/opentelemetry-examples/tree/main/apps/rest-services/java/calendar) that's already instrumented for you. The following code instruments the [CalendarService.getDate()](https://github.com/DataDog/opentelemetry-examples/blob/main/apps/rest-services/java/calendar/src/main/java/com/otel/service/CalendarService.java#L27-L48) method using the OpenTelemetry annotations and API: + +In the `CalendarService.java` file: + +```java +@WithSpan(kind = SpanKind.CLIENT) +public String getDate() { + Span span = Span.current(); + span.setAttribute("peer.service", "random-date-service"); + ... +} +``` + +{% /collapsible-section %} + +### Configure the application{% #configure-the-application %} + +Your application container must send data to the DDOT Collector on the same host. Since the Collector runs as a DaemonSet, you need to specify the local host as the OTLP endpoint. + +If the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable is not already set, add it to your application's Deployment manifest file: + +In the `deployment.yaml` file: + +```yaml +env: + ... 
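  # HOST_IP is resolved through the Kubernetes downward API (status.hostIP) to the IP of the
  # node this pod runs on, where the DDOT Collector DaemonSet listens on OTLP gRPC port 4317.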
+ - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: OTLP_GRPC_PORT + value: "4317" + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: 'http://$(HOST_IP):$(OTLP_GRPC_PORT)' + - name: OTEL_EXPORTER_OTLP_PROTOCOL + value: 'grpc' + +``` + + + +### Correlate observability data{% #correlate-observability-data %} + +[Unified service tagging](http://localhost:1313/getting_started/tagging/unified_service_tagging) ties observability data together in Datadog so you can navigate across metrics, traces, and logs with consistent tags. + +Unified service tagging ties observability data together in Datadog so you can navigate across metrics, traces, and logs with consistent tags. + +In containerized environments, `env`, `service`, and `version` are set through the OpenTelemetry Resource Attributes environment variables or Kubernetes labels on your deployments and pods. The DDOT detects this tagging configuration and applies it to the data it collects from containers. + +To get the full range of unified service tagging, add **both** the environment variables and the deployment/pod labels: + +In the `deployment.yaml` file: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + tags.datadoghq.com/env: "" + tags.datadoghq.com/service: "" + tags.datadoghq.com/version: "" +... +template: + metadata: + labels: + tags.datadoghq.com/env: "" + tags.datadoghq.com/service: "" + tags.datadoghq.com/version: "" + containers: + - ... + env: + - name: OTEL_SERVICE_NAME + value: "" + - name: OTEL_RESOURCE_ATTRIBUTES + value: >- + service.name=$(OTEL_SERVICE_NAME), + service.version=, + deployment.environment.name= +``` + +### Run the application{% #run-the-application %} + +Redeploy your application to apply the changes made in the deployment manifest. Once the updated configuration is active, Unified Service Tagging will be fully enabled for your metrics, traces, and logs. + +## Explore observability data in Datadog{% #explore-observability-data-in-datadog %} + +Use Datadog to explore the observability data for your application. + +### Fleet automation{% #fleet-automation %} + +Explore your Datadog Agent and Collector configuration. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/fleet_automation.baf78a14f2401833d04f1b19c38ddcf4.png?auto=format" + alt="Review your Agent and Collector configuration from the Fleet Automation page." /%} + +### Live container monitoring{% #live-container-monitoring %} + +Monitor your container health using Live Container Monitoring capabilities. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/containers.14b4025c2f3b0e554692286e4a01f383.png?auto=format" + alt="Monitor your container health from the Containers page." /%} + +### Infrastructure node health{% #infrastructure-node-health %} + +View runtime and infrastructure metrics to visualize, monitor, and measure the performance of your nodes. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/infrastructure.c8fe3b51603bf26daef58b0c3fbdbb6d.png?auto=format" + alt="View runtime and infrastructure metrics from the Host List." /%} + +### Logs{% #logs %} + +View logs to monitor and troubleshoot application and system operations. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/logs.3399501b14c5a2c7bf9ab8dd4aa8bf52.png?auto=format" + alt="View logs from the Log Explorer." 
/%} + +### Traces{% #traces %} + +View traces and spans to observe the status and performance of requests processed by your application, with infrastructure metrics correlated in the same trace. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/traces.0dfe8b50575dbfcea5b0d06131826db9.png?auto=format" + alt="View traces from the Trace Explorer." /%} + +### Runtime metrics{% #runtime-metrics %} + +Monitor your runtime (JVM) metrics for your applications. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/metrics.1f91a119276f91ecfb2976f16eed58ef.png?auto=format" + alt="View JVM metrics from the JVM Metrics dashboard" /%} + +### Collector health metrics{% #collector-health-metrics %} + +View metrics from the DDOT Collector to monitor the Collector health. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/dashboard.f6e65b6b9a8708b1a7e172da215947af.png?auto=format" + alt="View Collector health metrics from the OTel dashboard." /%} + +## Further reading{% #further-reading %} + +- [Use Custom OpenTelemetry Components with Datadog Agent](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components) diff --git a/opentelemetry-mdoc/setup/ddot_collector/install/linux/index.md b/opentelemetry-mdoc/setup/ddot_collector/install/linux/index.md new file mode 100644 index 0000000000000..a5bb61577a5fb --- /dev/null +++ b/opentelemetry-mdoc/setup/ddot_collector/install/linux/index.md @@ -0,0 +1,372 @@ +--- +title: Install the Datadog Distribution of OTel Collector on Linux +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Datadog + Distribution of OpenTelemetry Collector > Install > Install the Datadog + Distribution of OTel Collector on Linux +--- + +# Install the Datadog Distribution of OTel Collector on Linux + +{% callout %} +Support for deploying the DDOT Collector on Linux-based bare-metal hosts and virtual machines is currently in Preview. +{% /callout %} + +{% callout %} +# Important note for users on the following Datadog sites: app.ddog-gov.com + + + +{% alert level="danger" %} +FedRAMP customers should not enable or use the embedded OpenTelemetry Collector. +{% /alert %} + + +{% /callout %} + +## Overview{% #overview %} + +Follow this guide to install the Datadog Distribution of OpenTelemetry (DDOT) Collector on Linux-based bare-metal hosts and virtual machines. + +## Requirements{% #requirements %} + +To complete this guide, you need the following: + +**Datadog account**: + +1. [Create a Datadog account](https://www.datadoghq.com/free-datadog-trial/) if you don't have one. +1. Find or create your [Datadog API key](https://app.datadoghq.com/organization-settings/api-keys/). + +**Software**: + +- A supported Linux distribution (for example, Debian, Ubuntu, CentOS, RHEL, Fedora, SUSE). +- `curl` must be installed to use the one-line installation script. 
+ +## Install the Datadog Agent with OpenTelemetry Collector{% #install-the-datadog-agent-with-opentelemetry-collector %} + +### Installation{% #installation %} + +To install the DDOT Collector on a Linux host, use the following one-line installation command: + +```shell +DD_API_KEY= DD_SITE="" DD_OTELCOLLECTOR_ENABLED=true DD_AGENT_MAJOR_VERSION=7 DD_AGENT_MINOR_VERSION=70.0-1 bash -c "$(curl -L https://install.datadoghq.com/scripts/install_script_agent7.sh)" +``` + +This command installs both the core Datadog Agent package and the DDOT Collector that runs alongside it. + +### Validation{% #validation %} + +Run the Agent's [status command](http://localhost:1313/agent/configuration/agent-commands/#agent-status-and-information) to verify installation. + +```shell +sudo datadog-agent status +``` + +A successful installation returns an Agent Status report that begins with Agent information like this: + +```text +==================== +Agent (v7.x.x) +==================== + Status date: 2025-08-22 18:35:17.449 UTC (1755887717449) + Agent start: 2025-08-22 18:16:27.004 UTC (1755886587004) + Pid: 2828211 + Go Version: go1.24.6 + Python Version: 3.12.11 + Build arch: amd64 + Agent flavor: agent + FIPS Mode: not available + Log Level: info +``` + +There will also be an **OTel Agent** status section that includes OpenTelemetry information: + +```text +========== +OTel Agent +========== + + Status: Running + Agent Version: 7.x.x + Collector Version: v0.129.0 + + Receiver + ========================== + Spans Accepted: 0 + Metric Points Accepted: 1055 + Log Records Accepted: 0 + + Exporter + ========================== + Spans Sent: 0 + Metric Points Sent: 1055 + Log Records Sent: 0 +``` + +## Configure the Datadog Agent{% #configure-the-datadog-agent %} + +### Enable the DDOT Collector{% #enable-the-ddot-collector %} + +The configuration file for the Datadog Agent is automatically installed at `/etc/datadog-agent/datadog.yaml`. The installation script adds the following configuration settings to `/etc/datadog-agent/datadog.yaml` to enable the DDOT Collector: + +In the `datadog-agent.yaml` file: + +```yaml +otelcollector: + enabled: true +agent_ipc: + port: 5009 + config_refresh_interval: 60 +``` + +DDOT automatically binds the OpenTelemetry Collector to ports 4317 (grpc) and 4318 (http) by default. + +### (Optional) Enable additional Datadog features{% #optional-enable-additional-datadog-features %} + +{% alert level="danger" %} +Enabling these features may incur additional charges. Review the [pricing page](https://www.datadoghq.com/pricing/) and talk to your Customer Success Manager before proceeding. +{% /alert %} + +For a complete list of available options, refer to the fully commented reference file at `/etc/datadog-agent/datadog.yaml.example` or the sample [`config_template.yaml`](https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml) file. + +When enabling additional Datadog features, always use the Datadog or OpenTelemetry Collector configuration files instead of relying on Datadog environment variables. + +## Configure the OpenTelemetry Collector{% #configure-the-opentelemetry-collector %} + +The installation script provides a sample OpenTelemetry Collector configuration at `/etc/datadog-agent/otel-config.yaml` that you can use as a starting point. 
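If you customize this file, restart the Agent so the DDOT Collector picks up your changes. On systemd-based Linux distributions, this is typically:

```shell
sudo systemctl restart datadog-agent
```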
+ +{% collapsible-section %} +Sample otel-config.yaml file from installation +Sample `otel-config.yaml` from installation will look something like this: + +In the `otel-config.yaml` file: + +```yaml +receivers: + prometheus: + config: + scrape_configs: + - job_name: "otelcol" + scrape_interval: 60s + static_configs: + - targets: ["0.0.0.0:8888"] + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 +exporters: + debug: + verbosity: detailed + datadog: + api: + key: + site: +processors: + infraattributes: + cardinality: 2 + batch: + timeout: 10s +connectors: + datadog/connector: + traces: + compute_top_level_by_span_kind: true + peer_tags_aggregation: true + compute_stats_by_span_kind: true +service: + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [datadog, datadog/connector] + metrics: + receivers: [otlp, datadog/connector, prometheus] + processors: [infraattributes, batch] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes, batch] + exporters: [datadog] +``` + +{% /collapsible-section %} + +#### Key components{% #key-components %} + +To send telemetry data to Datadog, the following components are defined in the configuration: + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/components-2.74385f27545b6fc024ea25bc6cd7353f.png?auto=format" + alt="Diagram depicting the Agent deployment pattern" /%} + +##### Datadog connector{% #datadog-connector %} + +The [Datadog connector](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/connector/datadogconnector) computes Datadog APM trace metrics. + +In the `otel-config.yaml` file: + +```yaml +connectors: + datadog/connector: + traces: +``` + +##### Datadog exporter{% #datadog-exporter %} + +The [Datadog exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/datadogexporter) exports traces, metrics, and logs to Datadog. + +In the `otel-config.yaml` file: + +```yaml +exporters: + datadog: + api: + key: + site: +``` + +**Note**: If `key` is not specified or set to a secret, or if `site` is not specified, the system uses values from the core Agent configuration. By default, the core Agent sets site to `datadoghq.com` (US1). + +##### Prometheus receiver{% #prometheus-receiver %} + +The [Prometheus receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/prometheusreceiver) collects health metrics from the OpenTelemetry Collector for the metrics pipeline. + +In the `otel-config.yaml` file: + +```yaml +receivers: + prometheus: + config: + scrape_configs: + - job_name: "otelcol" + scrape_interval: 60s + static_configs: + - targets: ["0.0.0.0:8888"] +``` + +For more information, see the [Collector Health Metrics](http://localhost:1313/opentelemetry/integrations/collector_health_metrics/) documentation. + +## Send your telemetry to Datadog{% #send-your-telemetry-to-datadog %} + +To send your telemetry data to Datadog: + +1. Instrument your application +1. Configure the application +1. Correlate observability data +1. Run your application + +### Instrument the application{% #instrument-the-application %} + +Instrument your application [using the OpenTelemetry API](http://localhost:1313/opentelemetry/instrument/api_support). 
+ +{% collapsible-section %} +Example application instrumented with the OpenTelemetry API +As an example, you can use the [Calendar sample application](https://github.com/DataDog/opentelemetry-examples/tree/main/apps/rest-services/java/calendar) that's already instrumented for you. The following code instruments the [CalendarService.getDate()](https://github.com/DataDog/opentelemetry-examples/blob/main/apps/rest-services/java/calendar/src/main/java/com/otel/service/CalendarService.java#L27-L48) method using the OpenTelemetry annotations and API: + +In the `CalendarService.java` file: + +```java +@WithSpan(kind = SpanKind.CLIENT) +public String getDate() { + Span span = Span.current(); + span.setAttribute("peer.service", "random-date-service"); + ... +} +``` + +{% /collapsible-section %} + +### Configure the application{% #configure-the-application %} + +Your application must send data to the DDOT Collector on the same host. Ensure that the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable is set on your application. + +If using the example application, [`run-otel-local.sh`](https://github.com/DataDog/opentelemetry-examples/blob/main/apps/rest-services/java/calendar/run-otel-local.sh) sets up the required environment variables and runs the application: + +In the `run-otel-local.sh` file: + +```bash +export OTEL_METRICS_EXPORTER="otlp" +export OTEL_LOGS_EXPORTER="otlp" +export OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4317" +export OTEL_EXPORTER_OTLP_PROTOCOL="grpc" +``` + + + +### Correlate observability data{% #correlate-observability-data %} + +[Unified service tagging](http://localhost:1313/opentelemetry/correlate/) ties observability data together in Datadog so you can navigate across metrics, traces, and logs with consistent tags. + +In bare-metal environments, `env`, `service`, and `version` are set through the OpenTelemetry Resource Attributes environment variables. The DDOT Collector detects this tagging configuration and applies it to the data it collects from applications. + +In the example application, this is done in `run-otel-local.sh`: + +In the `run-otel-local.sh` file: + +```bash +export OTEL_RESOURCE_ATTRIBUTES="service.name=my-calendar-service,service.version=1.0,deployment.environment.name=otel-test,host.name=calendar-host" +``` + + + +### Run the application{% #run-the-application %} + +Redeploy your application to apply the changes made in your environment variables. After the updated configuration is active, unified service tagging is fully enabled for your metrics, traces, and logs. + +## Explore observability data in Datadog{% #explore-observability-data-in-datadog %} + +Use Datadog to explore the observability data for your application. + +### Fleet automation{% #fleet-automation %} + +Explore your Datadog Agent and Collector configuration. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/fleet_automation.baf78a14f2401833d04f1b19c38ddcf4.png?auto=format" + alt="Review your Agent and Collector configuration from the Fleet Automation page." /%} + +### Infrastructure monitoring{% #infrastructure-monitoring %} + +View runtime and infrastructure metrics to visualize, monitor, and measure the performance of your hosts. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/infrastructure.c8fe3b51603bf26daef58b0c3fbdbb6d.png?auto=format" + alt="View runtime and infrastructure metrics from the Host List." /%} + +### Logs{% #logs %} + +View logs to monitor and troubleshoot application and system operations. 
+ +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/logs.3399501b14c5a2c7bf9ab8dd4aa8bf52.png?auto=format" + alt="View logs from the Log Explorer." /%} + +### Traces{% #traces %} + +View traces and spans to observe the status and performance of requests processed by your application, with infrastructure metrics correlated in the same trace. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/traces.0dfe8b50575dbfcea5b0d06131826db9.png?auto=format" + alt="View traces from the Trace Explorer." /%} + +### Runtime metrics{% #runtime-metrics %} + +Monitor your runtime (JVM) metrics for your applications. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/metrics.1f91a119276f91ecfb2976f16eed58ef.png?auto=format" + alt="View JVM metrics from the JVM Metrics dashboard" /%} + +### Collector health metrics{% #collector-health-metrics %} + +View metrics from the DDOT Collector to monitor the Collector health. + +{% image + source="http://localhost:1313/images/opentelemetry/embedded_collector/dashboard.f6e65b6b9a8708b1a7e172da215947af.png?auto=format" + alt="View Collector health metrics from the OTel dashboard." /%} + +## Further reading{% #further-reading %} + +- [Use Custom OpenTelemetry Components with Datadog Agent](http://localhost:1313/opentelemetry/setup/ddot_collector/custom_components) diff --git a/opentelemetry-mdoc/setup/index.md b/opentelemetry-mdoc/setup/index.md new file mode 100644 index 0000000000000..739a596ca2755 --- /dev/null +++ b/opentelemetry-mdoc/setup/index.md @@ -0,0 +1,50 @@ +--- +title: Send OpenTelemetry Data to Datadog +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog +--- + +# Send OpenTelemetry Data to Datadog + +This page describes all of the ways you can send OpenTelemetry (OTel) data to Datadog. + +## DDOT Collector (Recommended){% #ddot-collector-recommended %} + +The Datadog Distribution of OpenTelemetry (DDOT) Collector is an open source solution that combines the flexibility of OpenTelemetry with the comprehensive observability capabilities of Datadog. + +This approach gives you full control over OpenTelemetry pipelines while also providing access to powerful, Datadog Agent-based features, including: + +- Fleet Automation +- Live Container Monitoring +- Kubernetes Explorer +- Live Processes +- Cloud Network Monitoring +- Universal Service Monitoring +- 1,000+ Datadog integrations + +- [ + ### Install the DDOT Collector +Follow our guided setup to install the Collector and start sending your OpenTelemetry data to Datadog.](http://localhost:1313/opentelemetry/setup/ddot_collector/install/) + +## Other setup options{% #other-setup-options %} + +Alternative methods are available for specific use cases, such as maintaining a vendor-neutral pipeline or running in non-Kubernetes environments. 
+ +- [ + ### Standalone OpenTelemetry Collector +Best for: Users who prefer to use OTel Collector distributions from the OpenTelemetry open source community or require advanced processing capabilities like tail-based sampling.](http://localhost:1313/opentelemetry/setup/collector_exporter/) +- [ + ### OTLP Ingest in the Agent +Best for: Users on platforms other than Kubernetes Linux, or those who prefer a minimal configuration without managing Collector pipelines.](http://localhost:1313/opentelemetry/setup/otlp_ingest_in_the_agent) +- [ + ### Direct OTLP Ingest (Preview) +Best for: Situations requiring direct data transmission to Datadog's intake endpoint without any intermediary components.](http://localhost:1313/opentelemetry/setup/agentless) + +{% alert level="info" %} +**Still not sure which setup is right for you?**See the [Feature Compatibility](http://localhost:1313/opentelemetry/compatibility/) table to understand which Datadog features are supported. +{% /alert %} + +## Further reading{% #further-reading %} + +- [Instrument Your Applications](http://localhost:1313/opentelemetry/instrument/) +- [How to select your OpenTelemetry deployment](https://www.datadoghq.com/blog/otel-deployments/) diff --git a/opentelemetry-mdoc/setup/otlp_ingest/index.md b/opentelemetry-mdoc/setup/otlp_ingest/index.md new file mode 100644 index 0000000000000..1e18d19410ccd --- /dev/null +++ b/opentelemetry-mdoc/setup/otlp_ingest/index.md @@ -0,0 +1,30 @@ +--- +title: Datadog OTLP Intake Endpoint +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Datadog + OTLP Intake Endpoint +--- + +# Datadog OTLP Intake Endpoint + +{% callout %} +The Datadog OTLP intake endpoint is in Preview. To request access, contact your account representative. +{% /callout %} + +## Overview{% #overview %} + +Datadog's OpenTelemetry protocol (OTLP) intake API endpoint allows you to send observability data directly to Datadog. With this feature, you don't need to run the [Datadog Agent](http://localhost:1313/opentelemetry/otlp_ingest_in_the_agent/) or [OpenTelemetry Collector + Datadog Exporter](http://localhost:1313/opentelemetry/setup/collector_exporter/). + +{% image + source="http://localhost:1313/images/opentelemetry/setup/direct-ingest.675dcfafdeeca4017efe7eb10ad44571.png?auto=format" + alt="Diagram: OpenTelemetry SDK sends data directly to Datadog through the intake endpoint." /%} + +You might prefer this option if you're looking for a straightforward setup and want to send telemetry directly to Datadog without using the Datadog Agent or OpenTelemetry Collector. + +- [OTLP logs intake endpoint](http://localhost:1313/opentelemetry/setup/intake_endpoint/otlp_logs) +- [OTLP metrics intake endpoint](http://localhost:1313/opentelemetry/setup/intake_endpoint/otlp_metrics) + +## Further reading{% #further-reading %} + +- [Send Data to Datadog](http://localhost:1313/opentelemetry/setup) diff --git a/opentelemetry-mdoc/setup/otlp_ingest/logs/index.md b/opentelemetry-mdoc/setup/otlp_ingest/logs/index.md new file mode 100644 index 0000000000000..d88a884459244 --- /dev/null +++ b/opentelemetry-mdoc/setup/otlp_ingest/logs/index.md @@ -0,0 +1,118 @@ +--- +title: Datadog OTLP Logs Intake Endpoint +description: Datadog, the leading service for cloud-scale monitoring. 
+breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Datadog + OTLP Intake Endpoint > Datadog OTLP Logs Intake Endpoint +--- + +# Datadog OTLP Logs Intake Endpoint + +{% callout %} +# Important note for users on the following Datadog sites: app.ddog-gov.com + +{% alert level="warning" %} +This product is not supported for your selected [Datadog site](http://localhost:1313/getting_started/site). (). +{% /alert %} + +{% /callout %} + +## Overview{% #overview %} + +Datadog's OpenTelemetry Protocol (OTLP) logs intake API endpoint allows you to send logs directly to Datadog. With this feature, you don't need to run the [Datadog Agent](http://localhost:1313/opentelemetry/otlp_ingest_in_the_agent/) or [OpenTelemetry Collector + Datadog Exporter](http://localhost:1313/opentelemetry/collector_exporter/). + +Choose this option for a straightforward setup to send logs directly to Datadog without using the Datadog Agent or OpenTelemetry Collector. + +## Configuration{% #configuration %} + +To send OTLP data to the Datadog OTLP logs intake endpoint, you must configure the OTLP HTTP Protobuf exporter. The process differs depending on whether you are using automatic or manual instrumentation for OpenTelemetry. + +{% alert level="info" %} +Based on your [Datadog site](http://localhost:1313/getting_started/site/), which is : Replace `${YOUR_ENDPOINT}` with in the following examples. +{% /alert %} + +#### Automatic instrumentation{% #automatic-instrumentation %} + +If you are using [OpenTelemetry automatic instrumentation](https://opentelemetry.io/docs/specs/otel/glossary/#automatic-instrumentation), set the following environment variables: + +```shell +export OTEL_EXPORTER_OTLP_LOGS_PROTOCOL="http/protobuf" +export OTEL_EXPORTER_OTLP_LOGS_ENDPOINT="${YOUR_ENDPOINT}" // Replace this with the correct endpoint +export OTEL_EXPORTER_OTLP_LOGS_HEADERS="dd-api-key=${DD_API_KEY}" +``` + +#### Manual instrumentation{% #manual-instrumentation %} + +If you are using manual instrumentation with OpenTelemetry SDKs, configure the OTLP HTTP Protobuf exporter programmatically using the following examples. + +{% alert level="info" %} +OpenTelemetry SDK logs support for JavaScript and Python is in development. For the latest statuses, see [OpenTelemetry Status and Releases](https://opentelemetry.io/docs/languages/#status-and-releases). +{% /alert %} + +{% tab title="Java" %} +The Java exporter is `OtlpHttpLogRecordExporter`. To configure the exporter, use the following code snippet: + +```java +import io.opentelemetry.exporter.otlp.http.logs.OtlpHttpLogRecordExporter; + +OtlpHttpLogRecordExporter exporter = OtlpHttpLogRecordExporter.builder() + .setEndpoint("${YOUR_ENDPOINT}") // Replace this with the correct endpoint + .addHeader("dd-api-key", System.getenv("DD_API_KEY")) + .build(); +``` + +{% /tab %} + +{% tab title="Go" %} +The Go exporter is `otlploghttp`. 
To configure the exporter, use the following code snippet: + +```go +import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" + +logExporter, err := otlploghttp.New( + ctx, + otlploghttp.WithEndpointURL("${YOUR_ENDPOINT}"), // Replace this with the correct endpoint, minus the URL path + otlploghttp.WithURLPath("/v1/logs"), + otlploghttp.WithHeaders( + map[string]string{ + "dd-api-key": os.Getenv("DD_API_KEY"), + }), +) +``` + +{% /tab %} + +## OpenTelemetry Collector{% #opentelemetry-collector %} + +If you are using the OpenTelemetry Collector and don't want to use the Datadog Exporter, you can configure [`otlphttpexporter`](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter) to export logs to the Datadog OTLP logs intake endpoint. + +Configure your `config.yaml` like this: + +```yaml +exporters: + otlphttp: + logs_endpoint: ${YOUR_ENDPOINT} // Replace this with the correct endpoint + headers: + dd-api-key: ${env:DD_API_KEY} + +service: + pipelines: + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlphttp] +``` + +## Troubleshooting{% #troubleshooting %} + +### Error: 403 Forbidden{% #error-403-forbidden %} + +If you receive a `403 Forbidden` error when sending logs to the Datadog OTLP logs intake endpoint, it indicates one potential issue: + +- The endpoint URL is incorrect for your organization.**Solution**: Use the correct endpoint URL for your organization. Your site is , so you need to use the endpoint. + +## Further reading{% #further-reading %} + +- [General OpenTelemetry SDK Configuration](https://opentelemetry.io/docs/concepts/sdk-configuration/general-sdk-configuration/) +- [OpenTelemetry Environment Variable Spec](https://opentelemetry.io/docs/reference/specification/sdk-environment-variables/) +- [OpenTelemetry Protocol Exporter](https://opentelemetry.io/docs/reference/specification/protocol/exporter/) diff --git a/opentelemetry-mdoc/setup/otlp_ingest/metrics/index.md b/opentelemetry-mdoc/setup/otlp_ingest/metrics/index.md new file mode 100644 index 0000000000000..caef9cade22b2 --- /dev/null +++ b/opentelemetry-mdoc/setup/otlp_ingest/metrics/index.md @@ -0,0 +1,276 @@ +--- +title: Datadog OTLP Metrics Intake Endpoint +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Datadog + OTLP Intake Endpoint > Datadog OTLP Metrics Intake Endpoint +--- + +# Datadog OTLP Metrics Intake Endpoint + +{% callout %} +# Important note for users on the following Datadog sites: app.ddog-gov.com + +{% alert level="warning" %} +This product is not supported for your selected [Datadog site](http://localhost:1313/getting_started/site). (). +{% /alert %} + +{% /callout %} + +## Overview{% #overview %} + +Datadog's OpenTelemetry Protocol (OTLP) metrics intake API endpoint allows you to send metrics directly to Datadog. With this feature, you don't need to run the [Datadog Agent](http://localhost:1313/opentelemetry/otlp_ingest_in_the_agent/) or [OpenTelemetry Collector + Datadog Exporter](http://localhost:1313/opentelemetry/collector_exporter/). + +You might prefer this option if you're looking for a straightforward setup and want to send metrics directly to Datadog without using the Datadog Agent or OpenTelemetry Collector. 
+ +This endpoint is particularly useful in the following scenarios: + +- **OpenTelemetry distributions without Datadog Exporter support**: Some OpenTelemetry distributions, such as the [AWS Distro for OpenTelemetry (ADOT)](https://aws-otel.github.io/docs/getting-started/lambda), have removed vendor-specific exporters in favor of a unified OTLP exporter. The OTLP metrics endpoint enables these distributions to send metrics directly to Datadog seamlessly. + +- **Technical constraints using the Datadog Exporter or Agent**: Ideal for scenarios where installing additional software is impractical or restrictive, such as third-party managed services (for example, Vercel), applications on customer devices, or environments requiring streamlined, Agentless observability pipelines. The OTLP metrics endpoint enables direct OTLP metric ingestion in these scenarios. + +## Configuration{% #configuration %} + +To export OTLP metrics data to the Datadog OTLP metrics intake endpoint: + +1. Ensure only delta metrics are sent. +1. Configure the OTLP HTTP exporter. + - Set the Datadog OTLP metrics intake endpoint. + - Configure the required HTTP headers. +1. (Optional) Set the `dd-otel-metric-config` HTTP header to configure the metric translator behavior. + +### Configure the exporter{% #configure-the-exporter %} + +To send OTLP data to the Datadog OTLP metrics intake endpoint, use the OTLP HTTP exporter. For metrics, the exporter supports both HTTP Protobuf and HTTP JSON. HTTP Protobuf is recommended for better performance. + +The process differs depending on whether you're using automatic or manual instrumentation for OpenTelemetry. + +{% alert level="info" %} +Based on your [Datadog site](http://localhost:1313/getting_started/site/), which is : Replace your endpoint with in the following examples. +{% /alert %} + +#### Ensure only delta metrics are sent{% #ensure-only-delta-metrics-are-sent %} + +The Datadog OTLP metrics intake endpoint accepts only **delta** metrics. If you attempt to send **cumulative** metrics (the default in most SDKs), you will receive an error. Make sure to configure your OpenTelemetry SDK or Collector to produce delta metrics. + +- For [supported languages](https://github.com/open-telemetry/opentelemetry-specification/blob/main/spec-compliance-matrix.md#environment-variables), set the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable to `delta`. +- Otherwise, set the temporality preference in your code. For more information, read [Configure delta temporality in OpenTelemetry](http://localhost:1313/opentelemetry/guide/otlp_delta_temporality/). + +#### Automatic instrumentation{% #automatic-instrumentation %} + +If you are using [OpenTelemetry automatic instrumentation](https://opentelemetry.io/docs/reference/specification/glossary/#automatic-instrumentation), set the following environment variables: + +```shell +export OTEL_EXPORTER_OTLP_METRICS_PROTOCOL="http/protobuf" +export OTEL_EXPORTER_OTLP_METRICS_ENDPOINT="" +export OTEL_EXPORTER_OTLP_METRICS_HEADERS="dd-api-key=${DD_API_KEY}" +export OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE="delta" +``` + +#### Manual instrumentation{% #manual-instrumentation %} + +If you are using manual instrumentation with OpenTelemetry SDKs, configure the OTLP HTTP Protobuf exporter programmatically. + +{% tab title="JavaScript" %} +The JavaScript exporter is [`@opentelemetry/exporter-metrics-otlp-proto`](https://www.npmjs.com/package/@opentelemetry/exporter-metrics-otlp-proto). 
To configure the exporter, use the following code snippet: + +```javascript +const { OTLPMetricExporter } = require('@opentelemetry/exporter-metrics-otlp-proto'); + +const exporter = new OTLPMetricExporter({ + url: 'https://otlp.datadoghq.com/v1/metrics', + temporalityPreference: AggregationTemporalityPreference.DELTA, // Ensure delta temporality + headers: { + 'dd-api-key': process.env.DD_API_KEY, + 'dd-otel-metric-config': '{"resource_attributes_as_tags": true}', + }, +}); +``` + +{% /tab %} + +{% tab title="Java" %} +The Java exporter is [`OtlpHttpMetricExporter`](https://javadoc.io/doc/io.opentelemetry/opentelemetry-exporter-otlp-http-metrics/). To configure the exporter, use the following code snippet: + +```java +import io.opentelemetry.exporter.otlp.http.metrics.OtlpHttpMetricExporter; + +OtlpHttpMetricExporter exporter = OtlpHttpMetricExporter.builder() + .setEndpoint("https://otlp.datadoghq.com/v1/metrics") + .setAggregationTemporalitySelector( + AggregationTemporalitySelector.deltaPreferred()) // Ensure delta temporality + .addHeader("dd-api-key", System.getenv("DD_API_KEY")) + .addHeader("dd-otel-metric-config", "{\"resource_attributes_as_tags\": true}") + .build(); +``` + +{% /tab %} + +{% tab title="Go" %} +The Go exporter is [`otlpmetrichttp`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp). To configure the exporter, use the following code snippet: + +```go +import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + +metricExporter, err := otlpmetrichttp.New( + ctx, + otlpmetrichttp.WithEndpoint("otlp.datadoghq.com"), + otlpmetrichttp.WithURLPath("/v1/metrics"), + otlpmetrichttp.WithTemporalitySelector(deltaSelector), // Ensure delta temporality + otlpmetrichttp.WithHeaders( + map[string]string{ + "dd-api-key": os.Getenv("DD_API_KEY"), + "dd-otel-metric-config": "{\"resource_attributes_as_tags\": true}", + }), +) +``` + +{% /tab %} + +{% tab title="Python" %} +The Python exporter is [`OTLPMetricExporter`](https://pypi.org/project/opentelemetry-exporter-otlp-proto-http/). To configure the exporter, use the following code snippet: + +```python +from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter + +exporter = OTLPMetricExporter( + endpoint="https://otlp.datadoghq.com/v1/metrics", + preferred_temporality=deltaTemporality, # Ensure delta temporality + headers={ + "dd-api-key": os.environ.get("DD_API_KEY"), + "dd-otel-metric-config": '{"resource_attributes_as_tags": true}', + }, +) +``` + +{% /tab %} + +### (Optional) Configure the metric translator{% #optional-configure-the-metric-translator %} + +Use the `dd-otel-metric-config` header to configure how metrics are translated and sent to Datadog. The JSON header contains the following fields: + +{% dl %} + +{% dt %} +`resource_attributes_as_tags` +{% /dt %} + +{% dd %} +**Type**: Boolean**Default**: `false`If set to `true`, transforms all resource attributes into metric labels, which are then converted into tags. +{% /dd %} + +{% dt %} +`instrumentation_scope_metadata_as_tags` +{% /dt %} + +{% dd %} +**Type**: Boolean**Default**: `false`If set to `true`, adds the name and version of the instrumentation scope that created a metric to the metric tags. +{% /dd %} + +{% dt %} +`histograms.mode` +{% /dt %} + +{% dd %} +**Type**: StringMode for exporting histograms. Valid values are: +- `distributions`: sends histograms as Datadog distributions (recommended). +- `counters`: sends histograms as Datadog counts, one metric per bucket. 
+- `nobuckets`: sends no bucket histogram metrics. + +{% /dd %} + +{% dt %} +`histograms.send_aggregation_metrics` +{% /dt %} + +{% dd %} +**Type**: BooleanIf set to `true`, writes additional `.sum`, `.count`, `.min`, and `.max` metrics for histograms. +{% /dd %} + +{% dt %} +`summaries.mode` +{% /dt %} + +{% dd %} +**Type**: StringMode for exporting OTLP summaries. Valid values are: +- `noquantiles`: sends no `.quantile` metrics. `.sum` and `.count` metrics are still sent. +- `gauges`: sends `.quantile` metrics as gauges tagged by the quantile. + +{% /dd %} + +{% /dl %} + +For example: + +```json +{ + "resource_attributes_as_tags": true, + "instrumentation_scope_metadata_as_tags": true, + "histograms": { + "mode": "distributions", + "send_aggregation_metrics": true + }, + "summaries": { + "mode": "gauges" + } +} +``` + +## OpenTelemetry Collector{% #opentelemetry-collector %} + +If you are using an OpenTelemetry Collector distribution that doesn't support the Datadog Exporter, you can configure the [`otlphttpexporter`](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter) to export metrics to the Datadog OTLP metrics intake endpoint. + +For example, your `config.yaml` file would look like this: + +```yaml +... +exporters: + otlphttp: + metrics_endpoint: + headers: + dd-api-key: ${env:DD_API_KEY} + dd-otel-metric-config: '{"resource_attributes_as_tags": true}' +... + +service: + pipelines: + metrics: + receivers: [otlp] + processors: [batch, cumulativetodelta] + exporters: [otlphttp] +``` + +{% alert level="info" %} +Note the `cumulativetodelta` processor in the pipeline, which converts cumulative metrics to delta metrics. Delta metrics are required for the OTLP metrics intake endpoint. For more information, see [Configure delta temporality in OpenTelemetry](http://localhost:1313/opentelemetry/guide/otlp_delta_temporality/). +{% /alert %} + +## Troubleshooting{% #troubleshooting %} + +### Error: 403 Forbidden{% #error-403-forbidden %} + +If you receive a `403 Forbidden` error when sending metrics to the Datadog OTLP metrics intake endpoint, it indicates one of the following issues: + +- The endpoint URL is incorrect for your organization.**Solution**: Use the correct endpoint URL for your organization. Your site is , so you need to use the endpoint. + +### Error: 413 Request Entity Too Large{% #error-413-request-entity-too-large %} + +If you receive a `413 Request Entity Too Large` error when sending metrics to the Datadog OTLP metrics intake endpoint, it indicates that the payload size sent by the OTLP exporter exceeds the Datadog metrics intake endpoint's limit of 500KB for uncompressed payloads, or 5MB for compressed payloads after decompression. + +This error usually occurs when the OpenTelemetry SDK batches too much telemetry data in a single request payload. + +**Solution**: Reduce the export batch size of the SDK's batch processor. For example, in the OpenTelemetry Java SDK, you can adjust `BatchMetricExportProcessor`. + +### Issue: Missing datapoints or lower than expected metric values{% #issue-missing-datapoints-or-lower-than-expected-metric-values %} + +If you notice missing datapoints or lower than expected metric values, it may be because you are sending multiple datapoints for a metric that have the same timestamp (in seconds) and same dimensions. In such cases, Datadog only accepts the last datapoint, and previous datapoints are dropped (last-write-wins). 
Datadog requires the timeseries data of a metric to be unique in the context of {timestamp + dimensions}. + +**Solution**: Ensure that your datapoints of a given metric at one timestamp are uniquely tagged. For example, if you send multiple datapoints for a metric simultaneously from multiple AWS Lambda invocations, make sure to include unique identifiers (such as the Lambda ARN) as resource attributes in your metrics. Use the `resource_attributes_as_tags` option to add these resource attributes as metric tags. + +## Further reading{% #further-reading %} + +- [General OpenTelemetry SDK Configuration](https://opentelemetry.io/docs/concepts/sdk-configuration/general-sdk-configuration/) +- [OpenTelemetry Environment Variable Spec](https://opentelemetry.io/docs/reference/specification/sdk-environment-variables/) +- [OpenTelemetry Protocol Exporter](https://opentelemetry.io/docs/specs/otel/protocol/exporter/) +- [OTLP Metrics Exporter](https://opentelemetry.io/docs/specs/otel/metrics/sdk_exporters/otlp/%20) +- [Configure delta temporality in OpenTelemetry](http://localhost:1313/opentelemetry/guide/otlp_delta_temporality/) +- [OTLP Metrics Mapping in Datadog](http://localhost:1313/metrics/otlp/?tab=summary#mapping) diff --git a/opentelemetry-mdoc/setup/otlp_ingest/traces/index.md b/opentelemetry-mdoc/setup/otlp_ingest/traces/index.md new file mode 100644 index 0000000000000..cf3fad5712403 --- /dev/null +++ b/opentelemetry-mdoc/setup/otlp_ingest/traces/index.md @@ -0,0 +1,260 @@ +--- +isPrivate: true +title: Datadog OTLP Traces Intake Endpoint +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: >- + Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > Datadog + OTLP Intake Endpoint > Datadog OTLP Traces Intake Endpoint +--- + +# Datadog OTLP Traces Intake Endpoint + +{% callout %} +# Important note for users on the following Datadog sites: app.ddog-gov.com + +{% alert level="warning" %} +This product is not supported for your selected [Datadog site](http://localhost:1313/getting_started/site). (). +{% /alert %} + +{% /callout %} + +{% callout %} +The Datadog OTLP traces intake endpoint is in Preview. To request access, contact your account representative. +{% /callout %} + +## Overview{% #overview %} + +Datadog's OpenTelemetry protocol (OTLP) intake API endpoint allows you to send traces directly to Datadog. With this feature, you don't need to run the [Datadog Agent](http://localhost:1313/opentelemetry/otlp_ingest_in_the_agent/) or [OpenTelemetry Collector + Datadog Exporter](http://localhost:1313/opentelemetry/collector_exporter/). + +You might prefer this option if you're looking for a straightforward setup and want to send traces directly to Datadog without using the Datadog Agent or OpenTelemetry Collector. + +{% alert level="info" %} +The OTLP trace intake endpoint only supports `http/protobuf` encoding. `http/json` and `grpc` are not supported at this time. +{% /alert %} + +## Configuration{% #configuration %} + +To export OTLP data to the Datadog OTLP traces intake endpoint: + +1. Configure the OTLP HTTP Protobuf exporter. + - Set the Datadog OTLP traces intake endpoint. + - Configure the required HTTP headers. +1. (Optional) Set the `dd-otel-span-mapping` HTTP header to map or filter spans. + +### Configure the exporter{% #configure-the-exporter %} + +To send OTLP data to the Datadog OTLP traces intake endpoint, you need to use the OTLP HTTP Protobuf exporter. 
The process differs depending on whether you are using automatic or manual instrumentation for OpenTelemetry. + +#### Automatic instrumentation{% #automatic-instrumentation %} + +If you are using [OpenTelemetry automatic instrumentation](https://opentelemetry.io/docs/specs/otel/glossary/#automatic-instrumentation), set the following environment variables: + +```shell +export OTEL_EXPORTER_OTLP_TRACES_PROTOCOL="http/protobuf" +export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT="" +export OTEL_EXPORTER_OTLP_TRACES_HEADERS="dd-api-key=${DD_API_KEY},dd-otlp-source=${YOUR_SITE}" +``` + +{% alert level="info" %} +The value for `dd-otlp-source` should be provided to you by Datadog after being allowlisted for the intake endpoint. This is a specific identifier assigned to your organization. +{% /alert %} + +#### Manual instrumentation{% #manual-instrumentation %} + +If you are using manual instrumentation with OpenTelemetry SDKs, configure the OTLP HTTP Protobuf exporter programmatically. + +{% alert level="info" %} +Based on your [Datadog site](http://localhost:1313/getting_started/site/), which is : +- Replace `${YOUR_ENDPOINT}` with . +- Replace `${YOUR_SITE}` with the organization name you received from Datadog. + +{% /alert %} + +{% tab title="JavaScript" %} +The JavaScript exporter is [`exporter-trace-otlp-proto`](https://www.npmjs.com/package/@opentelemetry/exporter-trace-otlp-proto). To configure the exporter, use the following code snippet: + +```javascript +const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-proto'); // OTLP http/protobuf exporter + +const exporter = new OTLPTraceExporter({ + url: '${YOUR_ENDPOINT}', // Replace this with the correct endpoint + headers: { + 'dd-api-key': process.env.DD_API_KEY, + 'dd-otel-span-mapping': '{span_name_as_resource_name: true}', + 'dd-otlp-source': '${YOUR_SITE}', // Replace with the specific value provided by Datadog for your organization + }, +}); +``` + +{% /tab %} + +{% tab title="Java" %} +The Java exporter is [`OtlpHttpSpanExporter`](https://javadoc.io/doc/io.opentelemetry/opentelemetry-exporter-otlp-http-trace/). To configure the exporter, use the following code snippet: + +```java +import io.opentelemetry.exporter.otlp.http.trace.OtlpHttpSpanExporter; + +OtlpHttpSpanExporter exporter = OtlpHttpSpanExporter.builder() + .setEndpoint("${YOUR_ENDPOINT}") // Replace this with the correct endpoint + .addHeader("dd-api-key", System.getenv("DD_API_KEY")) + .addHeader("dd-otel-span-mapping", "{span_name_as_resource_name: true}") + .addHeader("dd-otlp-source", "${YOUR_SITE}") // Replace with the specific value provided by Datadog for your organization + .build(); +``` + +{% /tab %} + +{% tab title="Go" %} +The Go exporter is [`otlptracehttp`](http://go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp). 
To configure the exporter, use the following code snippet:

```go
import (
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

traceExporter, err := otlptracehttp.New(
	ctx,
	otlptracehttp.WithEndpoint("${YOUR_ENDPOINT}"), // Replace this with the correct endpoint
	otlptracehttp.WithURLPath("/v1/traces"),
	otlptracehttp.WithHeaders(
		map[string]string{
			"dd-api-key":           os.Getenv("DD_API_KEY"),
			"dd-otel-span-mapping": "{span_name_as_resource_name: true}",
			"dd-otlp-source":       "${YOUR_SITE}", // Replace with the specific value provided by Datadog for your organization
		}),
)
```

{% /tab %}

{% tab title="Python" %}
The Python exporter is [`OTLPSpanExporter`](https://pypi.org/project/opentelemetry-exporter-otlp-proto-http/). To configure the exporter, use the following code snippet:

```python
import os

from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter

exporter = OTLPSpanExporter(
    endpoint="${YOUR_ENDPOINT}",  # Replace this with the correct endpoint
    headers={
        "dd-api-key": os.environ.get("DD_API_KEY"),
        "dd-otel-span-mapping": "{span_name_as_resource_name: true}",
        "dd-otlp-source": "${YOUR_SITE}"  # Replace with the specific value provided by Datadog for your organization
    },
)
```

{% /tab %}

### (Optional) Map or filter span names{% #optional-map-or-filter-span-names %}

Use the `dd-otel-span-mapping` header to configure span mapping and filtering. The JSON header contains the following fields:

- `ignore_resources`: A list of regular expressions to disable traces based on their resource name.
- `span_name_remappings`: A map of Datadog span names to preferred names.
- `span_name_as_resource_name`: Specifies whether to use the OpenTelemetry span's name as the Datadog span's operation name (default: `true`). If `false`, the operation name is derived from a combination of the instrumentation scope name and span kind.

For example:

```json
{
  "span_name_as_resource_name": false,
  "span_name_remappings": {
    "io.opentelemetry.javaagent.spring.client": "spring.client"
  },
  "ignore_resources": [
    "io.opentelemetry.javaagent.spring.internal"
  ]
}
```

## OpenTelemetry Collector{% #opentelemetry-collector %}

If you are using the OpenTelemetry Collector and don't want to use the Datadog Exporter, you can configure [`otlphttpexporter`](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter) to export traces to the Datadog OTLP traces intake endpoint.

For example, configure your `config.yaml` like this:

```yaml
...

exporters:
  otlphttp:
    traces_endpoint:
    headers:
      dd-api-key: ${env:DD_API_KEY}
      dd-otel-span-mapping: "{span_name_as_resource_name: false}"
      dd-otlp-source: "${YOUR_SITE}" # Replace with the specific value provided by Datadog for your organization
...

service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [batch]
      exporters: [otlphttp]
```

## Troubleshooting{% #troubleshooting %}

### Error: 403 Forbidden{% #error-403-forbidden %}

If you receive a `403 Forbidden` error when sending traces to the Datadog OTLP traces intake endpoint, it indicates one of the following issues:

- The API key belongs to an organization that is not allowed to access the Datadog OTLP traces intake endpoint. **Solution**: To request access, contact your account representative.
- The `dd-otlp-source` header is missing or has an incorrect value. **Solution**: Ensure that the `dd-otlp-source` header is set with the proper value for your site. You should have received an allowlisted value for this header from Datadog if you are a platform partner.
- The endpoint URL is incorrect for your organization. **Solution**: Use the correct endpoint URL for your organization. Your site is , so you need to use the endpoint.

### Error: 413 Request Entity Too Large{% #error-413-request-entity-too-large %}

If you receive a `413 Request Entity Too Large` error when sending traces to the Datadog OTLP traces intake endpoint, it indicates that the payload size sent by the OTLP exporter exceeds the Datadog traces intake endpoint's limit of 3.2MB.

This error usually occurs when the OpenTelemetry SDK batches too much telemetry data in a single request payload.

**Solution**: Reduce the export batch size of the SDK's batch span processor. Here's an example of how to modify the `BatchSpanProcessorBuilder` in the OpenTelemetry Java SDK:

```java
BatchSpanProcessor batchSpanProcessor =
    BatchSpanProcessor
        .builder(exporter)
        .setMaxExportBatchSize(10) // Default is 512
        .build();
```

Adjust the `setMaxExportBatchSize` value according to your needs. A smaller value results in more frequent exports with smaller payloads, reducing the likelihood of exceeding the 3.2MB limit.

### Warning: "traces export: failed … 202 Accepted" in Go{% #warning-traces-export-failed--202-accepted-in-go %}

If you are using the OpenTelemetry Go SDK and see a warning message similar to `traces export: failed … 202 Accepted`, it is due to a known issue in the OpenTelemetry Go OTLP HTTP exporter.

The OpenTelemetry Go OTLP HTTP exporter treats any HTTP status code other than 200 as an error, even if the export succeeds ([Issue 3706](https://github.com/open-telemetry/opentelemetry-go/issues/3706)). In contrast, other OpenTelemetry SDKs consider any status code in the range [200, 300) as a success. The Datadog OTLP traces intake endpoint returns a `202 Accepted` status code for successful exports.

The OpenTelemetry community is still discussing whether other `2xx` status codes should be treated as successes ([Issue 3203](https://github.com/open-telemetry/opentelemetry-specification/issues/3203)).

**Solution**: If you are using the Datadog OTLP traces intake endpoint with the OpenTelemetry Go SDK, you can safely ignore this warning message. Your traces are being successfully exported despite the warning.

### Issue: Unexpected span operation names{% #issue-unexpected-span-operation-names %}

When using the Datadog OTLP trace intake endpoint, you may notice that the span operation names are different from those generated when using the Datadog Agent or OpenTelemetry Collector.

The Datadog OTLP trace intake endpoint has the `span_name_as_resource_name` option set to `true` by default. This means that Datadog uses the OpenTelemetry span's name as the operation name. In contrast, the Datadog Agent and OpenTelemetry Collector have this option set to `false` by default.

When `span_name_as_resource_name` is set to `false`, the operation name is derived from a combination of the instrumentation scope name and the span kind. For example, an operation name might appear as `opentelemetry.client`.

**Solution**: If you want to disable the `span_name_as_resource_name` option in the Datadog OTLP traces intake endpoint to match the behavior of the Datadog Agent or OpenTelemetry Collector, follow these steps:

1. Refer to [Map or filter span names](#optional-map-or-filter-span-names) in this document.
1. Set the `span_name_as_resource_name` option to `false` in the `dd-otel-span-mapping` header.

For example:

```json
{
  "span_name_as_resource_name": false,
  ...
}
```

This ensures that the span operation names are consistent across the Datadog OTLP traces intake endpoint, Datadog Agent, and OpenTelemetry Collector.

## Further reading{% #further-reading %}

- [General OpenTelemetry SDK Configuration](https://opentelemetry.io/docs/concepts/sdk-configuration/general-sdk-configuration/)
- [OpenTelemetry Environment Variable Spec](https://opentelemetry.io/docs/reference/specification/sdk-environment-variables/)
- [OpenTelemetry Protocol Exporter](https://opentelemetry.io/docs/reference/specification/protocol/exporter/)

diff --git a/opentelemetry-mdoc/setup/otlp_ingest_in_the_agent/index.md b/opentelemetry-mdoc/setup/otlp_ingest_in_the_agent/index.md
new file mode 100644
index 0000000000000..3b2d900bc1997
--- /dev/null
+++ b/opentelemetry-mdoc/setup/otlp_ingest_in_the_agent/index.md
@@ -0,0 +1,316 @@
---
title: OTLP Ingestion by the Datadog Agent
description: Ingest OTLP trace data through the Datadog Agent
breadcrumbs: >-
  Docs > OpenTelemetry in Datadog > Send OpenTelemetry Data to Datadog > OTLP
  Ingestion by the Datadog Agent
---

# OTLP Ingestion by the Datadog Agent

OTLP Ingest in the Agent is a way to send telemetry data directly from applications instrumented with [OpenTelemetry SDKs](https://opentelemetry.io/docs/instrumentation/) to the Datadog Agent. Since versions 6.32.0 and 7.32.0, the Datadog Agent can ingest OTLP traces and [OTLP metrics](http://localhost:1313/metrics/open_telemetry/otlp_metric_types/) through gRPC or HTTP. Since versions 6.48.0 and 7.48.0, the Datadog Agent can ingest OTLP logs through gRPC or HTTP.

OTLP Ingest in the Agent allows you to use observability features in the Datadog Agent. Data from applications instrumented with the OpenTelemetry SDK cannot be used in some Datadog proprietary products, such as App and API Protection, Continuous Profiler, and Ingestion Rules. [OpenTelemetry Runtime Metrics are supported for some languages](http://localhost:1313/opentelemetry/runtime_metrics/).

{% image
  source="http://localhost:1313/images/opentelemetry/setup/dd-agent-otlp-ingest.5c618e65990e9be5954c60e908ab5f09.png?auto=format"
  alt="Diagram: OpenTelemetry SDK sends data through OTLP protocol to a Collector with Datadog Exporter, which forwards to Datadog's platform." /%}

{% alert level="info" %}
To see which Datadog features are supported with this setup, see the [feature compatibility table](http://localhost:1313/opentelemetry/compatibility/) under OTel to Datadog Agent (OTLP).
{% /alert %}

## Initial setup{% #initial-setup %}

To get started, you first [instrument your application](https://opentelemetry.io/docs/concepts/instrumenting/) with OpenTelemetry SDKs. Then, export the telemetry data in OTLP format to the Datadog Agent. Configuring this varies depending on the kind of infrastructure your service is deployed on, as described below. Although the aim is to be compatible with the latest OTLP version, OTLP Ingest in the Agent is not compatible with all OTLP versions. The versions of OTLP that are compatible with the Datadog Agent are those that are also supported by the OTLP receiver in the OpenTelemetry Collector. To verify the exact versions supported, check the `go.opentelemetry.io/collector` version in the Agent `go.mod` file.
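For instance, an application exporting over OTLP/HTTP to an Agent on the same host might use the standard OpenTelemetry exporter environment variables. This is a minimal sketch; adjust the host and port to match how you enable OTLP ingestion on the Agent below:

```shell
# Minimal sketch: point an OTLP exporter at a Datadog Agent running on the same host.
# 4318 is the Agent's default OTLP/HTTP port once OTLP ingestion is enabled (see below).
export OTEL_EXPORTER_OTLP_PROTOCOL="http/protobuf"
export OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4318"
```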
+ +Read the OpenTelemetry instrumentation documentation to understand how to point your instrumentation to the Agent. The `receiver` section described below follows the [OpenTelemetry Collector OTLP receiver configuration schema](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/config.md). + +{% alert level="warning" %} +**Note**: The supported setup is an ingesting Agent deployed on every OpenTelemetry-data generating host. You cannot send OpenTelemetry telemetry from collectors or instrumented apps running one host to an Agent on a different host. But, provided the Agent is local to the collector or SDK instrumented app, you can set up multiple pipelines. +{% /alert %} + +## Enabling OTLP Ingestion on the Datadog Agent{% #enabling-otlp-ingestion-on-the-datadog-agent %} + +{% tab title="Host" %} +OTLP ingestion is off by default, and you can turn it on by updating your `datadog.yaml` file configuration or by setting environment variables. The following `datadog.yaml` configurations enable endpoints on the default ports. + +{% alert level="warning" %} +The following examples use `0.0.0.0` as the endpoint address for convenience. This allows connections from any network interface. For enhanced security, especially in local deployments, consider using `localhost` instead. For more information on secure endpoint configuration, see the [OpenTelemetry security documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks). +{% /alert %} + +For gRPC, default port 4317: + +```yaml +otlp_config: + receiver: + protocols: + grpc: + endpoint: 0.0.0.0:4317 +``` + +For HTTP, default port 4318: + +```yaml +otlp_config: + receiver: + protocols: + http: + endpoint: 0.0.0.0:4318 +``` + +Alternatively, configure the endpoints by providing the port through the environment variables: + +- For gRPC (`localhost:4317`): `DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_ENDPOINT` +- For HTTP (`localhost:4318`): `DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_HTTP_ENDPOINT` + +These must be passed to both the core Agent and trace Agent processes. If running in a containerized environment, use `0.0.0.0` instead of `localhost` to ensure the server is available on non-local interfaces. + +Configure either gRPC or HTTP for this feature. Here is [an example application that shows configuration for both](https://gist.github.com/gbbr/4a54dd02d34ad05e694952e0a02e1c67). + +OTLP logs ingestion on the Datadog Agent is disabled by default so that you don't have unexpected logs product usage that may impact billing. To enable OTLP logs ingestion: + +1. Explicitly enable log collection as a whole by following [Host Agent Log collection setup](http://localhost:1313/agent/logs/): + + ```yaml + logs_enabled: true + ``` + +1. Set `otlp_config.logs.enabled` to true: + + ```yaml + otlp_config: + logs: + enabled: true + ``` + +{% /tab %} + +{% tab title="Docker" %} + +1. Follow the [Datadog Docker Agent setup](http://localhost:1313/agent/docker/). + +1. For the Datadog Agent container, set the following endpoint environment variables and expose the corresponding port: + + - For gRPC: Set `DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_ENDPOINT` to `0.0.0.0:4317` and expose port `4317`. + - For HTTP: Set `DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_HTTP_ENDPOINT` to `0.0.0.0:4318` and expose port `4318`. + +1. 
If you want to enable OTLP logs ingestion, set the following endpoint environment variables in the Datadog Agent container:

   - Set `DD_LOGS_ENABLED` to true.
   - Set `DD_OTLP_CONFIG_LOGS_ENABLED` to true.

{% alert level="warning" %}
**Known Issue**: Starting with Agent version 7.61.0, OTLP ingestion pipelines may fail to start in Docker environments, displaying the error: `Error running the OTLP ingest pipeline: failed to register process metrics: process does not exist`.

If you are using an affected version, you can use one of these workarounds:

1. Set the environment variable `HOST_PROC` to `/proc` in your Agent Docker container.
1. Remove `/proc/:/host/proc/:ro` from `volumes` in your Agent Docker container.
1. Set `pid` to `host` in your Agent Docker container.

These configurations can be applied through either the `docker` command or the Docker Compose file.
{% /alert %}

{% /tab %}

{% tab title="Kubernetes (Daemonset)" %}

1. Follow the [Kubernetes Agent setup](http://localhost:1313/agent/kubernetes/?tab=daemonset).

1. Configure the following environment variables in both the trace Agent container and the core Agent container:

For gRPC:

   ```
   name: DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_ENDPOINT # enables gRPC receiver on port 4317
   value: "0.0.0.0:4317"
   ```

For HTTP:

   ```
   name: DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_HTTP_ENDPOINT # enables HTTP receiver on port 4318
   value: "0.0.0.0:4318"
   ```

1. Map the container ports 4317 or 4318 to the host port for the core Agent container:

For gRPC:

   ```
   ports:
     - containerPort: 4317
       hostPort: 4317
       name: traceportgrpc
       protocol: TCP
   ```

For HTTP:

   ```
   ports:
     - containerPort: 4318
       hostPort: 4318
       name: traceporthttp
       protocol: TCP
   ```

1. If you want to enable OTLP logs ingestion, set the following endpoint environment variables in the core Agent container:

Enable [log collection with your DaemonSet](http://localhost:1313/containers/guide/kubernetes_daemonset/#log-collection):

   ```
   name: DD_LOGS_ENABLED
   value: "true"
   ```

And enable OTLP logs ingestion:

   ```
   name: DD_OTLP_CONFIG_LOGS_ENABLED
   value: "true"
   ```

{% /tab %}

{% tab title="Kubernetes (Helm) - values.yaml" %}

1. Follow the [Kubernetes Agent setup](http://localhost:1313/agent/kubernetes/?tab=helm).

1. Enable the OTLP endpoints in the Agent by editing the `datadog.otlp` section of the `values.yaml` file:

For gRPC:

   ```
   otlp:
     receiver:
       protocols:
         grpc:
           endpoint: 0.0.0.0:4317
           enabled: true
   ```

For HTTP:

   ```
   otlp:
     receiver:
       protocols:
         http:
           endpoint: 0.0.0.0:4318
           enabled: true
   ```

This enables each protocol on its default port (`4317` for OTLP/gRPC and `4318` for OTLP/HTTP).
{% /tab %}

{% tab title="Kubernetes (Helm) - set" %}

1. Follow the [Kubernetes Agent setup](http://localhost:1313/agent/kubernetes/?tab=helm).

1. Enable the preferred protocol:

For gRPC:

   ```
   --set "datadog.otlp.receiver.protocols.grpc.enabled=true"
   ```

For HTTP:

   ```
   --set "datadog.otlp.receiver.protocols.http.enabled=true"
   ```

This enables each protocol on its default port (`4317` for OTLP/gRPC and `4318` for OTLP/HTTP).
{% /tab %}

{% tab title="Kubernetes (Operator)" %}

1. Follow the [Kubernetes Agent setup](http://localhost:1313/agent/kubernetes/?tab=helm).

1.
Enable the preferred protocol in your Operator's manifest: + +For gRPC: + + ```yaml + features: + otlp: + receiver: + protocols: + grpc: + enabled: true + ``` + +For HTTP: + + ```yaml + features: + otlp: + receiver: + protocols: + http: + enabled: true + ``` + +This enables each protocol in the default port (`4317` for OTLP/gRPC and `4318` for OTLP/HTTP). +{% /tab %} + +{% tab title="AWS Lambda" %} +For detailed instructions on using OpenTelemetry with AWS Lambda and Datadog, including: + +- Instrumenting your Lambda functions with OpenTelemetry +- Using OpenTelemetry API support within Datadog tracers +- Sending OpenTelemetry traces to the Datadog Lambda Extension + +See the Serverless documentation for [AWS Lambda and OpenTelemetry](http://localhost:1313/serverless/aws_lambda/opentelemetry/). +{% /tab %} + +There are many other environment variables and settings supported in the Datadog Agent. To get an overview of them all, see [the configuration template](https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml). + +## Sending OpenTelemetry traces, metrics, and logs to Datadog Agent{% #sending-opentelemetry-traces-metrics-and-logs-to-datadog-agent %} + +{% tab title="Docker" %} + +1. For the application container, set `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable to point to the Datadog Agent container. For example: + + ``` + OTEL_EXPORTER_OTLP_ENDPOINT=http://:4318 + ``` + +1. Both containers must be defined in the same bridge network, which is handled automatically if you use Docker Compose. Otherwise, follow the Docker example in [Tracing Docker Applications](http://localhost:1313/agent/docker/apm/#docker-network) to set up a bridge network with the correct ports. + +{% /tab %} + +{% tab title="Kubernetes" %} +In the application deployment file, configure the endpoint that the OpenTelemetry client sends traces to with the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable. + +For gRPC: + +```yaml +env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: "http://$(HOST_IP):4317" # sends to gRPC receiver on port 4317 +``` + +For HTTP: + +```yaml +env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: "http://$(HOST_IP):4318" # sends to HTTP receiver on port 4318 +``` + +**Note**: To enrich container tags for custom metrics, set the appropriate resource attributes in the application code where your OTLP metrics are generated. For example, set the `container.id` resource attribute to the pod's UID. +{% /tab %} + +{% alert level="info" %} +When configuring the endpoint for sending traces, ensure you use the correct path required by your OTLP library. Some libraries expect traces to be sent to the `/v1/traces` path, while others use the root path `/`. 
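For example, when you use the signal-specific endpoint variable, most SDKs use the value as-is, so include the path explicitly (a sketch, assuming the Agent's default OTLP/HTTP port):

```shell
# Signal-specific endpoint variables are not appended with /v1/traces automatically,
# so include the path yourself. Replace <agent-host> with your Agent's address.
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT="http://<agent-host>:4318/v1/traces"
```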
+{% /alert %} + +## Further reading{% #further-reading %} + +- [OTLP ingestion in the Agent](https://www.datadoghq.com/about/latest-news/press-releases/datadog-announces-opentelemetry-protocol-support/) +- [OTLP Metrics Types](http://localhost:1313/metrics/open_telemetry/otlp_metric_types) +- [OpenTelemetry Runtime Metrics](http://localhost:1313/opentelemetry/runtime_metrics/) diff --git a/opentelemetry-mdoc/troubleshooting/index.md b/opentelemetry-mdoc/troubleshooting/index.md new file mode 100644 index 0000000000000..2990cbdb9882f --- /dev/null +++ b/opentelemetry-mdoc/troubleshooting/index.md @@ -0,0 +1,279 @@ +--- +title: Troubleshooting +description: Datadog, the leading service for cloud-scale monitoring. +breadcrumbs: Docs > OpenTelemetry in Datadog > Troubleshooting +--- + +# Troubleshooting + +If you experience unexpected behavior using OpenTelemetry with Datadog, this guide may help you resolve the issue. If you continue to have trouble, contact [Datadog Support](http://localhost:1313/help/) for further assistance. + +## Incorrect or unexpected hostnames{% #incorrect-or-unexpected-hostnames %} + +When using OpenTelemetry with Datadog, you might encounter various hostname-related issues. The following sections cover common scenarios and their solutions. + +### Different Kubernetes hostname and node name{% #different-kubernetes-hostname-and-node-name %} + +**Symptom**: When deploying in Kubernetes, the hostname reported by Datadog does not match the expected node name. + +**Cause**: This is typically the result of missing `k8s.node.name` (and optionally `k8s.cluster.name`) tags. + +**Resolution**: + +1. Configure the `k8s.pod.ip` attribute for your application deployment: + + ```yaml + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: OTEL_RESOURCE + value: k8s.pod.ip=$(MY_POD_IP) + ``` + +1. Enable the `k8sattributes` processor in your Collector: + + ```yaml + k8sattributes: + [...] + processors: + - k8sattributes + ``` + +Alternatively, you can override the hostname using the `datadog.host.name` attribute: + +```yaml +processors: + transform: + trace_statements: + - context: resource + statements: + - set(attributes["datadog.host.name"], "${NODE_NAME}") +``` + +For more information on host-identifying attributes, see [Mapping OpenTelemetry Semantic Conventions to Hostnames](http://localhost:1313/opentelemetry/schema_semantics/hostname/). + +### Unexpected hostnames with AWS Fargate deployment{% #unexpected-hostnames-with-aws-fargate-deployment %} + +**Symptom**: In AWS Fargate environments, an incorrect hostname might be reported for traces. + +**Cause**: In Fargate environments, the default resource detection may not properly identify the ECS metadata, leading to incorrect hostname assignment. + +**Resolution**: + +Configure the `resourcedetection` processor in your Collector configuration and enable the `ecs` detector: + +```yaml +processors: + resourcedetection: + detectors: [env, ecs] + timeout: 2s + override: false +``` + +### Gateway collector not forwarding host metadata{% #gateway-collector-not-forwarding-host-metadata %} + +**Symptom**: In a gateway deployment, telemetry from multiple hosts appears to come from a single host, or host metadata isn't being properly forwarded. + +**Cause**: This occurs when the gateway collector configuration doesn't preserve or properly forward the host metadata attributes from the agent collectors. + +**Resolution**: + +1. 
Configure agent collectors to collect and forward host metadata: + + ```yaml + processors: + resourcedetection: + detectors: [system, env] + k8sattributes: + passthrough: true + ``` + +1. Configure the gateway collector to extract and forward necessary metadata: + + ```yaml + processors: + k8sattributes: + extract: + metadata: [node.name, k8s.node.name] + transform: + trace_statements: + - context: resource + statements: + - set(attributes["datadog.host.use_as_metadata"], true) + + exporters: + datadog: + hostname_source: resource_attribute + ``` + +For more information, see [Mapping OpenTelemetry Semantic Conventions to Infrastructure List Host Information](http://localhost:1313/opentelemetry/schema_semantics/host_metadata/). + +### The same host shows up multiple times under different names{% #the-same-host-shows-up-multiple-times-under-different-names %} + +**Symptom**: A single host appears under multiple names in Datadog. For example, you might see one entry from the OpenTelemetry Collector (with the OTel logo) and another from the Datadog Agent. + +**Cause**: When a host is monitored through more than one ingestion method (for example, OTLP + Datadog Agent, or DogStatsD + OTLP) without aligning on a single hostname resource attribute, Datadog treats each path as a separate host. + +**Resolution**: + +1. Identify all active telemetry ingestion paths sending data from the same machine to Datadog. +1. Choose a single hostname source and decide whether you want to rely on the Datadog Agent's hostname or a specific resource attribute (for example, `k8s.node.name`). +1. Configure each path (Agent, Collector, etc.) so that they report a consistent hostname. For example, if you're setting the hostname with OTLP attributes, configure your transform processor: + ```yaml + processors: + transform: + trace_statements: + - context: resource + statements: + - set(attributes["datadog.host.name"], "shared-hostname") + ``` +1. Validate in Datadog (Infrastructure List, host map, etc.) to confirm the host now appears under a single name. + +## Host tag delays after startup{% #host-tag-delays-after-startup %} + +**Symptom**: You may experience a delay in host tags appearing on your telemetry data after starting the Datadog Agent or OpenTelemetry Collector. This delay typically lasts under 10 minutes but can extend up to 40-50 minutes in some cases. + +**Cause**: This delay occurs because host metadata must be processed and indexed by Datadog's backend before tags can be associated with telemetry data. + +**Resolution**: + +Host tags configured in either the Datadog exporter configuration (`host_metadata::tags`) or the Datadog Agent's `tags` section are not immediately applied to telemetry data. The tags eventually appear after the backend resolves the host metadata. + +Choose your setup for specific instructions: + +{% tab title="Datadog Agent OTLP Ingestion" %} +Configure `expected_tags_duration` in `datadog.yaml` to bridge the gap until host tags are resolved: + +```yaml +expected_tags_duration: "15m" +``` + +This configuration adds the expected tags to all telemetry for the specified duration (in this example, 15 minutes). +{% /tab %} + +{% tab title="OpenTelemetry Collector" %} +Use the `transform` processor to set your host tags as OTLP attributes. 
For example, to add environment and team tags: + +```yaml +processors: + transform: + trace_statements: + - context: resource + statements: + # OpenTelemetry semantic conventions + - set(attributes["deployment.environment.name"], "prod") + # Datadog-specific host tags + - set(attributes["ddtags"], "env:prod,team:backend") +... +``` + +This approach combines OpenTelemetry semantic conventions with Datadog-specific host tags to ensure proper functionality in both OpenTelemetry and Datadog environments. +{% /tab %} + +## Unable to map 'team' attribute to Datadog team tag{% #unable-to-map-team-attribute-to-datadog-team-tag %} + +**Symptom**: The team tag is not appearing in Datadog for logs and traces, despite being set as a resource attribute in OpenTelemetry configurations. + +**Cause**: This happens because OpenTelemetry resource attributes need explicit mapping to Datadog's tag format using the `ddtags` attribute. + +**Resolution**: + +Use the OpenTelemetry Collector's transform processor to map the team resource attribute to the `ddtags` attribute: + +```yaml +processors: + transform/datadog_team_tag: + metric_statements: + - context: datapoint + statements: + - set(attributes["ddtags"], Concat(["team:", resource.attributes["team"]],"")) + log_statements: + - context: log + statements: + - set(attributes["ddtags"], Concat(["team:", resource.attributes["team"]],"")) + trace_statements: + - context: span + statements: + - set(attributes["ddtags"], Concat(["team:", resource.attributes["team"]],"")) +``` + +{% alert level="info" %} +Replace `resource.attributes["team"]` with the actual attribute name if different in your setup (for example, `resource.attributes["arm.team.name"]`). +{% /alert %} + +To verify the configuration: + +1. Restart the OpenTelemetry Collector to apply the changes. +1. Generate test logs and traces. +1. Check if the team tag appears in your Datadog logs and traces. +1. Verify that the team tag functions as expected in filtering and dashboards. + +## Container tags not appearing on Containers page{% #container-tags-not-appearing-on-containers-page %} + +**Symptom**: Container tags are not appearing on the Containers page in Datadog, which affects container monitoring and management capabilities. + +**Cause**: This occurs when container resource attributes aren't properly mapped to Datadog's expected container metadata format. + +**Resolution**: + +When using OTLP ingestion in the Datadog Agent, you need to set specific resource attributes to ensure proper container metadata association. For more information, see [Resource Attribute Mapping](http://localhost:1313/opentelemetry/schema_semantics/semantic_mapping/). + +To verify the configuration: + +1. Check the raw trace data to confirm that container IDs and tags are properly translated into Datadog format (for example, `container.id` should become `container_id`). +1. Verify that container metadata appears on the Containers page. + +## Missing metrics in Software Catalog and dashboards{% #missing-metrics-in-software-catalog-and-dashboards %} + +**Symptom**: Metrics are not appearing in the Software Catalog and dashboards despite being properly collected. + +**Cause**: This typically occurs due to incorrect or improperly mapped semantic conventions. + +**Resolution**: + +To verify the configuration: + +1. Check that your metrics contain the required [semantic conventions](http://localhost:1313/opentelemetry/schema_semantics/semantic_mapping/). +1. Verify metric names follow OpenTelemetry naming conventions. +1. 
Confirm metrics are being properly translated to the Datadog format using the [metrics mapping reference](http://localhost:1313/opentelemetry/schema_semantics/metrics_mapping/#metrics-mappings). + +{% alert level="info" %} +When working with semantic conventions, ensure you're following the latest OpenTelemetry specification for metric naming and attributes. +{% /alert %} + +## Port binding errors and connection failures{% #port-binding-errors-and-connection-failures %} + +**Symptom**: You experience port conflicts or binding issues when deploying the DDOT Collector, or applications cannot connect to the DDOT Collector. + +**Cause**: This typically occurs due to port naming conflicts, incorrect port configurations, or when multiple services attempt to use the same ports. + +**Resolution**: + +The Datadog Operator automatically binds the OpenTelemetry Collector to ports `4317` (named `otel-grpc`) and `4318` (named `otel-http`) by default. + +To explicitly override the default ports, use the `features.otelCollector.ports` parameter: + +```yaml +# Enable Features +features: + otelCollector: + enabled: true + ports: + - containerPort: 4317 + hostPort: 4317 + name: otel-grpc + - containerPort: 4318 + hostPort: 4318 + name: otel-http +``` + +{% alert level="warning" %} +When configuring ports `4317` and `4318`, you must use the default names `otel-grpc` and `otel-http` respectively to avoid port conflicts. +{% /alert %} + +## Further reading{% #further-reading %} + +- [OpenTelemetry Troubleshooting](https://opentelemetry.io/docs/collector/troubleshooting/) diff --git a/package.json b/package.json index 662f929f1aab5..ae9b8328e96c9 100644 --- a/package.json +++ b/package.json @@ -55,6 +55,7 @@ "docs-llms-txt": "https://s3.amazonaws.com/origin-static-assets/corp-node-packages/master/docs-llms-txt-v1.0.0.tgz", "fancy-log": "^1.3.3", "geo-locate": "https://s3.amazonaws.com/origin-static-assets/corp-node-packages/master/geo-locate-v1.0.2.tgz", + "html-to-mdoc": "https://s3.amazonaws.com/origin-static-assets/corp-node-packages/jen.gilbert/llm-support/html-to-mdoc-v1.0.3.tgz", "hugo-bin": "0.139.0", "instantsearch.js": "^4.74.1", "js-cookie": "^2.2.1", diff --git a/yarn.lock b/yarn.lock index 05373a061d6cb..78b63f5f04449 100644 --- a/yarn.lock +++ b/yarn.lock @@ -3627,6 +3627,29 @@ __metadata: languageName: node linkType: hard +"@markdoc/markdoc@npm:0.5.2": + version: 0.5.2 + resolution: "@markdoc/markdoc@npm:0.5.2" + dependencies: + "@types/linkify-it": ^3.0.1 + "@types/markdown-it": 12.2.3 + peerDependencies: + "@types/react": "*" + react: "*" + dependenciesMeta: + "@types/linkify-it": + optional: true + "@types/markdown-it": + optional: true + peerDependenciesMeta: + "@types/react": + optional: true + react: + optional: true + checksum: add5efbde67da9dac832b90ff9348f6c2726df8b196cbe18611f0be07d706148cbf7652d0b8f637ad1ebf61b29d11972ae70365d4b026c1f5c8a13128149822f + languageName: node + linkType: hard + "@nodelib/fs.scandir@npm:2.1.5": version: 2.1.5 resolution: "@nodelib/fs.scandir@npm:2.1.5" @@ -4679,6 +4702,13 @@ __metadata: languageName: node linkType: hard +"@trysound/sax@npm:0.2.0": + version: 0.2.0 + resolution: "@trysound/sax@npm:0.2.0" + checksum: 11226c39b52b391719a2a92e10183e4260d9651f86edced166da1d95f39a0a1eaa470e44d14ac685ccd6d3df7e2002433782872c0feeb260d61e80f21250e65c + languageName: node + linkType: hard + "@tsconfig/node10@npm:^1.0.7": version: 1.0.11 resolution: "@tsconfig/node10@npm:1.0.11" @@ -4963,6 +4993,13 @@ __metadata: languageName: node linkType: hard 
+"@types/relateurl@npm:^0.2.33": + version: 0.2.33 + resolution: "@types/relateurl@npm:0.2.33" + checksum: a4b7876cc24da3eddc1202d9f57fb6cdd551ff3d884124365dd15012dde20c2b4c19eee9bcd3b17e7c43e8edbe82a33753a6c266e41e3761283d44e6234d47da + languageName: node + linkType: hard + "@types/request@npm:^2.48.8": version: 2.48.12 resolution: "@types/request@npm:2.48.12" @@ -6194,6 +6231,13 @@ __metadata: languageName: node linkType: hard +"boolbase@npm:^1.0.0": + version: 1.0.0 + resolution: "boolbase@npm:1.0.0" + checksum: 3e25c80ef626c3a3487c73dbfc70ac322ec830666c9ad915d11b701142fab25ec1e63eff2c450c74347acfd2de854ccde865cd79ef4db1683f7c7b046ea43bb0 + languageName: node + linkType: hard + "bootstrap@npm:^5.2": version: 5.2.3 resolution: "bootstrap@npm:5.2.3" @@ -6946,6 +6990,13 @@ __metadata: languageName: node linkType: hard +"commander@npm:^7.2.0": + version: 7.2.0 + resolution: "commander@npm:7.2.0" + checksum: 53501cbeee61d5157546c0bef0fedb6cdfc763a882136284bed9a07225f09a14b82d2a84e7637edfd1a679fb35ed9502fd58ef1d091e6287f60d790147f68ddc + languageName: node + linkType: hard + "component-emitter@npm:^1.2.1": version: 1.3.0 resolution: "component-emitter@npm:1.3.0" @@ -7045,6 +7096,23 @@ __metadata: languageName: node linkType: hard +"cosmiconfig@npm:^9.0.0": + version: 9.0.0 + resolution: "cosmiconfig@npm:9.0.0" + dependencies: + env-paths: ^2.2.1 + import-fresh: ^3.3.0 + js-yaml: ^4.1.0 + parse-json: ^5.2.0 + peerDependencies: + typescript: ">=4.9.5" + peerDependenciesMeta: + typescript: + optional: true + checksum: a30c424b53d442ea0bdd24cb1b3d0d8687c8dda4a17ab6afcdc439f8964438801619cdb66e8e79f63b9caa3e6586b60d8bab9ce203e72df6c5e80179b971fe8f + languageName: node + linkType: hard + "cpu-features@npm:~0.0.9": version: 0.0.10 resolution: "cpu-features@npm:0.0.10" @@ -7158,6 +7226,46 @@ __metadata: languageName: node linkType: hard +"css-select@npm:^5.1.0": + version: 5.2.2 + resolution: "css-select@npm:5.2.2" + dependencies: + boolbase: ^1.0.0 + css-what: ^6.1.0 + domhandler: ^5.0.2 + domutils: ^3.0.1 + nth-check: ^2.0.1 + checksum: 0ab672620c6bdfe4129dfecf202f6b90f92018b24a1a93cfbb295c24026d0163130ba4b98d7443f87246a2c1d67413798a7a5920cd102b0cfecfbc89896515aa + languageName: node + linkType: hard + +"css-tree@npm:^2.3.1": + version: 2.3.1 + resolution: "css-tree@npm:2.3.1" + dependencies: + mdn-data: 2.0.30 + source-map-js: ^1.0.1 + checksum: 493cc24b5c22b05ee5314b8a0d72d8a5869491c1458017ae5ed75aeb6c3596637dbe1b11dac2548974624adec9f7a1f3a6cf40593dc1f9185eb0e8279543fbc0 + languageName: node + linkType: hard + +"css-tree@npm:~2.2.0": + version: 2.2.1 + resolution: "css-tree@npm:2.2.1" + dependencies: + mdn-data: 2.0.28 + source-map-js: ^1.0.1 + checksum: b94aa8cc2f09e6f66c91548411fcf74badcbad3e150345074715012d16333ce573596ff5dfca03c2a87edf1924716db765120f94247e919d72753628ba3aba27 + languageName: node + linkType: hard + +"css-what@npm:^6.1.0": + version: 6.2.2 + resolution: "css-what@npm:6.2.2" + checksum: 4d1f07b348a638e1f8b4c72804a1e93881f35e0f541256aec5ac0497c5855df7db7ab02da030de950d4813044f6d029a14ca657e0f92c3987e4b604246235b2b + languageName: node + linkType: hard + "cssdb@npm:^4.4.0": version: 4.4.0 resolution: "cssdb@npm:4.4.0" @@ -7183,6 +7291,15 @@ __metadata: languageName: node linkType: hard +"csso@npm:^5.0.5": + version: 5.0.5 + resolution: "csso@npm:5.0.5" + dependencies: + css-tree: ~2.2.0 + checksum: 0ad858d36bf5012ed243e9ec69962a867509061986d2ee07cc040a4b26e4d062c00d4c07e5ba8d430706ceb02dd87edd30a52b5937fd45b1b6f2119c4993d59a + languageName: node + linkType: hard + 
"cssom@npm:^0.4.1": version: 0.4.4 resolution: "cssom@npm:0.4.4" @@ -7656,6 +7773,7 @@ __metadata: eslint-plugin-standard: ^4.1.0 fancy-log: ^1.3.3 geo-locate: "https://s3.amazonaws.com/origin-static-assets/corp-node-packages/master/geo-locate-v1.0.2.tgz" + html-to-mdoc: "https://s3.amazonaws.com/origin-static-assets/corp-node-packages/jen.gilbert/llm-support/html-to-mdoc-v1.0.3.tgz" hugo-bin: 0.139.0 husky: ^9.1.7 instantsearch.js: ^4.74.1 @@ -7738,6 +7856,28 @@ __metadata: languageName: node linkType: hard +"dom-serializer@npm:^1.0.1": + version: 1.4.1 + resolution: "dom-serializer@npm:1.4.1" + dependencies: + domelementtype: ^2.0.1 + domhandler: ^4.2.0 + entities: ^2.0.0 + checksum: fbb0b01f87a8a2d18e6e5a388ad0f7ec4a5c05c06d219377da1abc7bb0f674d804f4a8a94e3f71ff15f6cb7dcfc75704a54b261db672b9b3ab03da6b758b0b22 + languageName: node + linkType: hard + +"dom-serializer@npm:^2.0.0": + version: 2.0.0 + resolution: "dom-serializer@npm:2.0.0" + dependencies: + domelementtype: ^2.3.0 + domhandler: ^5.0.2 + entities: ^4.2.0 + checksum: cd1810544fd8cdfbd51fa2c0c1128ec3a13ba92f14e61b7650b5de421b88205fd2e3f0cc6ace82f13334114addb90ed1c2f23074a51770a8e9c1273acbc7f3e6 + languageName: node + linkType: hard + "domelementtype@npm:1": version: 1.3.1 resolution: "domelementtype@npm:1.3.1" @@ -7752,6 +7892,13 @@ __metadata: languageName: node linkType: hard +"domelementtype@npm:^2.2.0, domelementtype@npm:^2.3.0": + version: 2.3.0 + resolution: "domelementtype@npm:2.3.0" + checksum: ee837a318ff702622f383409d1f5b25dd1024b692ef64d3096ff702e26339f8e345820f29a68bcdcea8cfee3531776b3382651232fbeae95612d6f0a75efb4f6 + languageName: node + linkType: hard + "domexception@npm:^1.0.1": version: 1.0.1 resolution: "domexception@npm:1.0.1" @@ -7770,6 +7917,24 @@ __metadata: languageName: node linkType: hard +"domhandler@npm:^4.2.0, domhandler@npm:^4.2.2": + version: 4.3.1 + resolution: "domhandler@npm:4.3.1" + dependencies: + domelementtype: ^2.2.0 + checksum: 4c665ceed016e1911bf7d1dadc09dc888090b64dee7851cccd2fcf5442747ec39c647bb1cb8c8919f8bbdd0f0c625a6bafeeed4b2d656bbecdbae893f43ffaaa + languageName: node + linkType: hard + +"domhandler@npm:^5.0.2, domhandler@npm:^5.0.3": + version: 5.0.3 + resolution: "domhandler@npm:5.0.3" + dependencies: + domelementtype: ^2.3.0 + checksum: 0f58f4a6af63e6f3a4320aa446d28b5790a009018707bce2859dcb1d21144c7876482b5188395a188dfa974238c019e0a1e610d2fc269a12b2c192ea2b0b131c + languageName: node + linkType: hard + "domutils@npm:1.5": version: 1.5.1 resolution: "domutils@npm:1.5.1" @@ -7780,6 +7945,28 @@ __metadata: languageName: node linkType: hard +"domutils@npm:^2.8.0": + version: 2.8.0 + resolution: "domutils@npm:2.8.0" + dependencies: + dom-serializer: ^1.0.1 + domelementtype: ^2.2.0 + domhandler: ^4.2.0 + checksum: abf7434315283e9aadc2a24bac0e00eab07ae4313b40cc239f89d84d7315ebdfd2fb1b5bf750a96bc1b4403d7237c7b2ebf60459be394d625ead4ca89b934391 + languageName: node + linkType: hard + +"domutils@npm:^3.0.1": + version: 3.2.2 + resolution: "domutils@npm:3.2.2" + dependencies: + dom-serializer: ^2.0.0 + domelementtype: ^2.3.0 + domhandler: ^5.0.3 + checksum: ae941d56f03d857077d55dde9297e960a625229fc2b933187cc4123084d7c2d2517f58283a7336567127029f1e008449bac8ac8506d44341e29e3bb18e02f906 + languageName: node + linkType: hard + "dot-prop@npm:^6.0.0": version: 6.0.1 resolution: "dot-prop@npm:6.0.1" @@ -7940,14 +8127,28 @@ __metadata: languageName: node linkType: hard -"entities@npm:^4.4.0, entities@npm:^4.5.0": +"entities@npm:^3.0.1": + version: 3.0.1 + resolution: "entities@npm:3.0.1" + checksum: 
aaf7f12033f0939be91f5161593f853f2da55866db55ccbf72f45430b8977e2b79dbd58c53d0fdd2d00bd7d313b75b0968d09f038df88e308aa97e39f9456572 + languageName: node + linkType: hard + +"entities@npm:^4.2.0, entities@npm:^4.4.0, entities@npm:^4.5.0": version: 4.5.0 resolution: "entities@npm:4.5.0" checksum: 853f8ebd5b425d350bffa97dd6958143179a5938352ccae092c62d1267c4e392a039be1bae7d51b6e4ffad25f51f9617531fedf5237f15df302ccfb452cbf2d7 languageName: node linkType: hard -"env-paths@npm:^2.2.0": +"entities@npm:^6.0.0": + version: 6.0.1 + resolution: "entities@npm:6.0.1" + checksum: 937b952e81aca641660a6a07f70001c6821973dea3ae7f6a5013eadce94620f3ed2e9c745832d503c8811ce6e97704d8a0396159580c0e567d815234de7fdecf + languageName: node + linkType: hard + +"env-paths@npm:^2.2.0, env-paths@npm:^2.2.1": version: 2.2.1 resolution: "env-paths@npm:2.2.1" checksum: 65b5df55a8bab92229ab2b40dad3b387fad24613263d103a97f91c9fe43ceb21965cd3392b1ccb5d77088021e525c4e0481adb309625d0cb94ade1d1fb8dc17e @@ -10035,6 +10236,59 @@ __metadata: languageName: node linkType: hard +"html-to-mdoc@https://s3.amazonaws.com/origin-static-assets/corp-node-packages/jen.gilbert/llm-support/html-to-mdoc-v1.0.3.tgz": + version: 1.0.3 + resolution: "html-to-mdoc@https://s3.amazonaws.com/origin-static-assets/corp-node-packages/jen.gilbert/llm-support/html-to-mdoc-v1.0.3.tgz" + dependencies: + "@markdoc/markdoc": 0.5.2 + htmlnano: ^2.1.5 + md5: ^2.3.0 + p-limit: ^7.0.0 + parse5: ^7.3.0 + svgo: ^3.3.2 + typescript: ^5.8.3 + zod: ^3.25.28 + checksum: ed009d5523373c7a191a900270fab3308e8a19809328908ddcc02ad38394b850f238190de203ce4d100b66c4f4815da89b3558bf1ec627182346da3326d6f044 + languageName: node + linkType: hard + +"htmlnano@npm:^2.1.5": + version: 2.1.5 + resolution: "htmlnano@npm:2.1.5" + dependencies: + "@types/relateurl": ^0.2.33 + cosmiconfig: ^9.0.0 + posthtml: ^0.16.5 + peerDependencies: + cssnano: ^7.0.0 + postcss: ^8.3.11 + purgecss: ^7.0.2 + relateurl: ^0.2.7 + srcset: 5.0.1 + svgo: ^3.0.2 + terser: ^5.10.0 + uncss: ^0.17.3 + peerDependenciesMeta: + cssnano: + optional: true + postcss: + optional: true + purgecss: + optional: true + relateurl: + optional: true + srcset: + optional: true + svgo: + optional: true + terser: + optional: true + uncss: + optional: true + checksum: e9998f2b4e417814a83b26cbc74bce0be20329e0f27569ba1a36261807090fe42a907b5ef5ab23e8f8784296f40fd4e73225073bdd43ca75742679ed96353c8e + languageName: node + linkType: hard + "htmlparser2@npm:3.8.x": version: 3.8.3 resolution: "htmlparser2@npm:3.8.3" @@ -10048,6 +10302,18 @@ __metadata: languageName: node linkType: hard +"htmlparser2@npm:^7.1.1": + version: 7.2.0 + resolution: "htmlparser2@npm:7.2.0" + dependencies: + domelementtype: ^2.0.1 + domhandler: ^4.2.2 + domutils: ^2.8.0 + entities: ^3.0.1 + checksum: 96563d9965729cfcb3f5f19c26d013c6831b4cb38d79d8c185e9cd669ea6a9ffe8fb9ccc74d29a068c9078aa0e2767053ed6b19aa32723c41550340d0094bea0 + languageName: node + linkType: hard + "http-cache-semantics@npm:^4.1.0": version: 4.1.0 resolution: "http-cache-semantics@npm:4.1.0" @@ -10248,6 +10514,16 @@ __metadata: languageName: node linkType: hard +"import-fresh@npm:^3.3.0": + version: 3.3.1 + resolution: "import-fresh@npm:3.3.1" + dependencies: + parent-module: ^1.0.0 + resolve-from: ^4.0.0 + checksum: a06b19461b4879cc654d46f8a6244eb55eb053437afd4cbb6613cad6be203811849ed3e4ea038783092879487299fda24af932b86bdfff67c9055ba3612b8c87 + languageName: node + linkType: hard + "import-local@npm:^3.0.2": version: 3.0.2 resolution: "import-local@npm:3.0.2" @@ -10770,6 +11046,13 @@ __metadata: 
languageName: node linkType: hard +"is-json@npm:^2.0.1": + version: 2.0.1 + resolution: "is-json@npm:2.0.1" + checksum: 29efc4f82e912bf54cd7b28632dd8e52a311085ca879fe51c869a81ba1313bb689eb440ace53dd480edbc009f92a425c24059e0766f4117fe9888fe59e86186f + languageName: node + linkType: hard + "is-lambda@npm:^1.0.1": version: 1.0.1 resolution: "is-lambda@npm:1.0.1" @@ -12508,6 +12791,20 @@ __metadata: languageName: node linkType: hard +"mdn-data@npm:2.0.28": + version: 2.0.28 + resolution: "mdn-data@npm:2.0.28" + checksum: f51d587a6ebe8e426c3376c74ea6df3e19ec8241ed8e2466c9c8a3904d5d04397199ea4f15b8d34d14524b5de926d8724ae85207984be47e165817c26e49e0aa + languageName: node + linkType: hard + +"mdn-data@npm:2.0.30": + version: 2.0.30 + resolution: "mdn-data@npm:2.0.30" + checksum: d6ac5ac7439a1607df44b22738ecf83f48e66a0874e4482d6424a61c52da5cde5750f1d1229b6f5fa1b80a492be89465390da685b11f97d62b8adcc6e88189aa + languageName: node + linkType: hard + "mdurl@npm:^1.0.1": version: 1.0.1 resolution: "mdurl@npm:1.0.1" @@ -13097,6 +13394,15 @@ __metadata: languageName: node linkType: hard +"nth-check@npm:^2.0.1": + version: 2.1.1 + resolution: "nth-check@npm:2.1.1" + dependencies: + boolbase: ^1.0.0 + checksum: 5afc3dafcd1573b08877ca8e6148c52abd565f1d06b1eb08caf982e3fa289a82f2cae697ffb55b5021e146d60443f1590a5d6b944844e944714a5b549675bcd3 + languageName: node + linkType: hard + "num2fraction@npm:^1.2.2": version: 1.2.2 resolution: "num2fraction@npm:1.2.2" @@ -13390,6 +13696,15 @@ __metadata: languageName: node linkType: hard +"p-limit@npm:^7.0.0": + version: 7.1.1 + resolution: "p-limit@npm:7.1.1" + dependencies: + yocto-queue: ^1.2.1 + checksum: 80b15159bc5dc1d1f48ba6809a7747b5338176862f137233518a488865fb9fa98b545106838e7f3797f95fb57eba4d7e3714ad2b1f1831a08aa995b5d0459653 + languageName: node + linkType: hard + "p-locate@npm:^4.1.0": version: 4.1.0 resolution: "p-locate@npm:4.1.0" @@ -13493,6 +13808,18 @@ __metadata: languageName: node linkType: hard +"parse-json@npm:^5.2.0": + version: 5.2.0 + resolution: "parse-json@npm:5.2.0" + dependencies: + "@babel/code-frame": ^7.0.0 + error-ex: ^1.3.1 + json-parse-even-better-errors: ^2.3.0 + lines-and-columns: ^1.1.6 + checksum: 62085b17d64da57f40f6afc2ac1f4d95def18c4323577e1eced571db75d9ab59b297d1d10582920f84b15985cbfc6b6d450ccbf317644cfa176f3ed982ad87e2 + languageName: node + linkType: hard + "parse-node-version@npm:^1.0.0": version: 1.0.1 resolution: "parse-node-version@npm:1.0.1" @@ -13516,6 +13843,15 @@ __metadata: languageName: node linkType: hard +"parse5@npm:^7.3.0": + version: 7.3.0 + resolution: "parse5@npm:7.3.0" + dependencies: + entities: ^6.0.0 + checksum: ffd040c4695d93f0bc370e3d6d75c1b352178514af41be7afa212475ea5cead1d6e377cd9d4cec6a5e2bcf497ca50daf9e0088eadaa37dbc271f60def08fdfcd + languageName: node + linkType: hard + "pascalcase@npm:^0.1.1": version: 0.1.1 resolution: "pascalcase@npm:0.1.1" @@ -14161,6 +14497,34 @@ __metadata: languageName: node linkType: hard +"posthtml-parser@npm:^0.11.0": + version: 0.11.0 + resolution: "posthtml-parser@npm:0.11.0" + dependencies: + htmlparser2: ^7.1.1 + checksum: 37dca546a04dc2ddc936a629596edccc9e439a7f6ad503dae5165ea197ddc53f102e69259719a49ecd491e01b093b95c96287c38101f985b78a846c05a206b3c + languageName: node + linkType: hard + +"posthtml-render@npm:^3.0.0": + version: 3.0.0 + resolution: "posthtml-render@npm:3.0.0" + dependencies: + is-json: ^2.0.1 + checksum: 5ed2d6e8813af63c4e5a2d9d026f611fd178c9052a16b302a6e0e81d1badb64dab36e3fc1531b5bdd376465f39d19a6488299b3c6dfe13beae3dd525ff856573 + languageName: node 
+ linkType: hard + +"posthtml@npm:^0.16.5": + version: 0.16.6 + resolution: "posthtml@npm:0.16.6" + dependencies: + posthtml-parser: ^0.11.0 + posthtml-render: ^3.0.0 + checksum: 8b9b9d27bd2417d6b5b7d408000b23316c3c4d2a2d0ea62080a8fbec5654cc7376ea9d6317b290c030d616142144a8ca0a96ffe1e919493e3eac17442d362596 + languageName: node + linkType: hard + "preact@npm:^10.10.0": version: 10.11.2 resolution: "preact@npm:10.11.2" @@ -15722,7 +16086,7 @@ __metadata: languageName: node linkType: hard -"source-map-js@npm:^1.2.1": +"source-map-js@npm:^1.0.1, source-map-js@npm:^1.2.1": version: 1.2.1 resolution: "source-map-js@npm:1.2.1" checksum: 4eb0cd997cdf228bc253bcaff9340afeb706176e64868ecd20efbe6efea931465f43955612346d6b7318789e5265bdc419bc7669c1cebe3db0eb255f57efa76b @@ -16323,6 +16687,23 @@ __metadata: languageName: node linkType: hard +"svgo@npm:^3.3.2": + version: 3.3.2 + resolution: "svgo@npm:3.3.2" + dependencies: + "@trysound/sax": 0.2.0 + commander: ^7.2.0 + css-select: ^5.1.0 + css-tree: ^2.3.1 + css-what: ^6.1.0 + csso: ^5.0.5 + picocolors: ^1.0.0 + bin: + svgo: ./bin/svgo + checksum: a3f8aad597dec13ab24e679c4c218147048dc1414fe04e99447c5f42a6e077b33d712d306df84674b5253b98c9b84dfbfb41fdd08552443b04946e43d03e054e + languageName: node + linkType: hard + "symbol-tree@npm:^3.2.2, symbol-tree@npm:^3.2.4": version: 3.2.4 resolution: "symbol-tree@npm:3.2.4" @@ -16879,6 +17260,16 @@ __metadata: languageName: node linkType: hard +"typescript@npm:^5.8.3": + version: 5.9.3 + resolution: "typescript@npm:5.9.3" + bin: + tsc: bin/tsc + tsserver: bin/tsserver + checksum: 0d0ffb84f2cd072c3e164c79a2e5a1a1f4f168e84cb2882ff8967b92afe1def6c2a91f6838fb58b168428f9458c57a2ba06a6737711fdd87a256bbe83e9a217f + languageName: node + linkType: hard + "typescript@patch:typescript@^4.9.4#~builtin": version: 4.9.5 resolution: "typescript@patch:typescript@npm%3A4.9.5#~builtin::version=4.9.5&hash=289587" @@ -16889,6 +17280,16 @@ __metadata: languageName: node linkType: hard +"typescript@patch:typescript@^5.8.3#~builtin": + version: 5.9.3 + resolution: "typescript@patch:typescript@npm%3A5.9.3#~builtin::version=5.9.3&hash=14eedb" + bin: + tsc: bin/tsc + tsserver: bin/tsserver + checksum: 8bb8d86819ac86a498eada254cad7fb69c5f74778506c700c2a712daeaff21d3a6f51fd0d534fe16903cb010d1b74f89437a3d02d4d0ff5ca2ba9a4660de8497 + languageName: node + linkType: hard + "typesense-instantsearch-adapter@npm:^2.8.0": version: 2.8.0 resolution: "typesense-instantsearch-adapter@npm:2.8.0" @@ -17850,9 +18251,23 @@ __metadata: languageName: node linkType: hard +"yocto-queue@npm:^1.2.1": + version: 1.2.1 + resolution: "yocto-queue@npm:1.2.1" + checksum: 0843d6c2c0558e5c06e98edf9c17942f25c769e21b519303a5c2adefd5b738c9b2054204dc856ac0cd9d134b1bc27d928ce84fd23c9e2423b7e013d5a6f50577 + languageName: node + linkType: hard + "zod@npm:^3.22.4": version: 3.24.2 resolution: "zod@npm:3.24.2" checksum: c02455c09678c5055c636d64f9fcda2424fea0aa46ac7d9681e7f41990bc55f488bcd84b9d7cfef0f6e906f51f55b245239d92a9f726248aa74c5b84edf00c2d languageName: node linkType: hard + +"zod@npm:^3.25.28": + version: 3.25.76 + resolution: "zod@npm:3.25.76" + checksum: c9a403a62b329188a5f6bd24d5d935d2bba345f7ab8151d1baa1505b5da9f227fb139354b043711490c798e91f3df75991395e40142e6510a4b16409f302b849 + languageName: node + linkType: hard