diff --git a/internal/pkg/agent/application/monitoring/component/testdata/monitoring_config_full_otel.yaml b/internal/pkg/agent/application/monitoring/component/testdata/monitoring_config_full_otel.yaml new file mode 100644 index 00000000000..b113a7e70e0 --- /dev/null +++ b/internal/pkg/agent/application/monitoring/component/testdata/monitoring_config_full_otel.yaml @@ -0,0 +1,851 @@ +agent: + monitoring: + http: + enabled: false + metrics: true +inputs: +- _runtime_experimental: otel + id: filestream-monitoring-agent + name: filestream-monitoring-agent + streams: + - close: + on_state_change: + inactive: 5m + data_stream: + dataset: elastic_agent + namespace: default + type: logs + id: filestream-monitoring-agent + parsers: + - ndjson: + add_error_key: true + message_key: message + overwrite_keys: true + target: "" + paths: + - placeholder + processors: + - drop_event: + when: + regexp: + component.id: .*-monitoring$ + - drop_event: + when: + regexp: + message: ^Non-zero metrics in the last + - drop_event: + when: + equals: + log.type: event + - copy_fields: + fields: + - from: data_stream.dataset + to: data_stream.dataset_original + - drop_fields: + fields: + - data_stream.dataset + - copy_fields: + fail_on_error: false + fields: + - from: component.dataset + to: data_stream.dataset + ignore_missing: true + - copy_fields: + fail_on_error: false + fields: + - from: data_stream.dataset_original + to: data_stream.dataset + when: + not: + has_fields: + - data_stream.dataset + - drop_fields: + fields: + - data_stream.dataset_original + - event.dataset + - copy_fields: + fields: + - from: data_stream.dataset + to: event.dataset + - drop_fields: + fields: + - ecs.version + ignore_missing: true + - add_formatted_index: + index: '%{[data_stream.type]}-%{[data_stream.dataset]}-%{[data_stream.namespace]}' + type: filestream + - close: + on_state_change: + inactive: 5m + data_stream: + dataset: elastic_agent.endpoint_security + namespace: default + type: logs + id: filestream-monitoring-endpoint-default + parsers: + - ndjson: + add_error_key: true + message_key: message + overwrite_keys: true + target: "" + paths: + - placeholder + processors: + - add_fields: + fields: + binary: endpoint-security + dataset: elastic_agent.endpoint_security + id: endpoint-default + type: "" + target: component + - add_fields: + fields: + source: endpoint-default + target: log + type: filestream + type: filestream + use_output: monitoring +- _runtime_experimental: otel + data_stream: + namespace: default + id: metrics-monitoring-beats + name: metrics-monitoring-beats + streams: + - data_stream: + dataset: elastic_agent.metricbeat + namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-metricbeat + index: metrics-elastic_agent.metricbeat-default + metricsets: + - stats + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.metricbeat + namespace: default + type: metrics + target: data_stream + - add_fields: + fields: + dataset: elastic_agent.metricbeat + target: event + - add_fields: + fields: + id: "" + process: metricbeat + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - add_fields: + fields: + binary: metricbeat + id: beat/metrics-monitoring + target: component + - drop_fields: + fields: + - beat.stats.cgroup + - beat.stats.cpu + - beat.stats.handles + - beat.stats.memstats + - beat.stats.runtime + ignore_missing: true + - data_stream: + dataset: elastic_agent.filebeat + 
namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-filebeat + index: metrics-elastic_agent.filebeat-default + metricsets: + - stats + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.filebeat + namespace: default + type: metrics + target: data_stream + - add_fields: + fields: + dataset: elastic_agent.filebeat + target: event + - add_fields: + fields: + id: "" + process: filebeat + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - add_fields: + fields: + binary: filebeat + id: filebeat-default + target: component + - data_stream: + dataset: elastic_agent.filebeat + namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-filebeat + index: metrics-elastic_agent.filebeat-default + metricsets: + - stats + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.filebeat + namespace: default + type: metrics + target: data_stream + - add_fields: + fields: + dataset: elastic_agent.filebeat + target: event + - add_fields: + fields: + id: "" + process: filebeat + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - add_fields: + fields: + binary: filebeat + id: filestream-monitoring + target: component + - drop_fields: + fields: + - beat.stats.cgroup + - beat.stats.cpu + - beat.stats.handles + - beat.stats.memstats + - beat.stats.runtime + ignore_missing: true + - data_stream: + dataset: elastic_agent.filebeat + namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-filebeat + index: metrics-elastic_agent.filebeat-default + metricsets: + - stats + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.filebeat + namespace: default + type: metrics + target: data_stream + - add_fields: + fields: + dataset: elastic_agent.filebeat + target: event + - add_fields: + fields: + id: "" + process: filebeat + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - add_fields: + fields: + binary: filebeat + id: filestream-otel + target: component + - drop_fields: + fields: + - beat.stats.cgroup + - beat.stats.cpu + - beat.stats.handles + - beat.stats.memstats + - beat.stats.runtime + ignore_missing: true + - data_stream: + dataset: elastic_agent.metricbeat + namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-metricbeat + index: metrics-elastic_agent.metricbeat-default + metricsets: + - stats + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.metricbeat + namespace: default + type: metrics + target: data_stream + - add_fields: + fields: + dataset: elastic_agent.metricbeat + target: event + - add_fields: + fields: + id: "" + process: metricbeat + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - add_fields: + fields: + binary: metricbeat + id: http/metrics-monitoring + target: component + - drop_fields: + fields: + - beat.stats.cgroup + - beat.stats.cpu + - beat.stats.handles + - beat.stats.memstats + - beat.stats.runtime + ignore_missing: true + - data_stream: + dataset: elastic_agent.metricbeat + namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-metricbeat + index: 
metrics-elastic_agent.metricbeat-default + metricsets: + - stats + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.metricbeat + namespace: default + type: metrics + target: data_stream + - add_fields: + fields: + dataset: elastic_agent.metricbeat + target: event + - add_fields: + fields: + id: "" + process: metricbeat + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - add_fields: + fields: + binary: metricbeat + id: prometheus/metrics-monitoring + target: component + - drop_fields: + fields: + - beat.stats.cgroup + - beat.stats.cpu + - beat.stats.handles + - beat.stats.memstats + - beat.stats.runtime + ignore_missing: true + type: beat/metrics + use_output: monitoring +- _runtime_experimental: otel + data_stream: + namespace: default + id: metrics-monitoring-agent + name: metrics-monitoring-agent + streams: + - data_stream: + dataset: elastic_agent.elastic_agent + namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-agent + index: metrics-elastic_agent.elastic_agent-default + metricsets: + - json + namespace: agent + path: /stats + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.elastic_agent + namespace: default + type: metrics + target: data_stream + - add_fields: + fields: + dataset: elastic_agent.elastic_agent + target: event + - add_fields: + fields: + id: "" + process: elastic-agent + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - copy_fields: + fail_on_error: false + fields: + - from: http.agent.beat.cpu + to: system.process.cpu + - from: http.agent.beat.memstats.memory_sys + to: system.process.memory.size + - from: http.agent.beat.handles + to: system.process.fd + - from: http.agent.beat.cgroup + to: system.process.cgroup + - from: http.agent.apm-server + to: apm-server + - from: http.filebeat_input + to: filebeat_input + ignore_missing: true + - drop_fields: + fields: + - http + ignore_missing: true + - add_fields: + fields: + binary: elastic-agent + id: elastic-agent + target: component + - data_stream: + dataset: elastic_agent.elastic_agent + namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-filebeat-1 + index: metrics-elastic_agent.elastic_agent-default + metricsets: + - json + namespace: agent + path: /stats + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.elastic_agent + target: event + - add_fields: + fields: + id: "" + process: filebeat + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - copy_fields: + fail_on_error: false + fields: + - from: http.agent.beat.cpu + to: system.process.cpu + - from: http.agent.beat.memstats.memory_sys + to: system.process.memory.size + - from: http.agent.beat.handles + to: system.process.fd + - from: http.agent.beat.cgroup + to: system.process.cgroup + - from: http.agent.apm-server + to: apm-server + - from: http.filebeat_input + to: filebeat_input + ignore_missing: true + - drop_fields: + fields: + - http + ignore_missing: true + - add_fields: + fields: + binary: filebeat + id: filebeat-default + target: component + - data_stream: + dataset: elastic_agent.filebeat_input + namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-filebeat-1 + index: 
metrics-elastic_agent.filebeat_input-default + json.is_array: true + metricsets: + - json + namespace: filebeat_input + path: /inputs/ + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.filebeat_input + target: event + - add_fields: + fields: + id: "" + process: filebeat + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - copy_fields: + fail_on_error: false + fields: + - from: http.agent.beat.cpu + to: system.process.cpu + - from: http.agent.beat.memstats.memory_sys + to: system.process.memory.size + - from: http.agent.beat.handles + to: system.process.fd + - from: http.agent.beat.cgroup + to: system.process.cgroup + - from: http.agent.apm-server + to: apm-server + - from: http.filebeat_input + to: filebeat_input + ignore_missing: true + - drop_fields: + fields: + - http + ignore_missing: true + - add_fields: + fields: + binary: filebeat + id: filebeat-default + target: component + - data_stream: + dataset: elastic_agent.filebeat_input + namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-filebeat-1 + index: metrics-elastic_agent.filebeat_input-default + json.is_array: true + metricsets: + - json + namespace: filebeat_input + path: /inputs/ + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.filebeat_input + target: event + - add_fields: + fields: + id: "" + process: filebeat + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - copy_fields: + fail_on_error: false + fields: + - from: http.agent.beat.cpu + to: system.process.cpu + - from: http.agent.beat.memstats.memory_sys + to: system.process.memory.size + - from: http.agent.beat.handles + to: system.process.fd + - from: http.agent.beat.cgroup + to: system.process.cgroup + - from: http.agent.apm-server + to: apm-server + - from: http.filebeat_input + to: filebeat_input + ignore_missing: true + - drop_fields: + fields: + - http + - system + ignore_missing: true + - add_fields: + fields: + binary: filebeat + id: filestream-monitoring + target: component + - data_stream: + dataset: elastic_agent.filebeat_input + namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-filebeat-1 + index: metrics-elastic_agent.filebeat_input-default + json.is_array: true + metricsets: + - json + namespace: filebeat_input + path: /inputs/ + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.filebeat_input + target: event + - add_fields: + fields: + id: "" + process: filebeat + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - copy_fields: + fail_on_error: false + fields: + - from: http.agent.beat.cpu + to: system.process.cpu + - from: http.agent.beat.memstats.memory_sys + to: system.process.memory.size + - from: http.agent.beat.handles + to: system.process.fd + - from: http.agent.beat.cgroup + to: system.process.cgroup + - from: http.agent.apm-server + to: apm-server + - from: http.filebeat_input + to: filebeat_input + ignore_missing: true + - drop_fields: + fields: + - http + - system + ignore_missing: true + - add_fields: + fields: + binary: filebeat + id: filestream-otel + target: component + type: http/metrics + use_output: monitoring +- _runtime_experimental: otel + data_stream: + namespace: default + id: metrics-monitoring-collector + name: metrics-monitoring-collector 
+ streams: + - data_stream: + dataset: elastic_agent.elastic_agent + namespace: default + type: metrics + failure_threshold: 5 + hosts: + - placeholder + id: metrics-monitoring-collector + index: metrics-elastic_agent.elastic_agent-default + metrics_path: /metrics + metricsets: + - collector + namespace: default + period: 1m0s + processors: + - add_fields: + fields: + dataset: elastic_agent.elastic_agent + namespace: default + type: metrics + target: data_stream + - add_fields: + fields: + dataset: elastic_agent.elastic_agent + target: event + - add_fields: + fields: + id: "" + process: elastic-agent + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - add_fields: + fields: + binary: elastic-agent + id: elastic-agent/collector + target: component + - add_fields: + fields: + name: stats + target: metricset + - script: + lang: javascript + source: "// A script for use in the Beats script processor, to remap raw OTel + telemetry\n// from its prometheus endpoint to backwards-compatible Beats + metrics fields\n// that can be viewed in Agent dashboards.\n\nfunction process(event) + {\n // This hard-coded exporter name will not work for the general\n // + (non-monitoring) use case.\n var elastic_exporter = event.Get(\"prometheus.labels.exporter\") + == \"elasticsearch/_agent-component/monitoring\";\n var elastic_scope = + event.Get(\"prometheus.labels.otel_scope_name\") == \"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter\";\n\n + \ // We accept general collector fields that are scoped to the elasticsearch\n + \ // exporter (queue metrics, sent / error stats), or fields specifically\n + \ // scoped to the elasticsearch exporter (custom elastic metrics).\n if + (!elastic_exporter && !elastic_scope) {\n event.Cancel();\n return;\n + \ }\n\n // Hack: if the scope is elastic-custom fields, deterministically + mangle the\n // agent.id. 
Since the label set is different, these are passed + through in\n // different events, and if we don't do this one of the events + will be\n // rejected as a duplicate since they have the same component + id, agent id,\n // and metricset.\n var id = event.Get(\"agent.id\");\n + \ if (id != null && id.length > 0) {\n // Increment / wrap the last hex + character of the uuid\n var prefix = id.substring(0, id.length - 1);\n + \ var last = id.substring(id.length - 1);\n var rotated = \"0\";\n + \ if (last < \"f\") {\n rotated = String.fromCharCode(last.charCodeAt(0) + + 1);\n }\n id = prefix + rotated;\n event.Put(\"agent.id\", id);\n + \ }\n\n // The event will be discarded unless we find some valid metric + to convert.\n\tvar keep_event = false;\n\n\tvar queue_size = event.Get(\"prometheus.metrics.otelcol_exporter_queue_size\");\n\tvar + queue_capacity = event.Get(\"prometheus.metrics.otelcol_exporter_queue_capacity\");\n + \ if (queue_size != null) {\n \tkeep_event = true;\n event.Put(\"beat.stats.libbeat.pipeline.queue.filled.events\", + queue_size);\n }\n if (queue_capacity != null) {\n \tkeep_event = true;\n + \ event.Put(\"beat.stats.libbeat.pipeline.queue.max_events\", queue_capacity);\n + \ }\n\tif (queue_size != null && queue_capacity != null) {\n\t\tvar queue_pct + = queue_size / queue_capacity;\n\t\tif (!isNaN(queue_pct)) {\n\t\t\tevent.Put(\"beat.stats.libbeat.pipeline.queue.filled.pct\", + queue_pct);\n\t\t}\n\t}\n \n var total_sent = 0;\n var total_sent_valid + = false;\n // Add send statistics from all source types\n var sent_logs + = event.Get(\"prometheus.metrics.otelcol_exporter_sent_log_records_total\");\n + \ if (sent_logs != null) {\n total_sent += sent_logs;\n total_sent_valid + = true;\n }\n var sent_spans = event.Get(\"prometheus.metrics.otelcol_exporter_sent_spans_total\");\n + \ if (sent_spans != null) {\n total_sent += sent_spans;\n total_sent_valid + = true;\n }\n var sent_metrics = event.Get(\"prometheus.metrics.otelcol_exporter_sent_metric_points_total\");\n + \ if (sent_metrics != null) {\n total_sent += sent_metrics;\n total_sent_valid + = true;\n }\n if (total_sent_valid) {\n event.Put(\"beat.stats.libbeat.output.events.acked\", + total_sent);\n \tkeep_event = true;\n }\n\n var total_failed = 0;\n var + total_failed_valid = false;\n // Add failed statistics from all source + types\n var failed_logs = event.Get(\"prometheus.metrics.otelcol_exporter_send_failed_log_records_total\");\n + \ if (failed_logs != null) {\n total_failed += failed_logs;\n total_failed_valid + = true;\n }\n var failed_spans = event.Get(\"prometheus.metrics.otelcol_exporter_send_failed_spans_total\");\n + \ if (failed_spans != null) {\n total_failed += failed_spans;\n total_failed_valid + = true;\n }\n var failed_metrics = event.Get(\"prometheus.metrics.otelcol_exporter_send_failed_metric_points_total\");\n + \ if (failed_metrics != null) {\n total_failed += failed_metrics;\n total_failed_valid + = true;\n }\n if (total_failed_valid) {\n event.Put(\"beat.stats.libbeat.output.events.dropped\", + total_failed);\n \tkeep_event = true;\n }\n\n var flushed_bytes = event.Get(\"prometheus.metrics.otelcol_elasticsearch_flushed_bytes_total\");\n + \ if (flushed_bytes != null) {\n event.Put(\"beat.stats.libbeat.output.write.bytes\", + flushed_bytes);\n \tkeep_event = true;\n }\n\n var retried_docs = event.Get(\"prometheus.metrics.otelcol_elasticsearch_docs_retried_ratio_total\");\n + \ if (retried_docs != null) {\n // \"failed\" in the beats metric means + an event failed to ingest but was\n // not 
dropped, and will be retried.\n + \ event.Put(\"beat.stats.libbeat.output.events.failed\", retried_docs);\n + \ \tkeep_event = true;\n }\n\n var request_count = event.Get(\"prometheus.metrics.otelcol_elasticsearch_bulk_requests_count_ratio_total\");\n + \ if (request_count != null) {\n // This is not an exact semantic match + for how Beats measures batch count,\n // but it's close.\n event.Put(\"beat.stats.libbeat.output.events.batches\", + request_count);\n \tkeep_event = true;\n }\n\n var processed_docs_count + = event.Get(\"prometheus.metrics.otelcol_elasticsearch_docs_processed_ratio_total\");\n + \ if (processed_docs_count != null) {\n // Approximate semantic match: + the otel metric counts all document\n // ingestion attempts, including + success, failure, and retries,\n // which is a better match for the Beats + definition of total events\n // than otelcol_elasticsearch_docs_received_ratio_total + which\n // includes only unique events seen (regardless of retries etc).\n + \ event.Put(\"beat.stats.libbeat.output.events.total\", processed_docs_count);\n + \ \tkeep_event = true;\n }\n\n if (!keep_event) {\n event.Cancel();\n + \ }\n}\n" + type: prometheus/metrics + use_output: monitoring +- data_stream: + namespace: default + id: metrics-monitoring-endpoint_security + name: metrics-monitoring-endpoint_security + streams: + - data_stream: + dataset: elastic_agent.endpoint_security + namespace: default + type: metrics + id: metrics-monitoring-endpoint_security + index: metrics-elastic_agent.endpoint_security-default + metricsets: + - process + period: 1m0s + process.cgroups.enabled: false + process.pid: 1234 + processors: + - add_fields: + fields: + dataset: elastic_agent.endpoint_security + namespace: default + type: metrics + target: data_stream + - add_fields: + fields: + dataset: elastic_agent.endpoint_security + target: event + - add_fields: + fields: + id: "" + process: endpoint_security + snapshot: false + version: placeholder + target: elastic_agent + - add_fields: + fields: + id: "" + target: agent + - add_fields: + fields: + binary: endpoint_security + id: endpoint-default + target: component + type: system/metrics + use_output: monitoring +outputs: + monitoring: {} diff --git a/internal/pkg/agent/application/monitoring/component/testdata/monitoring_config_full.yaml b/internal/pkg/agent/application/monitoring/component/testdata/monitoring_config_full_process.yaml similarity index 100% rename from internal/pkg/agent/application/monitoring/component/testdata/monitoring_config_full.yaml rename to internal/pkg/agent/application/monitoring/component/testdata/monitoring_config_full_process.yaml diff --git a/internal/pkg/agent/application/monitoring/component/v1_monitor_test.go b/internal/pkg/agent/application/monitoring/component/v1_monitor_test.go index 2569cada75c..efcf104ba05 100644 --- a/internal/pkg/agent/application/monitoring/component/v1_monitor_test.go +++ b/internal/pkg/agent/application/monitoring/component/v1_monitor_test.go @@ -31,21 +31,6 @@ import ( func TestMonitoringFull(t *testing.T) { agentInfo, err := info.NewAgentInfo(context.Background(), false) require.NoError(t, err, "Error creating agent info") - testMon := BeatsMonitor{ - enabled: true, - config: &monitoringConfig{ - C: &monitoringcfg.MonitoringConfig{ - Enabled: true, - MonitorMetrics: true, - MonitorLogs: true, - HTTP: &monitoringcfg.MonitoringHTTPConfig{ - Enabled: true, - }, - RuntimeManager: monitoringcfg.DefaultRuntimeManager, - }, - }, - agentInfo: agentInfo, - } policy := map[string]any{ "agent": 
map[string]any{
@@ -100,35 +85,76 @@ func TestMonitoringFull(t *testing.T) {
 		"endpoint-default": 1234,
 	}
 
-	expectedConfigFilePath := filepath.Join(".", "testdata", "monitoring_config_full.yaml")
-	expectedConfigBytes, err := os.ReadFile(expectedConfigFilePath)
-	require.NoError(t, err)
-
-	outCfg, err := testMon.MonitoringConfig(policy, compList, existingPidStateMap)
-	require.NoError(t, err)
+	testCases := []struct {
+		Name               string
+		RuntimeManager     string
+		ExpectedConfigPath string
+	}{
+		{
+			Name:               "Default runtime manager",
+			RuntimeManager:     monitoringcfg.DefaultRuntimeManager,
+			ExpectedConfigPath: filepath.Join(".", "testdata", "monitoring_config_full_process.yaml"),
+		},
+		{
+			Name:               "Process runtime manager",
+			RuntimeManager:     monitoringcfg.ProcessRuntimeManager,
+			ExpectedConfigPath: filepath.Join(".", "testdata", "monitoring_config_full_process.yaml"),
+		},
+		{
+			Name:               "Otel runtime manager",
+			RuntimeManager:     monitoringcfg.OtelRuntimeManager,
+			ExpectedConfigPath: filepath.Join(".", "testdata", "monitoring_config_full_otel.yaml"),
+		},
+	}
 
-	// Replace paths with placeholders. Log paths are different for each OS and it's annoying to fully account for the
-	// differences in this test. Same thing applies to endpoints.
-	for _, inputCfg := range outCfg["inputs"].([]any) {
-		inputCfgMap := inputCfg.(map[string]interface{})
-		streams := inputCfgMap["streams"].([]interface{})
-		for _, stream := range streams {
-			streamMap := stream.(map[string]interface{})
-			if _, ok := streamMap["paths"]; ok {
-				streamMap["paths"] = []string{"placeholder"}
+	for _, tc := range testCases {
+		t.Run(tc.Name, func(t *testing.T) {
+			testMon := BeatsMonitor{
+				enabled: true,
+				config: &monitoringConfig{
+					C: &monitoringcfg.MonitoringConfig{
+						Enabled:        true,
+						MonitorMetrics: true,
+						MonitorLogs:    true,
+						HTTP: &monitoringcfg.MonitoringHTTPConfig{
+							Enabled: true,
+						},
+						RuntimeManager: tc.RuntimeManager,
+					},
+				},
+				agentInfo: agentInfo,
 			}
-			if _, ok := streamMap["hosts"]; ok {
-				streamMap["hosts"] = []string{"placeholder"}
+
+			expectedConfigBytes, err := os.ReadFile(tc.ExpectedConfigPath)
+			require.NoError(t, err)
+
+			outCfg, err := testMon.MonitoringConfig(policy, compList, existingPidStateMap)
+			require.NoError(t, err)
+
+			// Replace paths with placeholders. Log paths are different for each OS and it's annoying to fully account for the
+			// differences in this test. Same thing applies to endpoints.
+			for _, inputCfg := range outCfg["inputs"].([]any) {
+				inputCfgMap := inputCfg.(map[string]interface{})
+				streams := inputCfgMap["streams"].([]interface{})
+				for _, stream := range streams {
+					streamMap := stream.(map[string]interface{})
+					if _, ok := streamMap["paths"]; ok {
+						streamMap["paths"] = []string{"placeholder"}
+					}
+					if _, ok := streamMap["hosts"]; ok {
+						streamMap["hosts"] = []string{"placeholder"}
+					}
+				}
 			}
-		}
-	}
 
-	outCfgBytes, err := yaml.Marshal(outCfg)
-	require.NoError(t, err)
-	outCfgString := string(outCfgBytes)
-	// replace the version with a placeholder
-	outCfgString = strings.ReplaceAll(outCfgString, agentInfo.Version(), "placeholder")
-	assert.Equal(t, string(expectedConfigBytes), outCfgString)
+			outCfgBytes, err := yaml.Marshal(outCfg)
+			require.NoError(t, err)
+			outCfgString := string(outCfgBytes)
+			// replace the version with a placeholder
+			outCfgString = strings.ReplaceAll(outCfgString, agentInfo.Version(), "placeholder")
+			assert.Equal(t, string(expectedConfigBytes), outCfgString)
+		})
+	}
 }
 
 func TestMonitoringWithEndpoint(t *testing.T) {