From 4dfce45af893b95d8056fa38534dbc91b4fbdb75 Mon Sep 17 00:00:00 2001 From: Adam Benhassen Date: Mon, 17 Nov 2025 01:23:52 +0100 Subject: [PATCH] feat: add redpanda buffering layer with split ingress/egress otel collectors --- deployment/index.ts | 7 +- deployment/services/otel-collector.ts | 51 ++++- deployment/services/redpanda.ts | 198 ++++++++++++++++++ .../otel-collector/builder-config-egress.yaml | 21 ++ ...onfig.yaml => builder-config-ingress.yaml} | 6 +- .../configs/otel-collector/config-egress.yaml | 56 +++++ .../{config.yaml => config-ingress.yaml} | 38 ++-- docker/docker-compose.dev.yml | 45 +++- docker/docker.hcl | 48 +++-- docker/otel-collector-egress.dockerfile | 28 +++ ...file => otel-collector-ingress.dockerfile} | 8 +- 11 files changed, 445 insertions(+), 61 deletions(-) create mode 100644 deployment/services/redpanda.ts create mode 100644 docker/configs/otel-collector/builder-config-egress.yaml rename docker/configs/otel-collector/{builder-config.yaml => builder-config-ingress.yaml} (75%) create mode 100644 docker/configs/otel-collector/config-egress.yaml rename docker/configs/otel-collector/{config.yaml => config-ingress.yaml} (63%) create mode 100644 docker/otel-collector-egress.dockerfile rename docker/{otel-collector.dockerfile => otel-collector-ingress.dockerfile} (79%) diff --git a/deployment/index.ts b/deployment/index.ts index 159e2cce5d4..49cc8165039 100644 --- a/deployment/index.ts +++ b/deployment/index.ts @@ -20,6 +20,7 @@ import { deployPostgres } from './services/postgres'; import { deployProxy } from './services/proxy'; import { deployPublicGraphQLAPIGateway } from './services/public-graphql-api-gateway'; import { deployRedis } from './services/redis'; +import { deployRedpanda } from './services/redpanda'; import { deployS3, deployS3AuditLog, deployS3Mirror } from './services/s3'; import { deploySchema } from './services/schema'; import { configureSentry } from './services/sentry'; @@ -78,6 +79,7 @@ const clickhouse = deployClickhouse(); const postgres = deployPostgres(); const redis = deployRedis({ environment }); const kafka = deployKafka(); +const redpanda = deployRedpanda(); const s3 = deployS3(); const s3Mirror = deployS3Mirror(); const s3AuditLog = deployS3AuditLog(); @@ -284,6 +286,7 @@ const otelCollector = deployOTELCollector({ graphql, dbMigrations, clickhouse, + redpanda, image: docker.factory.getImageId('otel-collector', imagesTag), docker, }); @@ -344,5 +347,7 @@ export const schemaApiServiceId = schema.service.id; export const webhooksApiServiceId = webhooks.service.id; export const appId = app.deployment.id; -export const otelCollectorId = otelCollector.deployment.id; +export const otelCollectorIngressId = otelCollector.ingress.deployment.id; +export const otelCollectorEgressId = otelCollector.egress.deployment.id; +export const redpandaStatefulSetId = redpanda.statefulSet.id; export const publicIp = proxy.get()!.status.loadBalancer.ingress[0].ip; diff --git a/deployment/services/otel-collector.ts b/deployment/services/otel-collector.ts index 581907c2812..4451a2a440b 100644 --- a/deployment/services/otel-collector.ts +++ b/deployment/services/otel-collector.ts @@ -5,6 +5,7 @@ import { DbMigrations } from './db-migrations'; import { Docker } from './docker'; import { Environment } from './environment'; import { GraphQL } from './graphql'; +import { Redpanda } from './redpanda'; export type OTELCollector = ReturnType; @@ -15,9 +16,13 @@ export function deployOTELCollector(args: { clickhouse: Clickhouse; dbMigrations: DbMigrations; graphql: 
GraphQL; + redpanda: Redpanda; }) { - return new ServiceDeployment( - 'otel-collector', + const kafkaBroker = args.redpanda.brokerEndpoint; + + // Ingress: OTLP -> Redpanda + const ingress = new ServiceDeployment( + 'otel-collector-ingress', { image: args.image, imagePullSecret: args.docker.secret, @@ -26,6 +31,7 @@ export function deployOTELCollector(args: { HIVE_OTEL_AUTH_ENDPOINT: serviceLocalEndpoint(args.graphql.service).apply( value => value + '/otel-auth', ), + KAFKA_BROKER: kafkaBroker, }, /** * We are using the healthcheck extension. @@ -40,11 +46,40 @@ export function deployOTELCollector(args: { pdb: true, availabilityOnEveryNode: true, port: 4318, - memoryLimit: args.environment.podsConfig.tracingCollector.memoryLimit, + memoryLimit: '512Mi', autoScaling: { maxReplicas: args.environment.podsConfig.tracingCollector.maxReplicas, cpu: { - limit: args.environment.podsConfig.tracingCollector.cpuLimit, + limit: '500m', + cpuAverageToScale: 80, + }, + }, + }, + [args.dbMigrations], + ).deploy(); + + // Egress: Redpanda -> ClickHouse + const egress = new ServiceDeployment( + 'otel-collector-egress', + { + image: args.image, + imagePullSecret: args.docker.secret, + env: { + ...args.environment.envVars, + KAFKA_BROKER: kafkaBroker, + }, + probePort: 13133, + readinessProbe: '/', + livenessProbe: '/', + startupProbe: '/', + exposesMetrics: true, + replicas: args.environment.podsConfig.tracingCollector.maxReplicas, + pdb: true, + memoryLimit: '512Mi', + autoScaling: { + maxReplicas: args.environment.podsConfig.tracingCollector.maxReplicas, + cpu: { + limit: '500m', cpuAverageToScale: 80, }, }, @@ -57,4 +92,12 @@ export function deployOTELCollector(args: { .withSecret('CLICKHOUSE_PASSWORD', args.clickhouse.secret, 'password') .withSecret('CLICKHOUSE_PROTOCOL', args.clickhouse.secret, 'protocol') .deploy(); + + return { + ingress, + egress, + // For backward compatibility, expose ingress as the main deployment + deployment: ingress.deployment, + service: ingress.service, + }; } diff --git a/deployment/services/redpanda.ts b/deployment/services/redpanda.ts new file mode 100644 index 00000000000..30faeda48a1 --- /dev/null +++ b/deployment/services/redpanda.ts @@ -0,0 +1,198 @@ +import * as k8s from '@pulumi/kubernetes'; +import * as pulumi from '@pulumi/pulumi'; + +export type Redpanda = ReturnType; + +export function deployRedpanda() { + const redpandaConfig = new pulumi.Config('redpanda'); + const replicas = redpandaConfig.getNumber('replicas') || 3; + const storageSize = redpandaConfig.get('storageSize') || '20Gi'; + const memoryLimit = redpandaConfig.get('memoryLimit') || '1Gi'; + const cpuLimit = redpandaConfig.get('cpuLimit') || '1000m'; + + const labels = { app: 'redpanda' }; + + // StatefulSet for Redpanda + const statefulSet = new k8s.apps.v1.StatefulSet('redpanda', { + metadata: { + name: 'redpanda', + }, + spec: { + serviceName: 'redpanda', + replicas, + selector: { + matchLabels: labels, + }, + template: { + metadata: { + labels, + }, + spec: { + containers: [ + { + name: 'redpanda', + image: 'redpandadata/redpanda:v25.3.1', + imagePullPolicy: 'Always', + resources: { + limits: { + cpu: cpuLimit, + memory: memoryLimit, + }, + }, + args: [ + 'redpanda', + 'start', + '--overprovisioned', + '--smp', + '1', + '--memory', + memoryLimit, + '--kafka-addr', + 'PLAINTEXT://0.0.0.0:9092', + '--advertise-kafka-addr', + pulumi.interpolate`PLAINTEXT://\${HOSTNAME}.redpanda.default.svc.cluster.local:9092`, + ], + ports: [ + { containerPort: 9092, name: 'kafka' }, + { containerPort: 8082, name: 
'http' }, + { containerPort: 33145, name: 'rpc' }, + { containerPort: 9644, name: 'admin' }, + ], + volumeMounts: [ + { + name: 'datadir', + mountPath: '/var/lib/redpanda/data', + }, + ], + livenessProbe: { + httpGet: { + path: '/v1/status/ready', + port: 9644 as any, + }, + initialDelaySeconds: 30, + periodSeconds: 10, + }, + readinessProbe: { + httpGet: { + path: '/v1/status/ready', + port: 9644 as any, + }, + initialDelaySeconds: 10, + periodSeconds: 5, + }, + }, + ], + }, + }, + volumeClaimTemplates: [ + { + metadata: { + name: 'datadir', + }, + spec: { + accessModes: ['ReadWriteOnce'], + resources: { + requests: { + storage: storageSize, + }, + }, + }, + }, + ], + }, + }); + + // Headless Service for StatefulSet (used for internal cluster communication) + const headlessService = new k8s.core.v1.Service('redpanda-headless', { + metadata: { + name: 'redpanda', + }, + spec: { + clusterIP: 'None', + selector: labels, + ports: [ + { name: 'kafka', port: 9092, targetPort: 9092 as any }, + { name: 'http', port: 8082, targetPort: 8082 as any }, + { name: 'rpc', port: 33145, targetPort: 33145 as any }, + { name: 'admin', port: 9644, targetPort: 9644 as any }, + ], + }, + }); + + // ClusterIP Service for clients (load balances across all pods) + const clientService = new k8s.core.v1.Service('redpanda-client-service', { + metadata: { + name: 'redpanda-client', + }, + spec: { + type: 'ClusterIP', + selector: labels, + ports: [ + { name: 'kafka', port: 9092, targetPort: 9092 as any }, + { name: 'http', port: 8082, targetPort: 8082 as any }, + ], + }, + }); + + // Create otel-traces topic + const topicCreationJob = new k8s.batch.v1.Job( + 'redpanda-topic-creation', + { + metadata: { + name: 'redpanda-topic-creation', + }, + spec: { + template: { + spec: { + restartPolicy: 'OnFailure', + containers: [ + { + name: 'rpk', + image: 'redpandadata/redpanda:v25.3.1', + imagePullPolicy: 'Always', + command: [ + '/bin/bash', + '-c', + ` + # Wait for Redpanda to be ready + for i in {1..60}; do + if rpk cluster health --brokers redpanda-0.redpanda:9092 2>/dev/null | grep -q 'Healthy'; then + echo "Redpanda cluster is ready" + break + fi + echo "Waiting for Redpanda cluster... 
($i/60)" + sleep 5 + done + + # Create topic with partitioning only (no replication) + rpk topic create otel-traces \\ + --brokers redpanda-0.redpanda:9092 \\ + --replicas 1 \\ + --partitions 10 \\ + --config retention.ms=2592000000 \\ + --config compression.type=snappy \\ + --config max.message.bytes=10485760 \\ + || echo "Topic may already exist" + + # Verify topic creation + rpk topic describe otel-traces --brokers redpanda-0.redpanda:9092 + `, + ], + }, + ], + }, + }, + }, + }, + { dependsOn: [statefulSet, headlessService] }, + ); + + return { + statefulSet, + headlessService, + clientService, + topicCreationJob, + // Client service endpoint - auto-discovers all brokers + brokerEndpoint: 'redpanda-client:9092', + }; +} diff --git a/docker/configs/otel-collector/builder-config-egress.yaml b/docker/configs/otel-collector/builder-config-egress.yaml new file mode 100644 index 00000000000..6797b0ae950 --- /dev/null +++ b/docker/configs/otel-collector/builder-config-egress.yaml @@ -0,0 +1,21 @@ +dist: + version: 0.122.0 + name: otelcol-custom + description: Custom OTel Collector distribution + output_path: ./otelcol-custom + +receivers: + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.122.0 + +processors: + - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.122.0 + +exporters: + - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.122.0 + - gomod: + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/clickhouseexporter v0.122.0 + +extensions: + - gomod: + github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension + v0.122.0 diff --git a/docker/configs/otel-collector/builder-config.yaml b/docker/configs/otel-collector/builder-config-ingress.yaml similarity index 75% rename from docker/configs/otel-collector/builder-config.yaml rename to docker/configs/otel-collector/builder-config-ingress.yaml index 9fede9bff46..5c6359e5e7e 100644 --- a/docker/configs/otel-collector/builder-config.yaml +++ b/docker/configs/otel-collector/builder-config-ingress.yaml @@ -8,18 +8,14 @@ receivers: - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.122.0 processors: - - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.122.0 - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.122.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.122.0 - - gomod: - github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.122.0 exporters: - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.122.0 - - gomod: - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/clickhouseexporter v0.122.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.122.0 extensions: - gomod: diff --git a/docker/configs/otel-collector/config-egress.yaml b/docker/configs/otel-collector/config-egress.yaml new file mode 100644 index 00000000000..18d5e1b7b0a --- /dev/null +++ b/docker/configs/otel-collector/config-egress.yaml @@ -0,0 +1,56 @@ +extensions: + health_check: + endpoint: '0.0.0.0:13133' +receivers: + kafka: + brokers: + - ${KAFKA_BROKER} + topic: otel-traces + encoding: otlp_proto + group_id: otel-collector-egress + session_timeout: 30s +processors: + batch: + timeout: 5s + send_batch_size: 8000 +exporters: + debug: + verbosity: basic + clickhouse: + endpoint: 
${CLICKHOUSE_PROTOCOL}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT}?dial_timeout=10s&compress=lz4&async_insert=1&wait_for_async_insert=0 + database: default + async_insert: true + username: ${CLICKHOUSE_USERNAME} + password: ${CLICKHOUSE_PASSWORD} + create_schema: false + ttl: 720h + compress: lz4 + logs_table_name: otel_logs + traces_table_name: otel_traces + metrics_table_name: otel_metrics + timeout: 5s + retry_on_failure: + enabled: true + initial_interval: 5s + max_interval: 30s + max_elapsed_time: 300s +service: + extensions: + - health_check + telemetry: + logs: + level: INFO + encoding: json + output_paths: ['stdout'] + error_output_paths: ['stderr'] + metrics: + address: '0.0.0.0:10254' + level: detailed + pipelines: + traces: + receivers: [kafka] + processors: + - batch + exporters: + - clickhouse + # - debug diff --git a/docker/configs/otel-collector/config.yaml b/docker/configs/otel-collector/config-ingress.yaml similarity index 63% rename from docker/configs/otel-collector/config.yaml rename to docker/configs/otel-collector/config-ingress.yaml index efa61a1bc24..cf17e75708f 100644 --- a/docker/configs/otel-collector/config.yaml +++ b/docker/configs/otel-collector/config-ingress.yaml @@ -20,9 +20,6 @@ receivers: auth: authenticator: hiveauth processors: - batch: - timeout: 5s - send_batch_size: 5000 attributes: actions: - key: hive.target_id @@ -30,26 +27,20 @@ processors: action: insert memory_limiter: check_interval: 1s - limit_percentage: 80 - spike_limit_percentage: 20 + limit_mib: 512 + spike_limit_mib: 128 exporters: debug: - verbosity: detailed - sampling_initial: 5 - sampling_thereafter: 200 - clickhouse: - endpoint: ${CLICKHOUSE_PROTOCOL}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT}?dial_timeout=10s&compress=lz4&async_insert=1 - database: default - async_insert: true - username: ${CLICKHOUSE_USERNAME} - password: ${CLICKHOUSE_PASSWORD} - create_schema: false - ttl: 720h - compress: lz4 - logs_table_name: otel_logs - traces_table_name: otel_traces - metrics_table_name: otel_metrics - timeout: 5s + verbosity: basic + kafka: + brokers: + - ${KAFKA_BROKER} + topic: otel-traces + encoding: otlp_proto + producer: + compression: snappy + max_message_bytes: 10485760 # 10MB + timeout: 10s retry_on_failure: enabled: true initial_interval: 5s @@ -61,7 +52,7 @@ service: - health_check telemetry: logs: - level: DEBUG + level: INFO encoding: json output_paths: ['stdout'] error_output_paths: ['stderr'] @@ -74,7 +65,6 @@ service: processors: - memory_limiter - attributes - - batch exporters: - - clickhouse + - kafka # - debug diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml index ea711421891..70639093e71 100644 --- a/docker/docker-compose.dev.yml +++ b/docker/docker-compose.dev.yml @@ -93,7 +93,7 @@ services: - 'stack' broker: - image: redpandadata/redpanda:v23.3.21 + image: redpandadata/redpanda:v25.2.11 mem_limit: 300m mem_reservation: 100m hostname: broker @@ -122,7 +122,6 @@ services: - --advertise-rpc-addr redpanda-1:33145 volumes: - ./.hive-dev/broker/db:/var/lib/kafka/data - supertokens: image: registry.supertokens.io/supertokens/supertokens-postgresql:9.3 mem_limit: 300m @@ -176,26 +175,50 @@ services: networks: - 'stack' - otel-collector: + broker_provision_otel_traces_topic: + image: redpandadata/redpanda:v25.2.11 + depends_on: + - broker + restart: 'no' + networks: + - 'stack' + entrypoint: > + /bin/sh -c " sleep 5; rpk topic create otel-traces --brokers broker:29092 || true; exit 0" + + otel-collector-ingress: + depends_on: + 
broker_provision_otel_traces_topic: + condition: service_completed_successfully + build: + context: ./configs/otel-collector + dockerfile: ./../../otel-collector-ingress.dockerfile + environment: + HIVE_OTEL_AUTH_ENDPOINT: 'http://host.docker.internal:3001/otel-auth' + KAFKA_BROKER: 'broker:29092' + ports: + - '4317:4317' + - '4318:4318' + - '10254:10254' + networks: + - 'stack' + + otel-collector-egress: depends_on: clickhouse: condition: service_healthy + broker_provision_otel_traces_topic: + condition: service_completed_successfully build: context: ./configs/otel-collector - dockerfile: ./../../otel-collector.dockerfile + dockerfile: ./../../otel-collector-egress.dockerfile + mem_limit: 512m environment: - HIVE_OTEL_AUTH_ENDPOINT: 'http://host.docker.internal:3001/otel-auth' + KAFKA_BROKER: 'broker:29092' CLICKHOUSE_PROTOCOL: 'http' CLICKHOUSE_HOST: clickhouse CLICKHOUSE_PORT: 8123 CLICKHOUSE_USERNAME: test CLICKHOUSE_PASSWORD: test - volumes: - - ./configs/otel-collector/builder-config.yaml:/builder-config.yaml - - ./configs/otel-collector/config.yaml:/etc/otel-config.yaml - ports: - - '4317:4317' - - '4318:4318' networks: - 'stack' diff --git a/docker/docker.hcl b/docker/docker.hcl index cf6f2cf36e5..467e2ae2dc4 100644 --- a/docker/docker.hcl +++ b/docker/docker.hcl @@ -82,8 +82,15 @@ target "router-base" { } } -target "otel-collector-base" { - dockerfile = "${PWD}/docker/otel-collector.dockerfile" +target "otel-collector-ingress-base" { + dockerfile = "${PWD}/docker/otel-collector-ingress.dockerfile" + args = { + RELEASE = "${RELEASE}" + } +} + +target "otel-collector-egress-base" { + dockerfile = "${PWD}/docker/otel-collector-egress.dockerfile" args = { RELEASE = "${RELEASE}" } @@ -378,18 +385,33 @@ target "apollo-router" { ] } -target "otel-collector" { - inherits = ["otel-collector-base", get_target()] +target "otel-collector-ingress" { + inherits = ["otel-collector-ingress-base", get_target()] + context = "${PWD}/docker/configs/otel-collector" + args = { + IMAGE_TITLE = "graphql-hive/otel-collector-ingress" + IMAGE_DESCRIPTION = "OTEL Collector Ingress for GraphQL Hive." + } + tags = [ + local_image_tag("otel-collector-ingress"), + stable_image_tag("otel-collector-ingress"), + image_tag("otel-collector-ingress", COMMIT_SHA), + image_tag("otel-collector-ingress", BRANCH_NAME) + ] +} + +target "otel-collector-egress" { + inherits = ["otel-collector-egress-base", get_target()] context = "${PWD}/docker/configs/otel-collector" args = { - IMAGE_TITLE = "graphql-hive/otel-collector" - IMAGE_DESCRIPTION = "OTEL Collector for GraphQL Hive." + IMAGE_TITLE = "graphql-hive/otel-collector-egress" + IMAGE_DESCRIPTION = "OTEL Collector Egress for GraphQL Hive." 
} tags = [ - local_image_tag("otel-collector"), - stable_image_tag("otel-collector"), - image_tag("otel-collector", COMMIT_SHA), - image_tag("otel-collector", BRANCH_NAME) + local_image_tag("otel-collector-egress"), + stable_image_tag("otel-collector-egress"), + image_tag("otel-collector-egress", COMMIT_SHA), + image_tag("otel-collector-egress", BRANCH_NAME) ] } @@ -424,7 +446,8 @@ group "build" { "commerce", "composition-federation-2", "app", - "otel-collector" + "otel-collector-ingress", + "otel-collector-egress" ] } @@ -441,7 +464,8 @@ group "integration-tests" { "webhooks", "server", "composition-federation-2", - "otel-collector" + "otel-collector-ingress", + "otel-collector-egress" ] } diff --git a/docker/otel-collector-egress.dockerfile b/docker/otel-collector-egress.dockerfile new file mode 100644 index 00000000000..297209c81a2 --- /dev/null +++ b/docker/otel-collector-egress.dockerfile @@ -0,0 +1,28 @@ +FROM golang:1.25-bookworm AS builder + +ARG OTEL_VERSION=0.122.0 + +WORKDIR /build + +RUN go install go.opentelemetry.io/collector/cmd/builder@v${OTEL_VERSION} + +# Copy the manifest file +COPY builder-config-egress.yaml builder-config.yaml + +# Build the custom collector +RUN CGO_ENABLED=0 builder --config=/build/builder-config.yaml + +# Stage 2: Final Image +FROM alpine:3.14 + +WORKDIR /app + +# Copy the generated collector binary from the builder stage +COPY --from=builder /build/otelcol-custom . +COPY config-egress.yaml /etc/otel-config.yaml + +# Expose necessary ports +EXPOSE 4317/tcp 4318/tcp 13133/tcp + +# Set the default command +CMD ["./otelcol-custom", "--config=/etc/otel-config.yaml"] diff --git a/docker/otel-collector.dockerfile b/docker/otel-collector-ingress.dockerfile similarity index 79% rename from docker/otel-collector.dockerfile rename to docker/otel-collector-ingress.dockerfile index 3b17da25058..1ab17a1d46c 100644 --- a/docker/otel-collector.dockerfile +++ b/docker/otel-collector-ingress.dockerfile @@ -1,9 +1,9 @@ FROM scratch AS config -COPY builder-config.yaml . +COPY builder-config-ingress.yaml . COPY extension-hiveauth/ ./extension-hiveauth/ -FROM golang:1.23.7-bookworm AS builder +FROM golang:1.25-bookworm AS builder ARG OTEL_VERSION=0.122.0 @@ -12,7 +12,7 @@ WORKDIR /build RUN go install go.opentelemetry.io/collector/cmd/builder@v${OTEL_VERSION} # Copy the manifest file and other necessary files -COPY --from=config builder-config.yaml . +COPY --from=config builder-config-ingress.yaml builder-config.yaml COPY --from=config extension-hiveauth/ ./extension-hiveauth/ # Build the custom collector @@ -25,7 +25,7 @@ WORKDIR /app # Copy the generated collector binary from the builder stage COPY --from=builder /build/otelcol-custom . -COPY config.yaml /etc/otel-config.yaml +COPY config-ingress.yaml /etc/otel-config.yaml # Expose necessary ports EXPOSE 4317/tcp 4318/tcp 13133/tcp
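
Reviewer note (not part of the patch): a minimal sketch for exercising the new buffering path against the dev stack defined in docker-compose.dev.yml above. The broker address (broker:29092), topic name (otel-traces), ClickHouse credentials (test/test) and table (default.otel_traces) are taken from the diff; running the commands from the repository root and the empty OTLP payload are assumptions, and the first request is expected to be rejected because the ingress collector's hiveauth extension requires a valid Hive token.

  # ingress listener responds on OTLP/HTTP (unauthenticated requests should get a 4xx from hiveauth)
  curl -i -X POST http://localhost:4318/v1/traces \
    -H 'content-type: application/json' \
    -d '{"resourceSpans":[]}'

  # records buffered on the Redpanda broker
  docker compose -f docker/docker-compose.dev.yml exec broker \
    rpk topic consume otel-traces --brokers broker:29092 --num 1

  # rows flushed into ClickHouse by the egress collector
  docker compose -f docker/docker-compose.dev.yml exec clickhouse \
    clickhouse-client --user test --password test \
    --query 'SELECT count() FROM default.otel_traces'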