Skip to content

Commit 36a7cf3

Browse files
jinja2 and ChrsMark authored
[chore] k8sattributeprocessor: update readme and add e2e test for container.id assoc (#43860)
<!--Ex. Fixing a bug - Describe the bug and how this fixes the issue. Ex. Adding a feature - Explain what this achieves.--> #### Description ~~Fixes a bug in the k8s attributes processor when container.id is used in pod_association but there is no container related extraction rule. In such situations, the processor is not able to associate to pod. The fix populates the internal Containers.ByID when container.id is used in pod association but there are no container related extraction rules.~~ It is expected to use a container attr in metadata extraction when container.id is used for association. The PR is now updating the README to clarify this and adds an e2e test which tests pod association when only container.id is in the source rules. <!-- Issue number (e.g. #1234) or full URL to issue, if applicable. --> #### Link to tracking issue Fixes #43689 <!--Describe what testing was performed and which tests were added.--> #### Testing E2E test for the scenario when telemetry to k8sattributes processor only has container.id for pod association. --------- Co-authored-by: Christos Markou <[email protected]>
1 parent 2f46968 commit 36a7cf3

File tree

11 files changed

+440
-1
lines changed

11 files changed

+440
-1
lines changed

processor/k8sattributesprocessor/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,7 @@ correctly associate the matching container to the resource:
125125
instance. If it's not set, the latest container instance will be used:
126126
- container.id (not added by default, has to be specified in `metadata`)
127127

128-
Please note, however, that only `container.id` attribute can be used for source rules in the pod_association.
128+
Please note, however, that only `container.id` attribute can be used for source rules in the pod_association. To use `container.id` in pod association, at least one container attribute must be included in the `metadata` extraction configuration (e.g., `container.id`, `container.image.name`, etc.).
129129

130130
Example for extracting container level attributes:
131131

processor/k8sattributesprocessor/e2e_test.go

Lines changed: 148 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ import (
99
"context"
1010
"fmt"
1111
"os"
12+
"os/exec"
1213
"path/filepath"
1314
"regexp"
1415
"testing"
@@ -1966,3 +1967,150 @@ func waitForData(t *testing.T, entriesNum int, mc *consumertest.MetricsSink, tc
19661967
"failed to receive %d entries, received %d metrics, %d traces, %d logs, %d profiles in %d minutes", entriesNum,
19671968
len(mc.AllMetrics()), len(tc.AllTraces()), len(lc.AllLogs()), len(pc.AllProfiles()), timeoutMinutes)
19681969
}
1970+
1971+
func TestE2E_ContainerIDAssociation(t *testing.T) {
1972+
testDir := filepath.Join("testdata", "e2e", "container_id_association_only")
1973+
1974+
// Build custom telemetrygen image with shell capabilities for container ID detection
1975+
t.Log("Building custom telemetrygen-e2e image...")
1976+
buildCmd := exec.Command("docker", "build", "-t", "telemetrygen-e2e:latest",
1977+
filepath.Join(testDir, "build"))
1978+
buildOutput, err := buildCmd.CombinedOutput()
1979+
require.NoErrorf(t, err, "failed to build telemetrygen-e2e image: %s", string(buildOutput))
1980+
1981+
t.Log("Loading telemetrygen-e2e image into kind cluster...")
1982+
loadCmd := exec.Command("kind", "load", "docker-image", "telemetrygen-e2e:latest", "--name", "kind")
1983+
loadOutput, err := loadCmd.CombinedOutput()
1984+
require.NoErrorf(t, err, "failed to load telemetrygen-e2e image into kind: %s", string(loadOutput))
1985+
1986+
k8sClient, err := k8stest.NewK8sClient(testKubeConfig)
1987+
require.NoError(t, err)
1988+
1989+
nsFile := filepath.Join(testDir, "namespace.yaml")
1990+
buf, err := os.ReadFile(nsFile)
1991+
require.NoErrorf(t, err, "failed to read namespace object file %s", nsFile)
1992+
nsObj, err := k8stest.CreateObject(k8sClient, buf)
1993+
require.NoErrorf(t, err, "failed to create k8s namespace from file %s", nsFile)
1994+
1995+
testNs := nsObj.GetName()
1996+
defer func() {
1997+
require.NoErrorf(t, k8stest.DeleteObject(k8sClient, nsObj), "failed to delete namespace %s", testNs)
1998+
}()
1999+
2000+
metricsConsumer := new(consumertest.MetricsSink)
2001+
tracesConsumer := new(consumertest.TracesSink)
2002+
logsConsumer := new(consumertest.LogsSink)
2003+
profilesConsumer := new(consumertest.ProfilesSink)
2004+
shutdownSinks := startUpSinks(t, metricsConsumer, tracesConsumer, logsConsumer, profilesConsumer)
2005+
defer shutdownSinks()
2006+
2007+
testID := uuid.NewString()[:8]
2008+
2009+
collectorDir := filepath.Join(testDir, "collector")
2010+
collectorObjs := k8stest.CreateCollectorObjects(t, k8sClient, testID, collectorDir, map[string]string{}, "")
2011+
defer func() {
2012+
for _, obj := range collectorObjs {
2013+
require.NoErrorf(t, k8stest.DeleteObject(k8sClient, obj), "failed to delete collector object %s", obj.GetName())
2014+
}
2015+
}()
2016+
2017+
createTeleOpts := &k8stest.TelemetrygenCreateOpts{
2018+
ManifestsDir: filepath.Join(testDir, "telemetrygen"),
2019+
TestID: testID,
2020+
OtlpEndpoint: fmt.Sprintf("otelcol-%s.%s:4317", testID, testNs),
2021+
DataTypes: []string{"metrics", "logs", "traces"},
2022+
}
2023+
telemetryGenObjs, telemetryGenObjInfos := k8stest.CreateTelemetryGenObjects(t, k8sClient, createTeleOpts)
2024+
defer func() {
2025+
for _, obj := range telemetryGenObjs {
2026+
require.NoErrorf(t, k8stest.DeleteObject(k8sClient, obj), "failed to delete telemetrygen object %s", obj.GetName())
2027+
}
2028+
}()
2029+
2030+
for _, info := range telemetryGenObjInfos {
2031+
k8stest.WaitForTelemetryGenToStart(t, k8sClient, info.Namespace, info.PodLabelSelectors, info.Workload, info.DataType)
2032+
}
2033+
2034+
wantEntries := 10
2035+
waitForData(t, wantEntries, metricsConsumer, tracesConsumer, logsConsumer, profilesConsumer)
2036+
2037+
tcs := []struct {
2038+
name string
2039+
dataType pipeline.Signal
2040+
service string
2041+
attrs map[string]*expectedValue
2042+
}{
2043+
{
2044+
name: "traces-deployment-container-id-association",
2045+
dataType: pipeline.SignalTraces,
2046+
service: "test-traces-deployment",
2047+
attrs: map[string]*expectedValue{
2048+
"k8s.pod.name": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment-[a-z0-9]*-[a-z0-9]*"),
2049+
"k8s.pod.uid": newExpectedValue(regex, uidRe),
2050+
"k8s.namespace.name": newExpectedValue(equal, testNs),
2051+
"k8s.deployment.name": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment"),
2052+
"k8s.node.name": newExpectedValue(exist, ""),
2053+
"k8s.cluster.uid": newExpectedValue(regex, uidRe),
2054+
"k8s.labels.app": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment"),
2055+
"k8s.namespace.labels.test-namespace": newExpectedValue(equal, "container-id-association"),
2056+
"k8s.container.name": newExpectedValue(equal, "telemetrygen"),
2057+
"container.image.name": newExpectedValue(exist, ""),
2058+
"container.image.tag": newExpectedValue(exist, ""),
2059+
"container.id": newExpectedValue(regex, "[a-f0-9]{64}"),
2060+
},
2061+
},
2062+
{
2063+
name: "metrics-deployment-container-id-association",
2064+
dataType: pipeline.SignalMetrics,
2065+
service: "test-metrics-deployment",
2066+
attrs: map[string]*expectedValue{
2067+
"k8s.pod.name": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment-[a-z0-9]*-[a-z0-9]*"),
2068+
"k8s.pod.uid": newExpectedValue(regex, uidRe),
2069+
"k8s.namespace.name": newExpectedValue(equal, testNs),
2070+
"k8s.deployment.name": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment"),
2071+
"k8s.node.name": newExpectedValue(exist, ""),
2072+
"k8s.cluster.uid": newExpectedValue(regex, uidRe),
2073+
"k8s.labels.app": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment"),
2074+
"k8s.namespace.labels.test-namespace": newExpectedValue(equal, "container-id-association"),
2075+
"k8s.container.name": newExpectedValue(equal, "telemetrygen"),
2076+
"container.image.name": newExpectedValue(exist, ""),
2077+
"container.image.tag": newExpectedValue(exist, ""),
2078+
"container.id": newExpectedValue(regex, "[a-f0-9]{64}"),
2079+
},
2080+
},
2081+
{
2082+
name: "logs-deployment-container-id-association",
2083+
dataType: pipeline.SignalLogs,
2084+
service: "test-logs-deployment",
2085+
attrs: map[string]*expectedValue{
2086+
"k8s.pod.name": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment-[a-z0-9]*-[a-z0-9]*"),
2087+
"k8s.pod.uid": newExpectedValue(regex, uidRe),
2088+
"k8s.namespace.name": newExpectedValue(equal, testNs),
2089+
"k8s.deployment.name": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment"),
2090+
"k8s.node.name": newExpectedValue(exist, ""),
2091+
"k8s.cluster.uid": newExpectedValue(regex, uidRe),
2092+
"k8s.labels.app": newExpectedValue(regex, "telemetrygen-"+testID+"-.*-deployment"),
2093+
"k8s.namespace.labels.test-namespace": newExpectedValue(equal, "container-id-association"),
2094+
"k8s.container.name": newExpectedValue(equal, "telemetrygen"),
2095+
"container.image.name": newExpectedValue(exist, ""),
2096+
"container.image.tag": newExpectedValue(exist, ""),
2097+
"container.id": newExpectedValue(regex, "[a-f0-9]{64}"),
2098+
},
2099+
},
2100+
}
2101+
2102+
for _, tc := range tcs {
2103+
t.Run(tc.name, func(t *testing.T) {
2104+
switch tc.dataType {
2105+
case pipeline.SignalTraces:
2106+
scanTracesForAttributes(t, tracesConsumer, tc.service, tc.attrs)
2107+
case pipeline.SignalMetrics:
2108+
scanMetricsForAttributes(t, metricsConsumer, tc.service, tc.attrs)
2109+
case pipeline.SignalLogs:
2110+
scanLogsForAttributes(t, logsConsumer, tc.service, tc.attrs)
2111+
default:
2112+
t.Fatalf("unknown data type %s", tc.dataType)
2113+
}
2114+
})
2115+
}
2116+
}
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
# Custom telemetrygen image with shell capabilities for e2e testing.
# This enables container ID detection from Kubernetes API for proper
# association testing.
#
# NOTE(review): both source images use floating "latest" tags; pin versions or
# digests if reproducible e2e runs become a requirement.

# Use uppercase AS to match FROM casing (fixes the FromAsCasing build warning).
FROM alpine:latest AS base

FROM ghcr.io/open-telemetry/opentelemetry-collector-contrib/telemetrygen:latest AS telemetrygen-source

FROM base

# curl gives the image a shell-usable HTTP client for the e2e scenario.
RUN apk add --no-cache curl

# Copy only the telemetrygen binary out of the upstream image.
COPY --from=telemetrygen-source /telemetrygen /telemetrygen

# Run as a non-root user.
ARG USER_UID=10001
ARG USER_GID=10001
USER ${USER_UID}:${USER_GID}

ENTRYPOINT ["/telemetrygen"]
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
# RBAC permissions the collector needs so the k8sattributes processor can watch
# pods, namespaces, nodes and workload owners.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ .Name }}
rules:
- apiGroups: [""]
  resources: ["pods", "nodes"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["apps"]
  resources: ["replicasets", "deployments", "statefulsets", "daemonsets"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["batch"]
  resources: ["jobs"]
  verbs: ["get", "watch", "list"]
# Bracket spacing normalized to match the other rules ([""] not [ "" ]).
- apiGroups: [""]
  resources: ["namespaces"]
  verbs: ["get", "watch", "list"]
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
# Binds the collector's ServiceAccount to the ClusterRole of the same name.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ .Name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ .Name }}
subjects:
- kind: ServiceAccount
  name: {{ .Name }}
  # Must match the namespace created by namespace.yaml for this test.
  namespace: e2ek8sattribute-container-id-association-only
Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
# Collector configuration for the container.id-only pod association e2e test.
# pod_association uses ONLY container.id as a source rule; container attributes
# (k8s.container.name, container.image.*) are included in extract.metadata.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Name }}-config
  namespace: e2ek8sattribute-container-id-association-only
data:
  relay: |
    exporters:
      otlp:
        endpoint: {{ .HostEndpoint }}:4317
        tls:
          insecure: true
    extensions:
      health_check:
        endpoint: 0.0.0.0:13133
    processors:
      k8sattributes:
        extract:
          metadata:
            - k8s.pod.name
            - k8s.pod.uid
            - k8s.namespace.name
            - k8s.deployment.name
            - k8s.node.name
            - k8s.cluster.uid
            - k8s.container.name
            - container.image.name
            - container.image.tag
          labels:
            - from: pod
              key: app
              tag_name: k8s.labels.app
            - from: namespace
              key: test-namespace
              tag_name: k8s.namespace.labels.test-namespace
        pod_association:
          - sources:
              - from: resource_attribute
                name: container.id
    receivers:
      otlp:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:4317
    service:
      extensions:
        - health_check
      pipelines:
        logs:
          exporters:
            - otlp
          processors:
            - k8sattributes
          receivers:
            - otlp
        metrics:
          exporters:
            - otlp
          processors:
            - k8sattributes
          receivers:
            - otlp
        traces:
          exporters:
            - otlp
          processors:
            - k8sattributes
          receivers:
            - otlp
        profiles:
          exporters:
            - otlp
          processors:
            - k8sattributes
          receivers:
            - otlp
Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,60 @@
1+
# Single-replica collector deployment running the config from the ConfigMap
# of the same name (mounted at /conf/relay.yaml).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Name }}
  namespace: e2ek8sattribute-container-id-association-only
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: opentelemetry-collector
      app.kubernetes.io/instance: {{ .Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: opentelemetry-collector
        app.kubernetes.io/instance: {{ .Name }}
    spec:
      serviceAccountName: {{ .Name }}
      containers:
        - name: opentelemetry-collector
          command:
            - /otelcontribcol
            - --config=/conf/relay.yaml
            # Profiles support is gated; enable it for the profiles pipeline.
            - --feature-gates=service.profilesSupport
          image: "otelcontribcol:latest"
          imagePullPolicy: IfNotPresent
          ports:
            - name: otlp
              containerPort: 4317
              protocol: TCP
          env:
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
          # Probes hit the health_check extension on port 13133.
          livenessProbe:
            httpGet:
              path: /
              port: 13133
            initialDelaySeconds: 3
          readinessProbe:
            httpGet:
              path: /
              port: 13133
            initialDelaySeconds: 3
          resources:
            limits:
              cpu: 128m
              memory: 256Mi
          volumeMounts:
            - mountPath: /conf
              name: opentelemetry-collector-configmap
      volumes:
        - name: opentelemetry-collector-configmap
          configMap:
            name: {{ .Name }}-config
            items:
              - key: relay
                path: relay.yaml
Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
# ClusterIP service exposing the collector's OTLP gRPC endpoint to the
# telemetrygen workloads in the cluster.
apiVersion: v1
kind: Service
metadata:
  name: {{ .Name }}
  namespace: e2ek8sattribute-container-id-association-only
spec:
  type: ClusterIP
  ports:
    - name: otlp
      port: 4317
      targetPort: 4317
      protocol: TCP
      appProtocol: grpc
  selector:
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: {{ .Name }}
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
# ServiceAccount for the collector; bound to the ClusterRole via the
# ClusterRoleBinding of the same name.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ .Name }}
  namespace: e2ek8sattribute-container-id-association-only
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
# Dedicated namespace for this e2e test; the label is asserted by the test via
# the k8s.namespace.labels.test-namespace resource attribute.
apiVersion: v1
kind: Namespace
metadata:
  name: e2ek8sattribute-container-id-association-only
  labels:
    test-namespace: "container-id-association"

0 commit comments

Comments
 (0)