Skip to content

Commit 2ac5dfe

Browse files
committed
e2e_node: check container metrics conditionally
When the PodAndContainerStatsFromCRI feature gate is enabled, Kubelet tries to get the list of metrics from the CRI runtime using the CRI API 'ListMetricDescriptors'. As this API is not implemented by either the CRI-O or Containerd versions used in the test-infra, the ResourceMetrics test case fails to gather certain container metrics. Excluding container metrics from the expected list of metrics when PodAndContainerStatsFromCRI is enabled should solve the issue.
1 parent 352056f commit 2ac5dfe

File tree

1 file changed

+14
-2
lines changed

1 file changed

+14
-2
lines changed

test/e2e_node/resource_metrics_test.go

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,12 @@ import (
2222
"time"
2323

2424
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
25+
"k8s.io/kubernetes/pkg/features"
2526
"k8s.io/kubernetes/test/e2e/framework"
2627
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
2728
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
2829
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
30+
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
2931
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
3032
"k8s.io/kubernetes/test/e2e/nodefeature"
3133
admissionapi "k8s.io/pod-security-admission/api"
@@ -74,6 +76,17 @@ var _ = SIGDescribe("ResourceMetricsAPI", nodefeature.ResourceMetrics, func() {
7476
memoryCapacity := node.Status.Capacity["memory"]
7577
memoryLimit := memoryCapacity.Value()
7678

79+
keys := []string{
80+
"resource_scrape_error", "node_cpu_usage_seconds_total", "node_memory_working_set_bytes",
81+
"pod_cpu_usage_seconds_total", "pod_memory_working_set_bytes",
82+
}
83+
84+
// NOTE: This check should be removed when ListMetricDescriptors is implemented
85+
// by CRI-O and Containerd
86+
if !e2eskipper.IsFeatureGateEnabled(features.PodAndContainerStatsFromCRI) {
87+
keys = append(keys, "container_cpu_usage_seconds_total", "container_memory_working_set_bytes", "container_start_time_seconds")
88+
}
89+
7790
matchResourceMetrics := gomega.And(gstruct.MatchKeys(gstruct.IgnoreMissing, gstruct.Keys{
7891
"resource_scrape_error": gstruct.Ignore(),
7992
"node_cpu_usage_seconds_total": gstruct.MatchAllElements(nodeID, gstruct.Elements{
@@ -113,8 +126,7 @@ var _ = SIGDescribe("ResourceMetricsAPI", nodefeature.ResourceMetrics, func() {
113126
fmt.Sprintf("%s::%s", f.Namespace.Name, pod1): boundedSample(0*e2evolume.Kb, 80*e2evolume.Mb),
114127
}),
115128
}),
116-
haveKeys("resource_scrape_error", "node_cpu_usage_seconds_total", "node_memory_working_set_bytes", "container_cpu_usage_seconds_total",
117-
"container_memory_working_set_bytes", "container_start_time_seconds", "pod_cpu_usage_seconds_total", "pod_memory_working_set_bytes"),
129+
haveKeys(keys...),
118130
)
119131
ginkgo.By("Giving pods a minute to start up and produce metrics")
120132
gomega.Eventually(ctx, getResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics)

0 commit comments

Comments
 (0)