diff --git a/docs/metrics/workload/pod-metrics.md b/docs/metrics/workload/pod-metrics.md
index be76529ec..189351f66 100644
--- a/docs/metrics/workload/pod-metrics.md
+++ b/docs/metrics/workload/pod-metrics.md
@@ -31,6 +31,8 @@
| kube_pod_container_status_restarts_total | Counter | The number of container restarts per container | | `container`=<container-name>
`namespace`=<pod-namespace>
`pod`=<pod-name>
`uid`=<pod-uid> | STABLE | - |
| kube_pod_container_resource_requests | Gauge | The number of requested request resource by a container. It is recommended to use the `kube_pod_resource_requests` metric exposed by kube-scheduler instead, as it is more precise. | `cpu`=<core>
`memory`=<bytes> | `resource`=<resource-name>
`unit`=<resource-unit>
`container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name>
`uid`=<pod-uid> | BETA | - |
| kube_pod_container_resource_limits | Gauge | The number of requested limit resource by a container. It is recommended to use the `kube_pod_resource_limits` metric exposed by kube-scheduler instead, as it is more precise. | `cpu`=<core>
`memory`=<bytes> | `resource`=<resource-name>
`unit`=<resource-unit>
`container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name>
`uid`=<pod-uid> | BETA | - |
+| kube_pod_container_status_resource_requests | Gauge | The currently applied resource requests of a container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates. | `cpu`=<core>
`memory`=<bytes> | `resource`=<resource-name>
`unit`=<resource-unit>
`container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name>
`uid`=<pod-uid> | EXPERIMENTAL | - |
+| kube_pod_container_status_resource_limits | Gauge | The currently applied resource limits of a container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates. | `cpu`=<core>
`memory`=<bytes> | `resource`=<resource-name>
`unit`=<resource-unit>
`container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name>
`uid`=<pod-uid> | EXPERIMENTAL | - |
| kube_pod_overhead_cpu_cores | Gauge | The pod overhead in regards to cpu cores associated with running a pod | core | `pod`=<pod-name>
`namespace`=<pod-namespace>
`uid`=<pod-uid> | EXPERIMENTAL | - |
| kube_pod_overhead_memory_bytes | Gauge | The pod overhead in regards to memory associated with running a pod | bytes | `pod`=<pod-name>
`namespace`=<pod-namespace>
`uid`=<pod-uid> | EXPERIMENTAL | - |
| kube_pod_runtimeclass_name_info | Gauge | The runtimeclass associated with the pod | | `pod`=<pod-name>
`namespace`=<pod-namespace>
`uid`=<pod-uid> | EXPERIMENTAL | - |
@@ -48,6 +50,8 @@
| kube_pod_init_container_status_restarts_total | Counter | The number of restarts for the init container | integer | `container`=<container-name>
`namespace`=<pod-namespace>
`pod`=<pod-name>
`uid`=<pod-uid> | STABLE | - |
| kube_pod_init_container_resource_limits | Gauge | The number of requested limit resource by an init container | `cpu`=<core>
`memory`=<bytes> | `resource`=<resource-name>
`unit`=<resource-unit>
`container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name>
`uid`=<pod-uid> | EXPERIMENTAL | - |
| kube_pod_init_container_resource_requests | Gauge | The number of requested request resource by an init container | `cpu`=<core>
`memory`=<bytes> | `resource`=<resource-name>
`unit`=<resource-unit>
`container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name>
`uid`=<pod-uid> | EXPERIMENTAL | - |
+| kube_pod_init_container_status_resource_requests | Gauge | The currently applied resource requests of an init container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates. | `cpu`=<core>
`memory`=<bytes> | `resource`=<resource-name>
`unit`=<resource-unit>
`container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name>
`uid`=<pod-uid> | EXPERIMENTAL | - |
+| kube_pod_init_container_status_resource_limits | Gauge | The currently applied resource limits of an init container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates. | `cpu`=<core>
`memory`=<bytes> | `resource`=<resource-name>
`unit`=<resource-unit>
`container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name>
`uid`=<pod-uid> | EXPERIMENTAL | - |
| kube_pod_spec_volumes_persistentvolumeclaims_info | Gauge | Information about persistentvolumeclaim volumes in a pod | | `pod`=<pod-name>
`namespace`=<pod-namespace>
`volume`=<volume-name>
`persistentvolumeclaim`=<persistentvolumeclaim-claimname>
`uid`=<pod-uid> | STABLE | - |
| kube_pod_spec_volumes_persistentvolumeclaims_readonly | Gauge | Describes whether a persistentvolumeclaim is mounted read only | bool | `pod`=<pod-name>
`namespace`=<pod-namespace>
`volume`=<volume-name>
`persistentvolumeclaim`=<persistentvolumeclaim-claimname>
`uid`=<pod-uid> | STABLE | - |
| kube_pod_status_reason | Gauge | The pod status reasons | | `pod`=<pod-name>
`namespace`=<pod-namespace>
`reason`=<Evicted\|NodeAffinity\|NodeLost\|Shutdown\|UnexpectedAdmissionError>
`uid`=<pod-uid> | EXPERIMENTAL | - |
diff --git a/internal/store/pod.go b/internal/store/pod.go
index 7182c9254..840d2aab9 100644
--- a/internal/store/pod.go
+++ b/internal/store/pod.go
@@ -57,6 +57,8 @@ func podMetricFamilies(allowAnnotationsList, allowLabelsList []string) []generat
createPodContainerStatusTerminatedReasonFamilyGenerator(),
createPodContainerStatusWaitingFamilyGenerator(),
createPodContainerStatusWaitingReasonFamilyGenerator(),
+ createPodContainerStatusResourceLimitsFamilyGenerator(),
+ createPodContainerStatusResourceRequestsFamilyGenerator(),
createPodCreatedFamilyGenerator(),
createPodDeletionTimestampFamilyGenerator(),
createPodInfoFamilyGenerator(),
@@ -72,6 +74,8 @@ func podMetricFamilies(allowAnnotationsList, allowLabelsList []string) []generat
createPodInitContainerStatusTerminatedReasonFamilyGenerator(),
createPodInitContainerStatusWaitingFamilyGenerator(),
createPodInitContainerStatusWaitingReasonFamilyGenerator(),
+ createPodInitContainerStatusResourceLimitsFamilyGenerator(),
+ createPodInitContainerStatusResourceRequestsFamilyGenerator(),
createPodAnnotationsGenerator(allowAnnotationsList),
createPodLabelsGenerator(allowLabelsList),
createPodOverheadCPUCoresFamilyGenerator(),
@@ -165,6 +169,48 @@ func createPodContainerInfoFamilyGenerator() generator.FamilyGenerator {
)
}
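+// resourceListToMetric converts a ResourceList into metrics labeled with the
+// container name, node name, sanitized resource name, and unit, normalizing
+// CPU to cores and byte-denominated resources (memory, storage, hugepages,
+// attachable volumes) to bytes; extended resources are reported as integers.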
+func resourceListToMetric(containerName, nodeName string, resources v1.ResourceList) (ms []*metric.Metric) {
+ for resourceName, val := range resources {
+ switch resourceName {
+ case v1.ResourceCPU:
+ ms = append(ms, &metric.Metric{
+ LabelValues: []string{containerName, nodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitCore)},
+ Value: convertValueToFloat64(&val),
+ })
+ case v1.ResourceStorage:
+ fallthrough
+ case v1.ResourceEphemeralStorage:
+ fallthrough
+ case v1.ResourceMemory:
+ ms = append(ms, &metric.Metric{
+ LabelValues: []string{containerName, nodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
+ Value: float64(val.Value()),
+ })
+ default:
+ if isHugePageResourceName(resourceName) {
+ ms = append(ms, &metric.Metric{
+ LabelValues: []string{containerName, nodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
+ Value: float64(val.Value()),
+ })
+ }
+ if isAttachableVolumeResourceName(resourceName) {
+ ms = append(ms, &metric.Metric{
+ LabelValues: []string{containerName, nodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
+ Value: float64(val.Value()),
+ })
+ }
+ if isExtendedResourceName(resourceName) {
+ ms = append(ms, &metric.Metric{
+ LabelValues: []string{containerName, nodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitInteger)},
+ Value: float64(val.Value()),
+ })
+ }
+ }
+ }
+
+ return ms
+}
+
func createPodContainerResourceLimitsFamilyGenerator() generator.FamilyGenerator {
return *generator.NewFamilyGeneratorWithStability(
"kube_pod_container_resource_limits",
@@ -176,46 +222,7 @@ func createPodContainerResourceLimitsFamilyGenerator() generator.FamilyGenerator
ms := []*metric.Metric{}
for _, c := range p.Spec.Containers {
- lim := c.Resources.Limits
-
- for resourceName, val := range lim {
- switch resourceName {
- case v1.ResourceCPU:
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitCore)},
- Value: convertValueToFloat64(&val),
- })
- case v1.ResourceStorage:
- fallthrough
- case v1.ResourceEphemeralStorage:
- fallthrough
- case v1.ResourceMemory:
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- default:
- if isHugePageResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- }
- if isAttachableVolumeResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- Value: float64(val.Value()),
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- })
- }
- if isExtendedResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- Value: float64(val.Value()),
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitInteger)},
- })
-
- }
- }
- }
+ ms = append(ms, resourceListToMetric(c.Name, p.Spec.NodeName, c.Resources.Limits)...)
}
for _, metric := range ms {
@@ -240,45 +247,7 @@ func createPodContainerResourceRequestsFamilyGenerator() generator.FamilyGenerat
ms := []*metric.Metric{}
for _, c := range p.Spec.Containers {
- req := c.Resources.Requests
-
- for resourceName, val := range req {
- switch resourceName {
- case v1.ResourceCPU:
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitCore)},
- Value: convertValueToFloat64(&val),
- })
- case v1.ResourceStorage:
- fallthrough
- case v1.ResourceEphemeralStorage:
- fallthrough
- case v1.ResourceMemory:
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- default:
- if isHugePageResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- }
- if isAttachableVolumeResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- }
- if isExtendedResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitInteger)},
- Value: float64(val.Value()),
- })
- }
- }
- }
+ ms = append(ms, resourceListToMetric(c.Name, p.Spec.NodeName, c.Resources.Requests)...)
}
for _, metric := range ms {
@@ -740,49 +709,10 @@ func createPodInitContainerResourceLimitsFamilyGenerator() generator.FamilyGener
basemetrics.ALPHA,
"",
wrapPodFunc(func(p *v1.Pod) *metric.Family {
- ms := []*metric.Metric{}
+ var ms []*metric.Metric
for _, c := range p.Spec.InitContainers {
- lim := c.Resources.Limits
-
- for resourceName, val := range lim {
- switch resourceName {
- case v1.ResourceCPU:
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitCore)},
- Value: convertValueToFloat64(&val),
- })
- case v1.ResourceStorage:
- fallthrough
- case v1.ResourceEphemeralStorage:
- fallthrough
- case v1.ResourceMemory:
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- default:
- if isHugePageResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- }
- if isAttachableVolumeResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- Value: float64(val.Value()),
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- })
- }
- if isExtendedResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- Value: float64(val.Value()),
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitInteger)},
- })
-
- }
- }
- }
+ ms = append(ms, resourceListToMetric(c.Name, p.Spec.NodeName, c.Resources.Limits)...)
}
for _, metric := range ms {
@@ -804,47 +734,63 @@ func createPodInitContainerResourceRequestsFamilyGenerator() generator.FamilyGen
basemetrics.ALPHA,
"",
wrapPodFunc(func(p *v1.Pod) *metric.Family {
- ms := []*metric.Metric{}
+ var ms []*metric.Metric
for _, c := range p.Spec.InitContainers {
- req := c.Resources.Requests
+ ms = append(ms, resourceListToMetric(c.Name, p.Spec.NodeName, c.Resources.Requests)...)
+ }
- for resourceName, val := range req {
- switch resourceName {
- case v1.ResourceCPU:
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitCore)},
- Value: convertValueToFloat64(&val),
- })
- case v1.ResourceStorage:
- fallthrough
- case v1.ResourceEphemeralStorage:
- fallthrough
- case v1.ResourceMemory:
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- default:
- if isHugePageResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- }
- if isAttachableVolumeResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- }
- if isExtendedResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, p.Spec.NodeName, SanitizeLabelName(string(resourceName)), string(constant.UnitInteger)},
- Value: float64(val.Value()),
- })
- }
- }
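+ // Assign label keys after collection; the order must match the
+ // LabelValues emitted by resourceListToMetric (container, node,
+ // resource, unit).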
+ for _, metric := range ms {
+ metric.LabelKeys = []string{"container", "node", "resource", "unit"}
+ }
+
+ return &metric.Family{
+ Metrics: ms,
+ }
+ }),
+ )
+}
+
+func createPodContainerStatusResourceLimitsFamilyGenerator() generator.FamilyGenerator {
+ return *generator.NewFamilyGeneratorWithStability(
+ "kube_pod_container_status_resource_limits",
+ "The currently applied resource limits of a container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.",
+ metric.Gauge,
+ basemetrics.ALPHA,
+ "",
+ wrapPodFunc(func(p *v1.Pod) *metric.Family {
+ var ms []*metric.Metric
+
+ for _, cs := range p.Status.ContainerStatuses {
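+ // cs.Resources reflects the resources actually applied to the running
+ // container as reported by the container runtime; it is nil when that
+ // information is unavailable, so such containers are skipped.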
+ if cs.Resources != nil {
+ ms = append(ms, resourceListToMetric(cs.Name, p.Spec.NodeName, cs.Resources.Limits)...)
+ }
+ }
+
+ for _, metric := range ms {
+ metric.LabelKeys = []string{"container", "node", "resource", "unit"}
+ }
+
+ return &metric.Family{
+ Metrics: ms,
+ }
+ }),
+ )
+}
+
+func createPodContainerStatusResourceRequestsFamilyGenerator() generator.FamilyGenerator {
+ return *generator.NewFamilyGeneratorWithStability(
+ "kube_pod_container_status_resource_requests",
+ "The currently applied resource requests of a container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.",
+ metric.Gauge,
+ basemetrics.ALPHA,
+ "",
+ wrapPodFunc(func(p *v1.Pod) *metric.Family {
+ var ms []*metric.Metric
+
+ for _, cs := range p.Status.ContainerStatuses {
+ if cs.Resources != nil {
+ ms = append(ms, resourceListToMetric(cs.Name, p.Spec.NodeName, cs.Resources.Requests)...)
}
}
@@ -1061,6 +1007,60 @@ func createPodInitContainerStatusWaitingReasonFamilyGenerator() generator.Family
)
}
+func createPodInitContainerStatusResourceLimitsFamilyGenerator() generator.FamilyGenerator {
+ return *generator.NewFamilyGeneratorWithStability(
+ "kube_pod_init_container_status_resource_limits",
+ "The currently applied resource limits of an init container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.",
+ metric.Gauge,
+ basemetrics.ALPHA,
+ "",
+ wrapPodFunc(func(p *v1.Pod) *metric.Family {
+ var ms []*metric.Metric
+
+ for _, cs := range p.Status.InitContainerStatuses {
+ if cs.Resources != nil {
+ ms = append(ms, resourceListToMetric(cs.Name, p.Spec.NodeName, cs.Resources.Limits)...)
+ }
+ }
+
+ for _, metric := range ms {
+ metric.LabelKeys = []string{"container", "node", "resource", "unit"}
+ }
+
+ return &metric.Family{
+ Metrics: ms,
+ }
+ }),
+ )
+}
+
+func createPodInitContainerStatusResourceRequestsFamilyGenerator() generator.FamilyGenerator {
+ return *generator.NewFamilyGeneratorWithStability(
+ "kube_pod_init_container_status_resource_requests",
+ "The currently applied resource requests of an init container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.",
+ metric.Gauge,
+ basemetrics.ALPHA,
+ "",
+ wrapPodFunc(func(p *v1.Pod) *metric.Family {
+ var ms []*metric.Metric
+
+ for _, cs := range p.Status.InitContainerStatuses {
+ if cs.Resources != nil {
+ ms = append(ms, resourceListToMetric(cs.Name, p.Spec.NodeName, cs.Resources.Requests)...)
+ }
+ }
+
+ for _, metric := range ms {
+ metric.LabelKeys = []string{"container", "node", "resource", "unit"}
+ }
+
+ return &metric.Family{
+ Metrics: ms,
+ }
+ }),
+ )
+}
+
func createPodAnnotationsGenerator(allowAnnotations []string) generator.FamilyGenerator {
return *generator.NewFamilyGeneratorWithStability(
"kube_pod_annotations",
diff --git a/internal/store/pod_test.go b/internal/store/pod_test.go
index 10e76f6ba..e08261403 100644
--- a/internal/store/pod_test.go
+++ b/internal/store/pod_test.go
@@ -2178,6 +2178,104 @@ func TestPodStore(t *testing.T) {
"kube_pod_scheduler",
},
},
+ {
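+ // Exercises the new status-level resource metrics; the expected values
+ // reflect unit normalization, e.g. 200m CPU -> 0.2 core and
+ // 128Mi memory -> 1.34217728e+08 bytes.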
+ Obj: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pod_with_container_status_resources",
+ Namespace: "ns1",
+ UID: "uid_csr",
+ },
+ Spec: v1.PodSpec{
+ NodeName: "node1",
+ InitContainers: []v1.Container{
+ {
+ Name: "init-container1",
+ },
+ },
+ Containers: []v1.Container{
+ {
+ Name: "container1",
+ },
+ {
+ Name: "container2",
+ },
+ },
+ },
+ Status: v1.PodStatus{
+ InitContainerStatuses: []v1.ContainerStatus{
+ {
+ Name: "init-container1",
+ Resources: &v1.ResourceRequirements{
+ Requests: map[v1.ResourceName]resource.Quantity{
+ v1.ResourceCPU: resource.MustParse("100m"),
+ v1.ResourceMemory: resource.MustParse("64Mi"),
+ },
+ Limits: map[v1.ResourceName]resource.Quantity{
+ v1.ResourceCPU: resource.MustParse("200m"),
+ v1.ResourceMemory: resource.MustParse("128Mi"),
+ },
+ },
+ },
+ },
+ ContainerStatuses: []v1.ContainerStatus{
+ {
+ Name: "container1",
+ Resources: &v1.ResourceRequirements{
+ Requests: map[v1.ResourceName]resource.Quantity{
+ v1.ResourceCPU: resource.MustParse("150m"),
+ v1.ResourceMemory: resource.MustParse("75Mi"),
+ },
+ Limits: map[v1.ResourceName]resource.Quantity{
+ v1.ResourceCPU: resource.MustParse("250m"),
+ v1.ResourceMemory: resource.MustParse("125Mi"),
+ },
+ },
+ },
+ {
+ Name: "container2",
+ Resources: &v1.ResourceRequirements{
+ Requests: map[v1.ResourceName]resource.Quantity{
+ v1.ResourceCPU: resource.MustParse("200m"),
+ v1.ResourceMemory: resource.MustParse("100Mi"),
+ },
+ Limits: map[v1.ResourceName]resource.Quantity{
+ v1.ResourceCPU: resource.MustParse("400m"),
+ v1.ResourceMemory: resource.MustParse("200Mi"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Want: `
+ # HELP kube_pod_container_status_resource_limits The currently applied resource limits of a container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.
+ # HELP kube_pod_container_status_resource_requests The currently applied resource requests of a container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.
+ # HELP kube_pod_init_container_status_resource_limits The currently applied resource limits of an init container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.
+ # HELP kube_pod_init_container_status_resource_requests The currently applied resource requests of an init container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.
+ # TYPE kube_pod_container_status_resource_limits gauge
+ # TYPE kube_pod_container_status_resource_requests gauge
+ # TYPE kube_pod_init_container_status_resource_limits gauge
+ # TYPE kube_pod_init_container_status_resource_requests gauge
+ kube_pod_container_status_resource_limits{container="container1",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="cpu",uid="uid_csr",unit="core"} 0.25
+ kube_pod_container_status_resource_limits{container="container1",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="memory",uid="uid_csr",unit="byte"} 1.31072e+08
+ kube_pod_container_status_resource_limits{container="container2",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="cpu",uid="uid_csr",unit="core"} 0.4
+ kube_pod_container_status_resource_limits{container="container2",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="memory",uid="uid_csr",unit="byte"} 2.097152e+08
+ kube_pod_container_status_resource_requests{container="container1",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="cpu",uid="uid_csr",unit="core"} 0.15
+ kube_pod_container_status_resource_requests{container="container1",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="memory",uid="uid_csr",unit="byte"} 7.86432e+07
+ kube_pod_container_status_resource_requests{container="container2",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="cpu",uid="uid_csr",unit="core"} 0.2
+ kube_pod_container_status_resource_requests{container="container2",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="memory",uid="uid_csr",unit="byte"} 1.048576e+08
+ kube_pod_init_container_status_resource_limits{container="init-container1",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="cpu",uid="uid_csr",unit="core"} 0.2
+ kube_pod_init_container_status_resource_limits{container="init-container1",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="memory",uid="uid_csr",unit="byte"} 1.34217728e+08
+ kube_pod_init_container_status_resource_requests{container="init-container1",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="cpu",uid="uid_csr",unit="core"} 0.1
+ kube_pod_init_container_status_resource_requests{container="init-container1",namespace="ns1",node="node1",pod="pod_with_container_status_resources",resource="memory",uid="uid_csr",unit="byte"} 6.7108864e+07
+ `,
+ MetricNames: []string{
+ "kube_pod_container_status_resource_limits",
+ "kube_pod_container_status_resource_requests",
+ "kube_pod_init_container_status_resource_limits",
+ "kube_pod_init_container_status_resource_requests",
+ },
+ },
}
for i, c := range cases {
diff --git a/pkg/app/server_test.go b/pkg/app/server_test.go
index e65143d11..13b4663e2 100644
--- a/pkg/app/server_test.go
+++ b/pkg/app/server_test.go
@@ -213,6 +213,8 @@ func TestFullScrapeCycle(t *testing.T) {
# HELP kube_pod_container_resource_limits The number of requested limit resource by a container. It is recommended to use the kube_pod_resource_limits metric exposed by kube-scheduler instead, as it is more precise.
# HELP kube_pod_container_resource_requests The number of requested request resource by a container. It is recommended to use the kube_pod_resource_requests metric exposed by kube-scheduler instead, as it is more precise.
# HELP kube_pod_container_state_started [STABLE] Start time in unix timestamp for a pod container.
+# HELP kube_pod_container_status_resource_limits The currently applied resource limits of a container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.
+# HELP kube_pod_container_status_resource_requests The currently applied resource requests of a container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.
# HELP kube_pod_container_status_last_terminated_exitcode Describes the exit code for the last container in terminated state.
# HELP kube_pod_container_status_last_terminated_reason Describes the last reason the container was in terminated state.
# HELP kube_pod_container_status_last_terminated_timestamp Last terminated time for a pod container in unix timestamp.
@@ -229,6 +231,8 @@ func TestFullScrapeCycle(t *testing.T) {
# HELP kube_pod_init_container_info [STABLE] Information about an init container in a pod.
# HELP kube_pod_init_container_resource_limits The number of requested limit resource by an init container.
# HELP kube_pod_init_container_resource_requests The number of requested request resource by an init container.
+# HELP kube_pod_init_container_status_resource_limits The currently applied resource limits of an init container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.
+# HELP kube_pod_init_container_status_resource_requests The currently applied resource requests of an init container as reported by the container runtime. This represents the active cgroup configuration and may differ from the pod specification during in-place resource updates.
# HELP kube_pod_init_container_status_last_terminated_reason Describes the last reason the init container was in terminated state.
# HELP kube_pod_init_container_status_ready [STABLE] Describes whether the init containers readiness check succeeded.
# HELP kube_pod_init_container_status_restarts_total [STABLE] The number of restarts for the init container.
@@ -267,6 +271,8 @@ func TestFullScrapeCycle(t *testing.T) {
# TYPE kube_pod_container_resource_limits gauge
# TYPE kube_pod_container_resource_requests gauge
# TYPE kube_pod_container_state_started gauge
+# TYPE kube_pod_container_status_resource_limits gauge
+# TYPE kube_pod_container_status_resource_requests gauge
# TYPE kube_pod_container_status_last_terminated_exitcode gauge
# TYPE kube_pod_container_status_last_terminated_reason gauge
# TYPE kube_pod_container_status_last_terminated_timestamp gauge
@@ -283,6 +289,8 @@ func TestFullScrapeCycle(t *testing.T) {
# TYPE kube_pod_init_container_info gauge
# TYPE kube_pod_init_container_resource_limits gauge
# TYPE kube_pod_init_container_resource_requests gauge
+# TYPE kube_pod_init_container_status_resource_limits gauge
+# TYPE kube_pod_init_container_status_resource_requests gauge
# TYPE kube_pod_init_container_status_last_terminated_reason gauge
# TYPE kube_pod_init_container_status_ready gauge
# TYPE kube_pod_init_container_status_restarts_total counter