diff --git a/vertical-pod-autoscaler/docs/flags.md b/vertical-pod-autoscaler/docs/flags.md
index 4dececc28ac2..ffc4f662aa7e 100644
--- a/vertical-pod-autoscaler/docs/flags.md
+++ b/vertical-pod-autoscaler/docs/flags.md
@@ -14,7 +14,7 @@ This document is auto-generated from the flag definitions in the VPA admission-c
| `address` | string | ":8944" | The address to expose Prometheus metrics. |
| `alsologtostderr` | | | log to standard error as well as files (no effect when -logtostderr=true) |
| `client-ca-file` | string | "/etc/tls-certs/caCert.pem" | Path to CA PEM file. |
-| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are: AllAlpha=true\|false (ALPHA - default=false) AllBeta=true\|false (BETA - default=false) InPlaceOrRecreate=true\|false (BETA - default=true) |
+| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are: AllAlpha=true\|false (ALPHA - default=false) AllBeta=true\|false (BETA - default=false) InPlaceOrRecreate=true\|false (BETA - default=true) NativeSidecar=true\|false (ALPHA - default=false) |
| `ignored-vpa-object-namespaces` | string | | A comma-separated list of namespaces to ignore when searching for VPA objects. Leave empty to avoid ignoring any namespaces. These namespaces will not be cleaned by the garbage collector. |
| `kube-api-burst` | float | 100 | QPS burst limit when making requests to Kubernetes apiserver |
| `kube-api-qps` | float | 50 | QPS limit when making requests to Kubernetes apiserver |
@@ -68,7 +68,7 @@ This document is auto-generated from the flag definitions in the VPA recommender
| `cpu-integer-post-processor-enabled` | | | Enable the cpu-integer recommendation post processor. The post processor will round up CPU recommendations to a whole CPU for pods that were opted in by setting an appropriate label on the VPA object (experimental) |
| `external-metrics-cpu-metric` | string | | ALPHA. Metric to use with external metrics provider for CPU usage. |
| `external-metrics-memory-metric` | string | | ALPHA. Metric to use with external metrics provider for memory usage. |
-| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are: AllAlpha=true\|false (ALPHA - default=false) AllBeta=true\|false (BETA - default=false) InPlaceOrRecreate=true\|false (BETA - default=true) |
+| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are: AllAlpha=true\|false (ALPHA - default=false) AllBeta=true\|false (BETA - default=false) InPlaceOrRecreate=true\|false (BETA - default=true) NativeSidecar=true\|false (ALPHA - default=false) |
| `history-length` | string | "8d" | How far back Prometheus should be queried to get historical metrics |
| `history-resolution` | string | "1h" | Resolution at which Prometheus is queried for historical metrics |
| `humanize-memory` | | | DEPRECATED: Convert memory values in recommendations to the highest appropriate SI unit with up to 2 decimal places for better readability. This flag is deprecated and will be removed in a future version. Use --round-memory-bytes instead. |
@@ -144,7 +144,7 @@ This document is auto-generated from the flag definitions in the VPA updater cod
| `eviction-rate-burst` | int | 1 | Burst of pods that can be evicted. |
| `eviction-rate-limit` | float | | Number of pods that can be evicted per second. A rate limit set to 0 or -1 will disable the rate limiter. (default -1) |
| `eviction-tolerance` | float | 0.5 | Fraction of replica count that can be evicted for update, if more than one pod can be evicted. |
-| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are: AllAlpha=true\|false (ALPHA - default=false) AllBeta=true\|false (BETA - default=false) InPlaceOrRecreate=true\|false (BETA - default=true) |
+| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are: AllAlpha=true\|false (ALPHA - default=false) AllBeta=true\|false (BETA - default=false) InPlaceOrRecreate=true\|false (BETA - default=true) NativeSidecar=true\|false (ALPHA - default=false) |
| `ignored-vpa-object-namespaces` | string | | A comma-separated list of namespaces to ignore when searching for VPA objects. Leave empty to avoid ignoring any namespaces. These namespaces will not be cleaned by the garbage collector. |
| `in-recommendation-bounds-eviction-lifetime-threshold` | duration | 12h0m0s | Pods that live for at least that long can be evicted even if their request is within the [MinRecommended...MaxRecommended] range |
| `kube-api-burst` | float | 100 | QPS burst limit when making requests to Kubernetes apiserver |
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates.go
index 3bc230e9f029..4ae9351daf22 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates.go
@@ -25,6 +25,7 @@ import (
resource_admission "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
+ "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model"
resourcehelpers "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/resources"
vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
)
@@ -54,7 +55,7 @@ func (*resourcesUpdatesPatchCalculator) PatchResourceTarget() PatchResourceTarge
func (c *resourcesUpdatesPatchCalculator) CalculatePatches(pod *core.Pod, vpa *vpa_types.VerticalPodAutoscaler) ([]resource_admission.PatchRecord, error) {
result := []resource_admission.PatchRecord{}
- containersResources, annotationsPerContainer, err := c.recommendationProvider.GetContainersResourcesForPod(pod, vpa)
+ initContainersResources, containersResources, annotationsPerContainer, err := c.recommendationProvider.GetContainersResourcesForPod(pod, vpa)
if err != nil {
return []resource_admission.PatchRecord{}, fmt.Errorf("failed to calculate resource patch for pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
@@ -64,8 +65,14 @@ func (c *resourcesUpdatesPatchCalculator) CalculatePatches(pod *core.Pod, vpa *v
}
updatesAnnotation := []string{}
+ for i, containerResources := range initContainersResources {
+ newPatches, newUpdatesAnnotation := getContainerPatch(pod, i, annotationsPerContainer, containerResources, model.ContainerTypeInitSidecar)
+ result = append(result, newPatches...)
+ updatesAnnotation = append(updatesAnnotation, newUpdatesAnnotation)
+ }
+
for i, containerResources := range containersResources {
- newPatches, newUpdatesAnnotation := getContainerPatch(pod, i, annotationsPerContainer, containerResources)
+ newPatches, newUpdatesAnnotation := getContainerPatch(pod, i, annotationsPerContainer, containerResources, model.ContainerTypeStandard)
result = append(result, newPatches...)
updatesAnnotation = append(updatesAnnotation, newUpdatesAnnotation)
}
@@ -77,33 +84,40 @@ func (c *resourcesUpdatesPatchCalculator) CalculatePatches(pod *core.Pod, vpa *v
return result, nil
}
-func getContainerPatch(pod *core.Pod, i int, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources vpa_api_util.ContainerResources) ([]resource_admission.PatchRecord, string) {
+func getContainerPatch(pod *core.Pod, i int, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources vpa_api_util.ContainerResources, containerType model.ContainerType) ([]resource_admission.PatchRecord, string) {
var patches []resource_admission.PatchRecord
// Add empty resources object if missing.
- requests, limits := resourcehelpers.ContainerRequestsAndLimits(pod.Spec.Containers[i].Name, pod)
+ var container *core.Container
+ if containerType == model.ContainerTypeStandard {
+ container = &pod.Spec.Containers[i]
+ } else {
+ container = &pod.Spec.InitContainers[i]
+ }
+
+ requests, limits := resourcehelpers.ContainerRequestsAndLimits(container.Name, pod)
if limits == nil && requests == nil {
- patches = append(patches, GetPatchInitializingEmptyResources(i))
+ patches = append(patches, GetPatchInitializingEmptyResources(i, containerType))
}
- annotations, found := annotationsPerContainer[pod.Spec.Containers[i].Name]
+ annotations, found := annotationsPerContainer[container.Name]
if !found {
annotations = make([]string, 0)
}
- patches, annotations = appendPatchesAndAnnotations(patches, annotations, requests, i, containerResources.Requests, "requests", "request")
- patches, annotations = appendPatchesAndAnnotations(patches, annotations, limits, i, containerResources.Limits, "limits", "limit")
+ patches, annotations = appendPatchesAndAnnotations(patches, annotations, requests, i, containerResources.Requests, "requests", "request", containerType)
+ patches, annotations = appendPatchesAndAnnotations(patches, annotations, limits, i, containerResources.Limits, "limits", "limit", containerType)
- updatesAnnotation := fmt.Sprintf("container %d: ", i) + strings.Join(annotations, ", ")
+ updatesAnnotation := fmt.Sprintf("%s %d: ", containerType, i) + strings.Join(annotations, ", ")
return patches, updatesAnnotation
}
-func appendPatchesAndAnnotations(patches []resource_admission.PatchRecord, annotations []string, current core.ResourceList, containerIndex int, resources core.ResourceList, fieldName, resourceName string) ([]resource_admission.PatchRecord, []string) {
+func appendPatchesAndAnnotations(patches []resource_admission.PatchRecord, annotations []string, current core.ResourceList, containerIndex int, resources core.ResourceList, fieldName, resourceName string, containerType model.ContainerType) ([]resource_admission.PatchRecord, []string) {
// Add empty object if it's missing and we're about to fill it.
if current == nil && len(resources) > 0 {
- patches = append(patches, GetPatchInitializingEmptyResourcesSubfield(containerIndex, fieldName))
+ patches = append(patches, GetPatchInitializingEmptyResourcesSubfield(containerIndex, fieldName, containerType))
}
for resource, request := range resources {
- patches = append(patches, GetAddResourceRequirementValuePatch(containerIndex, fieldName, resource, request))
+ patches = append(patches, GetAddResourceRequirementValuePatch(containerIndex, fieldName, resource, request, containerType))
annotations = append(annotations, fmt.Sprintf("%s %s", resource, resourceName))
}
return patches, annotations
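
Reviewer note: with the container type threaded through, the vpaUpdates annotation now lists init sidecars before regular containers, keyed by the type's string form. A minimal sketch of the resulting annotation value (the entry labels follow the tests below; the VPA name is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Entries in the order CalculatePatches builds them: init sidecars
	// first, then regular containers, joined with "; " as above.
	entries := []string{
		"init-sidecar 0: cpu request",
		"container 0: cpu request, memory request",
	}
	fmt.Println("Pod resources updated by example-vpa: " + strings.Join(entries, "; "))
}
```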
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go
index 2a3cc5d9a0ec..4b663e7a2556 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go
@@ -39,13 +39,14 @@ const (
)
type fakeRecommendationProvider struct {
+ initResources []vpa_api_util.ContainerResources
resources []vpa_api_util.ContainerResources
containerToAnnotations vpa_api_util.ContainerToAnnotationsMap
e error
}
-func (frp *fakeRecommendationProvider) GetContainersResourcesForPod(pod *core.Pod, vpa *vpa_types.VerticalPodAutoscaler) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, error) {
- return frp.resources, frp.containerToAnnotations, frp.e
+func (frp *fakeRecommendationProvider) GetContainersResourcesForPod(pod *core.Pod, vpa *vpa_types.VerticalPodAutoscaler) ([]vpa_api_util.ContainerResources, []vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, error) {
+ return frp.initResources, frp.resources, frp.containerToAnnotations, frp.e
}
func addResourcesPatch(idx int) resource_admission.PatchRecord {
@@ -88,6 +89,44 @@ func addResourceLimitPatch(index int, res, amount string) resource_admission.Pat
}
}
+func addInitResourcesPatch(idx int) resource_admission.PatchRecord {
+ return resource_admission.PatchRecord{
+ Op: "add",
+ Path: fmt.Sprintf("/spec/initContainers/%d/resources", idx),
+ Value: core.ResourceRequirements{},
+ }
+}
+
+func addInitRequestsPatch(idx int) resource_admission.PatchRecord {
+ return resource_admission.PatchRecord{
+ Op: "add",
+ Path: fmt.Sprintf("/spec/initContainers/%d/resources/requests", idx),
+ Value: core.ResourceList{},
+ }
+}
+
+func addInitResourceRequestPatch(index int, res, amount string) resource_admission.PatchRecord {
+ return resource_admission.PatchRecord{
+ Op: "add",
+ Path: fmt.Sprintf("/spec/initContainers/%d/resources/requests/%s", index, res),
+ Value: resource.MustParse(amount),
+ }
+}
+
+func addInitAnnotationRequest(updateResources [][]string, kind string) resource_admission.PatchRecord {
+ requests := make([]string, 0)
+ for idx, podResources := range updateResources {
+ podRequests := make([]string, 0)
+ for _, resource := range podResources {
+ podRequests = append(podRequests, resource+" "+kind)
+ }
+ requests = append(requests, fmt.Sprintf("init-sidecar %d: %s", idx, strings.Join(podRequests, ", ")))
+ }
+
+ vpaUpdates := fmt.Sprintf("Pod resources updated by name: %s", strings.Join(requests, "; "))
+ return GetAddAnnotationPatch(ResourceUpdatesAnnotation, vpaUpdates)
+}
+
func addAnnotationRequest(updateResources [][]string, kind string) resource_admission.PatchRecord {
requests := make([]string, 0)
for idx, podResources := range updateResources {
@@ -107,6 +146,7 @@ func TestCalculatePatches_ResourceUpdates(t *testing.T) {
name string
pod *core.Pod
namespace string
+ initResources []vpa_api_util.ContainerResources
recommendResources []vpa_api_util.ContainerResources
recommendAnnotations vpa_api_util.ContainerToAnnotationsMap
recommendError error
@@ -136,6 +176,29 @@ func TestCalculatePatches_ResourceUpdates(t *testing.T) {
addAnnotationRequest([][]string{{cpu}}, request),
},
},
+ {
+ name: "new init cpu recommendation",
+ pod: &core.Pod{
+ Spec: core.PodSpec{
+ InitContainers: []core.Container{{}},
+ },
+ },
+ namespace: "default",
+ initResources: []vpa_api_util.ContainerResources{
+ {
+ Requests: core.ResourceList{
+ cpu: resource.MustParse("1"),
+ },
+ },
+ },
+ recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{},
+ expectPatches: []resource_admission.PatchRecord{
+ addInitResourcesPatch(0),
+ addInitRequestsPatch(0),
+ addInitResourceRequestPatch(0, cpu, "1"),
+ addInitAnnotationRequest([][]string{{cpu}}, request),
+ },
+ },
{
name: "replacement cpu recommendation",
pod: &core.Pod{
@@ -292,7 +355,7 @@ func TestCalculatePatches_ResourceUpdates(t *testing.T) {
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- frp := fakeRecommendationProvider{tc.recommendResources, tc.recommendAnnotations, tc.recommendError}
+ frp := fakeRecommendationProvider{tc.initResources, tc.recommendResources, tc.recommendAnnotations, tc.recommendError}
c := NewResourceUpdatesCalculator(&frp)
patches, err := c.CalculatePatches(tc.pod, test.VerticalPodAutoscaler().WithContainer("test").WithName("name").Get())
if tc.expectError == nil {
@@ -334,7 +397,7 @@ func TestGetPatches_TwoReplacementResources(t *testing.T) {
},
}
recommendAnnotations := vpa_api_util.ContainerToAnnotationsMap{}
- frp := fakeRecommendationProvider{recommendResources, recommendAnnotations, nil}
+ frp := fakeRecommendationProvider{nil, recommendResources, recommendAnnotations, nil}
c := NewResourceUpdatesCalculator(&frp)
patches, err := c.CalculatePatches(pod, test.VerticalPodAutoscaler().WithName("name").WithContainer("test").Get())
assert.NoError(t, err)
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/util.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/util.go
index 0c68ab6cd557..ddb5f57d8b43 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/util.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/util.go
@@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
resource_admission "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource"
+ "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model"
)
// GetAddEmptyAnnotationsPatch returns a patch initializing empty annotations.
@@ -44,28 +45,28 @@ func GetAddAnnotationPatch(annotationName, annotationValue string) resource_admi
}
// GetAddResourceRequirementValuePatch returns a patch record to add resource requirements to a container.
-func GetAddResourceRequirementValuePatch(i int, kind string, resource core.ResourceName, quantity resource.Quantity) resource_admission.PatchRecord {
+func GetAddResourceRequirementValuePatch(i int, kind string, resource core.ResourceName, quantity resource.Quantity, containerType model.ContainerType) resource_admission.PatchRecord {
return resource_admission.PatchRecord{
Op: "add",
- Path: fmt.Sprintf("/spec/containers/%d/resources/%s/%s", i, kind, resource),
+ Path: fmt.Sprintf("%s/%d/resources/%s/%s", containerType.GetPatchPath(), i, kind, resource),
Value: quantity.String()}
}
// GetPatchInitializingEmptyResources returns a patch record to initialize an empty resources object for a container.
-func GetPatchInitializingEmptyResources(i int) resource_admission.PatchRecord {
+func GetPatchInitializingEmptyResources(i int, containerType model.ContainerType) resource_admission.PatchRecord {
return resource_admission.PatchRecord{
Op: "add",
- Path: fmt.Sprintf("/spec/containers/%d/resources", i),
+ Path: fmt.Sprintf("%s/%d/resources", containerType.GetPatchPath(), i),
Value: core.ResourceRequirements{},
}
}
// GetPatchInitializingEmptyResourcesSubfield returns a patch record to initialize an empty subfield
// (e.g., "requests" or "limits") within a container's resources object.
-func GetPatchInitializingEmptyResourcesSubfield(i int, kind string) resource_admission.PatchRecord {
+func GetPatchInitializingEmptyResourcesSubfield(i int, kind string, containerType model.ContainerType) resource_admission.PatchRecord {
return resource_admission.PatchRecord{
Op: "add",
- Path: fmt.Sprintf("/spec/containers/%d/resources/%s", i, kind),
+ Path: fmt.Sprintf("%s/%d/resources/%s", containerType.GetPatchPath(), i, kind),
Value: core.ResourceList{},
}
}
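
A minimal sketch of what the reworked helpers emit, assuming `model.ContainerType.GetPatchPath()` returns `/spec/containers` for standard containers and `/spec/initContainers` for init sidecars, as the tests in this patch imply:

```go
package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch"
	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model"
)

func main() {
	q := resource.MustParse("100m")
	// Same helper, different JSON-patch root depending on the container type.
	std := patch.GetAddResourceRequirementValuePatch(0, "requests", core.ResourceCPU, q, model.ContainerTypeStandard)
	sidecar := patch.GetAddResourceRequirementValuePatch(0, "requests", core.ResourceCPU, q, model.ContainerTypeInitSidecar)
	fmt.Println(std.Path)     // /spec/containers/0/resources/requests/cpu
	fmt.Println(sidecar.Path) // /spec/initContainers/0/resources/requests/cpu
}
```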
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider.go
index 5cb9cbd96c50..a28222c510bf 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider.go
@@ -30,7 +30,7 @@ import (
// Provider gets current recommendation, annotations and vpaName for the given pod.
type Provider interface {
- GetContainersResourcesForPod(pod *core.Pod, vpa *vpa_types.VerticalPodAutoscaler) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, error)
+ GetContainersResourcesForPod(pod *core.Pod, vpa *vpa_types.VerticalPodAutoscaler) ([]vpa_api_util.ContainerResources, []vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, error)
}
type recommendationProvider struct {
@@ -51,72 +51,88 @@ func NewProvider(calculator limitrange.LimitRangeCalculator,
// If addAll is set to true, containers w/o a recommendation are also added to the list (and their non-recommended requests and limits will always be preserved if present),
// otherwise they're skipped (default behaviour).
func GetContainersResources(pod *core.Pod, vpaResourcePolicy *vpa_types.PodResourcePolicy, podRecommendation vpa_types.RecommendedPodResources, limitRange *core.LimitRangeItem,
- addAll bool, annotations vpa_api_util.ContainerToAnnotationsMap) []vpa_api_util.ContainerResources {
- resources := make([]vpa_api_util.ContainerResources, len(pod.Spec.Containers))
- for i, container := range pod.Spec.Containers {
- containerRequests, containerLimits := resourcehelpers.ContainerRequestsAndLimits(container.Name, pod)
- recommendation := vpa_api_util.GetRecommendationForContainer(container.Name, &podRecommendation)
- if recommendation == nil {
- if !addAll {
- klog.V(2).InfoS("No recommendation found for container, skipping", "container", container.Name)
- continue
+ addAll bool, annotations vpa_api_util.ContainerToAnnotationsMap) ([]vpa_api_util.ContainerResources, []vpa_api_util.ContainerResources) {
+ initResources := getResourcesForPodContainers(pod.Spec.InitContainers, pod, vpaResourcePolicy, podRecommendation, limitRange, addAll, annotations)
+ containerResources := getResourcesForPodContainers(pod.Spec.Containers, pod, vpaResourcePolicy, podRecommendation, limitRange, addAll, annotations)
+ return initResources, containerResources
+}
+
+// getResourcesForPodContainers processes a list of containers and returns their recommended resources.
+func getResourcesForPodContainers(containers []core.Container, pod *core.Pod, vpaResourcePolicy *vpa_types.PodResourcePolicy, podRecommendation vpa_types.RecommendedPodResources,
+ limitRange *core.LimitRangeItem, addAll bool, annotations vpa_api_util.ContainerToAnnotationsMap) []vpa_api_util.ContainerResources {
+ resources := make([]vpa_api_util.ContainerResources, len(containers))
+ for i, container := range containers {
+ resources[i] = getSingleContainerResources(container, pod, vpaResourcePolicy, podRecommendation, limitRange, addAll, annotations)
+ }
+ return resources
+}
+
+// getSingleContainerResources returns the recommended resources for a single container.
+func getSingleContainerResources(container core.Container, pod *core.Pod, vpaResourcePolicy *vpa_types.PodResourcePolicy, podRecommendation vpa_types.RecommendedPodResources,
+ limitRange *core.LimitRangeItem, addAll bool, annotations vpa_api_util.ContainerToAnnotationsMap) vpa_api_util.ContainerResources {
+ containerRequests, containerLimits := resourcehelpers.ContainerRequestsAndLimits(container.Name, pod)
+ recommendation := vpa_api_util.GetRecommendationForContainer(container.Name, &podRecommendation)
+ res := vpa_api_util.ContainerResources{}
+
+ if recommendation == nil {
+ if !addAll {
+ klog.V(2).InfoS("No recommendation found for container, skipping", "container", container.Name)
+ return res
+ }
+ klog.V(2).InfoS("No match found for container, using Pod request", "container", container.Name)
+ res.Requests = containerRequests
+ } else {
+ res.Requests = recommendation.Target
+ }
+
+ defaultLimit := core.ResourceList{}
+ if limitRange != nil {
+ defaultLimit = limitRange.Default
+ }
+ containerControlledValues := vpa_api_util.GetContainerControlledValues(container.Name, vpaResourcePolicy)
+ if containerControlledValues == vpa_types.ContainerControlledValuesRequestsAndLimits {
+ proportionalLimits, limitAnnotations := vpa_api_util.GetProportionalLimit(containerLimits, containerRequests, res.Requests, defaultLimit)
+ if proportionalLimits != nil {
+ res.Limits = proportionalLimits
+ if len(limitAnnotations) > 0 {
+ annotations[container.Name] = append(annotations[container.Name], limitAnnotations...)
}
- klog.V(2).InfoS("No match found for container, using Pod request", "container", container.Name)
- resources[i].Requests = containerRequests
- } else {
- resources[i].Requests = recommendation.Target
}
- defaultLimit := core.ResourceList{}
- if limitRange != nil {
- defaultLimit = limitRange.Default
+ }
+ // Backfill missing resources when addAll is true: if the recommendation only covers CPU or memory, the other resource is taken from the container's own requests/limits.
+ if addAll {
+ if res.Requests == nil {
+ res.Requests = core.ResourceList{}
}
- containerControlledValues := vpa_api_util.GetContainerControlledValues(container.Name, vpaResourcePolicy)
- if containerControlledValues == vpa_types.ContainerControlledValuesRequestsAndLimits {
- proportionalLimits, limitAnnotations := vpa_api_util.GetProportionalLimit(containerLimits, containerRequests, resources[i].Requests, defaultLimit)
- if proportionalLimits != nil {
- resources[i].Limits = proportionalLimits
- if len(limitAnnotations) > 0 {
- annotations[container.Name] = append(annotations[container.Name], limitAnnotations...)
- }
- }
+ if res.Limits == nil {
+ res.Limits = core.ResourceList{}
}
- // If the recommendation only contains CPU or Memory (if the VPA was configured this way), we need to make sure we "backfill" the other.
- // Only do this when the addAll flag is true.
- if addAll {
- if resources[i].Requests == nil {
- resources[i].Requests = core.ResourceList{}
- }
- if resources[i].Limits == nil {
- resources[i].Limits = core.ResourceList{}
- }
-
- cpuRequest, hasCpuRequest := containerRequests[core.ResourceCPU]
- if _, ok := resources[i].Requests[core.ResourceCPU]; !ok && hasCpuRequest {
- resources[i].Requests[core.ResourceCPU] = cpuRequest
- }
- memRequest, hasMemRequest := containerRequests[core.ResourceMemory]
- if _, ok := resources[i].Requests[core.ResourceMemory]; !ok && hasMemRequest {
- resources[i].Requests[core.ResourceMemory] = memRequest
- }
- cpuLimit, hasCpuLimit := containerLimits[core.ResourceCPU]
- if _, ok := resources[i].Limits[core.ResourceCPU]; !ok && hasCpuLimit {
- resources[i].Limits[core.ResourceCPU] = cpuLimit
- }
- memLimit, hasMemLimit := containerLimits[core.ResourceMemory]
- if _, ok := resources[i].Limits[core.ResourceMemory]; !ok && hasMemLimit {
- resources[i].Limits[core.ResourceMemory] = memLimit
- }
+ cpuRequest, hasCpuRequest := containerRequests[core.ResourceCPU]
+ if _, ok := res.Requests[core.ResourceCPU]; !ok && hasCpuRequest {
+ res.Requests[core.ResourceCPU] = cpuRequest
+ }
+ memRequest, hasMemRequest := containerRequests[core.ResourceMemory]
+ if _, ok := res.Requests[core.ResourceMemory]; !ok && hasMemRequest {
+ res.Requests[core.ResourceMemory] = memRequest
+ }
+ cpuLimit, hasCpuLimit := containerLimits[core.ResourceCPU]
+ if _, ok := res.Limits[core.ResourceCPU]; !ok && hasCpuLimit {
+ res.Limits[core.ResourceCPU] = cpuLimit
+ }
+ memLimit, hasMemLimit := containerLimits[core.ResourceMemory]
+ if _, ok := res.Limits[core.ResourceMemory]; !ok && hasMemLimit {
+ res.Limits[core.ResourceMemory] = memLimit
}
}
- return resources
+ return res
}
// GetContainersResourcesForPod returns the recommended resources for a given pod, together with associated annotations.
// The returned slices correspond 1-1 to the init containers and the containers in the Pod.
-func (p *recommendationProvider) GetContainersResourcesForPod(pod *core.Pod, vpa *vpa_types.VerticalPodAutoscaler) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, error) {
+func (p *recommendationProvider) GetContainersResourcesForPod(pod *core.Pod, vpa *vpa_types.VerticalPodAutoscaler) ([]vpa_api_util.ContainerResources, []vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, error) {
if vpa == nil || pod == nil {
klog.V(2).InfoS("Can't calculate recommendations, one of VPA or Pod is nil", "vpa", vpa, "pod", pod)
- return nil, nil, nil
+ return nil, nil, nil, nil
}
klog.V(2).InfoS("Updating requirements for pod", "pod", klog.KObj(pod))
@@ -128,25 +144,31 @@ func (p *recommendationProvider) GetContainersResourcesForPod(pod *core.Pod, vpa
recommendedPodResources, annotations, err = p.recommendationProcessor.Apply(vpa, pod)
if err != nil {
klog.V(2).InfoS("Cannot process recommendation for pod", "pod", klog.KObj(pod))
- return nil, annotations, err
+ return nil, nil, annotations, err
}
}
containerLimitRange, err := p.limitsRangeCalculator.GetContainerLimitRangeItem(pod.Namespace)
if err != nil {
- return nil, nil, fmt.Errorf("error getting containerLimitRange: %s", err)
+ return nil, nil, nil, fmt.Errorf("error getting containerLimitRange: %s", err)
}
var resourcePolicy *vpa_types.PodResourcePolicy
if vpa.Spec.UpdatePolicy == nil || vpa.Spec.UpdatePolicy.UpdateMode == nil || *vpa.Spec.UpdatePolicy.UpdateMode != vpa_types.UpdateModeOff {
resourcePolicy = vpa.Spec.ResourcePolicy
}
- containerResources := GetContainersResources(pod, resourcePolicy, *recommendedPodResources, containerLimitRange, false, annotations)
+ initContainerResources, containerResources := GetContainersResources(pod, resourcePolicy, *recommendedPodResources, containerLimitRange, false, annotations)
// Ensure that we are not propagating empty resource key if any.
- for _, resource := range containerResources {
+ for _, resource := range initContainerResources {
if resource.RemoveEmptyResourceKeyIfAny() {
klog.InfoS("An empty resource key was found and purged", "pod", klog.KObj(pod), "vpa", klog.KObj(vpa))
}
}
- return containerResources, annotations, nil
+ // Ensure that we are not propagating empty resource key if any.
+ for _, resource := range containerResources {
+ if resource.RemoveEmptyResourceKeyIfAny() {
+ klog.InfoS("An empty resource key was found and purged", "pod", klog.KObj(pod), "vpa", klog.KObj(vpa))
+ }
+ }
+ return initContainerResources, containerResources, annotations, nil
}
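
A minimal sketch of the new two-slice contract of `GetContainersResources`, with hand-built recommendations and illustrative container names ("sidecar", "app"):

```go
package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation"
	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
	vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
)

func main() {
	pod := &core.Pod{Spec: core.PodSpec{
		InitContainers: []core.Container{{Name: "sidecar"}},
		Containers:     []core.Container{{Name: "app"}},
	}}
	rec := vpa_types.RecommendedPodResources{
		ContainerRecommendations: []vpa_types.RecommendedContainerResources{
			{ContainerName: "sidecar", Target: core.ResourceList{core.ResourceCPU: resource.MustParse("100m")}},
			{ContainerName: "app", Target: core.ResourceList{core.ResourceCPU: resource.MustParse("1")}},
		},
	}
	// The first slice maps 1-1 to init containers, the second to containers.
	initRes, ctrRes := recommendation.GetContainersResources(pod, nil, rec, nil, false, vpa_api_util.ContainerToAnnotationsMap{})
	fmt.Println(initRes[0].Requests.Cpu().String(), ctrRes[0].Requests.Cpu().String())
}
```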
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider_test.go
index f259a61344a9..06a870c965ad 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider_test.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider_test.go
@@ -73,6 +73,10 @@ func TestUpdateResourceRequests(t *testing.T) {
initialized := test.Pod().WithName("test_initialized").
AddContainer(initializedContainer).WithLabels(labels).Get()
+ initializedWithInit := test.Pod().WithName("test_initialized").
+ AddContainer(initializedContainer).
+ AddInitContainer(initializedContainer).WithLabels(labels).Get()
+
limitsMatchRequestsContainer := test.Container().WithName(containerName).
WithCPURequest(resource.MustParse("2")).WithCPULimit(resource.MustParse("2")).
WithMemRequest(resource.MustParse("200Mi")).WithMemLimit(resource.MustParse("200Mi")).Get()
@@ -163,6 +167,14 @@ func TestUpdateResourceRequests(t *testing.T) {
expectedMem: resource.MustParse("200Mi"),
expectedCPU: resource.MustParse("2"),
},
+ {
+ name: "pod with init container",
+ pod: initializedWithInit,
+ vpa: vpa,
+ expectedAction: true,
+ expectedMem: resource.MustParse("200Mi"),
+ expectedCPU: resource.MustParse("2"),
+ },
{
name: "high memory",
pod: initialized,
@@ -302,7 +314,7 @@ func TestUpdateResourceRequests(t *testing.T) {
},
}
- resources, annotations, err := recommendationProvider.GetContainersResourcesForPod(tc.pod, tc.vpa)
+ initResources, resources, annotations, err := recommendationProvider.GetContainersResourcesForPod(tc.pod, tc.vpa)
if tc.expectedAction {
assert.Nil(t, err)
@@ -310,6 +322,14 @@ func TestUpdateResourceRequests(t *testing.T) {
return
}
+ assert.Equal(t, len(tc.pod.Spec.InitContainers), len(initResources), "init containers resources length mismatch")
+ if len(tc.pod.Spec.InitContainers) > 0 {
+ cpuRequestInit := initResources[0].Requests[apiv1.ResourceCPU]
+ assert.Equal(t, tc.expectedCPU.Value(), cpuRequestInit.Value(), "init cpu request doesn't match")
+ memoryRequestInit := initResources[0].Requests[apiv1.ResourceMemory]
+ assert.Equal(t, tc.expectedMem.Value(), memoryRequestInit.Value(), "init memory request doesn't match")
+ }
+
assert.NotContains(t, resources, "", "expected empty resource to be removed")
cpuRequest := resources[0].Requests[apiv1.ResourceCPU]
@@ -538,7 +558,7 @@ func TestGetContainersResources(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
pod := test.Pod().WithName("pod").AddContainer(tc.container).AddContainerStatus(tc.containerStatus).Get()
- resources := GetContainersResources(pod, tc.vpa.Spec.ResourcePolicy, *tc.vpa.Status.Recommendation, nil, tc.addAll, vpa_api_util.ContainerToAnnotationsMap{})
+ _, resources := GetContainersResources(pod, tc.vpa.Spec.ResourcePolicy, *tc.vpa.Status.Recommendation, nil, tc.addAll, vpa_api_util.ContainerToAnnotationsMap{})
cpu, cpuPresent := resources[0].Requests[apiv1.ResourceCPU]
if tc.expectedCPU == nil {
diff --git a/vertical-pod-autoscaler/pkg/features/features.go b/vertical-pod-autoscaler/pkg/features/features.go
index 2c34ac400178..6eca81b8d330 100644
--- a/vertical-pod-autoscaler/pkg/features/features.go
+++ b/vertical-pod-autoscaler/pkg/features/features.go
@@ -48,6 +48,9 @@ const (
// InPlaceOrRecreate enables the InPlaceOrRecreate update mode to be used.
// Requires KEP-1287 InPlacePodVerticalScaling feature-gate to be enabled on the cluster.
InPlaceOrRecreate featuregate.Feature = "InPlaceOrRecreate"
+
+ // NativeSidecar enables support for native sidecars (restartable init containers) in VPA.
+ NativeSidecar featuregate.Feature = "NativeSidecar"
)
// MutableFeatureGate is a mutable, versioned, global FeatureGate.
diff --git a/vertical-pod-autoscaler/pkg/features/versioned_features.go b/vertical-pod-autoscaler/pkg/features/versioned_features.go
index e623061fffd9..a61bfd290935 100644
--- a/vertical-pod-autoscaler/pkg/features/versioned_features.go
+++ b/vertical-pod-autoscaler/pkg/features/versioned_features.go
@@ -31,4 +31,7 @@ var defaultVersionedFeatureGates = map[featuregate.Feature]featuregate.Versioned
{Version: version.MustParse("1.4"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.5"), Default: true, PreRelease: featuregate.Beta},
},
+ NativeSidecar: {
+ {Version: version.MustParse("1.5"), Default: false, PreRelease: featuregate.Alpha},
+ },
}
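
For context: the gate ships default-off, so operators opt in per component, e.g. `--feature-gates=NativeSidecar=true` on the recommender, admission-controller and updater. A minimal sketch of how code consults the gate, mirroring the check added to cluster_feeder.go below:

```go
package main

import (
	"fmt"

	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/features"
)

func main() {
	// features.Enabled reads the global feature gate that each component
	// binary populates from its --feature-gates flag.
	if features.Enabled(features.NativeSidecar) {
		fmt.Println("restartable init containers will be tracked and autoscaled")
	} else {
		fmt.Println("init sidecars keep the pre-existing init-container handling")
	}
}
```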
diff --git a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer.go b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer.go
index d90ba189a2ac..14d60c3fd8fe 100644
--- a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer.go
+++ b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer.go
@@ -163,6 +163,14 @@ func buildAggregateContainerStateMap(vpa *model.Vpa, cluster model.ClusterState,
}
}
}
+ for containerName, container := range pod.InitSidecarsContainers {
+ aggregateKey := cluster.MakeAggregateStateKey(pod, containerName)
+ if vpa.UsesAggregation(aggregateKey) {
+ if aggregateContainerState, exists := aggregateContainerStateMap[containerName]; exists {
+ subtractCurrentContainerMemoryPeak(aggregateContainerState, container, now)
+ }
+ }
+ }
}
return aggregateContainerStateMap
}
diff --git a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go
index 70d727ca555b..eac9c72c279e 100644
--- a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go
@@ -79,7 +79,7 @@ func addVpa(t *testing.T, cluster model.ClusterState, vpaID model.VpaID, selecto
func TestMergeContainerStateForCheckpointDropsRecentMemoryPeak(t *testing.T) {
cluster := model.NewClusterState(testGcPeriod)
cluster.AddOrUpdatePod(testPodID1, testLabels, v1.PodRunning)
- assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID1, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID1, testRequest, model.ContainerTypeStandard))
container := cluster.GetContainer(testContainerID1)
timeNow := time.Unix(1, 0)
@@ -221,7 +221,7 @@ func TestStoreCheckpointsMakesProgressEvenForCancelledContext(t *testing.T) {
PodID: podID,
ContainerName: fmt.Sprintf("container-%d", j),
}
- err := clusterState.AddOrUpdateContainer(containerID, testRequest)
+ err := clusterState.AddOrUpdateContainer(containerID, testRequest, model.ContainerTypeStandard)
assert.NoError(t, err)
}
}
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
index 1e98483f66e1..8bee55e1ab76 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
@@ -37,6 +37,7 @@ import (
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1"
vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1"
+ "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/features"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/history"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/metrics"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/oom"
@@ -102,6 +103,7 @@ func (m ClusterStateFeederFactory) Make() *clusterStateFeeder {
vpaCheckpointClient: m.VpaCheckpointClient,
vpaCheckpointLister: m.VpaCheckpointLister,
vpaLister: m.VpaLister,
+ podLister: m.PodLister,
clusterState: m.ClusterState,
specClient: spec.NewSpecClient(m.PodLister),
selectorFetcher: m.SelectorFetcher,
@@ -210,6 +212,7 @@ type clusterStateFeeder struct {
vpaCheckpointClient vpa_api.VerticalPodAutoscalerCheckpointsGetter
vpaCheckpointLister vpa_lister.VerticalPodAutoscalerCheckpointLister
vpaLister vpa_lister.VerticalPodAutoscalerLister
+ podLister v1lister.PodLister
clusterState model.ClusterState
selectorFetcher target.VpaTargetSelectorFetcher
memorySaveMode bool
@@ -219,53 +222,6 @@ type clusterStateFeeder struct {
vpaObjectNamespace string
}
-func (feeder *clusterStateFeeder) InitFromHistoryProvider(historyProvider history.HistoryProvider) {
- klog.V(3).InfoS("Initializing VPA from history provider")
- clusterHistory, err := historyProvider.GetClusterHistory()
- if err != nil {
- klog.ErrorS(err, "Cannot get cluster history")
- }
- for podID, podHistory := range clusterHistory {
- klog.V(4).InfoS("Adding pod with labels", "pod", podID, "labels", podHistory.LastLabels)
- feeder.clusterState.AddOrUpdatePod(podID, podHistory.LastLabels, apiv1.PodUnknown)
- for containerName, sampleList := range podHistory.Samples {
- containerID := model.ContainerID{
- PodID: podID,
- ContainerName: containerName,
- }
- if err = feeder.clusterState.AddOrUpdateContainer(containerID, nil); err != nil {
- klog.V(0).InfoS("Failed to add container", "container", containerID, "error", err)
- }
- klog.V(4).InfoS("Adding samples for container", "sampleCount", len(sampleList), "container", containerID)
- for _, sample := range sampleList {
- if err := feeder.clusterState.AddSample(
- &model.ContainerUsageSampleWithKey{
- ContainerUsageSample: sample,
- Container: containerID,
- }); err != nil {
- klog.V(0).InfoS("Failed to add sample", "sample", sample, "error", err)
- }
- }
- }
- }
-}
-
-func (feeder *clusterStateFeeder) setVpaCheckpoint(checkpoint *vpa_types.VerticalPodAutoscalerCheckpoint) error {
- vpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}
- vpa, exists := feeder.clusterState.VPAs()[vpaID]
- if !exists {
- return fmt.Errorf("cannot load checkpoint to missing VPA object %s/%s", vpaID.Namespace, vpaID.VpaName)
- }
-
- cs := model.NewAggregateContainerState()
- err := cs.LoadFromCheckpoint(&checkpoint.Status)
- if err != nil {
- return fmt.Errorf("cannot load checkpoint for VPA %s/%s. Reason: %v", vpaID.Namespace, vpaID.VpaName, err)
- }
- vpa.ContainersInitialAggregateState[checkpoint.Spec.ContainerName] = cs
- return nil
-}
-
func (feeder *clusterStateFeeder) InitFromCheckpoints(ctx context.Context) {
klog.V(3).InfoS("Initializing VPA from checkpoints")
feeder.LoadVPAs(ctx)
@@ -297,6 +253,49 @@ func (feeder *clusterStateFeeder) InitFromCheckpoints(ctx context.Context) {
}
}
+func (feeder *clusterStateFeeder) InitFromHistoryProvider(historyProvider history.HistoryProvider) {
+ pods := feeder.podSpecLookup()
+ klog.V(3).InfoS("Initializing VPA from history provider")
+ clusterHistory, err := historyProvider.GetClusterHistory()
+ if err != nil {
+ klog.ErrorS(err, "Cannot get cluster history")
+ }
+ for podID, podHistory := range clusterHistory {
+ // no need to load history if the pod no longer exists
+ podSpec, ok := pods[podID]
+ if !ok {
+ continue
+ }
+ klog.V(4).InfoS("Adding pod with labels", "pod", podID, "labels", podHistory.LastLabels)
+ feeder.clusterState.AddOrUpdatePod(podID, podHistory.LastLabels, podSpec.Phase)
+ for containerName, sampleList := range podHistory.Samples {
+ containerID := model.ContainerID{
+ PodID: podID,
+ ContainerName: containerName,
+ }
+ klog.V(0).InfoS("Adding", "container", containerID)
+
+ containerSpec := podSpec.GetContainerSpec(containerName)
+ if containerSpec == nil {
+ continue
+ }
+ if err = feeder.clusterState.AddOrUpdateContainer(containerID, nil, containerSpec.ContainerType); err != nil {
+ klog.V(0).InfoS("Failed to add container", "container", containerID, "error", err)
+ }
+ klog.V(4).InfoS("Adding samples for container", "sampleCount", len(sampleList), "container", containerID)
+ for _, sample := range sampleList {
+ if err := feeder.clusterState.AddSample(
+ &model.ContainerUsageSampleWithKey{
+ ContainerUsageSample: sample,
+ Container: containerID,
+ }); err != nil {
+ klog.V(0).InfoS("Failed to add sample", "sample", sample, "error", err)
+ }
+ }
+ }
+ }
+}
+
func (feeder *clusterStateFeeder) GarbageCollectCheckpoints(ctx context.Context) {
klog.V(3).InfoS("Starting garbage collection of checkpoints")
@@ -338,82 +337,6 @@ func (feeder *clusterStateFeeder) GarbageCollectCheckpoints(ctx context.Context)
}
}
-func (feeder *clusterStateFeeder) shouldIgnoreNamespace(namespace string) bool {
- // 1. `vpaObjectNamespace` is set but doesn't match the current namespace.
- if feeder.vpaObjectNamespace != "" && namespace != feeder.vpaObjectNamespace {
- return true
- }
- // 2. `ignoredNamespaces` is set, and the current namespace is in the list.
- if len(feeder.ignoredNamespaces) > 0 && slices.Contains(feeder.ignoredNamespaces, namespace) {
- return true
- }
- return false
-}
-
-func (feeder *clusterStateFeeder) cleanupCheckpointsForNamespace(ctx context.Context, namespace string, allVPAKeys map[model.VpaID]bool) error {
- var err error
- checkpointList, err := feeder.vpaCheckpointLister.VerticalPodAutoscalerCheckpoints(namespace).List(labels.Everything())
-
- if err != nil {
- return err
- }
- for _, checkpoint := range checkpointList {
- vpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}
- if !allVPAKeys[vpaID] {
- if errFeeder := feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).Delete(ctx, checkpoint.Name, metav1.DeleteOptions{}); errFeeder != nil {
- err = fmt.Errorf("failed to delete orphaned checkpoint %s: %w", klog.KRef(namespace, checkpoint.Name), err)
- continue
- }
- klog.V(3).InfoS("Orphaned VPA checkpoint cleanup - deleting", "checkpoint", klog.KRef(namespace, checkpoint.Name))
- }
- }
- return err
-}
-
-func implicitDefaultRecommender(selectors []*vpa_types.VerticalPodAutoscalerRecommenderSelector) bool {
- return len(selectors) == 0
-}
-
-func selectsRecommender(selectors []*vpa_types.VerticalPodAutoscalerRecommenderSelector, name *string) bool {
- for _, s := range selectors {
- if s.Name == *name {
- return true
- }
- }
- return false
-}
-
-// Filter VPA objects whose specified recommender names are not default
-func filterVPAs(feeder *clusterStateFeeder, allVpaCRDs []*vpa_types.VerticalPodAutoscaler) []*vpa_types.VerticalPodAutoscaler {
- klog.V(3).InfoS("Start selecting the vpaCRDs.")
- var vpaCRDs []*vpa_types.VerticalPodAutoscaler
- for _, vpaCRD := range allVpaCRDs {
- if feeder.recommenderName == DefaultRecommenderName {
- if !implicitDefaultRecommender(vpaCRD.Spec.Recommenders) && !selectsRecommender(vpaCRD.Spec.Recommenders, &feeder.recommenderName) {
- klog.V(6).InfoS("Ignoring vpaCRD as current recommender's name doesn't appear among its recommenders", "vpaCRD", klog.KObj(vpaCRD), "recommenderName", feeder.recommenderName)
- continue
- }
- } else {
- if implicitDefaultRecommender(vpaCRD.Spec.Recommenders) {
- klog.V(6).InfoS("Ignoring vpaCRD as recommender doesn't process CRDs implicitly destined to default recommender", "vpaCRD", klog.KObj(vpaCRD), "recommenderName", feeder.recommenderName, "defaultRecommenderName", DefaultRecommenderName)
- continue
- }
- if !selectsRecommender(vpaCRD.Spec.Recommenders, &feeder.recommenderName) {
- klog.V(6).InfoS("Ignoring vpaCRD as current recommender's name doesn't appear among its recommenders", "vpaCRD", klog.KObj(vpaCRD), "recommenderName", feeder.recommenderName)
- continue
- }
- }
-
- if feeder.shouldIgnoreNamespace(vpaCRD.Namespace) {
- klog.V(6).InfoS("Ignoring vpaCRD as this namespace is ignored", "vpaCRD", klog.KObj(vpaCRD))
- continue
- }
-
- vpaCRDs = append(vpaCRDs, vpaCRD)
- }
- return vpaCRDs
-}
-
// LoadVPAs fetches VPA objects and loads them into the cluster state.
func (feeder *clusterStateFeeder) LoadVPAs(ctx context.Context) {
// List VPA API objects.
@@ -465,14 +388,7 @@ func (feeder *clusterStateFeeder) LoadVPAs(ctx context.Context) {
// LoadPods loads pod into the cluster state.
func (feeder *clusterStateFeeder) LoadPods() {
- podSpecs, err := feeder.specClient.GetPodSpecs()
- if err != nil {
- klog.ErrorS(err, "Cannot get SimplePodSpecs")
- }
- pods := make(map[model.PodID]*spec.BasicPodSpec)
- for _, spec := range podSpecs {
- pods[spec.ID] = spec
- }
+ pods := feeder.podSpecLookup()
for key := range feeder.clusterState.Pods() {
if _, exists := pods[key]; !exists {
klog.V(3).InfoS("Deleting Pod", "pod", klog.KRef(key.Namespace, key.PodName))
@@ -485,14 +401,19 @@ func (feeder *clusterStateFeeder) LoadPods() {
}
feeder.clusterState.AddOrUpdatePod(pod.ID, pod.PodLabels, pod.Phase)
for _, container := range pod.Containers {
- if err = feeder.clusterState.AddOrUpdateContainer(container.ID, container.Request); err != nil {
+ if err := feeder.clusterState.AddOrUpdateContainer(container.ID, container.Request, container.ContainerType); err != nil {
klog.V(0).InfoS("Failed to add container", "container", container.ID, "error", err)
}
}
for _, initContainer := range pod.InitContainers {
- podInitContainers := feeder.clusterState.Pods()[pod.ID].InitContainers
- feeder.clusterState.Pods()[pod.ID].InitContainers = append(podInitContainers, initContainer.ID.ContainerName)
-
+ if features.Enabled(features.NativeSidecar) && initContainer.ContainerType == model.ContainerTypeInitSidecar {
+ if err := feeder.clusterState.AddOrUpdateContainer(initContainer.ID, initContainer.Request, initContainer.ContainerType); err != nil {
+ klog.V(0).InfoS("Failed to add initContainer", "container", initContainer.ID, "error", err)
+ }
+ } else {
+ podInitContainers := feeder.clusterState.Pods()[pod.ID].InitContainers
+ feeder.clusterState.Pods()[pod.ID].InitContainers = append(podInitContainers, initContainer.ID.ContainerName)
+ }
}
}
}
@@ -543,6 +464,109 @@ Loop:
metrics_recommender.RecordAggregateContainerStatesCount(feeder.clusterState.StateMapSize())
}
+func (feeder *clusterStateFeeder) podSpecLookup() map[model.PodID]*spec.BasicPodSpec {
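+ // List the current pod specs via the spec client, indexed by pod ID. If
+ // the listing fails, the error is logged and an empty map is returned,
+ // so callers treat every pod as absent.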
+ podSpecs, err := feeder.specClient.GetPodSpecs()
+ if err != nil {
+ klog.ErrorS(err, "Cannot get SimplePodSpecs")
+ }
+ pods := make(map[model.PodID]*spec.BasicPodSpec)
+ for _, spec := range podSpecs {
+ pods[spec.ID] = spec
+ }
+ return pods
+}
+
+func (feeder *clusterStateFeeder) setVpaCheckpoint(checkpoint *vpa_types.VerticalPodAutoscalerCheckpoint) error {
+ vpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}
+ vpa, exists := feeder.clusterState.VPAs()[vpaID]
+ if !exists {
+ return fmt.Errorf("cannot load checkpoint to missing VPA object %s/%s", vpaID.Namespace, vpaID.VpaName)
+ }
+
+ cs := model.NewAggregateContainerState()
+ err := cs.LoadFromCheckpoint(&checkpoint.Status)
+ if err != nil {
+ return fmt.Errorf("cannot load checkpoint for VPA %s/%s. Reason: %v", vpaID.Namespace, vpaID.VpaName, err)
+ }
+ vpa.ContainersInitialAggregateState[checkpoint.Spec.ContainerName] = cs
+ return nil
+}
+
+func (feeder *clusterStateFeeder) shouldIgnoreNamespace(namespace string) bool {
+ // 1. `vpaObjectNamespace` is set but doesn't match the current namespace.
+ if feeder.vpaObjectNamespace != "" && namespace != feeder.vpaObjectNamespace {
+ return true
+ }
+ // 2. `ignoredNamespaces` is set, and the current namespace is in the list.
+ if len(feeder.ignoredNamespaces) > 0 && slices.Contains(feeder.ignoredNamespaces, namespace) {
+ return true
+ }
+ return false
+}
+
+func (feeder *clusterStateFeeder) cleanupCheckpointsForNamespace(ctx context.Context, namespace string, allVPAKeys map[model.VpaID]bool) error {
+ var err error
+ checkpointList, err := feeder.vpaCheckpointLister.VerticalPodAutoscalerCheckpoints(namespace).List(labels.Everything())
+
+ if err != nil {
+ return err
+ }
+ for _, checkpoint := range checkpointList {
+ vpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}
+ if !allVPAKeys[vpaID] {
+ if errFeeder := feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).Delete(ctx, checkpoint.Name, metav1.DeleteOptions{}); errFeeder != nil {
+ err = fmt.Errorf("failed to delete orphaned checkpoint %s: %w", klog.KRef(namespace, checkpoint.Name), err)
+ continue
+ }
+ klog.V(3).InfoS("Orphaned VPA checkpoint cleanup - deleting", "checkpoint", klog.KRef(namespace, checkpoint.Name))
+ }
+ }
+ return err
+}
+
+func implicitDefaultRecommender(selectors []*vpa_types.VerticalPodAutoscalerRecommenderSelector) bool {
+ return len(selectors) == 0
+}
+
+func selectsRecommender(selectors []*vpa_types.VerticalPodAutoscalerRecommenderSelector, name *string) bool {
+ for _, s := range selectors {
+ if s.Name == *name {
+ return true
+ }
+ }
+ return false
+}
+
+// Filter VPA objects whose specified recommender names are not default
+func filterVPAs(feeder *clusterStateFeeder, allVpaCRDs []*vpa_types.VerticalPodAutoscaler) []*vpa_types.VerticalPodAutoscaler {
+ klog.V(3).InfoS("Start selecting the vpaCRDs.")
+ var vpaCRDs []*vpa_types.VerticalPodAutoscaler
+ for _, vpaCRD := range allVpaCRDs {
+ if feeder.recommenderName == DefaultRecommenderName {
+ if !implicitDefaultRecommender(vpaCRD.Spec.Recommenders) && !selectsRecommender(vpaCRD.Spec.Recommenders, &feeder.recommenderName) {
+ klog.V(6).InfoS("Ignoring vpaCRD as current recommender's name doesn't appear among its recommenders", "vpaCRD", klog.KObj(vpaCRD), "recommenderName", feeder.recommenderName)
+ continue
+ }
+ } else {
+ if implicitDefaultRecommender(vpaCRD.Spec.Recommenders) {
+ klog.V(6).InfoS("Ignoring vpaCRD as recommender doesn't process CRDs implicitly destined to default recommender", "vpaCRD", klog.KObj(vpaCRD), "recommenderName", feeder.recommenderName, "defaultRecommenderName", DefaultRecommenderName)
+ continue
+ }
+ if !selectsRecommender(vpaCRD.Spec.Recommenders, &feeder.recommenderName) {
+ klog.V(6).InfoS("Ignoring vpaCRD as current recommender's name doesn't appear among its recommenders", "vpaCRD", klog.KObj(vpaCRD), "recommenderName", feeder.recommenderName)
+ continue
+ }
+ }
+
+ if feeder.shouldIgnoreNamespace(vpaCRD.Namespace) {
+ klog.V(6).InfoS("Ignoring vpaCRD as this namespace is ignored", "vpaCRD", klog.KObj(vpaCRD))
+ continue
+ }
+
+ vpaCRDs = append(vpaCRDs, vpaCRD)
+ }
+ return vpaCRDs
+}
+
func (feeder *clusterStateFeeder) matchesVPA(pod *spec.BasicPodSpec) bool {
for vpaKey, vpa := range feeder.clusterState.VPAs() {
podLabels := labels.Set(pod.PodLabels)
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go
index 42488f79d714..4b4e710a53ff 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go
@@ -695,6 +695,37 @@ func TestClusterStateFeeder_InitFromHistoryProvider(t *testing.T) {
t0 := time.Date(2021, time.August, 30, 10, 21, 0, 0, time.UTC)
containerCpu := "containerCpu"
containerMem := "containerMem"
+ containerInit := "containerinit"
+
+ client := &testSpecClient{pods: []*spec.BasicPodSpec{{
+ ID: pod1,
+ Containers: []spec.BasicContainerSpec{
+ {
+ ID: model.ContainerID{
+ PodID: pod1,
+ ContainerName: containerCpu,
+ },
+ ContainerType: model.ContainerTypeStandard,
+ },
+ },
+ InitContainers: []spec.BasicContainerSpec{
+ {
+ ID: model.ContainerID{
+ PodID: pod1,
+ ContainerName: containerMem,
+ },
+ ContainerType: model.ContainerTypeInitSidecar,
+ },
+ {
+ ID: model.ContainerID{
+ PodID: pod1,
+ ContainerName: containerInit,
+ },
+ ContainerType: model.ContainerTypeInit,
+ },
+ },
+ }}}
+
pod1History := history.PodHistory{
LastLabels: map[string]string{},
LastSeen: t0,
@@ -713,6 +744,13 @@ func TestClusterStateFeeder_InitFromHistoryProvider(t *testing.T) {
Resource: model.ResourceMemory,
},
},
+ containerInit: {
+ {
+ MeasureStart: t0,
+ Usage: memAmount,
+ Resource: model.ResourceMemory,
+ },
+ },
},
}
provider := fakeHistoryProvider{
@@ -723,6 +761,7 @@ func TestClusterStateFeeder_InitFromHistoryProvider(t *testing.T) {
clusterState := model.NewClusterState(testGcPeriod)
feeder := clusterStateFeeder{
+ specClient: client,
clusterState: clusterState,
}
feeder.InitFromHistoryProvider(&provider)
@@ -738,10 +777,14 @@ func TestClusterStateFeeder_InitFromHistoryProvider(t *testing.T) {
return
}
assert.Equal(t, t0, containerState.LastCPUSampleStart)
- if !assert.Contains(t, pod1State.Containers, containerMem) {
+ if !assert.Contains(t, pod1State.InitContainers, containerInit) {
+ return
+ }
+ containerInitState := pod1State.InitSidecarsContainers[containerMem]
+ if !assert.NotNil(t, containerInitState) {
return
}
- containerState = pod1State.Containers[containerMem]
+ containerState = pod1State.InitSidecarsContainers[containerMem]
if !assert.NotNil(t, containerState) {
return
}
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go
index 9e5fb9262e99..16d655962e97 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go
@@ -147,14 +147,19 @@ func (o *observer) OnUpdate(oldObj, newObj interface{}) {
klog.ErrorS(nil, "OOM observer received invalid newObj", "newObj", newObj)
}
- for _, containerStatus := range newPod.Status.ContainerStatuses {
+ o.processStatuses(newPod, oldPod, newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, oldPod.Spec.Containers)
+ o.processStatuses(newPod, oldPod, newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, oldPod.Spec.InitContainers)
+}
+
+func (o *observer) processStatuses(newPod *apiv1.Pod, oldPod *apiv1.Pod, statuses []apiv1.ContainerStatus, oldStatuses []apiv1.ContainerStatus, oldSpecs []apiv1.Container) {
+ for _, containerStatus := range statuses {
if containerStatus.RestartCount > 0 &&
containerStatus.LastTerminationState.Terminated != nil &&
containerStatus.LastTerminationState.Terminated.Reason == "OOMKilled" {
- oldStatus := findStatus(containerStatus.Name, oldPod.Status.ContainerStatuses)
+ oldStatus := findStatus(containerStatus.Name, oldStatuses)
if oldStatus != nil && containerStatus.RestartCount > oldStatus.RestartCount {
- oldSpec := findSpec(containerStatus.Name, oldPod.Spec.Containers)
+ oldSpec := findSpec(containerStatus.Name, oldSpecs)
if oldSpec != nil {
requests, _ := resourcehelpers.ContainerRequestsAndLimits(containerStatus.Name, oldPod)
var memory resource.Quantity
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go
index 7f0dc706b610..5491159f9f85 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go
@@ -44,12 +44,20 @@ metadata:
name: Pod1
namespace: mockNamespace
spec:
+ initContainers:
+ - name: InitName11
+ resources:
+ requests:
+ memory: "1024"
containers:
- name: Name11
resources:
requests:
memory: "1024"
status:
+ initContainerStatuses:
+ - name: InitName11
+ restartCount: 0
containerStatuses:
- name: Name11
restartCount: 0
@@ -62,12 +70,20 @@ metadata:
name: Pod1
namespace: mockNamespace
spec:
+ initContainers:
+ - name: InitName11
+ resources:
+ requests:
+ memory: "1024"
containers:
- name: Name11
resources:
requests:
memory: "1024"
status:
+ initContainerStatuses:
+ - name: InitName11
+ restartCount: 0
containerStatuses:
- name: Name11
restartCount: 1
@@ -77,6 +93,36 @@ status:
reason: OOMKilled
`
+const pod3Yaml = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: Pod1
+ namespace: mockNamespace
+spec:
+ initContainers:
+ - name: InitName11
+ resources:
+ requests:
+ memory: "1024"
+ containers:
+ - name: Name11
+ resources:
+ requests:
+ memory: "1024"
+status:
+ initContainerStatuses:
+ - name: InitName11
+ restartCount: 1
+ lastState:
+ terminated:
+ finishedAt: 2018-02-23T13:38:48Z
+ reason: OOMKilled
+ containerStatuses:
+ - name: Name11
+ restartCount: 0
+`
+
func newPod(yaml string) (*v1.Pod, error) {
decode := codecs.UniversalDeserializer().Decode
obj, _, err := decode([]byte(yaml), nil, nil)
@@ -100,6 +146,8 @@ func TestOOMReceived(t *testing.T) {
assert.NoError(t, err)
p2, err := newPod(pod2Yaml)
assert.NoError(t, err)
+ p3, err := newPod(pod3Yaml)
+ assert.NoError(t, err)
timestamp, err := time.Parse(time.RFC3339, "2018-02-23T13:38:48Z")
assert.NoError(t, err)
@@ -125,6 +173,22 @@ func TestOOMReceived(t *testing.T) {
Timestamp: timestamp,
},
},
+ {
+ desc: "OK InitContainer",
+ oldPod: p1,
+ newPod: p3,
+ wantOOMInfo: OomInfo{
+ ContainerID: model.ContainerID{
+ ContainerName: "InitName11",
+ PodID: model.PodID{
+ Namespace: "mockNamespace",
+ PodName: "Pod1",
+ },
+ },
+ Memory: model.ResourceAmount(int64(1024)),
+ Timestamp: timestamp,
+ },
+ },
{
desc: "Old pod does not set memory requests",
oldPod: func() *v1.Pod {
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client.go b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client.go
index 70e0ed417549..9d1978082e37 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client.go
@@ -39,6 +39,22 @@ type BasicPodSpec struct {
Phase v1.PodPhase
}
+// GetContainerSpec returns the spec of the named container, searching regular
+// containers first and then init containers; it returns nil when no match exists.
+func (p *BasicPodSpec) GetContainerSpec(name string) *BasicContainerSpec {
+ for i := range p.Containers {
+ if p.Containers[i].ID.ContainerName == name {
+ return &p.Containers[i]
+ }
+ }
+ for i := range p.InitContainers {
+ if p.InitContainers[i].ID.ContainerName == name {
+ return &p.InitContainers[i]
+ }
+ }
+
+ return nil
+}
+
// BasicContainerSpec contains basic information defining a container.
type BasicContainerSpec struct {
// ID identifies the container within a cluster.
@@ -47,6 +63,8 @@ type BasicContainerSpec struct {
Image string
// Currently requested resources for this container.
Request model.Resources
+ // ContainerType of the container: standard, init, or init-sidecar.
+ ContainerType model.ContainerType
}
// SpecClient provides information about pod and container specifications
@@ -94,33 +112,39 @@ func newBasicPodSpec(pod *v1.Pod) *BasicPodSpec {
}
return basicPodSpec
}
func newContainerSpecs(pod *v1.Pod, containers []v1.Container, isInitContainer bool) []BasicContainerSpec {
var containerSpecs []BasicContainerSpec
for _, container := range containers {
- containerSpec := newContainerSpec(pod, container, isInitContainer)
+ var containerType model.ContainerType
+ if !isInitContainer {
+ containerType = model.ContainerTypeStandard
+ } else if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
+ containerType = model.ContainerTypeInitSidecar
+ } else {
+ containerType = model.ContainerTypeInit
+ }
+
+ containerSpec := newContainerSpec(pod, container, containerType)
containerSpecs = append(containerSpecs, containerSpec)
}
return containerSpecs
}
-func newContainerSpec(pod *v1.Pod, container v1.Container, isInitContainer bool) BasicContainerSpec {
+func newContainerSpec(pod *v1.Pod, container v1.Container, containerType model.ContainerType) BasicContainerSpec {
containerSpec := BasicContainerSpec{
ID: model.ContainerID{
PodID: podID(pod),
ContainerName: container.Name,
},
- Image: container.Image,
- Request: calculateRequestedResources(pod, container, isInitContainer),
+ Image: container.Image,
+ Request: calculateRequestedResources(pod, container),
+ ContainerType: containerType,
}
return containerSpec
}
-func calculateRequestedResources(pod *v1.Pod, container v1.Container, isInitContainer bool) model.Resources {
+func calculateRequestedResources(pod *v1.Pod, container v1.Container) model.Resources {
- requestsAndLimitsFn := resourcehelpers.ContainerRequestsAndLimits
- if isInitContainer {
- requestsAndLimitsFn = resourcehelpers.InitContainerRequestsAndLimits
- }
- requests, _ := requestsAndLimitsFn(container.Name, pod)
+ requests, _ := resourcehelpers.ContainerRequestsAndLimits(container.Name, pod)
cpuQuantity := requests[v1.ResourceCPU]
@@ -133,7 +157,6 @@ func calculateRequestedResources(pod *v1.Pod, container v1.Container, isInitCont
model.ResourceCPU: model.ResourceAmount(cpuMillicores),
model.ResourceMemory: model.ResourceAmount(memoryBytes),
}
-
}
func podID(pod *v1.Pod) model.PodID {
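
The classification added to newContainerSpecs follows the native-sidecar convention from the Kubernetes sidecar KEP: an init container with restartPolicy: Always is treated as a sidecar. A small sketch of just that rule against the real corev1 types (classify and its string labels are illustrative, not this package's API):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// classify mirrors the rule in newContainerSpecs: a non-init container is
// standard, an init container with restartPolicy Always is a native sidecar,
// and any other init container is a plain init container.
func classify(c v1.Container, isInit bool) string {
	if !isInit {
		return "container"
	}
	if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
		return "init-sidecar"
	}
	return "init"
}

func main() {
	always := v1.ContainerRestartPolicyAlways
	fmt.Println(classify(v1.Container{Name: "app"}, false))                          // container
	fmt.Println(classify(v1.Container{Name: "migrate"}, true))                       // init
	fmt.Println(classify(v1.Container{Name: "proxy", RestartPolicy: &always}, true)) // init-sidecar
}
```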
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go
index 3e55e3cb87be..c3a3e24d4070 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go
@@ -146,14 +146,14 @@ func newSpecClientTestCase() *specClientTestCase {
podID1 := model.PodID{Namespace: "", PodName: "Pod1"}
podID2 := model.PodID{Namespace: "", PodName: "Pod2"}
- containerSpec11 := newTestContainerSpec(podID1, "Name11", 500, 512*1024*1024)
- containerSpec12 := newTestContainerSpec(podID1, "Name12", 1000, 1024*1024*1024)
- containerSpec21 := newTestContainerSpec(podID2, "Name21", 2000, 2048*1024*1024)
- containerSpec22 := newTestContainerSpec(podID2, "Name22", 4000, 4096*1024*1024)
- containerSpec23 := newTestContainerSpec(podID2, "Name23", 30, 250*1024*1024)
+ containerSpec11 := newTestContainerSpec(podID1, "Name11", 500, 512*1024*1024, model.ContainerTypeStandard)
+ containerSpec12 := newTestContainerSpec(podID1, "Name12", 1000, 1024*1024*1024, model.ContainerTypeStandard)
+ containerSpec21 := newTestContainerSpec(podID2, "Name21", 2000, 2048*1024*1024, model.ContainerTypeStandard)
+ containerSpec22 := newTestContainerSpec(podID2, "Name22", 4000, 4096*1024*1024, model.ContainerTypeStandard)
+ containerSpec23 := newTestContainerSpec(podID2, "Name23", 30, 250*1024*1024, model.ContainerTypeStandard)
- initContainerSpec21 := newTestContainerSpec(podID2, "Name21-init", 40, 128*1024*1024)
- initContainerSpec22 := newTestContainerSpec(podID2, "Name22-init", 40, 350*1024*1024)
+ initContainerSpec21 := newTestContainerSpec(podID2, "Name21-init", 40, 128*1024*1024, model.ContainerTypeInit)
+ initContainerSpec22 := newTestContainerSpec(podID2, "Name22-init", 40, 350*1024*1024, model.ContainerTypeInit)
podSpec1 := newTestPodSpec(podID1, []BasicContainerSpec{containerSpec11, containerSpec12}, nil)
podSpec2 := newTestPodSpec(podID2, []BasicContainerSpec{containerSpec21, containerSpec22, containerSpec23}, []BasicContainerSpec{initContainerSpec21, initContainerSpec22})
@@ -164,7 +164,7 @@ func newSpecClientTestCase() *specClientTestCase {
}
}
-func newTestContainerSpec(podID model.PodID, containerName string, milicores int, memory int64) BasicContainerSpec {
+func newTestContainerSpec(podID model.PodID, containerName string, milicores int, memory int64, containerType model.ContainerType) BasicContainerSpec {
containerID := model.ContainerID{
PodID: podID,
ContainerName: containerName,
@@ -174,9 +174,10 @@ func newTestContainerSpec(podID model.PodID, containerName string, milicores int
model.ResourceMemory: model.ResourceAmount(memory),
}
return BasicContainerSpec{
- ID: containerID,
- Image: containerName + "Image",
- Request: requestedResources,
+ ID: containerID,
+ Image: containerName + "Image",
+ Request: requestedResources,
+ ContainerType: containerType,
}
}
diff --git a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go
index fd0e2f79a7f6..691c4095b09b 100644
--- a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go
@@ -85,7 +85,7 @@ func TestAggregateStateByContainerName(t *testing.T) {
{testPodID2, "app-C"},
}
for _, c := range containers {
- assert.NoError(t, cluster.AddOrUpdateContainer(c, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(c, testRequest, ContainerTypeStandard))
}
// Add CPU usage samples to all containers.
diff --git a/vertical-pod-autoscaler/pkg/recommender/model/cluster.go b/vertical-pod-autoscaler/pkg/recommender/model/cluster.go
index 3bf8f56636ae..c84becc71902 100644
--- a/vertical-pod-autoscaler/pkg/recommender/model/cluster.go
+++ b/vertical-pod-autoscaler/pkg/recommender/model/cluster.go
@@ -46,7 +46,7 @@ type ClusterState interface {
AddOrUpdatePod(podID PodID, newLabels labels.Set, phase apiv1.PodPhase)
GetContainer(containerID ContainerID) *ContainerState
DeletePod(podID PodID)
- AddOrUpdateContainer(containerID ContainerID, request Resources) error
+ AddOrUpdateContainer(containerID ContainerID, request Resources, containerType ContainerType) error
AddSample(sample *ContainerUsageSampleWithKey) error
RecordOOM(containerID ContainerID, timestamp time.Time, requestedMemory ResourceAmount) error
AddOrUpdateVpa(apiObject *vpa_types.VerticalPodAutoscaler, selector labels.Selector) error
@@ -120,6 +120,8 @@ type PodState struct {
labelSetKey labelSetKey
// Containers that belong to the Pod, keyed by the container name.
Containers map[string]*ContainerState
+ // InitSidecarsContainers are sidecar init containers (restartPolicy: Always) that belong to the Pod, keyed by the container name.
+ InitSidecarsContainers map[string]*ContainerState
// InitContainers is a list of init container names which belong to the Pod.
InitContainers []string
// PodPhase describing current life cycle phase of the Pod.
@@ -170,7 +172,10 @@ func (cluster *clusterState) AddOrUpdatePod(podID PodID, newLabels labels.Set, p
containerID := ContainerID{PodID: podID, ContainerName: containerName}
container.aggregator = cluster.findOrCreateAggregateContainerState(containerID)
}
-
+ for containerName, container := range pod.InitSidecarsContainers {
+ containerID := ContainerID{PodID: podID, ContainerName: containerName}
+ container.aggregator = cluster.findOrCreateAggregateContainerState(containerID)
+ }
cluster.addPodToItsVpa(pod)
}
pod.Phase = phase
@@ -204,6 +209,10 @@ func (cluster *clusterState) GetContainer(containerID ContainerID) *ContainerSta
if containerExists {
return container
}
+ container, containerExists = pod.InitSidecarsContainers[containerID.ContainerName]
+ if containerExists {
+ return container
+ }
}
return nil
}
@@ -221,14 +230,27 @@ func (cluster *clusterState) DeletePod(podID PodID) {
// adds it to the parent pod in the clusterState object, if not yet present.
// Requires the pod to be added to the clusterState first. Otherwise an error is
// returned.
-func (cluster *clusterState) AddOrUpdateContainer(containerID ContainerID, request Resources) error {
+// TODO: consider taking the container spec here, since it already carries all of this information.
+func (cluster *clusterState) AddOrUpdateContainer(containerID ContainerID, request Resources, containerType ContainerType) error {
pod, podExists := cluster.pods[containerID.PodID]
if !podExists {
return NewKeyError(containerID.PodID)
}
- if container, containerExists := pod.Containers[containerID.ContainerName]; !containerExists {
+
+ if containerType == ContainerTypeInit {
+ // Plain init containers are tracked by name only; avoid appending duplicates on updates.
+ for _, name := range pod.InitContainers {
+ if name == containerID.ContainerName {
+ return nil
+ }
+ }
+ pod.InitContainers = append(pod.InitContainers, containerID.ContainerName)
+ return nil
+ }
+
+ containerStateMap := pod.Containers
+ if containerType == ContainerTypeInitSidecar {
+ containerStateMap = pod.InitSidecarsContainers
+ }
+
+ if container, containerExists := containerStateMap[containerID.ContainerName]; !containerExists {
cluster.findOrCreateAggregateContainerState(containerID)
- pod.Containers[containerID.ContainerName] = NewContainerState(request, NewContainerStateAggregatorProxy(cluster, containerID))
+ containerStateMap[containerID.ContainerName] = NewContainerState(request, NewContainerStateAggregatorProxy(cluster, containerID))
} else {
// Container already exists. Possibly update the request.
container.Request = request
@@ -246,7 +268,11 @@ func (cluster *clusterState) AddSample(sample *ContainerUsageSampleWithKey) erro
}
containerState, containerExists := pod.Containers[sample.Container.ContainerName]
if !containerExists {
- return NewKeyError(sample.Container)
+ // Check whether the container is tracked as an init sidecar.
+ containerState, containerExists = pod.InitSidecarsContainers[sample.Container.ContainerName]
+ if !containerExists {
+ return NewKeyError(sample.Container)
+ }
}
if !containerState.AddSample(&sample.ContainerUsageSample) {
return fmt.Errorf("sample discarded (invalid or out of order)")
@@ -262,7 +288,10 @@ func (cluster *clusterState) RecordOOM(containerID ContainerID, timestamp time.T
}
containerState, containerExists := pod.Containers[containerID.ContainerName]
if !containerExists {
- return NewKeyError(containerID.ContainerName)
+ containerState, containerExists = pod.InitSidecarsContainers[containerID.ContainerName]
+ if !containerExists {
+ return NewKeyError(containerID.ContainerName)
+ }
}
err := containerState.RecordOOM(timestamp, requestedMemory)
if err != nil {
@@ -348,8 +377,9 @@ func (cluster *clusterState) ObservedVPAs() []*vpa_types.VerticalPodAutoscaler {
func newPod(id PodID) *PodState {
return &PodState{
- ID: id,
- Containers: make(map[string]*ContainerState),
+ ID: id,
+ Containers: make(map[string]*ContainerState),
+ InitSidecarsContainers: make(map[string]*ContainerState),
}
}
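
To summarize the new bookkeeping: PodState now routes each container by its type, so standard containers and init sidecars get tracked state while plain init containers are recorded by name only (they run to completion, so there is no long-lived state to sample). A pared-down sketch of that routing with local types (duplicate handling and aggregation omitted; not the real model package):

```go
package main

import "fmt"

type containerType string

const (
	typeStandard    containerType = "container"
	typeInit        containerType = "init"
	typeInitSidecar containerType = "init-sidecar"
)

// podState mirrors the routing in AddOrUpdateContainer: standard containers
// and init sidecars get tracked state, plain init containers only a name entry.
type podState struct {
	containers     map[string]bool
	initSidecars   map[string]bool
	initContainers []string
}

func (p *podState) addOrUpdate(name string, t containerType) {
	switch t {
	case typeInit:
		p.initContainers = append(p.initContainers, name)
	case typeInitSidecar:
		p.initSidecars[name] = true
	default:
		p.containers[name] = true
	}
}

func main() {
	p := &podState{containers: map[string]bool{}, initSidecars: map[string]bool{}}
	p.addOrUpdate("app", typeStandard)
	p.addOrUpdate("migrate", typeInit)
	p.addOrUpdate("proxy", typeInitSidecar)
	fmt.Println(len(p.containers), len(p.initSidecars), p.initContainers) // 1 1 [migrate]
}
```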
diff --git a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go
index e3b89096ef21..709dee2fbc36 100644
--- a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go
@@ -86,7 +86,7 @@ func TestClusterAddSample(t *testing.T) {
// Create a pod with a single container.
cluster := NewClusterState(testGcPeriod)
cluster.AddOrUpdatePod(testPodID, testLabels, apiv1.PodRunning)
- assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest, ContainerTypeStandard))
// Add a usage sample to the container.
assert.NoError(t, cluster.AddSample(makeTestUsageSample()))
@@ -104,7 +104,7 @@ func TestClusterGCAggregateContainerStateDeletesOld(t *testing.T) {
vpa := addTestVpa(cluster)
addTestPod(cluster)
- assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest, ContainerTypeStandard))
usageSample := makeTestUsageSample()
// Add a usage sample to the container.
@@ -129,7 +129,7 @@ func TestClusterGCAggregateContainerStateDeletesOldEmpty(t *testing.T) {
vpa := addTestVpa(cluster)
addTestPod(cluster)
- assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest, ContainerTypeStandard))
// No usage samples added.
assert.NotEmpty(t, cluster.aggregateStateMap)
@@ -167,7 +167,7 @@ func TestClusterGCAggregateContainerStateDeletesEmptyInactiveWithoutController(t
err: nil,
}
- assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest, ContainerTypeStandard))
// No usage samples added.
assert.NotEmpty(t, cluster.aggregateStateMap)
@@ -198,7 +198,7 @@ func TestClusterGCAggregateContainerStateLeavesEmptyInactiveWithController(t *te
// Controller Fetcher returns existing controller, meaning that there is a corresponding controller alive.
controller := testControllerFetcher
- assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest, ContainerTypeStandard))
// No usage samples added.
assert.NotEmpty(t, cluster.aggregateStateMap)
@@ -226,7 +226,7 @@ func TestClusterGCAggregateContainerStateLeavesValid(t *testing.T) {
vpa := addTestVpa(cluster)
addTestPod(cluster)
- assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest, ContainerTypeStandard))
usageSample := makeTestUsageSample()
// Add a usage sample to the container.
@@ -251,7 +251,7 @@ func TestAddSampleAfterAggregateContainerStateGCed(t *testing.T) {
pod := addTestPod(cluster)
addTestContainer(t, cluster)
- assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest, ContainerTypeStandard))
usageSample := makeTestUsageSample()
// Add a usage sample to the container.
@@ -294,7 +294,7 @@ func TestClusterGCRateLimiting(t *testing.T) {
cluster.RateLimitedGarbageCollectAggregateCollectionStates(ctx, sampleExpireTime, testControllerFetcher)
vpa := addTestVpa(cluster)
addTestPod(cluster)
- assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest, ContainerTypeStandard))
// Add a usage sample to the container.
assert.NoError(t, cluster.AddSample(usageSample))
@@ -317,7 +317,7 @@ func TestClusterRecordOOM(t *testing.T) {
// Create a pod with a single container.
cluster := NewClusterState(testGcPeriod)
cluster.AddOrUpdatePod(testPodID, testLabels, apiv1.PodRunning)
- assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest, ContainerTypeStandard))
// RecordOOM
assert.NoError(t, cluster.RecordOOM(testContainerID, time.Unix(0, 0), ResourceAmount(10)))
@@ -337,7 +337,7 @@ func TestMissingKeys(t *testing.T) {
err = cluster.RecordOOM(testContainerID, time.Unix(0, 0), ResourceAmount(10))
assert.EqualError(t, err, "KeyError: {namespace-1 pod-1}")
- err = cluster.AddOrUpdateContainer(testContainerID, testRequest)
+ err = cluster.AddOrUpdateContainer(testContainerID, testRequest, ContainerTypeStandard)
assert.EqualError(t, err, "KeyError: {namespace-1 pod-1}")
}
@@ -368,7 +368,7 @@ func addTestPod(cluster ClusterState) *PodState {
}
func addTestContainer(t *testing.T, cluster ClusterState) *ContainerState {
- err := cluster.AddOrUpdateContainer(testContainerID, testRequest)
+ err := cluster.AddOrUpdateContainer(testContainerID, testRequest, ContainerTypeStandard)
assert.NoError(t, err)
return cluster.GetContainer(testContainerID)
}
@@ -656,9 +656,9 @@ func TestTwoPodsWithSameLabels(t *testing.T) {
cluster := NewClusterState(testGcPeriod)
cluster.AddOrUpdatePod(podID1, testLabels, apiv1.PodRunning)
cluster.AddOrUpdatePod(podID2, testLabels, apiv1.PodRunning)
- err := cluster.AddOrUpdateContainer(containerID1, testRequest)
+ err := cluster.AddOrUpdateContainer(containerID1, testRequest, ContainerTypeStandard)
assert.NoError(t, err)
- err = cluster.AddOrUpdateContainer(containerID2, testRequest)
+ err = cluster.AddOrUpdateContainer(containerID2, testRequest, ContainerTypeStandard)
assert.NoError(t, err)
// Expect only one aggregation to be created.
@@ -675,9 +675,9 @@ func TestTwoPodsWithDifferentNamespaces(t *testing.T) {
cluster := NewClusterState(testGcPeriod)
cluster.AddOrUpdatePod(podID1, testLabels, apiv1.PodRunning)
cluster.AddOrUpdatePod(podID2, testLabels, apiv1.PodRunning)
- err := cluster.AddOrUpdateContainer(containerID1, testRequest)
+ err := cluster.AddOrUpdateContainer(containerID1, testRequest, ContainerTypeStandard)
assert.NoError(t, err)
- err = cluster.AddOrUpdateContainer(containerID2, testRequest)
+ err = cluster.AddOrUpdateContainer(containerID2, testRequest, ContainerTypeStandard)
assert.NoError(t, err)
// Expect two separate aggregations to be created.
@@ -695,13 +695,13 @@ func TestEmptySelector(t *testing.T) {
// Create a pod with labels. Add a container.
cluster.AddOrUpdatePod(testPodID, testLabels, apiv1.PodRunning)
containerID1 := ContainerID{testPodID, "foo"}
- assert.NoError(t, cluster.AddOrUpdateContainer(containerID1, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(containerID1, testRequest, ContainerTypeStandard))
// Create a pod without labels. Add a container.
anotherPodID := PodID{"namespace-1", "pod-2"}
cluster.AddOrUpdatePod(anotherPodID, emptyLabels, apiv1.PodRunning)
containerID2 := ContainerID{anotherPodID, "foo"}
- assert.NoError(t, cluster.AddOrUpdateContainer(containerID2, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(containerID2, testRequest, ContainerTypeStandard))
// Both pods should be matched by the VPA.
assert.Contains(t, vpa.aggregateContainerStates, cluster.aggregateStateKeyForContainerID(containerID1))
@@ -921,7 +921,7 @@ func TestVPAWithMatchingPods(t *testing.T) {
for _, podDesc := range tc.pods {
cluster.AddOrUpdatePod(podDesc.id, podDesc.labels, podDesc.phase)
containerID := ContainerID{testPodID, "foo"}
- assert.NoError(t, cluster.AddOrUpdateContainer(containerID, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(containerID, testRequest, ContainerTypeStandard))
}
assert.Equal(t, tc.expectedMatch, cluster.vpas[vpa.ID].PodCount)
})
@@ -933,7 +933,7 @@ func TestVPAWithMatchingPods(t *testing.T) {
for _, podDesc := range tc.pods {
cluster.AddOrUpdatePod(podDesc.id, podDesc.labels, podDesc.phase)
containerID := ContainerID{testPodID, "foo"}
- assert.NoError(t, cluster.AddOrUpdateContainer(containerID, testRequest))
+ assert.NoError(t, cluster.AddOrUpdateContainer(containerID, testRequest, ContainerTypeStandard))
}
vpa := addVpa(cluster, testVpaID, testAnnotations, tc.vpaSelector, testTargetRef)
assert.Equal(t, tc.expectedMatch, cluster.vpas[vpa.ID].PodCount)
diff --git a/vertical-pod-autoscaler/pkg/recommender/model/types.go b/vertical-pod-autoscaler/pkg/recommender/model/types.go
index d4d817b8ef0a..935ed70494fa 100644
--- a/vertical-pod-autoscaler/pkg/recommender/model/types.go
+++ b/vertical-pod-autoscaler/pkg/recommender/model/types.go
@@ -215,3 +215,27 @@ type VpaID struct {
Namespace string
VpaName string
}
+
+// ContainerType distinguishes regular containers from the different kinds of init containers.
+type ContainerType string
+
+// GetPatchPath returns the JSON patch path to use for the given container type.
+func (c ContainerType) GetPatchPath() string {
+ switch c {
+ case ContainerTypeInit, ContainerTypeInitSidecar:
+ return "/spec/initContainers"
+ default:
+ return "/spec/containers"
+ }
+}
+
+const (
+ // ContainerTypeStandard represents a standard container.
+ ContainerTypeStandard ContainerType = "container"
+ // ContainerTypeInit represents a "regular" init container.
+ ContainerTypeInit ContainerType = "init"
+ // ContainerTypeInitSidecar represents an init container with restartPolicy set to Always.
+ ContainerTypeInitSidecar ContainerType = "init-sidecar"
+ // ContainerTypeUnknown represents an unknown container type.
+ ContainerTypeUnknown ContainerType = "unknown"
+)
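
The patch path is the root of a JSON-patch pointer. A short sketch of how it selects the target list, with a hypothetical composition of a full pointer (the /resources suffix is an assumption about how the patch package appends segments, not something this diff shows):

```go
package main

import "fmt"

type containerType string

const (
	typeStandard    containerType = "container"
	typeInit        containerType = "init"
	typeInitSidecar containerType = "init-sidecar"
)

// patchPath mirrors GetPatchPath above: init-flavoured containers are patched
// under /spec/initContainers, everything else under /spec/containers.
func patchPath(t containerType) string {
	switch t {
	case typeInit, typeInitSidecar:
		return "/spec/initContainers"
	default:
		return "/spec/containers"
	}
}

func main() {
	// A full pointer is presumably built from the type-specific root plus the
	// container index and the resources field.
	fmt.Printf("%s/%d/resources\n", patchPath(typeInitSidecar), 0) // /spec/initContainers/0/resources
	fmt.Printf("%s/%d/resources\n", patchPath(typeStandard), 2)    // /spec/containers/2/resources
}
```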
diff --git a/vertical-pod-autoscaler/pkg/updater/inplace/resource_updates.go b/vertical-pod-autoscaler/pkg/updater/inplace/resource_updates.go
index d15d2bb67d73..86ba8ad08948 100644
--- a/vertical-pod-autoscaler/pkg/updater/inplace/resource_updates.go
+++ b/vertical-pod-autoscaler/pkg/updater/inplace/resource_updates.go
@@ -25,6 +25,7 @@ import (
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
+ "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model"
vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
)
@@ -49,11 +50,16 @@ func (*resourcesInplaceUpdatesPatchCalculator) PatchResourceTarget() patch.Patch
func (c *resourcesInplaceUpdatesPatchCalculator) CalculatePatches(pod *core.Pod, vpa *vpa_types.VerticalPodAutoscaler) ([]resource_admission.PatchRecord, error) {
result := []resource_admission.PatchRecord{}
- containersResources, _, err := c.recommendationProvider.GetContainersResourcesForPod(pod, vpa)
+ initContainersResources, containersResources, _, err := c.recommendationProvider.GetContainersResourcesForPod(pod, vpa)
if err != nil {
return []resource_admission.PatchRecord{}, fmt.Errorf("failed to calculate resource patch for pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
+ for i, containerResources := range initContainersResources {
+ newPatches := getInitContainerPatch(pod, i, containerResources)
+ result = append(result, newPatches...)
+ }
+
for i, containerResources := range containersResources {
newPatches := getContainerPatch(pod, i, containerResources)
result = append(result, newPatches...)
@@ -62,27 +68,40 @@ func (c *resourcesInplaceUpdatesPatchCalculator) CalculatePatches(pod *core.Pod,
return result, nil
}
+func getInitContainerPatch(pod *core.Pod, i int, containerResources vpa_api_util.ContainerResources) []resource_admission.PatchRecord {
+ var patches []resource_admission.PatchRecord
+ // Add empty resources object if missing.
+ if pod.Spec.InitContainers[i].Resources.Limits == nil &&
+ pod.Spec.InitContainers[i].Resources.Requests == nil {
+ patches = append(patches, patch.GetPatchInitializingEmptyResources(i, model.ContainerTypeInitSidecar))
+ }
+
+ patches = appendPatches(patches, pod.Spec.InitContainers[i].Resources.Requests, i, containerResources.Requests, "requests", model.ContainerTypeInitSidecar)
+ patches = appendPatches(patches, pod.Spec.InitContainers[i].Resources.Limits, i, containerResources.Limits, "limits", model.ContainerTypeInitSidecar)
+
+ return patches
+}
func getContainerPatch(pod *core.Pod, i int, containerResources vpa_api_util.ContainerResources) []resource_admission.PatchRecord {
var patches []resource_admission.PatchRecord
// Add empty resources object if missing.
if pod.Spec.Containers[i].Resources.Limits == nil &&
pod.Spec.Containers[i].Resources.Requests == nil {
- patches = append(patches, patch.GetPatchInitializingEmptyResources(i))
+ patches = append(patches, patch.GetPatchInitializingEmptyResources(i, model.ContainerTypeStandard))
}
- patches = appendPatches(patches, pod.Spec.Containers[i].Resources.Requests, i, containerResources.Requests, "requests")
- patches = appendPatches(patches, pod.Spec.Containers[i].Resources.Limits, i, containerResources.Limits, "limits")
+ patches = appendPatches(patches, pod.Spec.Containers[i].Resources.Requests, i, containerResources.Requests, "requests", model.ContainerTypeStandard)
+ patches = appendPatches(patches, pod.Spec.Containers[i].Resources.Limits, i, containerResources.Limits, "limits", model.ContainerTypeStandard)
return patches
}
-func appendPatches(patches []resource_admission.PatchRecord, current core.ResourceList, containerIndex int, resources core.ResourceList, fieldName string) []resource_admission.PatchRecord {
+func appendPatches(patches []resource_admission.PatchRecord, current core.ResourceList, containerIndex int, resources core.ResourceList, fieldName string, containerType model.ContainerType) []resource_admission.PatchRecord {
// Add empty object if it's missing and we're about to fill it.
if current == nil && len(resources) > 0 {
- patches = append(patches, patch.GetPatchInitializingEmptyResourcesSubfield(containerIndex, fieldName))
+ patches = append(patches, patch.GetPatchInitializingEmptyResourcesSubfield(containerIndex, fieldName, containerType))
}
for resource, request := range resources {
- patches = append(patches, patch.GetAddResourceRequirementValuePatch(containerIndex, fieldName, resource, request))
+ patches = append(patches, patch.GetAddResourceRequirementValuePatch(containerIndex, fieldName, resource, request, containerType))
}
return patches
}
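
Concretely, each appended record serializes to a JSON-patch operation. A hedged sketch of what one add-operation for an init sidecar could look like on the wire (illustrative path and value; the field names follow RFC 6902, and the real PatchRecord type in the admission-controller patch package may differ in shape):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// patchOp is an RFC 6902 JSON-patch operation, standing in for PatchRecord.
type patchOp struct {
	Op    string      `json:"op"`
	Path  string      `json:"path"`
	Value interface{} `json:"value"`
}

func main() {
	// Illustrative values: an in-place resize of the first init container's
	// CPU request, routed through the /spec/initContainers path.
	op := patchOp{
		Op:    "add",
		Path:  "/spec/initContainers/0/resources/requests/cpu",
		Value: "250m",
	}
	b, _ := json.Marshal(op)
	fmt.Println(string(b))
}
```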
diff --git a/vertical-pod-autoscaler/pkg/updater/priority/priority_processor.go b/vertical-pod-autoscaler/pkg/updater/priority/priority_processor.go
index babe26fa4c0b..7511b0b649e8 100644
--- a/vertical-pod-autoscaler/pkg/updater/priority/priority_processor.go
+++ b/vertical-pod-autoscaler/pkg/updater/priority/priority_processor.go
@@ -53,14 +53,14 @@ func (*defaultPriorityProcessor) GetUpdatePriority(pod *apiv1.Pod, vpa *vpa_type
hasObservedContainers, vpaContainerSet := parseVpaObservedContainers(pod)
- for _, podContainer := range pod.Spec.Containers {
+ processContainer := func(podContainer apiv1.Container) {
if hasObservedContainers && !vpaContainerSet.Has(podContainer.Name) {
klog.V(4).InfoS("Not listed in VPA observed containers label. Skipping container priority calculations", "label", annotations.VpaObservedContainersLabel, "observedContainers", pod.GetAnnotations()[annotations.VpaObservedContainersLabel], "containerName", podContainer.Name, "vpa", klog.KObj(vpa))
- continue
+ return
}
recommendedRequest := vpa_api_util.GetRecommendationForContainer(podContainer.Name, recommendation)
if recommendedRequest == nil {
- continue
+ return
}
for resourceName, recommended := range recommendedRequest.Target {
totalRecommendedPerResource[resourceName] += recommended.MilliValue()
@@ -77,15 +77,19 @@ func (*defaultPriorityProcessor) GetUpdatePriority(pod *apiv1.Pod, vpa *vpa_type
outsideRecommendedRange = true
}
} else {
- // Note: if the request is not specified, the container will use the
- // namespace default request. Currently we ignore it and treat such
- // containers as if they had 0 request. A more correct approach would
- // be to always calculate the 'effective' request.
scaleUp = true
outsideRecommendedRange = true
}
}
}
+
+ for _, podContainer := range pod.Spec.Containers {
+ processContainer(podContainer)
+ }
+ for _, initContainer := range pod.Spec.InitContainers {
+ processContainer(initContainer)
+ }
+
resourceDiff := 0.0
for resource, totalRecommended := range totalRecommendedPerResource {
totalRequest := math.Max(float64(totalRequestPerResource[resource]), 1.0)
diff --git a/vertical-pod-autoscaler/pkg/updater/priority/scaling_direction_pod_eviction_admission.go b/vertical-pod-autoscaler/pkg/updater/priority/scaling_direction_pod_eviction_admission.go
index b45a77ace1f9..e8180e4d4d17 100644
--- a/vertical-pod-autoscaler/pkg/updater/priority/scaling_direction_pod_eviction_admission.go
+++ b/vertical-pod-autoscaler/pkg/updater/priority/scaling_direction_pod_eviction_admission.go
@@ -56,6 +56,18 @@ func (s *scalingDirectionPodEvictionAdmission) Admit(pod *apiv1.Pod, resources *
return true
}
}
+ for _, container := range pod.Spec.InitContainers {
+ recommendedResources := vpa_utils.GetRecommendationForContainer(container.Name, resources)
+ // If a container has no recommendation, the VPA has `.containerPolicy.mode: "Off"` set for it,
+ // so we can skip this container.
+ if recommendedResources == nil {
+ continue
+ }
+ containerRequests, _ := resourcehelpers.ContainerRequestsAndLimits(container.Name, pod)
+ if s.admitContainer(containerRequests, recommendedResources, podEvictionRequirements) {
+ return true
+ }
+ }
return false
}
diff --git a/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers.go b/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers.go
index 54c71b9faa99..eca3722ecc31 100644
--- a/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers.go
+++ b/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers.go
@@ -32,79 +32,52 @@ import (
//
// [1] https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1287-in-place-update-pod-resources
func ContainerRequestsAndLimits(containerName string, pod *v1.Pod) (v1.ResourceList, v1.ResourceList) {
- cs := containerStatusFor(containerName, pod)
+ containerStatuses, containers := pod.Status.ContainerStatuses, pod.Spec.Containers
+ containerStatusSource, containerSource := metrics_resources.ContainerStatus, metrics_resources.PodSpecContainer
+ if isInitContainer(containerName, pod) {
+ containerStatuses, containers = pod.Status.InitContainerStatuses, pod.Spec.InitContainers
+ containerStatusSource, containerSource = metrics_resources.InitContainerStatus, metrics_resources.PodSpecInitContainer
+ }
+
+ cs := containerStatusFor(containerName, containerStatuses)
if cs != nil && cs.Resources != nil {
- metrics_resources.RecordGetResourcesCount(metrics_resources.ContainerStatus)
+ metrics_resources.RecordGetResourcesCount(containerStatusSource)
return cs.Resources.Requests.DeepCopy(), cs.Resources.Limits.DeepCopy()
}
klog.V(6).InfoS("Container resources not found in containerStatus for container. Falling back to resources defined in the pod spec. This is expected for clusters with in-place pod updates feature disabled.", "container", containerName, "containerStatus", cs)
- container := findContainer(containerName, pod)
+ container := findContainer(containerName, containers)
if container != nil {
- metrics_resources.RecordGetResourcesCount(metrics_resources.PodSpecContainer)
+ metrics_resources.RecordGetResourcesCount(containerSource)
return container.Resources.Requests.DeepCopy(), container.Resources.Limits.DeepCopy()
}
return nil, nil
}
-// InitContainerRequestsAndLimits returns a copy of the actual resource requests
-// and limits of a given initContainer:
-//
-// - If in-place pod updates feature [1] is enabled, the actual resource requests
-// are stored in the initContainer status field.
-// - Otherwise, fallback to the resource requests defined in the pod spec.
-//
-// [1] https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1287-in-place-update-pod-resources
-func InitContainerRequestsAndLimits(initContainerName string, pod *v1.Pod) (v1.ResourceList, v1.ResourceList) {
- cs := initContainerStatusFor(initContainerName, pod)
- if cs != nil && cs.Resources != nil {
- metrics_resources.RecordGetResourcesCount(metrics_resources.InitContainerStatus)
- return cs.Resources.Requests.DeepCopy(), cs.Resources.Limits.DeepCopy()
- }
-
- klog.V(6).InfoS("initContainer resources not found in initContainerStatus for initContainer. Falling back to resources defined in the pod spec. This is expected for clusters with in-place pod updates feature disabled.", "initContainer", initContainerName, "initContainerStatus", cs)
- initContainer := findInitContainer(initContainerName, pod)
- if initContainer != nil {
- metrics_resources.RecordGetResourcesCount(metrics_resources.PodSpecInitContainer)
- return initContainer.Resources.Requests.DeepCopy(), initContainer.Resources.Limits.DeepCopy()
- }
-
- return nil, nil
-}
-
-func findContainer(containerName string, pod *v1.Pod) *v1.Container {
- for i, container := range pod.Spec.Containers {
+func findContainer(containerName string, containers []v1.Container) *v1.Container {
+ for i, container := range containers {
if container.Name == containerName {
- return &pod.Spec.Containers[i]
+ return &containers[i]
}
}
return nil
}
-func findInitContainer(initContainerName string, pod *v1.Pod) *v1.Container {
- for i, initContainer := range pod.Spec.InitContainers {
- if initContainer.Name == initContainerName {
- return &pod.Spec.InitContainers[i]
- }
- }
- return nil
-}
-
-func containerStatusFor(containerName string, pod *v1.Pod) *v1.ContainerStatus {
- for i, containerStatus := range pod.Status.ContainerStatuses {
+func containerStatusFor(containerName string, containerStatuses []v1.ContainerStatus) *v1.ContainerStatus {
+ for i, containerStatus := range containerStatuses {
if containerStatus.Name == containerName {
- return &pod.Status.ContainerStatuses[i]
+ return &containerStatuses[i]
}
}
return nil
}
-func initContainerStatusFor(initContainerName string, pod *v1.Pod) *v1.ContainerStatus {
- for i, initContainerStatus := range pod.Status.InitContainerStatuses {
- if initContainerStatus.Name == initContainerName {
- return &pod.Status.InitContainerStatuses[i]
+func isInitContainer(containerName string, pod *v1.Pod) bool {
+ for _, container := range pod.Spec.InitContainers {
+ if container.Name == containerName {
+ return true
}
}
- return nil
+ return false
}
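
The consolidated helper leaves one lookup path for both kinds of container: membership in spec.initContainers decides which status and spec lists are searched, and status resources still win over the spec when present. A self-contained sketch of just the spec fallback against the real corev1 types (requestsFor is an illustrative helper, not this package's API):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// requestsFor mirrors the spec-only fallback in ContainerRequestsAndLimits:
// membership in spec.initContainers decides which container list is searched.
func requestsFor(name string, pod *v1.Pod) v1.ResourceList {
	containers := pod.Spec.Containers
	for _, c := range pod.Spec.InitContainers {
		if c.Name == name {
			containers = pod.Spec.InitContainers
			break
		}
	}
	for i := range containers {
		if containers[i].Name == name {
			return containers[i].Resources.Requests
		}
	}
	return nil
}

func main() {
	pod := &v1.Pod{}
	pod.Spec.InitContainers = []v1.Container{{
		Name: "init-db",
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("64Mi")},
		},
	}}
	mem := requestsFor("init-db", pod)[v1.ResourceMemory]
	fmt.Println(mem.String()) // 64Mi
}
```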
diff --git a/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers_test.go b/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers_test.go
index 857785f90b7e..201c25b5b5da 100644
--- a/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers_test.go
+++ b/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers_test.go
@@ -107,28 +107,27 @@ func TestContainerRequestsAndLimits(t *testing.T) {
wantLimits: nil,
},
{
- desc: "Init container with the same name as the container is ignored",
- containerName: "container-1",
+ desc: "InitContainer selected",
+ containerName: "container",
pod: test.Pod().AddInitContainer(
- test.Container().WithName("container-1").
+ test.Container().WithName("container").
WithCPURequest(resource.MustParse("1")).
WithMemRequest(resource.MustParse("10Mi")).
WithCPULimit(resource.MustParse("2")).
WithMemLimit(resource.MustParse("20Mi")).Get()).
- AddContainer(
- test.Container().WithName("container-1").
- WithCPURequest(resource.MustParse("4")).
- WithMemRequest(resource.MustParse("40Mi")).
- WithCPULimit(resource.MustParse("5")).
- WithMemLimit(resource.MustParse("50Mi")).Get()).
- Get(),
+ AddInitContainerStatus(
+ test.ContainerStatus().WithName("container").
+ WithCPURequest(resource.MustParse("3")).
+ WithMemRequest(resource.MustParse("30Mi")).
+ WithCPULimit(resource.MustParse("4")).
+ WithMemLimit(resource.MustParse("40Mi")).Get()).Get(),
wantRequests: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("4"),
- apiv1.ResourceMemory: resource.MustParse("40Mi"),
+ apiv1.ResourceCPU: resource.MustParse("3"),
+ apiv1.ResourceMemory: resource.MustParse("30Mi"),
},
wantLimits: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("5"),
- apiv1.ResourceMemory: resource.MustParse("50Mi"),
+ apiv1.ResourceCPU: resource.MustParse("4"),
+ apiv1.ResourceMemory: resource.MustParse("40Mi"),
},
},
{
@@ -184,162 +183,3 @@ func TestContainerRequestsAndLimits(t *testing.T) {
})
}
}
-
-func TestInitContainerRequestsAndLimits(t *testing.T) {
- testCases := []struct {
- desc string
- initContainerName string
- pod *apiv1.Pod
- wantRequests apiv1.ResourceList
- wantLimits apiv1.ResourceList
- }{
- {
- desc: "Prefer resource requests from initContainer status",
- initContainerName: "init-container",
- pod: test.Pod().AddInitContainer(
- test.Container().WithName("init-container").
- WithCPURequest(resource.MustParse("1")).
- WithMemRequest(resource.MustParse("10Mi")).
- WithCPULimit(resource.MustParse("2")).
- WithMemLimit(resource.MustParse("20Mi")).Get()).
- AddInitContainerStatus(
- test.ContainerStatus().WithName("init-container").
- WithCPURequest(resource.MustParse("3")).
- WithMemRequest(resource.MustParse("30Mi")).
- WithCPULimit(resource.MustParse("4")).
- WithMemLimit(resource.MustParse("40Mi")).Get()).Get(),
- wantRequests: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("3"),
- apiv1.ResourceMemory: resource.MustParse("30Mi"),
- },
- wantLimits: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("4"),
- apiv1.ResourceMemory: resource.MustParse("40Mi"),
- },
- },
- {
- desc: "No initContainer status, get resources from pod spec",
- initContainerName: "init-container",
- pod: test.Pod().AddInitContainer(
- test.Container().WithName("init-container").
- WithCPURequest(resource.MustParse("1")).
- WithMemRequest(resource.MustParse("10Mi")).
- WithCPULimit(resource.MustParse("2")).
- WithMemLimit(resource.MustParse("20Mi")).Get()).Get(),
- wantRequests: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("1"),
- apiv1.ResourceMemory: resource.MustParse("10Mi"),
- },
- wantLimits: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("2"),
- apiv1.ResourceMemory: resource.MustParse("20Mi"),
- },
- },
- {
- desc: "Only initContainerStatus, get resources from initContainerStatus",
- initContainerName: "init-container",
- pod: test.Pod().AddInitContainerStatus(
- test.ContainerStatus().WithName("init-container").
- WithCPURequest(resource.MustParse("0")).
- WithMemRequest(resource.MustParse("30Mi")).
- WithCPULimit(resource.MustParse("4")).
- WithMemLimit(resource.MustParse("40Mi")).Get()).Get(),
- wantRequests: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("0"),
- apiv1.ResourceMemory: resource.MustParse("30Mi"),
- },
- wantLimits: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("4"),
- apiv1.ResourceMemory: resource.MustParse("40Mi"),
- },
- },
- {
- desc: "Inexistent initContainer",
- initContainerName: "inexistent-init-container",
- pod: test.Pod().AddInitContainer(
- test.Container().WithName("init-container").
- WithCPURequest(resource.MustParse("1")).
- WithMemRequest(resource.MustParse("10Mi")).
- WithCPULimit(resource.MustParse("2")).
- WithMemLimit(resource.MustParse("20Mi")).Get()).Get(),
- wantRequests: nil,
- wantLimits: nil,
- },
- {
- desc: "Container with the same name as the initContainer is ignored",
- initContainerName: "container-1",
- pod: test.Pod().AddInitContainer(
- test.Container().WithName("container-1").
- WithCPURequest(resource.MustParse("1")).
- WithMemRequest(resource.MustParse("10Mi")).
- WithCPULimit(resource.MustParse("2")).
- WithMemLimit(resource.MustParse("20Mi")).Get()).
- AddContainer(
- test.Container().WithName("container-1").
- WithCPURequest(resource.MustParse("4")).
- WithMemRequest(resource.MustParse("40Mi")).
- WithCPULimit(resource.MustParse("5")).
- WithMemLimit(resource.MustParse("50Mi")).Get()).
- Get(),
- wantRequests: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("1"),
- apiv1.ResourceMemory: resource.MustParse("10Mi"),
- },
- wantLimits: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("2"),
- apiv1.ResourceMemory: resource.MustParse("20Mi"),
- },
- },
- {
- desc: "InitContainer with no requests or limits returns non-nil resources",
- initContainerName: "init-container",
- pod: test.Pod().AddInitContainer(test.Container().WithName("init-container").Get()).Get(),
- wantRequests: apiv1.ResourceList{},
- wantLimits: apiv1.ResourceList{},
- },
- {
- desc: "2 init containers",
- initContainerName: "init-container-1",
- pod: test.Pod().AddInitContainer(
- test.Container().WithName("init-container-1").
- WithCPURequest(resource.MustParse("1")).
- WithMemRequest(resource.MustParse("10Mi")).
- WithCPULimit(resource.MustParse("2")).
- WithMemLimit(resource.MustParse("20Mi")).Get()).
- AddInitContainerStatus(
- test.ContainerStatus().WithName("init-container-1").
- WithCPURequest(resource.MustParse("3")).
- WithMemRequest(resource.MustParse("30Mi")).
- WithCPULimit(resource.MustParse("4")).
- WithMemLimit(resource.MustParse("40Mi")).Get()).
- AddInitContainer(
- test.Container().WithName("init-container-2").
- WithCPURequest(resource.MustParse("5")).
- WithMemRequest(resource.MustParse("5Mi")).
- WithCPULimit(resource.MustParse("5")).
- WithMemLimit(resource.MustParse("5Mi")).Get()).
- AddInitContainerStatus(
- test.ContainerStatus().WithName("init-container-2").
- WithCPURequest(resource.MustParse("5")).
- WithMemRequest(resource.MustParse("5Mi")).
- WithCPULimit(resource.MustParse("5")).
- WithMemLimit(resource.MustParse("5Mi")).Get()).
- Get(),
- wantRequests: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("3"),
- apiv1.ResourceMemory: resource.MustParse("30Mi"),
- },
- wantLimits: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("4"),
- apiv1.ResourceMemory: resource.MustParse("40Mi"),
- },
- },
- }
- for _, tc := range testCases {
- t.Run(tc.desc, func(t *testing.T) {
- gotRequests, gotLimits := InitContainerRequestsAndLimits(tc.initContainerName, tc.pod)
- assert.Equal(t, tc.wantRequests, gotRequests, "requests don't match")
- assert.Equal(t, tc.wantLimits, gotLimits, "limits don't match")
- })
- }
-}
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
index 0f2842329c97..d10b46daa99a 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
@@ -312,6 +312,11 @@ func getContainer(containerName string, pod *apiv1.Pod) *apiv1.Container {
return &pod.Spec.Containers[i]
}
}
+ for i, container := range pod.Spec.InitContainers {
+ if container.Name == containerName {
+ return &pod.Spec.InitContainers[i]
+ }
+ }
return nil
}
@@ -397,6 +402,14 @@ func zipContainersWithRecommendations(resources []vpa_types.RecommendedContainer
recommendation := getRecommendationForContainer(container.Name, resources)
result = append(result, containerWithRecommendation{container: &container, recommendation: recommendation})
}
+ for _, container := range pod.Spec.InitContainers {
+ if container.RestartPolicy == nil ||
+ *container.RestartPolicy != apiv1.ContainerRestartPolicyAlways {
+ continue
+ }
+ recommendation := getRecommendationForContainer(container.Name, resources)
+ result = append(result, containerWithRecommendation{container: &container, recommendation: recommendation})
+ }
return result
}
@@ -508,6 +521,19 @@ func insertRequestsForMissingRecommendations(containerRecommendations []vpa_type
Target: requests,
})
}
+ for _, container := range pod.Spec.InitContainers {
+ if recommendationForContainerExists(container.Name, containerRecommendations) {
+ continue
+ }
+ requests, _ := resourcehelpers.ContainerRequestsAndLimits(container.Name, pod)
+ if len(requests) == 0 {
+ continue
+ }
+ result = append(result, vpa_types.RecommendedContainerResources{
+ ContainerName: container.Name,
+ Target: requests,
+ })
+ }
return result
}
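
Finally, the capping change extends an existing invariant to init containers: every container the pod declares shows up in the recommendation set, falling back to its current requests when the VPA has nothing to say about it. A minimal sketch of that fill-in with simplified types (not the vpa_types structs):

```go
package main

import "fmt"

// recommendation is a pared-down stand-in for vpa_types.RecommendedContainerResources.
type recommendation struct {
	ContainerName string
	Target        map[string]string
}

// fillMissing mirrors insertRequestsForMissingRecommendations: a container
// without a recommendation keeps its current requests as the target, so the
// capping math still accounts for every container, init containers included.
func fillMissing(recs []recommendation, currentRequests map[string]map[string]string) []recommendation {
	have := make(map[string]bool, len(recs))
	for _, r := range recs {
		have[r.ContainerName] = true
	}
	for name, requests := range currentRequests {
		if have[name] || len(requests) == 0 {
			continue
		}
		recs = append(recs, recommendation{ContainerName: name, Target: requests})
	}
	return recs
}

func main() {
	recs := fillMissing(
		[]recommendation{{ContainerName: "app", Target: map[string]string{"cpu": "500m"}}},
		map[string]map[string]string{"init-proxy": {"memory": "64Mi"}},
	)
	fmt.Println(len(recs)) // 2: "app" plus the filled-in "init-proxy"
}
```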