diff --git a/pkg/providers/instancetype/suite_test.go b/pkg/providers/instancetype/suite_test.go
index 7e67b28d78ae..c444dbfe95fd 100644
--- a/pkg/providers/instancetype/suite_test.go
+++ b/pkg/providers/instancetype/suite_test.go
@@ -1322,11 +1322,11 @@ var _ = Describe("InstanceTypeProvider", func() {
 					nodeClass.AMIFamily(),
 					nil,
 				)
-				Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("50Mi"))
+				Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("100Mi"))
 			})
 		})
 		Context("Eviction Soft", func() {
-			It("should override eviction threshold when specified as a quantity", func() {
+			It("should use default threshold when only evictionSoft is specified", func() {
 				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					SystemReserved: map[string]string{
 						string(corev1.ResourceMemory): "20Gi",
@@ -1354,9 +1354,9 @@ var _ = Describe("InstanceTypeProvider", func() {
 					nodeClass.AMIFamily(),
 					nil,
 				)
-				Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("500Mi"))
+				Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("100Mi"))
 			})
-			It("should override eviction threshold when specified as a percentage value", func() {
+			It("should use evictionHard percentage and ignore evictionSoft percentage", func() {
 				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					SystemReserved: map[string]string{
 						string(corev1.ResourceMemory): "20Gi",
@@ -1387,9 +1387,9 @@ var _ = Describe("InstanceTypeProvider", func() {
 					nodeClass.AMIFamily(),
 					nil,
 				)
-				Expect(it.Overhead.EvictionThreshold.Memory().Value()).To(BeNumerically("~", float64(it.Capacity.Memory().Value())*0.1, 10))
+				Expect(it.Overhead.EvictionThreshold.Memory().Value()).To(BeNumerically("~", float64(it.Capacity.Memory().Value())*0.05, 10))
 			})
-			It("should consider the eviction threshold disabled when specified as 100%", func() {
+			It("should use default threshold when evictionSoft is 100% (ignored)", func() {
 				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					SystemReserved: map[string]string{
 						string(corev1.ResourceMemory): "20Gi",
@@ -1417,7 +1417,7 @@ var _ = Describe("InstanceTypeProvider", func() {
 					nodeClass.AMIFamily(),
 					nil,
 				)
-				Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("0"))
+				Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("100Mi"))
 			})
 			It("should ignore eviction threshold when using Bottlerocket AMI", func() {
 				nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Alias: "bottlerocket@latest"}}
@@ -1476,7 +1476,7 @@ var _ = Describe("InstanceTypeProvider", func() {
 				Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("100Mi"))
 				Expect(it.Overhead.EvictionThreshold.StorageEphemeral().AsApproximateFloat64()).To(BeNumerically("~", resources.Quantity("2Gi").AsApproximateFloat64()))
 			})
-			It("should take the greater of evictionHard and evictionSoft for overhead as a value", func() {
+			It("should use only evictionHard for overhead, ignoring evictionSoft", func() {
 				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					SystemReserved: map[string]string{
 						string(corev1.ResourceMemory): "20Gi",
@@ -1507,9 +1507,10 @@ var _ = Describe("InstanceTypeProvider", func() {
 					nodeClass.AMIFamily(),
 					nil,
 				)
-				Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("3Gi"))
+				// Should use evictionHard (1Gi), not evictionSoft (3Gi)
+				Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("1Gi"))
 			})
-			It("should take the greater of evictionHard and evictionSoft for overhead as a value", func() {
+			It("should use only evictionHard percentage for overhead, ignoring evictionSoft", func() {
 				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					SystemReserved: map[string]string{
 						string(corev1.ResourceMemory): "20Gi",
@@ -1540,9 +1541,10 @@ var _ = Describe("InstanceTypeProvider", func() {
 					nodeClass.AMIFamily(),
 					nil,
 				)
+				// Should use evictionHard (5%), not evictionSoft (2%)
 				Expect(it.Overhead.EvictionThreshold.Memory().Value()).To(BeNumerically("~", float64(it.Capacity.Memory().Value())*0.05, 10))
 			})
-			It("should take the greater of evictionHard and evictionSoft for overhead with mixed percentage/value", func() {
+			It("should use only evictionHard value with mixed percentage/value types", func() {
 				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					SystemReserved: map[string]string{
 						string(corev1.ResourceMemory): "20Gi",
@@ -1573,7 +1575,8 @@ var _ = Describe("InstanceTypeProvider", func() {
 					nodeClass.AMIFamily(),
 					nil,
 				)
-				Expect(it.Overhead.EvictionThreshold.Memory().Value()).To(BeNumerically("~", float64(it.Capacity.Memory().Value())*0.1, 10))
+				// Should use evictionHard (1Gi), not evictionSoft (10%)
+				Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("1Gi"))
 			})
 		})
 		It("should default max pods based off of network interfaces", func() {
@@ -2596,7 +2599,6 @@ var _ = Describe("InstanceTypeProvider", func() {
 			// kubelet.kubeReserved
 			// kubelet.systemReserved
 			// kubelet.evictionHard
-			// kubelet.evictionSoft
 			// kubelet.maxPods
 			nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 				KubeReserved: map[string]string{string(corev1.ResourceCPU): "1"},
@@ -2612,7 +2614,6 @@ var _ = Describe("InstanceTypeProvider", func() {
 				{KubeReserved: map[string]string{string(corev1.ResourceCPU): "20"}},
 				{SystemReserved: map[string]string{string(corev1.ResourceMemory): "10Gi"}},
 				{EvictionHard: map[string]string{"memory.available": "52%"}},
-				{EvictionSoft: map[string]string{"nodefs.available": "132%"}},
 				{MaxPods: aws.Int32(20)},
 			}
 			ExpectApplied(ctx, env.Client, nodeClass)
@@ -2634,7 +2635,7 @@ var _ = Describe("InstanceTypeProvider", func() {
 				instanceTypeResults = append(instanceTypeResults, instancetypes)
 			}
 
-			// Based on the nodeclass configuration, we expect to have 5 unique set of instance types
+			// Based on the nodeclass configuration, we expect to have 4 unique set of instance types
 			ExpectUniqueInstanceTypeLists(instanceTypeResults...)
 		})
 		It("changes to nodeclass fields should result in a different set of instances types", func() {
diff --git a/pkg/providers/instancetype/types.go b/pkg/providers/instancetype/types.go
index de3524852661..11e264b8393d 100644
--- a/pkg/providers/instancetype/types.go
+++ b/pkg/providers/instancetype/types.go
@@ -145,7 +145,7 @@ func NewInstanceType(
 		Overhead: &cloudprovider.InstanceTypeOverhead{
 			KubeReserved:      kubeReservedResources(cpu(info), lo.Ternary(amiFamily.FeatureFlags().UsesENILimitedMemoryOverhead, ENILimitedPods(ctx, info, 0), pods(ctx, info, amiFamily, maxPods, podsPerCore)), kubeReserved),
 			SystemReserved:    systemReservedResources(systemReserved),
-			EvictionThreshold: evictionThreshold(memory(ctx, info), ephemeralStorage(info, amiFamily, blockDeviceMappings, instanceStorePolicy), amiFamily, evictionHard, evictionSoft),
+			EvictionThreshold: evictionThreshold(memory(ctx, info), ephemeralStorage(info, amiFamily, blockDeviceMappings, instanceStorePolicy), evictionHard),
 		},
 	}
 	if it.Requirements.Compatible(scheduling.NewRequirements(scheduling.NewRequirement(corev1.LabelOSStable, corev1.NodeSelectorOpIn, string(corev1.Windows)))) == nil {
@@ -529,30 +529,23 @@ func kubeReservedResources(cpus, pods *resource.Quantity, kubeReserved map[strin
 	}))
 }
 
-func evictionThreshold(memory *resource.Quantity, storage *resource.Quantity, amiFamily amifamily.AMIFamily, evictionHard map[string]string, evictionSoft map[string]string) corev1.ResourceList {
+func evictionThreshold(memory *resource.Quantity, storage *resource.Quantity, evictionHard map[string]string) corev1.ResourceList {
 	overhead := corev1.ResourceList{
 		corev1.ResourceMemory:           resource.MustParse("100Mi"),
 		corev1.ResourceEphemeralStorage: resource.MustParse(fmt.Sprint(math.Ceil(float64(storage.Value()) / 100 * 10))),
 	}
 	override := corev1.ResourceList{}
-	var evictionSignals []map[string]string
+	// Only use evictionHard for allocatable memory calculation
+	// evictionSoft should not impact allocatable capacity as it's only a warning threshold
+	// See: https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds
 	if evictionHard != nil {
-		evictionSignals = append(evictionSignals, evictionHard)
-	}
-	if evictionSoft != nil && amiFamily.FeatureFlags().EvictionSoftEnabled {
-		evictionSignals = append(evictionSignals, evictionSoft)
-	}
-
-	for _, m := range evictionSignals {
-		temp := corev1.ResourceList{}
-		if v, ok := m[MemoryAvailable]; ok {
-			temp[corev1.ResourceMemory] = computeEvictionSignal(*memory, v)
+		if v, ok := evictionHard[MemoryAvailable]; ok {
+			override[corev1.ResourceMemory] = computeEvictionSignal(*memory, v)
 		}
-		if v, ok := m[NodeFSAvailable]; ok {
-			temp[corev1.ResourceEphemeralStorage] = computeEvictionSignal(*storage, v)
+		if v, ok := evictionHard[NodeFSAvailable]; ok {
+			override[corev1.ResourceEphemeralStorage] = computeEvictionSignal(*storage, v)
 		}
-		override = resources.MaxResources(override, temp)
 	}
 	// Assign merges maps from left to right so overrides will always be taken last
 	return lo.Assign(overhead, override)
 }