
Commit b585c82

test: add unit tests for core/utils and improve nil-safety in helpers
Signed-off-by: kincoy <[email protected]>
1 parent 24ba0e7 commit b585c82

2 files changed: 222 additions & 1 deletion


cluster-autoscaler/core/utils/utils.go

Lines changed: 9 additions & 1 deletion
@@ -28,9 +28,17 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 )
 
+const (
+	// VirtualNodeLabelValue is the value of the "type" label set on virtual kubelet nodes
+	VirtualNodeLabelValue = "virtual-kubelet"
+)
+
 // isVirtualNode determines if the node is created by virtual kubelet
 func isVirtualNode(node *apiv1.Node) bool {
-	return node.ObjectMeta.Labels["type"] == "virtual-kubelet"
+	if node == nil {
+		return false
+	}
+	return node.ObjectMeta.Labels["type"] == VirtualNodeLabelValue
 }
 
 // FilterOutNodesFromNotAutoscaledGroups return subset of input nodes for which cloud provider does not
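
Two notes on this hunk: indexing a nil Labels map is already safe in Go (it returns the zero value ""), so the new guard specifically prevents a nil-pointer panic when the *Node itself is nil; and the former magic string is now a named, exported constant that tests and callers can share. A minimal sketch of the resulting behavior, assuming it sits in the same core/utils package as the code above (the example function and the fmt import are illustrative, not part of the commit):

import "fmt"

func exampleIsVirtualNode() {
	virtual := &apiv1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{"type": VirtualNodeLabelValue},
		},
	}
	fmt.Println(isVirtualNode(virtual))       // true: the "type" label matches the constant
	fmt.Println(isVirtualNode(&apiv1.Node{})) // false: a missing label reads as "" from the nil map
	fmt.Println(isVirtualNode(nil))           // false: the new guard short-circuits instead of panicking
}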

cluster-autoscaler/core/utils/utils_test.go

Lines changed: 213 additions & 0 deletions
@@ -17,6 +17,7 @@ limitations under the License.
 package utils
 
 import (
+	"errors"
 	"testing"
 	"time"
 
@@ -25,6 +26,8 @@ import (
 	"github.com/stretchr/testify/assert"
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	mock_cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/mocks"
+	caerrors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 )
 
 func TestGetNodeResource(t *testing.T) {
@@ -75,6 +78,12 @@ func TestGetNodeCoresAndMemory(t *testing.T) {
 	cores, memory = GetNodeCoresAndMemory(nodeWithMissingCapacity)
 	assert.Equal(t, int64(0), cores)
 	assert.Equal(t, int64(0), memory)
+
+	// if the node has negative capacity we expect GetNodeCoresAndMemory to return 0s
+	nodeWithNegativeCapacity := BuildTestNode("n1", -1000, -2*MiB)
+	cores, memory = GetNodeCoresAndMemory(nodeWithNegativeCapacity)
+	assert.Equal(t, int64(0), cores)
+	assert.Equal(t, int64(0), memory)
 }
 
 func TestGetOldestPod(t *testing.T) {
@@ -88,3 +97,207 @@ func TestGetOldestPod(t *testing.T) {
 	assert.Equal(t, p1.CreationTimestamp.Time, GetOldestCreateTime([]*apiv1.Pod{p1, p2, p3}))
 	assert.Equal(t, p1.CreationTimestamp.Time, GetOldestCreateTime([]*apiv1.Pod{p3, p2, p1}))
 }
+
+func TestIsVirtualNode(t *testing.T) {
+	type args struct {
+		node *apiv1.Node
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "returns false when node is nil",
+			args: args{
+				node: nil,
+			},
+			want: false,
+		},
+		{
+			name: "returns false when node does not have virtual-kubelet label",
+			args: args{
+				node: &apiv1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Labels: map[string]string{
+							"type": "not-virtual-kubelet",
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{
+			name: "returns true when node has virtual-kubelet label",
+			args: args{
+				node: &apiv1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Labels: map[string]string{
+							"type": VirtualNodeLabelValue,
+						},
+					},
+				},
+			},
+			want: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equalf(t, tt.want, isVirtualNode(tt.args.node), "isVirtualNode(%v)", tt.args.node)
+		})
+	}
+}
+
+func TestFilterOutNodesFromNotAutoscaledGroups(t *testing.T) {
+	nodeInGroup := BuildTestNode("node-in-group", 1000, 2*MiB)
+	nodeNotInGroup := BuildTestNode("node-not-in-group", 1000, 2*MiB)
+	nodeError := BuildTestNode("node-error", 1000, 2*MiB)
+	nodeVirtual := BuildTestNode("node-virtual", 1000, 2*MiB)
+	nodeVirtual.Labels["type"] = VirtualNodeLabelValue
+
+	mockProvider := &mock_cloudprovider.CloudProvider{}
+	mockProvider.On("NodeGroupForNode", nodeInGroup).Return(new(mock_cloudprovider.NodeGroup), nil)
+	mockProvider.On("NodeGroupForNode", nodeNotInGroup).Return(nil, nil)
+	mockProvider.On("NodeGroupForNode", nodeError).Return(nil, errors.New("some error"))
+
+	t.Run("ignores virtual nodes", func(t *testing.T) {
+		result, err := FilterOutNodesFromNotAutoscaledGroups([]*apiv1.Node{nodeVirtual}, mockProvider)
+		assert.NoError(t, err)
+		assert.Empty(t, result)
+
+		mockProvider.AssertNotCalled(t, "NodeGroupForNode", nodeVirtual)
+	})
+
+	t.Run("returns error if cloud provider fails", func(t *testing.T) {
+		result, err := FilterOutNodesFromNotAutoscaledGroups([]*apiv1.Node{nodeError}, mockProvider)
+		assert.Error(t, err)
+		assert.Equal(t, caerrors.CloudProviderError, err.Type())
+		assert.Empty(t, result)
+	})
+
+	t.Run("filters out nodes in autoscaled groups", func(t *testing.T) {
+		result, err := FilterOutNodesFromNotAutoscaledGroups([]*apiv1.Node{nodeInGroup, nodeNotInGroup}, mockProvider)
+		assert.NoError(t, err)
+		assert.Len(t, result, 1)
+		assert.Equal(t, "node-not-in-group", result[0].Name)
+	})
+
+	mockProvider.AssertExpectations(t)
+}
+
+func TestHasHardInterPodAffinity(t *testing.T) {
+	type args struct {
+		affinity *apiv1.Affinity
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "returns false when affinity is nil",
+			args: args{
+				affinity: nil,
+			},
+			want: false,
+		},
+		{
+			name: "returns false when affinity is empty",
+			args: args{
+				affinity: &apiv1.Affinity{},
+			},
+			want: false,
+		},
+		{
+			name: "returns true when PodAffinity has RequiredDuringScheduling terms",
+			args: args{
+				affinity: &apiv1.Affinity{
+					PodAffinity: &apiv1.PodAffinity{
+						RequiredDuringSchedulingIgnoredDuringExecution: []apiv1.PodAffinityTerm{
+							{LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}}},
+						},
+					},
+				},
+			},
+			want: true,
+		},
+		{
+			name: "returns false when PodAffinity has empty RequiredDuringScheduling",
+			args: args{
+				affinity: &apiv1.Affinity{
+					PodAffinity: &apiv1.PodAffinity{
+						PreferredDuringSchedulingIgnoredDuringExecution: []apiv1.WeightedPodAffinityTerm{
+							{Weight: 10, PodAffinityTerm: apiv1.PodAffinityTerm{}},
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{
+			name: "returns true when PodAntiAffinity has RequiredDuringScheduling terms",
+			args: args{
+				affinity: &apiv1.Affinity{
+					PodAntiAffinity: &apiv1.PodAntiAffinity{
+						RequiredDuringSchedulingIgnoredDuringExecution: []apiv1.PodAffinityTerm{
+							{LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}}},
+						},
+					},
+				},
+			},
+			want: true,
+		},
+		{
+			name: "returns false when PodAntiAffinity has empty RequiredDuringScheduling",
+			args: args{
+				affinity: &apiv1.Affinity{
+					PodAntiAffinity: &apiv1.PodAntiAffinity{
+						PreferredDuringSchedulingIgnoredDuringExecution: []apiv1.WeightedPodAffinityTerm{
+							{Weight: 10, PodAffinityTerm: apiv1.PodAffinityTerm{}},
+						},
+					},
+				},
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equalf(t, tt.want, hasHardInterPodAffinity(tt.args.affinity), "hasHardInterPodAffinity(%v)", tt.args.affinity)
+		})
+	}
+}
+
+func TestGetOldestCreateTimeWithGpu(t *testing.T) {
+	fixedTime := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)
+	p1 := BuildTestPod("p1", 500, 1000)
+	p1.CreationTimestamp = metav1.NewTime(fixedTime.Add(-1 * time.Minute))
+	p2 := BuildTestPod("p2", 500, 1000)
+	p2.CreationTimestamp = metav1.NewTime(fixedTime.Add(-2 * time.Minute))
+	RequestGpuForPod(p2, 1)
+	p3 := BuildTestPod("p3", 500, 1000)
+	p3.CreationTimestamp = metav1.NewTime(fixedTime.Add(-3 * time.Minute))
+	RequestGpuForPod(p3, 1)
+
+	t.Run("returns false when no pods", func(t *testing.T) {
+		found, _ := GetOldestCreateTimeWithGpu([]*apiv1.Pod{})
+		assert.False(t, found)
+	})
+
+	t.Run("returns false when no GPU pods", func(t *testing.T) {
+		found, _ := GetOldestCreateTimeWithGpu([]*apiv1.Pod{p1})
+		assert.False(t, found)
+	})
+
+	t.Run("returns creation time when single GPU pod", func(t *testing.T) {
+		found, ts := GetOldestCreateTimeWithGpu([]*apiv1.Pod{p2})
+		assert.True(t, found)
+		assert.Equal(t, p2.CreationTimestamp.Time, ts)
+	})
+
+	t.Run("returns oldest time among multiple GPU pods", func(t *testing.T) {
+		found, ts := GetOldestCreateTimeWithGpu([]*apiv1.Pod{p1, p2, p3})
+		assert.True(t, found)
+		assert.Equal(t, p3.CreationTimestamp.Time, ts)
+	})
+}
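
The TestFilterOutNodesFromNotAutoscaledGroups test above leans on testify's mock package: On(...).Return(...) registers a canned response keyed by method name and arguments, AssertNotCalled verifies a call never happened, and AssertExpectations fails the test if any registered expectation went unused. For readers unfamiliar with the pattern, a minimal self-contained sketch follows; the Fetcher type and its FetchName method are hypothetical, only the testify API is real:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// Fetcher embeds mock.Mock so it can record calls and replay canned results.
type Fetcher struct {
	mock.Mock
}

// FetchName forwards the call to the mock machinery, which matches it against
// the expectations registered with On(...).
func (f *Fetcher) FetchName(id int) (string, error) {
	args := f.Called(id)
	return args.String(0), args.Error(1)
}

func TestFetcher(t *testing.T) {
	f := &Fetcher{}
	f.On("FetchName", 1).Return("node-1", nil) // expect FetchName(1) -> ("node-1", nil)

	name, err := f.FetchName(1)
	assert.NoError(t, err)
	assert.Equal(t, "node-1", name)

	f.AssertNotCalled(t, "FetchName", 2) // passes: FetchName(2) was never invoked
	f.AssertExpectations(t)              // fails if any On(...) expectation went unused
}

With a standard Go toolchain, the new tests themselves should then run from the cluster-autoscaler module root with, for example, go test ./core/utils/ -run TestIsVirtualNode.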
