
Commit 3738111

Add unit tests
Adjust existing tests and add new test flows to cover the new DRA manager behaviour.

Signed-off-by: adrianc <[email protected]>
1 parent 08b9420 commit 3738111
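The new test flows hinge on two additions visible in the diff below: atomic call counters on the fake DRA driver gRPC server, and a prepared flag on the cached claim info that decides whether the manager needs to call the driver at all. As a reading aid, here is a minimal, self-contained Go sketch of that call-counting pattern; fakeDriver and miniManager are simplified stand-ins invented for illustration, not the real drapbv1 server or the kubelet DRA manager.

// Sketch only: simplified stand-ins showing how atomic call counters let a
// test assert how many times the driver was actually invoked.
package main

import (
    "fmt"
    "sync/atomic"
)

// fakeDriver mirrors the test's fakeDRADriverGRPCServer: every handler
// bumps its counter before doing any work.
type fakeDriver struct {
    prepareCalls   atomic.Uint32
    unprepareCalls atomic.Uint32
}

func (d *fakeDriver) NodePrepareResources() error {
    d.prepareCalls.Add(1)
    return nil
}

func (d *fakeDriver) NodeUnprepareResources() error {
    d.unprepareCalls.Add(1)
    return nil
}

// miniManager stands in for the kubelet DRA manager: it only calls the
// driver when the claim is not already marked as prepared, which is the
// behaviour the new "claim in cache" test cases exercise.
type miniManager struct {
    prepared map[string]bool
    driver   *fakeDriver
}

func (m *miniManager) PrepareResources(claim string) error {
    if m.prepared[claim] {
        return nil // already prepared by this manager: skip the driver call
    }
    if err := m.driver.NodePrepareResources(); err != nil {
        return err
    }
    m.prepared[claim] = true
    return nil
}

func main() {
    d := &fakeDriver{}
    m := &miniManager{prepared: map[string]bool{}, driver: d}

    _ = m.PrepareResources("test-pod-claim")
    _ = m.PrepareResources("test-pod-claim") // skipped: claim already prepared

    // In the real test this becomes:
    // assert.Equal(t, test.ExpectedPrepareCalls, draServerInfo.server.prepareResourceCalls.Load())
    fmt.Println("prepare calls:", d.prepareCalls.Load()) // prints 1
}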


1 file changed (+200, -37 lines)


pkg/kubelet/cm/dra/manager_test.go

Lines changed: 200 additions & 37 deletions
@@ -22,6 +22,7 @@ import (
     "net"
     "os"
     "path/filepath"
+    "sync/atomic"
     "testing"
     "time"
 
@@ -46,11 +47,15 @@ const (
 
 type fakeDRADriverGRPCServer struct {
     drapbv1.UnimplementedNodeServer
-    driverName string
-    timeout    *time.Duration
+    driverName             string
+    timeout                *time.Duration
+    prepareResourceCalls   atomic.Uint32
+    unprepareResourceCalls atomic.Uint32
 }
 
 func (s *fakeDRADriverGRPCServer) NodePrepareResources(ctx context.Context, req *drapbv1.NodePrepareResourcesRequest) (*drapbv1.NodePrepareResourcesResponse, error) {
+    s.prepareResourceCalls.Add(1)
+
     if s.timeout != nil {
         time.Sleep(*s.timeout)
     }
@@ -60,6 +65,8 @@ func (s *fakeDRADriverGRPCServer) NodePrepareResources(ctx context.Context, req
 }
 
 func (s *fakeDRADriverGRPCServer) NodeUnprepareResources(ctx context.Context, req *drapbv1.NodeUnprepareResourcesRequest) (*drapbv1.NodeUnprepareResourcesResponse, error) {
+    s.unprepareResourceCalls.Add(1)
+
     if s.timeout != nil {
         time.Sleep(*s.timeout)
     }
@@ -68,10 +75,23 @@ func (s *fakeDRADriverGRPCServer) NodeUnprepareResources(ctx context.Context, re
 
 type tearDown func()
 
-func setupFakeDRADriverGRPCServer(shouldTimeout bool) (string, tearDown, error) {
+type fakeDRAServerInfo struct {
+    // fake DRA server
+    server *fakeDRADriverGRPCServer
+    // fake DRA plugin socket name
+    socketName string
+    // teardownFn stops fake gRPC server
+    teardownFn tearDown
+}
+
+func setupFakeDRADriverGRPCServer(shouldTimeout bool) (fakeDRAServerInfo, error) {
     socketDir, err := os.MkdirTemp("", "dra")
     if err != nil {
-        return "", nil, err
+        return fakeDRAServerInfo{
+            server:     nil,
+            socketName: "",
+            teardownFn: nil,
+        }, err
     }
 
     socketName := filepath.Join(socketDir, "server.sock")
@@ -85,7 +105,11 @@ func setupFakeDRADriverGRPCServer(shouldTimeout bool) (string, tearDown, error)
     l, err := net.Listen("unix", socketName)
     if err != nil {
         teardown()
-        return "", nil, err
+        return fakeDRAServerInfo{
+            server:     nil,
+            socketName: "",
+            teardownFn: nil,
+        }, err
     }
 
     s := grpc.NewServer()
@@ -105,7 +129,11 @@ func setupFakeDRADriverGRPCServer(shouldTimeout bool) (string, tearDown, error)
         s.GracefulStop()
     }()
 
-    return socketName, teardown, nil
+    return fakeDRAServerInfo{
+        server:     fakeDRADriverGRPCServer,
+        socketName: socketName,
+        teardownFn: teardown,
+    }, nil
 }
 
 func TestNewManagerImpl(t *testing.T) {
@@ -177,10 +205,12 @@ func TestGetResources(t *testing.T) {
                 },
             },
             claimInfo: &ClaimInfo{
-                annotations: []kubecontainer.Annotation{
-                    {
-                        Name:  "test-annotation",
-                        Value: "123",
+                annotations: map[string][]kubecontainer.Annotation{
+                    "test-plugin": {
+                        {
+                            Name:  "test-annotation",
+                            Value: "123",
+                        },
                     },
                 },
                 ClaimInfoState: state.ClaimInfoState{
@@ -280,14 +310,15 @@ func TestPrepareResources(t *testing.T) {
     fakeKubeClient := fake.NewSimpleClientset()
 
     for _, test := range []struct {
-        description         string
-        driverName          string
-        pod                 *v1.Pod
-        claimInfo           *ClaimInfo
-        resourceClaim       *resourcev1alpha2.ResourceClaim
-        wantErr             bool
-        wantTimeout         bool
-        wantResourceSkipped bool
+        description          string
+        driverName           string
+        pod                  *v1.Pod
+        claimInfo            *ClaimInfo
+        resourceClaim        *resourcev1alpha2.ResourceClaim
+        wantErr              bool
+        wantTimeout          bool
+        wantResourceSkipped  bool
+        ExpectedPrepareCalls uint32
     }{
         {
             description: "failed to fetch ResourceClaim",
@@ -497,6 +528,7 @@ func TestPrepareResources(t *testing.T) {
                     Namespace: "test-namespace",
                     PodUIDs:   sets.Set[string]{"test-another-pod-reserved": sets.Empty{}},
                 },
+                prepared: true,
             },
             resourceClaim: &resourcev1alpha2.ResourceClaim{
                 ObjectMeta: metav1.ObjectMeta{
@@ -574,11 +606,12 @@ func TestPrepareResources(t *testing.T) {
                     },
                 },
             },
-            wantErr:     true,
-            wantTimeout: true,
+            wantErr:              true,
+            wantTimeout:          true,
+            ExpectedPrepareCalls: 1,
         },
         {
-            description: "should prepare resource",
+            description: "should prepare resource, claim not in cache",
            driverName:  driverName,
            pod: &v1.Pod{
                ObjectMeta: metav1.ObjectMeta{
@@ -630,6 +663,78 @@ func TestPrepareResources(t *testing.T) {
                     },
                 },
             },
+            ExpectedPrepareCalls: 1,
+        },
+        {
+            description: "should prepare resource. claim in cache, manager did not prepare resource",
+            driverName:  driverName,
+            pod: &v1.Pod{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "test-pod",
+                    Namespace: "test-namespace",
+                    UID:       "test-reserved",
+                },
+                Spec: v1.PodSpec{
+                    ResourceClaims: []v1.PodResourceClaim{
+                        {
+                            Name: "test-pod-claim",
+                            Source: v1.ClaimSource{ResourceClaimName: func() *string {
+                                s := "test-pod-claim"
+                                return &s
+                            }()},
+                        },
+                    },
+                    Containers: []v1.Container{
+                        {
+                            Resources: v1.ResourceRequirements{
+                                Claims: []v1.ResourceClaim{
+                                    {
+                                        Name: "test-pod-claim",
+                                    },
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+            claimInfo: &ClaimInfo{
+                ClaimInfoState: state.ClaimInfoState{
+                    DriverName: driverName,
+                    ClassName:  "test-class",
+                    ClaimName:  "test-pod-claim",
+                    ClaimUID:   "test-reserved",
+                    Namespace:  "test-namespace",
+                    PodUIDs:    sets.Set[string]{"test-reserved": sets.Empty{}},
+                    CDIDevices: map[string][]string{
+                        driverName: {fmt.Sprintf("%s/%s=some-device", driverName, driverClassName)},
+                    },
+                    ResourceHandles: []resourcev1alpha2.ResourceHandle{{Data: "test-data"}},
+                },
+                annotations: make(map[string][]kubecontainer.Annotation),
+                prepared:    false,
+            },
+            resourceClaim: &resourcev1alpha2.ResourceClaim{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "test-pod-claim",
+                    Namespace: "test-namespace",
+                    UID:       "test-reserved",
+                },
+                Spec: resourcev1alpha2.ResourceClaimSpec{
+                    ResourceClassName: "test-class",
+                },
+                Status: resourcev1alpha2.ResourceClaimStatus{
+                    DriverName: driverName,
+                    Allocation: &resourcev1alpha2.AllocationResult{
+                        ResourceHandles: []resourcev1alpha2.ResourceHandle{
+                            {Data: "test-data"},
+                        },
+                    },
+                    ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{
+                        {UID: "test-reserved"},
+                    },
+                },
+            },
+            ExpectedPrepareCalls: 1,
         },
     } {
         t.Run(test.description, func(t *testing.T) {
@@ -649,14 +754,14 @@ func TestPrepareResources(t *testing.T) {
                 }
             }
 
-            socketName, teardown, err := setupFakeDRADriverGRPCServer(test.wantTimeout)
+            draServerInfo, err := setupFakeDRADriverGRPCServer(test.wantTimeout)
             if err != nil {
                 t.Fatal(err)
             }
-            defer teardown()
+            defer draServerInfo.teardownFn()
 
             plg := plugin.NewRegistrationHandler()
-            if err := plg.RegisterPlugin(test.driverName, socketName, []string{"1.27"}); err != nil {
+            if err := plg.RegisterPlugin(test.driverName, draServerInfo.socketName, []string{"1.27"}); err != nil {
                 t.Fatalf("failed to register plugin %s, err: %v", test.driverName, err)
             }
             defer plg.DeRegisterPlugin(test.driverName) // for sake of next tests
@@ -666,6 +771,9 @@ func TestPrepareResources(t *testing.T) {
             }
 
             err = manager.PrepareResources(test.pod)
+
+            assert.Equal(t, test.ExpectedPrepareCalls, draServerInfo.server.prepareResourceCalls.Load())
+
             if test.wantErr {
                 assert.Error(t, err)
                 return // PrepareResources returned an error so stopping the subtest here
@@ -705,13 +813,14 @@ func TestUnprepareResources(t *testing.T) {
     fakeKubeClient := fake.NewSimpleClientset()
 
     for _, test := range []struct {
-        description         string
-        driverName          string
-        pod                 *v1.Pod
-        claimInfo           *ClaimInfo
-        wantErr             bool
-        wantTimeout         bool
-        wantResourceSkipped bool
+        description            string
+        driverName             string
+        pod                    *v1.Pod
+        claimInfo              *ClaimInfo
+        wantErr                bool
+        wantTimeout            bool
+        wantResourceSkipped    bool
+        expectedUnprepareCalls uint32
    }{
        {
            description: "plugin does not exist",
@@ -838,11 +947,12 @@ func TestUnprepareResources(t *testing.T) {
                     },
                 },
             },
-            wantErr:     true,
-            wantTimeout: true,
+            wantErr:                true,
+            wantTimeout:            true,
+            expectedUnprepareCalls: 1,
         },
         {
-            description: "should unprepare resource",
+            description: "should unprepare resource, claim previously prepared by currently running manager",
             driverName:  driverName,
             pod: &v1.Pod{
                 ObjectMeta: metav1.ObjectMeta{
@@ -885,7 +995,57 @@ func TestUnprepareResources(t *testing.T) {
                     },
                 },
             },
+            prepared: true,
+        },
+        expectedUnprepareCalls: 1,
+    },
+    {
+        description: "should unprepare resource, claim previously was not prepared by currently running manager",
+        driverName:  driverName,
+        pod: &v1.Pod{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      "test-pod",
+                Namespace: "test-namespace",
+                UID:       "test-reserved",
+            },
+            Spec: v1.PodSpec{
+                ResourceClaims: []v1.PodResourceClaim{
+                    {
+                        Name: "test-pod-claim",
+                        Source: v1.ClaimSource{ResourceClaimName: func() *string {
+                            s := "test-pod-claim"
+                            return &s
+                        }()},
+                    },
+                },
+                Containers: []v1.Container{
+                    {
+                        Resources: v1.ResourceRequirements{
+                            Claims: []v1.ResourceClaim{
+                                {
+                                    Name: "test-pod-claim",
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+        claimInfo: &ClaimInfo{
+            ClaimInfoState: state.ClaimInfoState{
+                DriverName: driverName,
+                ClaimName:  "test-pod-claim",
+                Namespace:  "test-namespace",
+                ResourceHandles: []resourcev1alpha2.ResourceHandle{
+                    {
+                        DriverName: driverName,
+                        Data:       "test data",
+                    },
+                },
+            },
+            prepared: false,
         },
+        expectedUnprepareCalls: 1,
     },
     } {
         t.Run(test.description, func(t *testing.T) {
@@ -894,14 +1054,14 @@ func TestUnprepareResources(t *testing.T) {
                 t.Fatalf("failed to create a new instance of the claimInfoCache, err: %v", err)
             }
 
-            socketName, teardown, err := setupFakeDRADriverGRPCServer(test.wantTimeout)
+            draServerInfo, err := setupFakeDRADriverGRPCServer(test.wantTimeout)
             if err != nil {
                 t.Fatal(err)
             }
-            defer teardown()
+            defer draServerInfo.teardownFn()
 
             plg := plugin.NewRegistrationHandler()
-            if err := plg.RegisterPlugin(test.driverName, socketName, []string{"1.27"}); err != nil {
+            if err := plg.RegisterPlugin(test.driverName, draServerInfo.socketName, []string{"1.27"}); err != nil {
                 t.Fatalf("failed to register plugin %s, err: %v", test.driverName, err)
             }
             defer plg.DeRegisterPlugin(test.driverName) // for sake of next tests
@@ -916,6 +1076,9 @@ func TestUnprepareResources(t *testing.T) {
             }
 
             err = manager.UnprepareResources(test.pod)
+
+            assert.Equal(t, test.expectedUnprepareCalls, draServerInfo.server.unprepareResourceCalls.Load())
+
             if test.wantErr {
                 assert.Error(t, err)
                 return // UnprepareResources returned an error so stopping the subtest here
