@@ -66,6 +66,7 @@ import (
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
     "k8s.io/kubernetes/test/e2e/storage/drivers/proxy"
     "k8s.io/kubernetes/test/e2e/storage/utils"
+    "k8s.io/utils/ptr"
     "sigs.k8s.io/yaml"
 )
@@ -214,7 +215,6 @@ func (d *Driver) Run(nodes *Nodes, configureResources func() Resources, devicesP
         // not run on all nodes.
         resources.Nodes = nodes.NodeNames
     }
-    ginkgo.DeferCleanup(d.IsGone) // Register first so it gets called last.
     d.SetUp(nodes, resources, devicesPerNode...)
     ginkgo.DeferCleanup(d.TearDown)
 }
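Since Run no longer registers the slow check automatically, a test that wants to verify that the kubelet has released the driver must opt in explicitly. A minimal sketch, assuming the suite's usual driver setup helper and resource-configuration callback (both names are illustrative, not part of this diff):

```go
// Sketch only: NewDriver and networkResources are assumed test-suite helpers.
driver := NewDriver(f, nodes, networkResources)

// Opt back in to the removal check; it may block for several minutes
// while the kubelet drops the driver's ResourceSlices.
ginkgo.DeferCleanup(driver.IsGone)
```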
@@ -227,12 +227,22 @@ type MethodInstance struct {
 type Driver struct {
     f *framework.Framework
     ctx context.Context
-    cleanup []func() // executed first-in-first-out
+    cleanup []func(context.Context) // executed first-in-first-out
     wg sync.WaitGroup
     serviceAccountName string

+    // NameSuffix can be set while registering a test to deploy different
+    // drivers in the same test namespace.
     NameSuffix string
-    Name string
+
+    // InstanceSuffix can be set while registering a test to deploy two different
+    // instances of the same driver. Used to generate unique objects in the API server.
+    // The socket path is still the same.
+    InstanceSuffix string
+
+    // Name gets derived automatically from the current test namespace and
+    // (if set) the NameSuffix while setting up the driver for a test.
+    Name string

     // Nodes contains entries for each node selected for a test when the test runs.
     // In addition, there is one entry for a fictional node.
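The new InstanceSuffix field is what lets two instances of the same driver share a test namespace: the kubelet socket path stays the same, while the API objects (service account, RBAC names, ReplicaSet, labels) get unique names. A rough usage sketch, with helper names assumed rather than taken from this change:

```go
// Hypothetical setup of two instances of the same driver in one namespace.
// NewDriver and networkResources are assumed test-suite helpers.
driverA := NewDriver(f, nodes, networkResources)
driverA.InstanceSuffix = "-a"

driverB := NewDriver(f, nodes, networkResources)
driverB.InstanceSuffix = "-b"
```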
@@ -263,9 +273,13 @@ func (d *Driver) SetUp(nodes *Nodes, resources Resources, devicesPerNode ...map[
     ctx, cancel := context.WithCancel(context.Background())
     logger := klog.FromContext(ctx)
     logger = klog.LoggerWithValues(logger, "driverName", d.Name)
+    if d.InstanceSuffix != "" {
+        instance, _ := strings.CutPrefix(d.InstanceSuffix, "-")
+        logger = klog.LoggerWithValues(logger, "instance", instance)
+    }
     ctx = klog.NewContext(ctx, logger)
     d.ctx = ctx
-    d.cleanup = append(d.cleanup, cancel)
+    d.cleanup = append(d.cleanup, func(context.Context) { cancel() })

     if !resources.NodeLocal {
         // Publish one resource pool with "network-attached" devices.
@@ -323,28 +337,32 @@ func (d *Driver) SetUp(nodes *Nodes, resources Resources, devicesPerNode ...map[
     }

     // Create service account and corresponding RBAC rules.
-    d.serviceAccountName = "dra-kubelet-plugin-" + d.Name + "-service-account"
+    d.serviceAccountName = "dra-kubelet-plugin-" + d.Name + d.InstanceSuffix + "-service-account"
     content := pluginPermissions
     content = strings.ReplaceAll(content, "dra-kubelet-plugin-namespace", d.f.Namespace.Name)
-    content = strings.ReplaceAll(content, "dra-kubelet-plugin", "dra-kubelet-plugin-"+d.Name)
+    content = strings.ReplaceAll(content, "dra-kubelet-plugin", "dra-kubelet-plugin-"+d.Name+d.InstanceSuffix)
     d.createFromYAML(ctx, []byte(content), d.f.Namespace.Name)

+    // Using a ReplicaSet instead of a DaemonSet has the advantage that we can control
+    // the lifecycle explicitly, in particular run two pods per node long enough to
+    // run checks.
     instanceKey := "app.kubernetes.io/instance"
     rsName := ""
     numNodes := int32(len(nodes.NodeNames))
     pluginDataDirectoryPath := path.Join(framework.TestContext.KubeletRootDir, "plugins", d.Name)
     registrarDirectoryPath := path.Join(framework.TestContext.KubeletRootDir, "plugins_registry")
     registrarSocketFilename := d.Name + "-reg.sock"
+    instanceName := d.Name + d.InstanceSuffix
     err := utils.CreateFromManifests(ctx, d.f, d.f.Namespace, func(item interface{}) error {
         switch item := item.(type) {
         case *appsv1.ReplicaSet:
-            item.Name += d.NameSuffix
+            item.Name += d.NameSuffix + d.InstanceSuffix
             rsName = item.Name
             item.Spec.Replicas = &numNodes
-            item.Spec.Selector.MatchLabels[instanceKey] = d.Name
-            item.Spec.Template.Labels[instanceKey] = d.Name
+            item.Spec.Selector.MatchLabels[instanceKey] = instanceName
+            item.Spec.Template.Labels[instanceKey] = instanceName
             item.Spec.Template.Spec.ServiceAccountName = d.serviceAccountName
-            item.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0].LabelSelector.MatchLabels[instanceKey] = d.Name
+            item.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0].LabelSelector.MatchLabels[instanceKey] = instanceName
             item.Spec.Template.Spec.Affinity.NodeAffinity = &v1.NodeAffinity{
                 RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
                     NodeSelectorTerms: []v1.NodeSelectorTerm{
@@ -376,7 +394,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources Resources, devicesPerNode ...map[
     if err := e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(ctx, d.f.ClientSet, rs, numNodes); err != nil {
         framework.ExpectNoError(err, "all kubelet plugin proxies running")
     }
-    requirement, err := labels.NewRequirement(instanceKey, selection.Equals, []string{d.Name})
+    requirement, err := labels.NewRequirement(instanceKey, selection.Equals, []string{instanceName})
     framework.ExpectNoError(err, "create label selector requirement")
     selector := labels.NewSelector().Add(*requirement)
     pods, err := d.f.ClientSet.CoreV1().Pods(d.f.Namespace.Name).List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
@@ -446,9 +464,20 @@ func (d *Driver) SetUp(nodes *Nodes, resources Resources, devicesPerNode ...map[
             kubeletplugin.RegistrarListener(listen(d.f, &pod, &listenerPort)),
         )
         framework.ExpectNoError(err, "start kubelet plugin for node %s", pod.Spec.NodeName)
-        d.cleanup = append(d.cleanup, func() {
+        d.cleanup = append(d.cleanup, func(ctx context.Context) {
             // Depends on cancel being called first.
             plugin.Stop()
+
+            // Also explicitly stop all pods.
+            ginkgo.By("scaling down driver proxy pods for " + d.Name)
+            rs, err := d.f.ClientSet.AppsV1().ReplicaSets(d.f.Namespace.Name).Get(ctx, rsName, metav1.GetOptions{})
+            framework.ExpectNoError(err, "get ReplicaSet for driver "+d.Name)
+            rs.Spec.Replicas = ptr.To(int32(0))
+            rs, err = d.f.ClientSet.AppsV1().ReplicaSets(d.f.Namespace.Name).Update(ctx, rs, metav1.UpdateOptions{})
+            framework.ExpectNoError(err, "scale down ReplicaSet for driver "+d.Name)
+            if err := e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(ctx, d.f.ClientSet, rs, 0); err != nil {
+                framework.ExpectNoError(err, "all kubelet plugin proxies stopped")
+            }
         })
         d.Nodes[nodename] = KubeletPlugin{ExamplePlugin: plugin, ClientSet: driverClient}
     }
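The new cleanup callback scales the proxy ReplicaSet down to zero and waits for it to drain; ptr.To(int32(0)) simply produces the *int32 that ReplicaSetSpec.Replicas expects. Outside the e2e framework, the same get/update/wait pattern looks roughly like the following sketch (clientset, ns, and name are assumed inputs, and the package name is made up):

```go
// Sketch only: scale a ReplicaSet to zero and wait for it to drain.
package deployutil

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/utils/ptr"
)

func scaleDownAndWait(ctx context.Context, clientset kubernetes.Interface, ns, name string) error {
	rs, err := clientset.AppsV1().ReplicaSets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	rs.Spec.Replicas = ptr.To(int32(0)) // Replicas is *int32, hence ptr.To
	if _, err := clientset.AppsV1().ReplicaSets(ns).Update(ctx, rs, metav1.UpdateOptions{}); err != nil {
		return err
	}
	// Poll until the controller reports no remaining replicas.
	return wait.PollUntilContextTimeout(ctx, time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
		cur, err := clientset.AppsV1().ReplicaSets(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return cur.Status.Replicas == 0, nil
	})
}
```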
@@ -717,22 +746,27 @@ func pipe(ctx context.Context, msg string, verbosity int) *io.PipeWriter {
     return writer
 }

-func (d *Driver) TearDown() {
+func (d *Driver) TearDown(ctx context.Context) {
     for _, c := range d.cleanup {
-        c()
+        c(ctx)
     }
     d.cleanup = nil
     d.wg.Wait()
 }

+// IsGone checks that the kubelet is done with the driver.
+// This is done by waiting for the kubelet to remove the
+// driver's ResourceSlices, which takes at least 5 minutes
+// because of the delay in the kubelet. Only use this in slow
+// tests...
 func (d *Driver) IsGone(ctx context.Context) {
     gomega.Eventually(ctx, func(ctx context.Context) ([]resourceapi.ResourceSlice, error) {
         slices, err := d.f.ClientSet.ResourceV1beta1().ResourceSlices().List(ctx, metav1.ListOptions{FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + d.Name})
         if err != nil {
             return nil, err
         }
         return slices.Items, err
-    }).Should(gomega.BeEmpty())
+    }).WithTimeout(7 * time.Minute).Should(gomega.BeEmpty())
 }

 func (d *Driver) interceptor(nodename string, ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {