@@ -38,6 +38,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtimeSchema "k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
@@ -187,7 +188,7 @@ func TestNodeTopologyQueuePeriodicSync(t *testing.T) {
 	}
 }

-func TestNodeTopologyQueue_AddOrUpdate(t *testing.T) {
+func TestNodeTopologyCR_AddOrUpdateNode(t *testing.T) {
 	testClusterValues := gce.DefaultTestClusterValues()
 	testClusterValues.SubnetworkURL = exampleSubnetURL
 	fakeGCE := gce.NewFakeGCECloud(testClusterValues)
@@ -210,7 +211,7 @@ func TestNodeTopologyQueue_AddOrUpdate(t *testing.T) {
 			},
 		},
 	}
-	fakeClient := fake.NewSimpleClientset(defaultnode, mscnode)
+	fakeClient := fake.NewSimpleClientset(defaultnode)
 	fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, time.Second)
 	fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()
@@ -230,7 +231,8 @@ func TestNodeTopologyQueue_AddOrUpdate(t *testing.T) {
 	go cloudAllocator.Run(wait.NeverStop)

 	// TODO: Fix node_topology_syncer's addOrUpdate so it adds the default subnet regardless of node ordering on the informer
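+	// Tracker().Add (below) goes through the fake clientset's watch machinery,
+	// so the informer's event handlers actually fire; GetStore().Add would only
+	// mutate the local cache without notifying the controller.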
-	fakeNodeInformer.Informer().GetStore().Add(mscnode)
+	time.Sleep(time.Millisecond * 500)
+	fakeClient.Tracker().Add(mscnode)
 	expectedSubnets := []string{"subnet-def", "subnet1"}
 	i := 0
 	for i < 5 {
@@ -267,9 +269,40 @@ func TestNodeTopologyQueue_AddOrUpdate(t *testing.T) {
 	if i >= 5 {
 		t.Fatalf("AddOrUpdate node topology CRD not working as expected")
 	}
+	// The node subnet label should be immutable; update it here only to
+	// exercise the update-node path.
+	mscnode2.ObjectMeta.Labels[testNodePoolSubnetLabelPrefix] = "subnet3"
+	// TODO: derive the GVR automatically instead of hardcoding it
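+	// Nodes live in the core ("") API group, so Group is left empty below.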
+	gvr := runtimeSchema.GroupVersionResource{
+		Version:  "v1",
+		Resource: "nodes",
+	}
+	fakeClient.Tracker().Update(gvr, mscnode2, mscnode2.GetNamespace(), metav1.UpdateOptions{})
+	expectedSubnets = []string{"subnet-def", "subnet1", "subnet2", "subnet3"}
+	i = 0
+	for i < 5 {
+		if ok, _ := verifySubnetsInCR(t, expectedSubnets, nodeTopologyClient); ok {
+			break
+		} else {
+			time.Sleep(time.Millisecond * 500)
+			i++
+		}
+	}
+	if i >= 5 {
+		t.Fatalf("UpdateNode with a different subnet label should not dedup when enqueueing")
+	}
+	// Reset the node topology CR just to test the update-node de-dup path
+	// when the label didn't change.
+	nodeTopologyClient.NetworkingV1().NodeTopologies().UpdateStatus(context.TODO(), ensuredNodeTopologyCR, metav1.UpdateOptions{})
+	// Updating the node without changing the node pool subnet label should
+	// de-dup, not enqueue.
+	mscnode2.ObjectMeta.Labels[testNodePoolSubnetLabelPrefix] = "subnet3"
+	fakeClient.Tracker().Update(gvr, mscnode2, mscnode2.GetNamespace(), metav1.UpdateOptions{})
+	time.Sleep(time.Millisecond * 500)
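+	// The CR status was just reset, so a de-duped update must leave the
+	// subnet list empty; a re-sync would have repopulated it.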
+	expectedSubnets = []string{}
+	if ok, _ := verifySubnetsInCR(t, expectedSubnets, nodeTopologyClient); !ok {
+		t.Fatalf("UpdateNode with the same subnet label should dedup when enqueueing")
+	}
 }

-func TestNodeTopologyCR_DELETION(t *testing.T) {
+func TestNodeTopologyCR_DeleteNode(t *testing.T) {
 	testClusterValues := gce.DefaultTestClusterValues()
 	testClusterValues.SubnetworkURL = exampleSubnetURL
 	fakeGCE := gce.NewFakeGCECloud(testClusterValues)
@@ -279,15 +312,12 @@ func TestNodeTopologyCR_DELETION(t *testing.T) {
 	nwInformer := nwInfFactory.V1().Networks()
 	gnpInformer := nwInfFactory.V1().GKENetworkParamSets()

-	mscnode := &v1.Node{
+	defaultnode := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: "testNode",
-			Labels: map[string]string{
-				testNodePoolSubnetLabelPrefix: "subnet1",
-			},
+			Name: "nodeTopologyDefaultNode",
 		},
 	}
-	fakeClient := fake.NewSimpleClientset()
+	fakeClient := fake.NewSimpleClientset(defaultnode)
 	fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, time.Second)
 	fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()
@@ -306,9 +336,17 @@ func TestNodeTopologyCR_DELETION(t *testing.T) {
 	fakeInformerFactory.Start(wait.NeverStop)
 	go cloudAllocator.Run(wait.NeverStop)

-	fakeNodeInformer.Informer().GetStore().Add(mscnode)
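+	// Add the labeled node through the tracker after the allocator has
+	// started, so the informer receives a genuine Add event rather than a
+	// silent cache mutation.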
+	mscnode := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "testNode",
+			Labels: map[string]string{
+				testNodePoolSubnetLabelPrefix: "subnet1",
+			},
+		},
+	}
+	fakeClient.Tracker().Add(mscnode)

-	expectedSubnets := []string{"subnet-def"}
+	expectedSubnets := []string{"subnet-def", "subnet1"}
 	i := 0
 	for i < 5 {
 		if ok, _ := verifySubnetsInCR(t, expectedSubnets, nodeTopologyClient); ok {
@@ -318,11 +356,142 @@ func TestNodeTopologyCR_DELETION(t *testing.T) {
 			i++
 		}
 	}
+	if i >= 5 {
+		t.Fatalf("Add node topology CR not working as expected")
+	}
+	// TODO: derive the GVR automatically instead of using a hardcoded value
+	gvr := runtimeSchema.GroupVersionResource{
+		Version:  "v1",
+		Resource: "nodes",
+	}
+	fakeClient.Tracker().Delete(gvr, mscnode.GetNamespace(), mscnode.GetName(), metav1.DeleteOptions{})
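+	// Deleting via the tracker emits a watch Delete event; the syncer should
+	// then drop "subnet1" from the CR, leaving only the default subnet.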
+
+	expectedSubnets = []string{"subnet-def"}
+	i = 0
+	for i < 5 {
+		if ok, _ := verifySubnetsInCR(t, expectedSubnets, nodeTopologyClient); ok {
+			break
+		} else {
+			time.Sleep(time.Millisecond * 500)
+			i++
+		}
+	}
 	if i >= 5 {
 		t.Fatalf("Delete node topology CR not working as expected")
 	}
 }

+func TestUpdateUniqueNode(t *testing.T) {
+	testClusterValues := gce.DefaultTestClusterValues()
+	fakeGCE := gce.NewFakeGCECloud(testClusterValues)
+	nodeTopologySyncer := &NodeTopologySyncer{
+		nodeTopologyClient: ntfakeclient.NewSimpleClientset(),
+		cloud:              fakeGCE,
+	}
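+	// updateUniqueNode should enqueue a topology sync only when the node's
+	// labels changed between the old and new objects; an unchanged label set
+	// is de-duped and nothing is queued.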
+	tests := []struct {
+		name    string
+		oldNode *v1.Node
+		newNode *v1.Node
+		queued  bool
+	}{
+		{
+			name: "DuplicatedNodeLabel",
+			oldNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "testNode",
+					Labels: map[string]string{
+						testNodePoolSubnetLabelPrefix: "subnet1",
+					},
+				},
+			},
+			newNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "testNode",
+					Labels: map[string]string{
+						testNodePoolSubnetLabelPrefix: "subnet1",
+					},
+				},
+			},
+			queued: false,
+		},
+		{
+			name: "UpdatedNodeLabel",
+			oldNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "testNode",
+					Labels: map[string]string{
+						testNodePoolSubnetLabelPrefix: "subnet1",
+					},
+				},
+			},
+			newNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "testNode",
+					Labels: map[string]string{
+						testNodePoolSubnetLabelPrefix: "subnet2",
+					},
+				},
+			},
+			queued: true,
+		},
+		{
+			name: "DifferentLabelName",
+			oldNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "testNode",
+					Labels: map[string]string{
+						"cloud.google.com/unrelated": "subnet1",
+					},
+				},
+			},
+			newNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "testNode",
+					Labels: map[string]string{
+						testNodePoolSubnetLabelPrefix: "subnet1",
+					},
+				},
+			},
+			queued: true,
+		},
+		{
+			name: "EmptyLabel",
+			oldNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "testNode",
+					Labels: map[string]string{},
+				},
+			},
+			newNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "testNode",
+					Labels: map[string]string{
+						testNodePoolSubnetLabelPrefix: "subnet1",
+					},
+				},
+			},
+			queued: true,
+		},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			nodetopologyQueue := NewTaskQueue("nodetopologyTaskQueueForTest", "nodetopologyCRD", 1, nodeTopologyKeyFun, nodeTopologySyncer.sync)
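+			// The queue is never started in this test, so enqueued items stay
+			// put and queue.Len() below reflects exactly whether
+			// updateUniqueNode enqueued the node.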
+			ca := &cloudCIDRAllocator{
+				nodeTopologyQueue: nodetopologyQueue,
+			}
+			ca.updateUniqueNode(tc.oldNode, tc.newNode)
+			expectLen := 0
+			if tc.queued {
+				expectLen = 1
+			}
+			got := nodetopologyQueue.queue.Len()
+			if got != expectLen {
+				t.Errorf("updateUniqueNode(%v, %v) resulted in queue length %v, want %v", tc.oldNode, tc.newNode, got, expectLen)
+			}
+		})
+	}
+}
+
 func TestUpdateCIDRAllocation(t *testing.T) {
 	ipv4ipv6Stack := stackIPv4IPv6
 	ipv6ipv4Stack := stackIPv6IPv4
0 commit comments