@@ -30,7 +30,9 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/informers"
@@ -320,22 +322,25 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
 // syncResourceQuota runs a complete sync of resource quota status across all known kinds
 func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err error) {
 	// quota is dirty if any part of spec hard limits differs from the status hard limits
-	dirty := !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)
+	statusLimitsDirty := !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)
 
 	// dirty tracks if the usage status differs from the previous sync,
 	// if so, we send a new usage with latest status
 	// if this is our first sync, it will be dirty by default, since we need to track usage
-	dirty = dirty || resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil
+	dirty := statusLimitsDirty || resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil
 
 	used := v1.ResourceList{}
 	if resourceQuota.Status.Used != nil {
 		used = quota.Add(v1.ResourceList{}, resourceQuota.Status.Used)
 	}
 	hardLimits := quota.Add(v1.ResourceList{}, resourceQuota.Spec.Hard)
 
+	errors := []error{}
+
 	newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry, resourceQuota.Spec.ScopeSelector)
 	if err != nil {
-		return err
+		// if err is non-nil, remember it to return, but continue updating status with any resources in newUsage
+		errors = append(errors, err)
 	}
 	for key, value := range newUsage {
 		used[key] = value
@@ -358,9 +363,11 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ
 	// there was a change observed by this controller that requires we update quota
 	if dirty {
 		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(usage)
-		return err
+		if err != nil {
+			errors = append(errors, err)
+		}
 	}
-	return nil
+	return utilerrors.NewAggregate(errors)
 }
 
 // replenishQuota is a replenishment function invoked by a controller to notify that a quota should be recalculated
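The new return path relies on utilerrors.NewAggregate returning nil for an empty (or all-nil) list, so a fully successful sync still returns nil, while any collected failures are flattened into a single error. A minimal standalone sketch of this collect-and-aggregate pattern; the calculateUsage helper here is hypothetical, standing in for quota.CalculateUsage, which can return partial results alongside an error:

package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// calculateUsage is a hypothetical stand-in for quota.CalculateUsage:
// it may return a partial result together with an error.
func calculateUsage(fail bool) (string, error) {
	if fail {
		return "partial usage", fmt.Errorf("failed listing one resource type")
	}
	return "full usage", nil
}

func main() {
	errors := []error{}

	usage, err := calculateUsage(true)
	if err != nil {
		// remember the error, but keep going with the partial result
		errors = append(errors, err)
	}
	fmt.Println("updating status with:", usage)

	// nil when no errors were collected; one aggregated error otherwise
	fmt.Println("returned error:", utilerrors.NewAggregate(errors))
}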
@@ -423,26 +430,66 @@ func (rq *ResourceQuotaController) Sync(discoveryFunc NamespacedResourcesFunc, p
 			return
 		}
 
-		// Something has changed, so track the new state and perform a sync.
-		klog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources)
-		oldResources = newResources
-
 		// Ensure workers are paused to avoid processing events before informers
 		// have resynced.
 		rq.workerLock.Lock()
 		defer rq.workerLock.Unlock()
 
+		// Something has changed, so track the new state and perform a sync.
+		if klog.V(2) {
+			klog.Infof("syncing resource quota controller with updated resources from discovery: %s", printDiff(oldResources, newResources))
+		}
+
 		// Perform the monitor resync and wait for controllers to report cache sync.
 		if err := rq.resyncMonitors(newResources); err != nil {
			utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors: %v", err))
 			return
 		}
-		if rq.quotaMonitor != nil && !controller.WaitForCacheSync("resource quota", stopCh, rq.quotaMonitor.IsSynced) {
+		// wait for caches to fill for a while (our sync period).
+		// this protects us from deadlocks where available resources changed and one of our informer caches will never fill.
+		// informers keep attempting to sync in the background, so retrying doesn't interrupt them.
+		// the call to resyncMonitors on the reattempt will no-op for resources that still exist.
+		if rq.quotaMonitor != nil && !controller.WaitForCacheSync("resource quota", waitForStopOrTimeout(stopCh, period), rq.quotaMonitor.IsSynced) {
 			utilruntime.HandleError(fmt.Errorf("timed out waiting for quota monitor sync"))
+			return
 		}
+
+		// success, remember newly synced resources
+		oldResources = newResources
+		klog.V(2).Infof("synced quota controller")
 	}, period, stopCh)
 }
 
+// printDiff returns a human-readable summary of what resources were added and removed
+func printDiff(oldResources, newResources map[schema.GroupVersionResource]struct{}) string {
+	removed := sets.NewString()
+	for oldResource := range oldResources {
+		if _, ok := newResources[oldResource]; !ok {
+			removed.Insert(fmt.Sprintf("%+v", oldResource))
+		}
+	}
+	added := sets.NewString()
+	for newResource := range newResources {
+		if _, ok := oldResources[newResource]; !ok {
+			added.Insert(fmt.Sprintf("%+v", newResource))
+		}
+	}
+	return fmt.Sprintf("added: %v, removed: %v", added.List(), removed.List())
+}
+
+// waitForStopOrTimeout returns a stop channel that closes when the provided stop channel closes or when the specified timeout is reached
+func waitForStopOrTimeout(stopCh <-chan struct{}, timeout time.Duration) <-chan struct{} {
+	stopChWithTimeout := make(chan struct{})
+	go func() {
+		defer close(stopChWithTimeout)
+		select {
+		case <-stopCh:
+		case <-time.After(timeout):
+		}
+	}()
+	return stopChWithTimeout
+}
+
 // resyncMonitors starts or stops quota monitors as needed to ensure that all
 // (and only) those resources present in the map are monitored.
 func (rq *ResourceQuotaController) resyncMonitors(resources map[schema.GroupVersionResource]struct{}) error {
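For illustration, a standalone sketch exercising the printDiff helper added above, with hypothetical resource sets; sets.NewString keeps the List() output sorted, so the log line is deterministic:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/sets"
)

// printDiff as added in this change
func printDiff(oldResources, newResources map[schema.GroupVersionResource]struct{}) string {
	removed := sets.NewString()
	for oldResource := range oldResources {
		if _, ok := newResources[oldResource]; !ok {
			removed.Insert(fmt.Sprintf("%+v", oldResource))
		}
	}
	added := sets.NewString()
	for newResource := range newResources {
		if _, ok := oldResources[newResource]; !ok {
			added.Insert(fmt.Sprintf("%+v", newResource))
		}
	}
	return fmt.Sprintf("added: %v, removed: %v", added.List(), removed.List())
}

func main() {
	oldResources := map[schema.GroupVersionResource]struct{}{
		{Version: "v1", Resource: "pods"}:     {},
		{Version: "v1", Resource: "services"}: {},
	}
	newResources := map[schema.GroupVersionResource]struct{}{
		{Version: "v1", Resource: "pods"}:                       {},
		{Group: "apps", Version: "v1", Resource: "deployments"}: {},
	}
	// prints: added: [{Group:apps Version:v1 Resource:deployments}], removed: [{Group: Version:v1 Resource:services}]
	fmt.Println(printDiff(oldResources, newResources))
}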
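And a sketch showing the bounded-wait behavior the comments describe: the channel returned by waitForStopOrTimeout closes after the timeout even if stopCh never closes, so WaitForCacheSync gives up after one sync period and the enclosing wait.Until loop retries instead of deadlocking:

package main

import (
	"fmt"
	"time"
)

// waitForStopOrTimeout as added in this change
func waitForStopOrTimeout(stopCh <-chan struct{}, timeout time.Duration) <-chan struct{} {
	stopChWithTimeout := make(chan struct{})
	go func() {
		defer close(stopChWithTimeout)
		select {
		case <-stopCh:
		case <-time.After(timeout):
		}
	}()
	return stopChWithTimeout
}

func main() {
	stopCh := make(chan struct{}) // never closed, like a controller that keeps running

	start := time.Now()
	<-waitForStopOrTimeout(stopCh, 100*time.Millisecond)
	// unblocks after roughly 100ms even though stopCh is still open
	fmt.Printf("unblocked after %v\n", time.Since(start).Round(time.Millisecond))
}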