Skip to content

Commit dd6c53d

Browse files
authored
Merge pull request kubernetes#93946 from alexzimmer96/68026-pkg-controller-resourcequota
Refactor pkg/controllers/resourcequota to fix golint errors
2 parents d5e2db8 + 86dc036 commit dd6c53d

File tree

7 files changed

+47
-48
lines changed

7 files changed

+47
-48
lines changed

cmd/kube-controller-manager/app/core.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -427,7 +427,7 @@ func startResourceQuotaController(ctx ControllerContext) (http.Handler, bool, er
427427
listerFuncForResource := generic.ListerFuncForResourceFunc(ctx.InformerFactory.ForResource)
428428
quotaConfiguration := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource)
429429

430-
resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
430+
resourceQuotaControllerOptions := &resourcequotacontroller.ControllerOptions{
431431
QuotaClient: resourceQuotaControllerClient.CoreV1(),
432432
ResourceQuotaInformer: ctx.InformerFactory.Core().V1().ResourceQuotas(),
433433
ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.ComponentConfig.ResourceQuotaController.ResourceQuotaSyncPeriod.Duration),
@@ -444,7 +444,7 @@ func startResourceQuotaController(ctx ControllerContext) (http.Handler, bool, er
444444
}
445445
}
446446

447-
resourceQuotaController, err := resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions)
447+
resourceQuotaController, err := resourcequotacontroller.NewController(resourceQuotaControllerOptions)
448448
if err != nil {
449449
return nil, false, err
450450
}

hack/.golint_failures

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,6 @@ pkg/controller/replicaset
7676
pkg/controller/replicaset/config/v1alpha1
7777
pkg/controller/replication
7878
pkg/controller/replication/config/v1alpha1
79-
pkg/controller/resourcequota
8079
pkg/controller/resourcequota/config/v1alpha1
8180
pkg/controller/service/config/v1alpha1
8281
pkg/controller/serviceaccount/config/v1alpha1

pkg/controller/resourcequota/doc.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
1414
limitations under the License.
1515
*/
1616

17-
// resourcequota contains a controller that makes resource quota usage observations
17+
// Package resourcequota contains a controller that makes resource quota usage observations
1818
package resourcequota // import "k8s.io/kubernetes/pkg/controller/resourcequota"

pkg/controller/resourcequota/resource_quota_controller.go

Lines changed: 27 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ import (
4242
"k8s.io/client-go/tools/cache"
4343
"k8s.io/client-go/util/workqueue"
4444
"k8s.io/kubernetes/pkg/controller"
45-
quota "k8s.io/kubernetes/pkg/quota/v1"
45+
"k8s.io/kubernetes/pkg/quota/v1"
4646
)
4747

4848
// NamespacedResourcesFunc knows how to discover namespaced resources.
@@ -52,8 +52,8 @@ type NamespacedResourcesFunc func() ([]*metav1.APIResourceList, error)
5252
// that may require quota to be recalculated.
5353
type ReplenishmentFunc func(groupResource schema.GroupResource, namespace string)
5454

55-
// ResourceQuotaControllerOptions holds options for creating a quota controller
56-
type ResourceQuotaControllerOptions struct {
55+
// ControllerOptions holds options for creating a quota controller
56+
type ControllerOptions struct {
5757
// Must have authority to list all quotas, and update quota status
5858
QuotaClient corev1client.ResourceQuotasGetter
5959
// Shared informer for resource quotas
@@ -74,8 +74,8 @@ type ResourceQuotaControllerOptions struct {
7474
ReplenishmentResyncPeriod controller.ResyncPeriodFunc
7575
}
7676

77-
// ResourceQuotaController is responsible for tracking quota usage status in the system
78-
type ResourceQuotaController struct {
77+
// Controller is responsible for tracking quota usage status in the system
78+
type Controller struct {
7979
// Must have authority to list all resources in the system, and update quota status
8080
rqClient corev1client.ResourceQuotasGetter
8181
// A lister/getter of resource quota objects
@@ -100,10 +100,10 @@ type ResourceQuotaController struct {
100100
workerLock sync.RWMutex
101101
}
102102

103-
// NewResourceQuotaController creates a quota controller with specified options
104-
func NewResourceQuotaController(options *ResourceQuotaControllerOptions) (*ResourceQuotaController, error) {
103+
// NewController creates a quota controller with specified options
104+
func NewController(options *ControllerOptions) (*Controller, error) {
105105
// build the resource quota controller
106-
rq := &ResourceQuotaController{
106+
rq := &Controller{
107107
rqClient: options.QuotaClient,
108108
rqLister: options.ResourceQuotaInformer.Lister(),
109109
informerSyncedFuncs: []cache.InformerSynced{options.ResourceQuotaInformer.Informer().HasSynced},
@@ -175,7 +175,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) (*Resou
175175
}
176176

177177
// enqueueAll is called at the fullResyncPeriod interval to force a full recalculation of quota usage statistics
178-
func (rq *ResourceQuotaController) enqueueAll() {
178+
func (rq *Controller) enqueueAll() {
179179
defer klog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage")
180180
rqs, err := rq.rqLister.List(labels.Everything())
181181
if err != nil {
@@ -185,15 +185,15 @@ func (rq *ResourceQuotaController) enqueueAll() {
185185
for i := range rqs {
186186
key, err := controller.KeyFunc(rqs[i])
187187
if err != nil {
188-
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", rqs[i], err))
188+
utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", rqs[i], err))
189189
continue
190190
}
191191
rq.queue.Add(key)
192192
}
193193
}
194194

195195
// obj could be an *v1.ResourceQuota, or a DeletionFinalStateUnknown marker item.
196-
func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
196+
func (rq *Controller) enqueueResourceQuota(obj interface{}) {
197197
key, err := controller.KeyFunc(obj)
198198
if err != nil {
199199
klog.Errorf("Couldn't get key for object %+v: %v", obj, err)
@@ -202,7 +202,7 @@ func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
202202
rq.queue.Add(key)
203203
}
204204

205-
func (rq *ResourceQuotaController) addQuota(obj interface{}) {
205+
func (rq *Controller) addQuota(obj interface{}) {
206206
key, err := controller.KeyFunc(obj)
207207
if err != nil {
208208
klog.Errorf("Couldn't get key for object %+v: %v", obj, err)
@@ -220,7 +220,7 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
220220
// if we declared a constraint that has no usage (which this controller can calculate, prioritize it)
221221
for constraint := range resourceQuota.Status.Hard {
222222
if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound {
223-
matchedResources := []v1.ResourceName{v1.ResourceName(constraint)}
223+
matchedResources := []v1.ResourceName{constraint}
224224
for _, evaluator := range rq.registry.List() {
225225
if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 {
226226
rq.missingUsageQueue.Add(key)
@@ -235,7 +235,7 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
235235
}
236236

237237
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
238-
func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface) func() {
238+
func (rq *Controller) worker(queue workqueue.RateLimitingInterface) func() {
239239
workFunc := func() bool {
240240
key, quit := queue.Get()
241241
if quit {
@@ -265,7 +265,7 @@ func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface)
265265
}
266266

267267
// Run begins quota controller using the specified number of workers
268-
func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
268+
func (rq *Controller) Run(workers int, stopCh <-chan struct{}) {
269269
defer utilruntime.HandleCrash()
270270
defer rq.queue.ShutDown()
271271

@@ -291,7 +291,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
291291
}
292292

293293
// syncResourceQuotaFromKey syncs a quota key
294-
func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
294+
func (rq *Controller) syncResourceQuotaFromKey(key string) (err error) {
295295
startTime := time.Now()
296296
defer func() {
297297
klog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
@@ -301,7 +301,7 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
301301
if err != nil {
302302
return err
303303
}
304-
quota, err := rq.rqLister.ResourceQuotas(namespace).Get(name)
304+
resourceQuota, err := rq.rqLister.ResourceQuotas(namespace).Get(name)
305305
if errors.IsNotFound(err) {
306306
klog.Infof("Resource quota has been deleted %v", key)
307307
return nil
@@ -310,11 +310,11 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
310310
klog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
311311
return err
312312
}
313-
return rq.syncResourceQuota(quota)
313+
return rq.syncResourceQuota(resourceQuota)
314314
}
315315

316316
// syncResourceQuota runs a complete sync of resource quota status across all known kinds
317-
func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err error) {
317+
func (rq *Controller) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err error) {
318318
// quota is dirty if any part of spec hard limits differs from the status hard limits
319319
statusLimitsDirty := !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)
320320

@@ -329,12 +329,12 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ
329329
}
330330
hardLimits := quota.Add(v1.ResourceList{}, resourceQuota.Spec.Hard)
331331

332-
errors := []error{}
332+
var errs []error
333333

334334
newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry, resourceQuota.Spec.ScopeSelector)
335335
if err != nil {
336336
// if err is non-nil, remember it to return, but continue updating status with any resources in newUsage
337-
errors = append(errors, err)
337+
errs = append(errs, err)
338338
}
339339
for key, value := range newUsage {
340340
used[key] = value
@@ -358,14 +358,14 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ
358358
if dirty {
359359
_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(context.TODO(), usage, metav1.UpdateOptions{})
360360
if err != nil {
361-
errors = append(errors, err)
361+
errs = append(errs, err)
362362
}
363363
}
364-
return utilerrors.NewAggregate(errors)
364+
return utilerrors.NewAggregate(errs)
365365
}
366366

367367
// replenishQuota is a replenishment function invoked by a controller to notify that a quota should be recalculated
368-
func (rq *ResourceQuotaController) replenishQuota(groupResource schema.GroupResource, namespace string) {
368+
func (rq *Controller) replenishQuota(groupResource schema.GroupResource, namespace string) {
369369
// check if the quota controller can evaluate this groupResource, if not, ignore it altogether...
370370
evaluator := rq.registry.Get(groupResource)
371371
if evaluator == nil {
@@ -398,7 +398,7 @@ func (rq *ResourceQuotaController) replenishQuota(groupResource schema.GroupReso
398398
}
399399

400400
// Sync periodically resyncs the controller when new resources are observed from discovery.
401-
func (rq *ResourceQuotaController) Sync(discoveryFunc NamespacedResourcesFunc, period time.Duration, stopCh <-chan struct{}) {
401+
func (rq *Controller) Sync(discoveryFunc NamespacedResourcesFunc, period time.Duration, stopCh <-chan struct{}) {
402402
// Something has changed, so track the new state and perform a sync.
403403
oldResources := make(map[schema.GroupVersionResource]struct{})
404404
wait.Until(func() {
@@ -486,7 +486,7 @@ func waitForStopOrTimeout(stopCh <-chan struct{}, timeout time.Duration) <-chan
486486

487487
// resyncMonitors starts or stops quota monitors as needed to ensure that all
488488
// (and only) those resources present in the map are monitored.
489-
func (rq *ResourceQuotaController) resyncMonitors(resources map[schema.GroupVersionResource]struct{}) error {
489+
func (rq *Controller) resyncMonitors(resources map[schema.GroupVersionResource]struct{}) error {
490490
if rq.quotaMonitor == nil {
491491
return nil
492492
}
@@ -510,7 +510,7 @@ func GetQuotableResources(discoveryFunc NamespacedResourcesFunc) (map[schema.Gro
510510
quotableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"create", "list", "watch", "delete"}}, possibleResources)
511511
quotableGroupVersionResources, err := discovery.GroupVersionResources(quotableResources)
512512
if err != nil {
513-
return nil, fmt.Errorf("Failed to parse resources: %v", err)
513+
return nil, fmt.Errorf("failed to parse resources: %v", err)
514514
}
515515
// return the original discovery error (if any) in addition to the list
516516
return quotableGroupVersionResources, discoveryErr

pkg/controller/resourcequota/resource_quota_controller_test.go

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ import (
3939
core "k8s.io/client-go/testing"
4040
"k8s.io/client-go/tools/cache"
4141
"k8s.io/kubernetes/pkg/controller"
42-
quota "k8s.io/kubernetes/pkg/quota/v1"
42+
"k8s.io/kubernetes/pkg/quota/v1"
4343
"k8s.io/kubernetes/pkg/quota/v1/generic"
4444
"k8s.io/kubernetes/pkg/quota/v1/install"
4545
)
@@ -102,7 +102,7 @@ func (errorLister) ByNamespace(namespace string) cache.GenericNamespaceLister {
102102
}
103103

104104
type quotaController struct {
105-
*ResourceQuotaController
105+
*Controller
106106
stop chan struct{}
107107
}
108108

@@ -111,7 +111,7 @@ func setupQuotaController(t *testing.T, kubeClient kubernetes.Interface, lister
111111
quotaConfiguration := install.NewQuotaConfigurationForControllers(lister)
112112
alwaysStarted := make(chan struct{})
113113
close(alwaysStarted)
114-
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
114+
resourceQuotaControllerOptions := &ControllerOptions{
115115
QuotaClient: kubeClient.CoreV1(),
116116
ResourceQuotaInformer: informerFactory.Core().V1().ResourceQuotas(),
117117
ResyncPeriod: controller.NoResyncPeriodFunc,
@@ -122,7 +122,7 @@ func setupQuotaController(t *testing.T, kubeClient kubernetes.Interface, lister
122122
InformersStarted: alwaysStarted,
123123
InformerFactory: informerFactory,
124124
}
125-
qc, err := NewResourceQuotaController(resourceQuotaControllerOptions)
125+
qc, err := NewController(resourceQuotaControllerOptions)
126126
if err != nil {
127127
t.Fatal(err)
128128
}
@@ -1156,15 +1156,15 @@ type fakeServerResources struct {
11561156
InterfaceUsedCount int
11571157
}
11581158

1159-
func (_ *fakeServerResources) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
1159+
func (*fakeServerResources) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
11601160
return nil, nil
11611161
}
11621162

1163-
func (_ *fakeServerResources) ServerResources() ([]*metav1.APIResourceList, error) {
1163+
func (*fakeServerResources) ServerResources() ([]*metav1.APIResourceList, error) {
11641164
return nil, nil
11651165
}
11661166

1167-
func (_ *fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
1167+
func (*fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
11681168
return nil, nil
11691169
}
11701170

pkg/controller/resourcequota/resource_quota_monitor.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ import (
3333
"k8s.io/client-go/tools/cache"
3434
"k8s.io/client-go/util/workqueue"
3535
"k8s.io/kubernetes/pkg/controller"
36-
quota "k8s.io/kubernetes/pkg/quota/v1"
36+
"k8s.io/kubernetes/pkg/quota/v1"
3737
"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
3838
"k8s.io/kubernetes/pkg/quota/v1/generic"
3939
)
@@ -66,6 +66,7 @@ type event struct {
6666
gvr schema.GroupVersionResource
6767
}
6868

69+
// QuotaMonitor contains all necessary information to track quotas and trigger replenishments
6970
type QuotaMonitor struct {
7071
// each monitor list/watches a resource and determines if we should replenish quota
7172
monitors monitors
@@ -101,7 +102,8 @@ type QuotaMonitor struct {
101102
registry quota.Registry
102103
}
103104

104-
func NewQuotaMonitor(informersStarted <-chan struct{}, informerFactory controller.InformerFactory, ignoredResources map[schema.GroupResource]struct{}, resyncPeriod controller.ResyncPeriodFunc, replenishmentFunc ReplenishmentFunc, registry quota.Registry) *QuotaMonitor {
105+
// NewMonitor creates a new instance of a QuotaMonitor
106+
func NewMonitor(informersStarted <-chan struct{}, informerFactory controller.InformerFactory, ignoredResources map[schema.GroupResource]struct{}, resyncPeriod controller.ResyncPeriodFunc, replenishmentFunc ReplenishmentFunc, registry quota.Registry) *QuotaMonitor {
105107
return &QuotaMonitor{
106108
informersStarted: informersStarted,
107109
informerFactory: informerFactory,
@@ -131,8 +133,6 @@ func (m *monitor) Run() {
131133
type monitors map[schema.GroupVersionResource]*monitor
132134

133135
func (qm *QuotaMonitor) controllerFor(resource schema.GroupVersionResource) (cache.Controller, error) {
134-
// TODO: pass this down
135-
clock := clock.RealClock{}
136136
handlers := cache.ResourceEventHandlerFuncs{
137137
UpdateFunc: func(oldObj, newObj interface{}) {
138138
// TODO: leaky abstraction! live w/ it for now, but should pass down an update filter func.
@@ -142,7 +142,7 @@ func (qm *QuotaMonitor) controllerFor(resource schema.GroupVersionResource) (cac
142142
case schema.GroupResource{Resource: "pods"}:
143143
oldPod := oldObj.(*v1.Pod)
144144
newPod := newObj.(*v1.Pod)
145-
notifyUpdate = core.QuotaV1Pod(oldPod, clock) && !core.QuotaV1Pod(newPod, clock)
145+
notifyUpdate = core.QuotaV1Pod(oldPod, clock.RealClock{}) && !core.QuotaV1Pod(newPod, clock.RealClock{})
146146
case schema.GroupResource{Resource: "services"}:
147147
oldService := oldObj.(*v1.Service)
148148
newService := newObj.(*v1.Service)
@@ -199,7 +199,7 @@ func (qm *QuotaMonitor) SyncMonitors(resources map[schema.GroupVersionResource]s
199199
toRemove = monitors{}
200200
}
201201
current := monitors{}
202-
errs := []error{}
202+
var errs []error
203203
kept := 0
204204
added := 0
205205
for resource := range resources {

test/integration/quota/quota_test.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ func TestQuota(t *testing.T) {
102102
listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
103103
qc := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource)
104104
informersStarted := make(chan struct{})
105-
resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
105+
resourceQuotaControllerOptions := &resourcequotacontroller.ControllerOptions{
106106
QuotaClient: clientset.CoreV1(),
107107
ResourceQuotaInformer: informers.Core().V1().ResourceQuotas(),
108108
ResyncPeriod: controller.NoResyncPeriodFunc,
@@ -113,7 +113,7 @@ func TestQuota(t *testing.T) {
113113
InformersStarted: informersStarted,
114114
Registry: generic.NewRegistry(qc.Evaluators()),
115115
}
116-
resourceQuotaController, err := resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions)
116+
resourceQuotaController, err := resourcequotacontroller.NewController(resourceQuotaControllerOptions)
117117
if err != nil {
118118
t.Fatalf("unexpected err: %v", err)
119119
}
@@ -300,7 +300,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
300300
listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
301301
qc := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource)
302302
informersStarted := make(chan struct{})
303-
resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
303+
resourceQuotaControllerOptions := &resourcequotacontroller.ControllerOptions{
304304
QuotaClient: clientset.CoreV1(),
305305
ResourceQuotaInformer: informers.Core().V1().ResourceQuotas(),
306306
ResyncPeriod: controller.NoResyncPeriodFunc,
@@ -311,7 +311,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
311311
InformersStarted: informersStarted,
312312
Registry: generic.NewRegistry(qc.Evaluators()),
313313
}
314-
resourceQuotaController, err := resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions)
314+
resourceQuotaController, err := resourcequotacontroller.NewController(resourceQuotaControllerOptions)
315315
if err != nil {
316316
t.Fatalf("unexpected err: %v", err)
317317
}

0 commit comments

Comments (0)