
Commit dbbe686

Author/committer: Mario Valderrama

Fix golint errors in pkg/controller/job

1 parent: 56b4006
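For context, the golint findings behind this change are the standard ones for a package already named job: exported identifiers need doc comments, and a type name that repeats the package name stutters. A minimal illustrative sketch follows (the identifiers match the diff below; the quoted messages are paraphrased golint wording, not taken from this commit's CI output):

// Package job (illustrative snippet, not part of this commit).
package job

// Before: golint reports, roughly,
//   "exported type JobController should have comment or be unexported"
//   "type name will be used as job.JobController by other packages, and that
//    stutters; consider calling this Controller"
type JobController struct{}

// After: rename away the stutter and add a doc comment, as the diff below does.

// Controller is a controller for Jobs.
type Controller struct{}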

File tree: 6 files changed, +53 -50 lines


cmd/kube-controller-manager/app/batch.go

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ func startJobController(ctx ControllerContext) (http.Handler, bool, error) {
    if !ctx.AvailableResources[schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}] {
        return nil, false, nil
    }
-   go job.NewJobController(
+   go job.NewController(
        ctx.InformerFactory.Core().V1().Pods(),
        ctx.InformerFactory.Batch().V1().Jobs(),
        ctx.ClientBuilder.ClientOrDie("job-controller"),

hack/.golint_failures

Lines changed: 0 additions & 1 deletion
@@ -68,7 +68,6 @@ pkg/controller/endpoint
pkg/controller/endpoint/config/v1alpha1
pkg/controller/garbagecollector
pkg/controller/garbagecollector/config/v1alpha1
- pkg/controller/job
pkg/controller/job/config/v1alpha1
pkg/controller/namespace
pkg/controller/namespace/config/v1alpha1
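hack/.golint_failures is the repository's list of packages temporarily exempted from the golint check; dropping pkg/controller/job from it means the package is now expected to stay lint-clean, which the verification script (hack/verify-golint.sh, if memory serves) enforces.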

pkg/controller/job/job_controller.go

Lines changed: 20 additions & 18 deletions
@@ -60,7 +60,8 @@ var (
    MaxJobBackOff = 360 * time.Second
)

- type JobController struct {
+ // Controller is a controller for Jobs.
+ type Controller struct {
    kubeClient clientset.Interface
    podControl controller.PodControlInterface

@@ -89,7 +90,8 @@ type JobController struct {
    recorder record.EventRecorder
}

- func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *JobController {
+ // NewController creates a new Controller.
+ func NewController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *Controller {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(klog.Infof)
    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
@@ -98,7 +100,7 @@ func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchin
        metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
    }

-   jm := &JobController{
+   jm := &Controller{
        kubeClient: kubeClient,
        podControl: controller.RealPodControl{
            KubeClient: kubeClient,
@@ -136,7 +138,7 @@ func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchin
}

// Run the main goroutine responsible for watching and syncing jobs.
- func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
+ func (jm *Controller) Run(workers int, stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    defer jm.queue.ShutDown()

@@ -155,7 +157,7 @@ func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
}

// getPodJobs returns a list of Jobs that potentially match a Pod.
- func (jm *JobController) getPodJobs(pod *v1.Pod) []*batch.Job {
+ func (jm *Controller) getPodJobs(pod *v1.Pod) []*batch.Job {
    jobs, err := jm.jobLister.GetPodJobs(pod)
    if err != nil {
        return nil
@@ -175,7 +177,7 @@ func (jm *JobController) getPodJobs(pod *v1.Pod) []*batch.Job {
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
- func (jm *JobController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *batch.Job {
+ func (jm *Controller) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *batch.Job {
    // We can't look up by UID, so look up by Name and then verify UID.
    // Don't even try to look up by Name if it's the wrong Kind.
    if controllerRef.Kind != controllerKind.Kind {
@@ -194,7 +196,7 @@ func (jm *JobController) resolveControllerRef(namespace string, controllerRef *m
}

// When a pod is created, enqueue the controller that manages it and update it's expectations.
- func (jm *JobController) addPod(obj interface{}) {
+ func (jm *Controller) addPod(obj interface{}) {
    pod := obj.(*v1.Pod)
    if pod.DeletionTimestamp != nil {
        // on a restart of the controller controller, it's possible a new pod shows up in a state that
@@ -230,7 +232,7 @@ func (jm *JobController) addPod(obj interface{}) {
// When a pod is updated, figure out what job/s manage it and wake them up.
// If the labels of the pod have changed we need to awaken both the old
// and new job. old and cur must be *v1.Pod types.
- func (jm *JobController) updatePod(old, cur interface{}) {
+ func (jm *Controller) updatePod(old, cur interface{}) {
    curPod := cur.(*v1.Pod)
    oldPod := old.(*v1.Pod)
    if curPod.ResourceVersion == oldPod.ResourceVersion {
@@ -282,7 +284,7 @@ func (jm *JobController) updatePod(old, cur interface{}) {

// When a pod is deleted, enqueue the job that manages the pod and update its expectations.
// obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
- func (jm *JobController) deletePod(obj interface{}) {
+ func (jm *Controller) deletePod(obj interface{}) {
    pod, ok := obj.(*v1.Pod)

    // When a delete is dropped, the relist will notice a pod in the store not
@@ -319,7 +321,7 @@ func (jm *JobController) deletePod(obj interface{}) {
    jm.enqueueController(job, true)
}

- func (jm *JobController) updateJob(old, cur interface{}) {
+ func (jm *Controller) updateJob(old, cur interface{}) {
    oldJob := old.(*batch.Job)
    curJob := cur.(*batch.Job)

@@ -351,7 +353,7 @@ func (jm *JobController) updateJob(old, cur interface{}) {
// obj could be an *batch.Job, or a DeletionFinalStateUnknown marker item,
// immediate tells the controller to update the status right away, and should
// happen ONLY when there was a successful pod run.
- func (jm *JobController) enqueueController(obj interface{}, immediate bool) {
+ func (jm *Controller) enqueueController(obj interface{}, immediate bool) {
    key, err := controller.KeyFunc(obj)
    if err != nil {
        utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
@@ -374,12 +376,12 @@ func (jm *JobController) enqueueController(obj interface{}, immediate bool) {

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
- func (jm *JobController) worker() {
+ func (jm *Controller) worker() {
    for jm.processNextWorkItem() {
    }
}

- func (jm *JobController) processNextWorkItem() bool {
+ func (jm *Controller) processNextWorkItem() bool {
    key, quit := jm.queue.Get()
    if quit {
        return false
@@ -403,7 +405,7 @@ func (jm *JobController) processNextWorkItem() bool {
// getPodsForJob returns the set of pods that this Job should manage.
// It also reconciles ControllerRef by adopting/orphaning.
// Note that the returned Pods are pointers into the cache.
- func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
+ func (jm *Controller) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
    selector, err := metav1.LabelSelectorAsSelector(j.Spec.Selector)
    if err != nil {
        return nil, fmt.Errorf("couldn't convert Job selector: %v", err)
@@ -433,7 +435,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
// syncJob will sync the job with the given key if it has had its expectations fulfilled, meaning
// it did not expect to see any more of its pods created or deleted. This function is not meant to be invoked
// concurrently with the same key.
- func (jm *JobController) syncJob(key string) (bool, error) {
+ func (jm *Controller) syncJob(key string) (bool, error) {
    startTime := time.Now()
    defer func() {
        klog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
@@ -599,7 +601,7 @@ func (jm *JobController) syncJob(key string) (bool, error) {
    return forget, manageJobErr
}

- func (jm *JobController) deleteJobPods(job *batch.Job, pods []*v1.Pod, errCh chan<- error) {
+ func (jm *Controller) deleteJobPods(job *batch.Job, pods []*v1.Pod, errCh chan<- error) {
    // TODO: below code should be replaced with pod termination resulting in
    // pod failures, rather than killing pods. Unfortunately none such solution
    // exists ATM. There's an open discussion in the topic in
@@ -681,7 +683,7 @@ func getStatus(pods []*v1.Pod) (succeeded, failed int32) {
// manageJob is the core method responsible for managing the number of running
// pods according to what is specified in the job.Spec.
// Does NOT modify <activePods>.
- func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *batch.Job) (int32, error) {
+ func (jm *Controller) manageJob(activePods []*v1.Pod, succeeded int32, job *batch.Job) (int32, error) {
    var activeLock sync.Mutex
    active := int32(len(activePods))
    parallelism := *job.Spec.Parallelism
@@ -820,7 +822,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
    return active, nil
}

- func (jm *JobController) updateJobStatus(job *batch.Job) error {
+ func (jm *Controller) updateJobStatus(job *batch.Job) error {
    jobClient := jm.kubeClient.BatchV1().Jobs(job.Namespace)
    var err error
    for i := 0; i <= statusUpdateRetries; i = i + 1 {
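To tie the two code diffs together, here is a minimal sketch of how an in-tree caller would wire up the renamed controller, mirroring the batch.go call site above. The kubeconfig loading, informer factory setup, and worker count are illustrative assumptions, not part of this commit; the controller-manager itself uses its ControllerContext and ClientBuilder instead.

package main

import (
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	"k8s.io/kubernetes/pkg/controller/job"
)

func main() {
	// Illustrative client setup; "/path/to/kubeconfig" is a placeholder.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	kubeClient := kubernetes.NewForConfigOrDie(config)

	// Shared informers feed the controller's pod and job listers.
	factory := informers.NewSharedInformerFactory(kubeClient, 0)

	// NewController (formerly NewJobController) takes the pod informer,
	// the job informer, and a clientset, and returns a *job.Controller.
	jm := job.NewController(
		factory.Core().V1().Pods(),
		factory.Batch().V1().Jobs(),
		kubeClient,
	)

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)

	// Run (formerly a *JobController method) starts the sync workers and
	// blocks until stopCh is closed; 5 workers is an arbitrary example value.
	jm.Run(5, stopCh)
}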
