
Commit b17ddac

Merge pull request kubernetes#78944 from avorima/golint_fix_job
Fix golint errors in pkg/controller/job
2 parents: 6239abe + 6ac7421

6 files changed: +55 -50 lines changed

cmd/kube-controller-manager/app/batch.go

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ func startJobController(ctx ControllerContext) (http.Handler, bool, error) {
 	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}] {
 		return nil, false, nil
 	}
-	go job.NewJobController(
+	go job.NewController(
 		ctx.InformerFactory.Core().V1().Pods(),
 		ctx.InformerFactory.Batch().V1().Jobs(),
 		ctx.ClientBuilder.ClientOrDie("job-controller"),
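For orientation, the call site above is the only consumer touched by the rename: the controller manager builds the job controller from shared informers and a client, then runs it with a number of workers. A minimal sketch of that wiring, assuming caller-side names such as informerFactory, kubeClient, workers and stopCh that are not part of this commit:

	// Sketch only: starting the renamed controller the way startJobController does.
	// informerFactory, kubeClient, workers and stopCh are assumed to exist in the caller.
	ctrl := job.NewController(
		informerFactory.Core().V1().Pods(),
		informerFactory.Batch().V1().Jobs(),
		kubeClient,
	)
	informerFactory.Start(stopCh) // start the shared informers feeding the controller's listers
	go ctrl.Run(workers, stopCh)  // Run blocks until stopCh is closed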

hack/.golint_failures

Lines changed: 0 additions & 1 deletion
@@ -64,7 +64,6 @@ pkg/controller/endpoint/config/v1alpha1
 pkg/controller/endpointslice/config/v1alpha1
 pkg/controller/garbagecollector
 pkg/controller/garbagecollector/config/v1alpha1
-pkg/controller/job
 pkg/controller/job/config/v1alpha1
 pkg/controller/namespace
 pkg/controller/namespace/config/v1alpha1

pkg/controller/job/job_controller.go

Lines changed: 22 additions & 18 deletions
@@ -61,7 +61,9 @@ var (
 	MaxJobBackOff = 360 * time.Second
 )
 
-type JobController struct {
+// Controller ensures that all Job objects have corresponding pods to
+// run their configured workload.
+type Controller struct {
 	kubeClient clientset.Interface
 	podControl controller.PodControlInterface
 
@@ -90,7 +92,9 @@ type JobController struct {
 	recorder record.EventRecorder
 }
 
-func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *JobController {
+// NewController creates a new Job controller that keeps the relevant pods
+// in sync with their corresponding Job objects.
+func NewController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *Controller {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(klog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
@@ -99,7 +103,7 @@ func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchin
 		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
 	}
 
-	jm := &JobController{
+	jm := &Controller{
 		kubeClient: kubeClient,
 		podControl: controller.RealPodControl{
 			KubeClient: kubeClient,
@@ -137,7 +141,7 @@ func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchin
 }
 
 // Run the main goroutine responsible for watching and syncing jobs.
-func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
+func (jm *Controller) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer jm.queue.ShutDown()
 
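Run follows the usual controller loop: wait for the informer caches to sync, then fan out a fixed number of workers until the stop channel closes. A hedged sketch of that overall shape, using packages the file already imports; the cache-sync field names and the error message are assumptions for illustration, not lines from this hunk:

	// Sketch (not the file's exact code) of the typical Run body, assuming
	// jm.podStoreSynced / jm.jobStoreSynced cache-sync functions and the
	// client-go cache and wait packages.
	func (jm *Controller) Run(workers int, stopCh <-chan struct{}) {
		defer utilruntime.HandleCrash()
		defer jm.queue.ShutDown()
	
		if !cache.WaitForCacheSync(stopCh, jm.podStoreSynced, jm.jobStoreSynced) {
			utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
			return
		}
	
		for i := 0; i < workers; i++ {
			go wait.Until(jm.worker, time.Second, stopCh) // each worker drains the queue until stopped
		}
	
		<-stopCh
	}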
@@ -156,7 +160,7 @@ func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
 }
 
 // getPodJobs returns a list of Jobs that potentially match a Pod.
-func (jm *JobController) getPodJobs(pod *v1.Pod) []*batch.Job {
+func (jm *Controller) getPodJobs(pod *v1.Pod) []*batch.Job {
 	jobs, err := jm.jobLister.GetPodJobs(pod)
 	if err != nil {
 		return nil
@@ -176,7 +180,7 @@ func (jm *JobController) getPodJobs(pod *v1.Pod) []*batch.Job {
 // resolveControllerRef returns the controller referenced by a ControllerRef,
 // or nil if the ControllerRef could not be resolved to a matching controller
 // of the correct Kind.
-func (jm *JobController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *batch.Job {
+func (jm *Controller) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *batch.Job {
 	// We can't look up by UID, so look up by Name and then verify UID.
 	// Don't even try to look up by Name if it's the wrong Kind.
 	if controllerRef.Kind != controllerKind.Kind {
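The comment in this hunk describes the standard ControllerRef resolution rule: look the owner up by name and accept it only if the UID also matches, so a Job recreated under the same name is never adopted by accident. A hedged sketch of how the rest of such a method typically reads; the lister call and comments are illustrative, not quoted from this file:

	// Sketch (not the file's exact code) of how the method typically finishes.
	func (jm *Controller) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *batch.Job {
		if controllerRef.Kind != controllerKind.Kind {
			return nil
		}
		job, err := jm.jobLister.Jobs(namespace).Get(controllerRef.Name)
		if err != nil {
			return nil
		}
		if job.UID != controllerRef.UID {
			// A Job with the right name exists, but it is not the object the
			// ControllerRef points to (e.g. deleted and recreated under the same name).
			return nil
		}
		return job
	}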
@@ -195,7 +199,7 @@ func (jm *JobController) resolveControllerRef(namespace string, controllerRef *m
 }
 
 // When a pod is created, enqueue the controller that manages it and update it's expectations.
-func (jm *JobController) addPod(obj interface{}) {
+func (jm *Controller) addPod(obj interface{}) {
 	pod := obj.(*v1.Pod)
 	if pod.DeletionTimestamp != nil {
 		// on a restart of the controller controller, it's possible a new pod shows up in a state that
@@ -231,7 +235,7 @@ func (jm *JobController) addPod(obj interface{}) {
 // When a pod is updated, figure out what job/s manage it and wake them up.
 // If the labels of the pod have changed we need to awaken both the old
 // and new job. old and cur must be *v1.Pod types.
-func (jm *JobController) updatePod(old, cur interface{}) {
+func (jm *Controller) updatePod(old, cur interface{}) {
 	curPod := cur.(*v1.Pod)
 	oldPod := old.(*v1.Pod)
 	if curPod.ResourceVersion == oldPod.ResourceVersion {
@@ -283,7 +287,7 @@ func (jm *JobController) updatePod(old, cur interface{}) {
 
 // When a pod is deleted, enqueue the job that manages the pod and update its expectations.
 // obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
-func (jm *JobController) deletePod(obj interface{}) {
+func (jm *Controller) deletePod(obj interface{}) {
 	pod, ok := obj.(*v1.Pod)
 
 	// When a delete is dropped, the relist will notice a pod in the store not
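deletePod has to cope with the case the comment above mentions: when the delete notification was dropped, the informer hands the handler a DeletedFinalStateUnknown tombstone instead of a Pod. A hedged sketch of that unwrapping pattern, written as a hypothetical helper that does not exist in this file; the error messages are illustrative:

	// Sketch: a purely illustrative helper showing the tombstone-unwrapping
	// pattern deletePod relies on (not code from this commit).
	func podFromDeleteEvent(obj interface{}) (*v1.Pod, bool) {
		if pod, ok := obj.(*v1.Pod); ok {
			return pod, true
		}
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %+v", obj))
			return nil, false
		}
		pod, ok := tombstone.Obj.(*v1.Pod)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a pod %+v", obj))
			return nil, false
		}
		return pod, true
	}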
@@ -320,7 +324,7 @@ func (jm *JobController) deletePod(obj interface{}) {
 	jm.enqueueController(job, true)
 }
 
-func (jm *JobController) updateJob(old, cur interface{}) {
+func (jm *Controller) updateJob(old, cur interface{}) {
 	oldJob := old.(*batch.Job)
 	curJob := cur.(*batch.Job)
 
@@ -352,7 +356,7 @@ func (jm *JobController) updateJob(old, cur interface{}) {
 // obj could be an *batch.Job, or a DeletionFinalStateUnknown marker item,
 // immediate tells the controller to update the status right away, and should
 // happen ONLY when there was a successful pod run.
-func (jm *JobController) enqueueController(obj interface{}, immediate bool) {
+func (jm *Controller) enqueueController(obj interface{}, immediate bool) {
 	key, err := controller.KeyFunc(obj)
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
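Per the comment in this hunk, enqueueController either requeues the key right away or applies a backoff delay. A hedged sketch of how the immediate flag is commonly turned into a delayed add on a client-go workqueue; the getBackoff helper named here is an assumption for illustration:

	// Sketch (assumptions as noted) of how the method typically continues after
	// the key lookup shown above; jm.queue is assumed to be a
	// workqueue.RateLimitingInterface, which provides AddAfter.
	backoff := time.Duration(0)
	if !immediate {
		backoff = getBackoff(jm.queue, key) // assumed helper computing a per-key backoff
	}
	jm.queue.AddAfter(key, backoff) // the key becomes visible to workers after the delay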
@@ -375,12 +379,12 @@ func (jm *JobController) enqueueController(obj interface{}, immediate bool) {
 
 // worker runs a worker thread that just dequeues items, processes them, and marks them done.
 // It enforces that the syncHandler is never invoked concurrently with the same key.
-func (jm *JobController) worker() {
+func (jm *Controller) worker() {
 	for jm.processNextWorkItem() {
 	}
 }
 
-func (jm *JobController) processNextWorkItem() bool {
+func (jm *Controller) processNextWorkItem() bool {
 	key, quit := jm.queue.Get()
 	if quit {
 		return false
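worker and processNextWorkItem implement the standard client-go workqueue loop: take a key, mark it done when processing finishes, forget it on success, and rate-limit re-adds on failure. A hedged sketch of the full shape of that loop, assuming jm.queue is a workqueue.RateLimitingInterface and jm.syncHandler is a func(string) (bool, error) bound to syncJob; the exact error message is illustrative:

	// Sketch (not the file's exact code) of the workqueue loop shape.
	func (jm *Controller) processNextWorkItem() bool {
		key, quit := jm.queue.Get()
		if quit {
			return false
		}
		defer jm.queue.Done(key)
	
		forget, err := jm.syncHandler(key.(string))
		if err == nil {
			if forget {
				jm.queue.Forget(key) // success: drop any rate-limiting history for this key
			}
			return true
		}
	
		utilruntime.HandleError(fmt.Errorf("syncing job %q failed: %v", key, err))
		jm.queue.AddRateLimited(key) // failure: retry later with backoff
		return true
	}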
@@ -404,7 +408,7 @@ func (jm *JobController) processNextWorkItem() bool {
 // getPodsForJob returns the set of pods that this Job should manage.
 // It also reconciles ControllerRef by adopting/orphaning.
 // Note that the returned Pods are pointers into the cache.
-func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
+func (jm *Controller) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
 	selector, err := metav1.LabelSelectorAsSelector(j.Spec.Selector)
 	if err != nil {
 		return nil, fmt.Errorf("couldn't convert Job selector: %v", err)
@@ -434,7 +438,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
 // syncJob will sync the job with the given key if it has had its expectations fulfilled, meaning
 // it did not expect to see any more of its pods created or deleted. This function is not meant to be invoked
 // concurrently with the same key.
-func (jm *JobController) syncJob(key string) (bool, error) {
+func (jm *Controller) syncJob(key string) (bool, error) {
 	startTime := time.Now()
 	defer func() {
 		klog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
@@ -601,7 +605,7 @@ func (jm *JobController) syncJob(key string) (bool, error) {
 	return forget, manageJobErr
 }
 
-func (jm *JobController) deleteJobPods(job *batch.Job, pods []*v1.Pod, errCh chan<- error) {
+func (jm *Controller) deleteJobPods(job *batch.Job, pods []*v1.Pod, errCh chan<- error) {
 	// TODO: below code should be replaced with pod termination resulting in
 	// pod failures, rather than killing pods. Unfortunately none such solution
 	// exists ATM. There's an open discussion in the topic in
@@ -683,7 +687,7 @@ func getStatus(pods []*v1.Pod) (succeeded, failed int32) {
 // manageJob is the core method responsible for managing the number of running
 // pods according to what is specified in the job.Spec.
 // Does NOT modify <activePods>.
-func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *batch.Job) (int32, error) {
+func (jm *Controller) manageJob(activePods []*v1.Pod, succeeded int32, job *batch.Job) (int32, error) {
 	var activeLock sync.Mutex
 	active := int32(len(activePods))
 	parallelism := *job.Spec.Parallelism
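Per its doc comment, manageJob reconciles the number of active pods against job.Spec.Parallelism and the remaining completions. A heavily simplified, hedged sketch of that reconciliation, continuing from the variables shown above; it ignores the batched pod creation, expectations bookkeeping and error collection the real method performs, and the scale-down ordering here is arbitrary:

	// Simplified sketch only: scale active pods toward the desired parallelism.
	wantActive := parallelism
	if job.Spec.Completions != nil {
		// Never run more pods than there are completions still outstanding.
		if remaining := *job.Spec.Completions - succeeded; remaining < wantActive {
			wantActive = remaining
		}
	}
	
	if active > wantActive {
		// Too many pods: delete the excess (the real code does this concurrently
		// and records expectations before each delete).
		for _, pod := range activePods[:active-wantActive] {
			_ = jm.podControl.DeletePod(job.Namespace, pod.Name, job)
		}
	} else if active < wantActive {
		// Too few pods: create the difference from the job's pod template.
		for i := int32(0); i < wantActive-active; i++ {
			_ = jm.podControl.CreatePodsWithControllerRef(job.Namespace, &job.Spec.Template, job, metav1.NewControllerRef(job, controllerKind))
		}
	}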
@@ -822,7 +826,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
 	return active, nil
 }
 
-func (jm *JobController) updateJobStatus(job *batch.Job) error {
+func (jm *Controller) updateJobStatus(job *batch.Job) error {
 	jobClient := jm.kubeClient.BatchV1().Jobs(job.Namespace)
 	var err error
 	for i := 0; i <= statusUpdateRetries; i = i + 1 {
