
Commit 41a81b1

feat: support applying tolerations to cleanup jobs
At the moment, if the DaemonSet is deployed with tolerations and `useJobForCleaning: true`, those tolerations are not transferred to the Jobs it deploys. If, for example, the DaemonSet tolerates running on control-plane nodes, the cleanup Jobs it creates targeting those nodes will never be scheduled, because they lack the toleration. By adding a tolerations field to the config, the Helm chart can populate it automatically from `.Values.tolerations`. All deployed Jobs now inherit the same tolerations as the parent DaemonSet.
1 parent: b6f4650 · commit: 41a81b1
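
For illustration, a minimal values file exercising the new path might look like the sketch below (assuming the chart's flat values layout visible in the configmap template further down; the toleration mirrors the regenerated example in this commit):

useJobForCleaning: true
tolerations:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master

With these values the chart renders the same toleration list in two places: the DaemonSet pod spec (as before) and the new `jobTolerations` key of the provisioner ConfigMap, which the provisioner then applies to every cleanup Job it creates.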

File tree

helm/generated_examples/baremetal-tolerations.yaml
helm/provisioner/templates/configmap.yaml
pkg/common/common.go
pkg/deleter/deleter.go
pkg/deleter/jobcontroller.go

5 files changed: +16 −3 lines changed

helm/generated_examples/baremetal-tolerations.yaml

Lines changed: 4 additions & 1 deletion
@@ -23,6 +23,9 @@ metadata:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: local-static-provisioner
 data:
+  jobTolerations: |
+    - effect: NoSchedule
+      key: node-role.kubernetes.io/master
   storageClassMap: |
     local-storage:
       hostDir: /mnt/disks
@@ -114,7 +117,7 @@ spec:
         app.kubernetes.io/name: local-static-provisioner
         app.kubernetes.io/instance: local-static-provisioner
       annotations:
-        checksum/config: f81e575a8ce66fff1873e5bae2df0f963609f540da196b9a86c3146a94d284b8
+        checksum/config: bdea962be4bc6072011b44367cc56d21c61868009d4cb63b6415c1c27695ce96
     spec:
       serviceAccountName: local-static-provisioner
       nodeSelector:

helm/provisioner/templates/configmap.yaml

Lines changed: 3 additions & 0 deletions
@@ -27,6 +27,9 @@ data:
 {{- if .Values.useJobForCleaning }}
   useJobForCleaning: "yes"
 {{- end}}
+{{- if .Values.tolerations }}
+  jobTolerations: | {{ toYaml .Values.tolerations | nindent 4 }}
+{{- end }}
 {{- if .Values.useNodeNameOnly }}
   useNodeNameOnly: "true"
 {{- end }}
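
Given the values sketched above, this fragment should render roughly as follows: `toYaml` serializes the list and `nindent 4` re-indents it under the `jobTolerations` literal block scalar, matching the regenerated baremetal-tolerations.yaml example above.

data:
  useJobForCleaning: "yes"
  jobTolerations: |
    - effect: NoSchedule
      key: node-role.kubernetes.io/master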

pkg/common/common.go

Lines changed: 6 additions & 0 deletions
@@ -108,6 +108,8 @@ type UserConfig struct {
     Namespace string
     // JobContainerImage of container to use for jobs (optional)
     JobContainerImage string
+    // JobTolerations defines the tolerations to apply to jobs (optional)
+    JobTolerations []v1.Toleration
     // MinResyncPeriod is minimum resync period. Resync period in reflectors
     // will be random between MinResyncPeriod and 2*MinResyncPeriod.
     MinResyncPeriod metav1.Duration
@@ -205,6 +207,9 @@ type ProvisionerConfiguration struct {
     // default is false.
     // +optional
     UseJobForCleaning bool `json:"useJobForCleaning" yaml:"useJobForCleaning"`
+    // JobTolerations defines the tolerations to apply to jobs
+    // +optional
+    JobTolerations []v1.Toleration `json:"jobTolerations" yaml:"jobTolerations"`
     // MinResyncPeriod is minimum resync period. Resync period in reflectors
     // will be random between MinResyncPeriod and 2*MinResyncPeriod.
     MinResyncPeriod metav1.Duration `json:"minResyncPeriod" yaml:"minResyncPeriod"`
@@ -397,6 +402,7 @@ func UserConfigFromProvisionerConfig(node *v1.Node, namespace, jobImage string,
         UseNodeNameOnly:   config.UseNodeNameOnly,
         Namespace:         namespace,
         JobContainerImage: jobImage,
+        JobTolerations:    config.JobTolerations,
         LabelsForPV:       config.LabelsForPV,
         SetPVOwnerRef:     config.SetPVOwnerRef,
     }

pkg/deleter/deleter.go

Lines changed: 1 addition & 1 deletion
@@ -358,7 +358,7 @@ func (d *Deleter) runJob(pv *v1.PersistentVolume, volMode v1.PersistentVolumeMod
     if d.JobContainerImage == "" {
         return fmt.Errorf("cannot run cleanup job without specifying job image name in the environment variable")
     }
-    job, err := NewCleanupJob(pv, volMode, d.JobContainerImage, d.Node.Name, d.Namespace, mountPath, config)
+    job, err := NewCleanupJob(pv, volMode, d.JobContainerImage, d.JobTolerations, d.Node.Name, d.Namespace, mountPath, config)
     if err != nil {
         return err
     }

pkg/deleter/jobcontroller.go

Lines changed: 2 additions & 1 deletion
@@ -253,7 +253,7 @@ func (c *jobController) RemoveJob(pvName string) (CleanupState, *time.Time, erro
 }
 
 // NewCleanupJob creates manifest for a cleaning job.
-func NewCleanupJob(pv *apiv1.PersistentVolume, volMode apiv1.PersistentVolumeMode, imageName string, nodeName string, namespace string, mountPath string, config common.MountConfig) (*batch_v1.Job, error) {
+func NewCleanupJob(pv *apiv1.PersistentVolume, volMode apiv1.PersistentVolumeMode, imageName string, tolerations []apiv1.Toleration, nodeName string, namespace string, mountPath string, config common.MountConfig) (*batch_v1.Job, error) {
     priv := true
     // Container definition
     jobContainer := apiv1.Container{
@@ -325,6 +325,7 @@ func NewCleanupJob(pv *apiv1.PersistentVolume, volMode apiv1.PersistentVolumeMod
         Containers:   []apiv1.Container{jobContainer},
         Volumes:      volumes,
         NodeSelector: map[string]string{common.NodeNameLabel: nodeName},
+        Tolerations:  tolerations,
     }
     podTemplate.ObjectMeta = meta_v1.ObjectMeta{
         Name: generateCleaningJobName(pv.Name),
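
End to end, the pod template of a cleanup Job built by NewCleanupJob should now carry the tolerations next to the existing node selector. A sketch of the relevant fragment of the resulting Job manifest, assuming common.NodeNameLabel resolves to kubernetes.io/hostname and the example toleration above (the node name is hypothetical):

spec:
  template:
    spec:
      nodeSelector:
        kubernetes.io/hostname: node-1        # pins the Job to the PV's node
      tolerations:                            # inherited via jobTolerations
        - effect: NoSchedule
          key: node-role.kubernetes.io/master

Without the toleration, a Job pinned by that node selector to a tainted control-plane node would sit Pending indefinitely, which is exactly the failure mode the commit message describes.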
