@@ -63,18 +63,17 @@ const controllerTemplate = `{{ .Boilerplate }}
package {{ if and .MultiGroup .Resource.Group }}{{ .Resource.PackageName }}{{ else }}controllers{{ end }}

import (
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
-
"context"
"strings"
"time"
"fmt"
"os"

+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
@@ -95,7 +94,8 @@ type {{ .Resource.Kind }}Reconciler struct {
Scheme *runtime.Scheme
Recorder record.EventRecorder
}
- // The following markers are used to generate the rules permissions on config/rbac using controller-gen
+
+ // The following markers are used to generate the rules permissions (RBAC) on config/rbac using controller-gen
// when the command <make manifests> is executed.
// To know more about markers see: https://book.kubebuilder.io/reference/markers.html

@@ -109,28 +109,27 @@ type {{ .Resource.Kind }}Reconciler struct {
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.

- // Note: It is essential for the controller's reconciliation loop to be idempotent. By following the Operator
- // pattern(https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) you will create
- // Controllers(https://kubernetes.io/docs/concepts/architecture/controller/) which provide a reconcile function
- // responsible for synchronizing resources until the desired state is reached on the cluster. Breaking this
- // recommendation goes against the design principles of Controller-runtime(https://github.com/kubernetes-sigs/controller-runtime)
+ // It is essential for the controller's reconciliation loop to be idempotent. By following the Operator
+ // pattern you will create Controllers which provide a reconcile function
+ // responsible for synchronizing resources until the desired state is reached on the cluster.
+ // Breaking this recommendation goes against the design principles of controller-runtime
// and may lead to unforeseen consequences such as resources becoming stuck and requiring manual intervention.
- //
- // For more details, check Reconcile and its Result here:
+ // For further info:
+ // - About Operator Pattern: https://kubernetes.io/docs/concepts/extend-kubernetes/operator/
+ // - About Controllers: https://kubernetes.io/docs/concepts/architecture/controller/
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@{{ .ControllerRuntimeVersion }}/pkg/reconcile
func (r *{{ .Resource.Kind }}Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx)

// Fetch the {{ .Resource.Kind }} instance
// The purpose is check if the Custom Resource for the Kind {{ .Resource.Kind }}
- // is applied on the cluster if not we return nill to stop the reconciliation
+ // is applied on the cluster, if not we return nil to stop the reconciliation
{{ lower .Resource.Kind }} := &{{ .Resource.ImportAlias }}.{{ .Resource.Kind }}{}
err := r.Get(ctx, req.NamespacedName, {{ lower .Resource.Kind }})
if err != nil {
if apierrors.IsNotFound(err) {
- // Request object not found, could have been deleted after reconcile request.
- // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
- // Return and don't requeue
+ // If the custom resource is not found then it usually means that it was deleted or not created
+ // In this way, we will stop the reconciliation
log.Info("{{ lower .Resource.Kind }} resource not found. Ignoring since object must be deleted")
return ctrl.Result{}, nil
}
@@ -141,15 +140,16 @@ func (r *{{ .Resource.Kind }}Reconciler) Reconcile(ctx context.Context, req ctrl

// Let's add a finalizer. Then, we can define some operations which should
// occurs before the custom resource to be deleted.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/finalizers/
- // NOTE: You should not use finalizer to delete the resources that are
- // created in this reconciliation and have the ownerRef set by ctrl.SetControllerReference
- // because these will get deleted via k8s api
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/finalizers
if !controllerutil.ContainsFinalizer({{ lower .Resource.Kind }}, {{ lower .Resource.Kind }}Finalizer) {
log.Info("Adding Finalizer for {{ .Resource.Kind }}")
- controllerutil.AddFinalizer({{ lower .Resource.Kind }}, {{ lower .Resource.Kind }}Finalizer)
- err = r.Update(ctx, {{ lower .Resource.Kind }})
- if err != nil {
+ if ok := controllerutil.AddFinalizer({{ lower .Resource.Kind }}, {{ lower .Resource.Kind }}Finalizer); !ok {
+ log.Error(err, "Failed to add finalizer into the custom resource")
+ return ctrl.Result{Requeue: true}, nil
+ }
+
+ if err = r.Update(ctx, {{ lower .Resource.Kind }}); err != nil {
+ log.Error(err, "Failed to update custom resource to add finalizer")
return ctrl.Result{}, err
}
}
@@ -159,23 +159,18 @@ func (r *{{ .Resource.Kind }}Reconciler) Reconcile(ctx context.Context, req ctrl
is{{ .Resource.Kind }}MarkedToBeDeleted := {{ lower .Resource.Kind }}.GetDeletionTimestamp() != nil
if is{{ .Resource.Kind }}MarkedToBeDeleted {
if controllerutil.ContainsFinalizer({{ lower .Resource.Kind }}, {{ lower .Resource.Kind }}Finalizer) {
- // Run finalization logic for memcachedFinalizer. If the
- // finalization logic fails, don't remove the finalizer so
- // that we can retry during the next reconciliation.
log.Info("Performing Finalizer Operations for {{ .Resource.Kind }} before delete CR")
r.doFinalizerOperationsFor{{ .Resource.Kind }}({{ lower .Resource.Kind }})

- // Remove memcachedFinalizer. Once all finalizers have been
- // removed, the object will be deleted.
+ log.Info("Removing Finalizer for {{ .Resource.Kind }} after successfully performing the operations")
if ok := controllerutil.RemoveFinalizer({{ lower .Resource.Kind }}, {{ lower .Resource.Kind }}Finalizer); !ok {
- if err != nil {
- log.Error(err, "Failed to remove finalizer for {{ .Resource.Kind }}")
- return ctrl.Result{}, err
- }
+ log.Error(err, "Failed to remove finalizer for {{ .Resource.Kind }}")
+ return ctrl.Result{Requeue: true}, nil
}
- err := r.Update(ctx, {{ lower .Resource.Kind }})
- if err != nil {
+
+ if err := r.Update(ctx, {{ lower .Resource.Kind }}); err != nil {
log.Error(err, "Failed to remove finalizer for {{ .Resource.Kind }}")
+ return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
@@ -186,13 +181,20 @@ func (r *{{ .Resource.Kind }}Reconciler) Reconcile(ctx context.Context, req ctrl
err = r.Get(ctx, types.NamespacedName{Name: {{ lower .Resource.Kind }}.Name, Namespace: {{ lower .Resource.Kind }}.Namespace}, found)
if err != nil && apierrors.IsNotFound(err) {
// Define a new deployment
- dep := r.deploymentFor{{ .Resource.Kind }}(ctx, {{ lower .Resource.Kind }})
- log.Info("Creating a new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
- err = r.Create(ctx, dep)
+ dep, err := r.deploymentFor{{ .Resource.Kind }}({{ lower .Resource.Kind }})
if err != nil {
- log.Error(err, "Failed to create new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name )
+ log.Error(err, "Failed to define new Deployment resource for {{ .Resource.Kind }}")
return ctrl.Result{}, err
}
+
+ log.Info("Creating a new Deployment",
+ "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
+ if err = r.Create(ctx, dep); err != nil {
+ log.Error(err, "Failed to create new Deployment",
+ "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
+ return ctrl.Result{}, err
+ }
+
// Deployment created successfully
// We will requeue the reconciliation so that we can ensure the state
// and move forward for the next operations
@@ -203,16 +205,19 @@ func (r *{{ .Resource.Kind }}Reconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, err
}

- // The API is defining that the {{ .Resource.Kind }} type, have a {{ .Resource.Kind }}Spec.Size field to set the quantity of {{ .Resource.Kind }} instances (CRs) to be deployed.
- // The following code ensure the deployment size is the same as the spec
+ // The CRD API defines that the {{ .Resource.Kind }} type has a {{ .Resource.Kind }}Spec.Size field
+ // to set the quantity of Deployment instances as the desired state on the cluster.
+ // Therefore, the following code will ensure the Deployment size is the same as defined
+ // via the Size spec of the Custom Resource which we are reconciling.
size := {{ lower .Resource.Kind }}.Spec.Size
if *found.Spec.Replicas != size {
found.Spec.Replicas = &size
- err = r.Update(ctx, found)
- if err != nil {
- log.Error(err, "Failed to update Deployment", "Deployment.Namespace", found.Namespace, "Deployment.Name", found.Name)
+ if err = r.Update(ctx, found); err != nil {
+ log.Error(err, "Failed to update Deployment",
+ "Deployment.Namespace", found.Namespace, "Deployment.Name", found.Name)
return ctrl.Result{}, err
}
+
// Since it fails we want to re-queue the reconciliation
// The reconciliation will only stop when we be able to ensure
// the desired state on the cluster
@@ -228,6 +233,13 @@ func (r *{{ .Resource.Kind }}Reconciler) doFinalizerOperationsFor{{ .Resource.Ki
// needs to do before the CR can be deleted. Examples
// of finalizers include performing backups and deleting
// resources that are not owned by this CR, like a PVC.
+
+ // Note: It is not recommended to use finalizers with the purpose of deleting resources which are
+ // created and managed in the reconciliation. These, such as the Deployment created in this reconcile,
+ // are defined as dependents of the custom resource. See that we use the method ctrl.SetControllerReference
+ // to set the ownerRef, which means that the Deployment will be deleted by the Kubernetes API.
+ // More info: https://kubernetes.io/docs/tasks/administer-cluster/use-cascading-deletion/
+
// The following implementation will raise an event
r.Recorder.Event(cr, "Warning", "Deleting",
fmt.Sprintf("Custom Resource %s is being deleted from the namespace %s",
@@ -236,13 +248,15 @@ func (r *{{ .Resource.Kind }}Reconciler) doFinalizerOperationsFor{{ .Resource.Ki
}

// deploymentFor{{ .Resource.Kind }} returns a {{ .Resource.Kind }} Deployment object
- func (r *{{ .Resource.Kind }}Reconciler) deploymentFor{{ .Resource.Kind }}(ctx context.Context, {{ lower .Resource.Kind }} *{{ .Resource.ImportAlias }}.{{ .Resource.Kind }}) *appsv1.Deployment {
+ func (r *{{ .Resource.Kind }}Reconciler) deploymentFor{{ .Resource.Kind }}(
+ {{ lower .Resource.Kind }} *{{ .Resource.ImportAlias }}.{{ .Resource.Kind }}) (*appsv1.Deployment, error) {
ls := labelsFor{{ .Resource.Kind }}({{ lower .Resource.Kind }}.Name)
replicas := {{ lower .Resource.Kind }}.Spec.Size
- log := log.FromContext(ctx)
+
+ // Get the Operand image
image, err := imageFor{{ .Resource.Kind }}()
if err != nil {
- log.Error(err, "unable to get image for {{ .Resource.Kind }}")
+ return nil, err
}

dep := &appsv1.Deployment{
@@ -274,18 +288,17 @@ func (r *{{ .Resource.Kind }}Reconciler) deploymentFor{{ .Resource.Kind }}(ctx c
},
},
}
- // Set {{ .Resource.Kind }} instance as the owner and controller
- // You should use the method ctrl.SetControllerReference for all resources
- // which are created by your controller so that when the Custom Resource be deleted
- // all resources owned by it (child) will also be deleted.
- // To know more about it see: https://kubernetes.io/docs/tasks/administer-cluster/use-cascading-deletion/
- ctrl.SetControllerReference({{ lower .Resource.Kind }}, dep, r.Scheme)
- return dep
+
+ // Set the ownerRef for the Deployment
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/
+ if err := ctrl.SetControllerReference({{ lower .Resource.Kind }}, dep, r.Scheme); err != nil {
+ return nil, err
+ }
+ return dep, nil
}

// labelsFor{{ .Resource.Kind }} returns the labels for selecting the resources
- // belonging to the given {{ .Resource.Kind }} CR name.
- // Note that the labels follows the standards defined in: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
func labelsFor{{ .Resource.Kind }}(name string) map[string]string {
var imageTag string
image, err := imageFor{{ .Resource.Kind }}()
@@ -300,22 +313,20 @@ func labelsFor{{ .Resource.Kind }}(name string) map[string]string {
}
}

- // imageFor{{ .Resource.Kind }} gets the image for the resources belonging to the given {{ .Resource.Kind }} CR,
- // from the {{ upper .Resource.Kind }}_IMAGE ENV VAR defined in the config/manager/manager.yaml
+ // imageFor{{ .Resource.Kind }} gets the Operand image which is managed by this controller
+ // from the {{ upper .Resource.Kind }}_IMAGE environment variable defined in the config/manager/manager.yaml
func imageFor{{ .Resource.Kind }}() (string, error) {
var imageEnvVar = "{{ upper .Resource.Kind }}_IMAGE"
image, found := os.LookupEnv(imageEnvVar)
if !found {
- return "", fmt.Errorf("%s must be set ", imageEnvVar)
+ return "", fmt.Errorf("unable to find %s environment variable with the image", imageEnvVar)
}
return image, nil
}

// SetupWithManager sets up the controller with the Manager.
- // The following code specifies how the controller is built to watch a CR
- // and other resources that are owned and managed by that controller.
- // In this way, the reconciliation can be re-trigged when the CR and/or the Deployment
- // be created/edit/delete.
+ // Note that the Deployment will also be watched in order to ensure its
+ // desired state on the cluster
func (r *{{ .Resource.Kind }}Reconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
{{ if not (isEmptyStr .Resource.Path) -}}