Skip to content

Commit 0ebd9b3

Browse files
committed
Use the Hypervisor CRD instead of labels.
When there is no maintenance-controller profile, the GardenerLifecycleController will now set the Hypervisor CR maintenance field. For confirmation, it will look at the conditions. That renders the labels obsolete.
1 parent 768f631 commit 0ebd9b3

File tree

6 files changed

+131
-394
lines changed

6 files changed

+131
-394
lines changed

cmd/main.go

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -236,14 +236,6 @@ func main() {
236236
os.Exit(1)
237237
}
238238

239-
if err = (&controller.NodeEvictionLabelReconciler{
240-
Client: mgr.GetClient(),
241-
Scheme: mgr.GetScheme(),
242-
}).SetupWithManager(mgr); err != nil {
243-
setupLog.Error(err, "unable to create controller", "controller", "Node")
244-
os.Exit(1)
245-
}
246-
247239
if err = (&controller.NodeDecommissionReconciler{
248240
Client: mgr.GetClient(),
249241
Scheme: mgr.GetScheme(),

internal/controller/gardener_node_lifecycle_controller.go

Lines changed: 28 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@ import (
3232
corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
3333
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
3434
policyv1ac "k8s.io/client-go/applyconfigurations/policy/v1"
35-
"k8s.io/client-go/util/retry"
3635
ctrl "sigs.k8s.io/controller-runtime"
3736
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
3837
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
@@ -72,40 +71,52 @@ func (r *GardenerNodeLifecycleController) Reconcile(ctx context.Context, req ctr
7271
return ctrl.Result{}, k8sclient.IgnoreNotFound(err)
7372
}
7473

75-
hv := kvmv1.Hypervisor{}
76-
if err := r.Get(ctx, k8sclient.ObjectKey{Name: req.Name}, &hv); k8sclient.IgnoreNotFound(err) != nil {
74+
hv := &kvmv1.Hypervisor{}
75+
if err := r.Get(ctx, k8sclient.ObjectKey{Name: req.Name}, hv); k8sclient.IgnoreNotFound(err) != nil {
7776
return ctrl.Result{}, err
7877
}
78+
7979
if !hv.Spec.LifecycleEnabled {
8080
// Nothing to be done
8181
return ctrl.Result{}, nil
8282
}
8383

84-
if isTerminating(node) {
85-
changed, err := setNodeLabels(ctx, r.Client, node, map[string]string{labelEvictionRequired: valueReasonTerminating})
86-
if changed || err != nil {
87-
return ctrl.Result{}, err
84+
// Only, if the maintenance controller is not active
85+
if _, found := node.Labels["cloud.sap/maintenance-profile"]; !found {
86+
// Sync the terminating status into the hypervisor spec
87+
if isTerminating(node) && hv.Spec.Maintenance != kvmv1.MaintenanceTermination {
88+
base := hv.DeepCopy()
89+
hv.Spec.Maintenance = kvmv1.MaintenanceTermination
90+
if err := r.Patch(ctx, hv, k8sclient.MergeFromWithOptions(base, k8sclient.MergeFromWithOptimisticLock{}), k8sclient.FieldOwner(MaintenanceControllerName)); err != nil {
91+
return ctrl.Result{}, err
92+
}
8893
}
8994
}
9095

9196
// We do not care about the particular value, as long as it isn't an error
9297
var minAvailable int32 = 1
93-
evictionValue, found := node.Labels[labelEvictionApproved]
94-
if found && evictionValue != "false" {
98+
99+
// Onboarding is not in progress anymore, i.e. the host is onboarded
100+
onboardingCompleted := meta.IsStatusConditionFalse(hv.Status.Conditions, kvmv1.ConditionTypeOnboarding)
101+
// Evicting is not in progress anymore, i.e. the host is empty
102+
evictionComplete := meta.IsStatusConditionFalse(hv.Status.Conditions, kvmv1.ConditionTypeEvicting)
103+
104+
if evictionComplete {
95105
minAvailable = 0
106+
107+
if onboardingCompleted && isTerminating(node) {
108+
// Onboarded & terminating & eviction complete -> disable HA
109+
if err := disableInstanceHA(hv); err != nil {
110+
return ctrl.Result{}, err
111+
}
112+
}
96113
}
97114

98-
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
99-
return r.ensureBlockingPodDisruptionBudget(ctx, node, minAvailable)
100-
}); err != nil {
115+
if err := r.ensureBlockingPodDisruptionBudget(ctx, node, minAvailable); err != nil {
101116
return ctrl.Result{}, err
102117
}
103118

104-
onboardingCompleted := meta.IsStatusConditionFalse(hv.Status.Conditions, kvmv1.ConditionTypeOnboarding)
105-
106-
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
107-
return r.ensureSignallingDeployment(ctx, node, minAvailable, onboardingCompleted)
108-
}); err != nil {
119+
if err := r.ensureSignallingDeployment(ctx, node, minAvailable, onboardingCompleted); err != nil {
109120
return ctrl.Result{}, err
110121
}
111122

internal/controller/gardener_node_lifecycle_controller_test.go

Lines changed: 103 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -18,51 +18,137 @@ limitations under the License.
1818
package controller
1919

2020
import (
21+
"fmt"
22+
2123
. "github.com/onsi/ginkgo/v2"
2224
. "github.com/onsi/gomega"
25+
appsv1 "k8s.io/api/apps/v1"
2326
corev1 "k8s.io/api/core/v1"
27+
policyv1 "k8s.io/api/policy/v1"
28+
"k8s.io/apimachinery/pkg/api/meta"
2429
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2530
"k8s.io/apimachinery/pkg/types"
2631
ctrl "sigs.k8s.io/controller-runtime"
27-
"sigs.k8s.io/controller-runtime/pkg/client"
32+
33+
kvmv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
2834
)
2935

3036
var _ = Describe("Gardener Maintenance Controller", func() {
3137
const nodeName = "node-test"
32-
var controller *GardenerNodeLifecycleController
38+
var (
39+
controller *GardenerNodeLifecycleController
40+
name = types.NamespacedName{Name: nodeName}
41+
reconcileReq = ctrl.Request{NamespacedName: name}
42+
maintenanceName = types.NamespacedName{Name: fmt.Sprintf("maint-%v", nodeName), Namespace: "kube-system"}
43+
)
3344

3445
BeforeEach(func(ctx SpecContext) {
3546
controller = &GardenerNodeLifecycleController{
3647
Client: k8sClient,
3748
Scheme: k8sClient.Scheme(),
3849
}
3950

40-
By("creating the namespace for the reconciler")
41-
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "monsoon3"}}
42-
Expect(client.IgnoreAlreadyExists(k8sClient.Create(ctx, ns))).To(Succeed())
43-
4451
By("creating the core resource for the Kind Node")
45-
resource := &corev1.Node{
52+
node := &corev1.Node{
4653
ObjectMeta: metav1.ObjectMeta{
47-
Name: nodeName,
48-
Labels: map[string]string{labelEvictionRequired: "true"},
54+
Name: nodeName,
4955
},
5056
}
51-
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
57+
Expect(k8sClient.Create(ctx, node)).To(Succeed())
5258
DeferCleanup(func(ctx SpecContext) {
53-
Expect(client.IgnoreNotFound(k8sClient.Delete(ctx, resource))).To(Succeed())
59+
By("Cleanup the specific node")
60+
Expect(k8sClient.Delete(ctx, node)).To(Succeed())
61+
})
62+
63+
By("creating the core resource for the Kind hypervisor")
64+
hypervisor := &kvmv1.Hypervisor{
65+
ObjectMeta: metav1.ObjectMeta{
66+
Name: nodeName,
67+
},
68+
Spec: kvmv1.HypervisorSpec{
69+
LifecycleEnabled: true,
70+
},
71+
}
72+
Expect(k8sClient.Create(ctx, hypervisor)).To(Succeed())
73+
DeferCleanup(func(ctx SpecContext) {
74+
Expect(k8sClient.Delete(ctx, hypervisor)).To(Succeed())
5475
})
5576
})
5677

57-
Context("When reconciling a node", func() {
58-
It("should successfully reconcile the resource", func(ctx SpecContext) {
59-
req := ctrl.Request{
60-
NamespacedName: types.NamespacedName{Name: nodeName},
61-
}
78+
Context("When reconciling a terminating node", func() {
79+
BeforeEach(func(ctx SpecContext) {
80+
By("Marking the node as terminating")
81+
node := &corev1.Node{}
82+
Expect(k8sClient.Get(ctx, name, node)).To(Succeed())
83+
node.Status.Conditions = append(node.Status.Conditions, corev1.NodeCondition{
84+
Type: "Terminating",
85+
})
86+
Expect(k8sClient.Status().Update(ctx, node)).To(Succeed())
87+
})
6288

89+
It("should successfully reconcile the resource", func(ctx SpecContext) {
6390
By("Reconciling the created resource")
64-
_, err := controller.Reconcile(ctx, req)
91+
_, err := controller.Reconcile(ctx, reconcileReq)
6592
Expect(err).NotTo(HaveOccurred())
93+
94+
hypervisor := &kvmv1.Hypervisor{}
95+
Expect(k8sClient.Get(ctx, name, hypervisor)).To(Succeed())
96+
Expect(hypervisor.Spec.Maintenance).To(Equal(kvmv1.MaintenanceTermination))
6697
})
6798
})
99+
100+
Context("When reconciling a node", func() {
101+
JustBeforeEach(func(ctx SpecContext) {
102+
_, err := controller.Reconcile(ctx, reconcileReq)
103+
Expect(err).NotTo(HaveOccurred())
104+
})
105+
It("should create a poddisruptionbudget", func(ctx SpecContext) {
106+
pdb := &policyv1.PodDisruptionBudget{}
107+
Expect(k8sClient.Get(ctx, maintenanceName, pdb)).To(Succeed())
108+
Expect(pdb.Spec.MinAvailable).To(HaveField("IntVal", BeNumerically("==", 1)))
109+
})
110+
111+
It("should create a failing deployment to signal onboarding not being completed", func(ctx SpecContext) {
112+
dep := &appsv1.Deployment{}
113+
Expect(k8sClient.Get(ctx, maintenanceName, dep)).To(Succeed())
114+
Expect(dep.Spec.Template.Spec.Containers).To(HaveLen(1))
115+
Expect(dep.Spec.Template.Spec.Containers[0].StartupProbe.Exec.Command).To(Equal([]string{"/bin/false"}))
116+
})
117+
118+
When("the node has been onboarded", func() {
119+
BeforeEach(func(ctx SpecContext) {
120+
hypervisor := &kvmv1.Hypervisor{}
121+
Expect(k8sClient.Get(ctx, name, hypervisor)).To(Succeed())
122+
meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{
123+
Type: kvmv1.ConditionTypeOnboarding,
124+
Status: metav1.ConditionFalse,
125+
Reason: "dontcare",
126+
Message: "dontcare",
127+
})
128+
Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed())
129+
})
130+
131+
It("should create a deployment with onboarding completed", func(ctx SpecContext) {
132+
dep := &appsv1.Deployment{}
133+
Expect(k8sClient.Get(ctx, maintenanceName, dep)).To(Succeed())
134+
Expect(dep.Spec.Template.Spec.Containers).To(HaveLen(1))
135+
Expect(dep.Spec.Template.Spec.Containers[0].StartupProbe.Exec.Command).To(Equal([]string{"/bin/true"}))
136+
})
137+
})
138+
139+
When("the node has been evicted", func() {
140+
BeforeEach(func(ctx SpecContext) {
141+
hypervisor := &kvmv1.Hypervisor{}
142+
Expect(k8sClient.Get(ctx, name, hypervisor)).To(Succeed())
143+
meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{
144+
Type: kvmv1.ConditionTypeEvicting,
145+
Status: metav1.ConditionFalse,
146+
Reason: "dontcare",
147+
Message: "dontcare",
148+
})
149+
Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed())
150+
})
151+
})
152+
153+
})
68154
})

0 commit comments

Comments
 (0)