Commit 05b1031

Observe subnet claims and subnets to drive removal of scheduling gates from instances; update instance status once the instance is programmed.
1 parent e07659c commit 05b1031

6 files changed: +155 -20 lines changed

api/v1alpha/instance_types.go

Lines changed: 13 additions & 1 deletion

@@ -383,11 +383,23 @@ type InstanceControllerStatus struct {
 const (
     // InstanceReady indicates that the instance is ready
     InstanceReady = "Ready"
+
+    // InstanceProgrammed indicates that the instance has been programmed
+    InstanceProgrammed = "Programmed"
 )
 
 const (
     // InstanceReadyReasonSchedulingGatesPresent indicates that the instance is not ready because scheduling gates are present.
     InstanceReadyReasonSchedulingGatesPresent = "SchedulingGatesPresent"
+
+    // InstanceProgrammedReasonNotProgrammed indicates that the instance has not been programmed
+    InstanceProgrammedReasonNotProgrammed = "NotProgrammed"
+
+    // InstanceProgrammedReasonProgrammingInProgress indicates that the instance is being programmed.
+    InstanceProgrammedReasonProgrammingInProgress = "ProgrammingInProgress"
+
+    // InstanceProgrammedReasonProgrammed indicates that the instance has been programmed
+    InstanceProgrammedReasonProgrammed = "Programmed"
 )
 
 type InstanceTemplateSpec struct {

@@ -419,7 +431,7 @@ type Instance struct {
 
     // Status defines the current state of an Instance.
     //
-    // +kubebuilder:default={conditions:{{type:"Running",status:"Unknown",reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"},{type:"Ready",status:"Unknown",reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"}}}
+    // +kubebuilder:default={conditions:{{type:"Programmed",status:"Unknown",reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"},{type:"Running",status:"Unknown",reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"},{type:"Ready",status:"Unknown",reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"}}}
     Status InstanceStatus `json:"status,omitempty"`
 }

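Note on the new condition type: this file only defines the `Programmed` vocabulary; the instance controller further down consumes it, and some other component (e.g. an infrastructure provider) is expected to set it. As a rough sketch of what that producer side might look like — assuming this repository's computev1alpha package and a standard controller-runtime client; the import path and helper name are illustrative, not part of this commit:

package example

import (
    "context"

    apimeta "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    // Placeholder import path; substitute this repository's module path.
    computev1alpha "example.com/project/api/v1alpha"
)

// markProgrammed is a hypothetical helper that records Programmed=True on an
// Instance and persists it via the status subresource.
func markProgrammed(ctx context.Context, c client.Client, instance *computev1alpha.Instance) error {
    changed := apimeta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
        Type:               computev1alpha.InstanceProgrammed,
        Status:             metav1.ConditionTrue,
        Reason:             computev1alpha.InstanceProgrammedReasonProgrammed,
        Message:            "Instance has been programmed",
        ObservedGeneration: instance.Generation,
    })
    if !changed {
        // Condition already up to date; nothing to write.
        return nil
    }
    return c.Status().Update(ctx, instance)
}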
cmd/main.go

Lines changed: 2 additions & 2 deletions

@@ -178,13 +178,13 @@ func main() {
         os.Exit(1)
     }
 
+    // +kubebuilder:scaffold:builder
+
     if err = controller.AddIndexers(ctx, mgr); err != nil {
         setupLog.Error(err, "unable to add indexers")
         os.Exit(1)
     }
 
-    // +kubebuilder:scaffold:builder
-
     if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
         setupLog.Error(err, "unable to set up health check")
         os.Exit(1)

config/crd/bases/compute.datumapis.com_instances.yaml

Lines changed: 5 additions & 0 deletions

@@ -811,6 +811,11 @@ spec:
           status:
             default:
               conditions:
+              - lastTransitionTime: "1970-01-01T00:00:00Z"
+                message: Waiting for controller
+                reason: Pending
+                status: Unknown
+                type: Programmed
               - lastTransitionTime: "1970-01-01T00:00:00Z"
                 message: Waiting for controller
                 reason: Pending

internal/controller/indexers.go

Lines changed: 20 additions & 0 deletions

@@ -15,6 +15,7 @@ import (
 const (
     deploymentWorkloadUIDIndex = "deploymentWorkloadUIDIndex"
     workloadNetworksIndex      = "workloadNetworksIndex"
+    deploymentLocationIndex    = "deploymentLocationIndex"
 )
 
 func AddIndexers(ctx context.Context, mgr mcmanager.Manager) error {

@@ -29,6 +30,11 @@ func addWorkloadDeploymentIndexers(ctx context.Context, mgr mcmanager.Manager) e
         return fmt.Errorf("failed to add workload deployment indexer %q: %w", deploymentWorkloadUIDIndex, err)
     }
 
+    // Index workload deployments by location
+    if err := mgr.GetFieldIndexer().IndexField(ctx, &computev1alpha.WorkloadDeployment{}, deploymentLocationIndex, deploymentLocationIndexFunc); err != nil {
+        return fmt.Errorf("failed to add workload deployment indexer %q: %w", deploymentLocationIndex, err)
+    }
+
     return nil
 }

@@ -38,6 +44,20 @@ func deploymentWorkloadUIDIndexFunc(o client.Object) []string {
     }
 }
 
+func deploymentLocationIndexFunc(o client.Object) []string {
+    deployment := o.(*computev1alpha.WorkloadDeployment)
+    if deployment.Status.Location == nil {
+        return nil
+    }
+
+    return []string{
+        types.NamespacedName{
+            Namespace: deployment.Status.Location.Namespace,
+            Name:      deployment.Status.Location.Name,
+        }.String(),
+    }
+}
+
 func addWorkloadIndexers(ctx context.Context, mgr mcmanager.Manager) error {
     if err := mgr.GetFieldIndexer().IndexField(ctx, &computev1alpha.Workload{}, workloadNetworksIndex, workloadNetworksIndexFunc); err != nil {
         return fmt.Errorf("failed to add workload indexer %q: %w", workloadNetworksIndex, err)

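The `deploymentLocationIndex` added above keys WorkloadDeployments by the namespace/name of their `status.location`, which is what lets the watch handlers in workloaddeployment_controller.go (further down) map a SubnetClaim or Subnet back to the deployments at that location. A minimal query against the index — reusing the imports already present in this file plus k8s.io/apimachinery/pkg/types, with an illustrative helper name — might look like:

// listDeploymentsAtLocation is a hypothetical helper that returns the
// WorkloadDeployments whose status.location matches the given namespace/name,
// using the deploymentLocationIndex field index registered above.
func listDeploymentsAtLocation(ctx context.Context, c client.Client, namespace, name string) (*computev1alpha.WorkloadDeploymentList, error) {
    var deployments computev1alpha.WorkloadDeploymentList
    key := types.NamespacedName{Namespace: namespace, Name: name}.String()
    if err := c.List(ctx, &deployments, client.MatchingFields{deploymentLocationIndex: key}); err != nil {
        return nil, err
    }
    return &deployments, nil
}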
internal/controller/instance_controller.go

Lines changed: 52 additions & 10 deletions

@@ -11,6 +11,7 @@ import (
     apimeta "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     ctrl "sigs.k8s.io/controller-runtime"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
     "sigs.k8s.io/controller-runtime/pkg/log"
     mcbuilder "sigs.k8s.io/multicluster-runtime/pkg/builder"
     mccontext "sigs.k8s.io/multicluster-runtime/pkg/context"

@@ -53,6 +54,8 @@ func (r *InstanceReconciler) Reconcile(ctx context.Context, req mcreconcile.Requ
     logger.Info("reconciling instance")
     defer logger.Info("reconcile complete")
 
+    // TODO(jreese) better condition handling
+
     if len(instance.Spec.Controller.SchedulingGates) > 0 {
         // Update Ready condition to False, Reason to "SchedulingGatesPresent"
         // and Message to "Scheduling gates present"

@@ -63,19 +66,58 @@ func (r *InstanceReconciler) Reconcile(ctx context.Context, req mcreconcile.Requ
             schedulingGateNames = append(schedulingGateNames, gate.Name)
         }
 
-        changed := apimeta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+        if _, err := controllerutil.CreateOrPatch(ctx, cl.GetClient(), &instance, func() error {
+            apimeta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+                Type:               computev1alpha.InstanceReady,
+                Status:             metav1.ConditionFalse,
+                Reason:             computev1alpha.InstanceReadyReasonSchedulingGatesPresent,
+                Message:            fmt.Sprintf("Scheduling gates present: %s", strings.Join(schedulingGateNames, ", ")),
+                ObservedGeneration: instance.Generation,
+            })
+            return nil
+        }); err != nil {
+            return ctrl.Result{}, fmt.Errorf("failed updating instance status: %w", err)
+        }
+
+        return ctrl.Result{}, nil
+    }
+
+    if condition := apimeta.FindStatusCondition(instance.Status.Conditions, computev1alpha.InstanceProgrammed); condition.Status != metav1.ConditionTrue {
+        logger.Info("instance is not programmed", "instance", instance.Name)
+        message := "Instance has not been programmed"
+        if condition.Status != metav1.ConditionUnknown {
+            message = condition.Message
+        }
+
+        logger.Info("updating instance status", "instance", instance.Name)
+        if _, err := controllerutil.CreateOrPatch(ctx, cl.GetClient(), &instance, func() error {
+            apimeta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
+                Type:               computev1alpha.InstanceReady,
+                Status:             metav1.ConditionFalse,
+                Reason:             computev1alpha.InstanceProgrammedReasonNotProgrammed,
+                Message:            message,
+                ObservedGeneration: instance.Generation,
+            })
+            return nil
+        }); err != nil {
+            return ctrl.Result{}, fmt.Errorf("failed updating instance status: %w", err)
+        }
+        return ctrl.Result{}, nil
+    }
+
+    logger.Info("instance is programmed", "instance", instance.Name)
+
+    if _, err := controllerutil.CreateOrPatch(ctx, cl.GetClient(), &instance, func() error {
+        apimeta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
             Type:               computev1alpha.InstanceReady,
-            Status:             metav1.ConditionFalse,
-            Reason:             computev1alpha.InstanceReadyReasonSchedulingGatesPresent,
-            Message:            fmt.Sprintf("Scheduling gates present: %s", strings.Join(schedulingGateNames, ", ")),
+            Status:             metav1.ConditionTrue,
+            Reason:             computev1alpha.InstanceProgrammedReasonProgrammed,
+            Message:            "Instance is ready",
             ObservedGeneration: instance.Generation,
         })
-
-        if changed {
-            if err := cl.GetClient().Status().Update(ctx, &instance); err != nil {
-                return ctrl.Result{}, fmt.Errorf("failed updating instance status: %w", err)
-            }
-        }
+        return nil
+    }); err != nil {
+        return ctrl.Result{}, fmt.Errorf("failed updating instance status: %w", err)
     }
 
     return ctrl.Result{}, nil

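One behavior worth keeping in mind: apimeta.FindStatusCondition returns nil when the condition type is absent, so the `Programmed` lookup above relies on the kubebuilder default shown earlier seeding that condition on every Instance. A defensive variant — sketched here rather than taken from this commit, helper name illustrative, same imports as the file above — would nil-check before dereferencing:

// readyMessageForProgrammed is a hypothetical helper that reports whether the
// instance is programmed and, if not, which message to surface on the Ready
// condition. It tolerates a missing Programmed condition by treating it the
// same as Unknown.
func readyMessageForProgrammed(instance *computev1alpha.Instance) (programmed bool, message string) {
    condition := apimeta.FindStatusCondition(instance.Status.Conditions, computev1alpha.InstanceProgrammed)
    if condition != nil && condition.Status == metav1.ConditionTrue {
        return true, ""
    }
    message = "Instance has not been programmed"
    if condition != nil && condition.Status != metav1.ConditionUnknown {
        message = condition.Message
    }
    return false, message
}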
internal/controller/workloaddeployment_controller.go

Lines changed: 63 additions & 7 deletions

@@ -10,10 +10,14 @@ import (
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     apimeta "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
     ctrl "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/cluster"
     "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+    "sigs.k8s.io/controller-runtime/pkg/handler"
     "sigs.k8s.io/controller-runtime/pkg/log"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
     mcbuilder "sigs.k8s.io/multicluster-runtime/pkg/builder"
     mccontext "sigs.k8s.io/multicluster-runtime/pkg/context"
     mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager"

@@ -110,6 +114,7 @@ func (r *WorkloadDeploymentReconciler) Reconcile(ctx context.Context, req mcreco
     // gates on instances. If any instances were created by actions above, those
     // will result in this reconciler being processed again, so will properly have
     // their gates removed.
+    logger.Info("removing scheduling gates from instances")
 
     for _, instance := range instances.Items {
         if len(instance.Spec.Controller.SchedulingGates) == 0 {

@@ -125,10 +130,10 @@ func (r *WorkloadDeploymentReconciler) Reconcile(ctx context.Context, req mcreco
             continue
         }
 
-        instance.Spec.Controller.SchedulingGates = newGates
-
-        // TODO(jreese) consider using patches
-        if err := cl.GetClient().Update(ctx, &instance); err != nil {
+        if _, err := controllerutil.CreateOrPatch(ctx, cl.GetClient(), &instance, func() error {
+            instance.Spec.Controller.SchedulingGates = newGates
+            return nil
+        }); err != nil {
             return ctrl.Result{}, fmt.Errorf("failed updating instance: %w", err)
         }
     }

@@ -318,7 +323,7 @@ func (r *WorkloadDeploymentReconciler) reconcileNetworks(
             return result
         }
 
-        logger.Info("found subnet", "subnet", subnet.Name)
+        logger.Info("subnet is ready", "subnet", subnet.Name)
 
     }
 

@@ -329,11 +334,62 @@
 func (r *WorkloadDeploymentReconciler) SetupWithManager(mgr mcmanager.Manager) error {
     r.mgr = mgr
     // TODO(jreese) finalizers
-    // TODO(jreese) watch subnet claims and subnets, enqueue based on location
-    // match on workload deployment.
     return mcbuilder.ControllerManagedBy(mgr).
         For(&computev1alpha.WorkloadDeployment{}, mcbuilder.WithEngageWithLocalCluster(false)).
         Owns(&computev1alpha.Instance{}).
         Owns(&networkingv1alpha.NetworkBinding{}).
+        Watches(&networkingv1alpha.SubnetClaim{}, func(clusterName string, cl cluster.Cluster) handler.TypedEventHandler[client.Object, mcreconcile.Request] {
+            return handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []mcreconcile.Request {
+                subnetClaim := o.(*networkingv1alpha.SubnetClaim)
+                return enqueueWorkloadDeploymentByLocation(ctx, mgr, clusterName, subnetClaim.Spec.Location)
+            })
+        }).
+        Watches(&networkingv1alpha.Subnet{}, func(clusterName string, cl cluster.Cluster) handler.TypedEventHandler[client.Object, mcreconcile.Request] {
+            return handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []mcreconcile.Request {
+                subnet := o.(*networkingv1alpha.Subnet)
+                return enqueueWorkloadDeploymentByLocation(ctx, mgr, clusterName, subnet.Spec.Location)
+            })
+        }).
         Complete(r)
 }
+
+func enqueueWorkloadDeploymentByLocation(ctx context.Context, mgr mcmanager.Manager, clusterName string, locationRef networkingv1alpha.LocationReference) []mcreconcile.Request {
+    logger := log.FromContext(ctx)
+
+    cluster, err := mgr.GetCluster(ctx, clusterName)
+    if err != nil {
+        logger.Error(err, "failed to get cluster")
+        return nil
+    }
+    clusterClient := cluster.GetClient()
+
+    locationName := (types.NamespacedName{
+        Namespace: locationRef.Namespace,
+        Name:      locationRef.Name,
+    }).String()
+    listOpts := client.MatchingFields{
+        deploymentLocationIndex: locationName,
+    }
+
+    var workloadDeployments computev1alpha.WorkloadDeploymentList
+
+    if err := clusterClient.List(ctx, &workloadDeployments, listOpts); err != nil {
+        logger.Error(err, "failed to list workloads")
+        return nil
+    }
+
+    var requests []mcreconcile.Request
+    for _, workload := range workloadDeployments.Items {
+        requests = append(requests, mcreconcile.Request{
+            Request: reconcile.Request{
+                NamespacedName: types.NamespacedName{
+                    Namespace: workload.Namespace,
+                    Name:      workload.Name,
+                },
+            },
+            ClusterName: clusterName,
+        })
+    }
+
+    return requests
+}
