Commit 819d3f3

Merge pull request #11650 from ykakarap/sync-machine-labels
🌱 Add --additional-sync-machine-labels to allow syncing additional labels to Nodes
2 parents b1268f4 + 5f8e673 commit 819d3f3

8 files changed, +110 -16 lines changed

controllers/alias.go

Lines changed: 4 additions & 0 deletions
@@ -18,6 +18,7 @@ package controllers

 import (
     "context"
+    "regexp"
     "time"

     ctrl "sigs.k8s.io/controller-runtime"
@@ -72,6 +73,8 @@ type MachineReconciler struct {
     WatchFilterValue string

     RemoteConditionsGracePeriod time.Duration
+
+    AdditionalSyncMachineLabels []*regexp.Regexp
 }

 func (r *MachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
@@ -81,6 +84,7 @@ func (r *MachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manag
         ClusterCache:                r.ClusterCache,
         WatchFilterValue:            r.WatchFilterValue,
         RemoteConditionsGracePeriod: r.RemoteConditionsGracePeriod,
+        AdditionalSyncMachineLabels: r.AdditionalSyncMachineLabels,
     }).SetupWithManager(ctx, mgr, options)
 }

docs/book/src/reference/api/metadata-propagation.md

Lines changed: 4 additions & 2 deletions
@@ -63,7 +63,9 @@ Top-level labels that meet a specific cretria are propagated to the Node labels
 - `.labels.[label-meets-criteria]` => `Node.labels`
 - `.annotations` => Not propagated.

-Label should meet one of the following criteria to propagate to Node:
+Labels that meet at least one of the following criteria are always propagated to the Node:
 - Has `node-role.kubernetes.io` as prefix.
 - Belongs to `node-restriction.kubernetes.io` domain.
-- Belongs to `node.cluster.x-k8s.io` domain.
+- Belongs to `node.cluster.x-k8s.io` domain.
+
+In addition, any labels that match at least one of the regexes provided by the `--additional-sync-machine-labels` flag on the manager will be synced from the Machine to the Node.
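
As a rough illustration of the combined behavior (the pattern and label keys below are hypothetical, not taken from this change): the default criteria always apply, and the regexes only widen the set of labels that are synced. Because the flag is a string array, it can be passed multiple times to supply several patterns.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical pattern, as it might be passed via --additional-sync-machine-labels.
	additional := regexp.MustCompile(`^example\.com/`)

	// Hypothetical Machine labels and how they would be treated.
	labels := []string{
		"node-role.kubernetes.io/worker", // synced: default prefix, regardless of regexes
		"node.cluster.x-k8s.io/tier",     // synced: default domain, regardless of regexes
		"example.com/team",               // synced: matches the additional regex
		"internal.example.org/owner",     // not synced: no default criterion, no regex match
	}

	for _, key := range labels {
		fmt.Printf("%-35s matchesAdditionalRegex=%v\n", key, additional.MatchString(key))
	}
}
```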

docs/proposals/20220927-label-sync-between-machine-and-nodes.md

Lines changed: 5 additions & 1 deletion
@@ -68,6 +68,7 @@ With the "divide and conquer" principle in mind this proposal aims to address th

 - Support label sync from Machine to the linked Kubernetes node, limited to `node-role.kubernetes.io/` prefix and the `node-restriction.kubernetes.io` domain.
 - Support syncing labels from Machine to the linked Kubernetes node for the Cluster API owned `node.cluster.x-k8s.io` domain.
+- Support a flag to sync additional user configured labels from the Machine to the Node.

 ### Non-Goals

@@ -98,7 +99,9 @@ While designing a solution for syncing labels between Machine and underlying Kub

 ### Label domains & prefixes

-The idea of scoping synchronization to a well defined set of labels is a first answer to security/concurrency concerns; labels to be managed by Cluster API have been selected based on following criteria:
+A default list of labels would always be synced from the Machines to the Nodes. An additional list of labels can be synced from the Machine to the Node by providing a list of regexes as a flag to the manager.
+
+The following is the default list of label domains that would always be sync from Machines to Nodes:

 - The `node-role.kubernetes.io` label has been used widely in the past to identify the role of a Kubernetes Node (e.g. `node-role.kubernetes.io/worker=''`). For example, `kubectl get node` looks for this specific label when displaying the role to the user.

@@ -163,3 +166,4 @@ Users could also implement their own label synchronizer in their tooling, but th

 - [ ] 09/27/2022: First Draft of this document
 - [ ] 09/28/2022: First Draft of this document presented in the Cluster API office hours meeting
+- [ ] 01/09/2025: Update to support configurable label syncing Ref:[11657](https://github.com/kubernetes-sigs/cluster-api/issues/11657)
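
To make the "belongs to a domain" wording concrete, the standalone sketch below mirrors the prefix/suffix check used by `getManagedLabels` in the `machine_controller_noderef.go` diff further down; the helper name and label keys are made up for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

// belongsToDomain reports whether a label key is in the given domain,
// i.e. the part of the key before "/" is the domain itself or a
// subdomain of it, mirroring the check in getManagedLabels.
func belongsToDomain(key, domain string) bool {
	dnsSubdomainOrName := strings.Split(key, "/")[0]
	return dnsSubdomainOrName == domain || strings.HasSuffix(dnsSubdomainOrName, "."+domain)
}

func main() {
	const domain = "node.cluster.x-k8s.io"

	for _, key := range []string{
		"node.cluster.x-k8s.io/tier",               // the domain itself -> true
		"custom-prefix.node.cluster.x-k8s.io/tier", // a subdomain -> true
		"gpu-node.cluster.x-k8s.io",                // not a subdomain (no "." before the domain) -> false
	} {
		fmt.Printf("%-45s %t\n", key, belongsToDomain(key, domain))
	}
}
```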

internal/controllers/machine/machine_controller.go

Lines changed: 3 additions & 0 deletions
@@ -19,6 +19,7 @@ package machine
 import (
     "context"
     "fmt"
+    "regexp"
     "slices"
     "strings"
     "time"
@@ -99,6 +100,8 @@ type Reconciler struct {

     RemoteConditionsGracePeriod time.Duration

+    AdditionalSyncMachineLabels []*regexp.Regexp
+
     controller      controller.Controller
     recorder        record.EventRecorder
     externalTracker external.ObjectTracker

internal/controllers/machine/machine_controller_noderef.go

Lines changed: 11 additions & 3 deletions
@@ -127,7 +127,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result,
     // Compute labels to be propagated from Machines to nodes.
     // NOTE: CAPI should manage only a subset of node labels, everything else should be preserved.
     // NOTE: Once we reconcile node labels for the first time, the NodeUninitializedTaint is removed from the node.
-    nodeLabels := getManagedLabels(machine.Labels)
+    nodeLabels := r.getManagedLabels(machine.Labels)

     // Get interruptible instance status from the infrastructure provider and set the interruptible label on the node.
     interruptible := false
@@ -178,9 +178,10 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result,

 // getManagedLabels gets a map[string]string and returns another map[string]string
 // filtering out labels not managed by CAPI.
-func getManagedLabels(labels map[string]string) map[string]string {
+func (r *Reconciler) getManagedLabels(labels map[string]string) map[string]string {
     managedLabels := make(map[string]string)
     for key, value := range labels {
+        // Always sync the default set of labels.
         dnsSubdomainOrName := strings.Split(key, "/")[0]
         if dnsSubdomainOrName == clusterv1.NodeRoleLabelPrefix {
             managedLabels[key] = value
@@ -191,8 +192,15 @@ func getManagedLabels(labels map[string]string) map[string]string {
         if dnsSubdomainOrName == clusterv1.ManagedNodeLabelDomain || strings.HasSuffix(dnsSubdomainOrName, "."+clusterv1.ManagedNodeLabelDomain) {
             managedLabels[key] = value
         }
-    }

+        // Sync if the labels matches at least one user provided regex.
+        for _, regex := range r.AdditionalSyncMachineLabels {
+            if regex.MatchString(key) {
+                managedLabels[key] = value
+                break
+            }
+        }
+    }
     return managedLabels
 }

internal/controllers/machine/machine_controller_noderef_test.go

Lines changed: 60 additions & 10 deletions
@@ -19,6 +19,7 @@ package machine
 import (
     "context"
     "fmt"
+    "regexp"
     "testing"
     "time"

@@ -700,8 +701,7 @@ func TestSummarizeNodeConditions(t *testing.T) {
 }

 func TestGetManagedLabels(t *testing.T) {
-    // Create managedLabels map from known managed prefixes.
-    managedLabels := map[string]string{
+    defaultLabels := map[string]string{
         clusterv1.NodeRoleLabelPrefix + "/anyRole": "",

         clusterv1.ManagedNodeLabelDomain: "",
@@ -715,22 +715,72 @@ func TestGetManagedLabels(t *testing.T) {
         "custom-prefix." + clusterv1.NodeRestrictionLabelDomain + "/anything": "",
     }

-    // Append arbitrary labels.
-    allLabels := map[string]string{
-        "foo": "",
-        "bar": "",
+    additionalLabels := map[string]string{
+        "foo": "bar",
+        "bar": "baz",
         "company.xyz/node.cluster.x-k8s.io":          "not-managed",
         "gpu-node.cluster.x-k8s.io":                  "not-managed",
         "company.xyz/node-restriction.kubernetes.io": "not-managed",
         "gpu-node-restriction.kubernetes.io":         "not-managed",
+        "wrong.test.foo.com": "",
     }
-    for k, v := range managedLabels {
+
+    exampleRegex := regexp.MustCompile(`foo`)
+    defaultAndRegexLabels := map[string]string{}
+    for k, v := range defaultLabels {
+        defaultAndRegexLabels[k] = v
+    }
+    defaultAndRegexLabels["foo"] = "bar"
+    defaultAndRegexLabels["wrong.test.foo.com"] = ""
+
+    allLabels := map[string]string{}
+    for k, v := range defaultLabels {
+        allLabels[k] = v
+    }
+    for k, v := range additionalLabels {
         allLabels[k] = v
     }

-    g := NewWithT(t)
-    got := getManagedLabels(allLabels)
-    g.Expect(got).To(BeEquivalentTo(managedLabels))
+    tests := []struct {
+        name                        string
+        additionalSyncMachineLabels []*regexp.Regexp
+        allLabels                   map[string]string
+        managedLabels               map[string]string
+    }{
+        {
+            name:                        "always sync default labels",
+            additionalSyncMachineLabels: nil,
+            allLabels:                   allLabels,
+            managedLabels:               defaultLabels,
+        },
+        {
+            name: "sync additional defined labels",
+            additionalSyncMachineLabels: []*regexp.Regexp{
+                exampleRegex,
+            },
+            allLabels:     allLabels,
+            managedLabels: defaultAndRegexLabels,
+        },
+        {
+            name: "sync all labels",
+            additionalSyncMachineLabels: []*regexp.Regexp{
+                regexp.MustCompile(`.*`),
+            },
+            allLabels:     allLabels,
+            managedLabels: allLabels,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            g := NewWithT(t)
+            r := &Reconciler{
+                AdditionalSyncMachineLabels: tt.additionalSyncMachineLabels,
+            }
+            got := r.getManagedLabels(tt.allLabels)
+            g.Expect(got).To(BeEquivalentTo(tt.managedLabels))
+        })
+    }
 }

 func TestPatchNode(t *testing.T) {

internal/controllers/machine/suite_test.go

Lines changed: 1 addition & 0 deletions
@@ -94,6 +94,7 @@ func TestMain(m *testing.M) {
         APIReader:                   mgr.GetAPIReader(),
         ClusterCache:                clusterCache,
         RemoteConditionsGracePeriod: 5 * time.Minute,
+        AdditionalSyncMachineLabels: nil,
     }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
         panic(fmt.Sprintf("Failed to start MachineReconciler: %v", err))
     }

main.go

Lines changed: 22 additions & 0 deletions
@@ -22,6 +22,7 @@ import (
     "flag"
     "fmt"
     "os"
+    "regexp"
     goruntime "runtime"
     "time"

@@ -35,6 +36,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/selection"
+    kerrors "k8s.io/apimachinery/pkg/util/errors"
     clientgoscheme "k8s.io/client-go/kubernetes/scheme"
     "k8s.io/client-go/tools/leaderelection/resourcelock"
     cliflag "k8s.io/component-base/cli/flag"
@@ -125,6 +127,7 @@ var (
     machinePoolConcurrency        int
     clusterResourceSetConcurrency int
     machineHealthCheckConcurrency int
+    additionalSyncMachineLabels   []string
 )

 func init() {
@@ -251,6 +254,9 @@ func InitFlags(fs *pflag.FlagSet) {
     fs.StringVar(&healthAddr, "health-addr", ":9440",
         "The address the health endpoint binds to.")

+    fs.StringArrayVar(&additionalSyncMachineLabels, "additional-sync-machine-labels", []string{},
+        "List of regexes to select the additional set of labels to sync from the Machine to the Node. A label will be synced as long as it matches at least one of the regexes.")
+
     flags.AddManagerOptions(fs, &managerOptions)

     feature.MutableGates.AddFlag(fs)
@@ -559,12 +565,28 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, watchNamespaces map
         setupLog.Error(err, "Unable to create controller", "controller", "Cluster")
         os.Exit(1)
     }
+
+    var errs []error
+    var additionalSyncMachineLabelRegexes []*regexp.Regexp
+    for _, re := range additionalSyncMachineLabels {
+        reg, err := regexp.Compile(re)
+        if err != nil {
+            errs = append(errs, err)
+        } else {
+            additionalSyncMachineLabelRegexes = append(additionalSyncMachineLabelRegexes, reg)
+        }
+    }
+    if len(errs) > 0 {
+        setupLog.Error(fmt.Errorf("at least one of --additional-sync-machine-labels regexes is invalid: %w", kerrors.NewAggregate(errs)), "Unable to start manager")
+        os.Exit(1)
+    }
     if err := (&controllers.MachineReconciler{
         Client:                      mgr.GetClient(),
         APIReader:                   mgr.GetAPIReader(),
         ClusterCache:                clusterCache,
         WatchFilterValue:            watchFilterValue,
         RemoteConditionsGracePeriod: remoteConditionsGracePeriod,
+        AdditionalSyncMachineLabels: additionalSyncMachineLabelRegexes,
     }).SetupWithManager(ctx, mgr, concurrency(machineConcurrency)); err != nil {
         setupLog.Error(err, "Unable to create controller", "controller", "Machine")
         os.Exit(1)
0 commit comments
