diff --git a/api/go.mod b/api/go.mod
index 8ba16cc6f..25ddd68d3 100644
--- a/api/go.mod
+++ b/api/go.mod
@@ -25,6 +25,7 @@ require (
 )
 
 require (
+	dario.cat/mergo v1.0.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
diff --git a/api/go.sum b/api/go.sum
index 7924831a1..221eaf9c1 100644
--- a/api/go.sum
+++ b/api/go.sum
@@ -1,3 +1,5 @@
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
 github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
diff --git a/common/go.mod b/common/go.mod
index df1b57416..4fdfb4657 100644
--- a/common/go.mod
+++ b/common/go.mod
@@ -8,6 +8,7 @@ go 1.23.0
 toolchain go1.24.5
 
 require (
+	dario.cat/mergo v1.0.1
 	github.com/evanphx/json-patch/v5 v5.9.11
 	github.com/go-logr/logr v1.4.3
 	github.com/onsi/ginkgo/v2 v2.23.4
diff --git a/common/go.sum b/common/go.sum
index dbba210bd..8f03685ad 100644
--- a/common/go.sum
+++ b/common/go.sum
@@ -1,5 +1,7 @@
 cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
 cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
 github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
diff --git a/common/pkg/capi/clustertopology/handlers/mutation/meta.go b/common/pkg/capi/clustertopology/handlers/mutation/meta.go
index 13aef75ad..225026906 100644
--- a/common/pkg/capi/clustertopology/handlers/mutation/meta.go
+++ b/common/pkg/capi/clustertopology/handlers/mutation/meta.go
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"sync"
 
+	"github.com/samber/lo"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -18,6 +19,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers"
+	"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/variables"
 )
 
 type MutateFunc func(
@@ -89,6 +91,21 @@ func (mgp metaGeneratePatches) GeneratePatches(
 ) {
 	clusterKey := handlers.ClusterKeyFromReq(req)
 	clusterGetter := mgp.CreateClusterGetter(clusterKey)
+
+	// Create a map of variables from the request that can be overridden by machine deployment or control plane
+	// configuration.
+	// Filter out the "builtin" variable, which is already present in the vars map and should not be overridden by
+	// the global variables.
+	globalVars := lo.FilterSliceToMap(
+		req.Variables,
+		func(v runtimehooksv1.Variable) (string, apiextensionsv1.JSON, bool) {
+			if v.Name == "builtin" {
+				return "", apiextensionsv1.JSON{}, false
+			}
+			return v.Name, v.Value, true
+		},
+	)
+
 	topologymutation.WalkTemplates(
 		ctx,
 		unstructured.UnstructuredJSONScheme,
@@ -108,16 +125,33 @@ func (mgp metaGeneratePatches) GeneratePatches(
 
 		log.V(3).Info("Starting mutation pipeline", "handlerCount", len(mgp.mutators))
 
-		for i, h := range mgp.mutators {
-			handlerName := fmt.Sprintf("%T", h)
-			log.V(5).Info("Running mutator", "index", i, "handler", handlerName)
+		// Merge the global variables into the current resource vars. This allows the handlers to access
+		// the variables defined in the cluster class or cluster configuration and apply them correctly when
+		// overrides are specified on machine deployment or control plane configuration.
+		mergedVars, err := variables.MergeVariableOverridesWithGlobal(vars, globalVars)
+		if err != nil {
+			log.Error(err, "Failed to merge global variables")
+			return err
+		}
 
-			if err := h.Mutate(ctx, obj.(*unstructured.Unstructured), vars, holderRef, clusterKey, clusterGetter); err != nil {
-				log.Error(err, "Mutator failed", "index", i, "handler", handlerName)
+		for i, h := range mgp.mutators {
+			mutatorType := fmt.Sprintf("%T", h)
+			log.V(5).
+				Info("Running mutator", "index", i, "handler", mutatorType, "vars", mergedVars)
+
+			if err := h.Mutate(
+				ctx,
+				obj.(*unstructured.Unstructured),
+				mergedVars,
+				holderRef,
+				clusterKey,
+				clusterGetter,
+			); err != nil {
+				log.Error(err, "Mutator failed", "index", i, "handler", mutatorType)
 				return err
 			}
 
-			log.V(5).Info("Mutator completed successfully", "index", i, "handler", handlerName)
+			log.V(5).Info("Mutator completed successfully", "index", i, "handler", mutatorType)
 		}
 
 		log.V(3).Info("Mutation pipeline completed successfully", "handlerCount", len(mgp.mutators))
diff --git a/common/pkg/capi/clustertopology/handlers/mutation/meta_test.go b/common/pkg/capi/clustertopology/handlers/mutation/meta_test.go
index 66c75b9c2..6752d370e 100644
--- a/common/pkg/capi/clustertopology/handlers/mutation/meta_test.go
+++ b/common/pkg/capi/clustertopology/handlers/mutation/meta_test.go
@@ -34,7 +34,7 @@ var _ MetaMutator = &testHandler{}
 func (h *testHandler) Mutate(
 	_ context.Context,
 	obj *unstructured.Unstructured,
-	_ map[string]apiextensionsv1.JSON,
+	vars map[string]apiextensionsv1.JSON,
 	holderRef runtimehooksv1.HolderReference,
 	_ client.ObjectKey,
 	_ ClusterGetter,
@@ -43,6 +43,31 @@ func (h *testHandler) Mutate(
 		return fmt.Errorf("This is a failure")
 	}
 
+	varAVal, ok := vars["varA"]
+	if !ok {
+		return fmt.Errorf("varA not found in vars")
+	}
+	if string(varAVal.Raw) != `{"a":1,"b":2}` {
+		return fmt.Errorf("varA value mismatch: %s", string(varAVal.Raw))
+	}
+
+	varBVal, ok := vars["varB"]
+	if !ok {
+		return fmt.Errorf("varB not found in vars")
+	}
+	switch obj.GetKind() {
+	case "KubeadmControlPlaneTemplate":
+		if string(varBVal.Raw) != `{"c":3,"d":{"e":4,"f":5}}` {
+			return fmt.Errorf("varB value mismatch: %s", string(varBVal.Raw))
+		}
+	case "KubeadmConfigTemplate":
+		if string(varBVal.Raw) != `{"c":3,"d":{"e":5,"f":5}}` {
+			return fmt.Errorf("varB value mismatch: %s", string(varBVal.Raw))
+		}
+	default:
+		return fmt.Errorf("unexpected object kind: %s", obj.GetKind())
+	}
+
 	if h.mutateControlPlane {
 		return patches.MutateIfApplicable(
 			obj, nil, &holderRef, selectors.ControlPlane(), logr.Discard(),
@@ -61,7 +86,7 @@ func (h *testHandler) Mutate(
 
 	return patches.MutateIfApplicable(
 		obj,
-		machineVars(),
+		vars,
 		&holderRef,
 		selectors.WorkersKubeadmConfigTemplateSelector(),
 		logr.Discard(),
@@ -78,10 +103,32 @@ func (h *testHandler) Mutate(
 	)
 }
 
-func machineVars() map[string]apiextensionsv1.JSON {
-	return map[string]apiextensionsv1.JSON{
-		"builtin": {Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`)},
-	}
+func globalVars() []runtimehooksv1.Variable {
+	return []runtimehooksv1.Variable{{
+		Name: "varA",
+		Value: apiextensionsv1.JSON{
+			Raw: []byte(`{"a":1,"b":2}`),
+		},
+	}, {
+		Name: "varB",
+		Value: apiextensionsv1.JSON{
+			Raw: []byte(`{"c":3,"d":{"e":4,"f":5}}`),
+		},
+	}}
+}
+
+func overrideVars() []runtimehooksv1.Variable {
+	return []runtimehooksv1.Variable{{
+		Name: "builtin",
+		Value: apiextensionsv1.JSON{
+			Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`),
+		},
+	}, {
+		Name: "varB",
+		Value: apiextensionsv1.JSON{
+			Raw: []byte(`{"d": {"e": 5}}`),
+		},
+	}}
 }
 
 func TestMetaGeneratePatches(t *testing.T) {
@@ -221,9 +268,12 @@ func TestMetaGeneratePatches(t *testing.T) {
 			h := NewMetaGeneratePatchesHandler("", nil, tt.mutaters...).(GeneratePatches)
 
 			resp := &runtimehooksv1.GeneratePatchesResponse{}
+			kctReq := request.NewKubeadmConfigTemplateRequestItem("kubeadm-config")
+			kctReq.Variables = overrideVars()
 			h.GeneratePatches(context.Background(), &runtimehooksv1.GeneratePatchesRequest{
+				Variables: globalVars(),
 				Items: []runtimehooksv1.GeneratePatchesRequestItem{
-					request.NewKubeadmConfigTemplateRequestItem("kubeadm-config"),
+					kctReq,
 					request.NewKubeadmControlPlaneTemplateRequestItem("kubeadm-control-plane"),
 				},
 			}, resp)
diff --git a/common/pkg/capi/clustertopology/variables/merge.go b/common/pkg/capi/clustertopology/variables/merge.go
new file mode 100644
index 000000000..8c0b0e49f
--- /dev/null
+++ b/common/pkg/capi/clustertopology/variables/merge.go
@@ -0,0 +1,66 @@
+// Copyright 2023 Nutanix. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package variables
+
+import (
+	"encoding/json"
+	"fmt"
+	"maps"
+
+	"dario.cat/mergo"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+)
+
+// MergeVariableOverridesWithGlobal merges the provided variable overrides with the global variables.
+// It performs a deep merge: if a variable exists in both maps, override values take precedence, while nested fields set only in the global value are preserved.
+func MergeVariableOverridesWithGlobal(
+	overrideVars, globalVars map[string]apiextensionsv1.JSON,
+) (map[string]apiextensionsv1.JSON, error) {
+	mergedVars := maps.Clone(overrideVars)
+
+	for k, v := range globalVars {
+		// If the value of v is nil, skip it.
+		if v.Raw == nil {
+			continue
+		}
+
+		existingValue, exists := mergedVars[k]
+
+		// If the variable does not exist in the mergedVars or the value is nil, add it and continue.
+		if !exists || existingValue.Raw == nil {
+			mergedVars[k] = v
+			continue
+		}
+
+		// Wrap the value in a temporary key to ensure we can unmarshal to a map.
+		// This is necessary because the values could be scalars.
+		tempValJSON := fmt.Sprintf(`{"value": %s}`, string(existingValue.Raw))
+		tempGlobalValJSON := fmt.Sprintf(`{"value": %s}`, string(v.Raw))
+
+		// Unmarshal the existing value and the global value into maps.
+		var val, globalVal map[string]interface{}
+		if err := json.Unmarshal([]byte(tempValJSON), &val); err != nil {
+			return nil, fmt.Errorf("failed to unmarshal existing value for key %q: %w", k, err)
+		}
+
+		if err := json.Unmarshal([]byte(tempGlobalValJSON), &globalVal); err != nil {
+			return nil, fmt.Errorf("failed to unmarshal global value for key %q: %w", k, err)
+		}
+
+		// Now use mergo to perform a deep merge of the values, retaining the values in `val` if present.
+		if err := mergo.Merge(&val, globalVal); err != nil {
+			return nil, fmt.Errorf("failed to merge values for key %q: %w", k, err)
+		}
+
+		// Marshal the merged value back to JSON.
+		mergedVal, err := json.Marshal(val["value"])
+		if err != nil {
+			return nil, fmt.Errorf("failed to marshal merged value for key %q: %w", k, err)
+		}
+
+		mergedVars[k] = apiextensionsv1.JSON{Raw: mergedVal}
+	}
+
+	return mergedVars, nil
+}
diff --git a/common/pkg/capi/clustertopology/variables/merge_test.go b/common/pkg/capi/clustertopology/variables/merge_test.go
new file mode 100644
index 000000000..a41774d7a
--- /dev/null
+++ b/common/pkg/capi/clustertopology/variables/merge_test.go
@@ -0,0 +1,207 @@
+// Copyright 2023 Nutanix. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package variables
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/onsi/gomega"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+)
+
+func TestMergeVariableOverridesWithGlobal(t *testing.T) {
+	t.Parallel()
+
+	type args struct {
+		vars       map[string]apiextensionsv1.JSON
+		globalVars map[string]apiextensionsv1.JSON
+	}
+	tests := []struct {
+		name      string
+		args      args
+		want      map[string]apiextensionsv1.JSON
+		wantErr   bool
+		errString string
+	}{
+		{
+			name: "no overlap, globalVars added",
+			args: args{
+				vars: map[string]apiextensionsv1.JSON{
+					"a": {Raw: []byte(`1`)},
+				},
+				globalVars: map[string]apiextensionsv1.JSON{
+					"b": {Raw: []byte(`2`)},
+				},
+			},
+			want: map[string]apiextensionsv1.JSON{
+				"a": {Raw: []byte(`1`)},
+				"b": {Raw: []byte(`2`)},
+			},
+		},
+		{
+			name: "globalVars value is nil, skipped",
+			args: args{
+				vars: map[string]apiextensionsv1.JSON{
+					"a": {Raw: []byte(`1`)},
+				},
+				globalVars: map[string]apiextensionsv1.JSON{
+					"b": {Raw: nil},
+				},
+			},
+			want: map[string]apiextensionsv1.JSON{
+				"a": {Raw: []byte(`1`)},
+			},
+		},
+		{
+			name: "existing value is nil, globalVars value used",
+			args: args{
+				vars: map[string]apiextensionsv1.JSON{
+					"a": {Raw: nil},
+				},
+				globalVars: map[string]apiextensionsv1.JSON{
+					"a": {Raw: []byte(`2`)},
+				},
+			},
+			want: map[string]apiextensionsv1.JSON{
+				"a": {Raw: []byte(`2`)},
+			},
+		},
+		{
+			name: "both values are scalars, globalVars ignored",
+			args: args{
+				vars: map[string]apiextensionsv1.JSON{
+					"a": {Raw: []byte(`1`)},
+				},
+				globalVars: map[string]apiextensionsv1.JSON{
+					"a": {Raw: []byte(`2`)},
+				},
+			},
+			want: map[string]apiextensionsv1.JSON{
+				"a": {Raw: []byte(`1`)},
+			},
+		},
+		{
+			name: "both values are objects, merged",
+			args: args{
+				vars: map[string]apiextensionsv1.JSON{
+					"a": {Raw: []byte(`{"x":1,"y":2}`)},
+				},
+				globalVars: map[string]apiextensionsv1.JSON{
+					"a": {Raw: []byte(`{"y":3,"z":4}`)},
+				},
+			},
+			want: map[string]apiextensionsv1.JSON{
+				"a": {Raw: []byte(`{"x":1,"y":2,"z":4}`)},
+			},
+		},
+		{
+			name: "both values are objects with nested objects, merged",
+			args: args{
+				vars: map[string]apiextensionsv1.JSON{
+					"a": {Raw: []byte(`{"x":1,"y":{"a": 2,"b":{"c": 3}}}`)},
+				},
+				globalVars: map[string]apiextensionsv1.JSON{
+					"a": {Raw: []byte(`{"y":{"a": 2,"b":{"c": 5, "d": 6}},"z":4}`)},
[]byte(`{"y":{"a": 2,"b":{"c": 5, "d": 6}},"z":4}`)}, + }, + }, + want: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`{"x":1,"y":{"a": 2,"b":{"c": 3, "d": 6}},"z":4}`)}, + }, + }, + { + name: "both values are objects with nested objects with vars having nil object explicitly, merged", + args: args{ + vars: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`{"x":1,"y":{"a": 2,"b": null}}`)}, + }, + globalVars: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`{"y":{"a": 2,"b":{"c": 5, "d": 6}},"z":4}`)}, + }, + }, + want: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`{"x":1,"y":{"a": 2,"b":{"c": 5, "d": 6}},"z":4}`)}, + }, + }, + { + name: "globalVars is scalar, vars is object, keep object", + args: args{ + vars: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`{"x":1}`)}, + }, + globalVars: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`2`)}, + }, + }, + want: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`{"x":1}`)}, + }, + }, + { + name: "vars is scalar, globalVars is object, keep scalar", + args: args{ + vars: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`2`)}, + }, + globalVars: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`{"x":1}`)}, + }, + }, + want: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`2`)}, + }, + }, + { + name: "invalid JSON in vars", + args: args{ + vars: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`{invalid}`)}, + }, + globalVars: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`{"x":1}`)}, + }, + }, + wantErr: true, + errString: "failed to unmarshal existing value for key \"a\"", + }, + { + name: "invalid JSON in globalVars", + args: args{ + vars: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`{"x":1}`)}, + }, + globalVars: map[string]apiextensionsv1.JSON{ + "a": {Raw: []byte(`{invalid}`)}, + }, + }, + wantErr: true, + errString: "failed to unmarshal global value for key \"a\"", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := gomega.NewWithT(t) + got, err := MergeVariableOverridesWithGlobal(tt.args.vars, tt.args.globalVars) + if tt.wantErr { + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err.Error()).To(gomega.ContainSubstring(tt.errString)) + return + } + g.Expect(err).ToNot(gomega.HaveOccurred()) + // Compare JSON values + for k, wantVal := range tt.want { + gotVal, ok := got[k] + g.Expect(ok).To(gomega.BeTrue(), "missing key %q", k) + var wantObj, gotObj interface{} + _ = json.Unmarshal(wantVal.Raw, &wantObj) + _ = json.Unmarshal(gotVal.Raw, &gotObj) + g.Expect(gotObj).To(gomega.Equal(wantObj), "key %q", k) + } + // Check for unexpected keys + g.Expect(len(got)).To(gomega.Equal(len(tt.want))) + }) + } +} diff --git a/pkg/webhook/cluster/nutanix_validator.go b/pkg/webhook/cluster/nutanix_validator.go index b8dbeef25..a4fc907ca 100644 --- a/pkg/webhook/cluster/nutanix_validator.go +++ b/pkg/webhook/cluster/nutanix_validator.go @@ -11,6 +11,7 @@ import ( "net/netip" v1 "k8s.io/api/admission/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/util/validation/field" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -18,6 +19,7 @@ import ( "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/v1alpha1" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/variables" + commonvariables 
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/variables" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/utils" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/helpers" ) @@ -220,41 +222,69 @@ func validateWorkerFailureDomainConfig( "workerConfig", ) - // Get the machineDetails from cluster variable "workerConfig" if it is configured - defaultWorkerConfig, err := variables.UnmarshalWorkerConfigVariable(cluster.Spec.Topology.Variables) - if err != nil { - fldErrs = append(fldErrs, field.InternalError(workerConfigVarPath, - fmt.Errorf("failed to unmarshal cluster topology variable %q: %w", v1alpha1.WorkerConfigVariableName, err))) - } + // Merge the global cluster variables with the worker config overrides. + mdVariables := commonvariables.ClusterVariablesToVariablesMap(cluster.Spec.Topology.Variables) if cluster.Spec.Topology.Workers != nil { for i := range cluster.Spec.Topology.Workers.MachineDeployments { md := cluster.Spec.Topology.Workers.MachineDeployments[i] hasFailureDomain := md.FailureDomain != nil && *md.FailureDomain != "" + wcfgPath := workerConfigVarPath - // Get the machineDetails from the overrides variable "workerConfig" if it is configured, - // otherwise use the defaultWorkerConfig if it is configured. - var workerConfig *variables.WorkerNodeConfigSpec + // Get the md variable overrides. + var mdVariableOverrides map[string]apiextensionsv1.JSON if md.Variables != nil && len(md.Variables.Overrides) > 0 { - workerConfig, err = variables.UnmarshalWorkerConfigVariable(md.Variables.Overrides) - if err != nil { - fldErrs = append(fldErrs, field.InternalError( - workerConfigMDVarOverridePath, - fmt.Errorf( - "failed to unmarshal worker overrides variable %q: %w", - v1alpha1.WorkerConfigVariableName, - err, - ), - )) + wcfgPath = workerConfigMDVarOverridePath + mdVariableOverrides = commonvariables.ClusterVariablesToVariablesMap(md.Variables.Overrides) + + // If mdVariables is nil, initialize it with mdVariableOverrides, otherwise merge global and + // variable overrides. + if mdVariables == nil { + mdVariables = mdVariableOverrides + } else { + // Merge global and variable overrides if global variables are present. 
+					mergedVariables, err := commonvariables.MergeVariableOverridesWithGlobal(
+						mdVariableOverrides,
+						mdVariables,
+					)
+					if err != nil {
+						fldErrs = append(fldErrs, field.InternalError(
+							workerConfigMDVarOverridePath,
+							fmt.Errorf(
+								"failed to merge global and worker variable overrides for MachineDeployment %q: %w",
+								md.Name,
+								err,
+							),
+						))
+					}
+
+					mdVariables = mergedVariables
 				}
 			}
 
-			wcfgPath := workerConfigMDVarOverridePath
-			if workerConfig == nil {
-				workerConfig = defaultWorkerConfig
-				wcfgPath = workerConfigVarPath
+			workerConfigVarJSON, workerConfigPresent := mdVariables[v1alpha1.WorkerConfigVariableName]
+			if !workerConfigPresent {
+				continue
+			}
+
+			workerConfigClusterVar := clusterv1.ClusterVariable{
+				Name:  v1alpha1.WorkerConfigVariableName,
+				Value: *workerConfigVarJSON.DeepCopy(),
+			}
+
+			workerConfig := variables.WorkerNodeConfigSpec{}
+			if err := variables.UnmarshalClusterVariable(&workerConfigClusterVar, &workerConfig); err != nil {
+				fldErrs = append(fldErrs, field.InternalError(
+					wcfgPath,
+					fmt.Errorf(
+						"failed to unmarshal variable %q: %w",
+						v1alpha1.WorkerConfigVariableName,
+						err,
+					),
+				))
 			}
-			if workerConfig == nil || workerConfig.Nutanix == nil {
+
+			if workerConfig.Nutanix == nil {
 				continue
 			}
diff --git a/pkg/webhook/cluster/nutanix_validator_test.go b/pkg/webhook/cluster/nutanix_validator_test.go
index 80d5865cf..7371a85f7 100644
--- a/pkg/webhook/cluster/nutanix_validator_test.go
+++ b/pkg/webhook/cluster/nutanix_validator_test.go
@@ -8,6 +8,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/utils/ptr"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -366,6 +367,16 @@ func TestValidateTopologyFailureDomainConfig(t *testing.T) {
 			expectedErr:         false,
 			expectedErrMessages: nil,
 		},
+		{
+			name:          "worker cluster and subnet configured in default with failure domain in variables overrides violates XOR", //nolint:lll // name is long.
+			clusterConfig: fakeClusterConfigSpec(false, true, true),
+			cluster:       fakeCluster(t, true, false, false, workerConfigDefaultOverridesConflicting),
+			expectedErr:   true,
+			expectedErrMessages: []string{
+				"spec.topology.workers.machineDeployments.variables.overrides.workerConfig.value.nutanix.machineDetails.cluster: Forbidden: \"cluster\" must not be set when failureDomain is configured.", //nolint:lll // Message is long.
+				"spec.topology.workers.machineDeployments.variables.overrides.workerConfig.value.nutanix.machineDetails.subnets: Forbidden: \"subnets\" must not be set when failureDomain is configured.", //nolint:lll // Message is long.
+			},
+		},
 	}
 
 	for _, tc := range testcases {
@@ -432,10 +443,11 @@ func fakeClusterConfigSpec(fd, pe, subnets bool) *variables.ClusterConfigSpec {
 type workerConfigExistence string
 
 const (
-	workerConfigDefaultOnly          workerConfigExistence = "workerConfigDefaultOnly"
-	workerConfigOverridesOnly        workerConfigExistence = "workerConfigOverridesOnly"
-	workerConfigDefaultOverridesBoth workerConfigExistence = "workerConfigDefaultOverridesBoth"
-	workerConfigNone                 workerConfigExistence = "workerConfigNone"
+	workerConfigDefaultOnly                 workerConfigExistence = "workerConfigDefaultOnly"
+	workerConfigOverridesOnly               workerConfigExistence = "workerConfigOverridesOnly"
+	workerConfigDefaultOverridesBoth        workerConfigExistence = "workerConfigDefaultOverridesBoth"
+	workerConfigDefaultOverridesConflicting workerConfigExistence = "workerConfigDefaultOverridesConflicting"
+	workerConfigNone                        workerConfigExistence = "workerConfigNone"
 )
 
 func fakeCluster(t *testing.T, fd, pe, subnets bool, wcfg workerConfigExistence) *clusterv1.Cluster {
@-469,8 +481,8 @@ func fakeCluster(t *testing.T, fd, pe, subnets bool, wcfg workerConfigExistence) *clusterv1.Cluster {
 			MachineDetails: *fakeMachineDetails(pe, subnets),
 		},
 	}
-	workerConfigVar, err := variables.MarshalToClusterVariable("workerConfig", workerConfig)
-	assert.NoError(t, err)
+	workerConfigVar, err := variables.MarshalToClusterVariable(string(v1alpha1.WorkerConfigVariableName), workerConfig)
+	require.NoError(t, err)
 
 	switch wcfg {
 	case workerConfigDefaultOnly:
@@ -486,6 +498,26 @@ func fakeCluster(t *testing.T, fd, pe, subnets bool, wcfg workerConfigExistence) *clusterv1.Cluster {
 			cluster.Spec.Topology.Workers.MachineDeployments[0].Variables.Overrides,
 			*workerConfigVar,
 		)
+	case workerConfigDefaultOverridesConflicting:
+		// If a failure domain is defined, create a default worker config with cluster and subnets set to
+		// force a conflict and, therefore, an error.
+		defaultWorkerConfig := variables.WorkerNodeConfigSpec{
+			Nutanix: &v1alpha1.NutanixWorkerNodeSpec{
+				MachineDetails: *fakeMachineDetails(fd, fd),
+			},
+		}
+
+		defaultWorkerConfigVar, err := variables.MarshalToClusterVariable(
+			string(v1alpha1.WorkerConfigVariableName),
+			defaultWorkerConfig,
+		)
+		require.NoError(t, err)
+
+		cluster.Spec.Topology.Variables = append(cluster.Spec.Topology.Variables, *defaultWorkerConfigVar)
+		cluster.Spec.Topology.Workers.MachineDeployments[0].Variables.Overrides = append(
+			cluster.Spec.Topology.Workers.MachineDeployments[0].Variables.Overrides,
+			*workerConfigVar,
+		)
 	case workerConfigNone:
 	default:
 		break
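
Note on the meta.go change above: GeneratePatches now converts the request-level variables into a map while dropping the per-resource "builtin" variable, so resource-specific builtin values are never clobbered by the merge. A minimal, self-contained sketch of that filtering step, using the same samber/lo and CAPI runtime-hooks packages this module already depends on (the fixture values here are made up):

```go
package main

import (
	"fmt"

	"github.com/samber/lo"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
)

func main() {
	// Hypothetical request-level variables: "builtin" plus one user variable.
	reqVars := []runtimehooksv1.Variable{
		{Name: "builtin", Value: apiextensionsv1.JSON{Raw: []byte(`{"cluster":{"name":"demo"}}`)}},
		{Name: "varA", Value: apiextensionsv1.JSON{Raw: []byte(`{"a":1}`)}},
	}

	// Convert the slice to a map while dropping "builtin", mirroring the
	// FilterSliceToMap call in GeneratePatches.
	globalVars := lo.FilterSliceToMap(
		reqVars,
		func(v runtimehooksv1.Variable) (string, apiextensionsv1.JSON, bool) {
			if v.Name == "builtin" {
				return "", apiextensionsv1.JSON{}, false
			}
			return v.Name, v.Value, true
		},
	)

	fmt.Println(len(globalVars))                // 1
	fmt.Println(string(globalVars["varA"].Raw)) // {"a":1}
}
```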
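The precedence rules implemented by MergeVariableOverridesWithGlobal can be seen end to end below; a sketch assuming the common module resolves at its published import path, with values mirroring the meta_test.go fixtures. One subtlety worth noting: an explicit JSON null in an override counts as empty for mergo and is filled in from the global value, which is what the "explicit null object" test case asserts.

```go
package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/variables"
)

func main() {
	// Global (cluster-level) value for varB.
	global := map[string]apiextensionsv1.JSON{
		"varB": {Raw: []byte(`{"c":3,"d":{"e":4,"f":5}}`)},
	}
	// Per-resource override: only d.e is set.
	overrides := map[string]apiextensionsv1.JSON{
		"varB": {Raw: []byte(`{"d":{"e":5}}`)},
	}

	merged, err := variables.MergeVariableOverridesWithGlobal(overrides, global)
	if err != nil {
		panic(err)
	}

	// The override wins per field, while fields only present in the global
	// value are preserved: {"c":3,"d":{"e":5,"f":5}}
	fmt.Println(string(merged["varB"].Raw))
}
```

Passing the overrides as the first argument is what gives them precedence: mergo.Merge (without WithOverride) only fills fields that are empty in the destination.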
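The `{"value": ...}` wrapping in merge.go exists because json.Unmarshal cannot decode a bare scalar such as `1` into a map[string]interface{}. The same trick in isolation, using mergo directly and nothing from this repository:

```go
package main

import (
	"encoding/json"
	"fmt"

	"dario.cat/mergo"
)

func main() {
	override := []byte(`1`) // a bare scalar is not valid input for a map target
	global := []byte(`2`)

	// Wrapping both values in a single-key object makes them map-shaped,
	// so they can be unmarshalled and handed to mergo.
	var dst, src map[string]interface{}
	if err := json.Unmarshal([]byte(fmt.Sprintf(`{"value": %s}`, override)), &dst); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(fmt.Sprintf(`{"value": %s}`, global)), &src); err != nil {
		panic(err)
	}

	// Without mergo.WithOverride, existing (override) values are kept.
	if err := mergo.Merge(&dst, src); err != nil {
		panic(err)
	}

	out, _ := json.Marshal(dst["value"])
	fmt.Println(string(out)) // 1 — the override wins for scalars.
}
```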
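Finally, the webhook uses the same helper to derive the effective per-MachineDeployment variables. A sketch of that flow, assuming ClusterVariablesToVariablesMap has the signature implied by its call sites in nutanix_validator.go ([]clusterv1.ClusterVariable to map[string]apiextensionsv1.JSON); the workerConfig JSON below is illustrative only, since the merge treats values as opaque JSON:

```go
package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

	commonvariables "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/variables"
)

func main() {
	// Cluster-level default (field names are illustrative).
	globalVars := []clusterv1.ClusterVariable{{
		Name:  "workerConfig",
		Value: apiextensionsv1.JSON{Raw: []byte(`{"nutanix":{"machineDetails":{"memorySize":"4Gi"}}}`)},
	}}
	// MachineDeployment-level override.
	overrides := []clusterv1.ClusterVariable{{
		Name:  "workerConfig",
		Value: apiextensionsv1.JSON{Raw: []byte(`{"nutanix":{"machineDetails":{"memorySize":"8Gi"}}}`)},
	}}

	// Overrides take precedence; nested fields unset in the override fall
	// back to the cluster-level default.
	merged, err := commonvariables.MergeVariableOverridesWithGlobal(
		commonvariables.ClusterVariablesToVariablesMap(overrides),
		commonvariables.ClusterVariablesToVariablesMap(globalVars),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged["workerConfig"].Raw)) // memorySize is 8Gi
}
```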