diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index db71a262a..316d4b08e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -81,7 +81,7 @@ repos: - id: check-yaml args: ["-m", "--unsafe"] stages: [pre-commit] - exclude: ^charts/.+/(templates|addons)/.+\.ya?ml$ + exclude: (embedded/kubeletconfigpatch.yaml|^charts/.+/(templates|addons)/.+\.ya?ml)$ - id: mixed-line-ending args: ["-f", "lf"] exclude: \.bat$ diff --git a/api/v1alpha1/clusterconfig_types.go b/api/v1alpha1/clusterconfig_types.go index a4b09ba42..111e1110e 100644 --- a/api/v1alpha1/clusterconfig_types.go +++ b/api/v1alpha1/clusterconfig_types.go @@ -246,6 +246,13 @@ type KubeadmClusterConfigSpec struct { // KubeProxy defines the configuration for kube-proxy. // +kubebuilder:validation:Optional KubeProxy *KubeProxy `json:"kubeProxy,omitempty"` + + // MaxParallelImagePullsPerNode defines the maximum number of parallel image pulls performed by each kubelet. + // If not set, the default value of 1 will be used. + // If set to 0, the maximum number of parallel image pulls will be unlimited. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=0 + MaxParallelImagePullsPerNode *int32 `json:"maxParallelImagePullsPerNode,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v1alpha1/crds/caren.nutanix.com_awsclusterconfigs.yaml b/api/v1alpha1/crds/caren.nutanix.com_awsclusterconfigs.yaml index 925085447..963f15199 100644 --- a/api/v1alpha1/crds/caren.nutanix.com_awsclusterconfigs.yaml +++ b/api/v1alpha1/crds/caren.nutanix.com_awsclusterconfigs.yaml @@ -636,6 +636,14 @@ spec: minLength: 1 pattern: ^((?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*|\[(?:[a-fA-F0-9:]+)\])(:[0-9]+)?/)?[a-z0-9]+((?:[._]|__|[-]+)[a-z0-9]+)*(/[a-z0-9]+((?:[._]|__|[-]+)[a-z0-9]+)*)*$ type: string + maxParallelImagePullsPerNode: + description: |- + MaxParallelImagePullsPerNode defines the maximum number of parallel image pulls performed by each kubelet. 
+ If not set, the default value of 1 will be used. + If set to 0, the maximum number of parallel image pulls will be unlimited. + format: int32 + minimum: 0 + type: integer ntp: description: NTP defines the NTP configuration for the cluster. properties: diff --git a/api/v1alpha1/crds/caren.nutanix.com_dockerclusterconfigs.yaml b/api/v1alpha1/crds/caren.nutanix.com_dockerclusterconfigs.yaml index ea94128ca..c9d510cc0 100644 --- a/api/v1alpha1/crds/caren.nutanix.com_dockerclusterconfigs.yaml +++ b/api/v1alpha1/crds/caren.nutanix.com_dockerclusterconfigs.yaml @@ -545,6 +545,14 @@ spec: minLength: 1 pattern: ^((?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*|\[(?:[a-fA-F0-9:]+)\])(:[0-9]+)?/)?[a-z0-9]+((?:[._]|__|[-]+)[a-z0-9]+)*(/[a-z0-9]+((?:[._]|__|[-]+)[a-z0-9]+)*)*$ type: string + maxParallelImagePullsPerNode: + description: |- + MaxParallelImagePullsPerNode defines the maximum number of parallel image pulls performed by each kubelet. + If not set, the default value of 1 will be used. + If set to 0, the maximum number of parallel image pulls will be unlimited. + format: int32 + minimum: 0 + type: integer ntp: description: NTP defines the NTP configuration for the cluster. properties: diff --git a/api/v1alpha1/crds/caren.nutanix.com_kubeadmclusterconfigs.yaml b/api/v1alpha1/crds/caren.nutanix.com_kubeadmclusterconfigs.yaml index 6b9a908ca..4227361a7 100644 --- a/api/v1alpha1/crds/caren.nutanix.com_kubeadmclusterconfigs.yaml +++ b/api/v1alpha1/crds/caren.nutanix.com_kubeadmclusterconfigs.yaml @@ -134,6 +134,14 @@ spec: minLength: 1 pattern: ^((?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*|\[(?:[a-fA-F0-9:]+)\])(:[0-9]+)?/)?[a-z0-9]+((?:[._]|__|[-]+)[a-z0-9]+)*(/[a-z0-9]+((?:[._]|__|[-]+)[a-z0-9]+)*)*$ type: string + maxParallelImagePullsPerNode: + description: |- + MaxParallelImagePullsPerNode defines the maximum number of parallel image pulls performed by each kubelet. + If not set, the default value of 1 will be used. 
+ If set to 0, the maximum number of parallel image pulls will be unlimited. + format: int32 + minimum: 0 + type: integer type: object type: object served: true diff --git a/api/v1alpha1/crds/caren.nutanix.com_nutanixclusterconfigs.yaml b/api/v1alpha1/crds/caren.nutanix.com_nutanixclusterconfigs.yaml index 32d8624c9..a95bb73c2 100644 --- a/api/v1alpha1/crds/caren.nutanix.com_nutanixclusterconfigs.yaml +++ b/api/v1alpha1/crds/caren.nutanix.com_nutanixclusterconfigs.yaml @@ -733,6 +733,14 @@ spec: minLength: 1 pattern: ^((?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*|\[(?:[a-fA-F0-9:]+)\])(:[0-9]+)?/)?[a-z0-9]+((?:[._]|__|[-]+)[a-z0-9]+)*(/[a-z0-9]+((?:[._]|__|[-]+)[a-z0-9]+)*)*$ type: string + maxParallelImagePullsPerNode: + description: |- + MaxParallelImagePullsPerNode defines the maximum number of parallel image pulls performed by each kubelet. + If not set, the default value of 1 will be used. + If set to 0, the maximum number of parallel image pulls will be unlimited. + format: int32 + minimum: 0 + type: integer ntp: description: NTP defines the NTP configuration for the cluster. properties: diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 96c2a4c47..b5dfcbe9e 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1495,6 +1495,11 @@ func (in *KubeadmClusterConfigSpec) DeepCopyInto(out *KubeadmClusterConfigSpec) *out = new(KubeProxy) **out = **in } + if in.MaxParallelImagePullsPerNode != nil { + in, out := &in.MaxParallelImagePullsPerNode, &out.MaxParallelImagePullsPerNode + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeadmClusterConfigSpec. 
diff --git a/docs/content/customization/generic/parallel-image-pulls.md b/docs/content/customization/generic/parallel-image-pulls.md new file mode 100644 index 000000000..e47a7c9b3 --- /dev/null +++ b/docs/content/customization/generic/parallel-image-pulls.md @@ -0,0 +1,67 @@ ++++ +title = "Parallel Image Pulls" ++++ + +This customization will be available when the +[provider-specific cluster configuration patch]({{< ref "..">}}) is included in the `ClusterClass`. + +The parallel image pull configuration can then be manipulated via the cluster variables. +If the `maxParallelImagePullsPerNode` property is not specified, then the default value of `1` will be used +which is equivalent to serialized image pulls. + +Setting this value to `0` results in unlimited parallel image pulls. + +### Example + +```yaml +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: +spec: + topology: + variables: + - name: clusterConfig + value: + maxParallelImagePullsPerNode: 10 +``` + +Applying this configuration will result in a `KubeletConfiguration` patch being added which will be +applied by `kubeadm` on `init` and `join`: + +- `KubeadmControlPlaneTemplate`: + + - ```yaml + spec: + template: + spec: + kubeadmConfigSpec: + files: + - path: "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json" + owner: "root:root" + permissions: "0644" + content: |- + --- + apiVersion: kubelet.config.k8s.io/v1beta1 + kind: KubeletConfiguration + serializeImagePulls: false + maxParallelImagePulls: 10 + ``` + +- `KubeadmConfigTemplate`: + + - ```yaml + spec: + template: + spec: + files: + - path: "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json" + owner: "root:root" + permissions: "0644" + content: |- + --- + apiVersion: kubelet.config.k8s.io/v1beta1 + kind: KubeletConfiguration + serializeImagePulls: false + maxParallelImagePulls: 10 + ``` diff --git a/pkg/handlers/generic/mutation/handlers.go
b/pkg/handlers/generic/mutation/handlers.go index b5d3615f4..12c749ef5 100644 --- a/pkg/handlers/generic/mutation/handlers.go +++ b/pkg/handlers/generic/mutation/handlers.go @@ -25,6 +25,7 @@ import ( "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation/mirrors" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation/noderegistration" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation/ntp" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation/parallelimagepulls" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation/taints" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation/users" ) @@ -48,6 +49,7 @@ func MetaMutators(mgr manager.Manager) []mutation.MetaMutator { autorenewcerts.NewPatch(), kubeproxymode.NewPatch(), ntp.NewPatch(), + parallelimagepulls.NewPatch(), // Some patches may have changed containerd configuration. 
// We write the configuration changes to disk, and must run a command diff --git a/pkg/handlers/generic/mutation/parallelimagepulls/embedded/kubeletconfigpatch.yaml b/pkg/handlers/generic/mutation/parallelimagepulls/embedded/kubeletconfigpatch.yaml new file mode 100644 index 000000000..35542ca47 --- /dev/null +++ b/pkg/handlers/generic/mutation/parallelimagepulls/embedded/kubeletconfigpatch.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +{{- if gt .MaxParallelImagePullsPerNode 0 }} +maxParallelImagePulls: {{ .MaxParallelImagePullsPerNode }} +{{- end }} diff --git a/pkg/handlers/generic/mutation/parallelimagepulls/inject.go b/pkg/handlers/generic/mutation/parallelimagepulls/inject.go new file mode 100644 index 000000000..da6423240 --- /dev/null +++ b/pkg/handlers/generic/mutation/parallelimagepulls/inject.go @@ -0,0 +1,179 @@ +// Copyright 2025 Nutanix. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package parallelimagepulls + +import ( + "bytes" + "context" + _ "embed" + "fmt" + "text/template" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/v1alpha1" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers/mutation" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/patches" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/patches/selectors" 
+ "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/variables" +) + +const ( + // VariableName is the external patch variable name. + VariableName = "maxParallelImagePullsPerNode" + + kubeletConfigurationPatchFilePath = "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json" +) + +var ( + //go:embed embedded/kubeletconfigpatch.yaml + kubeletConfigPatchYAML []byte + + kubeletConfigPatchTemplate = template.Must(template.New("kubeletConfigPatch").Parse(string(kubeletConfigPatchYAML))) +) + +type maxParallelImagePullsPerNode struct { + variableName string + variableFieldPath []string +} + +func NewPatch() *maxParallelImagePullsPerNode { + return newMaxParallelImagePullsPerNodePatch( + v1alpha1.ClusterConfigVariableName, + VariableName, + ) +} + +func newMaxParallelImagePullsPerNodePatch( + variableName string, + variableFieldPath ...string, +) *maxParallelImagePullsPerNode { + return &maxParallelImagePullsPerNode{ + variableName: variableName, + variableFieldPath: variableFieldPath, + } +} + +func (h *maxParallelImagePullsPerNode) Mutate( + ctx context.Context, + obj *unstructured.Unstructured, + vars map[string]apiextensionsv1.JSON, + holderRef runtimehooksv1.HolderReference, + _ client.ObjectKey, + clusterGetter mutation.ClusterGetter, +) error { + log := ctrl.LoggerFrom(ctx).WithValues( + "holderRef", holderRef, + ) + + maxParallelImagePullsPerNode, err := variables.Get[int32]( + vars, + h.variableName, + h.variableFieldPath..., + ) + if err != nil { + if variables.IsNotFoundError(err) { + log.V(5).Info("max parallel image pulls is not set, skipping mutation") + return nil + } + return err + } + + if maxParallelImagePullsPerNode == 1 { + log.V(5).Info("max parallel image pulls is set to 1, skipping mutation resulting in serialized image pulls") + return nil + } + + log = log.WithValues( + "variableName", + h.variableName, + "variableFieldPath", + h.variableFieldPath, + "variableValue", 
+ maxParallelImagePullsPerNode, + ) + + kubeletConfigPatch, err := templateMaxParallelImagePullsPerNodeConfigFile(maxParallelImagePullsPerNode) + if err != nil { + return err + } + + if err := patches.MutateIfApplicable( + obj, + vars, + &holderRef, + selectors.ControlPlane(), + log, + func(obj *controlplanev1.KubeadmControlPlaneTemplate) error { + log.WithValues( + "patchedObjectKind", obj.GetObjectKind().GroupVersionKind().String(), + "patchedObjectName", client.ObjectKeyFromObject(obj), + ).Info("adding max parallel image pulls patch to control plane kubeadm config spec") + + obj.Spec.Template.Spec.KubeadmConfigSpec.Files = append( + obj.Spec.Template.Spec.KubeadmConfigSpec.Files, + *kubeletConfigPatch, + ) + + return nil + }, + ); err != nil { + return err + } + + if err := patches.MutateIfApplicable( + obj, + vars, + &holderRef, + selectors.WorkersKubeadmConfigTemplateSelector(), + log, + func(obj *bootstrapv1.KubeadmConfigTemplate) error { + log.WithValues( + "patchedObjectKind", obj.GetObjectKind().GroupVersionKind().String(), + "patchedObjectName", client.ObjectKeyFromObject(obj), + ).Info("adding max parallel image pulls patch to worker node kubeadm config template") + + obj.Spec.Template.Spec.Files = append( + obj.Spec.Template.Spec.Files, + *kubeletConfigPatch, + ) + + return nil + }, + ); err != nil { + return err + } + + return nil +} + +// templateMaxParallelImagePullsPerNodeConfigFile adds the max parallel image pulls configuration patch file +// to the KCPTemplate. 
+func templateMaxParallelImagePullsPerNodeConfigFile( + maxParallelImagePullsPerNode int32, +) (*bootstrapv1.File, error) { + templateInput := struct { + MaxParallelImagePullsPerNode int32 + }{ + MaxParallelImagePullsPerNode: maxParallelImagePullsPerNode, + } + var b bytes.Buffer + err := kubeletConfigPatchTemplate.Execute(&b, templateInput) + if err != nil { + return nil, fmt.Errorf("failed executing kubeletconfig patch template: %w", err) + } + + return &bootstrapv1.File{ + Path: kubeletConfigurationPatchFilePath, + Owner: "root:root", + Permissions: "0644", + Content: b.String(), + }, nil +} diff --git a/pkg/handlers/generic/mutation/parallelimagepulls/inject_test.go b/pkg/handlers/generic/mutation/parallelimagepulls/inject_test.go new file mode 100644 index 000000000..07229f6cc --- /dev/null +++ b/pkg/handlers/generic/mutation/parallelimagepulls/inject_test.go @@ -0,0 +1,813 @@ +// Copyright 2025 Nutanix. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package parallelimagepulls + +import ( + "testing" + + . 
"github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/utils/ptr" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/v1alpha1" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers/mutation" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/testutils/capitest" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/testutils/capitest/request" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/test/helpers" +) + +func TestMaxParallelImagePullsPerNodePatch(t *testing.T) { + gomega.RegisterFailHandler(Fail) + RunSpecs(t, "max parallel image pulls mutator suite") +} + +var patchGenerator = func() mutation.GeneratePatches { + return mutation.NewMetaGeneratePatchesHandler("", helpers.TestEnv.Client, NewPatch()).(mutation.GeneratePatches) +} + +var _ = DescribeTable("Generate max parallel image pulls patches", + func(tt capitest.PatchTestDef) { + capitest.AssertGeneratePatches( + GinkgoT(), + patchGenerator, + &tt, + ) + }, + Entry("unset max parallel image pulls defaults to 1 with AWS control plane", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.AWSClusterConfigSpec{}, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, + }), + Entry("unset max parallel image pulls defaults to 1 with Nutanix control plane", 
capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.NutanixClusterConfigSpec{}, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, + }), + Entry("unset max parallel image pulls defaults to 1 with Docker control plane", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.DockerClusterConfigSpec{}, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, + }), + Entry("unset max parallel image pulls defaults to 1 with AWS workers", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.AWSClusterConfigSpec{}, + ), + capitest.VariableWithValue( + runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, 
+ }), + Entry("unset max parallel image pulls defaults to 1 with Nutanix workers", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.NutanixClusterConfigSpec{}, + ), + capitest.VariableWithValue( + runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, + }), + Entry("unset max parallel image pulls defaults to 1 with Docker workers", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.DockerClusterConfigSpec{}, + ), + capitest.VariableWithValue( + runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 1 with AWS control plane", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.AWSClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(1)), + }, + }, + ), + }, + RequestItem: 
request.NewKubeadmControlPlaneTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 1 with Nutanix control plane", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.NutanixClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(1)), + }, + }, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 1 with Docker control plane", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.DockerClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(1)), + }, + }, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 1 with AWS workers", capitest.PatchTestDef{ + Vars: 
[]runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.AWSClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(1)), + }, + }, + ), + capitest.VariableWithValue( + runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 1 with Nutanix workers", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.NutanixClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(1)), + }, + }, + ), + capitest.VariableWithValue( + runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 1 with Docker workers", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.DockerClusterConfigSpec{ + KubeadmClusterConfigSpec: 
v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(1)), + }, + }, + ), + capitest.VariableWithValue( + runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElement( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to unlimited with AWS control plane", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.AWSClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(0)), + }, + }, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +`, + ), + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to unlimited with Nutanix control plane", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.NutanixClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + 
MaxParallelImagePullsPerNode: ptr.To(int32(0)), + }, + }, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +`, + ), + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to unlimited with Docker control plane", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.DockerClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(0)), + }, + }, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +`, + ), + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to unlimited with AWS workers", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.AWSClusterConfigSpec{ + 
KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(0)), + }, + }, + ), + capitest.VariableWithValue( + runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +`, + ), + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to unlimited with Nutanix workers", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.NutanixClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(0)), + }, + }, + ), + capitest.VariableWithValue( + runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 
+kind: KubeletConfiguration +serializeImagePulls: false +`, + ), + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to unlimited with Docker workers", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.DockerClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(0)), + }, + }, + ), + capitest.VariableWithValue( + runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +`, + ), + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 10 with AWS control plane", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.AWSClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(10)), + }, + }, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + 
), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +maxParallelImagePulls: 10 +`, + ), + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 10 with Nutanix control plane", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.NutanixClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(10)), + }, + }, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +maxParallelImagePulls: 10 +`, + ), + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 10 with Docker control plane", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.DockerClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(10)), + }, + }, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + 
gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +maxParallelImagePulls: 10 +`, + ), + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 10 with AWS workers", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.AWSClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(10)), + }, + }, + ), + capitest.VariableWithValue( + runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +maxParallelImagePulls: 10 +`, + ), + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 10 with Nutanix workers", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.NutanixClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(10)), + }, + }, + ), + capitest.VariableWithValue( + 
runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +maxParallelImagePulls: 10 +`, + ), + ), + ), + }, + }, + }), + Entry("max parallel image pulls set to 10 with Docker workers", capitest.PatchTestDef{ + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + v1alpha1.ClusterConfigVariableName, + v1alpha1.DockerClusterConfigSpec{ + KubeadmClusterConfigSpec: v1alpha1.KubeadmClusterConfigSpec{ + MaxParallelImagePullsPerNode: ptr.To(int32(10)), + }, + }, + ), + capitest.VariableWithValue( + runtimehooksv1.BuiltinsName, + apiextensionsv1.JSON{ + Raw: []byte(`{"machineDeployment": {"class": "a-worker"}}`), + }, + ), + }, + RequestItem: request.NewKubeadmConfigTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/files", + ValueMatcher: gomega.ConsistOf( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "path", + "/etc/kubernetes/patches/kubeletconfigurationmaxparallelimagepulls+strategic.json", + ), + gomega.HaveKeyWithValue("owner", "root:root"), + gomega.HaveKeyWithValue("permissions", "0644"), + gomega.HaveKeyWithValue("content", `--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serializeImagePulls: false +maxParallelImagePulls: 10 +`, + ), + ), + ), + }, + }, + }), +) diff --git 
a/pkg/handlers/generic/mutation/parallelimagepulls/variables_test.go b/pkg/handlers/generic/mutation/parallelimagepulls/variables_test.go new file mode 100644 index 000000000..9e91a71dc --- /dev/null +++ b/pkg/handlers/generic/mutation/parallelimagepulls/variables_test.go @@ -0,0 +1,172 @@ +// Copyright 2025 Nutanix. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package parallelimagepulls + +import ( + "encoding/json" + "testing" + + "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/ptr" + + capxv1 "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/external/github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/v1alpha1" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/testutils/capitest" + awsclusterconfig "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/aws/clusterconfig" + dockerclusterconfig "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/docker/clusterconfig" + nutanixclusterconfig "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/nutanix/clusterconfig" +) + +func testDefs[T any](t *testing.T, clusterConfig T) []capitest.VariableTestDef { + t.Helper() + + testDefs := []capitest.VariableTestDef{{ + Name: "unset", + Vals: nil, + }, { + Name: "parallel image pulls unlimited", + Vals: ptr.To[int32](0), + }, { + Name: "parallel image pulls set to 5", + Vals: ptr.To[int32](5), + }, { + Name: "parallel image pulls set to 1", + Vals: ptr.To[int32](1), + }, { + Name: "parallel image pulls set to -1", + Vals: ptr.To[int32](-1), + ExpectError: true, + }} + + g := gomega.NewWithT(t) + + for i := range testDefs { + testDef := &testDefs[i] + + if testDef.Vals != nil { + clusterConfigVal := 
updateParallelImagePulls(g, clusterConfig, testDef.Vals.(*int32)) + testDef.Vals = clusterConfigVal + } else { + testDef.Vals = clusterConfig + } + } + + return testDefs +} + +func updateParallelImagePulls[T any](g gomega.Gomega, clusterConfig T, parallelImagePulls *int32) T { + unmarshalled, err := json.Marshal(clusterConfig) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + var unstr map[string]any + g.Expect(json.Unmarshal(unmarshalled, &unstr)).To(gomega.Succeed()) + + if parallelImagePulls != nil { + err = unstructured.SetNestedField( + unstr, + int64(*parallelImagePulls), + "maxParallelImagePullsPerNode", + ) + } else { + err = unstructured.SetNestedField( + unstr, + nil, + "maxParallelImagePullsPerNode", + ) + } + g.Expect(err).NotTo(gomega.HaveOccurred()) + + unmarshalled, err = json.Marshal(unstr) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + var clusterConfigVal T + g.Expect(json.Unmarshal(unmarshalled, &clusterConfigVal)).To(gomega.Succeed()) + + return clusterConfigVal +} + +func TestVariableValidation_AWS(t *testing.T) { + capitest.ValidateDiscoverVariables( + t, + v1alpha1.ClusterConfigVariableName, + ptr.To(v1alpha1.AWSClusterConfig{}.VariableSchema()), + true, + awsclusterconfig.NewVariable, + testDefs(t, minimalAWSClusterConfigSpec())..., + ) +} + +func minimalAWSClusterConfigSpec() v1alpha1.AWSClusterConfigSpec { + return v1alpha1.AWSClusterConfigSpec{ + ControlPlane: &v1alpha1.AWSControlPlaneSpec{ + AWS: &v1alpha1.AWSControlPlaneNodeSpec{ + InstanceType: "t3.medium", + }, + }, + } +} + +func TestVariableValidation_Docker(t *testing.T) { + capitest.ValidateDiscoverVariables( + t, + v1alpha1.ClusterConfigVariableName, + ptr.To(v1alpha1.DockerClusterConfig{}.VariableSchema()), + true, + dockerclusterconfig.NewVariable, + testDefs(t, minimalDockerClusterConfigSpec())..., + ) +} + +func minimalDockerClusterConfigSpec() v1alpha1.DockerClusterConfigSpec { + return v1alpha1.DockerClusterConfigSpec{ + ControlPlane: &v1alpha1.DockerControlPlaneSpec{ + 
Docker: &v1alpha1.DockerNodeSpec{ + CustomImage: "fake-docker-image", + }, + }, + } +} + +func TestVariableValidation_Nutanix(t *testing.T) { + capitest.ValidateDiscoverVariables( + t, + v1alpha1.ClusterConfigVariableName, + ptr.To(v1alpha1.NutanixClusterConfig{}.VariableSchema()), + true, + nutanixclusterconfig.NewVariable, + testDefs(t, minimalNutanixClusterConfigSpec())..., + ) +} + +func minimalNutanixClusterConfigSpec() v1alpha1.NutanixClusterConfigSpec { + return v1alpha1.NutanixClusterConfigSpec{ + ControlPlane: &v1alpha1.NutanixControlPlaneSpec{ + Nutanix: &v1alpha1.NutanixControlPlaneNodeSpec{ + MachineDetails: v1alpha1.NutanixMachineDetails{ + BootType: capxv1.NutanixBootTypeLegacy, + VCPUSockets: 2, + VCPUsPerSocket: 1, + Image: &capxv1.NutanixResourceIdentifier{ + Type: capxv1.NutanixIdentifierName, + Name: ptr.To("fake-image"), + }, + Cluster: &capxv1.NutanixResourceIdentifier{ + Type: capxv1.NutanixIdentifierName, + Name: ptr.To("fake-pe-cluster"), + }, + MemorySize: resource.MustParse("8Gi"), + SystemDiskSize: resource.MustParse("40Gi"), + Subnets: []capxv1.NutanixResourceIdentifier{ + { + Type: capxv1.NutanixIdentifierName, + Name: ptr.To("fake-subnet"), + }, + }, + }, + }, + }, + } +}