|
| 1 | +package e2e |
| 2 | + |
| 3 | +import ( |
| 4 | + "context" |
| 5 | + "fmt" |
| 6 | + |
| 7 | + . "github.com/onsi/ginkgo/v2" |
| 8 | + . "github.com/onsi/gomega" |
| 9 | + "github.com/onsi/gomega/types" |
| 10 | + configv1 "github.com/openshift/api/config/v1" |
| 11 | + machinev1beta1 "github.com/openshift/api/machine/v1beta1" |
| 12 | + capiframework "github.com/openshift/cluster-capi-operator/e2e/framework" |
| 13 | + corev1 "k8s.io/api/core/v1" |
| 14 | + awsv1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" |
| 15 | + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" |
| 16 | + "sigs.k8s.io/controller-runtime/pkg/client" |
| 17 | + "sigs.k8s.io/controller-runtime/pkg/envtest/komega" |
| 18 | + |
| 19 | + mapiframework "github.com/openshift/cluster-api-actuator-pkg/pkg/framework" |
| 20 | + |
| 21 | + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 22 | +) |
| 23 | + |
// Machine Migration e2e specs: create MAPI Machines whose spec.authoritativeAPI
// is ClusterAPI and verify the resulting status.authoritativeAPI value, the
// Paused conditions on both the MAPI and CAPI sides, and (pending
// OCPBUGS-54703) the Synchronized condition.
//
// The suite only runs on AWS — the machine-creation helpers below clone
// AWSMachine templates — and only when the MachineAPIMigration feature gate
// is enabled; otherwise every spec is skipped.
var _ = Describe("[sig-cluster-lifecycle][OCPFeatureGate:MachineAPIMigration] Machine Migration Tests", Ordered, func() {
	BeforeAll(func() {
		// platform, ctx, and cl are package-level fixtures initialised
		// elsewhere in this test package.
		if platform != configv1.AWSPlatformType {
			Skip(fmt.Sprintf("Skipping tests on %s, this only support on aws", platform))
		}

		if !capiframework.IsMachineAPIMigrationEnabled(ctx, cl) {
			Skip("Skipping, this feature is only supported on MachineAPIMigration enabled clusters")
		}
	})

	var _ = Describe("Machine Creation", Ordered, func() {
		// Shared by both Contexts below. Because the containers are Ordered,
		// later specs rely on values assigned by earlier ones (e.g. the
		// CAPI-mirror lookup populating newCapiMachine for the final spec).
		var machineNameCAPI = "machine-auth-capi-creation"
		var newCapiMachine *clusterv1.Machine
		var newMapiMachine *machinev1beta1.Machine
		var err error

		// Case 1: a CAPI Machine with the target name already exists, then a
		// MAPI Machine with authoritativeAPI=ClusterAPI is created over it.
		// NOTE(review): BeforeAll is normally only valid inside an Ordered
		// container; these Contexts are not themselves marked Ordered —
		// confirm Ginkgo accepts inheriting Ordered from the parent here.
		Context("when existing CAPI Machine with same name should allow creating the MAPI Machine with specAPI: CAPI", func() {
			BeforeAll(func() {
				newCapiMachine = createCAPIMachine(ctx, cl, machineNameCAPI)
				newMapiMachine = createMAPIMachineWithAuthority(ctx, cl, machineNameCAPI, machinev1beta1.MachineAuthorityClusterAPI)

				// Registered inside BeforeAll so cleanup runs once, after all
				// specs in this Context have finished.
				DeferCleanup(func() {
					By("Cleaning up machine resources")
					cleanupMachineResources(
						ctx,
						cl,
						[]*clusterv1.Machine{newCapiMachine},
						[]*machinev1beta1.Machine{newMapiMachine},
					)
				})
			})

			It("should verify MAPI Machine .status.authoritativeAPI to equal CAPI", func() {
				verifyMachineAuthoritative(newMapiMachine, machinev1beta1.MachineAuthorityClusterAPI)
			})
			// there is a bug for this https://issues.redhat.com/browse/OCPBUGS-54703
			// PIt keeps the spec pending until the bug is fixed.
			PIt("should verify MAPI Machine Synchronized condition is True", func() {
				verifyMachineSynchronizedCondition(newMapiMachine, machinev1beta1.MachineAuthorityClusterAPI)
			})
			It("should verify MAPI Machine Paused condition is True", func() {
				verifyMAPIMachinePausedCondition(newMapiMachine, machinev1beta1.MachineAuthorityClusterAPI)
			})
			It("should verify CAPI Machine Paused condition is False", func() {
				verifyCAPIMachinePausedCondition(newCapiMachine, machinev1beta1.MachineAuthorityClusterAPI)
			})
		})

		// Case 2: no pre-existing CAPI Machine; creating the MAPI Machine
		// with authoritativeAPI=ClusterAPI should cause an authoritative CAPI
		// mirror to be created by the migration controllers.
		Context("when no existing CAPI Machine with same name should allow creating the MAPI Machine with specAPI: CAPI", func() {
			BeforeAll(func() {
				newMapiMachine = createMAPIMachineWithAuthority(ctx, cl, machineNameCAPI, machinev1beta1.MachineAuthorityClusterAPI)

				DeferCleanup(func() {
					By("Cleaning up machine resources")
					// NOTE(review): the controller-created CAPI mirror is not
					// listed for deletion here — confirm deleting the MAPI
					// Machine also removes its mirror.
					cleanupMachineResources(
						ctx,
						cl,
						[]*clusterv1.Machine{},
						[]*machinev1beta1.Machine{newMapiMachine},
					)
				})
			})

			It("should verify CAPI Machine get Running", func() {
				verifyMachineRunning(cl, newMapiMachine.Name, machinev1beta1.MachineAuthorityClusterAPI)
			})

			It("should verify MAPI Machine .status.authoritativeAPI to equal CAPI", func() {
				verifyMachineAuthoritative(newMapiMachine, machinev1beta1.MachineAuthorityClusterAPI)
			})
			// there is a bug for this https://issues.redhat.com/browse/OCPBUGS-54703
			PIt("should verify MAPI Machine Synchronized condition is True", func() {
				verifyMachineSynchronizedCondition(newMapiMachine, machinev1beta1.MachineAuthorityClusterAPI)
			})
			It("should verify MAPI Machine Paused condition is True", func() {
				verifyMAPIMachinePausedCondition(newMapiMachine, machinev1beta1.MachineAuthorityClusterAPI)
			})

			It("should verify that the non-authoritative MAPI Machine has an authoritative CAPI Machine mirror", func() {
				// Also populates newCapiMachine for the following spec;
				// relies on Ordered execution.
				Eventually(func() error {
					newCapiMachine, err = capiframework.GetMachine(cl, machineNameCAPI, capiframework.CAPINamespace)
					return err
				}, capiframework.WaitMedium, capiframework.RetryMedium).Should(Succeed(), "CAPI Machine should exist")
			})

			It("should verify CAPI Machine Paused condition is False", func() {
				verifyCAPIMachinePausedCondition(newCapiMachine, machinev1beta1.MachineAuthorityClusterAPI)
			})
		})
	})
})
| 115 | + |
| 116 | +func createCAPIMachine(ctx context.Context, cl client.Client, machineName string) *clusterv1.Machine { |
| 117 | + capiMachineList, err := capiframework.GetMachines(cl) |
| 118 | + Expect(err).NotTo(HaveOccurred(), "Failed to list capi machines") |
| 119 | + // The test requires at least one existing capi machine to act as a template. |
| 120 | + Expect(capiMachineList).NotTo(BeEmpty(), "No capi machines found in the openshift-cluster-api namespace to use as a template") |
| 121 | + |
| 122 | + // Select the first machine from the list as our template. |
| 123 | + templateCapiMachine := capiMachineList[0] |
| 124 | + By(fmt.Sprintf("Using capi machine %s as a template", templateCapiMachine.Name)) |
| 125 | + |
| 126 | + // Define the new machine based on the template. |
| 127 | + newCapiMachine := &clusterv1.Machine{ |
| 128 | + ObjectMeta: metav1.ObjectMeta{ |
| 129 | + Name: machineName, |
| 130 | + Namespace: templateCapiMachine.Namespace, |
| 131 | + }, |
| 132 | + Spec: *templateCapiMachine.Spec.DeepCopy(), |
| 133 | + } |
| 134 | + |
| 135 | + // Clear status and other instance-specific fields that should not be copied. |
| 136 | + newCapiMachine.Spec.ProviderID = nil |
| 137 | + newCapiMachine.Spec.InfrastructureRef.Name = machineName |
| 138 | + newCapiMachine.ObjectMeta.Labels = nil |
| 139 | + newCapiMachine.Status = clusterv1.MachineStatus{} |
| 140 | + |
| 141 | + By(fmt.Sprintf("Creating a new capi machine in namespace: %s", newCapiMachine.Namespace)) |
| 142 | + Expect(cl.Create(ctx, newCapiMachine)).To(Succeed()) |
| 143 | + |
| 144 | + templateAWSMachine, err := capiframework.GetAWSMachine(cl, templateCapiMachine.Name, capiframework.CAPINamespace) |
| 145 | + Expect(err).NotTo(HaveOccurred(), "Failed to get AWSMachine") |
| 146 | + // Define the new awsmachine based on the template. |
| 147 | + newAWSMachine := &awsv1.AWSMachine{ |
| 148 | + ObjectMeta: metav1.ObjectMeta{ |
| 149 | + Name: machineName, |
| 150 | + Namespace: templateAWSMachine.Namespace, |
| 151 | + }, |
| 152 | + Spec: *templateAWSMachine.Spec.DeepCopy(), |
| 153 | + } |
| 154 | + |
| 155 | + // Clear status and other instance-specific fields that should not be copied. |
| 156 | + newAWSMachine.Spec.ProviderID = nil |
| 157 | + newAWSMachine.Spec.InstanceID = nil |
| 158 | + newAWSMachine.ObjectMeta.Labels = nil |
| 159 | + newAWSMachine.Status = awsv1.AWSMachineStatus{} |
| 160 | + |
| 161 | + By(fmt.Sprintf("Creating a new awsmachine in namespace: %s", newAWSMachine.Namespace)) |
| 162 | + Expect(cl.Create(ctx, newAWSMachine)).To(Succeed()) |
| 163 | + |
| 164 | + verifyMachineRunning(cl, newCapiMachine.Name, machinev1beta1.MachineAuthorityClusterAPI) |
| 165 | + |
| 166 | + return newCapiMachine |
| 167 | +} |
| 168 | + |
| 169 | +func createMAPIMachineWithAuthority(ctx context.Context, cl client.Client, machineName string, authority machinev1beta1.MachineAuthority) *machinev1beta1.Machine { |
| 170 | + workerLabelSelector := metav1.LabelSelector{ |
| 171 | + MatchLabels: map[string]string{ |
| 172 | + "machine.openshift.io/cluster-api-machine-role": "worker", |
| 173 | + }, |
| 174 | + } |
| 175 | + machineList, err := mapiframework.GetMachines(ctx, cl, &workerLabelSelector) |
| 176 | + |
| 177 | + Expect(err).NotTo(HaveOccurred(), "Failed to list mapi machines") |
| 178 | + // The test requires at least one existing mapi machine to act as a template. |
| 179 | + Expect(machineList).NotTo(BeEmpty(), "No mapi machines found in the openshift-machine-api namespace to use as a template") |
| 180 | + |
| 181 | + // Select the first machine from the list as our template. |
| 182 | + templateMachine := machineList[0] |
| 183 | + By(fmt.Sprintf("Using mapi machine %s as a template", templateMachine.Name)) |
| 184 | + |
| 185 | + // Define the new machine based on the template. |
| 186 | + newMachine := &machinev1beta1.Machine{ |
| 187 | + ObjectMeta: metav1.ObjectMeta{ |
| 188 | + Name: machineName, |
| 189 | + Namespace: templateMachine.Namespace, |
| 190 | + }, |
| 191 | + Spec: *templateMachine.Spec.DeepCopy(), |
| 192 | + } |
| 193 | + |
| 194 | + // Clear status and other instance-specific fields that should not be copied. |
| 195 | + newMachine.Spec.ProviderID = nil |
| 196 | + newMachine.ObjectMeta.Labels = nil |
| 197 | + newMachine.Status = machinev1beta1.MachineStatus{} |
| 198 | + newMachine.Spec.AuthoritativeAPI = authority |
| 199 | + By(fmt.Sprintf("Creating a new %s machine in namespace: %s", authority, newMachine.Namespace)) |
| 200 | + Expect(cl.Create(ctx, newMachine)).To(Succeed()) |
| 201 | + |
| 202 | + return newMachine |
| 203 | +} |
| 204 | + |
| 205 | +func verifyMachineRunning(cl client.Client, machineName string, authority machinev1beta1.MachineAuthority) { |
| 206 | + Eventually(func() string { |
| 207 | + switch authority { |
| 208 | + case machinev1beta1.MachineAuthorityClusterAPI: |
| 209 | + By("Verify the CAPI Machine is Running") |
| 210 | + capiMachine, err := capiframework.GetMachine(cl, machineName, capiframework.CAPINamespace) |
| 211 | + if err != nil { |
| 212 | + return "" |
| 213 | + } |
| 214 | + return string(capiMachine.Status.Phase) |
| 215 | + case machinev1beta1.MachineAuthorityMachineAPI: |
| 216 | + By("Verify the MAPI Machine is Running") |
| 217 | + mapiMachine, err := mapiframework.GetMachine(cl, machineName) |
| 218 | + if err != nil { |
| 219 | + return "" |
| 220 | + } |
| 221 | + return string(*mapiMachine.Status.Phase) |
| 222 | + default: |
| 223 | + Fail(fmt.Sprintf("unknown authoritativeAPI type: %v", authority)) |
| 224 | + return "" |
| 225 | + } |
| 226 | + |
| 227 | + }, capiframework.WaitLong, capiframework.RetryLong).Should(Equal("Running"), "%s Machine did not get Running", authority) |
| 228 | +} |
| 229 | + |
| 230 | +func verifyMachineAuthoritative(mapiMachine *machinev1beta1.Machine, authority machinev1beta1.MachineAuthority) { |
| 231 | + By(fmt.Sprintf("Verify the Machine authoritative is %s", authority)) |
| 232 | + Eventually(komega.Object(mapiMachine), capiframework.WaitMedium, capiframework.RetryMedium).Should( |
| 233 | + HaveField("Status.AuthoritativeAPI", Equal(authority)), |
| 234 | + fmt.Sprintf("Expected Machine with correct status.AuthoritativeAPI %s", authority), |
| 235 | + ) |
| 236 | +} |
| 237 | + |
| 238 | +func verifyMachineSynchronizedCondition(mapiMachine *machinev1beta1.Machine, authority machinev1beta1.MachineAuthority) { |
| 239 | + By("Verify the MAPI Machine synchronized condition is True") |
| 240 | + var expectedMessage string |
| 241 | + switch authority { |
| 242 | + case machinev1beta1.MachineAuthorityMachineAPI: |
| 243 | + expectedMessage = "Successfully synchronized MAPI Machine to CAPI" |
| 244 | + case machinev1beta1.MachineAuthorityClusterAPI: |
| 245 | + expectedMessage = "Successfully synchronized CAPI Machine to MAPI" |
| 246 | + default: |
| 247 | + Fail(fmt.Sprintf("unknown authoritativeAPI type: %v", authority)) |
| 248 | + } |
| 249 | + |
| 250 | + Eventually(komega.Object(mapiMachine), capiframework.WaitMedium, capiframework.RetryMedium).Should( |
| 251 | + WithTransform( |
| 252 | + func(m *machinev1beta1.Machine) []machinev1beta1.Condition { |
| 253 | + return m.Status.Conditions |
| 254 | + }, |
| 255 | + ContainElement( |
| 256 | + SatisfyAll( |
| 257 | + HaveField("Type", Equal(SynchronizedCondition)), |
| 258 | + HaveField("Status", Equal(corev1.ConditionTrue)), |
| 259 | + HaveField("Reason", Equal("ResourceSynchronized")), |
| 260 | + HaveField("Message", Equal(expectedMessage)), |
| 261 | + ), |
| 262 | + ), |
| 263 | + ), |
| 264 | + fmt.Sprintf("Expected Synchronized condition for %s not found or incorrect", authority), |
| 265 | + ) |
| 266 | +} |
| 267 | + |
| 268 | +func verifyMAPIMachinePausedCondition(mapiMachine *machinev1beta1.Machine, authority machinev1beta1.MachineAuthority) { |
| 269 | + var conditionMatcher types.GomegaMatcher |
| 270 | + |
| 271 | + switch authority { |
| 272 | + case machinev1beta1.MachineAuthorityMachineAPI: |
| 273 | + By("Verify the MAPI Machine is Unpaused") |
| 274 | + conditionMatcher = SatisfyAll( |
| 275 | + HaveField("Type", Equal(MAPIPausedCondition)), |
| 276 | + HaveField("Status", Equal(corev1.ConditionFalse)), |
| 277 | + HaveField("Reason", Equal("AuthoritativeAPIMachineAPI")), |
| 278 | + HaveField("Message", ContainSubstring("MachineAPI")), |
| 279 | + ) |
| 280 | + case machinev1beta1.MachineAuthorityClusterAPI: |
| 281 | + By("Verify the MAPI Machine is Paused") |
| 282 | + conditionMatcher = SatisfyAll( |
| 283 | + HaveField("Type", Equal(MAPIPausedCondition)), |
| 284 | + HaveField("Status", Equal(corev1.ConditionTrue)), |
| 285 | + HaveField("Reason", Equal("AuthoritativeAPINotMachineAPI")), |
| 286 | + HaveField("Message", ContainSubstring("ClusterAPI")), |
| 287 | + ) |
| 288 | + default: |
| 289 | + Fail(fmt.Sprintf("unknown authoritativeAPI type: %v", authority)) |
| 290 | + } |
| 291 | + |
| 292 | + Eventually(komega.Object(mapiMachine), capiframework.WaitMedium, capiframework.RetryMedium).Should( |
| 293 | + HaveField("Status.Conditions", ContainElement(conditionMatcher)), |
| 294 | + fmt.Sprintf("Expected MAPI Machine with correct paused condition for %s", authority), |
| 295 | + ) |
| 296 | +} |
| 297 | + |
| 298 | +func verifyCAPIMachinePausedCondition(capiMachine *clusterv1.Machine, authority machinev1beta1.MachineAuthority) { |
| 299 | + var conditionMatcher types.GomegaMatcher |
| 300 | + |
| 301 | + switch authority { |
| 302 | + case machinev1beta1.MachineAuthorityClusterAPI: |
| 303 | + By("Verify the CAPI Machine is Unpaused") |
| 304 | + conditionMatcher = SatisfyAll( |
| 305 | + HaveField("Type", Equal(CAPIPausedCondition)), |
| 306 | + HaveField("Status", Equal(metav1.ConditionFalse)), |
| 307 | + HaveField("Reason", Equal("NotPaused")), |
| 308 | + ) |
| 309 | + case machinev1beta1.MachineAuthorityMachineAPI: |
| 310 | + By("Verify the CAPI Machine is Paused") |
| 311 | + conditionMatcher = SatisfyAll( |
| 312 | + HaveField("Type", Equal(CAPIPausedCondition)), |
| 313 | + HaveField("Status", Equal(metav1.ConditionTrue)), |
| 314 | + HaveField("Reason", Equal("Paused")), |
| 315 | + ) |
| 316 | + default: |
| 317 | + Fail(fmt.Sprintf("unknown authoritativeAPI type: %v", authority)) |
| 318 | + } |
| 319 | + |
| 320 | + Eventually(komega.Object(capiMachine), capiframework.WaitMedium, capiframework.RetryMedium).Should( |
| 321 | + HaveField("Status.V1Beta2.Conditions", ContainElement(conditionMatcher)), |
| 322 | + fmt.Sprintf("Expected CAPI Machine with correct paused condition for %s", authority), |
| 323 | + ) |
| 324 | +} |
| 325 | + |
| 326 | +func cleanupMachineResources(ctx context.Context, cl client.Client, capiMachines []*clusterv1.Machine, mapiMachines []*machinev1beta1.Machine) { |
| 327 | + for _, m := range capiMachines { |
| 328 | + if m == nil { |
| 329 | + continue |
| 330 | + } |
| 331 | + By(fmt.Sprintf("Deleting CAPI Machine %s", m.Name)) |
| 332 | + capiframework.DeleteMachines(cl, capiframework.CAPINamespace, m) |
| 333 | + } |
| 334 | + |
| 335 | + for _, m := range mapiMachines { |
| 336 | + if m == nil { |
| 337 | + continue |
| 338 | + } |
| 339 | + By(fmt.Sprintf("Deleting MAPI Machine %s", m.Name)) |
| 340 | + mapiframework.DeleteMachines(ctx, cl, m) |
| 341 | + mapiframework.WaitForMachinesDeleted(cl, m) |
| 342 | + } |
| 343 | +} |
0 commit comments