package extended

import (
	"context"
	"fmt"
	"path/filepath"
	"time"

	osconfigv1 "github.com/openshift/api/config/v1"
	machineclient "github.com/openshift/client-go/machine/clientset/versioned"

	g "github.com/onsi/ginkgo/v2"
	o "github.com/onsi/gomega"
	exutil "github.com/openshift/machine-config-operator/test/extended/util"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// This test suite is [Serial] because each test modifies the cluster-wide MachineConfiguration (machineconfigurations.operator.openshift.io) object.
var _ = g.Describe("[sig-mco][Suite:openshift/machine-config-operator/disruptive][Serial][Disruptive][OCPFeatureGate:ManagedBootImagesAzure]", g.Ordered, func() {
	defer g.GinkgoRecover()
	var (
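		// Fixtures for the ManagedBootImages configuration exercised by these tests: opt in
		// all MachineSets, only a labeled subset (test=boot), or none, plus an empty
		// configuration used to clear boot image management between tests.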
		AllMachineSetFixture     = filepath.Join("machineconfigurations", "managedbootimages-all.yaml")
		NoneMachineSetFixture    = filepath.Join("machineconfigurations", "managedbootimages-none.yaml")
		PartialMachineSetFixture = filepath.Join("machineconfigurations", "managedbootimages-partial.yaml")
		EmptyMachineSetFixture   = filepath.Join("machineconfigurations", "managedbootimages-empty.yaml")

		oc = exutil.NewCLI("mco-bootimage", exutil.KubeConfigPath()).AsAdmin()
	)

	g.BeforeEach(func() {
		// Skip this test if not on Azure platform
		skipUnlessTargetPlatform(oc, osconfigv1.AzurePlatformType)
		// Skip this test if the cluster is not using MachineAPI
		skipUnlessFunctionalMachineAPI(oc)
		// Skip this test on single node platforms
		skipOnSingleNodeTopology(oc)
	})

	g.AfterEach(func() {
		// Clear out boot image configuration between tests
		applyMachineConfigurationFixture(oc, EmptyMachineSetFixture)
	})

	g.It("Should update boot images only on MachineSets that are opted in [apigroup:machineconfiguration.openshift.io]", func() {
		PartialMachineSetTest(oc, PartialMachineSetFixture)
	})

	g.It("Should update boot images on all MachineSets when configured [apigroup:machineconfiguration.openshift.io]", func() {
		AllMachineSetTest(oc, AllMachineSetFixture)
	})

	g.It("Should not update boot images on any MachineSet when not configured [apigroup:machineconfiguration.openshift.io]", func() {
		NoneMachineSetTest(oc, NoneMachineSetFixture)
	})

	g.It("Should stamp coreos-bootimages configmap with current MCO hash and release version [apigroup:machineconfiguration.openshift.io]", func() {
		EnsureConfigMapStampTest(oc)
	})

	g.It("Should update boot images on an Azure MachineSet with a legacy boot image and scale successfully [apigroup:machineconfiguration.openshift.io]", func() {
		AzureLegacyBootImageTest(oc, PartialMachineSetFixture)
	})
})

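// AzureLegacyBootImageTest opts a randomly chosen MachineSet into boot image management via
// the supplied fixture, patches its providerSpec to a legacy boot image, waits for the boot
// image controller to reconcile it, and then verifies that the legacy image has been replaced
// and that the MachineSet can still scale up to a ready node.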
func AzureLegacyBootImageTest(oc *exutil.CLI, fixture string) {
	// This fixture applies a boot image update configuration that opts in any machineset with the label test=boot
	applyMachineConfigurationFixture(oc, fixture)

	// Pick a random machineset to test
	machineClient, err := machineclient.NewForConfig(oc.KubeFramework().ClientConfig())
	o.Expect(err).NotTo(o.HaveOccurred())
	machineSetUnderTest := getRandomMachineSet(machineClient)
	framework.Logf("MachineSet under test: %s", machineSetUnderTest.Name)

	// Label this machineset with the test=boot label
	err = oc.Run("label").Args(MAPIMachinesetQualifiedName, machineSetUnderTest.Name, "-n", MAPINamespace, "test=boot").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	defer func() {
		// Unlabel the machineset at the end of the test
		err = oc.Run("label").Args(MAPIMachinesetQualifiedName, machineSetUnderTest.Name, "-n", MAPINamespace, "test-").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}()

	// Set machineset under test to a legacy boot image
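	// generateLegacyAzureProviderSpecPatch is expected to return JSON patches that swap the
	// machineset's image to a known legacy boot image and back, along with both image
	// identifiers so the swap and the controller's update can be verified below.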
	newProviderSpecPatch, originalProviderSpecPatch, legacyBootImage, originalBootImage := generateLegacyAzureProviderSpecPatch(machineSetUnderTest)
	err = oc.Run("patch").Args(MAPIMachinesetQualifiedName, machineSetUnderTest.Name, "-p", newProviderSpecPatch, "-n", MAPINamespace, "--type=json").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	defer func() {
		// Restore the machineset to its original boot image regardless of success/failure, as it may be reused by other test variants
		err = oc.Run("patch").Args(MAPIMachinesetQualifiedName, machineSetUnderTest.Name, "-p", originalProviderSpecPatch, "-n", MAPINamespace, "--type=json").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		framework.Logf("Restored boot image in machineset %s to \"%s\"", machineSetUnderTest.Name, originalBootImage)
	}()

	// Ensure boot image controller is not progressing
	framework.Logf("Waiting until the boot image controller is not progressing...")
	waitForBootImageControllerToComplete(oc)

	// Fetch the providerSpec of the machineset under test again
	providerSpec, err := oc.Run("get").Args(MAPIMachinesetQualifiedName, machineSetUnderTest.Name, "-o", "template", "--template=`{{.spec.template.spec.providerSpec.value}}`", "-n", MAPINamespace).Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	// Verify that the machineset does not have the legacy boot image
	o.Expect(providerSpec).ShouldNot(o.ContainSubstring(legacyBootImage))

	// Get current set of ready nodes
	nodes, err := getReadyNodes(oc)
	o.Expect(err).NotTo(o.HaveOccurred())

	// Scale up machineset under test
	err = oc.Run("scale").Args(MAPIMachinesetQualifiedName, machineSetUnderTest.Name, "-n", MAPINamespace, fmt.Sprintf("--replicas=%d", *machineSetUnderTest.Spec.Replicas+1)).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	framework.Logf("Waiting for scale-up to complete...")
	// Wait for the newly created machine to become available
	o.Eventually(func() bool {
		machineset, err := machineClient.MachineV1beta1().MachineSets(MAPINamespace).Get(context.TODO(), machineSetUnderTest.Name, metav1.GetOptions{})
		if err != nil {
			framework.Logf("%v", err)
			return false
		}
		return machineset.Status.AvailableReplicas == *machineSetUnderTest.Spec.Replicas+1
	}, 15*time.Minute, 10*time.Second).Should(o.BeTrue())

	defer func() {
		// Scale down the machineset at the end of the test
		err = oc.Run("scale").Args(MAPIMachinesetQualifiedName, machineSetUnderTest.Name, "-n", MAPINamespace, fmt.Sprintf("--replicas=%d", *machineSetUnderTest.Spec.Replicas)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// Wait for scale-down to complete
		framework.Logf("Waiting for scale-down to complete...")
		o.Eventually(func() bool {
			machineset, err := machineClient.MachineV1beta1().MachineSets(MAPINamespace).Get(context.TODO(), machineSetUnderTest.Name, metav1.GetOptions{})
			if err != nil {
				framework.Logf("%v", err)
				return false
			}
			return machineset.Status.AvailableReplicas == *machineSetUnderTest.Spec.Replicas
		}, 15*time.Minute, 10*time.Second).Should(o.BeTrue())
	}()

	// Retrieve aleph version from new node
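	// The aleph version is stamped on a CoreOS node at install time, so it reflects the boot
	// image the node was provisioned from rather than any later in-place OS updates.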
	var alephVersion string
	o.Eventually(func() bool {
		// Grab newly scaled up node by diffing against the old set of nodes
		newNodes, err := getReadyNodes(oc)
		if err != nil {
			return false
		}
		scaledUpNode := newNodes.Difference(nodes)
		scaledUpNodeName, scaledUpNodeReady := scaledUpNode.PopAny()
		if !scaledUpNodeReady {
			return false
		}

		// Log aleph version from the new node
		framework.Logf("Newly scaled up node: %v", scaledUpNodeName)
		alephVersion, err = getAlephVersionFromNode(oc, scaledUpNodeName)
		if err != nil {
			framework.Logf("Failed to get aleph version from node %s: %v", scaledUpNodeName, err)
			return false
		}

		framework.Logf("CoreOS aleph version from node %s: %s", scaledUpNodeName, alephVersion)
		return true
	}, 3*time.Minute, 3*time.Second).Should(o.BeTrue())

	// Get the current release boot image for this architecture; this should match the
	// aleph version above
	arch, err := getArchFromMachineSet(&machineSetUnderTest)
	o.Expect(err).NotTo(o.HaveOccurred())

	releaseBootImageVersion, err := getReleaseBootImageVersion(oc, arch)
	o.Expect(err).NotTo(o.HaveOccurred())
	framework.Logf("Current release's boot image version: %s", releaseBootImageVersion)

	// TODO: Uncomment when https://issues.redhat.com/browse/CORS-3915 lands, so that drifts between
	// rhcos.json & rhcos-marketplace.json are resolved.
	// For now, a successful scale-up can be considered a success.
	// o.Expect(alephVersion).To(o.Equal(releaseBootImageVersion))
}