diff --git a/internal/controller/onboarding_controller.go b/internal/controller/onboarding_controller.go index b6fda77..8211e31 100644 --- a/internal/controller/onboarding_controller.go +++ b/internal/controller/onboarding_controller.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "net/http" + "slices" "strings" "time" @@ -30,7 +31,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" k8sclient "sigs.k8s.io/controller-runtime/pkg/client" logger "sigs.k8s.io/controller-runtime/pkg/log" @@ -62,7 +62,6 @@ const ( testAggregateName = "tenant_filter_tests" testProjectName = "test" testDomainName = "cc3test" - testFlavorName = "c_k_c2_m2_v2" testImageName = "cirros-d240801-kvm" testPrefixName = "ohooc-" testVolumeType = "kvm-pilot" @@ -79,6 +78,7 @@ type OnboardingController struct { } // +kubebuilder:rbac:groups=kvm.cloud.sap,resources=hypervisors,verbs=get;list;watch;patch +// +kubebuilder:rbac:groups=kvm.cloud.sap,resources=hypervisors/status,verbs=get;list;watch;patch func (r *OnboardingController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := logger.FromContext(ctx).WithName(req.Name) ctx = logger.IntoContext(ctx, log) @@ -103,9 +103,7 @@ func (r *OnboardingController) Reconcile(ctx context.Context, req ctrl.Request) // We bail here out, because the openstack api is not the best to poll if hv.Status.HypervisorID == "" || hv.Status.ServiceID == "" { - if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - return r.ensureNovaProperties(ctx, hv) - }); err != nil { + if err := r.ensureNovaProperties(ctx, hv); err != nil { if errors.Is(err, errRequeue) { return ctrl.Result{RequeueAfter: defaultWaitTime}, nil } @@ -207,6 +205,22 @@ func (r *OnboardingController) initialOnboarding(ctx context.Context, hv *kvmv1. 
		return fmt.Errorf("failed to agg to test aggregate %w", err)
	}

+	var errs []error
+	for aggregateName, aggregate := range aggs {
+		if aggregateName == testAggregateName || aggregateName == zone {
+			continue
+		}
+		if slices.Contains(aggregate.Hosts, host) {
+			if err := removeFromAggregate(ctx, r.computeClient, aggs, host, aggregateName); err != nil {
+				errs = append(errs, err)
+			}
+		}
+	}
+
+	if len(errs) > 0 {
+		return fmt.Errorf("failed to remove host %v from aggregates due to %w", host, errors.Join(errs...))
+	}
+
 	// The service may be forced down previously due to an HA event,
 	// so we need to ensure it not only enabled, but also not forced to be down.
 	falseVal := false
@@ -469,72 +483,22 @@ func (r *OnboardingController) createOrGetTestServer(ctx context.Context, zone,
 		return foundServer, nil
 	}

-	flavorPages, err := flavors.ListDetail(r.testComputeClient, nil).AllPages(ctx)
-	if err != nil {
-		return nil, err
-	}
-	extractedFlavors, err := flavors.ExtractFlavors(flavorPages)
+	flavorRef, err := r.findTestFlavor(ctx)
 	if err != nil {
 		return nil, err
 	}

-	var flavorRef string
-	for _, flavor := range extractedFlavors {
-		if flavor.Name == testFlavorName {
-			flavorRef = flavor.ID
-			break
-		}
-	}
-
-	if flavorRef == "" {
-		return nil, errors.New("couldn't find flavor")
-	}
-
-	var imageRef string
-
-	imagePages, err := images.List(r.testImageClient, images.ListOpts{Name: testImageName}).AllPages(ctx)
+	imageRef, err := r.findTestImage(ctx)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("could not find test image due to %w", err)
 	}

-	imagesList, err := images.ExtractImages(imagePages)
+	networkRef, err := r.findTestNetwork(ctx)
 	if err != nil {
-		return nil, err
-	}
-
-	for _, image := range imagesList {
-		if image.Name == testImageName {
-			imageRef = image.ID
-			break
-		}
-	}
-
-	if imageRef == "" {
-		return nil, errors.New("couldn't find image")
-	}
-
-	falseVal := false
-	networkPages, err := networks.List(r.testNetworkClient, networks.ListOpts{Shared:
&falseVal}).AllPages(ctx) - if err != nil { - return nil, err - } - - extractedNetworks, err := networks.ExtractNetworks(networkPages) - if err != nil { - return nil, err - } - - var networkRef string - for _, network := range extractedNetworks { - networkRef = network.ID - break - } - - if networkRef == "" { - return nil, errors.New("couldn't find network") + return nil, fmt.Errorf("could not extract network due to %w", err) } - log.Info("creating server", "name", serverName) + log.Info("creating server", "name", serverName, "flavor", flavorRef) server, err := servers.Create(ctx, r.testComputeClient, servers.CreateOpts{ Name: serverName, AvailabilityZone: fmt.Sprintf("%v:%v", zone, computeHost), @@ -566,6 +530,67 @@ func (r *OnboardingController) createOrGetTestServer(ctx context.Context, zone, return server, nil } +func (r *OnboardingController) findTestNetwork(ctx context.Context) (string, error) { + falseVal := false + networkPages, err := networks.List(r.testNetworkClient, networks.ListOpts{Shared: &falseVal}).AllPages(ctx) + if err != nil { + return "", err + } + + extractedNetworks, err := networks.ExtractNetworks(networkPages) + if err != nil { + return "", err + } + + for _, network := range extractedNetworks { + return network.ID, nil + } + + return "", errors.New("couldn't find network") +} + +func (r *OnboardingController) findTestImage(ctx context.Context) (string, error) { + imagePages, err := images.List(r.testImageClient, images.ListOpts{Name: testImageName}).AllPages(ctx) + if err != nil { + return "", err + } + + imagesList, err := images.ExtractImages(imagePages) + if err != nil { + return "", err + } + + for _, image := range imagesList { + if image.Name == testImageName { + return image.ID, nil + } + } + + return "", errors.New("couldn't find image") +} + +func (r *OnboardingController) findTestFlavor(ctx context.Context) (string, error) { + flavorPages, err := flavors.ListDetail(r.testComputeClient, flavors.ListOpts{SortDir: "asc", SortKey: 
"memory_mb"}).AllPages(ctx) + if err != nil { + return "", err + } + + extractedFlavors, err := flavors.ExtractFlavors(flavorPages) + if err != nil { + return "", err + } + + for _, flavor := range extractedFlavors { + _, found := flavor.ExtraSpecs["capabilities:hypervisor_type"] + if !found { + // Flavor does not restrict the hypervisor-type + return flavor.ID, nil + } + } + + return "", errors.New("couldn't find flavor") +} + // SetupWithManager sets up the controller with the Manager. func (r *OnboardingController) SetupWithManager(mgr ctrl.Manager) error { ctx := context.Background() diff --git a/internal/controller/onboarding_controller_test.go b/internal/controller/onboarding_controller_test.go index d3eb8ff..00dc82d 100644 --- a/internal/controller/onboarding_controller_test.go +++ b/internal/controller/onboarding_controller_test.go @@ -19,66 +19,537 @@ package controller import ( "context" + "fmt" + "net/http" + "os" + "github.com/gophercloud/gophercloud/v2/testhelper" + "github.com/gophercloud/gophercloud/v2/testhelper/client" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" kvmv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" ) var _ = Describe("Onboarding Controller", func() { const ( - hypervisorName = "some-test" + region = "test-region" + availabilityZone = "test-az" + hypervisorName = "test-host" + serviceId = "service-id" + hypervisorId = "c48f6247-abe4-4a24-824e-ea39e108874f" + aggregatesBodyInitial = `{ + "aggregates": [ + { + "name": "test-az", + "availability_zone": "test-az", + "deleted": false, + "id": 100001, + "hosts": [] + }, + { + "name": "tenant_filter_tests", + "availability_zone": "", + "deleted": false, + "id": 99, + "hosts": [] + } + ] +}` + + aggregatesBodyUnexpected = `{ + "aggregates": [ + { + "name": "test-az", + "availability_zone": "test-az", + "deleted": false, + "id": 100001, + "hosts": [] + }, + { + "name": "tenant_filter_tests", + "availability_zone": "", + "deleted": false, + "id": 99, + "hosts": [] + }, + { + "name": "unexpected", + "availability_zone": "", + "deleted": false, + "id": -1, + "hosts": ["test-host"] + } + ] +}` + + aggregatesBodySetup = `{ + "aggregates": [ + { + "name": "test-az", + "availability_zone": "test-az", + "deleted": false, + "id": 100001, + "hosts": ["test-host"] + }, + { + "name": "tenant_filter_tests", + "availability_zone": "", + "deleted": false, + "id": 99, + "hosts": ["test-host"] + } + ] +}` + addedHostToAzBody = `{ + "aggregate": { + "name": "test-az", + "availability_zone": "test-az", + "deleted": false, + "hosts": [ + "test-host" + ], + "id": 100001 + } +}` + + addedHostToTestBody = `{ + "aggregate": { + "name": "tenant_filter_tests", + "availability_zone": "", + "deleted": false, + "hosts": [ + "test-host" + ], + "id": 99 + } +}` + + flavorDetailsBody = `{ + "flavors": [ + { + 
"OS-FLV-DISABLED:disabled": false, + "disk": 0, + "OS-FLV-EXT-DATA:ephemeral": 0, + "os-flavor-access:is_public": true, + "id": "1", + "links": [], + "name": "c_k_c2_m2_v2", + "ram": 2048, + "swap": 0, + "vcpus": 2, + "rxtx_factor": 1.0, + "description": null, + "extra_specs": {} + } + ] +}` + imagesBody = `{ + "images": [ + { + "status": "active", + "name": "cirros-d240801-kvm", + "tags": [], + "container_format": "bare", + "created_at": "2014-11-07T17:07:06Z", + "disk_format": "qcow2", + "updated_at": "2014-11-07T17:19:09Z", + "visibility": "public", + "self": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27", + "min_disk": 0, + "protected": false, + "id": "1bea47ed-f6a9-463b-b423-14b9cca9ad27", + "file": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27/file", + "checksum": "64d7c1cd2b6f60c92c14662941cb7913", + "os_hash_algo": "sha512", + "os_hash_value": "...", + "os_hidden": false, + "owner": "5ef70662f8b34079a6eddb8da9d75fe8", + "size": 13167616, + "min_ram": 0, + "schema": "/v2/schemas/image", + "virtual_size": null + } + ] +}` + + networksBody = `{ + "networks": [ + { + "admin_state_up": true, + "id": "network-id", + "name": "net1", + "provider:network_type": "vlan", + "provider:physical_network": "physnet1", + "provider:segmentation_id": 1000, + "router:external": false, + "shared": false, + "status": "ACTIVE", + "subnets": [], + "tenant_id": "project-id", + "project_id": "project-id" + } + ] +}` + + emptyServersBody = `{"servers": [], "servers_links": []}` + + // The body actually doesn't return a status, but it simplifies the test + createServerBody = `{ + "server": { + "id": "server-id", + "status": "ACTIVE" + } +}` ) var ( onboardingReconciler *OnboardingController namespacedName = types.NamespacedName{Name: hypervisorName} - ) - - Context("When reconciling a hypervisor", func() { - ctx := context.Background() //nolint:govet - - reconcileLoop := func(steps int) (res ctrl.Result, err error) { - req := ctrl.Request{NamespacedName: namespacedName} + 
fakeServer testhelper.FakeServer + reconcileReq = ctrl.Request{NamespacedName: namespacedName} + reconcileLoop = func(ctx context.Context, steps int) (err error) { for range steps { - res, err = onboardingReconciler.Reconcile(ctx, req) + _, err = onboardingReconciler.Reconcile(ctx, reconcileReq) if err != nil { return } } return } + ) + + BeforeEach(func() { + By("creating the resource for the Kind Hypervisor") + hv := &kvmv1.Hypervisor{ + ObjectMeta: metav1.ObjectMeta{ + Name: hypervisorName, + Labels: map[string]string{ + corev1.LabelTopologyRegion: region, + corev1.LabelTopologyZone: availabilityZone, + corev1.LabelHostname: hypervisorName, + }, + }, + Spec: kvmv1.HypervisorSpec{ + LifecycleEnabled: true, + }, + } + Expect(k8sClient.Create(ctx, hv)).To(Succeed()) + DeferCleanup(func(ctx context.Context) { + By("Cleanup the specific hypervisor CRO") + Expect(k8sClient.Delete(ctx, hv)).To(Succeed()) + }) + + fakeServer = testhelper.SetupHTTP() + os.Setenv("KVM_HA_SERVICE_URL", fakeServer.Endpoint()+"instance-ha") + + DeferCleanup(func() { + os.Unsetenv("KVM_HA_SERVICE_URL") + fakeServer.Teardown() + }) + + onboardingReconciler = &OnboardingController{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + computeClient: client.ServiceClient(fakeServer), + testComputeClient: client.ServiceClient(fakeServer), + testImageClient: client.ServiceClient(fakeServer), + testNetworkClient: client.ServiceClient(fakeServer), + } + + DeferCleanup(func() { + onboardingReconciler = nil + }) + }) + + Context("initial setup of a new hypervisor", func() { BeforeEach(func() { - onboardingReconciler = &OnboardingController{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - } + fakeServer.Mux.HandleFunc("GET /os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + Expect(fmt.Fprintf(w, HypervisorWithServers, serviceId, "", hypervisorName)).ToNot(BeNil()) + }) - By("creating the 
resource for the Kind Hypervisor") - hv := &kvmv1.Hypervisor{ - ObjectMeta: metav1.ObjectMeta{ - Name: hypervisorName, - }, - Spec: kvmv1.HypervisorSpec{}, - } - Expect(k8sClient.Create(ctx, hv)).To(Succeed()) + fakeServer.Mux.HandleFunc("POST /os-aggregates/100001/action", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, addedHostToAzBody) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("POST /os-aggregates/99/action", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, addedHostToTestBody) + Expect(err).NotTo(HaveOccurred()) + }) - DeferCleanup(func(ctx context.Context) { - By("Cleanup the specific hypervisor CRO") - Expect(client.IgnoreAlreadyExists(k8sClient.Delete(ctx, hv))).To(Succeed()) + fakeServer.Mux.HandleFunc("PUT /os-services/service-id", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprintf(w, `{"service": {"id": "%v", "status": "enabled"}}`, serviceId) + Expect(err).NotTo(HaveOccurred()) }) }) - It("should successfully reconcile the resource", func() { - By("Reconciling the created resource") - _, err := reconcileLoop(1) - Expect(err).NotTo(HaveOccurred()) + When("it is a clean setup", func() { + BeforeEach(func() { + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, aggregatesBodyInitial) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + It("should set the Service- and HypervisorId from Nova", func(ctx SpecContext) { + By("Reconciling the created resource") + err := reconcileLoop(ctx, 1) + Expect(err).NotTo(HaveOccurred()) + hv := &kvmv1.Hypervisor{} + 
Expect(k8sClient.Get(ctx, namespacedName, hv)).To(Succeed())
+				Expect(hv.Status.ServiceID).To(Equal(serviceId))
+				Expect(hv.Status.HypervisorID).To(Equal(hypervisorId))
+			})
+
+			It("should update the status accordingly", func(ctx SpecContext) {
+				By("Reconciling the created resource")
+				err := reconcileLoop(ctx, 2)
+				Expect(err).NotTo(HaveOccurred())
+				hv := &kvmv1.Hypervisor{}
+				Expect(k8sClient.Get(ctx, namespacedName, hv)).To(Succeed())
+				Expect(hv.Status.Conditions).To(ContainElements(
+					SatisfyAll(
+						HaveField("Type", kvmv1.ConditionTypeReady),
+						HaveField("Status", metav1.ConditionFalse),
+						HaveField("Reason", ConditionReasonOnboarding),
+					),
+					SatisfyAll(
+						HaveField("Type", ConditionTypeOnboarding),
+						HaveField("Status", metav1.ConditionTrue),
+						HaveField("Reason", ConditionReasonTesting),
+					),
+				))
+			})
+		})
+
+		When("the host is already in an unexpected aggregate", func() {
+			BeforeEach(func() {
+				fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) {
+					w.Header().Add("Content-Type", "application/json")
+					w.WriteHeader(http.StatusOK)
+					_, err := fmt.Fprint(w, aggregatesBodyUnexpected)
+					Expect(err).NotTo(HaveOccurred())
+				})
+
+				fakeServer.Mux.HandleFunc("POST /os-aggregates/-1/action", func(w http.ResponseWriter, r *http.Request) {
+					w.Header().Add("Content-Type", "application/json")
+					w.WriteHeader(http.StatusOK)
+					_, err := fmt.Fprint(w, addedHostToTestBody)
+					Expect(err).NotTo(HaveOccurred())
+				})
+			})
+
+			It("should set the Service- and HypervisorId from Nova", func(ctx SpecContext) {
+				By("Reconciling the created resource")
+				err := reconcileLoop(ctx, 1)
+				Expect(err).NotTo(HaveOccurred())
+				hv := &kvmv1.Hypervisor{}
+				Expect(k8sClient.Get(ctx, namespacedName, hv)).To(Succeed())
+				Expect(hv.Status.ServiceID).To(Equal(serviceId))
+				Expect(hv.Status.HypervisorID).To(Equal(hypervisorId))
+			})
+
+			It("should update the status accordingly", func(ctx SpecContext) {
+				By("Reconciling the created resource")
+				err :=
reconcileLoop(ctx, 2) + Expect(err).NotTo(HaveOccurred()) + hv := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, namespacedName, hv)).To(Succeed()) + Expect(hv.Status.Conditions).To(ContainElements( + SatisfyAll( + HaveField("Type", kvmv1.ConditionTypeReady), + HaveField("Status", metav1.ConditionFalse), + HaveField("Reason", ConditionReasonOnboarding), + ), + SatisfyAll( + HaveField("Type", ConditionTypeOnboarding), + HaveField("Status", metav1.ConditionTrue), + HaveField("Reason", ConditionReasonTesting), + ), + )) + }) + }) + }) + + Context("running tests after initial setup", func() { + BeforeEach(func() { + hv := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, namespacedName, hv)).To(Succeed()) + hv.Status.HypervisorID = hypervisorId + hv.Status.ServiceID = serviceId + meta.SetStatusCondition(&hv.Status.Conditions, metav1.Condition{ + Type: kvmv1.ConditionTypeReady, + Status: metav1.ConditionFalse, + Reason: ConditionReasonOnboarding, + }) + meta.SetStatusCondition(&hv.Status.Conditions, metav1.Condition{ + Type: ConditionTypeOnboarding, + Status: metav1.ConditionTrue, + Reason: ConditionReasonInitial, + }) + Expect(k8sClient.Status().Update(ctx, hv)).To(Succeed()) + + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, aggregatesBodySetup) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("PUT /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, aggregatesBodySetup) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("PUT /os-services/service-id", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprintf(w, `{"service": {"id": "%v", "status": 
"enabled"}}`, serviceId) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("GET /servers", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, emptyServersBody) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("GET /servers/detail", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, emptyServersBody) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("POST /os-aggregates/99/action", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, addedHostToTestBody) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("POST /instance-ha", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, `{}`) + Expect(err).NotTo(HaveOccurred()) + }) + + // Only needed for mocking the test + fakeServer.Mux.HandleFunc("GET /flavors/detail", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, flavorDetailsBody) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("GET /images", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, imagesBody) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("GET /networks", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, networksBody) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("POST 
/servers", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, createServerBody) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("POST /servers/server-id/action", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprintf(w, `{"output": "FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE\nohooc--%v-%v\n"}`, hv.Name, hv.UID) + Expect(err).NotTo(HaveOccurred()) + + }) + fakeServer.Mux.HandleFunc("DELETE /servers/server-id", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusAccepted) + }) + }) + + When("SkipTests is set to true", func() { + BeforeEach(func() { + hv := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, namespacedName, hv)).To(Succeed()) + hv.Spec.SkipTests = true + Expect(k8sClient.Update(ctx, hv)).To(Succeed()) + }) + + It("should update the conditions", func() { + By("Reconciling the created resource") + err := reconcileLoop(ctx, 3) + Expect(err).NotTo(HaveOccurred()) + hv := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, namespacedName, hv)).To(Succeed()) + Expect(hv.Status.Conditions).To(ContainElements( + SatisfyAll( + HaveField("Type", kvmv1.ConditionTypeReady), + HaveField("Status", metav1.ConditionTrue), + ), + SatisfyAll( + HaveField("Type", ConditionTypeOnboarding), + HaveField("Status", metav1.ConditionFalse), + HaveField("Reason", ConditionReasonCompleted), + ), + )) + }) + }) + When("SkipTests is set to false", func() { + BeforeEach(func() { + hv := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, namespacedName, hv)).To(Succeed()) + hv.Spec.SkipTests = false + Expect(k8sClient.Update(ctx, hv)).To(Succeed()) + }) + + It("should update the conditions", func() { + By("Reconciling the created resource") + err := reconcileLoop(ctx, 3) + Expect(err).NotTo(HaveOccurred()) + hv := &kvmv1.Hypervisor{} + 
Expect(k8sClient.Get(ctx, namespacedName, hv)).To(Succeed()) + Expect(hv.Status.Conditions).To(ContainElements( + SatisfyAll( + HaveField("Type", kvmv1.ConditionTypeReady), + HaveField("Status", metav1.ConditionTrue), + ), + SatisfyAll( + HaveField("Type", ConditionTypeOnboarding), + HaveField("Status", metav1.ConditionFalse), + HaveField("Reason", ConditionReasonCompleted), + ), + )) + }) + }) + }) })