diff --git a/api/v1beta1/tempest_types.go b/api/v1beta1/tempest_types.go
index a24e37fd..6322c8a4 100644
--- a/api/v1beta1/tempest_types.go
+++ b/api/v1beta1/tempest_types.go
@@ -529,3 +529,8 @@ func (instance Tempest) RbacNamespace() string {
 func (instance Tempest) RbacResourceName() string {
 	return instance.Name
 }
+
+// GetConditions - return the conditions from the status
+func (instance *Tempest) GetConditions() *condition.Conditions {
+	return &instance.Status.Conditions
+}
diff --git a/controllers/common_controller.go b/controllers/common_controller.go
new file mode 100644
index 00000000..92e09053
--- /dev/null
+++ b/controllers/common_controller.go
@@ -0,0 +1,380 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/go-logr/logr"
+	networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
+	"github.com/openstack-k8s-operators/lib-common/modules/common"
+	"github.com/openstack-k8s-operators/lib-common/modules/common/condition"
+	"github.com/openstack-k8s-operators/lib-common/modules/common/helper"
+	nad "github.com/openstack-k8s-operators/lib-common/modules/common/networkattachment"
+	corev1 "k8s.io/api/core/v1"
+	k8s_errors "k8s.io/apimachinery/pkg/api/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+// FrameworkInstance defines the interface that all test framework CRs must implement
+type FrameworkInstance interface {
+	client.Object
+	GetConditions() *condition.Conditions
+}
+
+// FrameworkConfig defines framework-specific configuration and behavior
+type FrameworkConfig[T FrameworkInstance] struct {
+	// ServiceName for labeling (e.g., "tempest", "tobiko")
+	ServiceName string
+
+	// NeedsNetworkAttachments indicates if NADs should be handled
+	NeedsNetworkAttachments bool
+
+	// NeedsConfigMaps indicates if ServiceConfigReadyCondition is needed
+	NeedsConfigMaps bool
+
+	// GenerateServiceConfigMaps creates framework-specific config maps
+	GenerateServiceConfigMaps func(ctx context.Context, r *Reconciler, helper *helper.Helper, instance T, workflowStep int) error
+
+	// BuildPod creates the framework-specific pod definition
+	BuildPod func(ctx context.Context, r *Reconciler, instance T, labels, annotations map[string]string, workflowStep int) (*corev1.Pod, error)
+
+	// GetInitialConditions returns the condition list for a new instance
+	GetInitialConditions func() []*condition.Condition
+
+	// Field accessors
+	GetWorkflowLength          func(instance T) int
+	GetParallel                func(instance T) bool
+	GetStorageClass            func(instance T) string
+	GetNetworkAttachments      func(instance T) []string
+	GetNetworkAttachmentStatus func(instance T) map[string][]string
+	SetNetworkAttachmentStatus func(instance T, status map[string][]string)
+
+	GetSpec         func(instance T) interface{}           // Optional
+	GetWorkflowStep func(instance T, step int) interface{} // Optional
+}
+
+// CommonReconcile executes the standard reconciliation workflow using generics
+func CommonReconcile[T FrameworkInstance](
+	ctx context.Context,
+	r *Reconciler,
+	req ctrl.Request,
+	instance T,
+	config FrameworkConfig[T],
+	Log logr.Logger,
+) (result ctrl.Result, _err error) {
+	err := r.Client.Get(ctx, req.NamespacedName, instance)
+	if err != nil {
+		if k8s_errors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	// Create a helper
+	helper, err := helper.NewHelper(instance, r.Client, r.Kclient, r.Scheme, r.Log)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Get conditions from instance
+	conditions := instance.GetConditions()
+	if conditions == nil {
+		return ctrl.Result{}, nil // TODO fmt.Errorf("instance does not support conditions")
+	}
+
+	// Initialize status
+	isNewInstance := len(*conditions) == 0
+	if isNewInstance {
+		*conditions = condition.Conditions{}
+	}
+
+	// Save a copy of the conditions so that we can restore the LastTransitionTime
+	// when a condition's state doesn't change.
+	savedConditions := conditions.DeepCopy()
+
+	// Always patch the instance status when exiting this function so we
+	// can persist any changes.
+	defer func() {
+		// update the overall status condition if service is ready
+		if conditions.AllSubConditionIsTrue() {
+			conditions.MarkTrue(condition.ReadyCondition, condition.ReadyMessage)
+		}
+		condition.RestoreLastTransitionTimes(conditions, savedConditions)
+		if conditions.IsUnknown(condition.ReadyCondition) {
+			conditions.Set(conditions.Mirror(condition.ReadyCondition))
+		}
+		err := helper.PatchInstance(ctx, instance)
+		if err != nil {
+			_err = err
+		}
+	}()
+
+	if isNewInstance {
+		cl := condition.CreateList(config.GetInitialConditions()...)
+		conditions.Init(&cl)
+
+		// Register overall status immediately to have an early feedback
+		// e.g. in the cli
+		return ctrl.Result{}, nil
+	}
+
+	// Initialize network attachments status if needed
+	if config.NeedsNetworkAttachments {
+		if config.GetNetworkAttachmentStatus(instance) == nil {
+			config.SetNetworkAttachmentStatus(instance, map[string][]string{})
+		}
+	}
+
+	// Handle service delete
+	if !instance.GetDeletionTimestamp().IsZero() {
+		Log.Info("Reconciling Service delete")
+		controllerutil.RemoveFinalizer(instance, helper.GetFinalizer())
+		Log.Info("Reconciled Service delete successfully")
+		return ctrl.Result{}, nil
+	}
+
+	workflowLength := config.GetWorkflowLength(instance)
+	nextAction, workflowStep, err := r.NextAction(ctx, instance, workflowLength)
+
+	// Merge workflow step if applicable
+	if workflowLength != 0 && workflowStep < workflowLength {
+		spec := config.GetSpec(instance)
+		workflowStepData := config.GetWorkflowStep(instance, workflowStep)
+		MergeSections(spec, workflowStepData)
+	}
+
+	switch nextAction {
+	case Failure:
+		return ctrl.Result{}, err
+
+	case Wait:
+		Log.Info(InfoWaitingOnPod)
+		return ctrl.Result{RequeueAfter: RequeueAfterValue}, nil
+
+	case EndTesting:
+		// All pods created by the instance were completed. Release the lock
+		// so that other instances can spawn their pods.
+		if lockReleased, err := r.ReleaseLock(ctx, instance); !lockReleased {
+			Log.Info(fmt.Sprintf(InfoCanNotReleaseLock, testOperatorLockName))
+			return ctrl.Result{RequeueAfter: RequeueAfterValue}, err
+		}
+
+		conditions.MarkTrue(condition.DeploymentReadyCondition, condition.DeploymentReadyMessage)
+		Log.Info(InfoTestingCompleted)
+		return ctrl.Result{}, nil
+
+	case CreateFirstPod:
+		lockAcquired, err := r.AcquireLock(ctx, instance, helper, config.GetParallel(instance))
+		if !lockAcquired {
+			Log.Info(fmt.Sprintf(InfoCanNotAcquireLock, testOperatorLockName))
+			return ctrl.Result{RequeueAfter: RequeueAfterValue}, err
+		}
+
+		Log.Info(fmt.Sprintf(InfoCreatingFirstPod, workflowStep))
+
+	case CreateNextPod:
+		// Confirm that we still hold the lock. This is useful to check if for
+		// example somebody / something deleted the lock and it got claimed by
+		// another instance. This is considered to be an error state.
+		lockAcquired, err := r.AcquireLock(ctx, instance, helper, config.GetParallel(instance))
+		if !lockAcquired {
+			Log.Error(err, ErrConfirmLockOwnership, testOperatorLockName)
+			return ctrl.Result{RequeueAfter: RequeueAfterValue}, err
+		}
+
+		Log.Info(fmt.Sprintf(InfoCreatingNextPod, workflowStep))
+
+	default:
+		return ctrl.Result{}, ErrReceivedUnexpectedAction
+	}
+
+	serviceLabels := map[string]string{
+		common.AppSelector: config.ServiceName,
+		workflowStepLabel:  strconv.Itoa(workflowStep),
+		instanceNameLabel:  instance.GetName(),
+		operatorNameLabel:  "test-operator",
+	}
+
+	workflowStepNum := 0
+	// Create multiple PVCs for parallel execution
+	if config.GetParallel(instance) && workflowStep < config.GetWorkflowLength(instance) {
+		workflowStepNum = workflowStep
+	}
+
+	// Create PersistentVolumeClaim
+	ctrlResult, err := r.EnsureLogsPVCExists(
+		ctx,
+		instance,
+		helper,
+		serviceLabels,
+		config.GetStorageClass(instance),
+		workflowStepNum,
+	)
+	if err != nil {
+		return ctrlResult, err
+	} else if (ctrlResult != ctrl.Result{}) {
+		return ctrlResult, nil
+	}
+
+	// Generate ConfigMaps if needed
+	if config.NeedsConfigMaps {
+		if err = config.GenerateServiceConfigMaps(ctx, r, helper, instance, workflowStep); err != nil {
+			conditions.Set(condition.FalseCondition(
+				condition.ServiceConfigReadyCondition,
+				condition.ErrorReason,
+				condition.SeverityWarning,
+				condition.ServiceConfigReadyErrorMessage,
+				err.Error()))
+			return ctrl.Result{}, err
+		}
+		conditions.MarkTrue(condition.ServiceConfigReadyCondition, condition.ServiceConfigReadyMessage)
+	}
+	// Generate ConfigMaps - end
+
+	// Handle network attachments if needed
+	var serviceAnnotations map[string]string
+	if config.NeedsNetworkAttachments {
+		annotations, ctrlResult, err := handleNetworkAttachments(
+			ctx, r, instance, helper, serviceLabels, config, workflowStep, conditions,
+		)
+		if err != nil || (ctrlResult != ctrl.Result{}) {
+			return ctrlResult, err
+		}
+		serviceAnnotations = annotations
+	}
+
+	// Build pod
+	podDef, err := config.BuildPod(ctx, r, instance, serviceLabels, serviceAnnotations, workflowStep)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Create a new pod
+	ctrlResult, err = r.CreatePod(ctx, *helper, podDef)
+	if err != nil {
+		// Release lock on failure
+		if lockReleased, lockErr := r.ReleaseLock(ctx, instance); lockReleased {
+			return ctrl.Result{RequeueAfter: RequeueAfterValue}, lockErr
+		}
+
+		conditions.Set(condition.FalseCondition(
+			condition.DeploymentReadyCondition,
+			condition.ErrorReason,
+			condition.SeverityWarning,
+			condition.DeploymentReadyErrorMessage,
+			err.Error()))
+		return ctrlResult, err
+	} else if (ctrlResult != ctrl.Result{}) {
+		conditions.Set(condition.FalseCondition(
+			condition.DeploymentReadyCondition,
+			condition.RequestedReason,
+			condition.SeverityInfo,
+			condition.DeploymentReadyRunningMessage))
+		return ctrlResult, nil
+	}
+	// Create a new pod - end
+
+	return ctrl.Result{}, nil
+}
+
+func handleNetworkAttachments[T FrameworkInstance](
+	ctx context.Context,
+	r *Reconciler,
+	instance T,
+	helper *helper.Helper,
+	labels map[string]string,
+	config FrameworkConfig[T],
+	workflowStep int,
+	conditions *condition.Conditions,
+) (map[string]string, ctrl.Result, error) {
+	nadList := []networkv1.NetworkAttachmentDefinition{}
+	networkAttachments := config.GetNetworkAttachments(instance)
+
+	for _, netAtt := range networkAttachments {
+		nad, err := nad.GetNADWithName(ctx, helper, netAtt, instance.GetNamespace())
+		if err != nil {
+			if k8s_errors.IsNotFound(err) {
+				// Since the net-attach-def CR should have been manually created by the user and referenced in the spec,
+				// we treat this as a warning because it means that the service will not be able to start.
+				r.Log.Info(fmt.Sprintf("network-attachment-definition %s not found", netAtt))
+				conditions.Set(condition.FalseCondition(
+					condition.NetworkAttachmentsReadyCondition,
+					condition.ErrorReason,
+					condition.SeverityWarning,
+					condition.NetworkAttachmentsReadyWaitingMessage,
+					netAtt))
+				return nil, ctrl.Result{RequeueAfter: time.Second * 10}, nil
+			}
+			conditions.Set(condition.FalseCondition(
+				condition.NetworkAttachmentsReadyCondition,
+				condition.ErrorReason,
+				condition.SeverityWarning,
+				condition.NetworkAttachmentsReadyErrorMessage,
+				err.Error()))
+			return nil, ctrl.Result{}, err
+		}
+
+		if nad != nil {
+			nadList = append(nadList, *nad)
+		}
+	}
+
+	serviceAnnotations, err := nad.EnsureNetworksAnnotation(nadList)
+	if err != nil {
+		return nil, ctrl.Result{}, fmt.Errorf("failed to create network annotation from %s: %w",
+			networkAttachments, err)
+	}
+
+	// Verify network status if pod exists
+	if r.PodExists(ctx, instance, workflowStep) {
+		networkReady, networkAttachmentStatus, err := nad.VerifyNetworkStatusFromAnnotation(
+			ctx,
+			helper,
+			networkAttachments,
+			labels,
+			1,
+		)
+		if err != nil {
+			return nil, ctrl.Result{}, err
+		}
+
+		config.SetNetworkAttachmentStatus(instance, networkAttachmentStatus)
+
+		if networkReady {
+			conditions.MarkTrue(
+				condition.NetworkAttachmentsReadyCondition,
+				condition.NetworkAttachmentsReadyMessage)
+		} else {
+			err := fmt.Errorf("%w: %s", ErrNetworkAttachmentsMismatch, networkAttachments)
+			conditions.Set(condition.FalseCondition(
+				condition.NetworkAttachmentsReadyCondition,
+				condition.ErrorReason,
+				condition.SeverityWarning,
+				condition.NetworkAttachmentsReadyErrorMessage,
+				err.Error()))
+			return nil, ctrl.Result{}, err
+		}
+	}
+
+	return serviceAnnotations, ctrl.Result{}, nil
+}
diff --git a/controllers/tempest_controller.go b/controllers/tempest_controller.go
index 939b3cbc..7842ba6e 100644
--- a/controllers/tempest_controller.go
+++ b/controllers/tempest_controller.go
@@ -18,25 +18,17 @@ package controllers
 
 import (
 	"context"
-	"fmt"
-	"strconv"
-	"time"
 
 	"github.com/go-logr/logr"
-	networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
-	"github.com/openstack-k8s-operators/lib-common/modules/common"
 	"github.com/openstack-k8s-operators/lib-common/modules/common/condition"
 	"github.com/openstack-k8s-operators/lib-common/modules/common/configmap"
"github.com/openstack-k8s-operators/lib-common/modules/common/helper" "github.com/openstack-k8s-operators/lib-common/modules/common/labels" - nad "github.com/openstack-k8s-operators/lib-common/modules/common/networkattachment" "github.com/openstack-k8s-operators/lib-common/modules/common/util" testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" "github.com/openstack-k8s-operators/test-operator/pkg/tempest" corev1 "k8s.io/api/core/v1" - k8s_errors "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -45,7 +37,7 @@ type TempestReconciler struct { Reconciler } -// GetLogger returns a logger object with a prefix of "controller.name" and additional controller context fields +// GetLogger returns a logger object with a prefix of "controller.name" func (r *TempestReconciler) GetLogger(ctx context.Context) logr.Logger { return log.FromContext(ctx).WithName("Controllers").WithName("Tempest") } @@ -61,244 +53,94 @@ func (r *TempestReconciler) GetLogger(ctx context.Context) logr.Logger { // +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;create;update;watch;patch;delete // Reconcile - Tempest -func (r *TempestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { - Log := r.GetLogger(ctx) - +func (r *TempestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { // Fetch the Tempest instance instance := &testv1beta1.Tempest{} - err := r.Client.Get(ctx, req.NamespacedName, instance) - if err != nil { - if k8s_errors.IsNotFound(err) { - return ctrl.Result{}, nil - } - return ctrl.Result{}, err - } - - // Create a helper - helper, err := helper.NewHelper( - instance, - r.Client, - r.Kclient, - r.Scheme, - r.Log, - ) - if err != nil { - return ctrl.Result{}, err - } - - // initialize status - isNewInstance := instance.Status.Conditions == nil - if isNewInstance { - instance.Status.Conditions = condition.Conditions{} - } - - // Save a copy of the condtions so that we can restore the LastTransitionTime - // when a condition's state doesn't change. - savedConditions := instance.Status.Conditions.DeepCopy() - - // Always patch the instance status when exiting this function so we - // can persist any changes. 
- defer func() { - // update the overall status condition if service is ready - if instance.Status.Conditions.AllSubConditionIsTrue() { - instance.Status.Conditions.MarkTrue(condition.ReadyCondition, condition.ReadyMessage) - } - condition.RestoreLastTransitionTimes(&instance.Status.Conditions, savedConditions) - if instance.Status.Conditions.IsUnknown(condition.ReadyCondition) { - instance.Status.Conditions.Set( - instance.Status.Conditions.Mirror(condition.ReadyCondition)) - } - err := helper.PatchInstance(ctx, instance) - if err != nil { - _err = err - return - } - }() - - if isNewInstance { - // Initialize conditions used later as Status=Unknown - cl := condition.CreateList( - condition.UnknownCondition(condition.ReadyCondition, condition.InitReason, condition.ReadyInitMessage), - condition.UnknownCondition(condition.ServiceConfigReadyCondition, condition.InitReason, condition.ServiceConfigReadyInitMessage), - condition.UnknownCondition(condition.DeploymentReadyCondition, condition.InitReason, condition.DeploymentReadyInitMessage), - condition.UnknownCondition(condition.NetworkAttachmentsReadyCondition, condition.InitReason, condition.NetworkAttachmentsReadyInitMessage), - ) - instance.Status.Conditions.Init(&cl) - - // Register overall status immediately to have an early feedback - // e.g. in the cli - return ctrl.Result{}, nil - } - - // If we're not deleting this and the service object doesn't have our - // finalizer, add it. - if instance.DeletionTimestamp.IsZero() && controllerutil.AddFinalizer(instance, helper.GetFinalizer()) { - return ctrl.Result{}, nil - } - - if instance.Status.NetworkAttachments == nil { - instance.Status.NetworkAttachments = map[string][]string{} - } - - // Handle service delete - if !instance.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, instance, helper) - } - workflowLength := len(instance.Spec.Workflow) - nextAction, nextWorkflowStep, err := r.NextAction(ctx, instance, workflowLength) - if nextWorkflowStep < workflowLength { - MergeSections(&instance.Spec, instance.Spec.Workflow[nextWorkflowStep]) - } - - switch nextAction { - case Failure: - return ctrl.Result{}, err - - case Wait: - Log.Info(InfoWaitingOnPod) - return ctrl.Result{RequeueAfter: RequeueAfterValue}, nil - - case EndTesting: - // All pods created by the instance were completed. Release the lock - // so that other instances can spawn their pods. 
- if lockReleased, err := r.ReleaseLock(ctx, instance); !lockReleased { - Log.Info(fmt.Sprintf(InfoCanNotReleaseLock, testOperatorLockName)) - return ctrl.Result{RequeueAfter: RequeueAfterValue}, err - } - - instance.Status.Conditions.MarkTrue( - condition.DeploymentReadyCondition, - condition.DeploymentReadyMessage) + config := FrameworkConfig[*testv1beta1.Tempest]{ + ServiceName: tempest.ServiceName, + NeedsNetworkAttachments: true, + NeedsConfigMaps: true, + GenerateServiceConfigMaps: generateTempestServiceConfigMaps, + BuildPod: buildTempestPod, + + GetInitialConditions: func() []*condition.Condition { + return []*condition.Condition{ + condition.UnknownCondition(condition.ReadyCondition, condition.InitReason, condition.ReadyInitMessage), + condition.UnknownCondition(condition.ServiceConfigReadyCondition, condition.InitReason, condition.ServiceConfigReadyInitMessage), + condition.UnknownCondition(condition.DeploymentReadyCondition, condition.InitReason, condition.DeploymentReadyInitMessage), + condition.UnknownCondition(condition.NetworkAttachmentsReadyCondition, condition.InitReason, condition.NetworkAttachmentsReadyInitMessage), + } + }, - Log.Info(InfoTestingCompleted) - return ctrl.Result{}, nil + GetSpec: func(instance *testv1beta1.Tempest) interface{} { + return &instance.Spec + }, - case CreateFirstPod: - lockAcquired, err := r.AcquireLock(ctx, instance, helper, instance.Spec.Parallel) - if !lockAcquired { - Log.Info(fmt.Sprintf(InfoCanNotAcquireLock, testOperatorLockName)) - return ctrl.Result{RequeueAfter: RequeueAfterValue}, err - } + GetWorkflowStep: func(instance *testv1beta1.Tempest, step int) interface{} { + return instance.Spec.Workflow[step] + }, - Log.Info(fmt.Sprintf(InfoCreatingFirstPod, nextWorkflowStep)) + GetWorkflowLength: func(instance *testv1beta1.Tempest) int { + return len(instance.Spec.Workflow) + }, - case CreateNextPod: - // Confirm that we still hold the lock. This is useful to check if for - // example somebody / something deleted the lock and it got claimed by - // another instance. This is considered to be an error state. 
- lockAcquired, err := r.AcquireLock(ctx, instance, helper, instance.Spec.Parallel) - if !lockAcquired { - Log.Error(err, ErrConfirmLockOwnership, testOperatorLockName) - return ctrl.Result{RequeueAfter: RequeueAfterValue}, err - } + GetParallel: func(instance *testv1beta1.Tempest) bool { + return instance.Spec.Parallel + }, - Log.Info(fmt.Sprintf(InfoCreatingNextPod, nextWorkflowStep)) + GetStorageClass: func(instance *testv1beta1.Tempest) string { + return instance.Spec.StorageClass + }, - default: - return ctrl.Result{}, ErrReceivedUnexpectedAction - } + GetNetworkAttachments: func(instance *testv1beta1.Tempest) []string { + return instance.Spec.NetworkAttachments + }, - serviceLabels := map[string]string{ - common.AppSelector: tempest.ServiceName, - workflowStepLabel: strconv.Itoa(nextWorkflowStep), - instanceNameLabel: instance.Name, - operatorNameLabel: "test-operator", - } + GetNetworkAttachmentStatus: func(instance *testv1beta1.Tempest) map[string][]string { + return instance.Status.NetworkAttachments + }, - workflowStepNum := 0 - // Create multiple PVCs for parallel execution - if instance.Spec.Parallel && nextWorkflowStep < len(instance.Spec.Workflow) { - workflowStepNum = nextWorkflowStep + SetNetworkAttachmentStatus: func(instance *testv1beta1.Tempest, status map[string][]string) { + instance.Status.NetworkAttachments = status + }, } - // Create PersistentVolumeClaim - ctrlResult, err := r.EnsureLogsPVCExists( - ctx, - instance, - helper, - serviceLabels, - instance.Spec.StorageClass, - workflowStepNum, - ) - - if err != nil { - return ctrlResult, err - } else if (ctrlResult != ctrl.Result{}) { - return ctrlResult, nil - } - // Create PersistentVolumeClaim - end + return CommonReconcile(ctx, &r.Reconciler, req, instance, config, r.GetLogger(ctx)) +} +func buildTempestPod( + ctx context.Context, + r *Reconciler, + instance *testv1beta1.Tempest, + labels, annotations map[string]string, + workflowStep int, +) (*corev1.Pod, error) { mountSSHKey := false if instance.Spec.SSHKeySecretName != "" { mountSSHKey = r.CheckSecretExists(ctx, instance, instance.Spec.SSHKeySecretName) } - // Generate ConfigMaps - err = r.generateServiceConfigMaps(ctx, helper, instance, nextWorkflowStep) - if err != nil { - instance.Status.Conditions.Set(condition.FalseCondition( - condition.ServiceConfigReadyCondition, - condition.ErrorReason, - condition.SeverityWarning, - condition.ServiceConfigReadyErrorMessage, - err.Error())) - return ctrl.Result{}, err - } - instance.Status.Conditions.MarkTrue(condition.ServiceConfigReadyCondition, condition.ServiceConfigReadyMessage) - // Generate ConfigMaps - end - - nadList := []networkv1.NetworkAttachmentDefinition{} - for _, netAtt := range instance.Spec.NetworkAttachments { - nad, err := nad.GetNADWithName(ctx, helper, netAtt, instance.Namespace) - if err != nil { - if k8s_errors.IsNotFound(err) { - // Since the net-attach-def CR should have been manually created by the user and referenced in the spec, - // we treat this as a warning because it means that the service will not be able to start. 
- Log.Info(fmt.Sprintf("network-attachment-definition %s not found", netAtt)) - instance.Status.Conditions.Set(condition.FalseCondition( - condition.NetworkAttachmentsReadyCondition, - condition.ErrorReason, - condition.SeverityWarning, - condition.NetworkAttachmentsReadyWaitingMessage, - netAtt)) - return ctrl.Result{RequeueAfter: time.Second * 10}, nil - } - instance.Status.Conditions.Set(condition.FalseCondition( - condition.NetworkAttachmentsReadyCondition, - condition.ErrorReason, - condition.SeverityWarning, - condition.NetworkAttachmentsReadyErrorMessage, - err.Error())) - return ctrl.Result{}, err - } - - if nad != nil { - nadList = append(nadList, *nad) - } - } + mountCerts := r.CheckSecretExists(ctx, instance, "combined-ca-bundle") + customDataConfigMapName := GetCustomDataConfigMapName(instance, workflowStep) + EnvVarsConfigMapName := GetEnvVarsConfigMapName(instance, workflowStep) + podName := r.GetPodName(instance, workflowStep) - serviceAnnotations, err := nad.EnsureNetworksAnnotation(nadList) - if err != nil { - return ctrl.Result{}, fmt.Errorf("failed create network annotation from %s: %w", - instance.Spec.NetworkAttachments, err) + workflowStepNum := 0 + if instance.Spec.Parallel && workflowStep < len(instance.Spec.Workflow) { + workflowStepNum = workflowStep } - - // Create a new pod - mountCerts := r.CheckSecretExists(ctx, instance, "combined-ca-bundle") - customDataConfigMapName := GetCustomDataConfigMapName(instance, nextWorkflowStep) - EnvVarsConfigMapName := GetEnvVarsConfigMapName(instance, nextWorkflowStep) - podName := r.GetPodName(instance, nextWorkflowStep) logsPVCName := r.GetPVCLogsName(instance, workflowStepNum) + containerImage, err := r.GetContainerImage(ctx, instance.Spec.ContainerImage, instance) if err != nil { - return ctrl.Result{}, err + return nil, err } - podDef := tempest.Pod( + return tempest.Pod( instance, - serviceLabels, - serviceAnnotations, + labels, + annotations, podName, EnvVarsConfigMapName, customDataConfigMapName, @@ -306,84 +148,7 @@ func (r *TempestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re mountCerts, mountSSHKey, containerImage, - ) - - ctrlResult, err = r.CreatePod(ctx, *helper, podDef) - if err != nil { - // Creation of the tempest pod was not successfull. - // Release the lock and allow other controllers to spawn - // a pod. 
- if lockReleased, lockErr := r.ReleaseLock(ctx, instance); lockReleased { - return ctrl.Result{RequeueAfter: RequeueAfterValue}, lockErr - } - - instance.Status.Conditions.Set(condition.FalseCondition( - condition.DeploymentReadyCondition, - condition.ErrorReason, - condition.SeverityWarning, - condition.DeploymentReadyErrorMessage, - err.Error())) - return ctrlResult, err - } else if (ctrlResult != ctrl.Result{}) { - instance.Status.Conditions.Set(condition.FalseCondition( - condition.DeploymentReadyCondition, - condition.RequestedReason, - condition.SeverityInfo, - condition.DeploymentReadyRunningMessage)) - return ctrlResult, nil - } - // Create a new pod - end - - // NetworkAttachments - if r.PodExists(ctx, instance, nextWorkflowStep) { - networkReady, networkAttachmentStatus, err := nad.VerifyNetworkStatusFromAnnotation( - ctx, - helper, - instance.Spec.NetworkAttachments, - serviceLabels, - 1, - ) - if err != nil { - return ctrl.Result{}, err - } - - instance.Status.NetworkAttachments = networkAttachmentStatus - - if networkReady { - instance.Status.Conditions.MarkTrue( - condition.NetworkAttachmentsReadyCondition, - condition.NetworkAttachmentsReadyMessage) - } else { - err := fmt.Errorf("%w: %s", ErrNetworkAttachmentsMismatch, instance.Spec.NetworkAttachments) - instance.Status.Conditions.Set(condition.FalseCondition( - condition.NetworkAttachmentsReadyCondition, - condition.ErrorReason, - condition.SeverityWarning, - condition.NetworkAttachmentsReadyErrorMessage, - err.Error())) - - return ctrl.Result{}, err - } - } - // NetworkAttachments - end - - return ctrl.Result{}, nil -} - -func (r *TempestReconciler) reconcileDelete( - ctx context.Context, - instance *testv1beta1.Tempest, - helper *helper.Helper, -) (ctrl.Result, error) { - Log := r.GetLogger(ctx) - Log.Info("Reconciling Service delete") - - // remove the finalizer - controllerutil.RemoveFinalizer(instance, helper.GetFinalizer()) - - Log.Info("Reconciled Service delete successfully") - - return ctrl.Result{}, nil + ), nil } // SetupWithManager sets up the controller with the Manager. 
@@ -396,13 +161,14 @@ func (r *TempestReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *TempestReconciler) setTempestConfigVars(envVars map[string]string, +func setTempestConfigVars( + r *Reconciler, + envVars map[string]string, customData map[string]string, instance *testv1beta1.Tempest, workflowStepNum int, ) { tRun := instance.Spec.TempestRun - testOperatorDir := "/etc/test_operator/" // Files @@ -491,14 +257,15 @@ func (r *TempestReconciler) setTempestConfigVars(envVars map[string]string, } } -func (r *TempestReconciler) setTempestconfConfigVars( +func setTempestconfConfigVars( + r *Reconciler, envVars map[string]string, customData map[string]string, instance *testv1beta1.Tempest, ) { tcRun := instance.Spec.TempestconfRun - testOperatorDir := "/etc/test_operator/" + value := tcRun.DeployerInput if len(value) != 0 { deployerInputFile := "deployer_input.ini" @@ -564,15 +331,15 @@ func (r *TempestReconciler) setTempestconfConfigVars( // execution of the tempest container // - %-config contains all the files that are needed for the execution of // the tempest container -func (r *TempestReconciler) generateServiceConfigMaps( +func generateTempestServiceConfigMaps( ctx context.Context, + r *Reconciler, h *helper.Helper, instance *testv1beta1.Tempest, workflowStepNum int, ) error { // Create/update configmaps from template cmLabels := labels.GetLabels(instance, labels.GetGroupLabel(tempest.ServiceName), map[string]string{}) - operatorLabels := map[string]string{ operatorNameLabel: "test-operator", instanceNameLabel: instance.Name, @@ -587,8 +354,8 @@ func (r *TempestReconciler) generateServiceConfigMaps( customData := make(map[string]string) envVars := make(map[string]string) - r.setTempestConfigVars(envVars, customData, instance, workflowStepNum) - r.setTempestconfConfigVars(envVars, customData, instance) + setTempestConfigVars(r, envVars, customData, instance, workflowStepNum) + setTempestconfConfigVars(r, envVars, customData, instance) r.setConfigOverwrite(customData, instance.Spec.ConfigOverwrite) envVars["TEMPEST_DEBUG_MODE"] = r.GetDefaultBool(instance.Spec.Debug) diff --git a/controllers/tobiko_controller.go b/controllers/tobiko_controller.go index c3e0cfdd..b1f20e0d 100644 --- a/controllers/tobiko_controller.go +++ b/controllers/tobiko_controller.go @@ -63,6 +63,7 @@ func (r *TobikoReconciler) GetLogger(ctx context.Context) logr.Logger { func (r *TobikoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { Log := r.GetLogger(ctx) + // Fetch the Tobiko instance instance := &testv1beta1.Tobiko{} err := r.Client.Get(ctx, req.NamespacedName, instance) if err != nil {
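Illustrative usage (not part of the patch): with this change, a new framework controller only needs its CR to implement GetConditions() and then fill in a FrameworkConfig before delegating to CommonReconcile, mirroring the Tempest wiring above. The sketch below assumes the controllers package context and a hypothetical testv1beta1.MyFramework CR with Workflow, Parallel, and StorageClass spec fields plus a hypothetical buildMyFrameworkPod helper; only FrameworkInstance, FrameworkConfig, and CommonReconcile come from this change.

func (r *MyFrameworkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// CommonReconcile fetches the instance, manages conditions, locking,
	// PVCs, and pod creation; the config only supplies framework specifics.
	instance := &testv1beta1.MyFramework{}

	config := FrameworkConfig[*testv1beta1.MyFramework]{
		ServiceName:             "myframework",
		NeedsNetworkAttachments: false,
		NeedsConfigMaps:         false,
		BuildPod:                buildMyFrameworkPod, // hypothetical pod builder

		GetInitialConditions: func() []*condition.Condition {
			return []*condition.Condition{
				condition.UnknownCondition(condition.ReadyCondition, condition.InitReason, condition.ReadyInitMessage),
				condition.UnknownCondition(condition.DeploymentReadyCondition, condition.InitReason, condition.DeploymentReadyInitMessage),
			}
		},

		// Accessors map the generic workflow onto the CR's spec/status.
		GetSpec:           func(i *testv1beta1.MyFramework) interface{} { return &i.Spec },
		GetWorkflowStep:   func(i *testv1beta1.MyFramework, step int) interface{} { return i.Spec.Workflow[step] },
		GetWorkflowLength: func(i *testv1beta1.MyFramework) int { return len(i.Spec.Workflow) },
		GetParallel:       func(i *testv1beta1.MyFramework) bool { return i.Spec.Parallel },
		GetStorageClass:   func(i *testv1beta1.MyFramework) string { return i.Spec.StorageClass },
	}

	return CommonReconcile(ctx, &r.Reconciler, req, instance, config, r.GetLogger(ctx))
}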