diff --git a/.github/workflows/next-build.yml b/.github/workflows/next-build.yml index 0e2c05941..a9f40b1dc 100644 --- a/.github/workflows/next-build.yml +++ b/.github/workflows/next-build.yml @@ -73,6 +73,17 @@ jobs: quay.io/devfile/project-clone:sha-${{ steps.git-sha.outputs.sha }} file: ./project-clone/Dockerfile + - name: Build and push + uses: docker/build-push-action@0a97817b6ade9f46837855d676c4cca3a2471fc9 #v4.2.1 + with: + context: ./project-backup + push: true + platforms: linux/amd64, linux/arm64, linux/ppc64le, linux/s390x + tags: | + quay.io/devfile/project-backup:next + quay.io/devfile/project-backup:sha-${{ steps.git-sha.outputs.sha }} + file: ./project-backup/Dockerfile + build-next-olm-imgs: runs-on: ubuntu-latest needs: build-next-imgs @@ -147,6 +158,7 @@ jobs: export TAG="sha-${{ needs.build-next-imgs.outputs.git-sha }}" export DEFAULT_DWO_IMG="quay.io/devfile/devworkspace-controller:$TAG" export PROJECT_CLONE_IMG="quay.io/devfile/project-clone:$TAG" + export PROJECT_BACKUP_IMG="quay.io/devfile/project-backup:$TAG" # Next builds are not rolled out unless the version is incremented. We want to use semver # prerelease tags to make sure each new build increments on the previous one, e.g. diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 09b35f69a..df77e3725 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -130,3 +130,6 @@ jobs: - name: Check if project-clone dockerimage build is working run: docker build -f ./project-clone/Dockerfile . 
+ - + name: Check if project-backup containerimage build is working + run: docker build -f ./project-backup/Containerfile project-backup/ diff --git a/Makefile b/Makefile index feff1b2e9..d7053812b 100644 --- a/Makefile +++ b/Makefile @@ -26,6 +26,7 @@ export DWO_IMG ?= quay.io/devfile/devworkspace-controller:next export DWO_BUNDLE_IMG ?= quay.io/devfile/devworkspace-operator-bundle:next export DWO_INDEX_IMG ?= quay.io/devfile/devworkspace-operator-index:next export PROJECT_CLONE_IMG ?= quay.io/devfile/project-clone:next +export PROJECT_BACKUP_IMG ?= quay.io/devfile/project-backup:next export PULL_POLICY ?= Always export DEFAULT_ROUTING ?= basic export KUBECONFIG ?= ${HOME}/.kube/config @@ -128,6 +129,7 @@ _print_vars: @echo " DWO_BUNDLE_IMG=$(DWO_BUNDLE_IMG)" @echo " DWO_INDEX_IMG=$(DWO_INDEX_IMG)" @echo " PROJECT_CLONE_IMG=$(PROJECT_CLONE_IMG)" + @echo " PROJECT_BACKUP_IMG=$(PROJECT_BACKUP_IMG)" @echo " PULL_POLICY=$(PULL_POLICY)" @echo " ROUTING_SUFFIX=$(ROUTING_SUFFIX)" @echo " DEFAULT_ROUTING=$(DEFAULT_ROUTING)" @@ -369,6 +371,7 @@ help: Makefile @echo 'Supported environment variables:' @echo ' DWO_IMG - Image used for controller' @echo ' PROJECT_CLONE_IMG - Image used for project-clone init container' + @echo ' PROJECT_BACKUP_IMG - Image used for project-backup workspace backup container' @echo ' NAMESPACE - Namespace to use for deploying controller' @echo ' KUBECONFIG - Kubeconfig which should be used for accessing to the cluster. Currently is: $(KUBECONFIG)' @echo ' ROUTING_SUFFIX - Cluster routing suffix (e.g. 
$$(minikube ip).nip.io, apps-crc.testing)' diff --git a/apis/controller/v1alpha1/devworkspaceoperatorconfig_types.go b/apis/controller/v1alpha1/devworkspaceoperatorconfig_types.go index 9ffff832f..fe6a0bf83 100644 --- a/apis/controller/v1alpha1/devworkspaceoperatorconfig_types.go +++ b/apis/controller/v1alpha1/devworkspaceoperatorconfig_types.go @@ -72,6 +72,47 @@ type CleanupCronJobConfig struct { Schedule string `json:"schedule,omitempty"` } +type RegistryConfig struct { + // A registry where backup images are stored. Images are stored + // in {path}/${DEVWORKSPACE_NAMESPACE}/${DEVWORKSPACE_NAME}:latest + // +kubebuilder:validation:Required + Path string `json:"path,omitempty"` + // AuthSecret is the name of a Kubernetes secret of + // type kubernetes.io/dockerconfigjson. + // The secret is expected to be in the same namespace the workspace is running in. + // If secret is not found in the workspace namespace, the operator will look for the secret + // in the namespace where the operator is running in. + // as the DevWorkspaceOperatorCongfig. + // The secret must contain "controller.devfile.io/watch-secret=true" label so that it can be + // recognized by the operator. + // +kubebuilder:validation:Optional + AuthSecret string `json:"authSecret,omitempty"` +} + +type OrasConfig struct { + // ExtraArgs are additional arguments passed to the oras CLI + // +kubebuilder:validation:Optional + ExtraArgs string `json:"extraArgs,omitempty"` +} + +type BackupCronJobConfig struct { + // Enable determines whether backup CronJobs should be created for workspace PVCs. + // Defaults to false if not specified. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty"` + // RegistryConfig defines the registry configuration where backup images are stored. + // +kubebuilder:validation:Required + Registry *RegistryConfig `json:"registry,omitempty"` + // OrasConfig defines additional configuration options for the oras CLI used to + // push and pull backup images. 
+ OrasConfig *OrasConfig `json:"oras,omitempty"` + // Schedule specifies the cron schedule for the backup cron job. + // For example, "0 1 * * *" runs daily at 1 AM. + // +kubebuilder:default:="0 1 * * *" + // +kubebuilder:validation:Optional + Schedule string `json:"schedule,omitempty"` +} + type RoutingConfig struct { // DefaultRoutingClass specifies the routingClass to be used when a DevWorkspace // specifies an empty `.spec.routingClass`. Supported routingClasses can be defined @@ -189,6 +230,8 @@ type WorkspaceConfig struct { RuntimeClassName *string `json:"runtimeClassName,omitempty"` // CleanupCronJobConfig defines configuration options for a cron job that automatically cleans up stale DevWorkspaces. CleanupCronJob *CleanupCronJobConfig `json:"cleanupCronJob,omitempty"` + // BackupCronJobConfig defines configuration options for a cron job that automatically backs up workspace PVCs. + BackupCronJob *BackupCronJobConfig `json:"backupCronJob,omitempty"` // PostStartTimeout defines the maximum duration the PostStart hook can run // before it is automatically failed. This timeout is used for the postStart lifecycle hook // that is used to run commands in the workspace container. The timeout is specified in seconds. @@ -331,14 +374,26 @@ type ConfigmapReference struct { Namespace string `json:"namespace"` } +type OperatorConfigurationStatus struct { + // Conditions represent the latest available observations of the OperatorConfiguration's state + Conditions []metav1.Condition `json:"conditions,omitempty"` + // LastBackupTime is the timestamp of the last successful backup. Nil if + // no backup is configured or no backup has yet succeeded. 
+ LastBackupTime *metav1.Time `json:"lastBackupTime,omitempty"` +} + // DevWorkspaceOperatorConfig is the Schema for the devworkspaceoperatorconfigs API // +kubebuilder:object:root=true +// +kubebuilder:subresource:status // +kubebuilder:resource:path=devworkspaceoperatorconfigs,scope=Namespaced,shortName=dwoc type DevWorkspaceOperatorConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Config *OperatorConfiguration `json:"config,omitempty"` + // Status represents the current status of the DevWorkspaceOperatorConfig + // automatically managed by the DevWorkspace Operator. + Status *OperatorConfigurationStatus `json:"status,omitempty"` } // DevWorkspaceOperatorConfigList contains a list of DevWorkspaceOperatorConfig diff --git a/apis/controller/v1alpha1/zz_generated.deepcopy.go b/apis/controller/v1alpha1/zz_generated.deepcopy.go index f31eb7604..f64ec25ea 100644 --- a/apis/controller/v1alpha1/zz_generated.deepcopy.go +++ b/apis/controller/v1alpha1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ package v1alpha1 import ( "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -46,6 +47,36 @@ func (in Attributes) DeepCopy() Attributes { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupCronJobConfig) DeepCopyInto(out *BackupCronJobConfig) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Registry != nil { + in, out := &in.Registry, &out.Registry + *out = new(RegistryConfig) + **out = **in + } + if in.OrasConfig != nil { + in, out := &in.OrasConfig, &out.OrasConfig + *out = new(OrasConfig) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupCronJobConfig. 
+func (in *BackupCronJobConfig) DeepCopy() *BackupCronJobConfig { + if in == nil { + return nil + } + out := new(BackupCronJobConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CleanupCronJobConfig) DeepCopyInto(out *CleanupCronJobConfig) { *out = *in @@ -101,6 +132,11 @@ func (in *DevWorkspaceOperatorConfig) DeepCopyInto(out *DevWorkspaceOperatorConf *out = new(OperatorConfiguration) (*in).DeepCopyInto(*out) } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(OperatorConfigurationStatus) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceOperatorConfig. @@ -433,6 +469,47 @@ func (in *OperatorConfiguration) DeepCopy() *OperatorConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorConfigurationStatus) DeepCopyInto(out *OperatorConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastBackupTime != nil { + in, out := &in.LastBackupTime, &out.LastBackupTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigurationStatus. +func (in *OperatorConfigurationStatus) DeepCopy() *OperatorConfigurationStatus { + if in == nil { + return nil + } + out := new(OperatorConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrasConfig) DeepCopyInto(out *OrasConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrasConfig. +func (in *OrasConfig) DeepCopy() *OrasConfig { + if in == nil { + return nil + } + out := new(OrasConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PersistentHomeConfig) DeepCopyInto(out *PersistentHomeConfig) { *out = *in @@ -584,6 +661,21 @@ func (in *Proxy) DeepCopy() *Proxy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryConfig) DeepCopyInto(out *RegistryConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryConfig. +func (in *RegistryConfig) DeepCopy() *RegistryConfig { + if in == nil { + return nil + } + out := new(RegistryConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RoutingConfig) DeepCopyInto(out *RoutingConfig) { *out = *in @@ -788,6 +880,11 @@ func (in *WorkspaceConfig) DeepCopyInto(out *WorkspaceConfig) { *out = new(CleanupCronJobConfig) (*in).DeepCopyInto(*out) } + if in.BackupCronJob != nil { + in, out := &in.BackupCronJob, &out.BackupCronJob + *out = new(BackupCronJobConfig) + (*in).DeepCopyInto(*out) + } if in.HostUsers != nil { in, out := &in.HostUsers, &out.HostUsers *out = new(bool) diff --git a/build/scripts/generate_deployment.sh b/build/scripts/generate_deployment.sh index a10b30e72..e4a6e740f 100755 --- a/build/scripts/generate_deployment.sh +++ b/build/scripts/generate_deployment.sh @@ -31,7 +31,7 @@ set -e # List of environment variables that will be replaced by envsubst -SUBST_VARS='$NAMESPACE $DWO_IMG $PROJECT_CLONE_IMG $ROUTING_SUFFIX $DEFAULT_ROUTING $PULL_POLICY' +SUBST_VARS='$NAMESPACE $DWO_IMG $PROJECT_CLONE_IMG $PROJECT_BACKUP_IMG $ROUTING_SUFFIX $DEFAULT_ROUTING $PULL_POLICY' SCRIPT_DIR=$(cd "$(dirname "$0")"; pwd) DEPLOY_DIR="$SCRIPT_DIR/../../deploy/" @@ -58,6 +58,11 @@ Arguments: '--use-defaults' is passed; otherwise, the value of the PROJECT_CLONE_IMG environment variable is used. If unspecifed, the default value of 'quay.io/devfile/project-clone:next' is used. + --project-backup-image + Image to use for the project backup workspace. Used only when + '--use-defaults' is passed; otherwise, the value of the PROJECT_BACKUP_IMG + environment variable is used. If unspecifed, the default value of + 'quay.io/devfile/project-backup:next' is used. --split-yaml Parse output file combined.yaml into a yaml file for each record in combined yaml. 
Files are output to the 'objects' subdirectory @@ -96,6 +101,10 @@ while [[ "$#" -gt 0 ]]; do PROJECT_CLONE_IMG=$2 shift ;; + --project-backup-image) + PROJECT_BACKUP_IMG=$2 + shift + ;; --split-yamls) SPLIT_YAMLS=true ;; @@ -118,6 +127,7 @@ if $USE_DEFAULT_ENV; then export NAMESPACE=devworkspace-controller export DWO_IMG=${DEFAULT_DWO_IMG:-"quay.io/devfile/devworkspace-controller:next"} export PROJECT_CLONE_IMG=${PROJECT_CLONE_IMG:-"quay.io/devfile/project-clone:next"} + export PROJECT_BACKUP_IMG=${PROJECT_BACKUP_IMG:-"quay.io/devfile/project-backup:next"} export PULL_POLICY=Always export DEFAULT_ROUTING=basic export DEVWORKSPACE_API_VERSION=a6ec0a38307b63a29fad2eea945cc69bee97a683 diff --git a/controllers/backupcronjob/backupcronjob_controller.go b/controllers/backupcronjob/backupcronjob_controller.go new file mode 100644 index 000000000..f07134b33 --- /dev/null +++ b/controllers/backupcronjob/backupcronjob_controller.go @@ -0,0 +1,538 @@ +// +// Copyright (c) 2019-2025 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
//

package controllers

import (
	"context"
	"fmt"
	"reflect"

	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
	controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
	"github.com/devfile/devworkspace-operator/internal/images"
	"github.com/devfile/devworkspace-operator/pkg/common"
	"github.com/devfile/devworkspace-operator/pkg/conditions"
	"github.com/devfile/devworkspace-operator/pkg/config"
	wkspConfig "github.com/devfile/devworkspace-operator/pkg/config"
	"github.com/devfile/devworkspace-operator/pkg/constants"
	"github.com/devfile/devworkspace-operator/pkg/infrastructure"
	"github.com/devfile/devworkspace-operator/pkg/provision/storage"
	"github.com/go-logr/logr"
	"github.com/robfig/cron/v3"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/utils/ptr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// BackupCronJobReconciler reconciles `BackupCronJob` configuration for the purpose of backing up workspace PVCs.
type BackupCronJobReconciler struct {
	client.Client
	NonCachingClient client.Client
	Log              logr.Logger
	Scheme           *runtime.Scheme

	cron *cron.Cron
}

// shouldReconcileOnUpdate determines whether the BackupCronJobReconciler should reconcile
// based on changes in the DevWorkspaceOperatorConfig object.
+func shouldReconcileOnUpdate(e event.UpdateEvent, log logr.Logger) bool { + log.Info("DevWorkspaceOperatorConfig update event received") + oldConfig, ok := e.ObjectOld.(*controllerv1alpha1.DevWorkspaceOperatorConfig) + if !ok { + return false + } + newConfig, ok := e.ObjectNew.(*controllerv1alpha1.DevWorkspaceOperatorConfig) + if !ok { + return false + } + + oldBackup := oldConfig.Config.Workspace.BackupCronJob + newBackup := newConfig.Config.Workspace.BackupCronJob + + return !reflect.DeepEqual(oldBackup, newBackup) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *BackupCronJobReconciler) SetupWithManager(mgr ctrl.Manager) error { + log := r.Log.WithName("setupWithManager") + log.Info("Setting up BackupCronJobReconciler") + + configPredicate := predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return shouldReconcileOnUpdate(e, log) + }, + CreateFunc: func(e event.CreateEvent) bool { return true }, + DeleteFunc: func(e event.DeleteEvent) bool { return true }, + GenericFunc: func(e event.GenericEvent) bool { return false }, + } + + r.cron = cron.New() + + return ctrl.NewControllerManagedBy(mgr). + Named("BackupCronJob"). + Watches(&controllerv1alpha1.DevWorkspaceOperatorConfig{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, object client.Object) []reconcile.Request { + operatorNamespace, err := infrastructure.GetNamespace() + // Ignore events from other namespaces + if err != nil || object.GetNamespace() != operatorNamespace || object.GetName() != config.OperatorConfigName { + log.Info("Received event from different namespace, ignoring", "namespace", object.GetNamespace()) + return []ctrl.Request{} + } + + return []ctrl.Request{ + { + NamespacedName: client.ObjectKey{ + Name: object.GetName(), + Namespace: object.GetNamespace(), + }, + }, + } + }), + ). + WithEventFilter(configPredicate). 
+ Complete(r) +} + +// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=serviceaccounts;,verbs=get;list;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;create;update;patch;delete +// +kubebuilder:rbac:groups=controller.devfile.io,resources=devworkspaceoperatorconfigs,verbs=get;list;update;patch;watch +// +kubebuilder:rbac:groups=workspace.devfile.io,resources=devworkspaces,verbs=get;list +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=builds,verbs=get +// +kubebuilder:rbac:groups="",resources=builds/details,verbs=update +// +kubebuilder:rbac:groups="",resources=imagestreams,verbs=create +// +kubebuilder:rbac:groups="",resources=imagestreams/layers,verbs=get;update +// +kubebuilder:rbac:groups=build.openshift.io,resources=builds,verbs=get +// +kubebuilder:rbac:groups=build.openshift.io,resources=builds/details,verbs=update +// +kubebuilder:rbac:groups=image.openshift.io,resources=imagestreams,verbs=get;list;create;update;patch;delete +// +kubebuilder:rbac:groups=image.openshift.io,resources=imagestreams/layers,verbs=get;update + +// Reconcile is the main reconciliation loop for the BackupCronJob controller. 
+func (r *BackupCronJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log + log.Info("Reconciling BackupCronJob", "DWOC", req.NamespacedName) + + dwOperatorConfig := &controllerv1alpha1.DevWorkspaceOperatorConfig{} + err := r.Get(ctx, req.NamespacedName, dwOperatorConfig) + if err != nil { + log.Error(err, "Failed to get DevWorkspaceOperatorConfig") + r.stopCron(log) + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + isCronConfigured := r.isBackupEnabled(dwOperatorConfig) + if !isCronConfigured { + log.Info("DevWorkspace backup is not configured, stopping cron scheduler and skipping reconciliation") + r.stopCron(log) + return ctrl.Result{}, nil + } + + r.startCron(ctx, dwOperatorConfig, log) + + return ctrl.Result{}, nil +} + +// isBackupEnabled checks if the backup cron job is enabled in the configuration. +func (r *BackupCronJobReconciler) isBackupEnabled(config *controllerv1alpha1.DevWorkspaceOperatorConfig) bool { + if config.Config != nil && config.Config.Workspace != nil && config.Config.Workspace.BackupCronJob != nil { + if config.Config.Workspace.BackupCronJob.Enable != nil && *config.Config.Workspace.BackupCronJob.Enable { + return true + } + } + return false +} + +// startCron starts the cron scheduler with the backup job according to the provided configuration. 
+func (r *BackupCronJobReconciler) startCron(ctx context.Context, dwOperatorConfig *controllerv1alpha1.DevWorkspaceOperatorConfig, log logr.Logger) { + log.Info("Starting backup cron scheduler") + + // remove existing cronjob tasks + // we cannot update the existing tasks, so we need to remove them and add new ones + entries := r.cron.Entries() + for _, entry := range entries { + log.Info("Removing existing cronjob task", "entryID", entry.ID) + r.cron.Remove(entry.ID) + } + + // add cronjob task + backUpConfig := dwOperatorConfig.Config.Workspace.BackupCronJob + log.Info("Adding cronjob task", "schedule", backUpConfig.Schedule) + _, err := r.cron.AddFunc(backUpConfig.Schedule, func() { + log.Info("Starting DevWorkspace backup job") + if err := r.executeBackupSync(ctx, dwOperatorConfig, log); err != nil { + log.Error(err, "Failed to execute backup job for DevWorkspaces") + } + log.Info("DevWorkspace backup job finished") + }) + if err != nil { + log.Error(err, "Failed to add cronjob function") + return + } + + log.Info("Starting cron scheduler") + r.cron.Start() +} + +// stopCron stops the cron scheduler and removes all existing cronjob tasks. +func (r *BackupCronJobReconciler) stopCron(log logr.Logger) { + log.Info("Stopping cron scheduler") + + // remove existing cronjob tasks + entries := r.cron.Entries() + for _, entry := range entries { + r.cron.Remove(entry.ID) + } + + ctx := r.cron.Stop() + <-ctx.Done() + + log.Info("Cron scheduler stopped") +} + +// executeBackupSync executes the backup job for all DevWorkspaces in the cluster that +// have been stopped in the last N minutes. 
+func (r *BackupCronJobReconciler) executeBackupSync(ctx context.Context, dwOperatorConfig *controllerv1alpha1.DevWorkspaceOperatorConfig, log logr.Logger) error { + log.Info("Executing backup sync for all DevWorkspaces") + + devWorkspaces := &dw.DevWorkspaceList{} + err := r.List(ctx, devWorkspaces) + if err != nil { + log.Error(err, "Failed to list DevWorkspaces") + return err + } + var lastBackupTime *metav1.Time + if dwOperatorConfig.Status != nil && dwOperatorConfig.Status.LastBackupTime != nil { + lastBackupTime = dwOperatorConfig.Status.LastBackupTime + } + for _, dw := range devWorkspaces.Items { + if !r.wasStoppedSinceLastBackup(&dw, lastBackupTime, log) { + log.Info("Skipping backup for DevWorkspace that wasn't stopped recently", "namespace", dw.Namespace, "name", dw.Name) + continue + } + dwID := dw.Status.DevWorkspaceId + log.Info("Found DevWorkspace", "namespace", dw.Namespace, "devworkspace", dw.Name, "id", dwID) + + err = r.ensureJobRunnerRBAC(ctx, &dw) + if err != nil { + log.Error(err, "Failed to ensure Job runner RBAC for DevWorkspace", "id", dwID) + continue + } + + if err = r.createBackupJob(&dw, ctx, dwOperatorConfig, log); err != nil { + log.Error(err, "Failed to create backup Job for DevWorkspace", "id", dwID) + continue + } + log.Info("Backup Job created for DevWorkspace", "id", dwID) + + } + origConfig := client.MergeFrom(dwOperatorConfig.DeepCopy()) + if dwOperatorConfig.Status == nil { + dwOperatorConfig.Status = &controllerv1alpha1.OperatorConfigurationStatus{} + } + dwOperatorConfig.Status.LastBackupTime = &metav1.Time{Time: metav1.Now().Time} + + err = r.Status().Patch(ctx, dwOperatorConfig, origConfig) + if err != nil { + log.Error(err, "Failed to update DevWorkspaceOperatorConfig status with last backup time") + return err + } + return nil +} + +// wasStoppedSinceLastBackup checks if the DevWorkspace was stopped since the last backup time. 
+func (r *BackupCronJobReconciler) wasStoppedSinceLastBackup(workspace *dw.DevWorkspace, lastBackupTime *metav1.Time, log logr.Logger) bool { + if workspace.Status.Phase != dw.DevWorkspaceStatusStopped { + return false + } + log.Info("DevWorkspace is currently stopped, checking if it was stopped since last backup", "namespace", workspace.Namespace, "name", workspace.Name) + // Check if the workspace was stopped in the last N minutes + if workspace.Status.Conditions != nil { + lastTimeStopped := metav1.Time{} + for _, condition := range workspace.Status.Conditions { + if condition.Type == conditions.Started && condition.Status == corev1.ConditionFalse { + lastTimeStopped = condition.LastTransitionTime + } + } + if !lastTimeStopped.IsZero() { + if lastBackupTime == nil { + // No previous backup, so consider it stopped since last backup + return true + } + if lastTimeStopped.Time.After(lastBackupTime.Time) { + log.Info("DevWorkspace was stopped since last backup", "namespace", workspace.Namespace, "name", workspace.Name) + return true + } + } + } + return false +} + +// createBackupJob creates a Kubernetes Job to back up the workspace's PVC data. 
+func (r *BackupCronJobReconciler) createBackupJob( + workspace *dw.DevWorkspace, + ctx context.Context, + dwOperatorConfig *controllerv1alpha1.DevWorkspaceOperatorConfig, + log logr.Logger, +) error { + dwID := workspace.Status.DevWorkspaceId + backUpConfig := dwOperatorConfig.Config.Workspace.BackupCronJob + + registryAuthSecret, err := r.handleRegistryAuthSecret(ctx, workspace, dwOperatorConfig, log) + if err != nil { + log.Error(err, "Failed to handle registry auth secret for DevWorkspace", "devworkspace", workspace.Name) + return err + } + + // Find a PVC with used by the workspace + pvcName, workspacePath, err := r.getWorkspacePVCName(ctx, workspace, dwOperatorConfig, log) + if err != nil { + log.Error(err, "Failed to get workspace PVC name", "devworkspace", workspace.Name) + return err + } + if pvcName == "" { + log.Error(err, "No PVC found for DevWorkspace", "id", dwID) + return err + } + + pvc := &corev1.PersistentVolumeClaim{} + err = r.Get(ctx, client.ObjectKey{Name: pvcName, Namespace: workspace.Namespace}, pvc) + if err != nil { + log.Error(err, "Failed to get PVC for DevWorkspace", "id", dwID) + return err + } + orasExtraArgs := "" + if backUpConfig.OrasConfig != nil { + orasExtraArgs = backUpConfig.OrasConfig.ExtraArgs + } + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: constants.DevWorkspaceBackupJobNamePrefix, + Namespace: workspace.Namespace, + Labels: map[string]string{ + constants.DevWorkspaceIDLabel: dwID, + constants.DevWorkspaceBackupJobLabel: "true", + }, + }, + Spec: batchv1.JobSpec{ + TTLSecondsAfterFinished: ptr.To[int32](120), + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "io.kubernetes.cri-o.Devices": "/dev/fuse", + }, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: JobRunnerSAName + "-" + workspace.Status.DevWorkspaceId, + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "backup-workspace", + Env: 
[]corev1.EnvVar{ + {Name: "DEVWORKSPACE_NAME", Value: workspace.Name}, + {Name: "DEVWORKSPACE_NAMESPACE", Value: workspace.Namespace}, + {Name: "WORKSPACE_ID", Value: dwID}, + { + Name: "BACKUP_SOURCE_PATH", + Value: "/workspace/" + workspacePath, + }, + {Name: "DEVWORKSPACE_BACKUP_REGISTRY", Value: backUpConfig.Registry.Path}, + {Name: "ORAS_EXTRA_ARGS", Value: orasExtraArgs}, + }, + Image: images.GetProjectBackupImage(), + ImagePullPolicy: "Always", + Args: []string{ + "/workspace-recovery.sh", + "--backup", + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/workspace", + Name: "workspace-data", + }, + { + MountPath: "/var/lib/containers", + Name: "build-storage", + }, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To[bool](false), + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "workspace-data", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + }, + { + Name: "build-storage", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + }, + } + if registryAuthSecret != nil { + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "registry-auth-secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: registryAuthSecret.Name, + }, + }, + }) + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: "registry-auth-secret", + MountPath: "/tmp/.docker", + ReadOnly: true, + }) + job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "REGISTRY_AUTH_FILE", + Value: "/tmp/.docker/.dockerconfigjson", + }) + + } + if err := controllerutil.SetControllerReference(workspace, job, r.Scheme); err != nil { + return err + } + err = r.Create(ctx, job) + if err != nil { + 
log.Error(err, "Failed to create backup Job for DevWorkspace", "devworkspace", workspace.Name) + return err + } + log.Info("Created backup Job for DevWorkspace", "jobName", job.Name, "devworkspace", workspace.Name) + return nil +} + +// getWorkspacePVCName determines the PVC name and workspace path based on the storage provisioner used. +func (r *BackupCronJobReconciler) getWorkspacePVCName(ctx context.Context, workspace *dw.DevWorkspace, dwOperatorConfig *controllerv1alpha1.DevWorkspaceOperatorConfig, log logr.Logger) (string, string, error) { + config, err := wkspConfig.ResolveConfigForWorkspace(workspace, r.Client) + + workspaceWithConfig := &common.DevWorkspaceWithConfig{} + workspaceWithConfig.DevWorkspace = workspace + workspaceWithConfig.Config = config + + storageProvisioner, err := storage.GetProvisioner(workspaceWithConfig) + if err != nil { + return "", "", err + } + if _, ok := storageProvisioner.(*storage.PerWorkspaceStorageProvisioner); ok { + pvcName := common.PerWorkspacePVCName(workspace.Status.DevWorkspaceId) + return pvcName, constants.DefaultProjectsSourcesRoot, nil + + } else if _, ok := storageProvisioner.(*storage.CommonStorageProvisioner); ok { + pvcName := constants.DefaultWorkspacePVCName + if dwOperatorConfig.Config.Workspace.PVCName != "" { + pvcName = dwOperatorConfig.Config.Workspace.PVCName + } + return pvcName, workspace.Status.DevWorkspaceId + constants.DefaultProjectsSourcesRoot, nil + } + return "", "", nil +} + +func (r *BackupCronJobReconciler) handleRegistryAuthSecret(ctx context.Context, workspace *dw.DevWorkspace, + dwOperatorConfig *controllerv1alpha1.DevWorkspaceOperatorConfig, log logr.Logger, +) (*corev1.Secret, error) { + secretName := dwOperatorConfig.Config.Workspace.BackupCronJob.Registry.AuthSecret + if secretName == "" { + // No auth secret configured - anonymous access to registry + return nil, nil + } + + // First check the workspace namespace for the secret + registryAuthSecret := &corev1.Secret{} + err := 
r.Get(ctx, client.ObjectKey{ + Name: secretName, + Namespace: workspace.Namespace}, registryAuthSecret) + if err == nil { + log.Info("Successfully retrieved registry auth secret for backup from workspace namespace", "secretName", secretName) + return registryAuthSecret, nil + } + if client.IgnoreNotFound(err) != nil { + return nil, err + } + + log.Info("Registry auth secret not found in workspace namespace, checking operator namespace", "secretName", secretName) + + // If the secret is not found in the workspace namespace, check the operator namespace as fallback + err = r.Get(ctx, client.ObjectKey{ + Name: secretName, + Namespace: dwOperatorConfig.Namespace}, registryAuthSecret) + if err != nil { + log.Error(err, "Failed to get registry auth secret for backup job", "secretName", secretName) + return nil, err + } + log.Info("Successfully retrieved registry auth secret for backup job", "secretName", secretName) + return r.copySecret(ctx, workspace, registryAuthSecret, log) +} + +// copySecret copies the given secret from the operator namespace to the workspace namespace. 
+func (r *BackupCronJobReconciler) copySecret(ctx context.Context, workspace *dw.DevWorkspace, sourceSecret *corev1.Secret, log logr.Logger) (namespaceSecret *corev1.Secret, err error) { + existingNamespaceSecret := &corev1.Secret{} + err = r.Get(ctx, client.ObjectKey{ + Name: constants.DevWorkspaceBackupAuthSecretName, + Namespace: workspace.Namespace}, existingNamespaceSecret) + if client.IgnoreNotFound(err) != nil { + log.Error(err, "Failed to check for existing registry auth secret in workspace namespace", "namespace", workspace.Namespace) + return nil, err + } + if err == nil { + err = r.Delete(ctx, existingNamespaceSecret) + if err != nil { + return nil, err + } + } + namespaceSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.DevWorkspaceBackupAuthSecretName, + Namespace: workspace.Namespace, + Labels: map[string]string{ + constants.DevWorkspaceIDLabel: workspace.Status.DevWorkspaceId, + constants.DevWorkspaceWatchSecretLabel: "true", + }, + }, + Data: sourceSecret.Data, + Type: sourceSecret.Type, + } + if err := controllerutil.SetControllerReference(workspace, namespaceSecret, r.Scheme); err != nil { + return nil, err + } + err = r.Create(ctx, namespaceSecret) + if err == nil { + log.Info("Successfully created secret", "name", namespaceSecret.Name, "namespace", workspace.Namespace) + } + return namespaceSecret, err +} diff --git a/controllers/backupcronjob/backupcronjob_controller_test.go b/controllers/backupcronjob/backupcronjob_controller_test.go new file mode 100644 index 000000000..1f94e54b2 --- /dev/null +++ b/controllers/backupcronjob/backupcronjob_controller_test.go @@ -0,0 +1,696 @@ +// Copyright (c) 2019-2025 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controllers + +import ( + "context" + "time" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/robfig/cron/v3" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + dwv2 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "github.com/devfile/devworkspace-operator/pkg/conditions" + "github.com/devfile/devworkspace-operator/pkg/constants" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" +) + +var _ = Describe("BackupCronJobReconciler", func() { + var ( + ctx context.Context + fakeClient client.Client + reconciler BackupCronJobReconciler + nameNamespace types.NamespacedName + log logr.Logger + ) + + BeforeEach(func() { + ctx = context.Background() + + // Initialize infrastructure for testing (defaults to Kubernetes) + infrastructure.InitializeForTesting(infrastructure.Kubernetes) + + scheme := runtime.NewScheme() + Expect(controllerv1alpha1.AddToScheme(scheme)).To(Succeed()) + Expect(dwv2.AddToScheme(scheme)).To(Succeed()) + 
Expect(corev1.AddToScheme(scheme)).To(Succeed()) + Expect(batchv1.AddToScheme(scheme)).To(Succeed()) + fakeClient = fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&controllerv1alpha1.DevWorkspaceOperatorConfig{}).Build() + log = zap.New(zap.UseDevMode(true)).WithName("BackupCronJobReconcilerTest") + + reconciler = BackupCronJobReconciler{ + Client: fakeClient, + NonCachingClient: fakeClient, + Log: log, + Scheme: scheme, + cron: cron.New(), + } + + nameNamespace = types.NamespacedName{ + Name: "devworkspace-operator-config", + Namespace: "devworkspace-controller", + } + }) + + AfterEach(func() { + reconciler.stopCron(log) // Ensure cron is stopped after each test + }) + + Context("Reconcile", func() { + It("Should do nothing if DevWorkspaceOperatorConfig is not found", func() { + result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: nameNamespace}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + Expect(reconciler.cron.Entries()).To(BeEmpty()) + }) + + It("Should not start cron if dwOperatorConfig.Config.Workspace.BackupCronJob is nil", func() { + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: nil, + }, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + + result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: nameNamespace}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + Expect(reconciler.cron.Entries()).To(BeEmpty()) + }) + + It("Should not start cron if received event from different namespace", func() { + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: "other-namespace"}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: 
&controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: pointer.Bool(true), + Schedule: "* * * * *", + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "fake-registry", + }, + }, + }, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Name: nameNamespace.Name, + Namespace: nameNamespace.Namespace, + }}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + Expect(reconciler.cron.Entries()).To(BeEmpty()) + }) + + It("Should start cron if enabled and schedule is defined", func() { + enabled := true + schedule := "* * * * *" + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: &enabled, + Schedule: schedule, + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "fake-registry", + }, + }, + }, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + + result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: nameNamespace}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + Expect(reconciler.cron.Entries()).To(HaveLen(1)) + }) + + It("Should stop cron if cron is disabled", func() { + enabled := true + schedule := "* * * * *" + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: &enabled, + Schedule: schedule, + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "fake-registry", + }, + }, + }, + }, + } + 
Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + + result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: nameNamespace}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + Expect(reconciler.cron.Entries()).To(HaveLen(1)) + + disabled := false + dwoc.Config.Workspace.BackupCronJob.Enable = &disabled + Expect(fakeClient.Update(ctx, dwoc)).To(Succeed()) + result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: nameNamespace}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + Expect(reconciler.cron.Entries()).To(HaveLen(0)) + }) + + It("Should update cron schedule if DevWorkspaceOperatorConfig is updated", func() { + enabled := true + schedule1 := "* * * * *" + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: &enabled, + Schedule: schedule1, + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "fake-registry", + }, + }, + }, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + + result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: nameNamespace}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + Expect(reconciler.cron.Entries()).To(HaveLen(1)) + entryID := reconciler.cron.Entries()[0].ID + + schedule2 := "1 * * * *" + dwoc.Config.Workspace.BackupCronJob.Schedule = schedule2 + Expect(fakeClient.Update(ctx, dwoc)).To(Succeed()) + + result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: nameNamespace}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + Expect(reconciler.cron.Entries()).To(HaveLen(1)) + Expect(reconciler.cron.Entries()[0].ID).NotTo(Equal(entryID)) + }) + + It("Should stop cron schedule if cron 
value is invalid", func() { + enabled := true + schedule1 := "invalid schedule" + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: &enabled, + Schedule: schedule1, + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "fake-registry", + }, + }, + }, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + + result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: nameNamespace}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + Expect(reconciler.cron.Entries()).To(HaveLen(0)) + + }) + + It("Should stop cron if DevWorkspaceOperatorConfig is deleted", func() { + enabled := true + schedule := "* * * * *" + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: &enabled, + Schedule: schedule, + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "fake-registry", + }, + }, + }, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + + result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: nameNamespace}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + Expect(reconciler.cron.Entries()).To(HaveLen(1)) + + Expect(fakeClient.Delete(ctx, dwoc)).To(Succeed()) + + result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: nameNamespace}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + Expect(reconciler.cron.Entries()). 
+ To(HaveLen(0)) + }) + }) + + Context("executeBackupSync", func() { + It("should fail if registry secret does not exist", func() { + enabled := true + schedule := "* * * * *" + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: &enabled, + Schedule: schedule, + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "fake-registry", + AuthSecret: "non-existent", + }, + }, + }, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + dw := createDevWorkspace("dw-recent", "ns-a", false, metav1.NewTime(time.Now().Add(-10*time.Minute))) + dw.Status.Phase = dwv2.DevWorkspaceStatusStopped + dw.Status.DevWorkspaceId = "id-recent" + Expect(fakeClient.Create(ctx, dw)).To(Succeed()) + + err := reconciler.executeBackupSync(ctx, dwoc, log) + Expect(err).ToNot(HaveOccurred()) + + jobList := &batchv1.JobList{} + Expect(fakeClient.List(ctx, jobList, &client.ListOptions{Namespace: dw.Namespace})).To(Succeed()) + Expect(jobList.Items).To(HaveLen(0)) + }) + + It("creates a Job for a DevWorkspace stopped with no previous backup", func() { + enabled := true + schedule := "* * * * *" + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: &enabled, + Schedule: schedule, + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "fake-registry", + }, + OrasConfig: &controllerv1alpha1.OrasConfig{ + ExtraArgs: "--extra-arg1", + }, + }, + }, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + dw := createDevWorkspace("dw-recent", "ns-a", false, 
metav1.NewTime(time.Now().Add(-10*time.Minute))) + dw.Status.Phase = dwv2.DevWorkspaceStatusStopped + dw.Status.DevWorkspaceId = "id-recent" + Expect(fakeClient.Create(ctx, dw)).To(Succeed()) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-devworkspace", Namespace: dw.Namespace}} + Expect(fakeClient.Create(ctx, pvc)).To(Succeed()) + + Expect(reconciler.executeBackupSync(ctx, dwoc, log)).To(Succeed()) + + jobList := &batchv1.JobList{} + Expect(fakeClient.List(ctx, jobList, &client.ListOptions{Namespace: dw.Namespace})).To(Succeed()) + Expect(jobList.Items).To(HaveLen(1)) + job := jobList.Items[0] + Expect(job.Labels[constants.DevWorkspaceIDLabel]).To(Equal("id-recent")) + Expect(job.Spec.Template.Spec.ServiceAccountName).To(Equal("devworkspace-job-runner-id-recent")) + container := job.Spec.Template.Spec.Containers[0] + expectedEnvs := []corev1.EnvVar{ + {Name: "DEVWORKSPACE_NAME", Value: "dw-recent"}, + {Name: "DEVWORKSPACE_NAMESPACE", Value: "ns-a"}, + {Name: "WORKSPACE_ID", Value: "id-recent"}, + {Name: "BACKUP_SOURCE_PATH", Value: "/workspace/id-recent/projects"}, + {Name: "DEVWORKSPACE_BACKUP_REGISTRY", Value: "fake-registry"}, + {Name: "ORAS_EXTRA_ARGS", Value: "--extra-arg1"}, + } + Expect(container.Env).Should(ContainElements(expectedEnvs), "container env vars should include vars needed for backup") + + expectedVolumeMounts := []corev1.VolumeMount{ + {MountPath: "/workspace", Name: "workspace-data"}, + {MountPath: "/var/lib/containers", Name: "build-storage"}, + } + Expect(container.VolumeMounts).Should(ContainElements(expectedVolumeMounts), "container volume mounts should include mounts needed for backup") + }) + + It("does not create a Job when the DevWorkspace was stopped beyond time range", func() { + enabled := true + schedule := "* * * * *" + lastBackupTime := metav1.NewTime(time.Now().Add(-15 * time.Minute)) + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name:
nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: &enabled, + Schedule: schedule, + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "fake-registry", + }, + }, + }, + }, + Status: &controllerv1alpha1.OperatorConfigurationStatus{ + LastBackupTime: &lastBackupTime, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + dw := createDevWorkspace("dw-old", "ns-b", false, metav1.NewTime(time.Now().Add(-60*time.Minute))) + dw.Status.Phase = dwv2.DevWorkspaceStatusStopped + dw.Status.DevWorkspaceId = "id-old" + Expect(fakeClient.Create(ctx, dw)).To(Succeed()) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-devworkspace", Namespace: dw.Namespace}} + Expect(fakeClient.Create(ctx, pvc)).To(Succeed()) + + Expect(reconciler.executeBackupSync(ctx, dwoc, log)).To(Succeed()) + + jobList := &batchv1.JobList{} + Expect(fakeClient.List(ctx, jobList, &client.ListOptions{Namespace: dw.Namespace})).To(Succeed()) + Expect(jobList.Items).To(HaveLen(0)) + }) + + It("does not create a Job for a running DevWorkspace", func() { + enabled := true + schedule := "* * * * *" + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: &enabled, + Schedule: schedule, + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "fake-registry", + }, + }, + }, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + dw := createDevWorkspace("dw-running", "ns-c", true, metav1.NewTime(time.Now().Add(-5*time.Minute))) + Expect(fakeClient.Create(ctx, dw)).To(Succeed()) + + pvc := 
&corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-devworkspace", Namespace: dw.Namespace}} + Expect(fakeClient.Create(ctx, pvc)).To(Succeed()) + + Expect(reconciler.executeBackupSync(ctx, dwoc, log)).To(Succeed()) + + jobList := &batchv1.JobList{} + Expect(fakeClient.List(ctx, jobList, &client.ListOptions{Namespace: dw.Namespace})).To(Succeed()) + Expect(jobList.Items).To(HaveLen(0)) + }) + + It("creates a Job for a DevWorkspace stopped with no previous backup and global auth registry", func() { + enabled := true + schedule := "* * * * *" + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: &enabled, + Schedule: schedule, + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "my-registry:5000", + AuthSecret: "my-secret", + }, + }, + }, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + dw := createDevWorkspace("dw-recent", "ns-a", false, metav1.NewTime(time.Now().Add(-10*time.Minute))) + dw.Status.Phase = dwv2.DevWorkspaceStatusStopped + dw.Status.DevWorkspaceId = "id-recent" + Expect(fakeClient.Create(ctx, dw)).To(Succeed()) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-devworkspace", Namespace: dw.Namespace}} + Expect(fakeClient.Create(ctx, pvc)).To(Succeed()) + + authSecret := createAuthSecret("my-secret", nameNamespace.Namespace, map[string][]byte{}) + Expect(fakeClient.Create(ctx, authSecret)).To(Succeed()) + + Expect(reconciler.executeBackupSync(ctx, dwoc, log)).To(Succeed()) + + jobList := &batchv1.JobList{} + Expect(fakeClient.List(ctx, jobList, &client.ListOptions{Namespace: dw.Namespace})).To(Succeed()) + Expect(jobList.Items).To(HaveLen(1)) + }) + It("creates a Job for a DevWorkspace stopped with no previous 
backup and local auth registry", func() { + enabled := true + schedule := "* * * * *" + dwoc := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + ObjectMeta: metav1.ObjectMeta{Name: nameNamespace.Name, Namespace: nameNamespace.Namespace}, + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: &controllerv1alpha1.BackupCronJobConfig{ + Enable: &enabled, + Schedule: schedule, + Registry: &controllerv1alpha1.RegistryConfig{ + Path: "my-registry:5000", + AuthSecret: "my-secret", + }, + }, + }, + }, + } + Expect(fakeClient.Create(ctx, dwoc)).To(Succeed()) + dw := createDevWorkspace("dw-recent", "ns-a", false, metav1.NewTime(time.Now().Add(-10*time.Minute))) + dw.Status.Phase = dwv2.DevWorkspaceStatusStopped + dw.Status.DevWorkspaceId = "id-recent" + Expect(fakeClient.Create(ctx, dw)).To(Succeed()) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-devworkspace", Namespace: dw.Namespace}} + Expect(fakeClient.Create(ctx, pvc)).To(Succeed()) + + authSecret := createAuthSecret("my-secret", "ns-a", map[string][]byte{}) + Expect(fakeClient.Create(ctx, authSecret)).To(Succeed()) + + Expect(reconciler.executeBackupSync(ctx, dwoc, log)).To(Succeed()) + + jobList := &batchv1.JobList{} + Expect(fakeClient.List(ctx, jobList, &client.ListOptions{Namespace: dw.Namespace})).To(Succeed()) + Expect(jobList.Items).To(HaveLen(1)) + }) + }) + Context("ensureJobRunnerRBAC", func() { + It("creates ServiceAccount for Job runner", func() { + dw := createDevWorkspace("dw-rbac", "ns-rbac", false, metav1.NewTime(time.Now().Add(-10*time.Minute))) + dw.Status.DevWorkspaceId = "id-rbac" + Expect(fakeClient.Create(ctx, dw)).To(Succeed()) + + err := reconciler.ensureJobRunnerRBAC(ctx, dw) + Expect(err).ToNot(HaveOccurred()) + + sa := &corev1.ServiceAccount{} + err = fakeClient.Get(ctx, types.NamespacedName{ + Name: "devworkspace-job-runner-id-rbac", + Namespace: dw.Namespace, + }, sa) + 
Expect(err).ToNot(HaveOccurred()) + Expect(sa.Labels).To(HaveKeyWithValue(constants.DevWorkspaceIDLabel, "id-rbac")) + + // Calling again should be idempotent + err = reconciler.ensureJobRunnerRBAC(ctx, dw) + Expect(err).ToNot(HaveOccurred()) + }) + }) + Context("wasStoppedSinceLastBackup", func() { + It("returns true if DevWorkspace was stopped since last backup", func() { + lastBackupTime := metav1.NewTime(time.Now().Add(-30 * time.Minute)) + workspaceStoppedTime := metav1.NewTime(time.Now().Add(-20 * time.Minute)) + dw := createDevWorkspace("dw-test", "ns-test", false, workspaceStoppedTime) + result := reconciler.wasStoppedSinceLastBackup(dw, &lastBackupTime, log) + Expect(result).To(BeTrue()) + }) + + It("returns false if DevWorkspace was stopped before last backup", func() { + lastBackupTime := metav1.NewTime(time.Now().Add(-5 * time.Minute)) + workspaceStoppedTime := metav1.NewTime(time.Now().Add(-10 * time.Minute)) + dw := createDevWorkspace("dw-test", "ns-test", false, workspaceStoppedTime) + result := reconciler.wasStoppedSinceLastBackup(dw, &lastBackupTime, log) + Expect(result).To(BeFalse()) + }) + It("returns true if there is no last backup time", func() { + dw := createDevWorkspace("dw-test", "ns-test", false, metav1.NewTime(time.Now().Add(-10*time.Minute))) + result := reconciler.wasStoppedSinceLastBackup(dw, nil, log) + Expect(result).To(BeTrue()) + }) + It("returns false if DevWorkspace is running", func() { + lastBackupTime := metav1.NewTime(time.Now().Add(-30 * time.Minute)) + workspaceStoppedTime := metav1.NewTime(time.Now().Add(-20 * time.Minute)) + dw := createDevWorkspace("dw-test", "ns-test", true, workspaceStoppedTime) + result := reconciler.wasStoppedSinceLastBackup(dw, &lastBackupTime, log) + Expect(result).To(BeFalse()) + }) + }) + +}) + +// Helper function to create a DevWorkspace +func createDevWorkspace(name, namespace string, started bool, lastTransitionTime metav1.Time) *dwv2.DevWorkspace { + dw := &dwv2.DevWorkspace{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: dwv2.DevWorkspaceSpec{ + Started: started, + }, + Status: dwv2.DevWorkspaceStatus{ + Conditions: []dwv2.DevWorkspaceCondition{}, + }, + } + + if !lastTransitionTime.IsZero() { + condition := dwv2.DevWorkspaceCondition{ + Type: conditions.Started, + Status: corev1.ConditionTrue, + LastTransitionTime: lastTransitionTime, + Reason: "Test", + Message: "Test", + } + if !started { + condition.Status = corev1.ConditionFalse + dw.Status.Phase = dwv2.DevWorkspaceStatusStopped + } + dw.Status.Conditions = append(dw.Status.Conditions, condition) + } + + return dw +} + +func createAuthSecret(name, namespace string, data map[string][]byte) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: data, + } +} + +var _ = Describe("DevWorkspaceOperatorConfig UpdateFunc Tests", func() { + var configPredicate predicate.Funcs + + BeforeEach(func() { + configPredicate = predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return shouldReconcileOnUpdate(e, zap.New(zap.UseDevMode(true))) + }, + } + }) + + DescribeTable("Testing UpdateFunc for backup configuration changes", + func(oldBackup, newBackup *controllerv1alpha1.BackupCronJobConfig, expected bool) { + oldCfg := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: oldBackup, + }, + }, + } + newCfg := &controllerv1alpha1.DevWorkspaceOperatorConfig{ + Config: &controllerv1alpha1.OperatorConfiguration{ + Workspace: &controllerv1alpha1.WorkspaceConfig{ + BackupCronJob: newBackup, + }, + }, + } + updateEvent := event.UpdateEvent{ + ObjectOld: oldCfg, + ObjectNew: newCfg, + } + result := configPredicate.Update(updateEvent) + Expect(result).To(Equal(expected)) + }, + + Entry("Both nil => no change", nil, nil, false), + Entry("OldBackup==nil, NewBackup!=nil => changed", nil, 
&controllerv1alpha1.BackupCronJobConfig{}, true), + Entry("OldBackup!=nil, NewBackup==nil => changed", &controllerv1alpha1.BackupCronJobConfig{}, nil, true), + Entry("OldBackup.Enable==nil, NewBackup.Enable==nil => no change", + &controllerv1alpha1.BackupCronJobConfig{Enable: nil}, + &controllerv1alpha1.BackupCronJobConfig{Enable: nil}, + false, + ), + Entry("OldBackup.Enable==nil, NewBackup.Enable!=nil => changed", + &controllerv1alpha1.BackupCronJobConfig{Enable: nil}, + &controllerv1alpha1.BackupCronJobConfig{Enable: pointer.Bool(true)}, + true, + ), + Entry("OldBackup.Enable!=nil, NewBackup.Enable==nil => changed", + &controllerv1alpha1.BackupCronJobConfig{Enable: pointer.Bool(true)}, + &controllerv1alpha1.BackupCronJobConfig{Enable: nil}, + true, + ), + Entry("Enable differs => changed", + &controllerv1alpha1.BackupCronJobConfig{Enable: pointer.Bool(true)}, + &controllerv1alpha1.BackupCronJobConfig{Enable: pointer.Bool(false)}, + true, + ), + Entry("Schedule differs => changed", + &controllerv1alpha1.BackupCronJobConfig{Schedule: "0 * * * *"}, + &controllerv1alpha1.BackupCronJobConfig{Schedule: "1 * * * *"}, + true, + ), + Entry("Registry differs => changed", + &controllerv1alpha1.BackupCronJobConfig{Registry: &controllerv1alpha1.RegistryConfig{Path: "fake"}}, + &controllerv1alpha1.BackupCronJobConfig{Registry: &controllerv1alpha1.RegistryConfig{Path: "fake-different"}}, + true, + ), + Entry("All fields match => no change", + &controllerv1alpha1.BackupCronJobConfig{ + Enable: pointer.Bool(true), + Schedule: "0 * * * *", + }, + &controllerv1alpha1.BackupCronJobConfig{ + Enable: pointer.Bool(true), + Schedule: "0 * * * *", + }, + false, + ), + ) +}) diff --git a/controllers/backupcronjob/rbac.go b/controllers/backupcronjob/rbac.go new file mode 100644 index 000000000..9f1dfcf3b --- /dev/null +++ b/controllers/backupcronjob/rbac.go @@ -0,0 +1,153 @@ +// +// Copyright (c) 2019-2025 Red Hat, Inc. 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package controllers + +import ( + "context" + "fmt" + + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/devworkspace-operator/pkg/constants" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" + "github.com/devfile/devworkspace-operator/pkg/provision/sync" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + JobRunnerSAName = "devworkspace-job-runner" +) + +func (r *BackupCronJobReconciler) ensureJobRunnerRBAC(ctx context.Context, workspace *dw.DevWorkspace) error { + saName := JobRunnerSAName + "-" + workspace.Status.DevWorkspaceId + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: workspace.Namespace, Labels: map[string]string{ + constants.DevWorkspaceIDLabel: workspace.Status.DevWorkspaceId, + }}, + } + + // Create or update ServiceAccount + if err := controllerutil.SetControllerReference(workspace, sa, r.Scheme); err != nil { + return err + } + + clusterAPI := sync.ClusterAPI{ + Client: r.Client, + Scheme: r.Scheme, + Logger: r.Log, + Ctx: ctx, + } + + _, err := sync.SyncObjectWithCluster(sa, clusterAPI) + if err != nil { + if _, ok := err.(*sync.NotInSyncError); !ok { + return 
fmt.Errorf("synchronizing ServiceAccount: %w", err) + } + } + + if infrastructure.IsOpenShift() { + // Create ClusterRoleBinding for image push role + if err := r.ensureImagePushRoleBinding(ctx, saName, workspace, clusterAPI); err != nil { + return fmt.Errorf("ensuring image push ClusterRoleBinding: %w", err) + } + // Create ImageStream for backup images + if err := r.ensureImageStreamForBackup(ctx, workspace, clusterAPI); err != nil { + return fmt.Errorf("ensuring ImageStream for backup: %w", err) + } + } + + return nil + +} + +// ensureImagePushRoleBinding creates a ClusterRoleBinding to allow the given ServiceAccount to push images +// to the OpenShift internal registry. +func (r *BackupCronJobReconciler) ensureImagePushRoleBinding(ctx context.Context, saName string, workspace *dw.DevWorkspace, clusterAPI sync.ClusterAPI) error { + // Create RoleBinding for system:image-builder role + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devworkspace-image-builder-" + workspace.Status.DevWorkspaceId, + Namespace: workspace.Namespace, + Labels: map[string]string{ + constants.DevWorkspaceIDLabel: workspace.Status.DevWorkspaceId, + }, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: saName, + Namespace: workspace.Namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "system:image-builder", + APIGroup: "rbac.authorization.k8s.io", + }, + } + + _, err := sync.SyncObjectWithCluster(roleBinding, clusterAPI) + if err != nil { + if _, ok := err.(*sync.NotInSyncError); !ok { + return fmt.Errorf("ensuring RoleBinding: %w", err) + } + } + + return nil +} + +// ensureImageStreamForBackup creates an ImageStream for the backup images in OpenShift in case user +// selects to use the internal registry. Push to non-existing ImageStream fails, so we need to create it first. 
+func (r *BackupCronJobReconciler) ensureImageStreamForBackup(ctx context.Context, workspace *dw.DevWorkspace, clusterAPI sync.ClusterAPI) error { + // Create ImageStream for backup images using unstructured to avoid scheme conflicts + imageStream := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "image.openshift.io/v1", + "kind": "ImageStream", + "metadata": map[string]interface{}{ + "name": workspace.Name, + "namespace": workspace.Namespace, + "labels": map[string]interface{}{ + constants.DevWorkspaceIDLabel: workspace.Status.DevWorkspaceId, + }, + }, + "spec": map[string]interface{}{ + "lookupPolicy": map[string]interface{}{ + "local": true, + }, + }, + }, + } + if err := controllerutil.SetControllerReference(workspace, imageStream, r.Scheme); err != nil { + return err + } + + imageStream.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "image.openshift.io", + Version: "v1", + Kind: "ImageStream", + }) + + if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, imageStream, func() error { return nil }); err != nil { + return fmt.Errorf("ensuring ImageStream: %w", err) + } + + return nil +} diff --git a/controllers/backupcronjob/suite_test.go b/controllers/backupcronjob/suite_test.go new file mode 100644 index 000000000..9c26322a3 --- /dev/null +++ b/controllers/backupcronjob/suite_test.go @@ -0,0 +1,28 @@ +// +// Copyright (c) 2019-2024 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package controllers + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestBackupCronJob(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "BackupCronJob Controller Suite") +} diff --git a/deploy/bundle/manifests/controller.devfile.io_devworkspaceoperatorconfigs.yaml b/deploy/bundle/manifests/controller.devfile.io_devworkspaceoperatorconfigs.yaml index 471d552c8..587282992 100644 --- a/deploy/bundle/manifests/controller.devfile.io_devworkspaceoperatorconfigs.yaml +++ b/deploy/bundle/manifests/controller.devfile.io_devworkspaceoperatorconfigs.yaml @@ -181,6 +181,54 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + backupCronJob: + description: BackupCronJobConfig defines configuration options for a cron job that automatically backs up workspace PVCs. + properties: + enable: + description: |- + Enable determines whether backup CronJobs should be created for workspace PVCs. + Defaults to false if not specified. + type: boolean + oras: + description: |- + OrasConfig defines additional configuration options for the oras CLI used to + push and pull backup images. + properties: + extraArgs: + description: ExtraArgs are additional arguments passed to the oras CLI + type: string + type: object + registry: + description: RegistryConfig defines the registry configuration where backup images are stored. + properties: + authSecret: + description: |- + AuthSecret is the name of a Kubernetes secret of + type kubernetes.io/dockerconfigjson. + The secret is expected to be in the same namespace the workspace is running in. + If secret is not found in the workspace namespace, the operator will look for the secret + in the namespace where the operator is running, i.e. the same namespace + as the DevWorkspaceOperatorConfig. + The secret must contain the "controller.devfile.io/watch-secret=true" label so that it can be + recognized by the operator.
+ type: string + path: + description: |- + A registry where backup images are stored. Images are stored + in {path}/${DEVWORKSPACE_NAMESPACE}/${DEVWORKSPACE_NAME}:latest + type: string + required: + - path + type: object + schedule: + default: 0 1 * * * + description: |- + Schedule specifies the cron schedule for the backup cron job. + For example, "0 1 * * *" runs daily at 1 AM. + type: string + required: + - registry + type: object cleanupCronJob: description: CleanupCronJobConfig defines configuration options for a cron job that automatically cleans up stale DevWorkspaces. properties: @@ -3030,9 +3078,79 @@ spec: type: string metadata: type: object + status: + description: |- + Status represents the current status of the DevWorkspaceOperatorConfig + automatically managed by the DevWorkspace Operator. + properties: + conditions: + description: Conditions represent the latest available observations of the OperatorConfiguration's state + items: + description: Condition contains details for one aspect of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastBackupTime: + description: |- + LastBackupTime is the timestamp of the last successful backup. Nil if + no backup is configured or no backup has yet succeeded. 
+ format: date-time + type: string + type: object type: object served: true storage: true + subresources: + status: {} status: acceptedNames: kind: "" diff --git a/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml b/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml index 51be04001..16abce73e 100644 --- a/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml +++ b/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml @@ -101,10 +101,7 @@ spec: - "" resources: - configmaps - - persistentvolumeclaims - pods - - secrets - - serviceaccounts - services verbs: - '*' @@ -120,9 +117,16 @@ spec: - apiGroups: - "" resources: + - imagestreams - pods/exec verbs: - create + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - '*' - apiGroups: - "" resourceNames: @@ -134,6 +138,35 @@ spec: - delete - get - patch + - apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - '*' + - apiGroups: + - "" + - build.openshift.io + resources: + - builds + verbs: + - get + - apiGroups: + - "" + - build.openshift.io + resources: + - builds/details + verbs: + - update + - apiGroups: + - "" + - image.openshift.io + resources: + - imagestreams/layers + verbs: + - get + - update - apiGroups: - admissionregistration.k8s.io resources: @@ -204,6 +237,8 @@ spec: verbs: - get - list + - patch + - update - watch - apiGroups: - controller.devfile.io @@ -221,6 +256,17 @@ spec: - create - get - update + - apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - create + - delete + - get + - list + - patch + - update - apiGroups: - metrics.k8s.io resources: @@ -259,6 +305,17 @@ spec: - rbac.authorization.k8s.io resources: - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - rbac.authorization.k8s.io + resources: - clusterroles verbs: - create @@ -332,6 +389,8 @@ spec: value: 
quay.io/devfile/devworkspace-controller:next - name: RELATED_IMAGE_project_clone value: quay.io/devfile/project-clone:next + - name: RELATED_IMAGE_project_backup + value: quay.io/devfile/project-backup:next - name: WATCH_NAMESPACE valueFrom: fieldRef: @@ -465,6 +524,8 @@ spec: name: devworkspace_webhook_server - image: quay.io/devfile/project-clone:next name: project_clone + - image: quay.io/devfile/project-backup:next + name: project_backup - image: registry.access.redhat.com/ubi9/ubi-micro:9.5-1733126338 name: pvc_cleanup_job - image: quay.io/eclipse/che-workspace-data-sync-storage:0.0.1 diff --git a/deploy/deployment/kubernetes/combined.yaml b/deploy/deployment/kubernetes/combined.yaml index 34953ec1e..07e4eedb1 100644 --- a/deploy/deployment/kubernetes/combined.yaml +++ b/deploy/deployment/kubernetes/combined.yaml @@ -182,6 +182,57 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + backupCronJob: + description: BackupCronJobConfig defines configuration options + for a cron job that automatically backs up workspace PVCs. + properties: + enable: + description: |- + Enable determines whether backup CronJobs should be created for workspace PVCs. + Defaults to false if not specified. + type: boolean + oras: + description: |- + OrasConfig defines additional configuration options for the oras CLI used to + push and pull backup images. + properties: + extraArgs: + description: ExtraArgs are additional arguments passed + to the oras CLI + type: string + type: object + registry: + description: RegistryConfig defines the registry configuration + where backup images are stored. + properties: + authSecret: + description: |- + AuthSecret is the name of a Kubernetes secret of + type kubernetes.io/dockerconfigjson. + The secret is expected to be in the same namespace the workspace is running in. 
+ If secret is not found in the workspace namespace, the operator will look for the secret + in the namespace where the operator is running, i.e. the same namespace + as the DevWorkspaceOperatorConfig. + The secret must contain the "controller.devfile.io/watch-secret=true" label so that it can be + recognized by the operator. + type: string + path: + description: |- + A registry where backup images are stored. Images are stored + in {path}/${DEVWORKSPACE_NAMESPACE}/${DEVWORKSPACE_NAME}:latest + type: string + required: + - path + type: object + schedule: + default: 0 1 * * * + description: |- + Schedule specifies the cron schedule for the backup cron job. + For example, "0 1 * * *" runs daily at 1 AM. + type: string + required: + - registry + type: object cleanupCronJob: description: CleanupCronJobConfig defines configuration options for a cron job that automatically cleans up stale DevWorkspaces. @@ -3181,9 +3232,81 @@ spec: type: string metadata: type: object + status: + description: |- + Status represents the current status of the DevWorkspaceOperatorConfig + automatically managed by the DevWorkspace Operator. + properties: + conditions: + description: Conditions represent the latest available observations + of the OperatorConfiguration's state + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastBackupTime: + description: |- + LastBackupTime is the timestamp of the last successful backup. Nil if + no backup is configured or no backup has yet succeeded. 
+ format: date-time + type: string + type: object type: object served: true storage: true + subresources: + status: {} --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -25809,10 +25932,7 @@ rules: - "" resources: - configmaps - - persistentvolumeclaims - pods - - secrets - - serviceaccounts - services verbs: - '*' @@ -25828,9 +25948,16 @@ rules: - apiGroups: - "" resources: + - imagestreams - pods/exec verbs: - create +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - '*' - apiGroups: - "" resourceNames: @@ -25842,6 +25969,35 @@ rules: - delete - get - patch +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - build.openshift.io + resources: + - builds + verbs: + - get +- apiGroups: + - "" + - build.openshift.io + resources: + - builds/details + verbs: + - update +- apiGroups: + - "" + - image.openshift.io + resources: + - imagestreams/layers + verbs: + - get + - update - apiGroups: - admissionregistration.k8s.io resources: @@ -25912,6 +26068,8 @@ rules: verbs: - get - list + - patch + - update - watch - apiGroups: - controller.devfile.io @@ -25929,6 +26087,17 @@ rules: - create - get - update +- apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - create + - delete + - get + - list + - patch + - update - apiGroups: - metrics.k8s.io resources: @@ -25967,6 +26136,17 @@ rules: - rbac.authorization.k8s.io resources: - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: - clusterroles verbs: - create @@ -26158,6 +26338,8 @@ spec: value: quay.io/devfile/devworkspace-controller:next - name: RELATED_IMAGE_project_clone value: quay.io/devfile/project-clone:next + - name: RELATED_IMAGE_project_backup + value: quay.io/devfile/project-backup:next - name: WATCH_NAMESPACE value: "" - name: POD_NAME diff --git 
a/deploy/deployment/kubernetes/objects/devworkspace-controller-manager.Deployment.yaml b/deploy/deployment/kubernetes/objects/devworkspace-controller-manager.Deployment.yaml index cd6b29efc..6be98e9be 100644 --- a/deploy/deployment/kubernetes/objects/devworkspace-controller-manager.Deployment.yaml +++ b/deploy/deployment/kubernetes/objects/devworkspace-controller-manager.Deployment.yaml @@ -30,6 +30,8 @@ spec: value: quay.io/devfile/devworkspace-controller:next - name: RELATED_IMAGE_project_clone value: quay.io/devfile/project-clone:next + - name: RELATED_IMAGE_project_backup + value: quay.io/devfile/project-backup:next - name: WATCH_NAMESPACE value: "" - name: POD_NAME diff --git a/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml b/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml index cb43b0b1e..d4fa6dfeb 100644 --- a/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml +++ b/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml @@ -21,10 +21,7 @@ rules: - "" resources: - configmaps - - persistentvolumeclaims - pods - - secrets - - serviceaccounts - services verbs: - '*' @@ -40,9 +37,16 @@ rules: - apiGroups: - "" resources: + - imagestreams - pods/exec verbs: - create +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - '*' - apiGroups: - "" resourceNames: @@ -54,6 +58,35 @@ rules: - delete - get - patch +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - build.openshift.io + resources: + - builds + verbs: + - get +- apiGroups: + - "" + - build.openshift.io + resources: + - builds/details + verbs: + - update +- apiGroups: + - "" + - image.openshift.io + resources: + - imagestreams/layers + verbs: + - get + - update - apiGroups: - admissionregistration.k8s.io resources: @@ -124,6 +157,8 @@ rules: verbs: - get - list + - patch + - update - watch - apiGroups: - 
controller.devfile.io @@ -141,6 +176,17 @@ rules: - create - get - update +- apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - create + - delete + - get + - list + - patch + - update - apiGroups: - metrics.k8s.io resources: @@ -179,6 +225,17 @@ rules: - rbac.authorization.k8s.io resources: - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: - clusterroles verbs: - create diff --git a/deploy/deployment/kubernetes/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml b/deploy/deployment/kubernetes/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml index 910c7f6b5..3aa055792 100644 --- a/deploy/deployment/kubernetes/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml +++ b/deploy/deployment/kubernetes/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml @@ -182,6 +182,57 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + backupCronJob: + description: BackupCronJobConfig defines configuration options + for a cron job that automatically backs up workspace PVCs. + properties: + enable: + description: |- + Enable determines whether backup CronJobs should be created for workspace PVCs. + Defaults to false if not specified. + type: boolean + oras: + description: |- + OrasConfig defines additional configuration options for the oras CLI used to + push and pull backup images. + properties: + extraArgs: + description: ExtraArgs are additional arguments passed + to the oras CLI + type: string + type: object + registry: + description: RegistryConfig defines the registry configuration + where backup images are stored. + properties: + authSecret: + description: |- + AuthSecret is the name of a Kubernetes secret of + type kubernetes.io/dockerconfigjson. 
+ The secret is expected to be in the same namespace the workspace is running in. + If secret is not found in the workspace namespace, the operator will look for the secret + in the namespace where the operator is running, i.e. the same namespace + as the DevWorkspaceOperatorConfig. + The secret must contain the "controller.devfile.io/watch-secret=true" label so that it can be + recognized by the operator. + type: string + path: + description: |- + A registry where backup images are stored. Images are stored + in {path}/${DEVWORKSPACE_NAMESPACE}/${DEVWORKSPACE_NAME}:latest + type: string + required: + - path + type: object + schedule: + default: 0 1 * * * + description: |- + Schedule specifies the cron schedule for the backup cron job. + For example, "0 1 * * *" runs daily at 1 AM. + type: string + required: + - registry + type: object cleanupCronJob: description: CleanupCronJobConfig defines configuration options for a cron job that automatically cleans up stale DevWorkspaces. @@ -3181,6 +3232,78 @@ spec: type: string metadata: type: object + status: + description: |- + Status represents the current status of the DevWorkspaceOperatorConfig + automatically managed by the DevWorkspace Operator. + properties: + conditions: + description: Conditions represent the latest available observations + of the OperatorConfiguration's state + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string.
+ maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastBackupTime: + description: |- + LastBackupTime is the timestamp of the last successful backup. Nil if + no backup is configured or no backup has yet succeeded. 
+ format: date-time + type: string + type: object type: object served: true storage: true + subresources: + status: {} diff --git a/deploy/deployment/openshift/combined.yaml b/deploy/deployment/openshift/combined.yaml index 31bb95d7f..31dacf1b9 100644 --- a/deploy/deployment/openshift/combined.yaml +++ b/deploy/deployment/openshift/combined.yaml @@ -182,6 +182,57 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + backupCronJob: + description: BackupCronJobConfig defines configuration options + for a cron job that automatically backs up workspace PVCs. + properties: + enable: + description: |- + Enable determines whether backup CronJobs should be created for workspace PVCs. + Defaults to false if not specified. + type: boolean + oras: + description: |- + OrasConfig defines additional configuration options for the oras CLI used to + push and pull backup images. + properties: + extraArgs: + description: ExtraArgs are additional arguments passed + to the oras CLI + type: string + type: object + registry: + description: RegistryConfig defines the registry configuration + where backup images are stored. + properties: + authSecret: + description: |- + AuthSecret is the name of a Kubernetes secret of + type kubernetes.io/dockerconfigjson. + The secret is expected to be in the same namespace the workspace is running in. + If secret is not found in the workspace namespace, the operator will look for the secret + in the namespace where the operator is running, i.e. the same namespace + as the DevWorkspaceOperatorConfig. + The secret must contain the "controller.devfile.io/watch-secret=true" label so that it can be + recognized by the operator. + type: string + path: + description: |- + A registry where backup images are stored.
Images are stored + in {path}/${DEVWORKSPACE_NAMESPACE}/${DEVWORKSPACE_NAME}:latest + type: string + required: + - path + type: object + schedule: + default: 0 1 * * * + description: |- + Schedule specifies the cron schedule for the backup cron job. + For example, "0 1 * * *" runs daily at 1 AM. + type: string + required: + - registry + type: object cleanupCronJob: description: CleanupCronJobConfig defines configuration options for a cron job that automatically cleans up stale DevWorkspaces. @@ -3181,9 +3232,81 @@ spec: type: string metadata: type: object + status: + description: |- + Status represents the current status of the DevWorkspaceOperatorConfig + automatically managed by the DevWorkspace Operator. + properties: + conditions: + description: Conditions represent the latest available observations + of the OperatorConfiguration's state + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastBackupTime: + description: |- + LastBackupTime is the timestamp of the last successful backup. Nil if + no backup is configured or no backup has yet succeeded. + format: date-time + type: string + type: object type: object served: true storage: true + subresources: + status: {} --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -25809,10 +25932,7 @@ rules: - "" resources: - configmaps - - persistentvolumeclaims - pods - - secrets - - serviceaccounts - services verbs: - '*' @@ -25828,9 +25948,16 @@ rules: - apiGroups: - "" resources: + - imagestreams - pods/exec verbs: - create +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - '*' - apiGroups: - "" resourceNames: @@ -25842,6 +25969,35 @@ rules: - delete - get - patch +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - build.openshift.io + resources: + - builds + verbs: + - get +- apiGroups: + - "" + - build.openshift.io + resources: + - builds/details + verbs: + - update +- apiGroups: + - "" + - image.openshift.io + resources: + - imagestreams/layers + verbs: + - get + - update - apiGroups: - 
admissionregistration.k8s.io resources: @@ -25912,6 +26068,8 @@ rules: verbs: - get - list + - patch + - update - watch - apiGroups: - controller.devfile.io @@ -25929,6 +26087,17 @@ rules: - create - get - update +- apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - create + - delete + - get + - list + - patch + - update - apiGroups: - metrics.k8s.io resources: @@ -25967,6 +26136,17 @@ rules: - rbac.authorization.k8s.io resources: - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: - clusterroles verbs: - create @@ -26160,6 +26340,8 @@ spec: value: quay.io/devfile/devworkspace-controller:next - name: RELATED_IMAGE_project_clone value: quay.io/devfile/project-clone:next + - name: RELATED_IMAGE_project_backup + value: quay.io/devfile/project-backup:next - name: WATCH_NAMESPACE value: "" - name: POD_NAME diff --git a/deploy/deployment/openshift/objects/devworkspace-controller-manager.Deployment.yaml b/deploy/deployment/openshift/objects/devworkspace-controller-manager.Deployment.yaml index 5b226b7c9..101a59fa3 100644 --- a/deploy/deployment/openshift/objects/devworkspace-controller-manager.Deployment.yaml +++ b/deploy/deployment/openshift/objects/devworkspace-controller-manager.Deployment.yaml @@ -30,6 +30,8 @@ spec: value: quay.io/devfile/devworkspace-controller:next - name: RELATED_IMAGE_project_clone value: quay.io/devfile/project-clone:next + - name: RELATED_IMAGE_project_backup + value: quay.io/devfile/project-backup:next - name: WATCH_NAMESPACE value: "" - name: POD_NAME diff --git a/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml b/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml index cb43b0b1e..d4fa6dfeb 100644 --- a/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml +++ 
b/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml @@ -21,10 +21,7 @@ rules: - "" resources: - configmaps - - persistentvolumeclaims - pods - - secrets - - serviceaccounts - services verbs: - '*' @@ -40,9 +37,16 @@ rules: - apiGroups: - "" resources: + - imagestreams - pods/exec verbs: - create +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - '*' - apiGroups: - "" resourceNames: @@ -54,6 +58,35 @@ rules: - delete - get - patch +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - build.openshift.io + resources: + - builds + verbs: + - get +- apiGroups: + - "" + - build.openshift.io + resources: + - builds/details + verbs: + - update +- apiGroups: + - "" + - image.openshift.io + resources: + - imagestreams/layers + verbs: + - get + - update - apiGroups: - admissionregistration.k8s.io resources: @@ -124,6 +157,8 @@ rules: verbs: - get - list + - patch + - update - watch - apiGroups: - controller.devfile.io @@ -141,6 +176,17 @@ rules: - create - get - update +- apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - create + - delete + - get + - list + - patch + - update - apiGroups: - metrics.k8s.io resources: @@ -179,6 +225,17 @@ rules: - rbac.authorization.k8s.io resources: - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: - clusterroles verbs: - create diff --git a/deploy/deployment/openshift/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml b/deploy/deployment/openshift/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml index 910c7f6b5..3aa055792 100644 --- a/deploy/deployment/openshift/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml +++ 
b/deploy/deployment/openshift/objects/devworkspaceoperatorconfigs.controller.devfile.io.CustomResourceDefinition.yaml @@ -182,6 +182,57 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + backupCronJob: + description: BackupCronJobConfig defines configuration options + for a cron job that automatically backs up workspace PVCs. + properties: + enable: + description: |- + Enable determines whether backup CronJobs should be created for workspace PVCs. + Defaults to false if not specified. + type: boolean + oras: + description: |- + OrasConfig defines additional configuration options for the oras CLI used to + push and pull backup images. + properties: + extraArgs: + description: ExtraArgs are additional arguments passed + to the oras CLI + type: string + type: object + registry: + description: RegistryConfig defines the registry configuration + where backup images are stored. + properties: + authSecret: + description: |- + AuthSecret is the name of a Kubernetes secret of + type kubernetes.io/dockerconfigjson. + The secret is expected to be in the same namespace the workspace is running in. + If secret is not found in the workspace namespace, the operator will look for the secret + in the namespace where the operator is running, i.e. the same namespace + as the DevWorkspaceOperatorConfig. + The secret must contain the "controller.devfile.io/watch-secret=true" label so that it can be + recognized by the operator. + type: string + path: + description: |- + A registry where backup images are stored. Images are stored + in {path}/${DEVWORKSPACE_NAMESPACE}/${DEVWORKSPACE_NAME}:latest + type: string + required: + - path + type: object + schedule: + default: 0 1 * * * + description: |- + Schedule specifies the cron schedule for the backup cron job. + For example, "0 1 * * *" runs daily at 1 AM.
+ type: string + required: + - registry + type: object cleanupCronJob: description: CleanupCronJobConfig defines configuration options for a cron job that automatically cleans up stale DevWorkspaces. @@ -3181,6 +3232,78 @@ spec: type: string metadata: type: object + status: + description: |- + Status represents the current status of the DevWorkspaceOperatorConfig + automatically managed by the DevWorkspace Operator. + properties: + conditions: + description: Conditions represent the latest available observations + of the OperatorConfiguration's state + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastBackupTime: + description: |- + LastBackupTime is the timestamp of the last successful backup. Nil if + no backup is configured or no backup has yet succeeded. + format: date-time + type: string + type: object type: object served: true storage: true + subresources: + status: {} diff --git a/deploy/templates/base/manager_image_patch.yaml b/deploy/templates/base/manager_image_patch.yaml index 4fddefc60..845df5b9a 100644 --- a/deploy/templates/base/manager_image_patch.yaml +++ b/deploy/templates/base/manager_image_patch.yaml @@ -15,4 +15,6 @@ spec: - name: RELATED_IMAGE_devworkspace_webhook_server value: ${DWO_IMG} - name: RELATED_IMAGE_project_clone - value: ${PROJECT_CLONE_IMG} \ No newline at end of file + value: ${PROJECT_CLONE_IMG} + - name: RELATED_IMAGE_project_backup + value: ${PROJECT_BACKUP_IMG} diff --git a/deploy/templates/components/csv/clusterserviceversion.yaml b/deploy/templates/components/csv/clusterserviceversion.yaml index 8f51953be..50bede51e 100644 --- a/deploy/templates/components/csv/clusterserviceversion.yaml +++ b/deploy/templates/components/csv/clusterserviceversion.yaml @@ -104,6 +104,8 @@ spec: name: devworkspace_webhook_server - image: quay.io/devfile/project-clone:next name: project_clone + - image: quay.io/devfile/project-backup:next + name: project_backup - image: registry.access.redhat.com/ubi9/ubi-micro:9.5-1733126338 name: pvc_cleanup_job - image: 
quay.io/eclipse/che-workspace-data-sync-storage:0.0.1 diff --git a/deploy/templates/components/manager/manager.yaml b/deploy/templates/components/manager/manager.yaml index 331391d50..9a50a0fb4 100644 --- a/deploy/templates/components/manager/manager.yaml +++ b/deploy/templates/components/manager/manager.yaml @@ -86,3 +86,5 @@ spec: value: "quay.io/eclipse/che-sidecar-workspace-data-sync:0.0.1" - name: RELATED_IMAGE_project_clone value: "quay.io/devfile/project-clone:next" + - name: RELATED_IMAGE_project_backup + value: "quay.io/devfile/project-backup:next" diff --git a/deploy/templates/components/rbac/role.yaml b/deploy/templates/components/rbac/role.yaml index 03157dd56..a87be7060 100644 --- a/deploy/templates/components/rbac/role.yaml +++ b/deploy/templates/components/rbac/role.yaml @@ -19,10 +19,7 @@ rules: - "" resources: - configmaps - - persistentvolumeclaims - pods - - secrets - - serviceaccounts - services verbs: - '*' @@ -38,9 +35,16 @@ rules: - apiGroups: - "" resources: + - imagestreams - pods/exec verbs: - create +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - '*' - apiGroups: - "" resourceNames: @@ -52,6 +56,35 @@ rules: - delete - get - patch +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - build.openshift.io + resources: + - builds + verbs: + - get +- apiGroups: + - "" + - build.openshift.io + resources: + - builds/details + verbs: + - update +- apiGroups: + - "" + - image.openshift.io + resources: + - imagestreams/layers + verbs: + - get + - update - apiGroups: - admissionregistration.k8s.io resources: @@ -122,6 +155,8 @@ rules: verbs: - get - list + - patch + - update - watch - apiGroups: - controller.devfile.io @@ -139,6 +174,17 @@ rules: - create - get - update +- apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - create + - delete + - get + - list + - patch + - update - apiGroups: - metrics.k8s.io resources: @@ -177,6 +223,17 @@ rules: 
- rbac.authorization.k8s.io resources: - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: - clusterroles verbs: - create diff --git a/deploy/templates/crd/bases/controller.devfile.io_devworkspaceoperatorconfigs.yaml b/deploy/templates/crd/bases/controller.devfile.io_devworkspaceoperatorconfigs.yaml index d03cd57bd..982fcaa12 100644 --- a/deploy/templates/crd/bases/controller.devfile.io_devworkspaceoperatorconfigs.yaml +++ b/deploy/templates/crd/bases/controller.devfile.io_devworkspaceoperatorconfigs.yaml @@ -180,6 +180,57 @@ spec: Workspace defines configuration options related to how DevWorkspaces are managed properties: + backupCronJob: + description: BackupCronJobConfig defines configuration options + for a cron job that automatically backs up workspace PVCs. + properties: + enable: + description: |- + Enable determines whether backup CronJobs should be created for workspace PVCs. + Defaults to false if not specified. + type: boolean + oras: + description: |- + OrasConfig defines additional configuration options for the oras CLI used to + push and pull backup images. + properties: + extraArgs: + description: ExtraArgs are additional arguments passed + to the oras CLI + type: string + type: object + registry: + description: RegistryConfig defines the registry configuration + where backup images are stored. + properties: + authSecret: + description: |- + AuthSecret is the name of a Kubernetes secret of + type kubernetes.io/dockerconfigjson. + The secret is expected to be in the same namespace the workspace is running in. + If the secret is not found in the workspace namespace, the operator will look for the secret + in the namespace where the operator is running in, which is the same namespace + as the DevWorkspaceOperatorConfig. + The secret must contain the "controller.devfile.io/watch-secret=true" label so that it can be + recognized by the operator.
+ type: string + path: + description: |- + A registry where backup images are stored. Images are stored + in {path}/${DEVWORKSPACE_NAMESPACE}/${DEVWORKSPACE_NAME}:latest + type: string + required: + - path + type: object + schedule: + default: 0 1 * * * + description: |- + Schedule specifies the cron schedule for the backup cron job. + For example, "0 1 * * *" runs daily at 1 AM. + type: string + required: + - registry + type: object cleanupCronJob: description: CleanupCronJobConfig defines configuration options for a cron job that automatically cleans up stale DevWorkspaces. @@ -3179,6 +3230,78 @@ spec: type: string metadata: type: object + status: + description: |- + Status represents the current status of the DevWorkspaceOperatorConfig + automatically managed by the DevWorkspace Operator. + properties: + conditions: + description: Conditions represent the latest available observations + of the OperatorConfiguration's state + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastBackupTime: + description: |- + LastBackupTime is the timestamp of the last successful backup. Nil if + no backup is configured or no backup has yet succeeded. + format: date-time + type: string + type: object type: object served: true storage: true + subresources: + status: {} diff --git a/docs/release/README.md b/docs/release/README.md index c377a9e98..c3d95b487 100644 --- a/docs/release/README.md +++ b/docs/release/README.md @@ -40,9 +40,13 @@ This means that to test commits in a release branch before running the release j ```bash export DWO_IMG=quay.io/yourrepo/devworkspace-controller:prerelease export PROJECT_CLONE_IMG=quay.io/yourrepo/project-clone:prerelease +export PROJECT_BACKUP_IMG=quay.io/yourrepo/project-backup:prerelease # build and push project clone image podman build -t "$PROJECT_CLONE_IMG" -f ./project-clone/Dockerfile . 
podman push "$PROJECT_CLONE_IMG" +# build and push project backup image +podman build -t "$PROJECT_BACKUP_IMG" -f ./project-backup/Containerfile ./project-backup/ +podman push "$PROJECT_BACKUP_IMG" # build and push DevWorkspace Operator image export DOCKER=podman # optional make docker diff --git a/internal/images/image.go b/internal/images/image.go index 11bd46202..2ec548e16 100644 --- a/internal/images/image.go +++ b/internal/images/image.go @@ -38,6 +38,7 @@ const ( asyncStorageServerImageEnvVar = "RELATED_IMAGE_async_storage_server" asyncStorageSidecarImageEnvVar = "RELATED_IMAGE_async_storage_sidecar" projectCloneImageEnvVar = "RELATED_IMAGE_project_clone" + projectBackupImageEnvVar = "RELATED_IMAGE_project_backup" ) // GetWebhookServerImage returns the image reference for the webhook server image. Returns @@ -88,3 +89,12 @@ func GetProjectCloneImage() string { } return val } + +func GetProjectBackupImage() string { + val, ok := os.LookupEnv(projectBackupImageEnvVar) + if !ok { + log.Info(fmt.Sprintf("Could not get project backup image: environment variable %s is not set", projectBackupImageEnvVar)) + return "" + } + return val +} diff --git a/main.go b/main.go index 6f355a4d2..65bb35274 100644 --- a/main.go +++ b/main.go @@ -37,6 +37,7 @@ import ( dwv2 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + backupCronJobController "github.com/devfile/devworkspace-operator/controllers/backupcronjob" cleanupCronJobController "github.com/devfile/devworkspace-operator/controllers/cleanupcronjob" workspacecontroller "github.com/devfile/devworkspace-operator/controllers/workspace" @@ -188,6 +189,15 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "CleanupCronJob") os.Exit(1) } + if err = (&backupCronJobController.BackupCronJobReconciler{ + Client: mgr.GetClient(), + NonCachingClient: nonCachingClient, + Log: 
ctrl.Log.WithName("controllers").WithName("BackupCronJob").V(1), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "BackupCronJob") + os.Exit(1) + } // +kubebuilder:scaffold:builder // Get a config to talk to the apiserver diff --git a/make-release.sh b/make-release.sh index 9d4079ac9..1076b8acd 100755 --- a/make-release.sh +++ b/make-release.sh @@ -19,6 +19,7 @@ set -e DWO_REPO="${DWO_REPO:-git@github.com:devfile/devworkspace-operator}" DWO_QUAY_REPO="${DWO_QUAY_REPO:-quay.io/devfile/devworkspace-controller}" PROJECT_CLONE_QUAY_REPO="${PROJECT_CLONE_QUAY_REPO:-quay.io/devfile/project-clone}" +PROJECT_BACKUP_QUAY_REPO="${PROJECT_BACKUP_QUAY_REPO:-quay.io/devfile/project-backup}" DWO_BUNDLE_QUAY_REPO="${DWO_BUNDLE_QUAY_REPO:-quay.io/devfile/devworkspace-operator-bundle}" DWO_INDEX_IMAGE="${DWO_INDEX_IMAGE:-quay.io/devfile/devworkspace-operator-index:release}" DWO_DIGEST_INDEX_IMAGE="${DWO_DIGEST_INDEX_IMAGE:-quay.io/devfile/devworkspace-operator-index:release-digest}" @@ -127,12 +128,14 @@ update_images() { # Get image tags DWO_QUAY_IMG="${DWO_QUAY_REPO}:${VERSION}" PROJECT_CLONE_QUAY_IMG="${PROJECT_CLONE_QUAY_REPO}:${VERSION}" + PROJECT_BACKUP_QUAY_IMG="${PROJECT_BACKUP_QUAY_REPO}:${VERSION}" DWO_BUNDLE_QUAY_IMG="${DWO_BUNDLE_QUAY_REPO}:${VERSION}" # Update defaults in Makefile sed -i Makefile -r \ -e "s|quay.io/devfile/devworkspace-controller:[0-9a-zA-Z._-]+|${DWO_QUAY_IMG}|g" \ -e "s|quay.io/devfile/project-clone:[0-9a-zA-Z._-]+|${PROJECT_CLONE_QUAY_IMG}|g" \ + -e "s|quay.io/devfile/project-backup:[0-9a-zA-Z._-]+|${PROJECT_BACKUP_QUAY_IMG}|g" \ -e "s|quay.io/devfile/devworkspace-operator-bundle:[0-9a-zA-Z._-]+|${DWO_BUNDLE_QUAY_IMG}|g" \ -e "s|quay.io/devfile/devworkspace-operator-index:[0-9a-zA-Z._-]+|${DWO_INDEX_IMAGE}|g" @@ -140,12 +143,15 @@ update_images() { sed -i build/scripts/generate_deployment.sh -r \ -e 
"s|quay.io/devfile/devworkspace-controller:[0-9a-zA-Z._-]+|${DWO_QUAY_IMG}|g" \ - -e "s|quay.io/devfile/project-clone:[0-9a-zA-Z._-]+|${PROJECT_CLONE_QUAY_IMG}|g" + -e "s|quay.io/devfile/project-clone:[0-9a-zA-Z._-]+|${PROJECT_CLONE_QUAY_IMG}|g" \ + -e "s|quay.io/devfile/project-backup:[0-9a-zA-Z._-]+|${PROJECT_BACKUP_QUAY_IMG}|g" local DEFAULT_DWO_IMG="$DWO_QUAY_IMG" local PROJECT_CLONE_IMG="$PROJECT_CLONE_QUAY_IMG" + local PROJECT_BACKUP_IMG="$PROJECT_BACKUP_QUAY_IMG" export DEFAULT_DWO_IMG export PROJECT_CLONE_IMG + export PROJECT_BACKUP_IMG make generate_all } @@ -157,12 +163,15 @@ update_images() { build_and_push_images() { DWO_QUAY_IMG="${DWO_QUAY_REPO}:${VERSION}" PROJECT_CLONE_QUAY_IMG="${PROJECT_CLONE_QUAY_REPO}:${VERSION}" + PROJECT_BACKUP_QUAY_IMG="${PROJECT_BACKUP_QUAY_REPO}:${VERSION}" if [ "$DRY_RUN" == "dryrun" ]; then docker buildx build . -t "${DWO_QUAY_IMG}" -f ./build/Dockerfile \ --platform "$ARCHITECTURES" docker buildx build . -t "${PROJECT_CLONE_QUAY_IMG}" -f ./project-clone/Dockerfile \ --platform "$ARCHITECTURES" + docker buildx build ./project-backup/ -t "${PROJECT_BACKUP_QUAY_IMG}" -f ./project-backup/Containerfile \ + --platform "$ARCHITECTURES" else docker buildx build . -t "${DWO_QUAY_IMG}" -f ./build/Dockerfile \ --platform "$ARCHITECTURES" \ --push docker buildx build . -t "${PROJECT_CLONE_QUAY_IMG}" -f ./project-clone/Dockerfile \ --platform "$ARCHITECTURES" \ --push + docker buildx build ./project-backup/
-t "${PROJECT_BACKUP_QUAY_IMG}" -f ./project-backup/Containerfile \ + --platform "$ARCHITECTURES" \ + --push fi } diff --git a/pkg/config/configmap/property.go b/pkg/config/configmap/property.go index 3bdd842ed..9c981dac1 100644 --- a/pkg/config/configmap/property.go +++ b/pkg/config/configmap/property.go @@ -21,8 +21,7 @@ const ( defaultSidecarPullPolicy = "Always" // workspacePVCName config property handles the PVC name that should be created and used for all workspaces within one kubernetes namespace - workspacePVCName = "devworkspace.pvc.name" - defaultWorkspacePVCName = "claim-devworkspace" + workspacePVCName = "devworkspace.pvc.name" workspacePVCStorageClassName = "devworkspace.pvc.storage_class.name" diff --git a/pkg/config/defaults.go b/pkg/config/defaults.go index 03fec33ad..ab2f7b26a 100644 --- a/pkg/config/defaults.go +++ b/pkg/config/defaults.go @@ -82,6 +82,10 @@ var defaultConfig = &v1alpha1.OperatorConfiguration{ RetainTime: pointer.Int32(2592000), Schedule: "0 0 1 * *", }, + BackupCronJob: &v1alpha1.BackupCronJobConfig{ + Enable: pointer.Bool(false), + Schedule: "0 1 * * *", + }, // Do not declare a default value for this field. // Setting a default leads to an endless reconcile loop when UserNamespacesSupport is disabled, // because in that case the field is ignored and always set to nil.
diff --git a/pkg/config/sync.go b/pkg/config/sync.go index 76ffd89a5..ab4d47744 100644 --- a/pkg/config/sync.go +++ b/pkg/config/sync.go @@ -431,6 +431,36 @@ func mergeConfig(from, to *controller.OperatorConfiguration) { to.Workspace.CleanupCronJob.Schedule = from.Workspace.CleanupCronJob.Schedule } } + if from.Workspace.BackupCronJob != nil { + if to.Workspace.BackupCronJob == nil { + to.Workspace.BackupCronJob = &controller.BackupCronJobConfig{} + } + if from.Workspace.BackupCronJob.Enable != nil { + to.Workspace.BackupCronJob.Enable = from.Workspace.BackupCronJob.Enable + } + if from.Workspace.BackupCronJob.Schedule != "" { + to.Workspace.BackupCronJob.Schedule = from.Workspace.BackupCronJob.Schedule + } + if from.Workspace.BackupCronJob.Registry != nil { + if to.Workspace.BackupCronJob.Registry == nil { + to.Workspace.BackupCronJob.Registry = &controller.RegistryConfig{} + } + if from.Workspace.BackupCronJob.Registry.Path != "" { + to.Workspace.BackupCronJob.Registry.Path = from.Workspace.BackupCronJob.Registry.Path + } + if from.Workspace.BackupCronJob.Registry.AuthSecret != "" { + to.Workspace.BackupCronJob.Registry.AuthSecret = from.Workspace.BackupCronJob.Registry.AuthSecret + } + } + if from.Workspace.BackupCronJob.OrasConfig != nil { + if to.Workspace.BackupCronJob.OrasConfig == nil { + to.Workspace.BackupCronJob.OrasConfig = &controller.OrasConfig{} + } + if from.Workspace.BackupCronJob.OrasConfig.ExtraArgs != "" { + to.Workspace.BackupCronJob.OrasConfig.ExtraArgs = from.Workspace.BackupCronJob.OrasConfig.ExtraArgs + } + } + } if from.Workspace.PostStartTimeout != "" { to.Workspace.PostStartTimeout = from.Workspace.PostStartTimeout @@ -684,6 +714,16 @@ func GetCurrentConfigString(currConfig *controller.OperatorConfiguration) string config = append(config, fmt.Sprintf("workspace.cleanupCronJob.cronJobScript=%s", workspace.CleanupCronJob.Schedule)) } } + if workspace.BackupCronJob != nil { + if workspace.BackupCronJob.Enable != nil && 
*workspace.BackupCronJob.Enable != *defaultConfig.Workspace.BackupCronJob.Enable { + config = append(config, fmt.Sprintf("workspace.backupCronJob.enable=%t", *workspace.BackupCronJob.Enable)) + } + + if workspace.BackupCronJob.Schedule != defaultConfig.Workspace.BackupCronJob.Schedule { + config = append(config, fmt.Sprintf("workspace.backupCronJob.cronJobScript=%s", workspace.BackupCronJob.Schedule)) + } + + } if workspace.HostUsers != nil { config = append(config, fmt.Sprintf("workspace.hostUsers=%t", *workspace.HostUsers)) } diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index b63830659..586eedc1d 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -69,6 +69,9 @@ const ( // PVCCleanupPodCPURequest is the cpu request used for PVC clean up pods PVCCleanupPodCPURequest = "5m" + // DefaultWorkspacePVCName is the default name of the PVC used for workspaces + DefaultWorkspacePVCName = "claim-devworkspace" + // Constants describing storage classes supported by the controller // CommonStorageClassType defines the 'common' storage policy, which is an alias of the 'per-user' storage policy, and operates in the same fashion as the 'per-user' storage policy. diff --git a/pkg/constants/metadata.go b/pkg/constants/metadata.go index 092dddeab..b27cef445 100644 --- a/pkg/constants/metadata.go +++ b/pkg/constants/metadata.go @@ -172,4 +172,12 @@ const ( // NamespaceNodeSelectorAnnotation is an annotation applied to a namespace to configure the node selector for all workspaces // in that namespace. 
Value should be json-encoded map[string]string NamespaceNodeSelectorAnnotation = "controller.devfile.io/node-selector" + + // DevWorkspaceBackupJobNamePrefix is the prefix used for backup jobs created for DevWorkspaces + DevWorkspaceBackupJobNamePrefix = "devworkspace-backup-" + + // DevWorkspaceBackupJobLabel is the label key to identify backup jobs created for DevWorkspaces + DevWorkspaceBackupJobLabel = "controller.devfile.io/backup-job" + + DevWorkspaceBackupAuthSecretName = "devworkspace-backup-registry-auth" ) diff --git a/project-backup/Containerfile b/project-backup/Containerfile new file mode 100644 index 000000000..0690f2eb6 --- /dev/null +++ b/project-backup/Containerfile @@ -0,0 +1,38 @@ +# +# Copyright (c) 2019-2025 Red Hat, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +FROM quay.io/konflux-ci/oras:3d83c68 AS oras +FROM quay.io/devfile/base-developer-image:ubi10-latest +LABEL project="devworkspace-operator" + +USER 0 +RUN dnf update -y && \ + dnf clean all + +RUN useradd -u 1000 -g 0 -m oras && \ + mkdir -p /home/oras/ && \ + chown -R oras:0 /home/oras + +COPY --chown=1000:0 entrypoint.sh / +COPY --chown=1000:0 workspace-recovery.sh / + +RUN chmod +x /entrypoint.sh ; \ + chmod +x /workspace-recovery.sh + +COPY --from=oras /usr/bin/oras /usr/bin/oras + +USER 1000 + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/project-backup/entrypoint.sh b/project-backup/entrypoint.sh new file mode 100644 index 000000000..a55b419d4 --- /dev/null +++ b/project-backup/entrypoint.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2025 Red Hat, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +set -x +set -e + +exec "$@" diff --git a/project-backup/workspace-recovery.sh b/project-backup/workspace-recovery.sh new file mode 100644 index 000000000..440176f62 --- /dev/null +++ b/project-backup/workspace-recovery.sh @@ -0,0 +1,127 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-2025 Red Hat, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +set -euo pipefail + +# --- Configuration --- +: "${DEVWORKSPACE_BACKUP_REGISTRY:?Missing DEVWORKSPACE_BACKUP_REGISTRY}" +: "${DEVWORKSPACE_NAMESPACE:?Missing DEVWORKSPACE_NAMESPACE}" +: "${DEVWORKSPACE_NAME:?Missing DEVWORKSPACE_NAME}" +: "${BACKUP_SOURCE_PATH:?Missing BACKUP_SOURCE_PATH}" + +BACKUP_IMAGE="${DEVWORKSPACE_BACKUP_REGISTRY}/${DEVWORKSPACE_NAMESPACE}/${DEVWORKSPACE_NAME}:latest" + +# --- Functions --- +backup() { + TARBALL_NAME="devworkspace-backup.tar.gz" + cd /tmp + echo "Backing up devworkspace '$DEVWORKSPACE_NAME' in namespace '$DEVWORKSPACE_NAMESPACE' to image '$BACKUP_IMAGE'" + + # Create tarball of the backup source path + tar -czvf "$TARBALL_NAME" -C "$BACKUP_SOURCE_PATH" . 
+ + # Push the tarball to the OCI registry using oras as a custom artifact + oras_args=( + push + "$BACKUP_IMAGE" + --artifact-type application/vnd.devworkspace.backup.artifact.v1+json + --annotation devworkspace.name="$DEVWORKSPACE_NAME" + --annotation devworkspace.namespace="$DEVWORKSPACE_NAMESPACE" + --disable-path-validation + ) + if [[ -n "${REGISTRY_AUTH_FILE:-}" ]]; then + # If REGISTRY_AUTH_FILE is provided, use it for authentication + oras_args+=(--registry-config "$REGISTRY_AUTH_FILE") + elif [[ -f /var/run/secrets/kubernetes.io/serviceaccount/token ]]; then + echo "Using mounted service account token for registry authentication" + TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) + REGISTRY_HOST=$(echo "$BACKUP_IMAGE" | cut -d'/' -f1) + + # Create temporary auth config for oras + REGISTRY_AUTH_FILE="/tmp/registry_auth.json" + + # For OpenShift internal registry, use service account token as password with 'serviceaccount' username + if [[ "$REGISTRY_HOST" == *"openshift"* ]] || [[ "$REGISTRY_HOST" == *"svc.cluster.local"* ]]; then + # OpenShift internal registry authentication + # Use the service account CA for TLS verification + if [[ -f /var/run/secrets/kubernetes.io/serviceaccount/ca.crt ]]; then + oras login --password-stdin \ + --ca-file /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + -u serviceaccount \ + --registry-config "$REGISTRY_AUTH_FILE" \ + "$REGISTRY_HOST" <<< "$TOKEN" + else + # Fallback to insecure if CA cert is not available + oras login --password-stdin \ + --insecure \ + -u serviceaccount \ + --registry-config "$REGISTRY_AUTH_FILE" \ + "$REGISTRY_HOST" <<< "$TOKEN" + fi + fi + + oras_args+=(--registry-config "$REGISTRY_AUTH_FILE") + fi + if [[ -n "${ORAS_EXTRA_ARGS:-}" ]]; then + extra_args=( ${ORAS_EXTRA_ARGS} ) + oras_args+=("${extra_args[@]}") + fi + oras_args+=("$TARBALL_NAME") + oras "${oras_args[@]}" + rm -f "$TARBALL_NAME" + + # Clean up temporary auth file if created + if [[ -f /tmp/registry_auth.json 
]]; then + rm -f /tmp/registry_auth.json + fi + + echo "Backup completed successfully." +} + +restore() { + local container_name="workspace-restore" + + podman create --name "$container_name" "$BACKUP_IMAGE" + rm -rf "${BACKUP_SOURCE_PATH:?}"/* + podman cp "$container_name":/. "$BACKUP_SOURCE_PATH" + podman rm "$container_name" +} + +usage() { + echo "Usage: $0 [--backup|--restore]" + exit 1 +} +echo + +# --- Main --- +if [[ $# -eq 0 ]]; then + usage +fi + +for arg in "$@"; do + case "$arg" in + --backup) + backup + ;; + --restore) + restore + ;; + *) + echo "Unknown option: $arg" + usage + ;; + esac +done