diff --git a/Dockerfile b/Dockerfile
index 3da290e..f5c3f0a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -54,6 +54,7 @@ COPY --from=frontend-build /frontend/dist /app/ui/build
 # Generate Swagger documentation
 COPY backend/ ./
+RUN go mod tidy
 RUN swag init -d . -g cmd/main.go -o swagger

 # Compile the backend
diff --git a/README.md b/README.md
index d078c4d..e4b16ad 100644
--- a/README.md
+++ b/README.md
@@ -58,6 +58,12 @@
 - **SSL support**: Secure connections available
 - **Easy restoration**: One-click restore from any backup

+### 🧩 **Cluster-based Setup**
+
+- **Discover databases**: Connect to a PostgreSQL cluster and list the databases the user can access
+- **Bulk import**: Select multiple databases and create them all at once
+- **Shared setup**: Apply a single backup schedule, storage and notifiers to all selected databases
+
 ### 👥 **Suitable for Teams** (docs)

 - **Workspaces**: Group databases, notifiers and storages for different projects or teams
@@ -147,13 +153,30 @@ docker compose up -d

 ## 🚀 Usage

 1. **Access the dashboard**: Navigate to `http://localhost:4005`
-2. **Add first DB for backup**: Click "New Database" and follow the setup wizard
+2. **Add database(s)**: Click "Add database" and choose either:
+   - **Single database**: the classic flow for one DB
+   - **From cluster**: discover and import multiple DBs from a PostgreSQL cluster
 3. **Configure schedule**: Choose from hourly, daily, weekly or monthly intervals
 4. **Set database connection**: Enter your PostgreSQL credentials and connection details
 5. **Choose storage**: Select where to store your backups (local, S3, Google Drive, etc.)
 6. **Add notifications** (optional): Configure email, Telegram, Slack, or webhook notifications
 7. **Save and start**: Postgresus will validate settings and begin the backup schedule

+### Import multiple databases from a cluster
+
+1. Go to Databases → **Add database** → **From cluster**
+2. Enter the connection details: PostgreSQL version, host, port, username, password, and HTTPS if needed
+3. Click **Load databases** to list the accessible DBs (templates are excluded)
+4. Select the databases you want to back up (multi-select)
+5. Set a shared backup schedule and storage
+6. Select notifiers (optional)
+7. Click **Create** — Postgresus creates a database entry for each selected DB and applies your shared settings
+
+Notes:
+- Requires the `CONNECT` privilege on each selected database; template databases are excluded automatically
+- The selected PostgreSQL version must match the actual major version of the cluster
+- The HTTPS toggle sets `sslmode=require` on the connection
+
 ### 🔑 Resetting Password (docs)

 If you need to reset the password, you can use the built-in password reset command:
diff --git a/backend/README.md b/backend/README.md
index 8f47ff2..434d940 100644
--- a/backend/README.md
+++ b/backend/README.md
@@ -3,6 +3,10 @@
 Keep in mind: you need to use dev-db from docker-compose.yml in this folder
 instead of postgresus-db from docker-compose.yml in the root folder.

+## Requirements
+
+- Go 1.23+
+
 > Copy .env.example to .env
 > Copy docker-compose.yml.example to docker-compose.yml (for development only)
 > Go to tools folder and install Postgres versions
@@ -47,6 +51,10 @@ Swagger URL is:

 > http://localhost:4005/api/v1/docs/swagger/index.html#/

+## New endpoints
+
+- POST `/api/v1/databases/list-databases-direct` — lists accessible databases in a PostgreSQL cluster without saving any configuration. See Swagger for the request/response schema.
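Below is a minimal Go sketch of calling the new endpoint, assuming bearer-token auth and the `Database` payload shape introduced in this PR; the JSON field names, the `POSTGRES` type value, and the credentials are illustrative assumptions, so verify them against the generated Swagger schema:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Connection details for the cluster; the field names below are
	// assumptions based on this PR's models, check Swagger for the exact schema.
	payload := map[string]any{
		"type": "POSTGRES",
		"postgresql": map[string]any{
			"version":  "17",
			"host":     "db.example.com",
			"port":     5432,
			"username": "backup_user",
			"password": "secret",
			"isHttps":  false,
		},
	}
	body, _ := json.Marshal(payload)

	req, err := http.NewRequest(
		http.MethodPost,
		"http://localhost:4005/api/v1/databases/list-databases-direct",
		bytes.NewReader(body),
	)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <token>") // the route sits behind auth middleware

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The controller responds with {"databases": ["db1", "db2", ...]}.
	var out struct {
		Databases []string `json:"databases"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Databases)
}
```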
+ # Project structure Default endpoint structure is: diff --git a/backend/cmd/main.go b/backend/cmd/main.go index 99c4a29..365ff4d 100644 --- a/backend/cmd/main.go +++ b/backend/cmd/main.go @@ -16,6 +16,7 @@ import ( "postgresus-backend/internal/features/audit_logs" "postgresus-backend/internal/features/backups/backups" backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/clusters" "postgresus-backend/internal/features/databases" "postgresus-backend/internal/features/disk" healthcheck_attempt "postgresus-backend/internal/features/healthcheck/attempt" @@ -192,6 +193,7 @@ func setUpRoutes(r *gin.Engine) { notifiers.GetNotifierController().RegisterRoutes(protected) storages.GetStorageController().RegisterRoutes(protected) databases.GetDatabaseController().RegisterRoutes(protected) + clusters.GetClusterController().RegisterRoutes(protected) backups.GetBackupController().RegisterRoutes(protected) restores.GetRestoreController().RegisterRoutes(protected) healthcheck_config.GetHealthcheckConfigController().RegisterRoutes(protected) @@ -210,6 +212,7 @@ func setUpDependencies() { audit_logs.SetupDependencies() notifiers.SetupDependencies() storages.SetupDependencies() + clusters.SetupDependencies() } func runBackgroundTasks(log *slog.Logger) { @@ -224,6 +227,10 @@ func runBackgroundTasks(log *slog.Logger) { backups.GetBackupBackgroundService().Run() }) + go runWithPanicLogging(log, "cluster background service", func() { + clusters.GetClusterBackgroundService().Run() + }) + go runWithPanicLogging(log, "restore background service", func() { restores.GetRestoreBackgroundService().Run() }) diff --git a/backend/internal/features/backups/backups/service.go b/backend/internal/features/backups/backups/service.go index 29d7318..63c8f01 100644 --- a/backend/internal/features/backups/backups/service.go +++ b/backend/internal/features/backups/backups/service.go @@ -550,6 +550,10 @@ func (s *BackupService) deleteBackup(backup *Backup) error { return s.backupRepository.DeleteByID(backup.ID) } +func (s *BackupService) DeleteBackupsForDatabase(databaseID uuid.UUID) error { + return s.deleteDbBackups(databaseID) +} + func (s *BackupService) deleteDbBackups(databaseID uuid.UUID) error { dbBackupsInProgress, err := s.backupRepository.FindByDatabaseIdAndStatus( databaseID, diff --git a/backend/internal/features/backups/config/model.go b/backend/internal/features/backups/config/model.go index bf8c8e0..a8206da 100644 --- a/backend/internal/features/backups/config/model.go +++ b/backend/internal/features/backups/config/model.go @@ -24,6 +24,10 @@ type BackupConfig struct { Storage *storages.Storage `json:"storage" gorm:"foreignKey:StorageID"` StorageID *uuid.UUID `json:"storageId" gorm:"column:storage_id;type:uuid;"` + // Cluster management + ClusterID *uuid.UUID `json:"clusterId" gorm:"column:cluster_id;type:uuid"` + ManagedByCluster bool `json:"managedByCluster" gorm:"column:managed_by_cluster;type:boolean;not null"` + SendNotificationsOn []BackupNotificationType `json:"sendNotificationsOn" gorm:"-"` SendNotificationsOnString string `json:"-" gorm:"column:send_notifications_on;type:text;not null"` diff --git a/backend/internal/features/backups/config/repository.go b/backend/internal/features/backups/config/repository.go index 7c7f602..1999642 100644 --- a/backend/internal/features/backups/config/repository.go +++ b/backend/internal/features/backups/config/repository.go @@ -81,7 +81,7 @@ func (r *BackupConfigRepository) GetWithEnabledBackups() ([]*BackupConfig, 
error GetDb(). Preload("BackupInterval"). Preload("Storage"). - Where("is_backups_enabled = ?", true). + Where("is_backups_enabled = ? AND (managed_by_cluster = FALSE OR cluster_id IS NULL)", true). Find(&backupConfigs).Error; err != nil { return nil, err } diff --git a/backend/internal/features/backups/config/service.go b/backend/internal/features/backups/config/service.go index 510702c..9865a99 100644 --- a/backend/internal/features/backups/config/service.go +++ b/backend/internal/features/backups/config/service.go @@ -117,6 +117,12 @@ func (s *BackupConfigService) GetBackupConfigByDbId( return config, nil } +func (s *BackupConfigService) FindBackupConfigByDbIdNoInit( + databaseID uuid.UUID, +) (*BackupConfig, error) { + return s.backupConfigRepository.FindByDatabaseID(databaseID) +} + func (s *BackupConfigService) IsStorageUsing( user *users_models.User, storageID uuid.UUID, diff --git a/backend/internal/features/clusters/background_service.go b/backend/internal/features/clusters/background_service.go new file mode 100644 index 0000000..c121eff --- /dev/null +++ b/backend/internal/features/clusters/background_service.go @@ -0,0 +1,59 @@ +package clusters + +import ( + "log/slog" + "time" + + "github.com/google/uuid" +) + +// ClusterBackgroundService periodically checks clusters and triggers RunBackup on their intervals. +type ClusterBackgroundService struct { + service *ClusterService + repo *ClusterRepository + logger *slog.Logger +} + +func (s *ClusterBackgroundService) Run() { + for { + if err := s.tick(); err != nil { + s.logger.Error("Cluster scheduler tick failed", "error", err) + } + time.Sleep(1 * time.Minute) + } +} + +func (s *ClusterBackgroundService) tick() error { + clusters, err := s.repo.FindAll() + if err != nil { + return err + } + + now := time.Now().UTC() + for _, c := range clusters { + // require interval and backups enabled + if !c.IsBackupsEnabled || c.BackupInterval == nil { + continue + } + + var last *time.Time + if c.LastRunAt != nil && !c.LastRunAt.IsZero() { + last = c.LastRunAt + } + + if c.BackupInterval.ShouldTriggerBackup(now, last) { + if err := s.service.RunBackupScheduled(c.ID); err != nil { + s.logger.Error("Failed to run cluster backup", "clusterId", c.ID, "error", err) + } + // Update last run regardless of outcome to avoid tight loop + _ = s.repo.UpdateLastRunAt(c.ID, now) + } + } + + return nil +} + +// For tests or manual invocations +func (s *ClusterBackgroundService) RunOnceFor(clusterID uuid.UUID) error { + return s.service.RunBackupScheduled(clusterID) +} diff --git a/backend/internal/features/clusters/controller.go b/backend/internal/features/clusters/controller.go new file mode 100644 index 0000000..321fd14 --- /dev/null +++ b/backend/internal/features/clusters/controller.go @@ -0,0 +1,307 @@ +package clusters + +import ( + "net/http" + users_middleware "postgresus-backend/internal/features/users/middleware" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +type ClusterController struct { + service *ClusterService +} + +func (c *ClusterController) RegisterRoutes(router *gin.RouterGroup) { + router.GET("/clusters", c.GetClusters) + router.POST("/clusters", c.CreateCluster) + router.POST("/clusters/:id/run-backup", c.RunClusterBackup) + router.PUT("/clusters/:id", c.UpdateCluster) + router.GET("/clusters/:id/databases", c.ListClusterDatabases) + router.GET("/clusters/:id/propagation/preview", c.PreviewPropagation) + router.POST("/clusters/:id/propagation/apply", c.ApplyPropagation) +} + +// CreateCluster +// @Summary Create a 
new cluster +// @Tags clusters +// @Accept json +// @Produce json +// @Param request body Cluster true "Cluster creation data with workspaceId" +// @Success 201 {object} Cluster +// @Failure 400 +// @Failure 401 +// @Failure 500 +// @Router /clusters [post] +func (c *ClusterController) CreateCluster(ctx *gin.Context) { + user, ok := users_middleware.GetUserFromContext(ctx) + if !ok { + ctx.JSON(http.StatusUnauthorized, gin.H{"error": "User not authenticated"}) + return + } + + var request Cluster + if err := ctx.ShouldBindJSON(&request); err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if request.WorkspaceID == nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": "workspaceId is required"}) + return + } + + cluster, err := c.service.CreateCluster(user, *request.WorkspaceID, &request) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + cluster.HideSensitiveData() + ctx.JSON(http.StatusCreated, cluster) +} + +// GetClusters +// @Summary Get clusters by workspace +// @Tags clusters +// @Produce json +// @Param workspace_id query string true "Workspace ID" +// @Success 200 {array} Cluster +// @Failure 400 +// @Failure 401 +// @Failure 500 +// @Router /clusters [get] +func (c *ClusterController) GetClusters(ctx *gin.Context) { + user, ok := users_middleware.GetUserFromContext(ctx) + if !ok { + ctx.JSON(http.StatusUnauthorized, gin.H{"error": "User not authenticated"}) + return + } + + workspaceIDStr := ctx.Query("workspace_id") + if workspaceIDStr == "" { + ctx.JSON(http.StatusBadRequest, gin.H{"error": "workspace_id query parameter is required"}) + return + } + + workspaceID, err := uuid.Parse(workspaceIDStr) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid workspace_id"}) + return + } + + clusters, err := c.service.GetClusters(user, workspaceID) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + ctx.JSON(http.StatusOK, clusters) +} + +// RunClusterBackup +// @Summary Run backup across a cluster (discovering new DBs) +// @Tags clusters +// @Param id path string true "Cluster ID" +// @Success 200 +// @Failure 400 +// @Failure 401 +// @Failure 500 +// @Router /clusters/{id}/run-backup [post] +func (c *ClusterController) RunClusterBackup(ctx *gin.Context) { + user, ok := users_middleware.GetUserFromContext(ctx) + if !ok { + ctx.JSON(http.StatusUnauthorized, gin.H{"error": "User not authenticated"}) + return + } + + id, err := uuid.Parse(ctx.Param("id")) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid cluster ID"}) + return + } + + if err := c.service.RunBackup(user, id); err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + ctx.JSON(http.StatusOK, gin.H{"message": "cluster backup started"}) +} + +// UpdateCluster +// @Summary Update cluster +// @Tags clusters +// @Accept json +// @Produce json +// @Param id path string true "Cluster ID" +// @Param request body Cluster true "Cluster update data" +// @Success 200 {object} Cluster +// @Failure 400 +// @Failure 401 +// @Failure 500 +// @Router /clusters/{id} [put] +func (c *ClusterController) UpdateCluster(ctx *gin.Context) { + user, ok := users_middleware.GetUserFromContext(ctx) + if !ok { + ctx.JSON(http.StatusUnauthorized, gin.H{"error": "User not authenticated"}) + return + } + + id, err := uuid.Parse(ctx.Param("id")) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid cluster ID"}) + return + 
} + + var request Cluster + if err := ctx.ShouldBindJSON(&request); err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + updated, err := c.service.UpdateCluster(user, id, &request) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + ctx.JSON(http.StatusOK, updated) +} + +// ListClusterDatabases +// @Summary List accessible databases of a cluster +// @Tags clusters +// @Produce json +// @Param id path string true "Cluster ID" +// @Success 200 {object} map[string][]string +// @Failure 400 +// @Failure 401 +// @Failure 500 +// @Router /clusters/{id}/databases [get] +func (c *ClusterController) ListClusterDatabases(ctx *gin.Context) { + user, ok := users_middleware.GetUserFromContext(ctx) + if !ok { + ctx.JSON(http.StatusUnauthorized, gin.H{"error": "User not authenticated"}) + return + } + + id, err := uuid.Parse(ctx.Param("id")) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid cluster ID"}) + return + } + + dbs, err := c.service.ListClusterDatabases(user, id) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + ctx.JSON(http.StatusOK, gin.H{"databases": dbs}) +} + +// PreviewPropagation +// @Summary Preview force-apply of cluster defaults to existing DBs +// @Tags clusters +// @Produce json +// @Param id path string true "Cluster ID" +// @Param applyStorage query bool false "Apply storage" +// @Param applySchedule query bool false "Apply schedule" +// @Param respectExclusions query bool false "Respect cluster exclusions" +// @Success 200 {array} SwaggerPropagationChange +// @Failure 400 +// @Failure 401 +// @Failure 500 +// @Router /clusters/{id}/propagation/preview [get] +func (c *ClusterController) PreviewPropagation(ctx *gin.Context) { + user, ok := users_middleware.GetUserFromContext(ctx) + if !ok { + ctx.JSON(http.StatusUnauthorized, gin.H{"error": "User not authenticated"}) + return + } + + id, err := uuid.Parse(ctx.Param("id")) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid cluster ID"}) + return + } + + q := ctx.Request.URL.Query() + opts := PropagationOptions{ + ApplyStorage: q.Get("applyStorage") == "true" || q.Get("apply_storage") == "true" || q.Get("apply_storage") == "1", + ApplySchedule: q.Get("applySchedule") == "true" || q.Get("apply_schedule") == "true" || q.Get("apply_schedule") == "1", + ApplyEnableBackups: q.Get("applyEnableBackups") == "true" || q.Get("apply_enable_backups") == "true" || q.Get("apply_enable_backups") == "1", + RespectExclusions: q.Get("respectExclusions") == "true" || q.Get("respect_exclusions") == "true" || q.Get("respect_exclusions") == "1", + } + + res, err := c.service.PreviewPropagation(user, id, opts) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + ctx.JSON(http.StatusOK, res) +} + +type applyPropagationRequest struct { + ApplyStorage bool `json:"applyStorage"` + ApplySchedule bool `json:"applySchedule"` + ApplyEnableBackups bool `json:"applyEnableBackups"` + RespectExclusions bool `json:"respectExclusions"` +} + +// SwaggerPropagationChange is a copy of PropagationChange for Swagger generation +// It avoids cross-file/type resolution issues during swag parsing. 
+type SwaggerPropagationChange struct { + DatabaseID uuid.UUID `json:"databaseId"` + Name string `json:"name"` + ChangeStorage bool `json:"changeStorage"` + ChangeSchedule bool `json:"changeSchedule"` + ChangeEnabled bool `json:"changeEnabled"` +} + +// ApplyPropagation +// @Summary Force-apply cluster defaults to existing DBs +// @Tags clusters +// @Accept json +// @Produce json +// @Param id path string true "Cluster ID" +// @Param request body applyPropagationRequest true "Propagation options" +// @Success 200 {array} SwaggerPropagationChange +// @Failure 400 +// @Failure 401 +// @Failure 500 +// @Router /clusters/{id}/propagation/apply [post] +func (c *ClusterController) ApplyPropagation(ctx *gin.Context) { + user, ok := users_middleware.GetUserFromContext(ctx) + if !ok { + ctx.JSON(http.StatusUnauthorized, gin.H{"error": "User not authenticated"}) + return + } + + id, err := uuid.Parse(ctx.Param("id")) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid cluster ID"}) + return + } + + var req applyPropagationRequest + if err := ctx.ShouldBindJSON(&req); err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + opts := PropagationOptions{ + ApplyStorage: req.ApplyStorage, + ApplySchedule: req.ApplySchedule, + ApplyEnableBackups: req.ApplyEnableBackups, + RespectExclusions: req.RespectExclusions, + } + + res, err := c.service.ApplyPropagation(user, id, opts) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + ctx.JSON(http.StatusOK, res) +} diff --git a/backend/internal/features/clusters/di.go b/backend/internal/features/clusters/di.go new file mode 100644 index 0000000..45fd3a1 --- /dev/null +++ b/backend/internal/features/clusters/di.go @@ -0,0 +1,34 @@ +package clusters + +import ( + "postgresus-backend/internal/features/backups/backups" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + workspaces_services "postgresus-backend/internal/features/workspaces/services" + "postgresus-backend/internal/util/logger" +) + +var clusterRepository = &ClusterRepository{} + +var clusterService = &ClusterService{ + repo: clusterRepository, + dbService: databases.GetDatabaseService(), + backupService: backups.GetBackupService(), + backupConfigService: backups_config.GetBackupConfigService(), + workspaceService: workspaces_services.GetWorkspaceService(), +} + +var clusterController = &ClusterController{service: clusterService} + +func GetClusterController() *ClusterController { return clusterController } +func GetClusterService() *ClusterService { return clusterService } + +var clusterBackgroundService = &ClusterBackgroundService{ + service: clusterService, + repo: clusterRepository, + logger: logger.GetLogger(), +} + +func GetClusterBackgroundService() *ClusterBackgroundService { return clusterBackgroundService } + +func SetupDependencies() {} diff --git a/backend/internal/features/clusters/models.go b/backend/internal/features/clusters/models.go new file mode 100644 index 0000000..35cba0d --- /dev/null +++ b/backend/internal/features/clusters/models.go @@ -0,0 +1,110 @@ +package clusters + +import ( + "errors" + "postgresus-backend/internal/features/intervals" + "postgresus-backend/internal/features/notifiers" + "postgresus-backend/internal/util/period" + "postgresus-backend/internal/util/tools" + "time" + + "github.com/google/uuid" +) + +type Cluster struct { + ID uuid.UUID `json:"id" 
gorm:"column:id;primaryKey;type:uuid;default:gen_random_uuid()"` + + WorkspaceID *uuid.UUID `json:"workspaceId" gorm:"column:workspace_id;type:uuid;not null"` + Name string `json:"name" gorm:"column:name;type:text;not null"` + + // PostgreSQL connection settings + Postgresql PostgresqlCluster `json:"postgresql" gorm:"foreignKey:ClusterID"` + + // Default backup settings applied to newly discovered databases during cluster backups + IsBackupsEnabled bool `json:"isBackupsEnabled" gorm:"column:is_backups_enabled;type:boolean;not null;default:false"` + StorePeriod period.Period `json:"storePeriod" gorm:"column:store_period;type:text;not null"` + BackupIntervalID *uuid.UUID `json:"backupIntervalId" gorm:"column:backup_interval_id;type:uuid"` + BackupInterval *intervals.Interval `json:"backupInterval,omitempty" gorm:"foreignKey:BackupIntervalID"` + StorageID *uuid.UUID `json:"storageId" gorm:"column:storage_id;type:uuid"` + SendNotificationsOn string `json:"sendNotificationsOn" gorm:"column:send_notifications_on;type:text;not null"` + CpuCount int `json:"cpuCount" gorm:"column:cpu_count;type:int;not null;default:1"` + + LastRunAt *time.Time `json:"lastRunAt" gorm:"column:last_run_at"` + + Notifiers []notifiers.Notifier `json:"notifiers" gorm:"many2many:cluster_notifiers;"` + ExcludedDatabases []ClusterExcludedDatabase `json:"excludedDatabases" gorm:"foreignKey:ClusterID"` +} + +func (c *Cluster) TableName() string { return "clusters" } + +func (c *Cluster) ValidateBasic() error { + if c.WorkspaceID == nil { + return errors.New("workspaceId is required") + } + if c.Name == "" { + return errors.New("name is required") + } + return c.Postgresql.Validate() +} + +func (c *Cluster) HideSensitiveData() { + c.Postgresql.HideSensitiveData() +} + +type PostgresqlCluster struct { + ID uuid.UUID `json:"id" gorm:"primaryKey;type:uuid;default:gen_random_uuid()"` + ClusterID uuid.UUID `json:"clusterId" gorm:"type:uuid;column:cluster_id;not null"` + + Version tools.PostgresqlVersion `json:"version" gorm:"type:text;not null"` + Host string `json:"host" gorm:"type:text;not null"` + Port int `json:"port" gorm:"type:int;not null"` + Username string `json:"username" gorm:"type:text;not null"` + Password string `json:"password" gorm:"type:text;not null"` + IsHttps bool `json:"isHttps" gorm:"type:boolean;default:false"` +} + +func (p *PostgresqlCluster) TableName() string { return "postgresql_clusters" } + +func (p *PostgresqlCluster) Validate() error { + if p.Version == "" { + return errors.New("version is required") + } + if p.Host == "" { + return errors.New("host is required") + } + if p.Port == 0 { + return errors.New("port is required") + } + if p.Username == "" { + return errors.New("username is required") + } + if p.Password == "" { + return errors.New("password is required") + } + return nil +} + +func (p *PostgresqlCluster) HideSensitiveData() { p.Password = "" } + +type ClusterExcludedDatabase struct { + ID uuid.UUID `json:"id" gorm:"primaryKey;type:uuid;default:gen_random_uuid()"` + ClusterID uuid.UUID `json:"clusterId" gorm:"type:uuid;column:cluster_id;not null"` + Name string `json:"name" gorm:"type:text;not null"` +} + +func (c *ClusterExcludedDatabase) TableName() string { return "cluster_excluded_databases" } + +type PropagationOptions struct { + ApplyStorage bool `json:"applyStorage"` + ApplySchedule bool `json:"applySchedule"` + ApplyEnableBackups bool `json:"applyEnableBackups"` + RespectExclusions bool `json:"respectExclusions"` +} + +type PropagationChange struct { + DatabaseID uuid.UUID 
`json:"databaseId"`
+	Name           string    `json:"name"`
+	ChangeStorage  bool      `json:"changeStorage"`
+	ChangeSchedule bool      `json:"changeSchedule"`
+	ChangeEnabled  bool      `json:"changeEnabled"`
+}
diff --git a/backend/internal/features/clusters/repository.go b/backend/internal/features/clusters/repository.go
new file mode 100644
index 0000000..acda79e
--- /dev/null
+++ b/backend/internal/features/clusters/repository.go
@@ -0,0 +1,188 @@
+package clusters
+
+import (
+	"postgresus-backend/internal/storage"
+	"time"
+
+	"github.com/google/uuid"
+	"gorm.io/gorm"
+)
+
+type ClusterRepository struct{}
+
+func (r *ClusterRepository) Save(cluster *Cluster) (*Cluster, error) {
+	db := storage.GetDb()
+
+	isNew := cluster.ID == uuid.Nil
+	if isNew {
+		cluster.ID = uuid.New()
+	}
+
+	err := db.Transaction(func(tx *gorm.DB) error {
+		// ensure nested object has correct FK
+		cluster.Postgresql.ClusterID = cluster.ID
+
+		// handle backup interval object
+		if cluster.BackupInterval != nil {
+			if cluster.BackupInterval.ID == uuid.Nil {
+				if err := tx.Create(cluster.BackupInterval).Error; err != nil {
+					return err
+				}
+			} else {
+				if err := tx.Save(cluster.BackupInterval).Error; err != nil {
+					return err
+				}
+			}
+			// assign FK
+			cluster.BackupIntervalID = &cluster.BackupInterval.ID
+		}
+
+		// Omit associations here (Omit must be chained before Create/Save to take effect); they are saved manually below
+		if isNew {
+			if err := tx.Omit("Postgresql", "Notifiers", "ExcludedDatabases", "BackupInterval").Create(cluster).Error; err != nil {
+				return err
+			}
+		} else {
+			if err := tx.Omit("Postgresql", "Notifiers", "ExcludedDatabases", "BackupInterval").Save(cluster).Error; err != nil {
+				return err
+			}
+		}
+
+		// save pg settings
+		if cluster.Postgresql.ID == uuid.Nil {
+			cluster.Postgresql.ID = uuid.New()
+			if err := tx.Create(&cluster.Postgresql).Error; err != nil {
+				return err
+			}
+		} else {
+			if err := tx.Save(&cluster.Postgresql).Error; err != nil {
+				return err
+			}
+		}
+
+		// save notifiers relation
+		if err := tx.Model(cluster).Association("Notifiers").Replace(cluster.Notifiers); err != nil {
+			return err
+		}
+
+		// replace excluded databases manually to ensure ClusterID is set
+		if err := tx.Where("cluster_id = ?", cluster.ID).Delete(&ClusterExcludedDatabase{}).Error; err != nil {
+			return err
+		}
+		if len(cluster.ExcludedDatabases) > 0 {
+			items := make([]ClusterExcludedDatabase, 0, len(cluster.ExcludedDatabases))
+			for _, ed := range cluster.ExcludedDatabases {
+				if ed.Name == "" {
+					continue
+				}
+				ed.ClusterID = cluster.ID
+				if ed.ID == uuid.Nil {
+					ed.ID = uuid.New()
+				}
+				items = append(items, ed)
+			}
+			if len(items) > 0 {
+				if err := tx.Create(&items).Error; err != nil {
+					return err
+				}
+			}
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return cluster, nil
+}
+
+func (r *ClusterRepository) FindByID(id uuid.UUID) (*Cluster, error) {
+	var cluster Cluster
+
+	if err := storage.GetDb().
+		Preload("Postgresql").
+		Preload("Notifiers").
+		Preload("BackupInterval").
+		Preload("ExcludedDatabases").
+		Where("id = ?", id).
+		First(&cluster).Error; err != nil {
+		return nil, err
+	}
+
+	return &cluster, nil
+}
+
+func (r *ClusterRepository) FindByWorkspaceID(workspaceID uuid.UUID) ([]*Cluster, error) {
+	var clusters []*Cluster
+
+	if err := storage.GetDb().
+		Preload("Postgresql").
+		Preload("Notifiers").
+		Preload("BackupInterval").
+		Preload("ExcludedDatabases").
+		Where("workspace_id = ?", workspaceID).
+		Order("name ASC").
+ Find(&clusters).Error; err != nil { + return nil, err + } + + return clusters, nil +} + +func (r *ClusterRepository) Delete(id uuid.UUID) error { + db := storage.GetDb() + + return db.Transaction(func(tx *gorm.DB) error { + var cluster Cluster + if err := tx.Where("id = ?", id).First(&cluster).Error; err != nil { + return err + } + + if err := tx.Model(&cluster).Association("Notifiers").Clear(); err != nil { + return err + } + + if err := tx.Where("cluster_id = ?", id).Delete(&PostgresqlCluster{}).Error; err != nil { + return err + } + + if err := tx.Delete(&Cluster{}, id).Error; err != nil { + return err + } + + return nil + }) +} + +func (r *ClusterRepository) IsNotifierUsing(notifierID uuid.UUID) (bool, error) { + var count int64 + if err := storage.GetDb(). + Table("cluster_notifiers"). + Where("notifier_id = ?", notifierID). + Count(&count).Error; err != nil { + return false, err + } + return count > 0, nil +} + +func (r *ClusterRepository) FindAll() ([]*Cluster, error) { + var clusters []*Cluster + + if err := storage.GetDb(). + Preload("Postgresql"). + Preload("Notifiers"). + Preload("BackupInterval"). + Preload("ExcludedDatabases"). + Order("name ASC"). + Find(&clusters).Error; err != nil { + return nil, err + } + + return clusters, nil +} + +func (r *ClusterRepository) UpdateLastRunAt(id uuid.UUID, t time.Time) error { + return storage.GetDb().Model(&Cluster{}).Where("id = ?", id).Update("last_run_at", t).Error +} diff --git a/backend/internal/features/clusters/service.go b/backend/internal/features/clusters/service.go new file mode 100644 index 0000000..33c5f43 --- /dev/null +++ b/backend/internal/features/clusters/service.go @@ -0,0 +1,888 @@ +package clusters + +import ( + "errors" + "fmt" + "io" + "log/slog" + "strings" + "sync" + + backups "postgresus-backend/internal/features/backups/backups" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + "postgresus-backend/internal/features/databases/databases/postgresql" + "postgresus-backend/internal/features/intervals" + users_enums "postgresus-backend/internal/features/users/enums" + users_models "postgresus-backend/internal/features/users/models" + workspaces_services "postgresus-backend/internal/features/workspaces/services" + "postgresus-backend/internal/util/period" + + "github.com/google/uuid" +) + +type ClusterService struct { + repo *ClusterRepository + dbService *databases.DatabaseService + backupService *backups.BackupService + backupConfigService *backups_config.BackupConfigService + workspaceService *workspaces_services.WorkspaceService +} + +// discoverClusterDatabases lists accessible databases on the cluster connection +func (s *ClusterService) discoverClusterDatabases(pg *PostgresqlCluster) ([]string, error) { + p := &postgresql.PostgresqlDatabase{ + Version: pg.Version, + Host: pg.Host, + Port: pg.Port, + Username: pg.Username, + Password: pg.Password, + IsHttps: pg.IsHttps, + } + logger := slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{Level: slog.LevelError})) + return p.ListAccessibleDatabases(logger) +} + +// isSystemDb returns true for Postgres system databases +func isSystemDb(name string) bool { + switch strings.ToLower(strings.TrimSpace(name)) { + case "postgres", "template0", "template1": + return true + default: + return false + } +} + +// findDb finds an existing DB matching the cluster connection and database name +func findDb(existing []*databases.Database, pg *PostgresqlCluster, dbName string) *databases.Database 
{
+	for _, d := range existing {
+		if d == nil || d.Postgresql == nil || d.Type != databases.DatabaseTypePostgres {
+			continue
+		}
+		if d.Postgresql.Database == nil {
+			continue
+		}
+		if d.Postgresql.Host == pg.Host && d.Postgresql.Port == pg.Port && d.Postgresql.Username == pg.Username && d.Postgresql.IsHttps == pg.IsHttps {
+			if strings.EqualFold(*d.Postgresql.Database, dbName) {
+				return d
+			}
+		}
+	}
+	return nil
+}
+
+// RunBackupScheduled runs cluster backup as a system task (admin bypass), used by background scheduler.
+func (s *ClusterService) RunBackupScheduled(clusterID uuid.UUID) error {
+	sys := &users_models.User{Role: users_enums.UserRoleAdmin}
+	return s.RunBackup(sys, clusterID)
+}
+
+// configureClusterDatabasesForScheduling discovers databases on the cluster and ensures a Database +
+// BackupConfig exist for each (honoring cluster exclusions), triggering an initial backup where enabled.
+func (s *ClusterService) configureClusterDatabasesForScheduling(
+	user *users_models.User,
+	cluster *Cluster,
+) error {
+	// Discover databases on the cluster
+	dbNames, err := s.discoverClusterDatabases(&cluster.Postgresql)
+	if err != nil {
+		return fmt.Errorf("failed to discover databases in cluster: %w", err)
+	}
+
+	if cluster.WorkspaceID == nil {
+		return errors.New("cluster is not bound to a workspace")
+	}
+
+	// Load existing databases for workspace
+	existingDbs, err := s.dbService.GetDatabasesByWorkspace(user, *cluster.WorkspaceID)
+	if err != nil {
+		return err
+	}
+
+	// Build excluded set
+	excluded := map[string]struct{}{}
+	for _, ed := range cluster.ExcludedDatabases {
+		if ed.Name == "" {
+			continue
+		}
+		excluded[strings.ToLower(ed.Name)] = struct{}{}
+	}
+
+	// Concurrency limit for triggering backups
+	const maxParallel = 5
+	sem := make(chan struct{}, maxParallel)
+	var wg sync.WaitGroup
+
+	// Create any missing databases and ensure backup config
+	for _, dbName := range dbNames {
+		if isSystemDb(dbName) {
+			continue
+		}
+		if _, skip := excluded[strings.ToLower(dbName)]; skip {
+			continue
+		}
+
+		if db := findDb(existingDbs, &cluster.Postgresql, dbName); db == nil {
+			// create database
+			created, err := s.createDatabaseForCluster(user, cluster, dbName)
+			if err != nil {
+				return fmt.Errorf("failed to create database '%s' for cluster: %w", dbName, err)
+			}
+
+			// ensure backup config according to cluster defaults
+			if err := s.ensureBackupConfig(created.ID, cluster); err != nil {
+				return fmt.Errorf("failed to create backup config for '%s': %w", dbName, err)
+			}
+
+			// trigger backup if enabled
+			cfg, err := s.backupConfigService.GetBackupConfigByDbId(created.ID)
+			if err == nil && cfg != nil && cfg.IsBackupsEnabled {
+				wg.Add(1)
+				sem <- struct{}{}
+				go func(dbid uuid.UUID) {
+					defer wg.Done()
+					defer func() { <-sem }()
+					s.backupService.MakeBackup(dbid, true)
+				}(created.ID)
+			}
+		} else {
+			// existing DB: ensure backup config exists and apply cluster storage if missing
+			if err := s.ensureBackupConfig(db.ID, cluster); err != nil {
+				return fmt.Errorf("failed to ensure backup config for existing DB '%s': %w", dbName, err)
+			}
+			cfg, err := s.backupConfigService.GetBackupConfigByDbId(db.ID)
+			if err == nil && cfg != nil && cfg.IsBackupsEnabled {
+				wg.Add(1)
+				sem <- struct{}{}
+				go func(dbid uuid.UUID) {
+					defer wg.Done()
+					defer func() { <-sem }()
+					s.backupService.MakeBackup(dbid, true)
+				}(db.ID)
+			}
+		}
+	}
+
+	wg.Wait()
+	return nil
+}
+
+func (s *ClusterService) CreateCluster(
+	user *users_models.User,
+	workspaceID uuid.UUID,
+	cluster *Cluster,
+) (*Cluster, error) {
+	canManage, err
:= s.workspaceService.CanUserManageDBs(workspaceID, user) + if err != nil { + return nil, err + } + if !canManage { + return nil, errors.New("insufficient permissions to create cluster in this workspace") + } + + cluster.WorkspaceID = &workspaceID + // Validation: if backups are enabled, storage must be set + if cluster.IsBackupsEnabled && cluster.StorageID == nil { + return nil, errors.New("storage must be selected when backups are enabled for cluster") + } + if err := cluster.ValidateBasic(); err != nil { + return nil, err + } + + saved, err := s.repo.Save(cluster) + if err != nil { + return nil, err + } + + // Configure scheduled backups for existing databases in the cluster (non-blocking) + go func() { + _ = s.configureClusterDatabasesForScheduling(user, saved) + }() + + return saved, nil +} + +func (s *ClusterService) GetClusters( + user *users_models.User, + workspaceID uuid.UUID, +) ([]*Cluster, error) { + canAccess, _, err := s.workspaceService.CanUserAccessWorkspace(workspaceID, user) + if err != nil { + return nil, err + } + if !canAccess { + return nil, errors.New("insufficient permissions to access this workspace") + } + + clusters, err := s.repo.FindByWorkspaceID(workspaceID) + if err != nil { + return nil, err + } + + for _, c := range clusters { + c.HideSensitiveData() + } + + return clusters, nil +} + +func (s *ClusterService) UpdateCluster( + user *users_models.User, + id uuid.UUID, + updated *Cluster, +) (*Cluster, error) { + existing, err := s.repo.FindByID(id) + if err != nil { + return nil, err + } + + if existing.WorkspaceID == nil { + return nil, errors.New("cluster is not bound to a workspace") + } + + canManage, err := s.workspaceService.CanUserManageDBs(*existing.WorkspaceID, user) + if err != nil { + return nil, err + } + if !canManage { + return nil, errors.New("insufficient permissions to update this cluster") + } + + // Preserve identifiers + updated.ID = id + updated.WorkspaceID = existing.WorkspaceID + + // Merge Postgresql settings, preserve password if empty + updated.Postgresql.ID = existing.Postgresql.ID + updated.Postgresql.ClusterID = existing.ID + if strings.TrimSpace(updated.Postgresql.Password) == "" { + updated.Postgresql.Password = existing.Postgresql.Password + } + + // Validate + // Validation: if backups are enabled, storage must be set + if updated.IsBackupsEnabled && updated.StorageID == nil { + return nil, errors.New("storage must be selected when backups are enabled for cluster") + } + if err := updated.ValidateBasic(); err != nil { + return nil, err + } + + if existing.WorkspaceID != nil { + oldExcluded := map[string]struct{}{} + for _, ed := range existing.ExcludedDatabases { + if ed.Name == "" { + continue + } + oldExcluded[strings.ToLower(ed.Name)] = struct{}{} + } + + addedExcluded := make([]string, 0) + for _, ed := range updated.ExcludedDatabases { + if ed.Name == "" { + continue + } + nameLower := strings.ToLower(ed.Name) + if _, was := oldExcluded[nameLower]; !was { + addedExcluded = append(addedExcluded, nameLower) + } + } + + if len(addedExcluded) > 0 { + dbs, err := s.dbService.GetDatabasesByWorkspace(user, *existing.WorkspaceID) + if err != nil { + return nil, err + } + + for _, d := range dbs { + if d.Type != databases.DatabaseTypePostgres || d.Postgresql == nil || d.Postgresql.Database == nil { + continue + } + if !s.matchesClusterConn(d, &existing.Postgresql) { + continue + } + dbName := strings.ToLower(*d.Postgresql.Database) + for _, excludedName := range addedExcluded { + if dbName == excludedName { + cfg, err := 
s.backupConfigService.FindBackupConfigByDbIdNoInit(d.ID) + if err != nil { + return nil, fmt.Errorf("failed to load backup config for excluded database '%s': %w", *d.Postgresql.Database, err) + } + if cfg != nil { + cfg.IsBackupsEnabled = false + cfg.ManagedByCluster = false + cfg.ClusterID = nil + if _, err := s.backupConfigService.SaveBackupConfig(cfg); err != nil { + return nil, fmt.Errorf("failed to disable backup config for excluded database '%s': %w", *d.Postgresql.Database, err) + } + } + break + } + } + } + } + } + + saved, err := s.repo.Save(updated) + if err != nil { + return nil, err + } + saved.HideSensitiveData() + return saved, nil +} + +func (s *ClusterService) ListClusterDatabases( + user *users_models.User, + clusterID uuid.UUID, +) ([]string, error) { + cluster, err := s.repo.FindByID(clusterID) + if err != nil { + return nil, err + } + if cluster.WorkspaceID == nil { + return nil, errors.New("cluster is not bound to a workspace") + } + canAccess, _, err := s.workspaceService.CanUserAccessWorkspace(*cluster.WorkspaceID, user) + if err != nil { + return nil, err + } + if !canAccess { + return nil, errors.New("insufficient permissions to access this workspace") + } + + dbs, err := s.discoverClusterDatabases(&cluster.Postgresql) + if err != nil { + return nil, err + } + return dbs, nil +} + +func (s *ClusterService) RunBackup( + user *users_models.User, + clusterID uuid.UUID, +) error { + cluster, err := s.repo.FindByID(clusterID) + if err != nil { + return err + } + + if cluster.WorkspaceID == nil { + return errors.New("cluster is not bound to a workspace") + } + + canManage, err := s.workspaceService.CanUserManageDBs(*cluster.WorkspaceID, user) + if err != nil { + return err + } + if !canManage { + return errors.New("insufficient permissions to run backup for this cluster") + } + + // Discover databases on the cluster + dbNames, err := s.discoverClusterDatabases(&cluster.Postgresql) + if err != nil { + return fmt.Errorf("failed to discover databases in cluster: %w", err) + } + + // Load existing databases for workspace to avoid duplicate creation + existingDbs, err := s.dbService.GetDatabasesByWorkspace(user, *cluster.WorkspaceID) + if err != nil { + return err + } + + // Build excluded set + excluded := map[string]struct{}{} + for _, ed := range cluster.ExcludedDatabases { + if ed.Name == "" { + continue + } + excluded[strings.ToLower(ed.Name)] = struct{}{} + } + + // Concurrency limit for triggering backups + const maxParallel = 5 + sem := make(chan struct{}, maxParallel) + var wg sync.WaitGroup + + // Create any missing databases and ensure backup config + for _, dbName := range dbNames { + if isSystemDb(dbName) { + continue + } + if _, skip := excluded[strings.ToLower(dbName)]; skip { + continue + } + + if db := findDb(existingDbs, &cluster.Postgresql, dbName); db == nil { + // create database + created, err := s.createDatabaseForCluster(user, cluster, dbName) + if err != nil { + return fmt.Errorf("failed to create database '%s' for cluster: %w", dbName, err) + } + + // ensure backup config according to cluster defaults + if err := s.ensureBackupConfig(created.ID, cluster); err != nil { + return fmt.Errorf("failed to create backup config for '%s': %w", dbName, err) + } + + // trigger backup if enabled + cfg, err := s.backupConfigService.GetBackupConfigByDbId(created.ID) + if err == nil && cfg != nil && cfg.IsBackupsEnabled { + wg.Add(1) + sem <- struct{}{} + go func(dbid uuid.UUID) { + defer wg.Done() + defer func() { <-sem }() + s.backupService.MakeBackup(dbid, true) 
+ }(created.ID) + } + } else { + // Existing DB: ensure config reflects cluster defaults when appropriate, then trigger if enabled + if err := s.ensureBackupConfig(db.ID, cluster); err != nil { + return fmt.Errorf("failed to ensure backup config for existing DB '%s': %w", dbName, err) + } + cfg, err := s.backupConfigService.GetBackupConfigByDbId(db.ID) + if err == nil && cfg != nil && cfg.IsBackupsEnabled { + wg.Add(1) + sem <- struct{}{} + go func(dbid uuid.UUID) { + defer wg.Done() + defer func() { <-sem }() + s.backupService.MakeBackup(dbid, true) + }(db.ID) + } + } + } + + wg.Wait() + return nil +} + +func (s *ClusterService) createDatabaseForCluster( + user *users_models.User, + cluster *Cluster, + dbName string, +) (*databases.Database, error) { + pg := cluster.Postgresql + + db := &databases.Database{ + ID: uuid.Nil, + WorkspaceID: cluster.WorkspaceID, + Name: dbName, + Type: databases.DatabaseTypePostgres, + Notifiers: cluster.Notifiers, + Postgresql: &postgresql.PostgresqlDatabase{ + ID: uuid.Nil, + Version: pg.Version, + Host: pg.Host, + Port: pg.Port, + Username: pg.Username, + Password: pg.Password, + Database: &dbName, + IsHttps: pg.IsHttps, + }, + } + + // databaseService.CreateDatabase expects workspace ID not nil + return s.dbService.CreateDatabase(user, *cluster.WorkspaceID, db) +} + +func (s *ClusterService) ensureBackupConfig( + databaseID uuid.UUID, + cluster *Cluster, +) error { + cfg, err := s.backupConfigService.FindBackupConfigByDbIdNoInit(databaseID) + if err != nil { + return err + } + if cfg != nil { + // Upgrade default-initialized config to cluster defaults + if isDefaultBackupConfig(cfg) { + var intervalObj *intervals.Interval + var intervalID uuid.UUID + if cluster.BackupInterval != nil { + if cluster.BackupInterval.ID != uuid.Nil { + intervalID = cluster.BackupInterval.ID + } else { + intervalObj = &intervals.Interval{ + ID: uuid.Nil, + Interval: cluster.BackupInterval.Interval, + TimeOfDay: cluster.BackupInterval.TimeOfDay, + Weekday: cluster.BackupInterval.Weekday, + DayOfMonth: cluster.BackupInterval.DayOfMonth, + } + } + } else if cluster.BackupIntervalID != nil { + intervalID = *cluster.BackupIntervalID + } else { + timeOfDay := "04:00" + intervalObj = &intervals.Interval{Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay} + } + + cfg.IsBackupsEnabled = cluster.IsBackupsEnabled + cfg.StorePeriod = cluster.StorePeriod + cfg.BackupIntervalID = intervalID + cfg.BackupInterval = intervalObj + cfg.StorageID = cluster.StorageID + cfg.SendNotificationsOn = parseNotifications(cluster.SendNotificationsOn) + cfg.IsRetryIfFailed = true + cfg.MaxFailedTriesCount = 3 + cfg.CpuCount = cluster.CpuCount + // mark as cluster-managed + cfg.ManagedByCluster = true + cfg.ClusterID = &cluster.ID + + _, err = s.backupConfigService.SaveBackupConfig(cfg) + return err + } + return nil + } + + // Build backup config from cluster defaults + var intervalObj *intervals.Interval + var intervalID uuid.UUID + if cluster.BackupInterval != nil { + // If cluster has an interval object, reference its ID if present, else copy the object + if cluster.BackupInterval.ID != uuid.Nil { + intervalID = cluster.BackupInterval.ID + } else { + // Copy object; repository will create it for backup config + intervalObj = &intervals.Interval{ + ID: uuid.Nil, + Interval: cluster.BackupInterval.Interval, + TimeOfDay: cluster.BackupInterval.TimeOfDay, + Weekday: cluster.BackupInterval.Weekday, + DayOfMonth: cluster.BackupInterval.DayOfMonth, + } + } + } else if cluster.BackupIntervalID != nil { + 
intervalID = *cluster.BackupIntervalID + } else { + // fallback to daily at 04:00 + timeOfDay := "04:00" + intervalObj = &intervals.Interval{Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay} + } + + notifs := parseNotifications(cluster.SendNotificationsOn) + + newCfg := &backups_config.BackupConfig{ + DatabaseID: databaseID, + IsBackupsEnabled: cluster.IsBackupsEnabled, + StorePeriod: cluster.StorePeriod, + BackupIntervalID: intervalID, + BackupInterval: intervalObj, + StorageID: cluster.StorageID, + SendNotificationsOn: notifs, + IsRetryIfFailed: true, + MaxFailedTriesCount: 3, + CpuCount: cluster.CpuCount, + ManagedByCluster: true, + ClusterID: &cluster.ID, + } + + _, err = s.backupConfigService.SaveBackupConfig(newCfg) + return err +} + +func (s *ClusterService) PreviewPropagation( + user *users_models.User, + clusterID uuid.UUID, + opts PropagationOptions, +) ([]PropagationChange, error) { + cluster, err := s.repo.FindByID(clusterID) + if err != nil { + return nil, err + } + if cluster.WorkspaceID == nil { + return nil, errors.New("cluster is not bound to a workspace") + } + canManage, err := s.workspaceService.CanUserManageDBs(*cluster.WorkspaceID, user) + if err != nil { + return nil, err + } + if !canManage { + return nil, errors.New("insufficient permissions to update this cluster") + } + + dbs, err := s.dbService.GetDatabasesByWorkspace(user, *cluster.WorkspaceID) + if err != nil { + return nil, err + } + + excluded := map[string]struct{}{} + if opts.RespectExclusions { + for _, ed := range cluster.ExcludedDatabases { + if ed.Name == "" { + continue + } + excluded[strings.ToLower(ed.Name)] = struct{}{} + } + } + + clusterIntID, clusterIntObj := s.clusterScheduleToIntervalIDAndObj(cluster) + + res := make([]PropagationChange, 0) + for _, d := range dbs { + if d.Type != databases.DatabaseTypePostgres || d.Postgresql == nil { + continue + } + if !s.matchesClusterConn(d, &cluster.Postgresql) { + continue + } + if d.Postgresql.Database == nil || isSystemDb(*d.Postgresql.Database) { + continue + } + if opts.RespectExclusions { + if _, ok := excluded[strings.ToLower(*d.Postgresql.Database)]; ok { + continue + } + } + + cfg, err := s.backupConfigService.GetBackupConfigByDbId(d.ID) + if err != nil || cfg == nil { + continue + } + + ch := PropagationChange{DatabaseID: d.ID, Name: *d.Postgresql.Database} + if opts.ApplyStorage && cluster.StorageID != nil { + if cfg.StorageID == nil || *cfg.StorageID != *cluster.StorageID { + ch.ChangeStorage = true + } + } + if opts.ApplySchedule { + if clusterIntID != uuid.Nil { + if cfg.BackupIntervalID == uuid.Nil || cfg.BackupIntervalID != clusterIntID { + ch.ChangeSchedule = true + } + } else if clusterIntObj != nil { + if cfg.BackupInterval == nil || !s.intervalsEqual(cfg.BackupInterval, clusterIntObj) { + ch.ChangeSchedule = true + } + } + } + + if opts.ApplyEnableBackups { + if cfg.IsBackupsEnabled != cluster.IsBackupsEnabled { + ch.ChangeEnabled = true + } + } + + if ch.ChangeStorage || ch.ChangeSchedule || ch.ChangeEnabled { + res = append(res, ch) + } + } + + return res, nil +} + +func (s *ClusterService) ApplyPropagation( + user *users_models.User, + clusterID uuid.UUID, + opts PropagationOptions, +) ([]PropagationChange, error) { + cluster, err := s.repo.FindByID(clusterID) + if err != nil { + return nil, err + } + if cluster.WorkspaceID == nil { + return nil, errors.New("cluster is not bound to a workspace") + } + canManage, err := s.workspaceService.CanUserManageDBs(*cluster.WorkspaceID, user) + if err != nil { + return nil, err + } + 
if !canManage { + return nil, errors.New("insufficient permissions to update this cluster") + } + + dbs, err := s.dbService.GetDatabasesByWorkspace(user, *cluster.WorkspaceID) + if err != nil { + return nil, err + } + + excluded := map[string]struct{}{} + if opts.RespectExclusions { + for _, ed := range cluster.ExcludedDatabases { + if ed.Name == "" { + continue + } + excluded[strings.ToLower(ed.Name)] = struct{}{} + } + } + + clusterIntID, clusterIntObj := s.clusterScheduleToIntervalIDAndObj(cluster) + + applied := make([]PropagationChange, 0) + for _, d := range dbs { + if d.Type != databases.DatabaseTypePostgres || d.Postgresql == nil { + continue + } + if !s.matchesClusterConn(d, &cluster.Postgresql) { + continue + } + if d.Postgresql.Database == nil || isSystemDb(*d.Postgresql.Database) { + continue + } + if opts.RespectExclusions { + if _, ok := excluded[strings.ToLower(*d.Postgresql.Database)]; ok { + continue + } + } + + cfg, err := s.backupConfigService.GetBackupConfigByDbId(d.ID) + if err != nil || cfg == nil { + continue + } + + ch := PropagationChange{DatabaseID: d.ID, Name: *d.Postgresql.Database} + if opts.ApplyStorage && cluster.StorageID != nil { + if cfg.StorageID == nil || *cfg.StorageID != *cluster.StorageID { + cfg.StorageID = cluster.StorageID + ch.ChangeStorage = true + } + } + if opts.ApplySchedule { + if clusterIntID != uuid.Nil { + if cfg.BackupIntervalID == uuid.Nil || cfg.BackupIntervalID != clusterIntID { + cfg.BackupIntervalID = clusterIntID + cfg.BackupInterval = nil + ch.ChangeSchedule = true + } + } else if clusterIntObj != nil { + if cfg.BackupInterval == nil || !s.intervalsEqual(cfg.BackupInterval, clusterIntObj) { + t := clusterIntObj.TimeOfDay + dom := clusterIntObj.DayOfMonth + wd := clusterIntObj.Weekday + cfg.BackupIntervalID = uuid.Nil + cfg.BackupInterval = &intervals.Interval{ + ID: uuid.Nil, + Interval: clusterIntObj.Interval, + TimeOfDay: t, + Weekday: wd, + DayOfMonth: dom, + } + ch.ChangeSchedule = true + } + } + } + + if opts.ApplyEnableBackups { + if cfg.IsBackupsEnabled != cluster.IsBackupsEnabled { + cfg.IsBackupsEnabled = cluster.IsBackupsEnabled + ch.ChangeEnabled = true + } + } + + if ch.ChangeStorage || ch.ChangeSchedule || ch.ChangeEnabled { + // mark as cluster-managed when applying cluster defaults + cfg.ManagedByCluster = true + cfg.ClusterID = &cluster.ID + if _, err := s.backupConfigService.SaveBackupConfig(cfg); err != nil { + return nil, fmt.Errorf("failed to apply changes to DB '%s': %w", ch.Name, err) + } + applied = append(applied, ch) + } + } + + return applied, nil +} + +func (s *ClusterService) matchesClusterConn(d *databases.Database, pg *PostgresqlCluster) bool { + if d.Postgresql == nil { + return false + } + return d.Postgresql.Host == pg.Host && d.Postgresql.Port == pg.Port && d.Postgresql.Username == pg.Username && d.Postgresql.IsHttps == pg.IsHttps +} + +func (s *ClusterService) clusterScheduleToIntervalIDAndObj(cluster *Cluster) (uuid.UUID, *intervals.Interval) { + if cluster.BackupInterval != nil { + if cluster.BackupInterval.ID != uuid.Nil { + return cluster.BackupInterval.ID, nil + } + t := cluster.BackupInterval.TimeOfDay + dom := cluster.BackupInterval.DayOfMonth + wd := cluster.BackupInterval.Weekday + return uuid.Nil, &intervals.Interval{ID: uuid.Nil, Interval: cluster.BackupInterval.Interval, TimeOfDay: t, Weekday: wd, DayOfMonth: dom} + } + if cluster.BackupIntervalID != nil { + return *cluster.BackupIntervalID, nil + } + return uuid.Nil, nil +} + +func (s *ClusterService) intervalsEqual(a, b 
*intervals.Interval) bool { + if a == nil || b == nil { + return false + } + if a.Interval != b.Interval { + return false + } + if (a.TimeOfDay == nil) != (b.TimeOfDay == nil) { + return false + } + if a.TimeOfDay != nil && b.TimeOfDay != nil && *a.TimeOfDay != *b.TimeOfDay { + return false + } + if (a.Weekday == nil) != (b.Weekday == nil) { + return false + } + if a.Weekday != nil && b.Weekday != nil && *a.Weekday != *b.Weekday { + return false + } + if (a.DayOfMonth == nil) != (b.DayOfMonth == nil) { + return false + } + if a.DayOfMonth != nil && b.DayOfMonth != nil && *a.DayOfMonth != *b.DayOfMonth { + return false + } + return true +} + +// parseNotifications parses a comma-separated list into BackupNotificationType slice +func parseNotifications(slist string) []backups_config.BackupNotificationType { + if strings.TrimSpace(slist) == "" { + return []backups_config.BackupNotificationType{} + } + parts := strings.Split(slist, ",") + res := make([]backups_config.BackupNotificationType, 0, len(parts)) + for _, p := range parts { + v := strings.TrimSpace(p) + switch backups_config.BackupNotificationType(v) { + case backups_config.NotificationBackupFailed, backups_config.NotificationBackupSuccess: + res = append(res, backups_config.BackupNotificationType(v)) + default: + // ignore unknown values + } + } + return res +} + +// isDefaultBackupConfig detects the auto-initialized default config +func isDefaultBackupConfig(cfg *backups_config.BackupConfig) bool { + if cfg == nil { + return false + } + if cfg.IsBackupsEnabled { + return false + } + // Default store period is WEEK + if cfg.StorePeriod != period.PeriodWeek { + return false + } + // Default interval is DAILY at 04:00 + if cfg.BackupInterval == nil || cfg.BackupInterval.Interval != intervals.IntervalDaily { + return false + } + if cfg.BackupInterval.TimeOfDay == nil || *cfg.BackupInterval.TimeOfDay != "04:00" { + return false + } + // No storage by default + if cfg.StorageID != nil { + return false + } + // Default CPU count is 1 and retry is enabled with 3 tries + if cfg.CpuCount != 1 || !cfg.IsRetryIfFailed || cfg.MaxFailedTriesCount != 3 { + return false + } + return true +} diff --git a/backend/internal/features/databases/controller.go b/backend/internal/features/databases/controller.go index 0e4f30a..c588548 100644 --- a/backend/internal/features/databases/controller.go +++ b/backend/internal/features/databases/controller.go @@ -24,6 +24,7 @@ func (c *DatabaseController) RegisterRoutes(router *gin.RouterGroup) { router.GET("/databases", c.GetDatabases) router.POST("/databases/:id/test-connection", c.TestDatabaseConnection) router.POST("/databases/test-connection-direct", c.TestDatabaseConnectionDirect) + router.POST("/databases/list-databases-direct", c.ListAccessibleDatabasesDirect) router.POST("/databases/:id/copy", c.CopyDatabase) router.GET("/databases/notifier/:id/is-using", c.IsNotifierUsing) @@ -265,6 +266,39 @@ func (c *DatabaseController) TestDatabaseConnectionDirect(ctx *gin.Context) { ctx.JSON(http.StatusOK, gin.H{"message": "connection successful"}) } +// ListAccessibleDatabasesDirect +// @Summary List databases in a cluster directly +// @Description List accessible databases in a PostgreSQL cluster without saving configuration +// @Tags databases +// @Accept json +// @Produce json +// @Param request body Database true "Database connection configuration" +// @Success 200 {object} map[string][]string +// @Failure 400 +// @Failure 401 +// @Router /databases/list-databases-direct [post] +func (c *DatabaseController) 
ListAccessibleDatabasesDirect(ctx *gin.Context) { + _, ok := users_middleware.GetUserFromContext(ctx) + if !ok { + ctx.JSON(http.StatusUnauthorized, gin.H{"error": "User not authenticated"}) + return + } + + var request Database + if err := ctx.ShouldBindJSON(&request); err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + dbs, err := c.databaseService.ListAccessibleDatabasesDirect(&request) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + ctx.JSON(http.StatusOK, gin.H{"databases": dbs}) +} + // IsNotifierUsing // @Summary Check if notifier is being used // @Description Check if a notifier is currently being used by any database diff --git a/backend/internal/features/databases/databases/postgresql/model.go b/backend/internal/features/databases/databases/postgresql/model.go index e0cb44d..60775cc 100644 --- a/backend/internal/features/databases/databases/postgresql/model.go +++ b/backend/internal/features/databases/databases/postgresql/model.go @@ -164,6 +164,71 @@ func verifyDatabaseVersion( return nil } +// ListAccessibleDatabases connects to the cluster and returns databases the user can CONNECT to +func (p *PostgresqlDatabase) ListAccessibleDatabases(logger *slog.Logger) ([]string, error) { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + // Try common system databases for initial connection + candidates := []string{"postgres", "template1"} + var lastErr error + + for _, dbName := range candidates { + connStr := buildConnectionStringForDB(p, dbName) + + conn, err := pgx.Connect(ctx, connStr) + if err != nil { + lastErr = fmt.Errorf("failed to connect to database '%s': %w", dbName, err) + continue + } + + // Ensure we always close the connection + defer func() { + if closeErr := conn.Close(ctx); closeErr != nil { + logger.Error("Failed to close connection", "error", closeErr) + } + }() + + // Verify version after successful connection + if err := verifyDatabaseVersion(ctx, conn, p.Version); err != nil { + return nil, err + } + + // List accessible databases (exclude templates, require CONNECT privilege) + rows, err := conn.Query(ctx, ` + SELECT datname + FROM pg_database + WHERE datistemplate = false + AND datallowconn = true + AND has_database_privilege(current_user, datname, 'CONNECT') + ORDER BY datname`) + if err != nil { + return nil, fmt.Errorf("failed to list databases: %w", err) + } + defer rows.Close() + + var dbs []string + for rows.Next() { + var name string + if scanErr := rows.Scan(&name); scanErr != nil { + return nil, fmt.Errorf("failed to scan database name: %w", scanErr) + } + dbs = append(dbs, name) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("error iterating over database rows: %w", err) + } + + return dbs, nil + } + + if lastErr != nil { + return nil, lastErr + } + + return nil, errors.New("unable to connect to cluster using known system databases") +} + // testBasicOperations tests basic operations that backup tools need func testBasicOperations(ctx context.Context, conn *pgx.Conn, dbName string) error { var hasCreatePriv bool diff --git a/backend/internal/features/databases/service.go b/backend/internal/features/databases/service.go index 07d0cbd..6fd9844 100644 --- a/backend/internal/features/databases/service.go +++ b/backend/internal/features/databases/service.go @@ -297,6 +297,45 @@ func (s *DatabaseService) TestDatabaseConnectionDirect( return usingDatabase.TestConnection(s.logger) } +func (s 
+func (s *DatabaseService) ListAccessibleDatabasesDirect(
+	database *Database,
+) ([]string, error) {
+	var usingDatabase *Database
+
+	if database.ID != uuid.Nil {
+		existingDatabase, err := s.dbRepository.FindByID(database.ID)
+		if err != nil {
+			return nil, err
+		}
+
+		if database.WorkspaceID != nil && existingDatabase.WorkspaceID != nil &&
+			*existingDatabase.WorkspaceID != *database.WorkspaceID {
+			return nil, errors.New("database does not belong to this workspace")
+		}
+
+		existingDatabase.Update(database)
+
+		if err := existingDatabase.Validate(); err != nil {
+			return nil, err
+		}
+
+		usingDatabase = existingDatabase
+	} else {
+		usingDatabase = database
+	}
+
+	if usingDatabase.Type != DatabaseTypePostgres || usingDatabase.Postgresql == nil {
+		return nil, errors.New("postgresql configuration is required")
+	}
+
+	dbs, err := usingDatabase.Postgresql.ListAccessibleDatabases(s.logger)
+	if err != nil {
+		return nil, err
+	}
+
+	return dbs, nil
+}
+
 func (s *DatabaseService) GetDatabaseByID(
 	id uuid.UUID,
 ) (*Database, error) {
diff --git a/backend/migrations/20251115220000_add_clusters.sql b/backend/migrations/20251115220000_add_clusters.sql
new file mode 100644
index 0000000..056bca6
--- /dev/null
+++ b/backend/migrations/20251115220000_add_clusters.sql
@@ -0,0 +1,41 @@
+-- +goose Up
+-- +goose StatementBegin
+
+CREATE TABLE clusters (
+    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+    workspace_id UUID NOT NULL REFERENCES workspaces(id) ON DELETE CASCADE,
+    name TEXT NOT NULL,
+
+    is_backups_enabled BOOLEAN NOT NULL DEFAULT FALSE,
+    store_period TEXT NOT NULL DEFAULT 'WEEK',
+    backup_interval_id UUID,
+    storage_id UUID REFERENCES storages(id),
+    send_notifications_on TEXT NOT NULL DEFAULT 'BACKUP_FAILED,BACKUP_SUCCESS',
+    cpu_count INT NOT NULL DEFAULT 1
+);
+
+CREATE TABLE postgresql_clusters (
+    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+    cluster_id UUID NOT NULL REFERENCES clusters(id) ON DELETE CASCADE,
+    version TEXT NOT NULL,
+    host TEXT NOT NULL,
+    port INT NOT NULL,
+    username TEXT NOT NULL,
+    password TEXT NOT NULL,
+    is_https BOOLEAN NOT NULL DEFAULT FALSE
+);
+
+CREATE TABLE cluster_notifiers (
+    cluster_id UUID NOT NULL REFERENCES clusters(id) ON DELETE CASCADE,
+    notifier_id UUID NOT NULL REFERENCES notifiers(id) ON DELETE CASCADE,
+    PRIMARY KEY (cluster_id, notifier_id)
+);
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+DROP TABLE IF EXISTS cluster_notifiers;
+DROP TABLE IF EXISTS postgresql_clusters;
+DROP TABLE IF EXISTS clusters;
+-- +goose StatementEnd
diff --git a/backend/migrations/20251116183000_add_cluster_excluded_databases.sql b/backend/migrations/20251116183000_add_cluster_excluded_databases.sql
new file mode 100644
index 0000000..170a9ed
--- /dev/null
+++ b/backend/migrations/20251116183000_add_cluster_excluded_databases.sql
@@ -0,0 +1,15 @@
+-- +goose Up
+-- +goose StatementBegin
+
+CREATE TABLE IF NOT EXISTS cluster_excluded_databases (
+    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+    cluster_id UUID NOT NULL REFERENCES clusters(id) ON DELETE CASCADE,
+    name TEXT NOT NULL
+);
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+DROP TABLE IF EXISTS cluster_excluded_databases;
+-- +goose StatementEnd
diff --git a/backend/migrations/20251117220000_add_clusters_last_run_at.sql b/backend/migrations/20251117220000_add_clusters_last_run_at.sql
new file mode 100644
index 0000000..6e31217
--- /dev/null
+++ b/backend/migrations/20251117220000_add_clusters_last_run_at.sql
@@ -0,0 +1,5 @@
+-- +goose Up
+ALTER TABLE clusters ADD COLUMN IF NOT EXISTS last_run_at timestamptz NULL;
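+-- last_run_at: time of this cluster's most recent backup run; presumably maintained
+-- by the cluster background service, NULL until the first run.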
+
+-- +goose Down
+ALTER TABLE clusters DROP COLUMN IF EXISTS last_run_at;
diff --git a/backend/migrations/20251119100000_backup_configs_cluster_management.sql b/backend/migrations/20251119100000_backup_configs_cluster_management.sql
new file mode 100644
index 0000000..63b1a4f
--- /dev/null
+++ b/backend/migrations/20251119100000_backup_configs_cluster_management.sql
@@ -0,0 +1,9 @@
+-- +goose Up
+ALTER TABLE backup_configs ADD COLUMN IF NOT EXISTS cluster_id uuid NULL;
+ALTER TABLE backup_configs ADD COLUMN IF NOT EXISTS managed_by_cluster boolean NOT NULL DEFAULT false;
+CREATE INDEX IF NOT EXISTS idx_backup_configs_cluster_id ON backup_configs(cluster_id);
+
+-- +goose Down
+DROP INDEX IF EXISTS idx_backup_configs_cluster_id;
+ALTER TABLE backup_configs DROP COLUMN IF EXISTS managed_by_cluster;
+ALTER TABLE backup_configs DROP COLUMN IF EXISTS cluster_id;
diff --git a/frontend/src/entity/clusters/api/clusterApi.ts b/frontend/src/entity/clusters/api/clusterApi.ts
new file mode 100644
index 0000000..a24a0bb
--- /dev/null
+++ b/frontend/src/entity/clusters/api/clusterApi.ts
@@ -0,0 +1,96 @@
+import { getApplicationServer } from '../../../constants';
+import RequestOptions from '../../../shared/api/RequestOptions';
+import { apiHelper } from '../../../shared/api/apiHelper';
+import type { Cluster } from '../model/Cluster';
+
+export type PropagationChange = {
+  databaseId: string;
+  name: string;
+  changeStorage: boolean;
+  changeSchedule: boolean;
+};
+
+export type ApplyPropagationRequest = {
+  applyStorage: boolean;
+  applySchedule: boolean;
+  applyEnableBackups: boolean;
+  respectExclusions: boolean;
+};
+
+export const clusterApi = {
+  async createCluster(cluster: Cluster) {
+    const requestOptions: RequestOptions = new RequestOptions();
+    requestOptions.setBody(JSON.stringify(cluster));
+    return apiHelper.fetchPostJson<Cluster>(
+      `${getApplicationServer()}/api/v1/clusters`,
+      requestOptions,
+    );
+  },
+
+  async getClusters(workspaceId: string) {
+    const requestOptions: RequestOptions = new RequestOptions();
+    return apiHelper.fetchGetJson<Cluster[]>(
+      `${getApplicationServer()}/api/v1/clusters?workspace_id=${workspaceId}`,
+      requestOptions,
+      true,
+    );
+  },
+
+  async runClusterBackup(clusterId: string) {
+    const requestOptions: RequestOptions = new RequestOptions();
+    return apiHelper.fetchPostJson(
+      `${getApplicationServer()}/api/v1/clusters/${clusterId}/run-backup`,
+      requestOptions,
+    );
+  },
+
+  async updateCluster(clusterId: string, cluster: Cluster) {
+    const requestOptions: RequestOptions = new RequestOptions();
+    requestOptions.setBody(JSON.stringify(cluster));
+    return apiHelper.fetchPutJson<Cluster>(
+      `${getApplicationServer()}/api/v1/clusters/${clusterId}`,
+      requestOptions,
+    );
+  },
+
+  async getClusterDatabases(clusterId: string) {
+    const requestOptions: RequestOptions = new RequestOptions();
+    return apiHelper
+      .fetchGetJson<{ databases: string[] }>(
+        `${getApplicationServer()}/api/v1/clusters/${clusterId}/databases`,
+        requestOptions,
+        true,
+      )
+      .then((res) => res.databases);
+  },
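+  // Typical propagation flow (illustrative): preview first, then apply the same options.
+  //   const changes = await clusterApi.previewPropagation(id, opts);
+  //   if (changes.length > 0) await clusterApi.applyPropagation(id, opts);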
+
+  async previewPropagation(
+    clusterId: string,
+    opts: ApplyPropagationRequest,
+  ) {
+    const requestOptions: RequestOptions = new RequestOptions();
+    const qs = new URLSearchParams({
+      applyStorage: String(!!opts.applyStorage),
+      applySchedule: String(!!opts.applySchedule),
+      applyEnableBackups: String(!!opts.applyEnableBackups),
+      respectExclusions: String(!!opts.respectExclusions),
+    }).toString();
+    return apiHelper.fetchGetJson<PropagationChange[]>(
+      `${getApplicationServer()}/api/v1/clusters/${clusterId}/propagation/preview?${qs}`,
+      requestOptions,
+      true,
+    );
+  },
+
+  async applyPropagation(
+    clusterId: string,
+    body: ApplyPropagationRequest,
+  ) {
+    const requestOptions: RequestOptions = new RequestOptions();
+    requestOptions.setBody(JSON.stringify(body));
+    return apiHelper.fetchPostJson<PropagationChange[]>(
+      `${getApplicationServer()}/api/v1/clusters/${clusterId}/propagation/apply`,
+      requestOptions,
+    );
+  },
+};
diff --git a/frontend/src/entity/clusters/model/Cluster.ts b/frontend/src/entity/clusters/model/Cluster.ts
new file mode 100644
index 0000000..6fd19ee
--- /dev/null
+++ b/frontend/src/entity/clusters/model/Cluster.ts
@@ -0,0 +1,21 @@
+import type { Notifier } from '../../notifiers';
+import type { Interval } from '../../intervals';
+import type { PostgresqlCluster } from './PostgresqlCluster';
+
+export interface Cluster {
+  id?: string;
+  workspaceId: string;
+  name: string;
+  postgresql: PostgresqlCluster;
+
+  isBackupsEnabled?: boolean;
+  storePeriod?: string;
+  backupIntervalId?: string;
+  backupInterval?: Interval;
+  storageId?: string;
+  sendNotificationsOn?: string;
+  cpuCount?: number;
+
+  notifiers?: Notifier[];
+  excludedDatabases?: { id?: string; clusterId?: string; name: string }[];
+}
diff --git a/frontend/src/entity/clusters/model/PostgresqlCluster.ts b/frontend/src/entity/clusters/model/PostgresqlCluster.ts
new file mode 100644
index 0000000..287e409
--- /dev/null
+++ b/frontend/src/entity/clusters/model/PostgresqlCluster.ts
@@ -0,0 +1,10 @@
+export interface PostgresqlCluster {
+  id?: string;
+  clusterId?: string;
+  version: string; // '12' | '13' | ...
+  host: string;
+  port: number;
+  username: string;
+  password: string;
+  isHttps: boolean;
+}
diff --git a/frontend/src/entity/databases/api/databaseApi.ts b/frontend/src/entity/databases/api/databaseApi.ts
index ea07428..4bc41be 100644
--- a/frontend/src/entity/databases/api/databaseApi.ts
+++ b/frontend/src/entity/databases/api/databaseApi.ts
@@ -73,6 +73,17 @@ export const databaseApi = {
     );
   },
 
+  async listDatabasesDirect(database: Database) {
+    const requestOptions: RequestOptions = new RequestOptions();
+    requestOptions.setBody(JSON.stringify(database));
+    return apiHelper
+      .fetchPostJson<{ databases: string[] }>(
+        `${getApplicationServer()}/api/v1/databases/list-databases-direct`,
+        requestOptions,
+      )
+      .then((res) => res.databases);
+  },
+
   async isNotifierUsing(notifierId: string) {
     const requestOptions: RequestOptions = new RequestOptions();
     return apiHelper
diff --git a/frontend/src/features/clusters/ui/ClustersComponent.tsx b/frontend/src/features/clusters/ui/ClustersComponent.tsx
new file mode 100644
index 0000000..75ec7f0
--- /dev/null
+++ b/frontend/src/features/clusters/ui/ClustersComponent.tsx
@@ -0,0 +1,842 @@
+import { App, Button, Checkbox, Input, InputNumber, Modal, Select, Spin, Switch, TimePicker } from 'antd';
+import dayjs, { Dayjs } from 'dayjs';
+import { useEffect, useState } from 'react';
+
+import type { WorkspaceResponse } from '../../../entity/workspaces';
+import { clusterApi } from '../../../entity/clusters/api/clusterApi';
+import type { ApplyPropagationRequest, PropagationChange } from '../../../entity/clusters/api/clusterApi';
+import type { Cluster } from '../../../entity/clusters/model/Cluster';
+import { storageApi } from '../../../entity/storages/api/storageApi';
+import type { Storage } from '../../../entity/storages/models/Storage';
+import { IntervalType } from '../../../entity/intervals';
+import { getLocalDayOfMonth, getLocalWeekday, getUserTimeFormat, getUtcDayOfMonth, getUtcWeekday } from '../../../shared/time/utils';
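+// Note: backup schedule times are stored as UTC "HH:mm"; the getLocal*/getUtc*
+// helpers above convert between the user's local weekday/day-of-month and UTC.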
+
+interface Props {
+  contentHeight: number;
+  workspace: WorkspaceResponse;
+  isCanManageDBs: boolean;
+}
+
+export const ClustersComponent = ({ contentHeight, workspace, isCanManageDBs }: Props) => {
+  const { message } = App.useApp();
+  const [isLoading, setIsLoading] = useState(true);
+  const [clusters, setClusters] = useState<Cluster[]>([]);
+  const [storages, setStorages] = useState<Storage[]>([]);
+
+  const [isShowAddCluster, setIsShowAddCluster] = useState(false);
+  const [newCluster, setNewCluster] = useState<Cluster>({
+    workspaceId: workspace.id,
+    name: 'PG Cluster',
+    postgresql: {
+      version: '16',
+      host: '',
+      port: 5432,
+      username: '',
+      password: '',
+      isHttps: false,
+    },
+    isBackupsEnabled: true,
+    storePeriod: 'WEEK',
+    storageId: undefined,
+    cpuCount: 1,
+    sendNotificationsOn: 'BACKUP_FAILED,BACKUP_SUCCESS',
+  });
+
+  const [selectedClusterId, setSelectedClusterId] = useState<string | null>(null);
+  const [editCluster, setEditCluster] = useState<Cluster | null>(null);
+  const [isSaving, setIsSaving] = useState(false);
+  const [isLoadingDbs, setIsLoadingDbs] = useState(false);
+  const [clusterDbs, setClusterDbs] = useState<string[]>([]);
+  const [excluded, setExcluded] = useState<Set<string>>(new Set());
+  const timeFmt = getUserTimeFormat();
+
+  // Force-apply state
+  const [isPropagationOpen, setIsPropagationOpen] = useState(false);
+  const [propagationOpts, setPropagationOpts] = useState<ApplyPropagationRequest>({
+    applyStorage: true,
+    applySchedule: true,
+    applyEnableBackups: true,
+    respectExclusions: true,
+  });
+  const [previewLoading, setPreviewLoading] = useState(false);
+  const [preview, setPreview] = useState<PropagationChange[]>([]);
+  const [isApplyingPropagation, setIsApplyingPropagation] = useState(false);
+
+  const loadClusters = async () => {
+    setIsLoading(true);
+    try {
+      const list = await clusterApi.getClusters(workspace.id);
+      setClusters(list);
+    } catch (e) {
+      message.error((e as Error).message);
+    }
+    setIsLoading(false);
+  };
+
+  const loadStorages = async () => {
+    try {
+      const list = await storageApi.getStorages(workspace.id);
+      setStorages(list);
+    } catch (e) {
+      message.error((e as Error).message);
+    }
+  };
+
+  useEffect(() => {
+    loadClusters();
+    loadStorages();
+  }, [workspace.id]);
+
+  const selectCluster = (c: Cluster) => {
+    setSelectedClusterId(c.id!);
+    const cloned: Cluster = JSON.parse(JSON.stringify(c));
+    setEditCluster(cloned);
+    const ex = new Set((c.excludedDatabases || []).map((x) => x.name.toLowerCase()));
+    setExcluded(ex);
+    void loadClusterDatabases(c.id!);
+  };
+
+  const loadClusterDatabases = async (clusterId: string) => {
+    setIsLoadingDbs(true);
+    try {
+      const dbs = await clusterApi.getClusterDatabases(clusterId);
+      setClusterDbs(dbs);
+    } catch (e) {
+      message.error((e as Error).message);
+    }
+    setIsLoadingDbs(false);
+  };
+
+  const createCluster = async () => {
+    try {
+      const created = await clusterApi.createCluster(newCluster);
+      setClusters([created, ...clusters]);
+      setIsShowAddCluster(false);
+    } catch (e) {
+      message.error((e as Error).message);
+    }
+  };
+
+  const runClusterBackup = async (clusterId: string) => {
+    try {
+      await clusterApi.runClusterBackup(clusterId);
+      message.success('Cluster backup started');
+    } catch (e) {
+      message.error((e as Error).message);
+    }
+  };
+
+  const openPropagationModal = async () => {
+    if (!selectedClusterId) return;
+    setIsPropagationOpen(true);
+    setPreviewLoading(true);
+    try {
+      const res = await clusterApi.previewPropagation(selectedClusterId, propagationOpts);
+      setPreview(res);
+    } catch (e) {
+      message.error((e as Error).message);
+    }
+    setPreviewLoading(false);
+  };
+
+  const refreshPreview = async (opts: ApplyPropagationRequest) => {
+    setPropagationOpts(opts);
+    if (!selectedClusterId) return;
+    setPreviewLoading(true);
+    try {
+      const res = await clusterApi.previewPropagation(selectedClusterId, opts);
+      setPreview(res);
+    } catch (e) {
+      message.error((e as Error).message);
+    }
+    setPreviewLoading(false);
+  };
+
+  const applyPropagation = async () => {
+    if (!selectedClusterId) return;
+    setIsApplyingPropagation(true);
+    try {
+      const applied = await clusterApi.applyPropagation(selectedClusterId, propagationOpts);
+      message.success(`Applied to ${applied.length} databases`);
+      setIsPropagationOpen(false);
+    } catch (e) {
+      message.error((e as Error).message);
+    }
+    setIsApplyingPropagation(false);
+  };
+
+  const saveCluster = async () => {
+    if (!editCluster || !selectedClusterId) return;
+    setIsSaving(true);
+    try {
+      const payload: Cluster = {
+        ...editCluster,
+        excludedDatabases: Array.from(excluded).map((name) => ({ name })),
+      };
+      const updated = await clusterApi.updateCluster(selectedClusterId, payload);
+      // update list item
+      setClusters((prev) => prev.map((c) => (c.id === updated.id ? updated : c)));
+      setEditCluster(updated);
+      message.success('Cluster saved');
+    } catch (e) {
+      message.error((e as Error).message);
+    }
+    setIsSaving(false);
+  };
+
+  return (
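+    {/* Two-pane layout: cluster list on the left, selected cluster's settings on the right */}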
+
+ {isCanManageDBs && ( + + )} + + setIsPropagationOpen(false)} + onOk={applyPropagation} + okText="Apply" + confirmLoading={isApplyingPropagation} + width={600} + > +
Choose what to apply to existing databases for this cluster and preview the changes before applying.
+
+ refreshPreview({ ...propagationOpts, applyStorage: e.target.checked })} + > + Apply storage + + refreshPreview({ ...propagationOpts, applySchedule: e.target.checked })} + > + Apply schedule + + refreshPreview({ ...propagationOpts, applyEnableBackups: e.target.checked })} + > + Apply enable backups + + refreshPreview({ ...propagationOpts, respectExclusions: e.target.checked })} + > + Respect exclusions + +
+ + {previewLoading ? ( +
+ ) : preview.length === 0 ? ( +
No databases would be changed with the current options.
+ ) : ( +
+ {preview.map((p) => ( +
+
{p.name}
+
+ {p.changeStorage ? 'Storage' : ''}{p.changeStorage && p.changeSchedule ? ' · ' : ''}{p.changeSchedule ? 'Schedule' : ''} +
+
+ ))} +
+ )} +
+ +
+
{clusters.length} clusters
+ +
+ + {isLoading ? ( +
+ +
+ ) : clusters.length === 0 ? ( +
No clusters yet
+ ) : ( + clusters.map((c) => ( +
selectCluster(c)} + > +
{c.name}
+
+ {c.postgresql.host}:{c.postgresql.port} · {c.postgresql.username} +
+
+ + +
+
+ )) + )} +
+ +
+ {!editCluster ? ( +
Select a cluster on the left to manage settings, storage, and exclusions.
+ ) : ( +
+
Cluster settings
+ +
+
Name
+ setEditCluster({ ...(editCluster as Cluster), name: e.target.value })} + size="small" + className="max-w-[280px] grow" + placeholder="Cluster name" + /> +
+ +
+
PG version
+ setEditCluster({ ...(editCluster as Cluster), postgresql: { ...(editCluster!.postgresql), host: e.target.value.trim().replace('https://', '').replace('http://', '') } })} + size="small" + className="max-w-[280px] grow" + placeholder="Enter PG host" + /> +
+ +
+
Port
+ typeof v === 'number' && setEditCluster({ ...(editCluster as Cluster), postgresql: { ...(editCluster!.postgresql), port: v } })} + size="small" + className="max-w-[280px] grow" + /> +
+ +
+
Username
+ setEditCluster({ ...(editCluster as Cluster), postgresql: { ...(editCluster!.postgresql), username: e.target.value.trim() } })} + size="small" + className="max-w-[280px] grow" + /> +
+ +
+
Password
+ setEditCluster({ ...(editCluster as Cluster), postgresql: { ...(editCluster!.postgresql), password: e.target.value } })} + size="small" + className="max-w-[280px] grow" + placeholder="Leave empty to keep" + /> +
+ +
+
Use HTTPS
+ setEditCluster({ ...(editCluster as Cluster), postgresql: { ...(editCluster!.postgresql), isHttps: checked } })} + size="small" + /> +
+ +
+
Enable backups
+ setEditCluster({ ...(editCluster as Cluster), isBackupsEnabled: checked })} + size="small" + /> +
+ +
+
Store period
+ setEditCluster({ ...(editCluster as Cluster), storageId: v as string })} + size="small" + className="max-w-[280px] grow" + allowClear + placeholder="Select storage" + options={storages.map((s) => ({ label: s.name, value: s.id }))} + /> +
+ + {editCluster.isBackupsEnabled && ( + <> +
+
Backup interval
+ { + const ref = dayjs(editCluster.backupInterval?.timeOfDay || '04:00', 'HH:mm'); + setEditCluster({ + ...(editCluster as Cluster), + backupInterval: { + ...(editCluster.backupInterval || { id: undefined as unknown as string, interval: IntervalType.WEEKLY, timeOfDay: '04:00' }), + weekday: getUtcWeekday(localW, ref), + }, + }); + }} + size="small" + className="max-w-[280px] grow" + options={[ + { value: 1, label: 'Mon' }, + { value: 2, label: 'Tue' }, + { value: 3, label: 'Wed' }, + { value: 4, label: 'Thu' }, + { value: 5, label: 'Fri' }, + { value: 6, label: 'Sat' }, + { value: 7, label: 'Sun' }, + ]} + /> +
+ )} + + {editCluster.backupInterval?.interval === IntervalType.MONTHLY && ( +
+
Backup day of month
+ { + if (!localDom) return; + const ref = dayjs(editCluster.backupInterval?.timeOfDay || '04:00', 'HH:mm'); + setEditCluster({ + ...(editCluster as Cluster), + backupInterval: { + ...(editCluster.backupInterval || { id: undefined as unknown as string, interval: IntervalType.MONTHLY, timeOfDay: '04:00' }), + dayOfMonth: getUtcDayOfMonth(localDom, ref), + }, + }); + }} + size="small" + className="max-w-[280px] grow" + /> +
+ )} + + {editCluster.backupInterval?.interval !== IntervalType.HOURLY && ( +
+
Backup time of day
+ { + if (!t) return; + setEditCluster({ + ...(editCluster as Cluster), + backupInterval: { + ...(editCluster.backupInterval || { id: undefined as unknown as string, interval: IntervalType.DAILY }), + timeOfDay: t.utc().format('HH:mm'), + }, + }); + }} + /> +
+ )} + + )} + +
+ + + + +
+ +
+
Databases in cluster
+ {isLoadingDbs ? ( +
+ +
+ ) : clusterDbs.length === 0 ? ( +
No databases found, or the cluster could not be reached
+ ) : ( +
+ {clusterDbs.map((name) => ( +
+
{name}
+ { + const next = new Set(excluded); + if (e.target.checked) next.add(name.toLowerCase()); + else next.delete(name.toLowerCase()); + setExcluded(next); + }} + > + Exclude + +
+ ))} +
+ )} +
+
+ )} +
+ + {isShowAddCluster && ( + } + open={isShowAddCluster} + onCancel={() => setIsShowAddCluster(false)} + width={520} + > +
+
+
Name
+ setNewCluster({ ...newCluster, name: e.target.value })} + size="small" + className="max-w-[220px] grow" + placeholder="Cluster name" + /> +
+ +
+
PG version
+ setNewCluster({ ...newCluster, postgresql: { ...newCluster.postgresql, host: e.target.value.trim().replace('https://', '').replace('http://', '') } })} + size="small" + className="max-w-[220px] grow" + placeholder="Enter PG host" + /> +
+ +
+
Port
+ typeof v === 'number' && setNewCluster({ ...newCluster, postgresql: { ...newCluster.postgresql, port: v } })} + size="small" + className="max-w-[220px] grow" + placeholder="Enter PG port" + /> +
+ +
+
Username
+ setNewCluster({ ...newCluster, postgresql: { ...newCluster.postgresql, username: e.target.value.trim() } })} + size="small" + className="max-w-[220px] grow" + placeholder="Enter PG username" + /> +
+ +
+
Password
+ setNewCluster({ ...newCluster, postgresql: { ...newCluster.postgresql, password: e.target.value } })} + size="small" + className="max-w-[220px] grow" + placeholder="Enter PG password" + /> +
+ +
+
Use HTTPS
+ setNewCluster({ ...newCluster, postgresql: { ...newCluster.postgresql, isHttps: checked } })} + size="small" + /> +
+ +
+
Enable backups
+ setNewCluster({ ...newCluster, isBackupsEnabled: checked })} + size="small" + /> +
+ + {newCluster.isBackupsEnabled && ( + <> +
+
Backup interval
+ setNewCluster({ ...newCluster, backupInterval: { ...(newCluster.backupInterval || { id: undefined as unknown as string, interval: IntervalType.WEEKLY, timeOfDay: '04:00' }), weekday: wd as number } })} + size="small" + className="max-w-[220px] grow" + options={[ + { value: 1, label: 'Mon' }, + { value: 2, label: 'Tue' }, + { value: 3, label: 'Wed' }, + { value: 4, label: 'Thu' }, + { value: 5, label: 'Fri' }, + { value: 6, label: 'Sat' }, + { value: 7, label: 'Sun' }, + ]} + /> +
+ )} + + {newCluster.backupInterval?.interval === IntervalType.MONTHLY && ( +
+
Backup day of month
+ typeof v === 'number' && setNewCluster({ ...newCluster, backupInterval: { ...(newCluster.backupInterval || { id: undefined as unknown as string, interval: IntervalType.MONTHLY, timeOfDay: '04:00' }), dayOfMonth: v } })} + size="small" + className="max-w-[220px] grow" + /> +
+ )} + + {newCluster.backupInterval?.interval !== IntervalType.HOURLY && ( +
+
Backup time of day
+ t && setNewCluster({ ...newCluster, backupInterval: { ...(newCluster.backupInterval || { id: undefined as unknown as string, interval: IntervalType.DAILY }), timeOfDay: t.utc().format('HH:mm') } })} + /> +
+ )} + + )} + +
+
CPU count
+ typeof v === 'number' && setNewCluster({ ...newCluster, cpuCount: v })} + size="small" + className="max-w-[220px] grow" + placeholder="CPU count for pg_dump" + /> +
+ +
+
Store period
+ setNewCluster({ ...newCluster, storageId: v as string })} + size="small" + className="max-w-[220px] grow" + allowClear + placeholder="Select storage" + options={storages.map((s) => ({ label: s.name, value: s.id }))} + /> +
+ +
+ + +
+ + )} +
+ ); +}; diff --git a/frontend/src/features/databases/ui/CreateDatabasesFromClusterComponent.tsx b/frontend/src/features/databases/ui/CreateDatabasesFromClusterComponent.tsx new file mode 100644 index 0000000..06370d4 --- /dev/null +++ b/frontend/src/features/databases/ui/CreateDatabasesFromClusterComponent.tsx @@ -0,0 +1,331 @@ +import { Button, Input, InputNumber, Select, Spin, Switch } from 'antd'; +import { useState } from 'react'; + +import { backupsApi, type BackupConfig, backupConfigApi } from '../../../entity/backups'; +import { type Database, DatabaseType, type PostgresqlDatabase, databaseApi } from '../../../entity/databases'; +import { PostgresqlVersion } from '../../../entity/databases/model/postgresql/PostgresqlVersion'; +import type { Notifier } from '../../../entity/notifiers'; +import { EditBackupConfigComponent } from '../../backups'; +import { EditDatabaseNotifiersComponent } from './edit/EditDatabaseNotifiersComponent'; + +interface Props { + workspaceId: string; + onCreated: () => void; + onClose: () => void; +} + +export const CreateDatabasesFromClusterComponent = ({ workspaceId, onCreated, onClose }: Props) => { + const [step, setStep] = useState<'cluster-settings' | 'select-databases' | 'backup-config' | 'notifiers' | 'creating'>('cluster-settings'); + + const [clusterDb, setClusterDb] = useState({ + id: undefined as unknown as string, + name: 'Postgres Cluster', + workspaceId, + type: DatabaseType.POSTGRES, + postgresql: { + id: undefined as unknown as string, + version: undefined as unknown as PostgresqlVersion, + host: '', + port: 5432, + username: '', + password: '', + isHttps: false, + } as unknown as PostgresqlDatabase, + notifiers: [], + } as Database); + + const [isLoadingDbs, setIsLoadingDbs] = useState(false); + const [availableDbs, setAvailableDbs] = useState([]); + const [selectedDbNames, setSelectedDbNames] = useState([]); + + const [backupConfig, setBackupConfig] = useState(); + const [notifiers, setNotifiers] = useState([]); + + const [isCreating, setIsCreating] = useState(false); + const [createdCount, setCreatedCount] = useState(0); + + // Step 1: load databases from cluster + const loadDatabasesFromCluster = async () => { + setIsLoadingDbs(true); + try { + const dbs = await databaseApi.listDatabasesDirect(clusterDb); + setAvailableDbs(dbs); + // preselect all + setSelectedDbNames(dbs); + setStep('select-databases'); + } catch (e) { + alert((e as Error).message); + } + setIsLoadingDbs(false); + }; + + // Step 5: create databases + const createDatabases = async () => { + if (!backupConfig) return; + + setIsCreating(true); + setCreatedCount(0); + + try { + for (const dbName of selectedDbNames) { + // build per-db object + const newDb: Database = { + id: undefined as unknown as string, + name: dbName, + workspaceId, + type: DatabaseType.POSTGRES, + postgresql: { + ...(clusterDb.postgresql as PostgresqlDatabase), + database: dbName, + } as PostgresqlDatabase, + notifiers, + } as Database; + + const created = await databaseApi.createDatabase(newDb); + + const cfg: BackupConfig = { ...backupConfig, databaseId: created.id } as BackupConfig; + await backupConfigApi.saveBackupConfig(cfg); + if (cfg.isBackupsEnabled) { + await backupsApi.makeBackup(created.id); + } + + setCreatedCount((c: number) => c + 1); + } + + onCreated(); + onClose(); + } catch (e) { + alert((e as Error).message); + } + + setIsCreating(false); + }; + + // Render steps + if (step === 'cluster-settings') { + const pg = clusterDb.postgresql! 
+
+    const isAllFieldsFilled = Boolean(pg.version && pg.host && pg.port && pg.username && pg.password);
+
+    return (
+
+
PG version
+ + setClusterDb({ + ...clusterDb, + postgresql: { ...pg, host: e.target.value.trim().replace('https://', '').replace('http://', '') }, + }) + } + size="small" + className="max-w-[200px] grow" + placeholder="Enter PG host" + /> +
+ +
+
Port
+ typeof v === 'number' && setClusterDb({ ...clusterDb, postgresql: { ...pg, port: v } })} + size="small" + className="max-w-[200px] grow" + placeholder="Enter PG port" + /> +
+ +
+
Username
+ setClusterDb({ ...clusterDb, postgresql: { ...pg, username: e.target.value.trim() } })} + size="small" + className="max-w-[200px] grow" + placeholder="Enter PG username" + /> +
+ +
+
Password
+ setClusterDb({ ...clusterDb, postgresql: { ...pg, password: e.target.value } })} + size="small" + className="max-w-[200px] grow" + placeholder="Enter PG password" + /> +
+ +
+
Use HTTPS
+ setClusterDb({ ...clusterDb, postgresql: { ...pg, isHttps: checked } })} + size="small" + /> +
+ +
+ +
+
+ ); + } + + if (step === 'select-databases') { + return ( +
+
Select databases to back up
+
+ {selectedDbNames.length} / {availableDbs.length} selected + + + + +
+