Commit c97ef3a

Run backup command for cloud-based repos in the backup job.
Add/adjust tests for cloud repo backup job changes.
1 parent a5052b1 commit c97ef3a
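
At a glance, the commit splits backup-job construction by repository type: a PVC-backed (volume) repo keeps the existing flow where the job execs into the dedicated repo host via the crunchy pgbackrest wrapper and COMMAND/COMMAND_OPTS environment variables, while a cloud-based repo (S3, GCS, Azure) now runs the pgbackrest backup command directly in the job pod. Below is a minimal, self-contained sketch of that split; the repoSpec struct and the option strings are simplified stand-ins rather than the operator's real v1beta1 API, and the actual branching lives in generateBackupJobSpecIntent in the pgbackrest.go diff further down.

package main

import (
	"fmt"
	"strings"
)

// repoSpec is a simplified stand-in for v1beta1.PGBackRestRepo: only the field
// needed to tell a PVC-backed repo apart from a cloud-based (S3/GCS/Azure) one.
type repoSpec struct {
	Name      string
	HasVolume bool
}

// backupContainerCommand mirrors the branch added to generateBackupJobSpecIntent:
// a volume-backed repo keeps the env-driven exec-into-repo-host flow, while a
// cloud repo runs the pgbackrest backup command directly in the job pod.
func backupContainerCommand(repo repoSpec, cmdOpts []string) (command []string, env map[string]string) {
	if repo.HasVolume {
		command = []string{"/opt/crunchy/bin/pgbackrest"}
		env = map[string]string{
			"COMMAND":      "backup",
			"COMMAND_OPTS": strings.Join(cmdOpts, " "),
		}
		return command, env
	}
	// Cloud repo: no exec wrapper; the job pod itself runs the command.
	return append([]string{"/bin/pgbackrest", "backup"}, cmdOpts...), nil
}

func main() {
	opts := []string{"--stanza=db", "--repo=1"} // illustrative options only
	cmd, env := backupContainerCommand(repoSpec{Name: "repo1", HasVolume: true}, opts)
	fmt.Println("volume repo:", cmd, env)

	cmd, env = backupContainerCommand(repoSpec{Name: "repo1"}, opts)
	fmt.Println("cloud repo:", cmd, env)
}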

9 files changed: +694, -268 lines changed

internal/controller/postgrescluster/instance.go

Lines changed: 5 additions & 7 deletions
@@ -1176,7 +1176,7 @@ func (r *Reconciler) reconcileInstance(
 	}
 	if err == nil {
 		instanceCertificates, err = r.reconcileInstanceCertificates(
-			ctx, cluster, spec, instance, rootCA)
+			ctx, cluster, spec, instance, rootCA, backupsSpecFound)
 	}
 	if err == nil {
 		postgresDataVolume, err = r.reconcilePostgresDataVolume(ctx, cluster, spec, instance, clusterVolumes, nil)
@@ -1370,10 +1370,8 @@ func addPGBackRestToInstancePodSpec(
 	ctx context.Context, cluster *v1beta1.PostgresCluster,
 	instanceCertificates *corev1.Secret, instancePod *corev1.PodSpec,
 ) {
-	if pgbackrest.RepoHostVolumeDefined(cluster) {
-		pgbackrest.AddServerToInstancePod(ctx, cluster, instancePod,
-			instanceCertificates.Name)
-	}
+	pgbackrest.AddServerToInstancePod(ctx, cluster, instancePod,
+		instanceCertificates.Name)
 
 	pgbackrest.AddConfigToInstancePod(cluster, instancePod)
 }
@@ -1422,7 +1420,7 @@ func (r *Reconciler) reconcileInstanceConfigMap(
 func (r *Reconciler) reconcileInstanceCertificates(
 	ctx context.Context, cluster *v1beta1.PostgresCluster,
 	spec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet,
-	root *pki.RootCertificateAuthority,
+	root *pki.RootCertificateAuthority, backupsSpecFound bool,
 ) (*corev1.Secret, error) {
 	existing := &corev1.Secret{ObjectMeta: naming.InstanceCertificates(instance)}
 	err := errors.WithStack(client.IgnoreNotFound(
@@ -1465,7 +1463,7 @@ func (r *Reconciler) reconcileInstanceCertificates(
 			root.Certificate, leafCert.Certificate,
 			leafCert.PrivateKey, instanceCerts)
 	}
-	if err == nil {
+	if err == nil && backupsSpecFound {
 		err = pgbackrest.InstanceCertificates(ctx, cluster,
 			root.Certificate, leafCert.Certificate, leafCert.PrivateKey,
 			instanceCerts)

internal/controller/postgrescluster/instance_test.go

Lines changed: 26 additions & 53 deletions
@@ -546,49 +546,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) {
 		},
 	}
 
-	t.Run("NoVolumeRepo", func(t *testing.T) {
-		cluster := cluster.DeepCopy()
-		cluster.Spec.Backups.PGBackRest.Repos = nil
-
-		out := pod.DeepCopy()
-		addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out)
-
-		// Only Containers and Volumes fields have changed.
-		assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes"))
-
-		// Only database container has mounts.
-		// Other containers are ignored.
-		assert.Assert(t, cmp.MarshalMatches(out.Containers, `
-- name: database
-  resources: {}
-  volumeMounts:
-  - mountPath: /etc/pgbackrest/conf.d
-    name: pgbackrest-config
-    readOnly: true
-- name: other
-  resources: {}
-		`))
-
-		// Instance configuration files but no certificates.
-		// Other volumes are ignored.
-		assert.Assert(t, cmp.MarshalMatches(out.Volumes, `
-- name: other
-- name: postgres-data
-- name: postgres-wal
-- name: pgbackrest-config
-  projected:
-    sources:
-    - configMap:
-        items:
-        - key: pgbackrest_instance.conf
-          path: pgbackrest_instance.conf
-        - key: config-hash
-          path: config-hash
-        name: hippo-pgbackrest-config
-		`))
-	})
-
-	t.Run("OneVolumeRepo", func(t *testing.T) {
+	t.Run("CloudOrVolumeSameBehavior", func(t *testing.T) {
 		alwaysExpect := func(t testing.TB, result *corev1.PodSpec) {
 			// Only Containers and Volumes fields have changed.
 			assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes"))
@@ -637,21 +595,31 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) {
 			`))
 		}
 
-		cluster := cluster.DeepCopy()
-		cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{
+		clusterWithVolume := cluster.DeepCopy()
+		clusterWithVolume.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{
 			{
 				Name:   "repo1",
 				Volume: new(v1beta1.RepoPVC),
 			},
 		}
 
-		out := pod.DeepCopy()
-		addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out)
-		alwaysExpect(t, out)
+		clusterWithCloudRepo := cluster.DeepCopy()
+		clusterWithCloudRepo.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{
+			{
+				Name: "repo1",
+				GCS:  new(v1beta1.RepoGCS),
+			},
+		}
+
+		outWithVolume := pod.DeepCopy()
+		addPGBackRestToInstancePodSpec(ctx, clusterWithVolume, &certificates, outWithVolume)
+		alwaysExpect(t, outWithVolume)
 
-		// The TLS server is added and configuration mounted.
-		// It has PostgreSQL volumes mounted while other volumes are ignored.
-		assert.Assert(t, cmp.MarshalMatches(out.Containers, `
+		outWithCloudRepo := pod.DeepCopy()
+		addPGBackRestToInstancePodSpec(ctx, clusterWithCloudRepo, &certificates, outWithCloudRepo)
+		alwaysExpect(t, outWithCloudRepo)
+
+		outContainers := `
 - name: database
   resources: {}
   volumeMounts:
@@ -739,7 +707,12 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) {
   - mountPath: /etc/pgbackrest/conf.d
     name: pgbackrest-config
    readOnly: true
-		`))
+		`
+
+		// The TLS server is added and configuration mounted.
+		// It has PostgreSQL volumes mounted while other volumes are ignored.
+		assert.Assert(t, cmp.MarshalMatches(outWithVolume.Containers, outContainers))
+		assert.Assert(t, cmp.MarshalMatches(outWithCloudRepo.Containers, outContainers))
 
 	t.Run("CustomResources", func(t *testing.T) {
 		cluster := cluster.DeepCopy()
@@ -756,7 +729,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) {
 			},
 		}
 
-		before := out.DeepCopy()
+		before := outWithVolume.DeepCopy()
 		out := pod.DeepCopy()
 		addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out)
 		alwaysExpect(t, out)

internal/controller/postgrescluster/pgbackrest.go

Lines changed: 42 additions & 69 deletions
@@ -23,7 +23,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
@@ -775,12 +774,7 @@ func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresC
 // generateBackupJobSpecIntent generates a JobSpec for a pgBackRest backup job
 func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster,
 	repo v1beta1.PGBackRestRepo, serviceAccountName string,
-	labels, annotations map[string]string, opts ...string) (*batchv1.JobSpec, error) {
-
-	selector, containerName, err := getPGBackRestExecSelector(postgresCluster, repo)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
+	labels, annotations map[string]string, opts ...string) *batchv1.JobSpec {
 
 	repoIndex := regexRepoIndex.FindString(repo.Name)
 	cmdOpts := []string{
@@ -795,21 +789,31 @@ func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.P
 	cmdOpts = append(cmdOpts, opts...)
 
 	container := corev1.Container{
-		Command: []string{"/opt/crunchy/bin/pgbackrest"},
-		Env: []corev1.EnvVar{
-			{Name: "COMMAND", Value: "backup"},
-			{Name: "COMMAND_OPTS", Value: strings.Join(cmdOpts, " ")},
-			{Name: "COMPARE_HASH", Value: "true"},
-			{Name: "CONTAINER", Value: containerName},
-			{Name: "NAMESPACE", Value: postgresCluster.GetNamespace()},
-			{Name: "SELECTOR", Value: selector.String()},
-		},
 		Image:           config.PGBackRestContainerImage(postgresCluster),
 		ImagePullPolicy: postgresCluster.Spec.ImagePullPolicy,
 		Name:            naming.PGBackRestRepoContainerName,
 		SecurityContext: initialize.RestrictedSecurityContext(),
 	}
 
+	// If the repo that we are backing up to is a local volume, we will configure
+	// the job to use the pgbackrest go binary to exec into the repo host and run
+	// the backup. If the repo is a cloud-based repo, we will run the pgbackrest
+	// backup command directly in the job pod.
+	if repo.Volume != nil {
+		container.Command = []string{"/opt/crunchy/bin/pgbackrest"}
+		container.Env = []corev1.EnvVar{
+			{Name: "COMMAND", Value: "backup"},
+			{Name: "COMMAND_OPTS", Value: strings.Join(cmdOpts, " ")},
+			{Name: "COMPARE_HASH", Value: "true"},
+			{Name: "CONTAINER", Value: naming.PGBackRestRepoContainerName},
+			{Name: "NAMESPACE", Value: postgresCluster.GetNamespace()},
+			{Name: "SELECTOR", Value: naming.PGBackRestDedicatedSelector(postgresCluster.GetName()).String()},
+		}
+	} else {
+		container.Command = []string{"/bin/pgbackrest", "backup"}
+		container.Command = append(container.Command, cmdOpts...)
+	}
+
 	if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil {
 		container.Resources = postgresCluster.Spec.Backups.PGBackRest.Jobs.Resources
 	}
@@ -855,13 +859,16 @@ func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.P
 	jobSpec.Template.Spec.ImagePullSecrets = postgresCluster.Spec.ImagePullSecrets
 
 	// add pgBackRest configs to template
-	if containerName == naming.PGBackRestRepoContainerName {
+	if repo.Volume != nil {
 		pgbackrest.AddConfigToRepoPod(postgresCluster, &jobSpec.Template.Spec)
 	} else {
-		pgbackrest.AddConfigToInstancePod(postgresCluster, &jobSpec.Template.Spec)
+		// If we are doing a cloud repo backup, we need to give pgbackrest proper permissions
+		// to read certificate files
+		jobSpec.Template.Spec.SecurityContext = postgres.PodSecurityContext(postgresCluster)
+		pgbackrest.AddConfigToCloudBackupJob(postgresCluster, &jobSpec.Template)
 	}
 
-	return jobSpec, nil
+	return jobSpec
 }
 
 // +kubebuilder:rbac:groups="",resources="configmaps",verbs={delete,list}
@@ -2017,14 +2024,12 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster,
 	return nil
 }
 
-// reconcilePGBackRestConfig is responsible for reconciling the pgBackRest ConfigMaps and Secrets.
+// reconcilePGBackRestConfig is responsible for reconciling the pgBackRest ConfigMaps.
 func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context,
 	postgresCluster *v1beta1.PostgresCluster,
 	repoHostName, configHash, serviceName, serviceNamespace string,
 	instanceNames []string) error {
 
-	log := logging.FromContext(ctx).WithValues("reconcileResource", "repoConfig")
-
 	backrestConfig := pgbackrest.CreatePGBackRestConfigMapIntent(postgresCluster, repoHostName,
 		configHash, serviceName, serviceNamespace, instanceNames)
 	if err := controllerutil.SetControllerReference(postgresCluster, backrestConfig,
@@ -2035,12 +2040,6 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context,
 		return errors.WithStack(err)
 	}
 
-	repoHostConfigured := pgbackrest.RepoHostVolumeDefined(postgresCluster)
-	if !repoHostConfigured {
-		log.V(1).Info("skipping SSH reconciliation, no repo hosts configured")
-		return nil
-	}
-
 	return nil
 }
 
@@ -2442,11 +2441,8 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context,
 	backupJob.ObjectMeta.Labels = labels
 	backupJob.ObjectMeta.Annotations = annotations
 
-	spec, err := generateBackupJobSpecIntent(ctx, postgresCluster, repo,
+	spec := generateBackupJobSpecIntent(ctx, postgresCluster, repo,
 		serviceAccount.GetName(), labels, annotations, backupOpts...)
-	if err != nil {
-		return errors.WithStack(err)
-	}
 
 	backupJob.Spec = *spec
 
@@ -2535,11 +2531,15 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context,
 		replicaRepoReady = (condition.Status == metav1.ConditionTrue)
 	}
 
-	// get pod name and container name as needed to exec into the proper pod and create
-	// the pgBackRest backup
-	_, containerName, err := getPGBackRestExecSelector(postgresCluster, replicaCreateRepo)
-	if err != nil {
-		return errors.WithStack(err)
+	// TODO: Since we now only exec into the repo host when backing up to a local volume and
+	// run the backup in the job pod when backing up to a cloud-based repo, we should consider
+	// using a different value than the container name for the "pgbackrest-config" annotation
+	// that we attach to these backups
+	var containerName string
+	if replicaCreateRepo.Volume != nil {
+		containerName = naming.PGBackRestRepoContainerName
+	} else {
+		containerName = naming.ContainerDatabase
 	}
 
 	// determine if the dedicated repository host is ready using the repo host ready status
@@ -2591,10 +2591,10 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context,
 		}
 	}
 
-	dedicatedEnabled := pgbackrest.RepoHostVolumeDefined(postgresCluster)
 	// return if no job has been created and the replica repo or the dedicated
 	// repo host is not ready
-	if job == nil && ((dedicatedEnabled && !dedicatedRepoReady) || !replicaRepoReady) {
+	if job == nil && ((pgbackrest.RepoHostVolumeDefined(postgresCluster) && !dedicatedRepoReady) ||
+		!replicaRepoReady) {
 		return nil
 	}
 
@@ -2619,11 +2619,8 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context,
 	backupJob.ObjectMeta.Labels = labels
 	backupJob.ObjectMeta.Annotations = annotations
 
-	spec, err := generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo,
+	spec := generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo,
 		serviceAccount.GetName(), labels, annotations)
-	if err != nil {
-		return errors.WithStack(err)
-	}
 
 	backupJob.Spec = *spec
 
@@ -2805,27 +2802,6 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context,
 	return false, nil
 }
 
-// getPGBackRestExecSelector returns a selector and container name that allows the proper
-// Pod (along with a specific container within it) to be found within the Kubernetes
-// cluster as needed to exec into the container and run a pgBackRest command.
-func getPGBackRestExecSelector(postgresCluster *v1beta1.PostgresCluster,
-	repo v1beta1.PGBackRestRepo) (labels.Selector, string, error) {
-
-	var err error
-	var podSelector labels.Selector
-	var containerName string
-
-	if repo.Volume != nil {
-		podSelector = naming.PGBackRestDedicatedSelector(postgresCluster.GetName())
-		containerName = naming.PGBackRestRepoContainerName
-	} else {
-		podSelector, err = naming.AsSelector(naming.ClusterPrimary(postgresCluster.GetName()))
-		containerName = naming.ContainerDatabase
-	}
-
-	return podSelector, containerName, err
-}
-
 // getRepoHostStatus is responsible for returning the pgBackRest status for the
 // provided pgBackRest repository host
 func getRepoHostStatus(repoHost *appsv1.StatefulSet) *v1beta1.RepoHostStatus {
@@ -3070,11 +3046,8 @@ func (r *Reconciler) reconcilePGBackRestCronJob(
 	// set backup type (i.e. "full", "diff", "incr")
 	backupOpts := []string{"--type=" + backupType}
 
-	jobSpec, err := generateBackupJobSpecIntent(ctx, cluster, repo,
+	jobSpec := generateBackupJobSpecIntent(ctx, cluster, repo,
 		serviceAccount.GetName(), labels, annotations, backupOpts...)
-	if err != nil {
-		return errors.WithStack(err)
-	}
 
 	// Suspend cronjobs when shutdown or read-only. Any jobs that have already
 	// started will continue.
@@ -3107,7 +3080,7 @@ func (r *Reconciler) reconcilePGBackRestCronJob(
 
 	// set metadata
 	pgBackRestCronJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("CronJob"))
-	err = errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob))
+	err := errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob))
 
 	if err == nil {
 		err = r.apply(ctx, pgBackRestCronJob)
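
With the error return gone from generateBackupJobSpecIntent, call sites in the diff above simply use the returned JobSpec. Below is a test-style sketch of how the cloud-repo branch might be exercised: testCluster() is a hypothetical helper, the import paths assume the operator's usual module layout, and reading Containers[0] assumes the backup container is the only container in the job template, as in the construction above. None of this is part of the commit itself.

package postgrescluster

import (
	"context"
	"testing"

	"gotest.tools/v3/assert"

	"github.com/crunchydata/postgres-operator/internal/naming"
	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

func TestCloudBackupJobRunsPGBackRestDirectly(t *testing.T) {
	cluster := testCluster() // hypothetical helper returning a minimal *v1beta1.PostgresCluster
	repo := v1beta1.PGBackRestRepo{Name: "repo1", GCS: new(v1beta1.RepoGCS)}

	// The function no longer returns an error.
	spec := generateBackupJobSpecIntent(context.Background(), cluster, repo,
		"backup-sa", nil, nil)

	container := spec.Template.Spec.Containers[0]
	assert.Equal(t, container.Name, naming.PGBackRestRepoContainerName)

	// For a cloud repo, the job pod runs the backup command itself instead of
	// exec-ing into the repo host.
	assert.Equal(t, container.Command[0], "/bin/pgbackrest")
	assert.Equal(t, container.Command[1], "backup")
}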