198 changes: 198 additions & 0 deletions test/e2e/internal/tests/backup/backup_restore.go
@@ -19,6 +19,7 @@ package backup

import (
"fmt"
"strings"
"time"

v1 "github.com/cloudnative-pg/api/pkg/api/v1"
@@ -177,4 +178,201 @@ var _ = Describe("Backup and restore", func() {
&s3BackupPluginBackupPluginRestore{},
),
)

DescribeTable("should perform point-in-time recovery",
func(
ctx SpecContext,
factory pitrTestCaseFactory,
) {
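// Test flow: seed a table, take a backup, insert a row, record now() as the PITR
// target, insert a row that must not survive, force a WAL switch so everything is
// archived, then restore a new cluster at that target and check which rows came back.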
testResources := factory.createBackupRestoreTestResources(namespace.Name)

By("starting the object store deployment")
Expect(testResources.ObjectStoreResources.Create(ctx, cl)).To(Succeed())

By("creating the Archive")
Expect(cl.Create(ctx, testResources.Archive)).To(Succeed())

By("creating a CloudNativePG cluster")
src := testResources.SrcCluster
Expect(cl.Create(ctx, testResources.SrcCluster)).To(Succeed())

By("having the cluster ready")
Eventually(func(g Gomega) {
g.Expect(cl.Get(
ctx,
types.NamespacedName{
Name: src.Name,
Namespace: src.Namespace,
},
src)).To(Succeed())
g.Expect(internalCluster.IsReady(*src)).To(BeTrue())
}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())

By("adding initial data to PostgreSQL")
clientSet, cfg, err := internalClient.NewClientSet()
Expect(err).NotTo(HaveOccurred())
_, _, err = command.ExecuteInContainer(ctx,
*clientSet,
cfg,
command.ContainerLocator{
NamespaceName: src.Namespace,
PodName: fmt.Sprintf("%v-1", src.Name),
ContainerName: "postgres",
},
nil,
[]string{"psql", "-tAc", "CREATE TABLE pitr_test (id int, data text, created_at timestamp DEFAULT now());"})
Expect(err).NotTo(HaveOccurred())

_, _, err = command.ExecuteInContainer(ctx,
*clientSet,
cfg,
command.ContainerLocator{
NamespaceName: src.Namespace,
PodName: fmt.Sprintf("%v-1", src.Name),
ContainerName: "postgres",
},
nil,
[]string{"psql", "-tAc", "INSERT INTO pitr_test (id, data) VALUES (1, 'before_backup');"})
Expect(err).NotTo(HaveOccurred())

By("creating a backup")
backup := testResources.SrcBackup
Expect(cl.Create(ctx, backup)).To(Succeed())

By("waiting for the backup to complete")
Eventually(func(g Gomega) {
g.Expect(cl.Get(ctx, types.NamespacedName{Name: backup.Name, Namespace: backup.Namespace},
backup)).To(Succeed())
g.Expect(backup.Status.Phase).To(BeEquivalentTo(v1.BackupPhaseCompleted))
}).Within(2 * time.Minute).WithPolling(5 * time.Second).Should(Succeed())

_, _, err = command.ExecuteInContainer(ctx,
*clientSet,
cfg,
command.ContainerLocator{
NamespaceName: src.Namespace,
PodName: fmt.Sprintf("%v-1", src.Name),
ContainerName: "postgres",
},
nil,
[]string{"psql", "-tAc", "INSERT INTO pitr_test (id, data) VALUES (2, 'after_backup');"})
Expect(err).NotTo(HaveOccurred())

By("recording timestamp for PITR target after adding more data")
time.Sleep(2 * time.Second) // Ensure timestamp difference

// Record a timestamp for PITR target
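// The value returned by now() is used verbatim as the recovery target, so it must
// fall strictly between the 'after_backup' insert and the 'should_not_appear_in_pitr' insert.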
output, _, err := command.ExecuteInContainer(ctx,
*clientSet,
cfg,
command.ContainerLocator{
NamespaceName: src.Namespace,
PodName: fmt.Sprintf("%v-1", src.Name),
ContainerName: "postgres",
},
nil,
[]string{"psql", "-tAc", "SELECT now()::text;"})
Expect(err).NotTo(HaveOccurred())
pitrTargetTime := strings.TrimSpace(output)

By("adding final data that should not appear in PITR restore")
_, _, err = command.ExecuteInContainer(ctx,
*clientSet,
cfg,
command.ContainerLocator{
NamespaceName: src.Namespace,
PodName: fmt.Sprintf("%v-1", src.Name),
ContainerName: "postgres",
},
nil,
[]string{"psql", "-tAc", "INSERT INTO pitr_test (id, data) VALUES (3, 'should_not_appear_in_pitr');"})
Expect(err).NotTo(HaveOccurred())

By("forcing WAL switch to ensure data is archived")
_, _, err = command.ExecuteInContainer(ctx,
*clientSet,
cfg,
command.ContainerLocator{
NamespaceName: src.Namespace,
PodName: fmt.Sprintf("%v-1", src.Name),
ContainerName: "postgres",
},
nil,
[]string{"psql", "-tAc", "SELECT pg_switch_wal();"})
Expect(err).NotTo(HaveOccurred())

time.Sleep(5 * time.Second) // give the archiver time to upload the switched WAL segment

By("performing point-in-time recovery to specific timestamp")
pitrCluster := factory.createPITRCluster(namespace.Name, pitrTargetTime)
Expect(cl.Create(ctx, pitrCluster)).To(Succeed())

By("having the PITR cluster ready")
Eventually(func(g Gomega) {
g.Expect(cl.Get(ctx,
types.NamespacedName{Name: pitrCluster.Name, Namespace: pitrCluster.Namespace},
pitrCluster)).To(Succeed())
g.Expect(internalCluster.IsReady(*pitrCluster)).To(BeTrue())
}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())

By("verifying PITR recovered to correct point in time")

output, _, err = command.ExecuteInContainer(ctx,
*clientSet,
cfg,
command.ContainerLocator{
NamespaceName: pitrCluster.Namespace,
PodName: fmt.Sprintf("%v-1", pitrCluster.Name),
ContainerName: "postgres",
},
nil,
[]string{"psql", "-tAc", "SELECT COUNT(*) FROM pitr_test WHERE data = 'before_backup';"})
Expect(err).NotTo(HaveOccurred())
Expect(strings.TrimSpace(output)).To(Equal("1"), "Should have initial data from before backup")

output, _, err = command.ExecuteInContainer(ctx,
*clientSet,
cfg,
command.ContainerLocator{
NamespaceName: pitrCluster.Namespace,
PodName: fmt.Sprintf("%v-1", pitrCluster.Name),
ContainerName: "postgres",
},
nil,
[]string{"psql", "-tAc", "SELECT COUNT(*) FROM pitr_test WHERE data = 'after_backup';"})
Expect(err).NotTo(HaveOccurred())
Expect(strings.TrimSpace(output)).To(Equal("1"), "Should have data added after backup")

output, _, err = command.ExecuteInContainer(ctx,
*clientSet,
cfg,
command.ContainerLocator{
NamespaceName: pitrCluster.Namespace,
PodName: fmt.Sprintf("%v-1", pitrCluster.Name),
ContainerName: "postgres",
},
nil,
[]string{"psql", "-tAc", "SELECT COUNT(*) FROM pitr_test WHERE data = 'should_not_appear_in_pitr';"})
Expect(err).NotTo(HaveOccurred())
Expect(strings.TrimSpace(output)).To(Equal("0"), "Should NOT have data after PITR target time")

By("verifying total record count matches expected PITR state")
output, _, err = command.ExecuteInContainer(ctx,
*clientSet,
cfg,
command.ContainerLocator{
NamespaceName: pitrCluster.Namespace,
PodName: fmt.Sprintf("%v-1", pitrCluster.Name),
ContainerName: "postgres",
},
nil,
[]string{"psql", "-tAc", "SELECT COUNT(*) FROM pitr_test;"})
Expect(err).NotTo(HaveOccurred())
Expect(strings.TrimSpace(output)).To(Equal("2"), "Should have exactly 2 records in PITR restore (1 before backup + 1 after backup)")
},
Entry(
"using TargetTime with plugin",
&s3BackupPluginTargetTimeRestore{},
),
)
})
68 changes: 68 additions & 0 deletions test/e2e/internal/tests/backup/fixtures.go
@@ -35,12 +35,18 @@ const (
archiveName = "source"
dstBackupName = "restore"
restoreClusterName = "restore"
pitrClusterName = "pitr-restore"
)

type testCaseFactory interface {
createBackupRestoreTestResources(namespace string) backupRestoreTestResources
}

// pitrTestCaseFactory extends testCaseFactory with the ability to build a cluster
// restored to a specific point in time.
type pitrTestCaseFactory interface {
testCaseFactory
createPITRCluster(namespace string, targetTime string) *cloudnativepgv1.Cluster
}

type backupRestoreTestResources struct {
ObjectStoreResources *objectstore.Resources
Archive *pluginPgbackrestV1.Archive
@@ -52,6 +58,10 @@ type backupRestoreTestResources struct {

type s3BackupPluginBackupPluginRestore struct{}

// s3BackupPluginTargetTimeRestore reuses the plugin backup/restore fixtures and adds
// a PITR restore driven by a TargetTime recovery target.
type s3BackupPluginTargetTimeRestore struct {
s3BackupPluginBackupPluginRestore
}

func (s s3BackupPluginBackupPluginRestore) createBackupRestoreTestResources(
namespace string,
) backupRestoreTestResources {
@@ -67,6 +77,64 @@ func (s s3BackupPluginBackupPluginRestore) createBackupRestoreTestResources(
return result
}

// createPITRCluster builds a recovery Cluster that bootstraps from the pgBackrest
// archive of the source cluster and stops WAL replay at targetTime.
func (s s3BackupPluginTargetTimeRestore) createPITRCluster(
namespace string,
targetTime string,
) *cloudnativepgv1.Cluster {
cluster := &cloudnativepgv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "postgresql.cnpg.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: pitrClusterName,
Namespace: namespace,
},
Spec: cloudnativepgv1.ClusterSpec{
Instances: 2,
ImagePullPolicy: corev1.PullAlways,
Bootstrap: &cloudnativepgv1.BootstrapConfiguration{
Recovery: &cloudnativepgv1.BootstrapRecovery{
Source: "source",
RecoveryTarget: &cloudnativepgv1.RecoveryTarget{
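// TargetTime maps to PostgreSQL's recovery_target_time; the test passes the
// timestamp captured with `SELECT now()::text` on the source cluster.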
TargetTime: targetTime,
},
},
},
Plugins: []cloudnativepgv1.PluginConfiguration{
{
Name: "pgbackrest.cnpg.opera.com",
Parameters: map[string]string{
"pgbackrestObjectName": archiveName,
},
},
},
PostgresConfiguration: cloudnativepgv1.PostgresConfiguration{
Parameters: map[string]string{
"log_min_messages": "DEBUG4",
},
},
ExternalClusters: []cloudnativepgv1.ExternalCluster{
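// The "source" external cluster points at the same pgBackrest archive and stanza
// written by the source cluster, so recovery reads the backup and WAL produced
// earlier in the test.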
{
Name: "source",
PluginConfiguration: &cloudnativepgv1.PluginConfiguration{
Name: "pgbackrest.cnpg.opera.com",
Parameters: map[string]string{
"pgbackrestObjectName": archiveName,
"stanza": srcClusterName,
},
},
},
},
StorageConfiguration: cloudnativepgv1.StorageConfiguration{
Size: size,
},
},
}

return cluster
}

func newSrcPluginBackup(namespace string) *cloudnativepgv1.Backup {
return &cloudnativepgv1.Backup{
TypeMeta: metav1.TypeMeta{