Skip to content

Commit b4b514c

Browse files
FEATURE (encryption): Add backups encryption
1 parent da0fec6 commit b4b514c

35 files changed

+1813
-335
lines changed

README.md

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,13 +40,13 @@
4040
- **Precise timing**: run backups at specific times (e.g., 4 AM during low traffic)
4141
- **Smart compression**: 4-8x space savings with balanced compression (~20% overhead)
4242

43-
### 🗄️ **Multiple Storage Destinations** <a href="https://postgresus.com/storages">(docs)</a>
43+
### 🗄️ **Multiple Storage Destinations** <a href="https://postgresus.com/storages">(view supported)</a>
4444

4545
- **Local storage**: Keep backups on your VPS/server
4646
- **Cloud storage**: S3, Cloudflare R2, Google Drive, NAS, Dropbox and more
4747
- **Secure**: All data stays under your control
4848

49-
### 📱 **Smart Notifications** <a href="https://postgresus.com/notifiers">(docs)</a>
49+
### 📱 **Smart Notifications** <a href="https://postgresus.com/notifiers">(view supported)</a>
5050

5151
- **Multiple channels**: Email, Telegram, Slack, Discord, webhooks
5252
- **Real-time updates**: Success and failure notifications
@@ -58,6 +58,13 @@
5858
- **SSL support**: Secure connections available
5959
- **Easy restoration**: One-click restore from any backup
6060

61+
### 🔒 **Backup Encryption** <a href="https://postgresus.com/encryption">(docs)</a>
62+
63+
- **AES-256-GCM encryption**: Enterprise-grade protection for backup files
64+
- **Zero-trust storage**: Encrypted backups are unreadable without your key, so you can safely keep them in shared storage like S3, Azure Blob Storage, etc.
65+
- **Optional**: Encryption can be enabled or disabled for your backups whenever you wish
66+
- **Download unencrypted**: You can still download unencrypted backups via the 'Download' button to use them in `pg_restore` or other tools.
67+
6168
### 👥 **Suitable for Teams** <a href="https://postgresus.com/access-management">(docs)</a>
6269

6370
- **Workspaces**: Group databases, notifiers and storages for different projects or teams

backend/internal/features/backups/backups/controller_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -524,7 +524,7 @@ func Test_CancelBackup_InProgressBackup_SuccessfullyCancelled(t *testing.T) {
524524
assert.NoError(t, err)
525525

526526
// Register a cancellable context for the backup
527-
GetBackupService().backupContextMgr.RegisterBackup(backup.ID, func() {})
527+
GetBackupService().backupContextManager.RegisterBackup(backup.ID, func() {})
528528

529529
resp := test_utils.MakePostRequest(
530530
t,

backend/internal/features/backups/backups/di.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import (
77
"postgresus-backend/internal/features/databases"
88
"postgresus-backend/internal/features/notifiers"
99
"postgresus-backend/internal/features/storages"
10+
users_repositories "postgresus-backend/internal/features/users/repositories"
1011
workspaces_services "postgresus-backend/internal/features/workspaces/services"
1112
"postgresus-backend/internal/util/logger"
1213
"time"
@@ -23,6 +24,7 @@ var backupService = &BackupService{
2324
notifiers.GetNotifierService(),
2425
notifiers.GetNotifierService(),
2526
backups_config.GetBackupConfigService(),
27+
users_repositories.GetSecretKeyRepository(),
2628
usecases.GetCreateBackupUsecase(),
2729
logger.GetLogger(),
2830
[]BackupRemoveListener{},

backend/internal/features/backups/backups/dto.go

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,10 @@
11
package backups
22

3+
import (
4+
"io"
5+
"postgresus-backend/internal/features/backups/backups/encryption"
6+
)
7+
38
type GetBackupsRequest struct {
49
DatabaseID string `form:"database_id" binding:"required"`
510
Limit int `form:"limit"`
@@ -12,3 +17,12 @@ type GetBackupsResponse struct {
1217
Limit int `json:"limit"`
1318
Offset int `json:"offset"`
1419
}
20+
21+
// decryptionReaderCloser adapts an *encryption.DecryptionReader (a plain
// io.Reader) into an io.ReadCloser by delegating Close to the underlying
// encrypted source stream.
type decryptionReaderCloser struct {
	*encryption.DecryptionReader
	baseReader io.ReadCloser // raw encrypted stream; the only resource released on Close
}

// Close closes the underlying encrypted stream; the embedded
// DecryptionReader is not involved in cleanup.
func (r *decryptionReaderCloser) Close() error {
	return r.baseReader.Close()
}
Lines changed: 156 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,156 @@
1+
package encryption
2+
3+
import (
4+
"crypto/aes"
5+
"crypto/cipher"
6+
"encoding/binary"
7+
"fmt"
8+
"io"
9+
10+
"github.com/google/uuid"
11+
)
12+
13+
// DecryptionReader streams AES-GCM decryption of a backup produced by
// EncryptionWriter. The file header (magic bytes + salt + nonce) is read
// and validated at construction time; afterwards, length-prefixed
// ciphertext chunks are decrypted on demand into an internal buffer.
type DecryptionReader struct {
	baseReader io.Reader   // encrypted input stream
	cipher     cipher.AEAD // AES-GCM keyed with the per-backup derived key
	buffer     []byte      // decrypted bytes not yet returned to the caller
	nonce      []byte      // base nonce; per-chunk nonce mixes in chunkIndex
	chunkIndex uint64      // big-endian counter folded into each chunk nonce
	headerRead bool        // set once readAndValidateHeader succeeds
	eof        bool        // underlying stream fully consumed
}
22+
23+
func NewDecryptionReader(
24+
baseReader io.Reader,
25+
masterKey string,
26+
backupID uuid.UUID,
27+
salt []byte,
28+
nonce []byte,
29+
) (*DecryptionReader, error) {
30+
if len(salt) != SaltLen {
31+
return nil, fmt.Errorf("salt must be %d bytes, got %d", SaltLen, len(salt))
32+
}
33+
if len(nonce) != NonceLen {
34+
return nil, fmt.Errorf("nonce must be %d bytes, got %d", NonceLen, len(nonce))
35+
}
36+
37+
derivedKey, err := DeriveBackupKey(masterKey, backupID, salt)
38+
if err != nil {
39+
return nil, fmt.Errorf("failed to derive backup key: %w", err)
40+
}
41+
42+
block, err := aes.NewCipher(derivedKey)
43+
if err != nil {
44+
return nil, fmt.Errorf("failed to create cipher: %w", err)
45+
}
46+
47+
aesgcm, err := cipher.NewGCM(block)
48+
if err != nil {
49+
return nil, fmt.Errorf("failed to create GCM: %w", err)
50+
}
51+
52+
reader := &DecryptionReader{
53+
baseReader,
54+
aesgcm,
55+
make([]byte, 0),
56+
nonce,
57+
0,
58+
false,
59+
false,
60+
}
61+
62+
if err := reader.readAndValidateHeader(salt, nonce); err != nil {
63+
return nil, err
64+
}
65+
66+
return reader, nil
67+
}
68+
69+
func (r *DecryptionReader) Read(p []byte) (n int, err error) {
70+
for len(r.buffer) < len(p) && !r.eof {
71+
if err := r.readAndDecryptChunk(); err != nil {
72+
if err == io.EOF {
73+
r.eof = true
74+
break
75+
}
76+
return 0, err
77+
}
78+
}
79+
80+
if len(r.buffer) == 0 {
81+
return 0, io.EOF
82+
}
83+
84+
n = copy(p, r.buffer)
85+
r.buffer = r.buffer[n:]
86+
87+
return n, nil
88+
}
89+
90+
func (r *DecryptionReader) readAndValidateHeader(expectedSalt, expectedNonce []byte) error {
91+
header := make([]byte, HeaderLen)
92+
93+
if _, err := io.ReadFull(r.baseReader, header); err != nil {
94+
return fmt.Errorf("failed to read header: %w", err)
95+
}
96+
97+
magic := string(header[0:MagicBytesLen])
98+
if magic != MagicBytes {
99+
return fmt.Errorf("invalid magic bytes: expected %s, got %s", MagicBytes, magic)
100+
}
101+
102+
salt := header[MagicBytesLen : MagicBytesLen+SaltLen]
103+
nonce := header[MagicBytesLen+SaltLen : MagicBytesLen+SaltLen+NonceLen]
104+
105+
if string(salt) != string(expectedSalt) {
106+
return fmt.Errorf("salt mismatch in file header")
107+
}
108+
109+
if string(nonce) != string(expectedNonce) {
110+
return fmt.Errorf("nonce mismatch in file header")
111+
}
112+
113+
r.headerRead = true
114+
return nil
115+
}
116+
117+
func (r *DecryptionReader) readAndDecryptChunk() error {
118+
lengthBuf := make([]byte, 4)
119+
if _, err := io.ReadFull(r.baseReader, lengthBuf); err != nil {
120+
return err
121+
}
122+
123+
chunkLen := binary.BigEndian.Uint32(lengthBuf)
124+
if chunkLen == 0 || chunkLen > ChunkSize+16 {
125+
return fmt.Errorf("invalid chunk length: %d", chunkLen)
126+
}
127+
128+
encrypted := make([]byte, chunkLen)
129+
if _, err := io.ReadFull(r.baseReader, encrypted); err != nil {
130+
return fmt.Errorf("failed to read encrypted chunk: %w", err)
131+
}
132+
133+
chunkNonce := r.generateChunkNonce()
134+
135+
decrypted, err := r.cipher.Open(nil, chunkNonce, encrypted, nil)
136+
if err != nil {
137+
return fmt.Errorf(
138+
"failed to decrypt chunk (authentication failed - file may be corrupted or tampered): %w",
139+
err,
140+
)
141+
}
142+
143+
r.buffer = append(r.buffer, decrypted...)
144+
r.chunkIndex++
145+
146+
return nil
147+
}
148+
149+
func (r *DecryptionReader) generateChunkNonce() []byte {
150+
chunkNonce := make([]byte, NonceLen)
151+
copy(chunkNonce, r.nonce)
152+
153+
binary.BigEndian.PutUint64(chunkNonce[4:], r.chunkIndex)
154+
155+
return chunkNonce
156+
}
Lines changed: 147 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,147 @@
1+
package encryption
2+
3+
import (
4+
"crypto/aes"
5+
"crypto/cipher"
6+
"encoding/binary"
7+
"fmt"
8+
"io"
9+
10+
"github.com/google/uuid"
11+
)
12+
13+
// EncryptionWriter encrypts a stream with AES-GCM in fixed-size,
// length-prefixed chunks, preceded by a header containing the magic bytes,
// salt and base nonce. The header is written lazily on first Write (or on
// Close). Close must be called to flush the final partial chunk.
// DecryptionReader reverses this format.
type EncryptionWriter struct {
	baseWriter    io.Writer   // destination for the header and encrypted chunks
	cipher        cipher.AEAD // AES-GCM keyed with the per-backup derived key
	buffer        []byte      // plaintext accumulated until a full ChunkSize
	nonce         []byte      // base nonce; per-chunk nonce mixes in chunkIndex
	salt          []byte      // kept for the lazy header write
	chunkIndex    uint64      // big-endian counter folded into each chunk nonce
	headerWritten bool        // header already emitted
}
22+
23+
func NewEncryptionWriter(
24+
baseWriter io.Writer,
25+
masterKey string,
26+
backupID uuid.UUID,
27+
salt []byte,
28+
nonce []byte,
29+
) (*EncryptionWriter, error) {
30+
if len(salt) != SaltLen {
31+
return nil, fmt.Errorf("salt must be %d bytes, got %d", SaltLen, len(salt))
32+
}
33+
if len(nonce) != NonceLen {
34+
return nil, fmt.Errorf("nonce must be %d bytes, got %d", NonceLen, len(nonce))
35+
}
36+
37+
derivedKey, err := DeriveBackupKey(masterKey, backupID, salt)
38+
if err != nil {
39+
return nil, fmt.Errorf("failed to derive backup key: %w", err)
40+
}
41+
42+
block, err := aes.NewCipher(derivedKey)
43+
if err != nil {
44+
return nil, fmt.Errorf("failed to create cipher: %w", err)
45+
}
46+
47+
aesgcm, err := cipher.NewGCM(block)
48+
if err != nil {
49+
return nil, fmt.Errorf("failed to create GCM: %w", err)
50+
}
51+
52+
writer := &EncryptionWriter{
53+
baseWriter: baseWriter,
54+
cipher: aesgcm,
55+
buffer: make([]byte, 0, ChunkSize),
56+
nonce: nonce,
57+
chunkIndex: 0,
58+
headerWritten: false,
59+
salt: salt, // Store salt for lazy header writing
60+
}
61+
62+
return writer, nil
63+
}
64+
65+
func (w *EncryptionWriter) Write(p []byte) (n int, err error) {
66+
// Write header on first write (lazy initialization)
67+
if !w.headerWritten {
68+
if err := w.writeHeader(w.salt, w.nonce); err != nil {
69+
return 0, fmt.Errorf("failed to write header: %w", err)
70+
}
71+
}
72+
73+
n = len(p)
74+
w.buffer = append(w.buffer, p...)
75+
76+
for len(w.buffer) >= ChunkSize {
77+
chunk := w.buffer[:ChunkSize]
78+
if err := w.encryptAndWriteChunk(chunk); err != nil {
79+
return 0, err
80+
}
81+
w.buffer = w.buffer[ChunkSize:]
82+
}
83+
84+
return n, nil
85+
}
86+
87+
func (w *EncryptionWriter) Close() error {
88+
// Write header if it hasn't been written yet (in case Close is called without any writes)
89+
if !w.headerWritten {
90+
if err := w.writeHeader(w.salt, w.nonce); err != nil {
91+
return fmt.Errorf("failed to write header: %w", err)
92+
}
93+
}
94+
95+
if len(w.buffer) > 0 {
96+
if err := w.encryptAndWriteChunk(w.buffer); err != nil {
97+
return err
98+
}
99+
w.buffer = nil
100+
}
101+
return nil
102+
}
103+
104+
func (w *EncryptionWriter) writeHeader(salt, nonce []byte) error {
105+
header := make([]byte, HeaderLen)
106+
107+
copy(header[0:MagicBytesLen], []byte(MagicBytes))
108+
copy(header[MagicBytesLen:MagicBytesLen+SaltLen], salt)
109+
copy(header[MagicBytesLen+SaltLen:MagicBytesLen+SaltLen+NonceLen], nonce)
110+
111+
_, err := w.baseWriter.Write(header)
112+
if err != nil {
113+
return fmt.Errorf("failed to write header: %w", err)
114+
}
115+
116+
w.headerWritten = true
117+
return nil
118+
}
119+
120+
func (w *EncryptionWriter) encryptAndWriteChunk(chunk []byte) error {
121+
chunkNonce := w.generateChunkNonce()
122+
123+
encrypted := w.cipher.Seal(nil, chunkNonce, chunk, nil)
124+
125+
lengthBuf := make([]byte, 4)
126+
binary.BigEndian.PutUint32(lengthBuf, uint32(len(encrypted)))
127+
128+
if _, err := w.baseWriter.Write(lengthBuf); err != nil {
129+
return fmt.Errorf("failed to write chunk length: %w", err)
130+
}
131+
132+
if _, err := w.baseWriter.Write(encrypted); err != nil {
133+
return fmt.Errorf("failed to write encrypted chunk: %w", err)
134+
}
135+
136+
w.chunkIndex++
137+
return nil
138+
}
139+
140+
func (w *EncryptionWriter) generateChunkNonce() []byte {
141+
chunkNonce := make([]byte, NonceLen)
142+
copy(chunkNonce, w.nonce)
143+
144+
binary.BigEndian.PutUint64(chunkNonce[4:], w.chunkIndex)
145+
146+
return chunkNonce
147+
}

0 commit comments

Comments
 (0)