Skip to content

Commit 3115cb3

Browse files
committed
Release 1.21.0 - See CHANGELOG.md
1 parent 859ce5f commit 3115cb3

File tree

3 files changed

+89
-13
lines changed

3 files changed

+89
-13
lines changed

CHANGELOG.md

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,13 @@
1+
## 1.21.0 2020-06-03 <dave at tiredofit dot ca>
2+
3+
### Added
4+
- Add S3 Compatible Storage Support
5+
6+
### Changed
7+
- Switch some variables to support tiredofit/alpine base image better
8+
- Fix issue with parallel compression not working correctly
9+
10+
111
## 1.20.1 2020-04-24 <dave at tiredofit dot ca>
212

313
### Changed

README.md

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ This will build a container for backing up multiple type of DB Servers
1212

1313
Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis, Rethink servers.
1414

15-
* dump to local filesystem
15+
* dump to local filesystem or backup to S3 Compatible services
1616
* select database user and password
1717
* backup all databases
1818
* choose to have an MD5 sum after backup for verification
@@ -88,6 +88,7 @@ Along with the Environment Variables from the [Base image](https://hub.docker.co
8888

8989
| Parameter | Description |
9090
|-----------|-------------|
91+
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi - Default `FILESYSTEM`
9192
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, or none `NONE` - Default `GZ`
9293
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `rethink`
9394
| `DB_HOST` | Server Hostname e.g. `mariadb`
@@ -105,6 +106,19 @@ Along with the Environment Variables from the [Base image](https://hub.docker.co
105106
| `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` - Default `TRUE` |
106107
| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create separate DB backups instead of all in one. - Default `FALSE` |
107108

109+
**Backing Up to S3 Compatible Services**
110+
111+
If `BACKUP_LOCATION` = `S3` then the following options are used.
112+
113+
| Parameter | Description |
114+
|-----------|-------------|
115+
| `S3_BUCKET` | S3 Bucket name e.g. 'mybucket' |
116+
| `S3_HOST` | Hostname of S3 Server e.g. "s3.amazonaws.com" - You can also include a port if necessary
117+
| `S3_KEY_ID` | S3 Key ID |
118+
| `S3_KEY_SECRET` | S3 Key Secret |
119+
| `S3_PATH` | S3 Pathname to save to e.g. '`backup`' |
120+
| `S3_PROTOCOL` | Use either `http` or `https` to access service - Default `https` |
121+
| `S3_URI_STYLE` | Choose either `VIRTUALHOST` or `PATH` style - Default `VIRTUALHOST`
108122

109123
## Maintenance
110124

install/etc/s6/services/10-db-backup/run

Lines changed: 64 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -16,28 +16,42 @@ file_env 'DB_USER'
1616
file_env 'DB_PASS'
1717

1818
### Set Defaults
19+
BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
1920
COMPRESSION=${COMPRESSION:-GZ}
20-
PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
21-
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
2221
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
22+
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
2323
DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backup}
2424
DBHOST=${DB_HOST}
2525
DBNAME=${DB_NAME}
2626
DBPASS=${DB_PASS}
27-
DBUSER=${DB_USER}
2827
DBTYPE=${DB_TYPE}
28+
DBUSER=${DB_USER}
2929
MD5=${MD5:-TRUE}
30+
PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
3031
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
3132
SPLIT_DB=${SPLIT_DB:-FALSE}
3233
TMPDIR=/tmp/backups
3334

35+
### S3 configuration: only validated when backing up to an S3-compatible target.
# Bug fixes vs. original: the test compared the literal string "BACKUP_TYPE"
# (always false, and the variable used everywhere else is BACKUP_LOCATION),
# and the 'then' keyword was missing entirely.
if [ "${BACKUP_LOCATION}" = "S3" ] || [ "${BACKUP_LOCATION}" = "s3" ] || [ "${BACKUP_LOCATION}" = "MINIO" ] || [ "${BACKUP_LOCATION}" = "minio" ]; then
    # Load file-based secrets FIRST so the sanity checks below can see values
    # supplied via *_FILE environment variables.
    file_env 'S3_KEY_ID'
    file_env 'S3_KEY_SECRET'
    S3_PROTOCOL=${S3_PROTOCOL:-"https"}
    # README documents VIRTUALHOST as the default, so set it instead of
    # failing the sanity check when the user omits it.
    S3_URI_STYLE=${S3_URI_STYLE:-"VIRTUALHOST"}
    sanity_var S3_HOST "S3 Host"
    sanity_var S3_BUCKET "S3 Bucket"
    sanity_var S3_KEY_ID "S3 Key ID"
    sanity_var S3_KEY_SECRET "S3 Key Secret"
    sanity_var S3_PATH "S3 Path"
fi
47+
3448
if [ "$1" = "NOW" ]; then
3549
DB_DUMP_BEGIN=+0
3650
MANUAL=TRUE
3751
fi
3852

3953
### Set Compression Options
40-
if [ "$PARALLEL_COMPRESSION" = "TRUE " ]; then
54+
if var_true $PARALLEL_COMPRESSION ; then
4155
BZIP="pbzip2"
4256
GZIP="pigz"
4357
XZIP="pixz"
@@ -98,7 +112,7 @@ function backup_couch() {
98112
}
99113

100114
function backup_mysql() {
101-
if [ "$SPLIT_DB" = "TRUE" ] || [ "$SPLIT_DB" = "true" ]; then
115+
if var_true $SPLIT_DB ; then
102116
DATABASES=`mysql -h ${DBHOST} -P $DBPORT -u$DBUSER --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`
103117

104118
for db in $DATABASES; do
@@ -139,7 +153,7 @@ function backup_mongo() {
139153
}
140154

141155
function backup_pgsql() {
142-
if [ "$SPLIT_DB" = "TRUE" ] || [ "$SPLIT_DB" = "true" ]; then
156+
if var_true $SPLIT_DB ; then
143157
export PGPASSWORD=${DBPASS}
144158
DATABASES=`psql -h $DBHOST -U $DBUSER -p ${DBPORT} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' `
145159
for db in $DATABASES; do
@@ -279,17 +293,14 @@ function compression() {
279293
}
280294

281295
function generate_md5() {
282-
if [ "$MD5" = "TRUE" ] || [ "$MD5" = "true" ] ; then
296+
if var_true $MD5 ; then
283297
cd $TMPDIR
284298
md5sum ${TARGET} > ${TARGET}.md5
285299
MD5VALUE=$(md5sum ${TARGET} | awk '{ print $1}')
286300
fi
287301
}
288302

289303
function move_backup() {
290-
mkdir -p ${DB_DUMP_TARGET}
291-
mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/
292-
mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET}
293304
case "$SIZE_VALUE" in
294305
"b" | "bytes" )
295306
SIZE_VALUE=1
@@ -306,6 +317,47 @@ function move_backup() {
306317
else
307318
FILESIZE=$(du -h "${DB_DUMP_TARGET}/${TARGET}" | awk '{ print $1}')
308319
fi
320+
321+
case "${BACKUP_LOCATION}" in
322+
"FILE" | "file" | "filesystem" | "FILESYSTEM" )
323+
mkdir -p ${DB_DUMP_TARGET}
324+
mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/
325+
mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET}
326+
;;
327+
"S3" | "s3" | "MINIO" | "minio" )
328+
s3_content_type="application/octet-stream"
329+
# Build the S3 endpoint URL. Virtual-host style addresses the bucket as a
# subdomain of the host; path style appends the bucket to the host path.
# Bug fix vs. original: the '||' operators between the 2nd, 3rd and 4th tests
# were missing, so '[' received extra arguments ("too many arguments" error)
# and the lowercase/VHOST spellings never matched.
if [ "$S3_URI_STYLE" = "VIRTUALHOST" ] || [ "$S3_URI_STYLE" = "VHOST" ] || [ "$S3_URI_STYLE" = "virtualhost" ] || [ "$S3_URI_STYLE" = "vhost" ]; then
    s3_url="${S3_BUCKET}.${S3_HOST}"
else
    s3_url="${S3_HOST}/${S3_BUCKET}"
fi
334+
335+
# Upload the .md5 companion file before the backup itself.
# AWS v2 signature: string-to-sign is "PUT\nContent-MD5\nContent-Type\nDate\n/resource";
# the Date header sent to the server MUST be the same value signed into $sig.
# Bug fix vs. original: curl sent '-H "Date: $date"' ($date is never set),
# so the signed date and the transmitted date differed and S3 rejected the
# request with a signature mismatch.
if var_true $MD5 ; then
    s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
    s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}.md5" | base64)"
    sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
    print_debug "Uploading ${TARGET}.md5 to S3"
    curl -T "${TMPDIR}/${TARGET}.md5" ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET}.md5 \
         -H "Date: $s3_date" \
         -H "Authorization: AWS ${S3_KEY_ID}:$sig" \
         -H "Content-Type: ${s3_content_type}" \
         -H "Content-MD5: ${s3_md5}"
fi
346+
347+
s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
348+
s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}" | base64)"
349+
sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
350+
print_debug "Uploading ${TARGET} to S3"
351+
curl -T ${TMPDIR}/${TARGET} ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET} \
352+
-H "Date: $s3_date" \
353+
-H "Authorization: AWS ${S3_KEY_ID}:$sig" \
354+
-H "Content-Type: ${s3_content_type}" \
355+
-H "Content-MD5: ${s3_md5}"
356+
357+
rm -rf ${TMPDIR}/*.md5
358+
rm -rf ${TMPDIR}/${TARGET}
359+
;;
360+
esac
309361
}
310362

311363

@@ -373,7 +425,7 @@ print_info "Initialized on `date`"
373425
esac
374426

375427
### Zabbix
376-
if [ "$ENABLE_ZABBIX" = "TRUE" ] || [ "$ENABLE_ZABBIX" = "true" ]; then
428+
if var_true $ENABLE_ZABBIX ; then
377429
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${TARGET}`
378430
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${TARGET} +'%s'`
379431
fi
@@ -395,7 +447,7 @@ print_info "Initialized on `date`"
395447
fi
396448

397449
### Go back to Sleep until next Backup time
398-
if [ "$MANUAL" = "TRUE" ]; then
450+
if var_true $MANUAL ; then
399451
exit 1;
400452
else
401453
sleep $(($DB_DUMP_FREQ*60))

0 commit comments

Comments
 (0)