@@ -16,28 +16,42 @@ file_env 'DB_USER'
 file_env 'DB_PASS'
 
 ### Set Defaults
+BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
 COMPRESSION=${COMPRESSION:-GZ}
-PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
-DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
 DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
+DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
 DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backup}
 DBHOST=${DB_HOST}
 DBNAME=${DB_NAME}
 DBPASS=${DB_PASS}
-DBUSER=${DB_USER}
 DBTYPE=${DB_TYPE}
+DBUSER=${DB_USER}
 MD5=${MD5:-TRUE}
+PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
 SIZE_VALUE=${SIZE_VALUE:-"bytes"}
 SPLIT_DB=${SPLIT_DB:-FALSE}
 TMPDIR=/tmp/backups
 
35+ if [ "BACKUP_TYPE" = "S3" ] || [ "BACKUP_TYPE" = "s3" ] || [ "BACKUP_TYPE" = "MINIO" ] || [ "BACKUP_TYPE" = "minio" ]
36+ S3_PROTOCOL=${S3_PROTOCOL:-"https"}
37+ sanity_var S3_HOST "S3 Host"
38+ sanity_var S3_BUCKET "S3 Bucket"
39+ sanity_var S3_KEY_ID "S3 Key ID"
40+ sanity_var S3_KEY_SECRET "S3 Key Secret"
41+ sanity_var S3_URI_STYLE "S3 URI Style (Virtualhost or Path)"
42+ sanity_var S3_PATH "S3 Path"
43+ file_env 'S3_KEY_ID'
44+ file_env 'S3_KEY_SECRET'
45+
46+ fi
47+
 if [ "$1" = "NOW" ]; then
     DB_DUMP_BEGIN=+0
     MANUAL=TRUE
 fi
 
 ### Set Compression Options
-if [ "$PARALLEL_COMPRESSION" = "TRUE" ] ; then
+if var_true $PARALLEL_COMPRESSION ; then
     BZIP="pbzip2"
     GZIP="pigz"
     XZIP="pixz"
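Several of the flag checks in this commit now go through a var_true helper instead of comparing against "TRUE"/"true" inline. The helper itself is not part of this diff; a minimal sketch consistent with how it is called here (the real function ships with the image's shared function library and may differ) would be:

    # Hypothetical helper: treat TRUE/true/True as truthy, anything else as false.
    var_true() {
        [ "$(echo "${1}" | tr '[:upper:]' '[:lower:]')" = "true" ]
    }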
@@ -98,7 +112,7 @@ function backup_couch() {
 }
 
 function backup_mysql() {
-    if [ "$SPLIT_DB" = "TRUE" ] || [ "$SPLIT_DB" = "true" ]; then
+    if var_true $SPLIT_DB ; then
         DATABASES=`mysql -h ${DBHOST} -P $DBPORT -u$DBUSER --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`
 
         for db in $DATABASES; do
@@ -139,7 +153,7 @@ function backup_mongo() {
 }
 
 function backup_pgsql() {
-    if [ "$SPLIT_DB" = "TRUE" ] || [ "$SPLIT_DB" = "true" ]; then
+    if var_true $SPLIT_DB ; then
         export PGPASSWORD=${DBPASS}
         DATABASES=`psql -h $DBHOST -U $DBUSER -p ${DBPORT} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' `
         for db in $DATABASES; do
@@ -279,17 +293,14 @@ function compression() {
 }
 
 function generate_md5() {
-    if [ "$MD5" = "TRUE" ] || [ "$MD5" = "true" ] ; then
+    if var_true $MD5 ; then
         cd $TMPDIR
         md5sum ${TARGET} > ${TARGET}.md5
         MD5VALUE=$(md5sum ${TARGET} | awk '{ print $1}')
     fi
 }
 
 function move_backup() {
-    mkdir -p ${DB_DUMP_TARGET}
-    mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/
-    mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET}
     case "$SIZE_VALUE" in
         "b" | "bytes" )
             SIZE_VALUE=1
@@ -306,6 +317,47 @@ function move_backup() {
     else
         FILESIZE=$(du -h "${DB_DUMP_TARGET}/${TARGET}" | awk '{ print $1}')
     fi
+
+    case "${BACKUP_LOCATION}" in
+        "FILE" | "file" | "filesystem" | "FILESYSTEM" )
+            mkdir -p ${DB_DUMP_TARGET}
+            mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/
+            mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET}
+            ;;
+        "S3" | "s3" | "MINIO" | "minio" )
+            s3_content_type="application/octet-stream"
+            if [ "$S3_URI_STYLE" = "VIRTUALHOST" ] || [ "$S3_URI_STYLE" = "VHOST" ] || [ "$S3_URI_STYLE" = "virtualhost" ] || [ "$S3_URI_STYLE" = "vhost" ] ; then
+                s3_url="${S3_BUCKET}.${S3_HOST}"
+            else
+                s3_url="${S3_HOST}/${S3_BUCKET}"
+            fi
+
+            if var_true $MD5 ; then
+                s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
+                s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}.md5" | base64)"
+                sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
+                print_debug "Uploading ${TARGET}.md5 to S3"
+                curl -T "${TMPDIR}/${TARGET}.md5" ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET}.md5 \
341+ -H "Date: $date" \
342+ -H "Authorization: AWS ${S3_KEY_ID}:$sig" \
343+ -H "Content-Type: ${s3_content_type}" \
344+ -H "Content-MD5: ${s3_md5}"
345+ fi
346+
347+ s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
348+ s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}" | base64)"
349+ sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
350+ print_debug "Uploading ${TARGET} to S3"
351+ curl -T ${TMPDIR}/${TARGET} ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET} \
352+ -H "Date: $s3_date" \
353+ -H "Authorization: AWS ${S3_KEY_ID}:$sig" \
354+ -H "Content-Type: ${s3_content_type}" \
355+ -H "Content-MD5: ${s3_md5}"
356+
357+ rm -rf ${TMPDIR}/*.md5
358+ rm -rf ${TMPDIR}/${TARGET}
359+ ;;
360+ esac
309361}
310362
311363
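The S3/minio branch signs each PUT with an AWS Signature Version 2 Authorization header (HMAC-SHA1 over the canonical request via libressl) and streams the file with curl, so no extra client such as awscli is needed in the image. A hypothetical environment for routing backups to an S3-compatible endpoint, with every value a placeholder rather than a shipped default:

    BACKUP_LOCATION=S3              # default is FILESYSTEM; S3 or MINIO selects the upload path
    S3_PROTOCOL=https               # default per this commit
    S3_HOST=s3.example.com          # e.g. a minio endpoint such as minio:9000
    S3_BUCKET=db-backups
    S3_PATH=nightly
    S3_URI_STYLE=VIRTUALHOST        # VHOST/VIRTUALHOST = bucket.host; other values = path style
    S3_KEY_ID=AKIAEXAMPLE
    S3_KEY_SECRET=examplesecret     # a *_FILE variant should also work if file_env follows the usual convention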
@@ -373,7 +425,7 @@ print_info "Initialized on `date`"
     esac
 
 ### Zabbix
-    if [ "$ENABLE_ZABBIX" = "TRUE" ] || [ "$ENABLE_ZABBIX" = "true" ]; then
+    if var_true $ENABLE_ZABBIX ; then
         silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${TARGET}`
         silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${TARGET} +'%s'`
     fi
@@ -395,7 +447,7 @@ print_info "Initialized on `date`"
     fi
 
 ### Go back to Sleep until next Backup time
-    if [ "$MANUAL" = "TRUE" ] ; then
+    if var_true $MANUAL ; then
         exit 1;
     else
         sleep $(($DB_DUMP_FREQ*60))
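DB_DUMP_FREQ is expressed in minutes, so with the default of 1440 the loop sleeps 1440 * 60 = 86400 seconds between runs (one dump per day); setting DB_DUMP_FREQ=60, for example, would produce hourly dumps.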