#! /usr/bin/env bash

set -euo pipefail
IFS=$'\n\t'

## Required Env passed from CronJob:
# MARIADB_HOST, MARIADB_USER, MARIADB_PASSWORD (or use .my.cnf)
# LOGICAL_BACKUP_PROVIDER, LOGICAL_BACKUP_S3_BUCKET, etc.

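# If you prefer not to pass MARIADB_PASSWORD through the environment, the
# MariaDB clients can read credentials from a config file instead. A minimal
# sketch of a ~/.my.cnf (host/user/password values are placeholders, not part
# of this repo):
#
#   [client]
#   host=mariadb.example.svc.cluster.local
#   user=backup
#   password=changeme
#
# If you go that route, the explicit -u/-p flags passed below would need to be
# relaxed, since the script currently always supplies them.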
# MariaDB query to get total size of all databases in bytes
ALL_DB_SIZE_QUERY="SELECT SUM(data_length + index_length) FROM information_schema.TABLES;"
DUMP_SIZE_COEFF=5
ERRORCOUNT=0
CLUSTER_NAME=${CLUSTER_NAME_LABEL:-"mariadb-cluster"}
LOGICAL_BACKUP_PROVIDER=${LOGICAL_BACKUP_PROVIDER:-"s3"}
LOGICAL_BACKUP_S3_RETENTION_TIME=${LOGICAL_BACKUP_S3_RETENTION_TIME:-""}
LOGICAL_BACKUP_S3_ENDPOINT=${LOGICAL_BACKUP_S3_ENDPOINT:-}
LOGICAL_BACKUP_S3_REGION=${LOGICAL_BACKUP_S3_REGION:-"us-west-1"}

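# The raw data size reported by information_schema is divided by
# DUMP_SIZE_COEFF before being handed to `aws s3 cp --expected-size`, a rough
# assumption that the gzipped dump ends up at around one fifth of the on-disk
# size. The estimate only needs to be in the right ballpark: the AWS CLI uses
# it to size multipart chunks when uploading from a stream whose total length
# is not known up front.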
function estimate_size {
    # Connects to MariaDB to calculate data size for S3 multipart upload optimization
    mariadb -h "$MARIADB_HOST" -u "$MARIADB_USER" -p"$MARIADB_PASSWORD" \
        --skip-ssl -Nsr -e "${ALL_DB_SIZE_QUERY}"
}

function dump {
    echo "Taking dump from ${MARIADB_HOST} using mariadb-dump"

    # --all-databases: Backup everything
    # --single-transaction: Ensure consistency for InnoDB without locking
    # --quick: Stream output to save memory
    # --routines: Include stored procedures
    mariadb-dump -h "$MARIADB_HOST" -u "$MARIADB_USER" -p"$MARIADB_PASSWORD" \
        --all-databases \
        --single-transaction \
        --quick \
        --routines \
        --events \
        --skip-ssl \
        --verbose
}

function compress {
    # Use pigz for multi-threaded compression if available, else gzip
    if command -v pigz >/dev/null 2>&1; then
        pigz
    else
        gzip
    fi
}

function az_upload {
    local FILE_PATH="${1}"
    # Path: container/cluster-name/scope/logical_backups/timestamp.sql.gz
    PATH_TO_BACKUP="${CLUSTER_NAME}/${LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX}/logical_backups/$(date +%s).sql.gz"

    echo "Uploading to Azure Blob Storage..."
    az storage blob upload \
        --file "${FILE_PATH}" \
        --account-name "${LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_NAME}" \
        --account-key "${LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_KEY}" \
        --container-name "${LOGICAL_BACKUP_AZURE_STORAGE_CONTAINER}" \
        --name "${PATH_TO_BACKUP}"
}

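# aws_delete_objects is exported (see `export -f` below) because the retention
# cleanup invokes it through `xargs ... bash -c`, i.e. in a child bash process
# that only sees exported functions and environment variables. Object keys
# arrive as positional arguments, at most 100 per call, and are formatted into
# the `Objects=[{Key=...},...]` shorthand expected by `aws s3api delete-objects`.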
function aws_delete_objects {
    args=("--bucket=$LOGICAL_BACKUP_S3_BUCKET")
    [[ -n "${LOGICAL_BACKUP_S3_ENDPOINT}" ]] && args+=("--endpoint-url=${LOGICAL_BACKUP_S3_ENDPOINT}")
    [[ -n "${LOGICAL_BACKUP_S3_REGION}" ]] && args+=("--region=${LOGICAL_BACKUP_S3_REGION}")

    aws s3api delete-objects "${args[@]}" --delete Objects=["$(printf '{Key=%q},' "$@")"],Quiet=true
}
export -f aws_delete_objects

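# Retention cleanup: list every object under this cluster's logical_backups/
# prefix, select those last modified before the cutoff computed from
# LOGICAL_BACKUP_S3_RETENTION_TIME (a GNU `date -d` relative offset such as
# "1 week"), always spare the newest entry in that list so at least one backup
# survives, and delete the rest in batches.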
function aws_delete_outdated {
    if [[ -z "$LOGICAL_BACKUP_S3_RETENTION_TIME" ]] ; then
        echo "No retention time configured; skipping cleanup."
        return 0
    fi

    cutoff_date=$(date -d "$LOGICAL_BACKUP_S3_RETENTION_TIME ago" +%F)
    prefix="${CLUSTER_NAME}/${LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX}/logical_backups/"

    args=(
        "--no-paginate"
        "--output=text"
        "--prefix=$prefix"
        "--bucket=$LOGICAL_BACKUP_S3_BUCKET"
    )
    [[ -n "${LOGICAL_BACKUP_S3_ENDPOINT}" ]] && args+=("--endpoint-url=${LOGICAL_BACKUP_S3_ENDPOINT}")
    [[ -n "${LOGICAL_BACKUP_S3_REGION}" ]] && args+=("--region=${LOGICAL_BACKUP_S3_REGION}")

    aws s3api list-objects "${args[@]}" --query="Contents[?LastModified<='$cutoff_date'].[Key]" > /tmp/outdated-backups
    sed -i '$d' /tmp/outdated-backups # Spare the most recent backup

    count=$(wc -l < /tmp/outdated-backups)
    if [[ $count -gt 0 ]]; then
        echo "Deleting $count outdated backups created before $cutoff_date"
        tr '\n' '\0' < /tmp/outdated-backups | xargs -0 -P1 -n100 bash -c 'aws_delete_objects "$@"' _
    fi
}

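# aws_upload reads the compressed dump from stdin (the `-` source argument to
# `aws s3 cp`) and streams it straight to S3, so no local scratch space is
# needed regardless of dump size. EXPECTED_SIZE is optional but keeps the
# multipart chunking sensible for very large dumps.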
function aws_upload {
    local EXPECTED_SIZE="$1"
    PATH_TO_BACKUP="s3://${LOGICAL_BACKUP_S3_BUCKET}/${CLUSTER_NAME}/${LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX}/logical_backups/$(date +%s).sql.gz"

    args=()
    [[ -n "${EXPECTED_SIZE}" ]] && args+=("--expected-size=${EXPECTED_SIZE}")
    [[ -n "${LOGICAL_BACKUP_S3_ENDPOINT}" ]] && args+=("--endpoint-url=${LOGICAL_BACKUP_S3_ENDPOINT}")
    [[ -n "${LOGICAL_BACKUP_S3_REGION}" ]] && args+=("--region=${LOGICAL_BACKUP_S3_REGION}")

    echo "Uploading dump to S3: ${PATH_TO_BACKUP}"
    echo "${args[@]}"
    aws s3 cp - "$PATH_TO_BACKUP" "${args[@]}"
}

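# upload dispatches on LOGICAL_BACKUP_PROVIDER. The s3 branch expects the dump
# to already be flowing in on stdin (see the pipeline at the bottom of the
# script); the az branch produces its own dump because `az storage blob upload`
# needs a regular file rather than a stream here.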
function upload {
    case $LOGICAL_BACKUP_PROVIDER in
        "s3")
            aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF))
            aws_delete_outdated
            ;;
        "az")
            # Azure requires a physical file for 'az storage blob upload' in this context
            dump | compress > /tmp/mariadb-backup.sql.gz
            az_upload /tmp/mariadb-backup.sql.gz
            rm /tmp/mariadb-backup.sql.gz
            ;;
    esac
}

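# Belt-and-suspenders exit handling: `set -euo pipefail` already aborts on a
# failed pipeline stage, and PIPESTATUS is additionally inspected so that any
# failure in dump, compress, or upload is reflected in ERRORCOUNT and the
# script's exit code, which is what the CronJob uses to flag a failed run.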
# Execution Logic
if [[ "$LOGICAL_BACKUP_PROVIDER" == "az" ]]; then
    upload
else
    # Stream dump directly to S3 to save disk space
    dump | compress | upload
    [[ ${PIPESTATUS[0]} != 0 || ${PIPESTATUS[1]} != 0 || ${PIPESTATUS[2]} != 0 ]] && (( ERRORCOUNT += 1 ))
    exit $ERRORCOUNT
fi
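# A minimal sketch of exercising the script by hand (the file name and all
# values below are placeholders, not taken from the repo; AWS credentials are
# assumed to come from the usual CLI config or environment):
#
#   export MARIADB_HOST=mariadb.example.svc MARIADB_USER=backup MARIADB_PASSWORD=changeme \
#          CLUSTER_NAME_LABEL=demo \
#          LOGICAL_BACKUP_S3_BUCKET=my-backups \
#          LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX=default \
#          LOGICAL_BACKUP_S3_RETENTION_TIME="1 week"
#   ./backup.sh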