-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: db_backup.sh
More file actions
executable file
·95 lines (84 loc) · 2.29 KB
/
db_backup.sh
File metadata and controls
executable file
·95 lines (84 loc) · 2.29 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
#!/bin/bash
#
# Author: Joshua Chen
# Date: 2016-04-12
# Location: Shenzhen
# Desc: backup MySQL database, save in compressed form,
# parallel tool is used to boost compression speed.
#
# Write a message to syslog (facility local0, level info),
# tagged "[MySQL Backup]" so backup entries are easy to grep for.
log() {
    logger -p local0.info -t '[MySQL Backup]' "$*"
}
# Run a SQL statement against the configured server.
# Globals: mysql (client binary), host, user, pass (connection settings)
# Arguments: all arguments are joined into one SQL statement
# Outputs: query result on stdout
# Returns: the mysql client's exit status
# Quote every expansion so paths/credentials with spaces cannot
# word-split or glob (the originals were all unquoted).
runSQL() {
    "$mysql" -h "$host" -u"$user" -p"$pass" -e "$*"
}
# Actions to perform before dump.
# We mark down the 'revision' of the backup data here,
# the 'revision' is identified by the slave status.
# Stops the slave SQL thread so the data is frozen while dumping;
# on failure to record the status, restarts the thread and aborts.
preAction() {
    # The original ignored a failed mkdir and would have stopped the
    # slave only to dump into a nonexistent directory — check it first.
    if ! mkdir "$dstDir"; then
        log "failed to create directory $dstDir, abort"
        exit 1
    fi
    runSQL "stop slave sql_thread;" # prevent changes to the database from sql_thread
    if ! runSQL "show slave status\G" > "$statusFile"; then # record the master log state
        runSQL "start slave sql_thread;"   # undo the stop before bailing out
        log "failed to record the slave status, abort"
        exit 1
    fi
}
# Resume replication once the dump has finished: restart the
# slave SQL thread that preAction stopped.
postAction() {
    runSQL 'start slave sql_thread;'
}
# Dump one table into a compressed SQL file under $dstDir.
# Globals: dumper, compressor, host, user, pass, dstDir
# Arguments: $1 - database name, $2 - table name
# Outputs: writes ${dstDir}/${db}.${tb}.sql.bz2; logs start/end status
dumpOneTb() {
    local db=$1 tb=$2 file stat
    file="${dstDir}/${db}.${tb}.sql.bz2"
    log "dump start: ${db}.${tb}"
    "$dumper" -h "$host" -u "$user" -p"$pass" "$db" "$tb" | "$compressor" > "$file"
    # Bug fix: plain $? only reflects the LAST pipeline stage, so a
    # failed mysqldump used to be reported as "success" whenever the
    # compressor exited 0. Check both stages via PIPESTATUS (bash).
    if [[ ${PIPESTATUS[0]} -eq 0 && ${PIPESTATUS[1]} -eq 0 ]]; then
        stat="success"
    else
        stat="failed"
    fi
    log "dump end: ${db}.${tb}, $stat"
}
# Collect all table names in the given databases.
# Writes one fully-qualified "db.table" name per line.
# Arguments: $1 - file to write into; remaining args - database names
collectTbNames() {
    local outFile=$1 db
    shift
    : > "$outFile"
    for db in "$@"; do
        # "show tables" prints a header row; drop it (1d), then
        # prefix every table name with "db."
        runSQL "show tables from $db" \
            | sed -r -e 1d -e "s/^/${db}./" >> "$outFile"
    done
}
# Remove the temporary files created by this script (runs from the
# exit trap). Paths come from mktemp; quote them so a TMPDIR with
# spaces cannot word-split, and use -- to stop option parsing.
cleanup() {
    rm -f -- "$tableList" "$lock" "$taskRecord"
}
# --- Configuration ---------------------------------------------------
host="127.0.0.1"
user="backup"
pass="backup"
time=$(date '+%Y%m%d%H%M%S')                # timestamp doubles as the backup dir name
dumper="/usr/local/mysql/bin/mysqldump"
mysql="/usr/local/mysql/bin/mysql"
compressor="/usr/local/bin/lbzip2"          # parallel bzip2: boosts compression speed
backupDir="/data/backup"
dstDir="$backupDir/$time"
statusFile="${dstDir}/status"
dbList="pcdn acoway_oss statis acoway_manage mysql"
tableList=$(mktemp)

# import the parallel task manager functions (provides pexec)
source /etc/rc.d/init.d/ptm
limit=6                 # max parallel dump tasks
lock=$(mktemp)          # lock file for the ptm
taskRecord=$(mktemp)    # record file for the ptm
trap cleanup EXIT       # remove temp files on any exit path

# --- Main ------------------------------------------------------------
log "start to work"
preAction
collectTbNames "$tableList" $dbList   # dbList intentionally unquoted: split into names
# Each line of $tableList is "db.table"; split on the dot.
# Bug fix: -r keeps backslashes in names literal (read without -r
# would interpret them as escapes).
while IFS=. read -r db tb
do
    pexec "$limit" "$lock" "$taskRecord" dumpOneTb "$db" "$tb"
done < "$tableList"
wait                    # barrier: all background dumps done before restarting the slave
postAction
log "work end"