@@ -71,7 +71,7 @@ function msg() {
 #   1 if the input is empty,
 #   -1 otherwise.
 #######################################
-function checkPath() {
+function check_path() {
   if [[ -z $1 ]]; then
     return 1
   fi
@@ -112,7 +112,8 @@ function checkPath() {
 #   (text) [5] AWS keypair to use
 #   (text) [6] Path to Private Key file to use for instance
 #       Matching public key with .pub extension should exist
-#   (text) [7] The AWS zone to launch the instance in (one of a,b,c,d,e)
+#   (text) [7] The AWS region to launch the instance in (for example: us-east-1, eu-central-1)
+#   (text) [8] The AWS zone to launch the instance in (one of a,b,c,d,e)
 # Returns:
 #   None
 #######################################
@@ -125,7 +126,8 @@ function create_ec2_docker_machine() {
     --amazonec2-block-duration-minutes=$4 \
     --amazonec2-keypair-name="$5" \
     --amazonec2-ssh-keypath="$6" \
-    --amazonec2-zone $7 \
+    --amazonec2-region="$7" \
+    --amazonec2-zone="$8" \
     $1 2> >(grep -v "failed waiting for successful resource state" >&2) &
 }

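With the extra region argument, the helper now takes eight positional parameters, matching the parameter list documented in the previous hunk. A minimal sketch of a call under the new signature (all values below are illustrative placeholders; the real call sites, which pass the script's own variables, appear further down in this diff):

```bash
# Hypothetical values for illustration only:
# 1 machine name, 2 instance type, 3 spot price, 4 block duration (minutes),
# 5 keypair name, 6 private key path, 7 region, 8 zone letter.
create_ec2_docker_machine "nancy-run-example" "i3.large" "0.15" \
  60 "my-keypair" "$HOME/.ssh/my-key" "eu-central-1" "b"
```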
@@ -162,7 +164,7 @@ function destroy_docker_machine() {
 #   None
 #######################################
 function wait_ec2_docker_machine_ready() {
-  machine=$1
+  local machine=$1
   local check_price=$2
   while true; do
     sleep 5;
@@ -171,7 +173,7 @@ function wait_ec2_docker_machine_ready() {
     ((stop_now == 1)) && return 0
     if $check_price; then
       status=$( \
-        aws ec2 describe-spot-instance-requests \
+        aws --region=$AWS_REGION ec2 describe-spot-instance-requests \
         --filters="Name=launch.instance-type,Values=$AWS_EC2_TYPE" \
         | jq '.SpotInstanceRequests | sort_by(.CreateTime) | .[] | .Status.Code' \
         | tail -n 1
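For illustration, here is what that polling pipeline evaluates to: the jq/tail combination picks the status code of the most recently created spot request, and the script reacts to values such as "price-too-low" later in the diff. A hedged sketch with made-up JSON (the real input comes from the AWS CLI call above):

```bash
# Illustrative, trimmed describe-spot-instance-requests response.
response='{"SpotInstanceRequests":[
  {"CreateTime":"2018-07-01T10:00:00.000Z","Status":{"Code":"price-too-low"}},
  {"CreateTime":"2018-07-01T10:05:00.000Z","Status":{"Code":"fulfilled"}}]}'
# Same jq/tail pipeline as in the hunk above: newest request wins.
echo "$response" \
  | jq '.SpotInstanceRequests | sort_by(.CreateTime) | .[] | .Status.Code' \
  | tail -n 1   # prints "fulfilled"
```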
@@ -204,7 +206,7 @@ function cleanup_and_exit {
   if [ ! -z ${VOLUME_ID+x} ]; then
     msg "Wait and delete volume $VOLUME_ID"
     sleep 60 # wait for the machine to be removed
-    delvolout=$(aws ec2 delete-volume --volume-id $VOLUME_ID)
+    delvolout=$(aws --region=$AWS_REGION ec2 delete-volume --volume-id $VOLUME_ID)
     msg "Volume $VOLUME_ID deleted"
   fi
 else
@@ -498,7 +500,8 @@ while [ $# -gt 0 ]; do
       AWS_SSH_KEY_PATH="$2"; shift 2 ;;
     --aws-ebs-volume-size )
       AWS_EBS_VOLUME_SIZE="$2"; shift 2 ;;
-
+    --aws-region )
+      AWS_REGION="$2"; shift 2 ;;
     --s3cfg-path )
       S3_CFG_PATH="$2"; shift 2 ;;
     * )
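After this hunk the option parser accepts --aws-region alongside the existing AWS options. A hedged usage sketch, limited to options visible in this diff; the wrapper script's file name is not shown here, so the name below is only a placeholder:

```bash
# "nancy_run.sh" is a placeholder name; option values are illustrative.
./nancy_run.sh \
  --run-on aws \
  --aws-keypair-name "my-keypair" \
  --aws-ssh-key-path "$HOME/.ssh/my-key" \
  --aws-region "eu-central-1" \
  --aws-ebs-volume-size 100
```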
@@ -556,12 +559,16 @@ if [[ "$RUN_ON" == "aws" ]]; then
     err "ERROR: AWS keypair name and ssh key file must be specified to run on AWS EC2."
     exit 1
   else
-    checkPath AWS_SSH_KEY_PATH
+    check_path AWS_SSH_KEY_PATH
   fi
   if [[ -z ${AWS_EC2_TYPE+x} ]]; then
     err "ERROR: AWS EC2 Instance type not given."
     exit 1
   fi
+  if [[ -z ${AWS_REGION+x} ]]; then
+    err "ERROR: AWS EC2 region not given."
+    exit 1
+  fi
 elif [[ "$RUN_ON" == "localhost" ]]; then
   if [[ ! -z ${AWS_KEYPAIR_NAME+x} ]] || [[ ! -z ${AWS_SSH_KEY_PATH+x} ]]; then
     err "ERROR: options '--aws-keypair-name' and '--aws-ssh-key-path' must be used with '--run on aws'."
@@ -575,6 +582,10 @@ elif [[ "$RUN_ON" == "localhost" ]]; then
     err "ERROR: option '--aws-ebs-volume-size' must be used with '--run on aws'."
     exit 1
   fi
+  if [[ ! -z ${AWS_REGION+x} ]]; then
+    err "ERROR: option '--aws-region' must be used with '--run on aws'."
+    exit 1
+  fi
 else
   err "ERROR: incorrect value for option --run-on"
   exit 1
@@ -626,7 +637,7 @@ if [[ ! -z ${DB_PREPARED_SNAPSHOT+x} ]] && [[ ! -z ${DB_DUMP+x} ]]; then
 fi

 if [[ ! -z ${DB_DUMP+x} ]]; then
-  checkPath DB_DUMP
+  check_path DB_DUMP
   if [[ "$?" -ne "0" ]]; then
     echo "$DB_DUMP" > $TMP_PATH/db_dump_tmp.sql
     DB_DUMP="$TMP_PATH/db_dump_tmp.sql"
@@ -639,7 +650,7 @@ if [[ -z ${PG_CONFIG+x} ]]; then
   err "NOTICE: No PostgreSQL config is provided. Will use default."
   # TODO(NikolayS) use "auto-tuning" – shared_buffers=1/4 RAM, etc
 else
-  checkPath PG_CONFIG
+  check_path PG_CONFIG
   if [[ "$?" -ne "0" ]]; then # TODO(NikolayS) support file:// and s3://
     # err "WARNING: Value given as pg_config: '$PG_CONFIG' not found as file will use as content"
     echo "$PG_CONFIG" > $TMP_PATH/pg_config_tmp.sql
@@ -665,18 +676,18 @@ if [[ -z ${ARTIFACTS_FILENAME+x} ]]; then
   ARTIFACTS_FILENAME=$DOCKER_MACHINE
 fi

-if [[ ! -z ${WORKLOAD_REAL+x} ]] && ! checkPath WORKLOAD_REAL; then
+if [[ ! -z ${WORKLOAD_REAL+x} ]] && ! check_path WORKLOAD_REAL; then
   err "ERROR: workload file '$WORKLOAD_REAL' not found."
   exit 1
 fi

-if [[ ! -z ${WORKLOAD_BASIS+x} ]] && ! checkPath WORKLOAD_BASIS; then
+if [[ ! -z ${WORKLOAD_BASIS+x} ]] && ! check_path WORKLOAD_BASIS; then
   err "ERROR: workload file '$WORKLOAD_BASIS' not found."
   exit 1
 fi

 if [[ ! -z ${WORKLOAD_CUSTOM_SQL+x} ]]; then
-  checkPath WORKLOAD_CUSTOM_SQL
+  check_path WORKLOAD_CUSTOM_SQL
   if [[ "$?" -ne "0" ]]; then
     # err "WARNING: Value given as workload-custom-sql: '$WORKLOAD_CUSTOM_SQL' not found as file will use as content"
     echo "$WORKLOAD_CUSTOM_SQL" > $TMP_PATH/workload_custom_sql_tmp.sql
@@ -685,7 +696,7 @@ if [[ ! -z ${WORKLOAD_CUSTOM_SQL+x} ]]; then
 fi

 if [[ ! -z ${COMMANDS_AFTER_CONTAINER_INIT+x} ]]; then
-  checkPath COMMANDS_AFTER_CONTAINER_INIT
+  check_path COMMANDS_AFTER_CONTAINER_INIT
   if [[ "$?" -ne "0" ]]; then
     # err "WARNING: Value given as after_db_init_code: '$COMMANDS_AFTER_CONTAINER_INIT' not found as file will use as content"
     echo "$COMMANDS_AFTER_CONTAINER_INIT" > $TMP_PATH/after_docker_init_code_tmp.sh
@@ -694,15 +705,15 @@ if [[ ! -z ${COMMANDS_AFTER_CONTAINER_INIT+x} ]]; then
 fi

 if [[ ! -z ${SQL_AFTER_DB_RESTORE+x} ]]; then
-  checkPath SQL_AFTER_DB_RESTORE
+  check_path SQL_AFTER_DB_RESTORE
   if [[ "$?" -ne "0" ]]; then
     echo "$SQL_AFTER_DB_RESTORE" > $TMP_PATH/after_db_init_code_tmp.sql
     SQL_AFTER_DB_RESTORE="$TMP_PATH/after_db_init_code_tmp.sql"
   fi
 fi

 if [[ ! -z ${SQL_BEFORE_DB_RESTORE+x} ]]; then
-  checkPath SQL_BEFORE_DB_RESTORE
+  check_path SQL_BEFORE_DB_RESTORE
   if [[ "$?" -ne "0" ]]; then
     # err "WARNING: Value given as before_db_init_code: '$SQL_BEFORE_DB_RESTORE' not found as file will use as content"
     echo "$SQL_BEFORE_DB_RESTORE" > $TMP_PATH/before_db_init_code_tmp.sql
@@ -711,23 +722,23 @@ if [[ ! -z ${SQL_BEFORE_DB_RESTORE+x} ]]; then
 fi

 if [[ ! -z ${DELTA_SQL_DO+x} ]]; then
-  checkPath DELTA_SQL_DO
+  check_path DELTA_SQL_DO
   if [[ "$?" -ne "0" ]]; then
     echo "$DELTA_SQL_DO" > $TMP_PATH/target_ddl_do_tmp.sql
     DELTA_SQL_DO="$TMP_PATH/target_ddl_do_tmp.sql"
   fi
 fi

 if [[ ! -z ${DELTA_SQL_UNDO+x} ]]; then
-  checkPath DELTA_SQL_UNDO
+  check_path DELTA_SQL_UNDO
   if [[ "$?" -ne "0" ]]; then
     echo "$DELTA_SQL_UNDO" > $TMP_PATH/target_ddl_undo_tmp.sql
     DELTA_SQL_UNDO="$TMP_PATH/target_ddl_undo_tmp.sql"
   fi
 fi

 if [[ ! -z ${DELTA_CONFIG+x} ]]; then
-  checkPath DELTA_CONFIG
+  check_path DELTA_CONFIG
   if [[ "$?" -ne "0" ]]; then
     echo "$DELTA_CONFIG" > $TMP_PATH/target_config_tmp.conf
     DELTA_CONFIG="$TMP_PATH/target_config_tmp.conf"
@@ -803,7 +814,7 @@ elif [[ "$RUN_ON" == "aws" ]]; then
     # Get max price from history and apply multiplier
     # TODO detect region and/or allow to choose via options
     prices=$(
-      aws --region=us-east-1 ec2 \
+      aws --region=$AWS_REGION ec2 \
       describe-spot-price-history --instance-types $AWS_EC2_TYPE --no-paginate \
       --start-time=$(date +%s) --product-descriptions="Linux/UNIX (Amazon VPC)" \
       --query 'SpotPriceHistory[*].{az:AvailabilityZone, price:SpotPrice}'
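The --query above yields one {az, price} pair per availability zone in the chosen region. A hedged sketch of how the "max price plus multiplier" step described by the comment could be expressed; the sample JSON and the 1.1 multiplier are made up for illustration and are not taken from the script:

```bash
# Illustrative input only; the real $prices comes from the AWS CLI call above.
prices='[{"az":"us-east-1a","price":"0.1041"},{"az":"us-east-1d","price":"0.1305"}]'
# Pick the priciest AZ, then apply an assumed 1.1 multiplier.
read -r best_az max_price < <(echo "$prices" \
  | jq -r 'max_by(.price | tonumber) | "\(.az) \(.price)"')
price=$(echo "$max_price * 1.1" | bc -l)
echo "$best_az $price"   # e.g. "us-east-1d .14355"
```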
@@ -821,26 +832,26 @@ elif [[ "$RUN_ON" == "aws" ]]; then
     msg "Increased price: $price"
     EC2_PRICE=$price
     if [ -z $zone ]; then
-      region='a' # default zone
+      zone='a' # default zone
     fi

     create_ec2_docker_machine $DOCKER_MACHINE $AWS_EC2_TYPE $EC2_PRICE \
-      60 $AWS_KEYPAIR_NAME $AWS_SSH_KEY_PATH $zone;
+      60 $AWS_KEYPAIR_NAME $AWS_SSH_KEY_PATH $AWS_REGION $zone;
     status=$(wait_ec2_docker_machine_ready "$DOCKER_MACHINE" true)
     if [[ "$status" == "price-too-low" ]]; then
       msg "Price $price is too low for $AWS_EC2_TYPE instance. Getting the up-to-date value from the error message..."

       # destroy_docker_machine $DOCKER_MACHINE
       # "docker-machine rm" doesn't work for "price-too-low" spot requests,
       # so we need to clean up them via aws cli interface directly
-      aws ec2 describe-spot-instance-requests \
+      aws --region=$AWS_REGION ec2 describe-spot-instance-requests \
       --filters 'Name=status-code,Values=price-too-low' \
       | grep SpotInstanceRequestId | awk '{gsub(/[,"]/, "", $2); print $2}' \
-      | xargs aws ec2 cancel-spot-instance-requests \
+      | xargs aws --region=$AWS_REGION ec2 cancel-spot-instance-requests \
       --spot-instance-request-ids || true

       corrrectPriceForLastFailedRequest=$( \
-        aws ec2 describe-spot-instance-requests \
+        aws --region=$AWS_REGION ec2 describe-spot-instance-requests \
         --filters="Name=launch.instance-type,Values=$AWS_EC2_TYPE" \
         | jq '.SpotInstanceRequests[] | select(.Status.Code == "price-too-low") | .Status.Message' \
         | grep -Eo '[0-9]+[.][0-9]+' | tail -n 1 &
@@ -853,7 +864,7 @@ elif [[ "$RUN_ON" == "aws" ]]; then
       DOCKER_MACHINE="${DOCKER_MACHINE//_/-}"
       # try start docker machine name with new price
       create_ec2_docker_machine $DOCKER_MACHINE $AWS_EC2_TYPE $EC2_PRICE \
-        60 $AWS_KEYPAIR_NAME $AWS_SSH_KEY_PATH $zone
+        60 $AWS_KEYPAIR_NAME $AWS_SSH_KEY_PATH $AWS_REGION $zone
       wait_ec2_docker_machine_ready "$DOCKER_MACHINE" false;
     else
       err "$(date "+%Y-%m-%d %H:%M:%S") ERROR: Cannot determine actual price for the instance $AWS_EC2_TYPE."
@@ -896,10 +907,10 @@ elif [[ "$RUN_ON" == "aws" ]]; then
   # Create new volume and attach them for non i3 instances if needed
   if [ ! -z ${AWS_EBS_VOLUME_SIZE+x} ]; then
     msg "Create and attach a new EBS volume (size: $AWS_EBS_VOLUME_SIZE GB)"
-    VOLUME_ID=$(aws ec2 create-volume --size $AWS_EBS_VOLUME_SIZE --region us-east-1 --availability-zone us-east-1a --volume-type gp2 | jq -r .VolumeId)
+    VOLUME_ID=$(aws --region=$AWS_REGION ec2 create-volume --size $AWS_EBS_VOLUME_SIZE --availability-zone us-east-1a --volume-type gp2 | jq -r .VolumeId)
     INSTANCE_ID=$(docker-machine ssh $DOCKER_MACHINE curl -s http://169.254.169.254/latest/meta-data/instance-id)
     sleep 10 # wait to volume will ready
-    attachResult=$(aws ec2 attach-volume --device /dev/xvdf --volume-id $VOLUME_ID --instance-id $INSTANCE_ID --region us-east-1)
+    attachResult=$(aws --region=$AWS_REGION ec2 attach-volume --device /dev/xvdf --volume-id $VOLUME_ID --instance-id $INSTANCE_ID)
     docker-machine ssh $DOCKER_MACHINE sudo mkfs.ext4 /dev/xvdf
     docker-machine ssh $DOCKER_MACHINE sudo mount /dev/xvdf /home/storage
   fi
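For reference, a hedged sketch of the same create-and-attach sequence with the availability zone derived from the region variable rather than the fixed us-east-1a string; it assumes $AWS_REGION and the single zone letter in $zone are set as earlier in the script, and is not part of the diff itself:

```bash
# Sketch only. Assumes AWS_REGION holds e.g. "eu-central-1" and zone holds a
# single letter such as "b"; falls back to "a" if zone is unset.
az="${AWS_REGION}${zone:-a}"
VOLUME_ID=$(aws --region="$AWS_REGION" ec2 create-volume \
  --size "$AWS_EBS_VOLUME_SIZE" --availability-zone "$az" --volume-type gp2 \
  | jq -r .VolumeId)
aws --region="$AWS_REGION" ec2 attach-volume --device /dev/xvdf \
  --volume-id "$VOLUME_ID" --instance-id "$INSTANCE_ID"
```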