Skip to content
This repository was archived by the owner on Aug 16, 2021. It is now read-only.

Commit 397c31f

Browse files
authored
Merge pull request #93 from postgres-ai/dmius-region-param
Region param added to nancy run
2 parents 058a38f + 0eda757 commit 397c31f

8 files changed

+91
-56
lines changed

nancy_run.sh

Lines changed: 64 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ KEEP_ALIVE=0
1616
VERBOSE_OUTPUT_REDIRECT=" > /dev/null"
1717
EBS_SIZE_MULTIPLIER=15
1818
POSTGRES_VERSION_DEFAULT=10
19-
AWS_BLOCK_DURATION=0 # by default no time limit
19+
AWS_BLOCK_DURATION=0
2020

2121
#######################################
2222
# Print an error/warning/notice message to STDERR
@@ -72,7 +72,7 @@ function msg() {
7272
# 1 if the input is empty,
7373
# -1 otherwise.
7474
#######################################
75-
function checkPath() {
75+
function check_path() {
7676
if [[ -z $1 ]]; then
7777
return 1
7878
fi
@@ -113,20 +113,22 @@ function checkPath() {
113113
# (text) [5] AWS keypair to use
114114
# (text) [6] Path to Private Key file to use for instance
115115
# Matching public key with .pub extension should exist
116-
# (text) [7] The AWS zone to launch the instance in (one of a,b,c,d,e)
116+
# (text) [7] The AWS region to launch the instance (for example: us-east-1, eu-central-1)
117+
# (text) [8] The AWS zone to launch the instance in (one of a,b,c,d,e)
117118
# Returns:
118119
# None
119120
#######################################
120121
function create_ec2_docker_machine() {
121-
msg "Attempt to create a docker machine in zone $7 with price $3..."
122+
msg "Attempt to create a docker machine in region $7 with price $3..."
122123
docker-machine create --driver=amazonec2 \
123124
--amazonec2-request-spot-instance \
124125
--amazonec2-instance-type=$2 \
125126
--amazonec2-spot-price=$3 \
126127
--amazonec2-block-duration-minutes=$4 \
127128
--amazonec2-keypair-name="$5" \
128129
--amazonec2-ssh-keypath="$6" \
129-
--amazonec2-zone $7 \
130+
--amazonec2-region="$7" \
131+
--amazonec2-zone="$8" \
130132
$1 2> >(grep -v "failed waiting for successful resource state" >&2) &
131133
}
132134

@@ -163,7 +165,7 @@ function destroy_docker_machine() {
163165
# None
164166
#######################################
165167
function wait_ec2_docker_machine_ready() {
166-
machine=$1
168+
local machine=$1
167169
local check_price=$2
168170
while true; do
169171
sleep 5;
@@ -172,7 +174,7 @@ function wait_ec2_docker_machine_ready() {
172174
((stop_now==1)) && return 0
173175
if $check_price ; then
174176
status=$( \
175-
aws ec2 describe-spot-instance-requests \
177+
aws --region=$AWS_REGION ec2 describe-spot-instance-requests \
176178
--filters="Name=launch.instance-type,Values=$AWS_EC2_TYPE" \
177179
| jq '.SpotInstanceRequests | sort_by(.CreateTime) | .[] | .Status.Code' \
178180
| tail -n 1
@@ -205,7 +207,7 @@ function cleanup_and_exit {
205207
if [ ! -z ${VOLUME_ID+x} ]; then
206208
msg "Wait and delete volume $VOLUME_ID"
207209
sleep 60 # wait for the machine to be removed
208-
delvolout=$(aws ec2 delete-volume --volume-id $VOLUME_ID)
210+
delvolout=$(aws --region=$AWS_REGION ec2 delete-volume --volume-id $VOLUME_ID)
209211
msg "Volume $VOLUME_ID deleted"
210212
fi
211213
else
@@ -499,6 +501,8 @@ while [ $# -gt 0 ]; do
499501
AWS_SSH_KEY_PATH="$2"; shift 2 ;;
500502
--aws-ebs-volume-size )
501503
AWS_EBS_VOLUME_SIZE="$2"; shift 2 ;;
504+
--aws-region )
505+
AWS_REGION="$2"; shift 2 ;;
502506
--aws-block-duration )
503507
AWS_BLOCK_DURATION=$2; shift 2 ;;
504508

@@ -598,18 +602,23 @@ if [[ "$RUN_ON" == "aws" ]]; then
598602
err "ERROR: AWS keypair name and ssh key file must be specified to run on AWS EC2."
599603
exit 1
600604
else
601-
checkPath AWS_SSH_KEY_PATH
605+
check_path AWS_SSH_KEY_PATH
602606
fi
603607
if [[ -z ${AWS_EC2_TYPE+x} ]]; then
604608
err "ERROR: AWS EC2 Instance type not given."
605609
exit 1
606610
fi
611+
if [[ -z ${AWS_REGION+x} ]]; then
612+
err "NOTICE: AWS EC2 region not given. Will use us-east-1."
613+
AWS_REGION='us-east-1'
614+
fi
607615
if [[ -z ${AWS_BLOCK_DURATION+x} ]]; then
608-
err "NOTICE: Container live time duration is not given."
616+
err "NOTICE: Container live time duration is not given. Will use 60 minutes."
617+
AWS_BLOCK_DURATION=60
609618
else
610619
case $AWS_BLOCK_DURATION in
611620
0|60|120|240|300|360)
612-
dbg "Container live time duration is $AWS_BLOCK_DURATION. "
621+
dbg "Container live time duration is $AWS_BLOCK_DURATION."
613622
;;
614623
*)
615624
err "Container live time duration (--aws-block-duration) has wrong value: $AWS_BLOCK_DURATION. Available values of AWS spot instance duration in minutes is 60, 120, 180, 240, 300, or 360)."
@@ -619,19 +628,23 @@ if [[ "$RUN_ON" == "aws" ]]; then
619628
fi
620629
elif [[ "$RUN_ON" == "localhost" ]]; then
621630
if [[ ! -z ${AWS_KEYPAIR_NAME+x} ]] || [[ ! -z ${AWS_SSH_KEY_PATH+x} ]] ; then
622-
err "ERROR: options '--aws-keypair-name' and '--aws-ssh-key-path' must be used with '--run on aws'."
631+
err "ERROR: options '--aws-keypair-name' and '--aws-ssh-key-path' must be used with '--run-on aws'."
623632
exit 1
624633
fi
625634
if [[ ! -z ${AWS_EC2_TYPE+x} ]]; then
626-
err "ERROR: option '--aws-ec2-type' must be used with '--run on aws'."
635+
err "ERROR: option '--aws-ec2-type' must be used with '--run-on aws'."
627636
exit 1
628637
fi
629638
if [[ ! -z ${AWS_EBS_VOLUME_SIZE+x} ]]; then
630-
err "ERROR: option '--aws-ebs-volume-size' must be used with '--run on aws'."
639+
err "ERROR: option '--aws-ebs-volume-size' must be used with '--run-on aws'."
640+
exit 1
641+
fi
642+
if [[ ! -z ${AWS_REGION+x} ]]; then
643+
err "ERROR: option '--aws-region' must be used with '--run-on aws'."
631644
exit 1
632645
fi
633646
if [[ "$AWS_BLOCK_DURATION" != "0" ]]; then
634-
err "ERROR: option '--aws-block-duration' must be used with '--run on aws'."
647+
err "ERROR: option '--aws-block-duration' must be used with '--run-on aws'."
635648
exit 1
636649
fi
637650
else
@@ -685,7 +698,7 @@ if [[ ! -z ${DB_PREPARED_SNAPSHOT+x} ]] && [[ ! -z ${DB_DUMP+x} ]]; then
685698
fi
686699

687700
if [[ ! -z ${DB_DUMP+x} ]]; then
688-
checkPath DB_DUMP
701+
check_path DB_DUMP
689702
if [[ "$?" -ne "0" ]]; then
690703
echo "$DB_DUMP" > $TMP_PATH/db_dump_tmp.sql
691704
DB_DUMP="$TMP_PATH/db_dump_tmp.sql"
@@ -698,7 +711,7 @@ if [[ -z ${PG_CONFIG+x} ]]; then
698711
err "NOTICE: No PostgreSQL config is provided. Will use default."
699712
# TODO(NikolayS) use "auto-tuning" – shared_buffers=1/4 RAM, etc
700713
else
701-
checkPath PG_CONFIG
714+
check_path PG_CONFIG
702715
if [[ "$?" -ne "0" ]]; then # TODO(NikolayS) support file:// and s3://
703716
#err "WARNING: Value given as pg_config: '$PG_CONFIG' not found as file will use as content"
704717
echo "$PG_CONFIG" > $TMP_PATH/pg_config_tmp.sql
@@ -724,18 +737,18 @@ if [[ -z ${ARTIFACTS_FILENAME+x} ]]; then
724737
ARTIFACTS_FILENAME=$DOCKER_MACHINE
725738
fi
726739

727-
if [[ ! -z ${WORKLOAD_REAL+x} ]] && ! checkPath WORKLOAD_REAL; then
740+
if [[ ! -z ${WORKLOAD_REAL+x} ]] && ! check_path WORKLOAD_REAL; then
728741
err "ERROR: workload file '$WORKLOAD_REAL' not found."
729742
exit 1
730743
fi
731744

732-
if [[ ! -z ${WORKLOAD_BASIS+x} ]] && ! checkPath WORKLOAD_BASIS; then
745+
if [[ ! -z ${WORKLOAD_BASIS+x} ]] && ! check_path WORKLOAD_BASIS; then
733746
err "ERROR: workload file '$WORKLOAD_BASIS' not found."
734747
exit 1
735748
fi
736749

737750
if [[ ! -z ${WORKLOAD_CUSTOM_SQL+x} ]]; then
738-
checkPath WORKLOAD_CUSTOM_SQL
751+
check_path WORKLOAD_CUSTOM_SQL
739752
if [[ "$?" -ne "0" ]]; then
740753
#err "WARNING: Value given as workload-custom-sql: '$WORKLOAD_CUSTOM_SQL' not found as file will use as content"
741754
echo "$WORKLOAD_CUSTOM_SQL" > $TMP_PATH/workload_custom_sql_tmp.sql
@@ -744,7 +757,7 @@ if [[ ! -z ${WORKLOAD_CUSTOM_SQL+x} ]]; then
744757
fi
745758

746759
if [[ ! -z ${COMMANDS_AFTER_CONTAINER_INIT+x} ]]; then
747-
checkPath COMMANDS_AFTER_CONTAINER_INIT
760+
check_path COMMANDS_AFTER_CONTAINER_INIT
748761
if [[ "$?" -ne "0" ]]; then
749762
#err "WARNING: Value given as after_db_init_code: '$COMMANDS_AFTER_CONTAINER_INIT' not found as file will use as content"
750763
echo "$COMMANDS_AFTER_CONTAINER_INIT" > $TMP_PATH/after_docker_init_code_tmp.sh
@@ -753,15 +766,15 @@ if [[ ! -z ${COMMANDS_AFTER_CONTAINER_INIT+x} ]]; then
753766
fi
754767

755768
if [[ ! -z ${SQL_AFTER_DB_RESTORE+x} ]]; then
756-
checkPath SQL_AFTER_DB_RESTORE
769+
check_path SQL_AFTER_DB_RESTORE
757770
if [[ "$?" -ne "0" ]]; then
758771
echo "$SQL_AFTER_DB_RESTORE" > $TMP_PATH/after_db_init_code_tmp.sql
759772
SQL_AFTER_DB_RESTORE="$TMP_PATH/after_db_init_code_tmp.sql"
760773
fi
761774
fi
762775

763776
if [[ ! -z ${SQL_BEFORE_DB_RESTORE+x} ]]; then
764-
checkPath SQL_BEFORE_DB_RESTORE
777+
check_path SQL_BEFORE_DB_RESTORE
765778
if [[ "$?" -ne "0" ]]; then
766779
#err "WARNING: Value given as before_db_init_code: '$SQL_BEFORE_DB_RESTORE' not found as file will use as content"
767780
echo "$SQL_BEFORE_DB_RESTORE" > $TMP_PATH/before_db_init_code_tmp.sql
@@ -770,23 +783,23 @@ if [[ ! -z ${SQL_BEFORE_DB_RESTORE+x} ]]; then
770783
fi
771784

772785
if [[ ! -z ${DELTA_SQL_DO+x} ]]; then
773-
checkPath DELTA_SQL_DO
786+
check_path DELTA_SQL_DO
774787
if [[ "$?" -ne "0" ]]; then
775788
echo "$DELTA_SQL_DO" > $TMP_PATH/target_ddl_do_tmp.sql
776789
DELTA_SQL_DO="$TMP_PATH/target_ddl_do_tmp.sql"
777790
fi
778791
fi
779792

780793
if [[ ! -z ${DELTA_SQL_UNDO+x} ]]; then
781-
checkPath DELTA_SQL_UNDO
794+
check_path DELTA_SQL_UNDO
782795
if [[ "$?" -ne "0" ]]; then
783796
echo "$DELTA_SQL_UNDO" > $TMP_PATH/target_ddl_undo_tmp.sql
784797
DELTA_SQL_UNDO="$TMP_PATH/target_ddl_undo_tmp.sql"
785798
fi
786799
fi
787800

788801
if [[ ! -z ${DELTA_CONFIG+x} ]]; then
789-
checkPath DELTA_CONFIG
802+
check_path DELTA_CONFIG
790803
if [[ "$?" -ne "0" ]]; then
791804
echo "$DELTA_CONFIG" > $TMP_PATH/target_config_tmp.conf
792805
DELTA_CONFIG="$TMP_PATH/target_config_tmp.conf"
@@ -795,8 +808,9 @@ fi
795808

796809
if [[ "$RUN_ON" == "aws" ]]; then
797810
if [[ ! -z ${AWS_EBS_VOLUME_SIZE+x} ]]; then
798-
if ! [[ $AWS_EBS_VOLUME_SIZE =~ '^[0-9]+$' ]] ; then
799-
err "ERROR: --ebs-volume-size must be integer."
811+
re='^[0-9]+$'
812+
if ! [[ $AWS_EBS_VOLUME_SIZE =~ $re ]] ; then
813+
err "ERROR: --aws-ebs-volume-size must be integer."
800814
exit 1
801815
fi
802816
else
@@ -948,7 +962,7 @@ elif [[ "$RUN_ON" == "aws" ]]; then
948962
## Get max price from history and apply multiplier
949963
# TODO detect region and/or allow to choose via options
950964
prices=$(
951-
aws --region=us-east-1 ec2 \
965+
aws --region=$AWS_REGION ec2 \
952966
describe-spot-price-history --instance-types $AWS_EC2_TYPE --no-paginate \
953967
--start-time=$(date +%s) --product-descriptions="Linux/UNIX (Amazon VPC)" \
954968
--query 'SpotPriceHistory[*].{az:AvailabilityZone, price:SpotPrice}'
@@ -959,33 +973,34 @@ elif [[ "$RUN_ON" == "aws" ]]; then
959973
region="${region/\"/}"
960974
minprice="${minprice/\"/}"
961975
minprice="${minprice/\"/}"
962-
zone=${region: -1}
963-
msg "Min price from history: $minprice in $region (zone: $zone)"
976+
AWS_ZONE=${region: -1}
977+
AWS_REGION=${region:: -1}
978+
msg "Min price from history: $minprice in $region (zone: $AWS_ZONE)"
964979
multiplier="1.01"
965980
price=$(echo "$minprice * $multiplier" | bc -l)
966981
msg "Increased price: $price"
967982
EC2_PRICE=$price
968-
if [ -z $zone ]; then
969-
region='a' #default zone
983+
if [[ -z $AWS_ZONE ]]; then
984+
AWS_ZONE='a' #default zone
970985
fi
971986

972-
createDockerMachine $DOCKER_MACHINE $AWS_EC2_TYPE $EC2_PRICE \
973-
$AWS_BLOCK_DURATION $AWS_KEYPAIR_NAME $AWS_SSH_KEY_PATH $zone;
974-
status=$(waitEC2Ready "docker-machine create" "$DOCKER_MACHINE" 1)
987+
create_ec2_docker_machine $DOCKER_MACHINE $AWS_EC2_TYPE $EC2_PRICE \
988+
$AWS_BLOCK_DURATION $AWS_KEYPAIR_NAME $AWS_SSH_KEY_PATH $AWS_REGION $AWS_ZONE;
989+
status=$(wait_ec2_docker_machine_ready "$DOCKER_MACHINE" true)
975990
if [[ "$status" == "price-too-low" ]]; then
976991
msg "Price $price is too low for $AWS_EC2_TYPE instance. Getting the up-to-date value from the error message..."
977992

978993
#destroy_docker_machine $DOCKER_MACHINE
979994
# "docker-machine rm" doesn't work for "price-too-low" spot requests,
980995
# so we need to clean up them via aws cli interface directly
981-
aws ec2 describe-spot-instance-requests \
996+
aws --region=$AWS_REGION ec2 describe-spot-instance-requests \
982997
--filters 'Name=status-code,Values=price-too-low' \
983998
| grep SpotInstanceRequestId | awk '{gsub(/[,"]/, "", $2); print $2}' \
984-
| xargs aws ec2 cancel-spot-instance-requests \
999+
| xargs aws --region=$AWS_REGION ec2 cancel-spot-instance-requests \
9851000
--spot-instance-request-ids || true
9861001

9871002
corrrectPriceForLastFailedRequest=$( \
988-
aws ec2 describe-spot-instance-requests \
1003+
aws --region=$AWS_REGION ec2 describe-spot-instance-requests \
9891004
--filters="Name=launch.instance-type,Values=$AWS_EC2_TYPE" \
9901005
| jq '.SpotInstanceRequests[] | select(.Status.Code == "price-too-low") | .Status.Message' \
9911006
| grep -Eo '[0-9]+[.][0-9]+' | tail -n 1 &
@@ -997,10 +1012,9 @@ elif [[ "$RUN_ON" == "aws" ]]; then
9971012
DOCKER_MACHINE="nancy-$CURRENT_TS"
9981013
DOCKER_MACHINE="${DOCKER_MACHINE//_/-}"
9991014
#try start docker machine name with new price
1000-
msg "Attempt to create a new docker machine: $DOCKER_MACHINE with price: $EC2_PRICE."
1001-
createDockerMachine $DOCKER_MACHINE $AWS_EC2_TYPE $EC2_PRICE \
1002-
$AWS_BLOCK_DURATION $AWS_KEYPAIR_NAME $AWS_SSH_KEY_PATH $zone;
1003-
waitEC2Ready "docker-machine create" "$DOCKER_MACHINE" 0;
1015+
create_ec2_docker_machine $DOCKER_MACHINE $AWS_EC2_TYPE $EC2_PRICE \
1016+
$AWS_BLOCK_DURATION $AWS_KEYPAIR_NAME $AWS_SSH_KEY_PATH $AWS_REGION $AWS_ZONE
1017+
wait_ec2_docker_machine_ready "$DOCKER_MACHINE" false;
10041018
else
10051019
err "ERROR: Cannot determine actual price for the instance $AWS_EC2_TYPE."
10061020
exit 1;
@@ -1026,28 +1040,23 @@ elif [[ "$RUN_ON" == "aws" ]]; then
10261040
docker-machine ssh $DOCKER_MACHINE sudo add-apt-repository -y ppa:sbates
10271041
docker-machine ssh $DOCKER_MACHINE "sudo apt-get update || true"
10281042
docker-machine ssh $DOCKER_MACHINE sudo apt-get install -y nvme-cli
1029-
1030-
docker-machine ssh $DOCKER_MACHINE "echo \"# partition table of /dev/nvme0n1\" > /tmp/nvme.part"
1031-
docker-machine ssh $DOCKER_MACHINE "echo \"unit: sectors \" >> /tmp/nvme.part"
1032-
docker-machine ssh $DOCKER_MACHINE "echo \"/dev/nvme0n1p1 : start=2048, size=1855466702, Id=83 \" >> /tmp/nvme.part"
1033-
docker-machine ssh $DOCKER_MACHINE "echo \"/dev/nvme0n1p2 : start=0, size=0, Id=0 \" >> /tmp/nvme.part"
1034-
docker-machine ssh $DOCKER_MACHINE "echo \"/dev/nvme0n1p3 : start=0, size=0, Id=0 \" >> /tmp/nvme.part"
1035-
docker-machine ssh $DOCKER_MACHINE "echo \"/dev/nvme0n1p4 : start=0, size=0, Id=0 \" >> /tmp/nvme.part"
1036-
1037-
docker-machine ssh $DOCKER_MACHINE "sudo sfdisk /dev/nvme0n1 < /tmp/nvme.part"
1038-
docker-machine ssh $DOCKER_MACHINE "sudo mkfs -t ext4 /dev/nvme0n1p1"
1043+
docker-machine ssh $DOCKER_MACHINE "sudo parted -a optimal -s /dev/nvme0n1 mklabel gpt"
1044+
docker-machine ssh $DOCKER_MACHINE "sudo parted -a optimal -s /dev/nvme0n1 mkpart primary 0% 100%"
1045+
docker-machine ssh $DOCKER_MACHINE "sudo mkfs.ext4 /dev/nvme0n1p1"
10391046
docker-machine ssh $DOCKER_MACHINE "sudo mount /dev/nvme0n1p1 /home/storage"
1047+
docker-machine ssh $DOCKER_MACHINE "sudo df -h /dev/nvme0n1p1"
10401048
else
10411049
msg "Use EBS volume"
10421050
# Create new volume and attach them for non i3 instances if needed
10431051
if [ ! -z ${AWS_EBS_VOLUME_SIZE+x} ]; then
10441052
msg "Create and attach a new EBS volume (size: $AWS_EBS_VOLUME_SIZE GB)"
1045-
VOLUME_ID=$(aws ec2 create-volume --size $AWS_EBS_VOLUME_SIZE --region us-east-1 --availability-zone us-east-1a --volume-type gp2 | jq -r .VolumeId)
1053+
VOLUME_ID=$(aws --region=$AWS_REGION ec2 create-volume --size $AWS_EBS_VOLUME_SIZE --availability-zone $AWS_REGION$AWS_ZONE --volume-type gp2 | jq -r .VolumeId)
10461054
INSTANCE_ID=$(docker-machine ssh $DOCKER_MACHINE curl -s http://169.254.169.254/latest/meta-data/instance-id)
10471055
sleep 10 # wait to volume will ready
1048-
attachResult=$(aws ec2 attach-volume --device /dev/xvdf --volume-id $VOLUME_ID --instance-id $INSTANCE_ID --region us-east-1)
1056+
attachResult=$(aws --region=$AWS_REGION ec2 attach-volume --device /dev/xvdf --volume-id $VOLUME_ID --instance-id $INSTANCE_ID)
10491057
docker-machine ssh $DOCKER_MACHINE sudo mkfs.ext4 /dev/xvdf
10501058
docker-machine ssh $DOCKER_MACHINE sudo mount /dev/xvdf /home/storage
1059+
docker-machine ssh $DOCKER_MACHINE "sudo df -h /dev/xvdf"
10511060
fi
10521061
fi
10531062

tests/nancy_run_invalid_aws_option.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ output=$(
99
2>&1
1010
)
1111

12-
if [[ $output =~ "must be used with '--run on aws'" ]]; then
12+
if [[ $output =~ "must be used with '--run-on aws'" ]]; then
1313
echo -e "\e[36mOK\e[39m"
1414
else
1515
>&2 echo -e "\e[31mFAILED\e[39m"

tests/nancy_run_options_both_dump_snapshot.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ read -r -d '' params <<PARAMS
55
--aws-ssh-key-path "/home/someuser/.ssh/awskey.pem" \
66
--aws-ec2-type "r4.large" \
77
--s3cfg-path "/home/someuser/.s3cfg" \
8+
--aws-region "us-east-1" \
89
--workload-real "s3://somebucket/db.sql.30min.pgreplay" \
910
--tmp-path tmp \
1011
--db-dump "s3://somebucket/dump.sql.bz2" \

tests/nancy_run_options_ddl_do+_undo-.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ read -r -d '' params <<PARAMS
44
--run-on aws --aws-keypair-name awskey --pg-version 9.6 \
55
--aws-ssh-key-path "/home/someuser/.ssh/awskey.pem" \
66
--aws-ec2-type "r4.large" \
7+
--aws-region "us-east-1" \
78
--s3cfg-path "/home/someuser/.s3cfg" \
89
--workload-real "s3://somebucket/db.sql.30min.pgreplay" \
910
--tmp-path tmp \

tests/nancy_run_options_ddl_do-_undo+.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ read -r -d '' params <<PARAMS
44
--run-on aws --aws-keypair-name awskey --pg-version 9.6 \
55
--aws-ssh-key-path "/home/someuser/.ssh/awskey.pem" \
66
--aws-ec2-type "r4.large" \
7+
--aws-region "us-east-1" \
78
--s3cfg-path "/home/someuser/.s3cfg" \
89
--workload-real "s3://somebucket/db.sql.30min.pgreplay" \
910
--tmp-path tmp \

tests/nancy_run_options_multi_workloads.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ read -r -d '' params <<PARAMS
44
--run-on aws --aws-keypair-name awskey --pg-version 9.6 \
55
--aws-ssh-key-path "/home/someuser/.ssh/awskey.pem" \
66
--aws-ec2-type "r4.large" \
7+
--aws-region "us-east-1" \
78
--s3cfg-path "/home/someuser/.s3cfg" \
89
--workload-real "s3://somebucket/db.sql.30min.pgreplay" \
910
--workload-custom-sql "select\tnow();" \

tests/nancy_run_options_no_dump_snapshot.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ read -r -d '' params <<PARAMS
44
--run-on aws --aws-keypair-name awskey --pg-version 9.6 \
55
--aws-ssh-key-path "/home/someuser/.ssh/awskey.pem" \
66
--aws-ec2-type "r4.large" \
7+
--aws-region "us-east-1" \
78
--s3cfg-path "/home/someuser/.s3cfg" \
89
--workload-real "s3://somebucket/db.sql.30min.pgreplay" \
910
--tmp-path tmp

0 commit comments

Comments
 (0)