diff --git a/lib/base/lib/assets/base/node-start.sh b/lib/base/lib/assets/base/node-start.sh
deleted file mode 100644
index bdd37569..00000000
--- a/lib/base/lib/assets/base/node-start.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-set -e
-export CLIENT=geth
-
-echo "Script is starting client $CLIENT"
-# Start the node
-cd /home/bcuser/node
-/usr/local/bin/docker-compose -f /home/bcuser/node/docker-compose.yml up -d
-
-echo "Started"
diff --git a/lib/base/lib/assets/base/node-stop.sh b/lib/base/lib/assets/base/node-stop.sh
deleted file mode 100644
index 613e58f8..00000000
--- a/lib/base/lib/assets/base/node-stop.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-set -e
-export CLIENT=geth
-echo "Script is stopping client $CLIENT"
-# Stop the node
-cd /home/bcuser/node
-/usr/local/bin/docker-compose -f /home/bcuser/node/docker-compose.yml down
-
-echo "Stopped"
diff --git a/lib/base/lib/assets/cfn-hup/cfn-auto-reloader.conf b/lib/base/lib/assets/instance/cfn-hup/cfn-auto-reloader.conf
similarity index 100%
rename from lib/base/lib/assets/cfn-hup/cfn-auto-reloader.conf
rename to lib/base/lib/assets/instance/cfn-hup/cfn-auto-reloader.conf
diff --git a/lib/base/lib/assets/cfn-hup/cfn-hup.conf b/lib/base/lib/assets/instance/cfn-hup/cfn-hup.conf
similarity index 100%
rename from lib/base/lib/assets/cfn-hup/cfn-hup.conf
rename to lib/base/lib/assets/instance/cfn-hup/cfn-hup.conf
diff --git a/lib/base/lib/assets/cfn-hup/cfn-hup.service b/lib/base/lib/assets/instance/cfn-hup/cfn-hup.service
similarity index 100%
rename from lib/base/lib/assets/cfn-hup/cfn-hup.service
rename to lib/base/lib/assets/instance/cfn-hup/cfn-hup.service
diff --git a/lib/base/lib/assets/instance/cfn-hup/setup.sh b/lib/base/lib/assets/instance/cfn-hup/setup.sh
new file mode 100755
index 00000000..418811e4
--- /dev/null
+++ b/lib/base/lib/assets/instance/cfn-hup/setup.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+if [ -n "$1" ]; then
+  export STACK_ID=$1
+else
+  echo "Error: No Stack ID is provided"
+  echo "Usage: instance/cfn-hup/setup.sh <stack-id> <aws-region>"
+  exit 1
+fi
+
+if [ -n "$2" ]; then
+  export AWS_REGION=$2
+else
+  echo "Error: No AWS Region is provided"
+  echo "Usage: instance/cfn-hup/setup.sh <stack-id> <aws-region>"
+  exit 1
+fi
+
+  echo "Install CloudFormation helper scripts"
+  mkdir -p /opt/aws/
+  pip3 install --break-system-packages https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-py3-latest.tar.gz
+  ln -s /usr/local/init/ubuntu/cfn-hup /etc/init.d/cfn-hup
+
+  echo "Configuring CloudFormation helper scripts"
+  mkdir -p /etc/cfn/
+  mv /opt/instance/cfn-hup/cfn-hup.conf /etc/cfn/cfn-hup.conf
+  sed -i "s;__AWS_STACK_ID__;\"$STACK_ID\";g" /etc/cfn/cfn-hup.conf
+  sed -i "s;__AWS_REGION__;\"$AWS_REGION\";g" /etc/cfn/cfn-hup.conf
+
+  mkdir -p /etc/cfn/hooks.d/system
+  mv /opt/instance/cfn-hup/cfn-auto-reloader.conf /etc/cfn/hooks.d/cfn-auto-reloader.conf
+  sed -i "s;__AWS_STACK_NAME__;\"$STACK_ID\";g" /etc/cfn/hooks.d/cfn-auto-reloader.conf
+  sed -i "s;__AWS_REGION__;\"$AWS_REGION\";g" /etc/cfn/hooks.d/cfn-auto-reloader.conf
+
+  echo "Starting CloudFormation helper scripts as a service"
+  mv /opt/instance/cfn-hup/cfn-hup.service /etc/systemd/system/cfn-hup.service
+
+  systemctl daemon-reload
+  systemctl enable --now cfn-hup
+  systemctl start cfn-hup.service
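Reviewer note: the new setup script takes the stack name (or ID) and region as positional arguments, matching how user-data-alinux.sh invokes it further down (it substitutes `$1` for both the `__AWS_STACK_ID__` and `__AWS_STACK_NAME__` template placeholders, since the caller passes the stack name). A minimal smoke test, assuming the assets archive was unpacked to /opt as the user-data script does; the stack name and region values are placeholders:

```bash
#!/bin/bash
# Illustrative smoke test for instance/cfn-hup/setup.sh; values are assumptions.
set -euo pipefail

STACK_NAME="base-single-node"   # assumption: your deployed stack's name
AWS_REGION="us-east-1"

/opt/instance/cfn-hup/setup.sh "$STACK_NAME" "$AWS_REGION"

# The helper daemon should now be registered and running:
systemctl is-active cfn-hup
grep "stack=" /etc/cfn/cfn-hup.conf
```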
diff --git a/lib/base/lib/assets/restore-from-snapshot-archive-s3.sh b/lib/base/lib/assets/instance/storage/restore-from-snapshot-archive-s3.sh
similarity index 93%
rename from lib/base/lib/assets/restore-from-snapshot-archive-s3.sh
rename to lib/base/lib/assets/instance/storage/restore-from-snapshot-archive-s3.sh
index 2f0cbaff..5bd8aa17 100644
--- a/lib/base/lib/assets/restore-from-snapshot-archive-s3.sh
+++ b/lib/base/lib/assets/instance/storage/restore-from-snapshot-archive-s3.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 set +e
 
-source /etc/environment
+source /etc/cdk_environment
 
 echo "Downloading Snapshot."
 
@@ -28,4 +28,4 @@ echo "Snapshot is ready, starting the service.."
 chown -R bcuser:bcuser $SNAPSHOT_DIR
 sudo systemctl daemon-reload
-sudo systemctl enable --now base
+sudo systemctl enable --now node
diff --git a/lib/base/lib/assets/restore-from-snapshot-http.sh b/lib/base/lib/assets/instance/storage/restore-from-snapshot-http.sh
similarity index 96%
rename from lib/base/lib/assets/restore-from-snapshot-http.sh
rename to lib/base/lib/assets/instance/storage/restore-from-snapshot-http.sh
index 5b4c7352..c36a5af0 100644
--- a/lib/base/lib/assets/restore-from-snapshot-http.sh
+++ b/lib/base/lib/assets/instance/storage/restore-from-snapshot-http.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 set +e
 
-source /etc/environment
+source /etc/cdk_environment
 
 echo "Downloading Snapshot."
 
@@ -59,4 +59,4 @@ echo "Snapshot is ready, starting the service.."
 chown -R bcuser:bcuser /data
 sudo systemctl daemon-reload
-sudo systemctl enable --now base
+sudo systemctl enable --now node
diff --git a/lib/base/lib/assets/instance/storage/setup.sh b/lib/base/lib/assets/instance/storage/setup.sh
new file mode 100755
index 00000000..ca5e6b81
--- /dev/null
+++ b/lib/base/lib/assets/instance/storage/setup.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+
+make_fs () {
+  # If the file system is ext4, use mkfs.ext4; if xfs, use mkfs.xfs
+  if [ -z "$1" ]; then
+    echo "Error: No file system type provided."
+    echo "Usage: make_fs <file-system-type> <volume-id>"
+    exit 1
+  fi
+
+  if [ -z "$2" ]; then
+    echo "Error: No target volume ID provided."
+    echo "Usage: make_fs <file-system-type> <volume-id>"
+    exit 1
+  fi
+
+  local file_system=$1
+  local volume_id=$2
+  if [ "$file_system" == "ext4" ]; then
+    mkfs -t ext4 "$volume_id"
+    return "$?"
+  else
+    mkfs.xfs -f "$volume_id"
+    return "$?"
+  fi
+}
+
+# We need an nvme disk that is not mounted and not partitioned
+get_all_empty_nvme_disks () {
+  local all_not_mounted_nvme_disks
+  local all_mounted_nvme_partitions
+  local unmounted_nvme_disks=()
+  local sorted_unmounted_nvme_disks
+
+  # The disk will only be mounted when the nvme disk is larger than 100GB, to avoid storing blockchain node data directly on the root EBS disk (which is 46GB by default)
+  all_not_mounted_nvme_disks=$(lsblk -lnb | awk '{if ($7 == "" && $4 > 100000000) {print $1}}' | grep nvme)
+  all_mounted_nvme_partitions=$(mount | awk '{print $1}' | grep /dev/nvme)
+  for disk in ${all_not_mounted_nvme_disks[*]}; do
+    if [[ ! "${all_mounted_nvme_partitions[*]}" =~ $disk ]]; then
+      unmounted_nvme_disks+=("$disk")
+    fi
+  done
+  # Sort the array
+  sorted_unmounted_nvme_disks=($(printf '%s\n' "${unmounted_nvme_disks[*]}" | sort))
+  echo "${sorted_unmounted_nvme_disks[*]}"
+}
+
+get_next_empty_nvme_disk () {
+  local sorted_unmounted_nvme_disks
+  sorted_unmounted_nvme_disks=($(get_all_empty_nvme_disks))
+  # Return the first unmounted nvme disk
+  echo "/dev/${sorted_unmounted_nvme_disks[0]}"
+}
+
+# Take the mount directory name as a command line parameter
+if [ -n "$1" ]; then
+  DIR_NAME=$1
+else
+  echo "Error: No data file system mount path is provided."
+  echo "Usage: instance/storage/setup.sh <mount-path> <file-system-type> <volume-size-in-bytes>"
+  echo "Default file system type is ext4"
+  echo "If you skip <volume-size-in-bytes>, script will try to use the first unformatted volume ID."
+  echo "Usage example: instance/storage/setup.sh /data ext4 300000000000000"
+  exit 1
+fi
+
+# Select file system from $2 (ext4 or xfs); ext4 is the default
+case $2 in
+  ext4)
+    echo "File system set to ext4"
+    FILE_SYSTEM="ext4"
+    FS_CONFIG="defaults"
+    ;;
+  xfs)
+    echo "File system set to xfs"
+    FILE_SYSTEM="xfs"
+    FS_CONFIG="noatime,nodiratime,nodiscard" # See more: https://cdrdv2-public.intel.com/686417/rocksdb-benchmark-tuning-guide-on-xeon.pdf
+    ;;
+  *)
+    echo "File system set to ext4"
+    FILE_SYSTEM="ext4"
+    FS_CONFIG="defaults"
+    ;;
+esac
+
+if [ -n "$3" ]; then
+  VOLUME_SIZE=$3
+else
+  echo "The size of volume for $DIR_NAME is not specified. Will try to guess volume ID."
+fi
+
+  echo "Checking if $DIR_NAME is mounted; doing nothing if it is"
+  if [ $(df --output=target | grep -c "$DIR_NAME") -lt 1 ]; then
+
+    if [ -n "$VOLUME_SIZE" ]; then
+      VOLUME_ID=/dev/$(lsblk -lnb | awk -v VOLUME_SIZE_BYTES="$VOLUME_SIZE" '{if ($4== VOLUME_SIZE_BYTES) {print $1}}')
+      echo "Data volume size defined, use respective volume id: $VOLUME_ID"
+    else
+      VOLUME_ID=$(get_next_empty_nvme_disk)
+      echo "Data volume size undefined, trying volume id: $VOLUME_ID"
+    fi
+
+    make_fs $FILE_SYSTEM "$VOLUME_ID"
+
+    sleep 10
+    VOLUME_UUID=$(lsblk -fn -o UUID "$VOLUME_ID")
+    VOLUME_FSTAB_CONF="UUID=$VOLUME_UUID $DIR_NAME $FILE_SYSTEM $FS_CONFIG 0 2"
+    echo "VOLUME_ID=$VOLUME_ID"
+    echo "VOLUME_UUID=$VOLUME_UUID"
+    echo "VOLUME_FSTAB_CONF=$VOLUME_FSTAB_CONF"
+
+    # Check if data disc is already in fstab and replace the line if it is with the new disc UUID
+    echo "Checking fstab for volume $DIR_NAME"
+    if [ $(grep -c "$DIR_NAME" /etc/fstab) -gt 0 ]; then
+      SED_REPLACEMENT_STRING="$(grep -n "$DIR_NAME" /etc/fstab | cut -d: -f1)s#.*#$VOLUME_FSTAB_CONF#"
+      # if file exists, delete it
+      if [ -f /etc/fstab.bak ]; then
+        rm /etc/fstab.bak
+      fi
+      cp /etc/fstab /etc/fstab.bak
+      sed -i "$SED_REPLACEMENT_STRING" /etc/fstab
+    else
+      echo "$VOLUME_FSTAB_CONF" | tee -a /etc/fstab
+    fi
+
+    mount -a
+    chown -R bcuser:bcuser "$DIR_NAME"
+  else
+    echo "$DIR_NAME volume is mounted, nothing changed"
+  fi
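Reviewer note: the script resolves the target device in one of two ways: by exact size in bytes when the third argument is given, or by taking the first unmounted, unpartitioned NVMe disk otherwise. A hedged usage sketch (run as root on the instance; the size value below is illustrative):

```bash
# Explicit size: match the device whose size in bytes (lsblk -lnb, column 4)
# equals the argument. A 300 GiB EBS volume is 322122547200 bytes.
/opt/instance/storage/setup.sh /data ext4 322122547200

# No size: format and mount the first empty NVMe disk found.
/opt/instance/storage/setup.sh /data ext4

# Either path persists the mount, so /etc/fstab gains a line like:
#   UUID=<volume-uuid> /data ext4 defaults 0 2
```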
+ echo "Usage example: instance/storage/setup.sh /data ext4 300000000000000" + exit 1 +fi + +# Case input for $2 between ext4 and xfs, use ext4 as default +case $2 in + ext4) + echo "File system set to ext4" + FILE_SYSTEM="ext4" + FS_CONFIG="defaults" + ;; + xfs) + echo "File system set to xfs" + FILE_SYSTEM="xfs" + FS_CONFIG="noatime,nodiratime,nodiscard" # See more: https://cdrdv2-public.intel.com/686417/rocksdb-benchmark-tuning-guide-on-xeon.pdf + ;; + *) + echo "File system set to ext4" + FILE_SYSTEM="ext4" + FS_CONFIG="defaults" + ;; +esac + +if [ -n "$3" ]; then + VOLUME_SIZE=$3 +else + echo "The size of volume for $DIR_NAME is not specified. Will try to guess volume ID." +fi + + echo "Checking if $DIR_NAME is mounted, and dont do anything if it is" + if [ $(df --output=target | grep -c "$DIR_NAME") -lt 1 ]; then + + if [ -n "$VOLUME_SIZE" ]; then + VOLUME_ID=/dev/$(lsblk -lnb | awk -v VOLUME_SIZE_BYTES="$VOLUME_SIZE" '{if ($4== VOLUME_SIZE_BYTES) {print $1}}') + echo "Data volume size defined, use respective volume id: $VOLUME_ID" + else + VOLUME_ID=$(get_next_empty_nvme_disk) + echo "Data volume size undefined, trying volume id: $VOLUME_ID" + fi + + make_fs $FILE_SYSTEM "$VOLUME_ID" + + sleep 10 + VOLUME_UUID=$(lsblk -fn -o UUID "$VOLUME_ID") + VOLUME_FSTAB_CONF="UUID=$VOLUME_UUID $DIR_NAME $FILE_SYSTEM $FS_CONFIG 0 2" + echo "VOLUME_ID=$VOLUME_ID" + echo "VOLUME_UUID=$VOLUME_UUID" + echo "VOLUME_FSTAB_CONF=$VOLUME_FSTAB_CONF" + + # Check if data disc is already in fstab and replace the line if it is with the new disc UUID + echo "Checking fstab for volume $DIR_NAME" + if [ $(grep -c "$DIR_NAME" /etc/fstab) -gt 0 ]; then + SED_REPLACEMENT_STRING="$(grep -n "$DIR_NAME" /etc/fstab | cut -d: -f1)s#.*#$VOLUME_FSTAB_CONF#" + # if file exists, delete it + if [ -f /etc/fstab.bak ]; then + rm /etc/fstab.bak + fi + cp /etc/fstab /etc/fstab.bak + sed -i "$SED_REPLACEMENT_STRING" /etc/fstab + else + echo "$VOLUME_FSTAB_CONF" | tee -a /etc/fstab + fi + + mount -a + chown -R bcuser:bcuser "$DIR_NAME" + else + echo "$DIR_NAME volume is mounted, nothing changed" + fi diff --git a/lib/base/lib/assets/node/node-start.sh b/lib/base/lib/assets/node/node-start.sh new file mode 100644 index 00000000..63deb24e --- /dev/null +++ b/lib/base/lib/assets/node/node-start.sh @@ -0,0 +1,14 @@ +#!/bin/bash +set -e + +source /etc/cdk_environment + +export NETWORK_ENV=".env.$NETWORK_ID" +export CLIENT=geth + +echo "Script is starting client $CLIENT on $NETWORK_ENV" +# Start the node +cd /home/bcuser/node +docker compose -f /home/bcuser/node/docker-compose.yml up -d + +echo "Started" diff --git a/lib/base/lib/assets/node/node-stop.sh b/lib/base/lib/assets/node/node-stop.sh new file mode 100644 index 00000000..85c3fa5c --- /dev/null +++ b/lib/base/lib/assets/node/node-stop.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -e +source /etc/cdk_environment + +export NETWORK_ENV=".env.$NETWORK_ID" +export CLIENT=geth + +echo "Script is starting client $CLIENT on $NETWORK_ENV" +# Stop the node +cd /home/bcuser/node +docker compose -f /home/bcuser/node/docker-compose.yml down + +echo "Stopped" diff --git a/lib/base/lib/assets/setup-instance-store-volumes.sh b/lib/base/lib/assets/setup-instance-store-volumes.sh deleted file mode 100644 index ee2825d7..00000000 --- a/lib/base/lib/assets/setup-instance-store-volumes.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -source /etc/environment - -if [[ "$DATA_VOLUME_TYPE" == "instance-store" ]]; then - echo "Data volume type is instance store" - export DATA_VOLUME_ID=/dev/$(lsblk -lnb | awk 
'max < $4 {max = $4; vol = $1} END {print vol}') -fi - -if [ -n "$DATA_VOLUME_ID" ]; then - if [ $(df --output=target | grep -c "/data") -lt 1 ]; then - echo "Checking fstab for Data volume" - - mkfs.ext4 $DATA_VOLUME_ID - echo "Data volume formatted. Mounting..." - echo "waiting for volume to get UUID" - OUTPUT=0; - while [ "$OUTPUT" = 0 ]; do - DATA_VOLUME_UUID=$(lsblk -fn -o UUID $DATA_VOLUME_ID) - OUTPUT=$(echo $DATA_VOLUME_UUID | grep -c - $2) - echo $OUTPUT - done - DATA_VOLUME_UUID=$(lsblk -fn -o UUID $DATA_VOLUME_ID) - DATA_VOLUME_FSTAB_CONF="UUID=$DATA_VOLUME_UUID /data ext4 defaults 0 2" - echo "DATA_VOLUME_ID="$DATA_VOLUME_ID - echo "DATA_VOLUME_UUID="$DATA_VOLUME_UUID - echo "DATA_VOLUME_FSTAB_CONF="$DATA_VOLUME_FSTAB_CONF - - # Check if data disc is already in fstab and replace the line if it is with the new disc UUID - if [ $(grep -c "data" /etc/fstab) -gt 0 ]; then - SED_REPLACEMENT_STRING="$(grep -n "/data" /etc/fstab | cut -d: -f1)s#.*#$DATA_VOLUME_FSTAB_CONF#" - cp /etc/fstab /etc/fstab.bak - sed -i "$SED_REPLACEMENT_STRING" /etc/fstab - else - echo $DATA_VOLUME_FSTAB_CONF | sudo tee -a /etc/fstab - fi - - sudo mount -a - - chown bcuser:bcuser -R /data - else - echo "Data volume is mounted, nothing changed" - fi -fi diff --git a/lib/base/lib/assets/sync-checker/setup.sh b/lib/base/lib/assets/sync-checker/setup.sh new file mode 100755 index 00000000..96bc10e5 --- /dev/null +++ b/lib/base/lib/assets/sync-checker/setup.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +if [ -n "$1" ]; then + export SYNC_CHECKER_SCRIPT=$1 +else + echo "No path to syncchecker script is provided" + echo "Usage: sync-checker/setup.sh " + echo "Using default: /opt/sync-checker/syncchecker.sh" + export SYNC_CHECKER_SCRIPT="/opt/sync-checker/syncchecker.sh" +fi + +echo "Configuring syncchecker script" +mv $SYNC_CHECKER_SCRIPT /opt/syncchecker.sh +chmod +x /opt/syncchecker.sh + +echo "Setting up sync-checker service" +mv /opt/sync-checker/sync-checker.service /etc/systemd/system/sync-checker.service + +# Run every 5 minutes +echo "Setting up sync-checker timer" +mv /opt/sync-checker/sync-checker.timer /etc/systemd/system/sync-checker.timer + +echo "Starting sync checker timer" +systemctl start sync-checker.timer +systemctl enable sync-checker.timer diff --git a/lib/base/lib/assets/sync-checker/sync-checker.service b/lib/base/lib/assets/sync-checker/sync-checker.service new file mode 100644 index 00000000..9f187ce2 --- /dev/null +++ b/lib/base/lib/assets/sync-checker/sync-checker.service @@ -0,0 +1,5 @@ +[Unit] +Description="Sync checker for blockchain node" + +[Service] +ExecStart=/opt/syncchecker.sh diff --git a/lib/base/lib/assets/sync-checker/sync-checker.timer b/lib/base/lib/assets/sync-checker/sync-checker.timer new file mode 100644 index 00000000..b45ff94e --- /dev/null +++ b/lib/base/lib/assets/sync-checker/sync-checker.timer @@ -0,0 +1,9 @@ +[Unit] +Description="Run Sync checker service every 5 min" + +[Timer] +OnCalendar=*:*:0/5 +Unit=sync-checker.service + +[Install] +WantedBy=multi-user.target diff --git a/lib/base/lib/assets/user-data/node.sh b/lib/base/lib/assets/user-data-alinux.sh similarity index 67% rename from lib/base/lib/assets/user-data/node.sh rename to lib/base/lib/assets/user-data-alinux.sh index 9454be2c..5553f1d8 100644 --- a/lib/base/lib/assets/user-data/node.sh +++ b/lib/base/lib/assets/user-data-alinux.sh @@ -8,13 +8,36 @@ RESOURCE_ID=${_NODE_CF_LOGICAL_ID_} ASSETS_S3_PATH=${_ASSETS_S3_PATH_} DATA_VOLUME_TYPE=${_DATA_VOLUME_TYPE_} DATA_VOLUME_SIZE=${_DATA_VOLUME_SIZE_} + +# Set by 
diff --git a/lib/base/lib/assets/user-data/node.sh b/lib/base/lib/assets/user-data-alinux.sh
similarity index 67%
rename from lib/base/lib/assets/user-data/node.sh
rename to lib/base/lib/assets/user-data-alinux.sh
index 9454be2c..5553f1d8 100644
--- a/lib/base/lib/assets/user-data/node.sh
+++ b/lib/base/lib/assets/user-data-alinux.sh
@@ -8,13 +8,36 @@ RESOURCE_ID=${_NODE_CF_LOGICAL_ID_}
 ASSETS_S3_PATH=${_ASSETS_S3_PATH_}
 DATA_VOLUME_TYPE=${_DATA_VOLUME_TYPE_}
 DATA_VOLUME_SIZE=${_DATA_VOLUME_SIZE_}
+
+# Set by Base-specific CDK components and stacks
+AWS_REGION=${_AWS_REGION_}
+STACK_NAME=${_STACK_NAME_}
+RESTORE_FROM_SNAPSHOT=${_RESTORE_FROM_SNAPSHOT_}
+NETWORK_ID=${_NETWORK_ID_}
+NODE_CONFIG=${_NODE_CONFIG_}
+L1_EXECUTION_ENDPOINT=${_L1_EXECUTION_ENDPOINT_}
+L1_CONSENSUS_ENDPOINT=${_L1_CONSENSUS_ENDPOINT_}
+SNAPSHOT_URL=${_SNAPSHOT_URL_}
+
 {
   echo "LIFECYCLE_HOOK_NAME=$LIFECYCLE_HOOK_NAME"
   echo "AUTOSCALING_GROUP_NAME=$AUTOSCALING_GROUP_NAME"
   echo "ASSETS_S3_PATH=$ASSETS_S3_PATH"
   echo "DATA_VOLUME_TYPE=$DATA_VOLUME_TYPE"
   echo "DATA_VOLUME_SIZE=$DATA_VOLUME_SIZE"
-} >> /etc/environment
+
+  echo "AWS_REGION=$AWS_REGION"
+  echo "NETWORK_ID=$NETWORK_ID"
+  echo "NODE_CONFIG=$NODE_CONFIG"
+  echo "L1_EXECUTION_ENDPOINT=$L1_EXECUTION_ENDPOINT"
+  echo "L1_CONSENSUS_ENDPOINT=$L1_CONSENSUS_ENDPOINT"
+  echo "SNAPSHOT_URL=$SNAPSHOT_URL"
+} >> /etc/cdk_environment
+
+source /etc/cdk_environment
+
+# Export environment variables so calls to `envsubst` inherit the environment variables.
+while read -r line; do export "$line"; done < /etc/cdk_environment
 
 arch=$(uname -m)
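Reviewer note: the hunk above is the heart of the rename from /etc/environment to /etc/cdk_environment. Because the file holds plain KEY=value lines, each line is exported so that child processes (e.g. `envsubst` during config templating) can see the values. A roughly equivalent pattern, shown for clarity only and not what the script literally runs:

```bash
# Equivalent sketch: auto-export every variable assigned while sourcing.
set -a
source /etc/cdk_environment
set +a

# Exported variables are now visible to child processes such as envsubst:
echo 'Node runs in ${AWS_REGION} on network ${NETWORK_ID}' | envsubst
```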
@@ -22,21 +45,22 @@ echo "Architecture detected: $arch"
 
 if [ "$arch" == "x86_64" ]; then
   SSM_AGENT_BINARY_URI=https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
-  AWS_CLI_BINARY_URI=https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip
   S5CMD_URI=https://github.com/peak/s5cmd/releases/download/v2.1.0/s5cmd_2.1.0_Linux-64bit.tar.gz
   YQ_URI=https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64
 else
   SSM_AGENT_BINARY_URI=https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_arm64/amazon-ssm-agent.rpm
-  AWS_CLI_BINARY_URI=https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip
   S5CMD_URI=https://github.com/peak/s5cmd/releases/download/v2.1.0/s5cmd_2.1.0_Linux-arm64.tar.gz
   YQ_URI=https://github.com/mikefarah/yq/releases/latest/download/yq_linux_arm64
 fi
 
 echo "Updating and installing required system packages"
-yum update -y
-yum -y install amazon-cloudwatch-agent collectd jq yq gcc ncurses-devel aws-cfn-bootstrap zstd
+dnf update -y
+dnf -y install amazon-cloudwatch-agent collectd jq gcc ncurses-devel telnet aws-cfn-bootstrap cronie zstd git
 wget $YQ_URI -O /usr/bin/yq && chmod +x /usr/bin/yq
 
+sudo systemctl enable crond.service
+sudo systemctl start crond.service
+
 echo " Installing aria2 a p2p downloader"
 
 cd /tmp
 
@@ -66,76 +90,18 @@ echo "Downloading assets zip file"
 aws s3 cp $ASSETS_S3_PATH ./assets.zip
 unzip -q assets.zip
 
-echo 'Uninstalling AWS CLI v1'
-yum remove awscli
-
-echo 'Installing AWS CLI v2'
-curl $AWS_CLI_BINARY_URI -o "awscliv2.zip"
-unzip -q awscliv2.zip
-./aws/install
-rm /usr/bin/aws
-ln /usr/local/bin/aws /usr/bin/aws
-
-echo 'Installing SSM Agent'
+echo 'Upgrading SSM Agent'
 yum install -y $SSM_AGENT_BINARY_URI
 
 # Base specific setup starts here
 
-# Set by Base-specic CDK components and stacks
-REGION=${_REGION_}
-STACK_NAME=${_STACK_NAME_}
-RESTORE_FROM_SNAPSHOT=${_RESTORE_FROM_SNAPSHOT_}
-NETWORK_ID=${_NETWORK_ID_}
-NODE_CONFIG=${_NODE_CONFIG_}
-L1_EXECUTION_ENDPOINT=${_L1_EXECUTION_ENDPOINT_}
-L1_CONSENSUS_ENDPOINT=${_L1_CONSENSUS_ENDPOINT_}
-SNAPSHOT_URL=${_SNAPSHOT_URL_}
-
-{
-  echo "REGION=$REGION"
-  echo "NETWORK_ID=$NETWORK_ID"
-  echo "NODE_CONFIG=$NODE_CONFIG"
-  echo "L1_EXECUTION_ENDPOINT=$L1_EXECUTION_ENDPOINT"
-  echo "L1_CONSENSUS_ENDPOINT=$L1_CONSENSUS_ENDPOINT"
-  echo "SNAPSHOT_URL=$SNAPSHOT_URL"
-} >> /etc/environment
-
-GIT_URL=https://github.com/base-org/node.git
-SYNC_CHECKER_FILE_NAME=syncchecker-base.sh
-
-yum -y install docker python3-pip cronie cronie-anacron gcc python3-devel git
-yum -y remove python-requests
-pip3 install docker-compose
-pip3 install hapless
-pip3 uninstall -y urllib3
-pip3 install 'urllib3<2.0'
-
-echo "Assigning Swap Space"
-# Check if a swap file already exists
-if [ -f /swapfile ]; then
-  # Remove the existing swap file
-  swapoff /swapfile
-  rm -rf /swapfile
-fi
-
-# Create a new swap file
-# Set swap size to fixed 5 GB
-swap_size_mb=5120
-unit=M
-fallocate -l $swap_size_mb$unit /swapfile
-chmod 600 /swapfile
-mkswap /swapfile
-swapon /swapfile
-
-# Enable the swap space to persist after reboot.
-echo "/swapfile none swap sw 0 0" | sudo tee -a /etc/fstab
-
-sysctl vm.swappiness=6
-sysctl vm.vfs_cache_pressure=10
-echo "vm.swappiness=10" | sudo tee -a /etc/sysctl.conf
-echo "vm.vfs_cache_pressure=10" | sudo tee -a /etc/sysctl.conf
-
-free -h
+echo "Installing Docker"
+dnf remove -y docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine
+dnf -y install dnf-plugins-core
+dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+sed -i 's/$releasever/9/g' /etc/yum.repos.d/docker-ce.repo
+dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+systemctl enable --now docker
 
 mkdir -p /data
 
@@ -152,6 +118,7 @@ systemctl enable docker
 echo "Clonning node repo"
 cd /home/bcuser
+GIT_URL=https://github.com/base-org/node.git
 git clone $GIT_URL
 cd ./node
 
@@ -208,21 +175,18 @@ sed -i "s#GETH_HOST_DATA_DIR=./geth-data#GETH_HOST_DATA_DIR=/data/geth#g" /home/
 
 chown -R bcuser:bcuser /home/bcuser/node
 
-echo "Configuring syncchecker script"
-cp /opt/sync-checker/$SYNC_CHECKER_FILE_NAME /opt/syncchecker.sh
-chmod 766 /opt/syncchecker.sh
-
-echo "*/5 * * * * /opt/syncchecker.sh" | crontab
-crontab -l
+echo "Configuring and starting sync-checker"
+SYNC_CHECKER_FILE_NAME="syncchecker-base.sh"
+/opt/sync-checker/setup.sh "/opt/sync-checker/$SYNC_CHECKER_FILE_NAME"
 
 echo "Configuring node as a service"
 mkdir /home/bcuser/bin
-mv /opt/base/node-start.sh /home/bcuser/bin/node-start.sh
-mv /opt/base/node-stop.sh /home/bcuser/bin/node-stop.sh
+mv /opt/node/node-start.sh /home/bcuser/bin/node-start.sh
+mv /opt/node/node-stop.sh /home/bcuser/bin/node-stop.sh
 chmod 766 /home/bcuser/bin/*
 chown -R bcuser:bcuser /home/bcuser
 
-sudo bash -c 'cat > /etc/systemd/system/base.service <<EOF
+sudo bash -c 'cat > /etc/systemd/system/node.service <<EOF
[...]
 if [[ "$LIFECYCLE_HOOK_NAME" == "none" ]]; then
+  if ! command -v cfn-signal &> /dev/null
+  then
+    echo "cfn-signal could not be found, installing"
+    /opt/instance/cfn-hup/setup.sh "$STACK_NAME" "$AWS_REGION"
+  else
+    echo "cfn-signal is available, skipping installation"
+  fi
+  cfn-signal --stack "$STACK_NAME" --resource "$RESOURCE_ID" --region "$AWS_REGION"
 fi
 
 echo "Preparing data volume"
 
-echo "Wait for one minute for the volume to become available"
-sleep 60s
-
 if [[ "$DATA_VOLUME_TYPE" == "instance-store" ]]; then
   echo "Data volume type is instance store"
 
-  cd /opt
-  chmod +x /opt/setup-instance-store-volumes.sh
-
-  (crontab -l; echo "@reboot /opt/setup-instance-store-volumes.sh >/tmp/setup-instance-store-volumes.log 2>&1") | crontab -
+  (crontab -l; echo "@reboot /opt/instance/storage/setup.sh /data ext4 > /tmp/setup-store-volume-data.log 2>&1") | crontab -
   crontab -l
 
-  DATA_VOLUME_ID=/dev/$(lsblk -lnb | awk 'max < $4 {max = $4; vol = $1} END {print vol}')
-
+  /opt/instance/storage/setup.sh /data ext4
 else
   echo "Data volume type is EBS"
-
-  DATA_VOLUME_ID=/dev/$(lsblk -lnb | awk -v VOLUME_SIZE_BYTES="$DATA_VOLUME_SIZE" '{if ($4== VOLUME_SIZE_BYTES) {print $1}}')
+  echo "Waiting for EBS volume to become available"
+  sleep 60
+  /opt/instance/storage/setup.sh /data ext4
 fi
 
-mkfs -t ext4 $DATA_VOLUME_ID
-echo "waiting for volume to get UUID"
-  OUTPUT=0;
-  while [ "$OUTPUT" = 0 ]; do
-    DATA_VOLUME_UUID=$(lsblk -fn -o UUID $DATA_VOLUME_ID)
-    OUTPUT=$(echo $DATA_VOLUME_UUID | grep -c - $2)
-    echo $OUTPUT
-  done
-DATA_VOLUME_UUID=$(lsblk -fn -o UUID $DATA_VOLUME_ID)
-DATA_VOLUME_FSTAB_CONF="UUID=$DATA_VOLUME_UUID /data ext4 defaults 0 2"
-echo "DATA_VOLUME_ID="$DATA_VOLUME_ID
-echo "DATA_VOLUME_UUID="$DATA_VOLUME_UUID
-echo "DATA_VOLUME_FSTAB_CONF="$DATA_VOLUME_FSTAB_CONF
-echo $DATA_VOLUME_FSTAB_CONF | tee -a /etc/fstab
-mount -a
-
 lsblk -d
 
 chown -R bcuser:bcuser /data
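Reviewer note: for instance-store volumes the data disk is ephemeral, so the `@reboot` crontab entry above re-runs the storage setup after every stop/start cycle, while the EBS path only waits for attachment and mounts once. The installed entry can be inspected on the instance:

```bash
# The @reboot job reformats and remounts /data after each cold start:
crontab -l
# expected entry (illustrative):
#   @reboot /opt/instance/storage/setup.sh /data ext4 > /tmp/setup-store-volume-data.log 2>&1
```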
@@ -295,18 +245,18 @@ systemctl restart amazon-cloudwatch-agent
 
 if [ "$RESTORE_FROM_SNAPSHOT" == "false" ]; then
   echo "Skipping restoration from snapshot. Starting node"
   systemctl daemon-reload
-  systemctl enable --now base
+  systemctl enable --now node
 else
   echo "Restoring full node from snapshot over http"
-  chmod +x /opt/restore-from-snapshot-http.sh
-  echo "/opt/restore-from-snapshot-http.sh" | at now + 1 min
+  chmod +x /opt/instance/storage/restore-from-snapshot-http.sh
+  echo "/opt/instance/storage/restore-from-snapshot-http.sh" | at now + 1 min
 fi
 
 if [[ "$LIFECYCLE_HOOK_NAME" != "none" ]]; then
   echo "Signaling ASG lifecycle hook to complete"
   TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
   INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/instance-id)
-  aws autoscaling complete-lifecycle-action --lifecycle-action-result CONTINUE --instance-id $INSTANCE_ID --lifecycle-hook-name "$LIFECYCLE_HOOK_NAME" --auto-scaling-group-name "$AUTOSCALING_GROUP_NAME" --region $REGION
+  aws autoscaling complete-lifecycle-action --lifecycle-action-result CONTINUE --instance-id $INSTANCE_ID --lifecycle-hook-name "$LIFECYCLE_HOOK_NAME" --auto-scaling-group-name "$AUTOSCALING_GROUP_NAME" --region $AWS_REGION
 fi
 
 echo "All Done!!"
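Reviewer note: once user data completes, everything runs under the renamed `node` unit and the Compose project cloned into /home/bcuser/node. A hedged post-boot check, with unit and paths as named in the diff above:

```bash
# Verify the renamed service came up and the containers are running:
systemctl status node --no-pager
journalctl -u node --since "15 min ago" --no-pager | tail -n 20
docker compose -f /home/bcuser/node/docker-compose.yml ps
```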
diff --git a/lib/base/lib/ha-nodes-stack.ts b/lib/base/lib/ha-nodes-stack.ts
index d876281a..86760725 100644
--- a/lib/base/lib/ha-nodes-stack.ts
+++ b/lib/base/lib/ha-nodes-stack.ts
@@ -31,7 +31,7 @@ export class BaseHANodesStack extends cdk.Stack {
     constructor(scope: cdkConstructs.Construct, id: string, props: BaseHANodesStackProps) {
         super(scope, id, props);
 
-        const REGION = cdk.Stack.of(this).region;
+        const AWS_REGION = cdk.Stack.of(this).region;
         const STACK_NAME = cdk.Stack.of(this).stackName;
         const lifecycleHookName = STACK_NAME;
         const autoScalingGroupName = STACK_NAME;
@@ -69,11 +69,11 @@ export class BaseHANodesStack extends cdk.Stack {
         asset.bucket.grantRead(instanceRole);
 
         // parsing user data script and injecting necessary variables
-        const nodeScript = fs.readFileSync(path.join(__dirname, "assets", "user-data", "node.sh")).toString();
+        const nodeScript = fs.readFileSync(path.join(__dirname, "assets", "user-data-alinux.sh")).toString();
         const dataVolumeSizeBytes = dataVolume.sizeGiB * constants.GibibytesToBytesConversionCoefficient;
 
         const modifiedInitNodeScript = cdk.Fn.sub(nodeScript, {
-            _REGION_: REGION,
+            _AWS_REGION_: AWS_REGION,
             _ASSETS_S3_PATH_: `s3://${asset.s3BucketName}/${asset.s3ObjectKey}`,
             _STACK_NAME_: STACK_NAME,
             _NODE_CF_LOGICAL_ID_: constants.NoneValue,
@@ -94,9 +94,9 @@ export class BaseHANodesStack extends cdk.Stack {
             instanceType,
             dataVolumes: [dataVolume],
             machineImage: new ec2.AmazonLinuxImage({
-                generation: AmazonLinuxGeneration.AMAZON_LINUX_2,
-                kernel: ec2.AmazonLinuxKernel.KERNEL5_X,
-                cpuType: instanceCpuType
+                generation: ec2.AmazonLinuxGeneration.AMAZON_LINUX_2023,
+                kernel: ec2.AmazonLinuxKernel.KERNEL6_1,
+                cpuType: instanceCpuType,
             }),
            role: instanceRole,
            vpc,
diff --git a/lib/base/lib/single-node-stack.ts b/lib/base/lib/single-node-stack.ts
index 8c6eda38..0e4f4cd7 100644
--- a/lib/base/lib/single-node-stack.ts
+++ b/lib/base/lib/single-node-stack.ts
@@ -30,7 +30,7 @@ export class BaseSingleNodeStack extends cdk.Stack {
         super(scope, id, props);
 
         // Setting up necessary environment variables
-        const REGION = cdk.Stack.of(this).region;
+        const AWS_REGION = cdk.Stack.of(this).region;
         const STACK_NAME = cdk.Stack.of(this).stackName;
         const STACK_ID = cdk.Stack.of(this).stackId;
         const availabilityZones = cdk.Stack.of(this).availabilityZones;
@@ -82,7 +82,8 @@ export class BaseSingleNodeStack extends cdk.Stack {
             dataVolumes: [dataVolume],
             rootDataVolumeDeviceName: "/dev/xvda",
             machineImage: new ec2.AmazonLinuxImage({
-                generation: ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
+                generation: ec2.AmazonLinuxGeneration.AMAZON_LINUX_2023,
+                kernel: ec2.AmazonLinuxKernel.KERNEL6_1,
                 cpuType: instanceCpuType,
             }),
             vpc,
@@ -95,11 +96,11 @@ export class BaseSingleNodeStack extends cdk.Stack {
         });
 
         // Parsing user data script and injecting necessary variables
-        const nodeStartScript = fs.readFileSync(path.join(__dirname, "assets", "user-data", "node.sh")).toString();
+        const nodeStartScript = fs.readFileSync(path.join(__dirname, "assets", "user-data-alinux.sh")).toString();
         const dataVolumeSizeBytes = dataVolume.sizeGiB * constants.GibibytesToBytesConversionCoefficient;
 
         const modifiedInitNodeScript = cdk.Fn.sub(nodeStartScript, {
-            _REGION_: REGION,
+            _AWS_REGION_: AWS_REGION,
             _ASSETS_S3_PATH_: `s3://${asset.s3BucketName}/${asset.s3ObjectKey}`,
             _STACK_NAME_: STACK_NAME,
             _NODE_CF_LOGICAL_ID_: node.nodeCFLogicalId,
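Reviewer note: every `_NAME_` key passed to `cdk.Fn.sub` must have a matching `${_NAME_}` placeholder in user-data-alinux.sh; a one-sided rename like `_REGION_` → `_AWS_REGION_` would silently leave the literal placeholder in the rendered script. A hypothetical grep-based guard (the script name and paths are assumptions, not part of this PR):

```bash
#!/bin/bash
# check-placeholders.sh (hypothetical): list user-data placeholders that no
# stack maps, catching one-sided renames such as _REGION_ -> _AWS_REGION_.
cd lib/base/lib
for ph in $(grep -o '\${_[A-Z0-9_]*_}' assets/user-data-alinux.sh | tr -d '${}' | sort -u); do
  grep -q "$ph" single-node-stack.ts ha-nodes-stack.ts || echo "unmapped placeholder: $ph"
done
```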
@@ -122,7 +123,7 @@
 
         const dashboardString = cdk.Fn.sub(JSON.stringify(nodeCwDashboard.SyncNodeCWDashboardJSON), {
             INSTANCE_ID:node.instanceId,
             INSTANCE_NAME: STACK_NAME,
-            REGION: REGION,
+            REGION: AWS_REGION,
         })
 
         new cw.CfnDashboard(this, 'base-cw-dashboard', {