File tree Expand file tree Collapse file tree 9 files changed +103
-9
lines changed
Expand file tree Collapse file tree 9 files changed +103
-9
lines changed Original file line number Diff line number Diff line change @@ -46,15 +46,9 @@ locals {
4646 role = " dev"
4747 iam_instance_profile = data.terraform_remote_state.common.outputs.instance_profile_name[" ec2-to-ecs" ]
4848 key_name = " eatda-ec2-dev-key"
49- user_data = <<- EOF
50- #!/bin/bash
51- echo ECS_CLUSTER=dev-cluster >> /etc/ecs/ecs.config
52- fallocate -l 2G /swapfile
53- chmod 600 /swapfile
54- mkswap /swapfile
55- swapon /swapfile
56- echo '/swapfile none swap sw 0 0' >> /etc/fstab
57- EOF
49+ user_data = templatefile (" ${ path . module } /scripts/user-data.sh" , {
50+ ecs_cluster_name = " dev-cluster"
51+ })
5852 }
5953
6054 task_definitions_with_roles = {
Original file line number Diff line number Diff line change @@ -5,6 +5,7 @@ module "ec2" {
55 instance_subnet_map = local. instance_subnet_map
66 name_prefix = local. name_prefix
77 tags = local. common_tags
8+ depends_on = [module . s3 ]
89}
910
1011module "ecs" {
Original file line number Diff line number Diff line change @@ -8,6 +8,14 @@ resource "aws_s3_bucket" "dev" {
88 }
99}
1010
# Uploads the log-backup script into the dev bucket so EC2 instances can
# fetch it at boot via user-data; etag (filemd5) forces a re-upload whenever
# the local script changes.
11+ resource "aws_s3_object" "app-backup-log-script" {
12+ bucket = aws_s3_bucket. dev . bucket
13+ key = " scripts/app-backup-dev-logs.sh"
14+ source = " ${ path . module } /scripts/app-backup-dev-logs.sh"
15+ etag = filemd5 (" ${ path . module } /scripts/app-backup-dev-logs.sh" )
16+ content_type = " text/x-sh"
17+ }
18+
1119resource "aws_s3_bucket_public_access_block" "dev" {
1220 bucket = aws_s3_bucket. dev . id
1321
#!/bin/bash
# Archive the application log directory, upload the archive to S3, and —
# only after a successful upload — delete the uploaded *.log files.
#
# Fix vs. original: the temporary archive was leaked in /tmp whenever the
# S3 upload failed (rm only ran on the success path). A trap on EXIT now
# guarantees cleanup on every path.
set -euo pipefail

LOG_DIR="/home/ec2-user/logs/eatda"
S3_BUCKET="s3://eatda-storage-dev/backup/logs/"
TIMESTAMP=$(date +%Y-%m-%d-%H%M%S)
ARCHIVE_PATH="/tmp/eatda-logs-${TIMESTAMP}.tar.gz"

# Remove the temporary archive on any exit (success, upload failure, or
# an error aborting the script via set -e).
trap 'rm -f "$ARCHIVE_PATH"' EXIT

if [ ! -d "$LOG_DIR" ]; then
  echo "[ERROR] Log directory does not exist: $LOG_DIR" >&2
  exit 1
fi

tar -czf "$ARCHIVE_PATH" -C "$LOG_DIR" .

if aws s3 cp "$ARCHIVE_PATH" "$S3_BUCKET"; then
  echo "[INFO] Upload successful: $ARCHIVE_PATH -> $S3_BUCKET"

  # Prune logs only once the archive is safely in S3.
  find "$LOG_DIR" -type f -name "*.log" -delete
  echo "[INFO] Old log files deleted from $LOG_DIR"
else
  echo "[ERROR] Failed to upload archive to S3." >&2
  exit 1
fi
#!/bin/bash
# EC2 user-data for dev ECS container instances. Rendered by Terraform's
# templatefile(); ${ecs_cluster_name} is substituted before execution.

# Register this instance with the ECS cluster.
echo ECS_CLUSTER=${ecs_cluster_name} >> /etc/ecs/ecs.config

# Create a 2G swap file and persist it across reboots.
fallocate -l 2G /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
echo '/swapfile none swap sw 0 0' >> /etc/fstab

mkdir -p /home/ec2-user/logs/eatda

# Fix vs. original: the backup script was downloaded INTO the log directory
# it backs up, so it got bundled into every uploaded archive. Keep it in a
# dedicated scripts directory instead.
mkdir -p /home/ec2-user/scripts
aws s3 cp s3://eatda-storage-dev/scripts/app-backup-dev-logs.sh /home/ec2-user/scripts/app-backup-dev-logs.sh
chmod +x /home/ec2-user/scripts/app-backup-dev-logs.sh

# Run the log backup every Sunday at midnight.
(crontab -l 2>/dev/null; echo "0 0 * * 0 /home/ec2-user/scripts/app-backup-dev-logs.sh >> /var/log/app-backup.log 2>&1") | crontab -
Original file line number Diff line number Diff line change @@ -15,6 +15,7 @@ module "ec2" {
1515 instance_subnet_map = local. instance_subnet_map
1616 name_prefix = local. name_prefix
1717 tags = local. common_tags
18+ depends_on = [module . s3 ]
1819}
1920
2021module "ecs" {
Original file line number Diff line number Diff line change @@ -8,6 +8,14 @@ resource "aws_s3_bucket" "prod" {
88 }
99}
1010
# Uploads the log-backup script into the prod bucket so EC2 instances can
# fetch it at boot via user-data; etag (filemd5) forces a re-upload whenever
# the local script changes.
11+ resource "aws_s3_object" "app-backup-log-script" {
12+ bucket = aws_s3_bucket. prod . bucket
13+ key = " scripts/app-backup-prod-logs.sh"
14+ source = " ${ path . module } /scripts/app-backup-prod-logs.sh"
15+ etag = filemd5 (" ${ path . module } /scripts/app-backup-prod-logs.sh" )
16+ content_type = " text/x-sh"
17+ }
18+
1119resource "aws_s3_bucket_public_access_block" "prod" {
1220 bucket = aws_s3_bucket. prod . id
1321
#!/bin/bash
# Archive the application log directory, upload the archive to S3, and —
# only after a successful upload — delete the uploaded *.log files.
#
# Fix vs. original: the temporary archive was leaked in /tmp whenever the
# S3 upload failed (rm only ran on the success path). A trap on EXIT now
# guarantees cleanup on every path.
set -euo pipefail

LOG_DIR="/home/ec2-user/logs/eatda"
S3_BUCKET="s3://eatda-storage-prod/backup/logs/"
TIMESTAMP=$(date +%Y-%m-%d-%H%M%S)
ARCHIVE_PATH="/tmp/eatda-logs-${TIMESTAMP}.tar.gz"

# Remove the temporary archive on any exit (success, upload failure, or
# an error aborting the script via set -e).
trap 'rm -f "$ARCHIVE_PATH"' EXIT

if [ ! -d "$LOG_DIR" ]; then
  echo "[ERROR] Log directory does not exist: $LOG_DIR" >&2
  exit 1
fi

tar -czf "$ARCHIVE_PATH" -C "$LOG_DIR" .

if aws s3 cp "$ARCHIVE_PATH" "$S3_BUCKET"; then
  echo "[INFO] Upload successful: $ARCHIVE_PATH -> $S3_BUCKET"

  # Prune logs only once the archive is safely in S3.
  find "$LOG_DIR" -type f -name "*.log" -delete
  echo "[INFO] Old log files deleted from $LOG_DIR"
else
  echo "[ERROR] Failed to upload archive to S3." >&2
  exit 1
fi
#!/bin/bash
# EC2 user-data for prod ECS container instances. Rendered by Terraform's
# templatefile(); ${ecs_cluster_name} is substituted before execution.

# Register this instance with the ECS cluster.
echo ECS_CLUSTER=${ecs_cluster_name} >> /etc/ecs/ecs.config

# Create a 2G swap file and persist it across reboots.
fallocate -l 2G /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
echo '/swapfile none swap sw 0 0' >> /etc/fstab

mkdir -p /home/ec2-user/logs/eatda

# Fix vs. original: the backup script was downloaded INTO the log directory
# it backs up, so it got bundled into every uploaded archive. Keep it in a
# dedicated scripts directory instead.
mkdir -p /home/ec2-user/scripts
aws s3 cp s3://eatda-storage-prod/scripts/app-backup-prod-logs.sh /home/ec2-user/scripts/app-backup-prod-logs.sh
chmod +x /home/ec2-user/scripts/app-backup-prod-logs.sh

# Run the log backup every Sunday at midnight.
(crontab -l 2>/dev/null; echo "0 0 * * 0 /home/ec2-user/scripts/app-backup-prod-logs.sh >> /var/log/app-backup.log 2>&1") | crontab -
You can’t perform that action at this time.
0 commit comments