diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index f561328..0000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,124 +0,0 @@ -# name: Deploy API - -# on: -# workflow_run: -# workflows: ["Terraform CI/CD"] -# branches: [master] -# types: [completed] - -# env: -# NODE_VERSION: 16 - -# permissions: -# contents: read -# actions: read -# id-token: write - -# jobs: -# deploy: -# if: ${{ github.event.workflow_run.conclusion == 'success' }} -# runs-on: ubuntu-latest - -# steps: -# - uses: actions/checkout@v3 - -# # Configurar Terraform y autenticaciΓ³n con Azure -# - name: πŸ› οΈ Setup Terraform -# uses: hashicorp/setup-terraform@v3 -# with: -# terraform_version: 1.5.0 - -# - name: πŸ” Configure Azure Credentials -# run: | -# echo "ARM_CLIENT_ID=${{ secrets.ARM_CLIENT_ID }}" >> $GITHUB_ENV -# echo "ARM_CLIENT_SECRET=${{ secrets.ARM_CLIENT_SECRET }}" >> $GITHUB_ENV -# echo "ARM_TENANT_ID=${{ secrets.ARM_TENANT_ID }}" >> $GITHUB_ENV -# echo "ARM_SUBSCRIPTION_ID=${{ secrets.ARM_SUBSCRIPTION_ID }}" >> $GITHUB_ENV - -# # Inicializar Terraform -# - name: πŸ“₯ Terraform Init -# run: | -# terraform -chdir=infra init \ -# -backend-config="resource_group_name=soft-tfstate-rg" \ -# -backend-config="storage_account_name=softsastate" \ -# -backend-config="container_name=tfstate" \ -# -backend-config="key=terraform.tfstate" - -# # Generar inventory.ini dinΓ‘micamente (soluciΓ³n definitiva) -# - name: πŸ“ Generate Ansible Inventory -# run: | -# CONTROL_IP=$(terraform -chdir=infra output -raw control_node_public_ip) -# SSH_USER=$(terraform -chdir=infra output -raw ssh_user) - -# mkdir -p ./ansible -# cat > ./ansible/inventory.ini < ~/.ssh/vm_ssh_key -# chmod 600 ~/.ssh/vm_ssh_key -# echo -e "Host *\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config - -# # Verificar inventory -# - name: πŸ” Verify Inventory -# run: | -# if [ ! 
-f "./ansible/inventory.ini" ]; then -# echo "❌ Error: inventory.ini not found" -# exit 1 -# fi -# echo "Inventory contents:" -# cat ./ansible/inventory.ini - -# # Resto de los pasos de despliegue... -# - name: πŸš€ Deploy Ansible Configuration -# run: | -# chmod +x ./ansible/deploy-ansible-from-local.sh -# ./ansible/deploy-ansible-from-local.sh - -# - name: 🟒 Set up Node.js -# uses: actions/setup-node@v3 -# with: -# node-version: ${{ env.NODE_VERSION }} - -# - name: πŸ“¦ Install dependencies -# run: npm ci -# working-directory: src/movie-analyst-api - -# - name: πŸ§ͺ Run tests -# run: npm test -# working-directory: src/movie-analyst-api - -# - name: πŸ—ƒοΈ Deploy Database -# run: | -# chmod +x ./ansible/deploy-db-from-local.sh -# ./ansible/deploy-db-from-local.sh - -# - name: πŸš€ Deploy API -# run: | -# export DB_HOST="$(terraform -chdir=infra output -raw mysql_fqdn)" -# export DB_USER="$(terraform -chdir=infra output -raw mysql_admin_user)" -# export DB_PASS="$(terraform -chdir=infra output -raw mysql_admin_pwd)" -# export DB_NAME="$(terraform -chdir=infra output -raw mysql_database_name)" -# chmod +x ./ansible/deploy-api-from-local.sh -# ./ansible/deploy-api-from-local.sh \ No newline at end of file diff --git a/.github/workflows/terraform-destroy.yml b/.github/workflows/terraform-destroy.yml new file mode 100644 index 0000000..83fd0ba --- /dev/null +++ b/.github/workflows/terraform-destroy.yml @@ -0,0 +1,72 @@ +name: Terraform Destroy + +on: + workflow_dispatch: + inputs: + confirm_destroy: + description: "⚠️ Type 'YES' to confirm destroying the infrastructure" + required: true + +permissions: + contents: read + id-token: write + +env: + ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} + ARM_CLIENT_SECRET: ${{ secrets.ARM_CLIENT_SECRET }} + ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} + ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} + +jobs: + destroy: + name: Terraform Destroy + runs-on: ubuntu-latest + + steps: + - name: πŸ›‘ Validate confirmation input 
+ if: ${{ github.event.inputs.confirm_destroy != 'YES' }} + run: | + echo "You must type YES to proceed with destroy." + exit 1 + + - name: πŸ“¦ Checkout code + uses: actions/checkout@v3 + + - name: βš™οΈ Setup Terraform + uses: hashicorp/setup-terraform@v2 + with: + terraform_version: 1.5.0 + + - name: πŸ” Azure Login with Service Principal + uses: azure/login@v1 + with: + creds: >- + { + "clientId": "${{ secrets.ARM_CLIENT_ID }}", + "clientSecret": "${{ secrets.ARM_CLIENT_SECRET }}", + "subscriptionId": "${{ secrets.ARM_SUBSCRIPTION_ID }}", + "tenantId": "${{ secrets.ARM_TENANT_ID }}" + } + + - name: πŸ§ͺ Verify Azure login + run: az account show + + - name: πŸ“₯ Terraform Init + run: | + terraform -chdir=infra init \ + -backend-config="resource_group_name=soft-tfstate-rg" \ + -backend-config="storage_account_name=softsastate" \ + -backend-config="container_name=tfstate" \ + -backend-config="key=terraform.tfstate" + + - name: ⚠️ Terraform Destroy + run: | + terraform -chdir=infra destroy -auto-approve -input=false \ + -var="subscription_id=${{ secrets.ARM_SUBSCRIPTION_ID }}" \ + -var="client_id=${{ secrets.ARM_CLIENT_ID }}" \ + -var="client_secret=${{ secrets.ARM_CLIENT_SECRET }}" \ + -var="tenant_id=${{ secrets.ARM_TENANT_ID }}" \ + -var="allowed_ssh_ip=${{ secrets.MY_IP_ADDRESS }}" \ + -var="mysql_user=${{ secrets.MYSQL_USER }}" \ + -var="mysql_admin_password=${{ secrets.MYSQL_ADMIN_PASSWORD }}" \ + -var="ssh_public_key=${{ secrets.VM_SSH_PUB_KEY }}" diff --git a/.github/workflows/terraform.yml b/.github/workflows/terraform.yml index 0f4d136..522a978 100644 --- a/.github/workflows/terraform.yml +++ b/.github/workflows/terraform.yml @@ -106,51 +106,133 @@ jobs: if: github.ref == 'refs/heads/master' run: terraform -chdir=infra apply -auto-approve -input=false -var-file=terraform.tfvars + # - name: πŸ“„ Generate Terraform outputs + # run: | + # terraform -chdir=infra output -json > infra/tf_outputs.json + # echo "βœ… tf_outputs.json generated" + - name: πŸ“„ 
Generate Terraform outputs run: | - terraform -chdir=infra output -json > infra/tf_outputs.json - - # cat infra/tf_outputs.json - # echo "Trying to extract IP:" - # jq -r '.control_node_public_ip' infra/tf_outputs.json + "$TERRAFORM_CLI_PATH/terraform-bin" -chdir=infra output -json > infra/tf_outputs.json + # - name: πŸ› Debug Terraform outputs + # run: | + # cat infra/tf_outputs.json + - name: πŸ“¦ Upload inventory.ini as artifact uses: actions/upload-artifact@v4 with: name: inventory path: ansible/inventory.ini - - - name: πŸ”‘ Configure SSH for jumpbox + + - name: πŸ“€ Export Terraform outputs to GitHub ENV run: | - mkdir -p ~/.ssh - echo "${{ secrets.VM_SSH_KEY }}" > ~/.ssh/vm_ssh_key - chmod 600 ~/.ssh/vm_ssh_key - echo -e "Host *\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config + echo "πŸ“₯ Exporting variables to GitHub ENV..." + echo "APP_SERVICE_NAME=$(jq -r '.app_service_name.value' infra/tf_outputs.json)" >> $GITHUB_ENV + echo "RESOURCE_GROUP_NAME=$(jq -r '.resource_group_name.value' infra/tf_outputs.json)" >> $GITHUB_ENV + echo "LB_API_URL=$(jq -r '.lb_api_url.value' infra/tf_outputs.json)" >> $GITHUB_ENV + echo "LB_API_PORT=$(jq -r '.api_public_port.value' infra/tf_outputs.json)" >> $GITHUB_ENV - - name: πŸš€ Upload Inventory to Jumpbox + - name: πŸ”§ Compose LB API URL run: | + echo "LB_API_URL=${LB_API_URL}:${LB_API_PORT}" >> $GITHUB_ENV + + - name: πŸ—ƒοΈ Run Script Configure Jumpbox + run: | + chmod +x ./ansible/configure-jumpbox.sh + # Get outputs from Terraform - JUMP_HOST=$(terraform -chdir=infra output -raw control_node_public_ip 2>/dev/null | grep -Eo '([0-9]{1,3}\.){3}[0-9]{1,3}' | tail -n1) - JUMP_USER=$(terraform -chdir=infra output -raw ssh_user 2>/dev/null | grep -Eo '^[a-zA-Z0-9]+' | tail -n1) + JUMP_HOST=$(terraform -chdir=infra output -raw control_node_public_ip | grep -Eo '([0-9]{1,3}\.){3}[0-9]{1,3}' | tail -n1) + JUMP_USER=$(terraform -chdir=infra output -raw ssh_user | grep -Eo '^[a-zA-Z0-9]+' | tail -n1) - # Log and validate - 
echo "JUMP_HOST: $JUMP_HOST" - echo "JUMP_USER: $JUMP_USER" + SSH_KEY_CONTENT="${{ secrets.VM_SSH_KEY }}" + ANSIBLE_DIR=./ansible + REMOTE_DIR=/home/${JUMP_USER}/ansible-setup + ./ansible/configure-jumpbox.sh "$JUMP_HOST" "$JUMP_USER" "$SSH_KEY_CONTENT" "$ANSIBLE_DIR" "$REMOTE_DIR" - if [[ -z "$JUMP_HOST" || -z "$JUMP_USER" ]]; then - echo "❌ Terraform outputs not found!" + - name: πŸ›’οΈ Run Script Deploy Database + run: | + chmod +x ./ansible/deploy-db-from-local.sh + # βœ… Ejecuta y guarda el resultado de terraform output + # Get clean DB_HOST - Ultimate reliable method + DB_HOST=$(terraform -chdir=infra output -raw mysql_fqdn 2>&1 | grep -oE '[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}' | head -n1) + # echo "Extracted DB_HOST='$DB_HOST'" + + # Fallback if raw output fails + if [ -z "$DB_HOST" ]; then + DB_HOST=$(terraform -chdir=infra output mysql_fqdn | grep -oE '[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}' | head -n1) + # echo "Fallback extracted DB_HOST='$DB_HOST'" + fi + + # Final validation + if [[ ! "$DB_HOST" =~ ^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then + echo "ERROR: Could not extract valid DB_HOST" + echo "Raw terraform output:" + terraform -chdir=infra output mysql_fqdn + exit 1 + fi + DB_USER="${{ secrets.MYSQL_USER }}" + DB_PASS="${{ secrets.MYSQL_ADMIN_PASSWORD }}" + DB_NAME="movie_analyst" + + JUMP_HOST=$(terraform -chdir=infra output -raw control_node_public_ip | grep -Eo '([0-9]{1,3}\.){3}[0-9]{1,3}' | tail -n1) + JUMP_USER=$(terraform -chdir=infra output -raw ssh_user | grep -Eo '^[a-zA-Z0-9]+' | tail -n1) + + ./ansible/deploy-db-from-local.sh "$DB_HOST" "$DB_USER" "$DB_PASS" "$DB_NAME" "$JUMP_HOST" "$JUMP_USER" + + - name: 🧩 Run Script Deploy API from Jumpbox to VMs + run: | + chmod +x ./ansible/deploy-api-jumpbox-to-vms.sh + + # Get clean DB_HOST - Ultimate reliable method + DB_HOST=$(terraform -chdir=infra output -raw mysql_fqdn 2>&1 | grep -oE '[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}' | head -n1) + # echo "Extracted DB_HOST='$DB_HOST'" + + # Fallback if raw output fails + if [ -z 
"$DB_HOST" ]; then + DB_HOST=$(terraform -chdir=infra output mysql_fqdn | grep -oE '[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}' | head -n1) + # echo "Fallback extracted DB_HOST='$DB_HOST'" + fi + + # Final validation + if [[ ! "$DB_HOST" =~ ^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then + echo "ERROR: Could not extract valid DB_HOST" + echo "Raw terraform output:" + terraform -chdir=infra output mysql_fqdn exit 1 fi - # Create remote directory - ssh -i ~/.ssh/vm_ssh_key -o StrictHostKeyChecking=no ${JUMP_USER}@${JUMP_HOST} \ - "mkdir -p /home/${JUMP_USER}/ansible-setup" + JUMP_HOST=$(terraform -chdir=infra output -raw control_node_public_ip | grep -Eo '([0-9]{1,3}\.){3}[0-9]{1,3}' | tail -n1) + JUMP_USER=$(terraform -chdir=infra output -raw ssh_user | grep -Eo '^[a-zA-Z0-9]+' | tail -n1) + + + DB_USER="${{ secrets.MYSQL_USER }}" + DB_PASS="${{ secrets.MYSQL_ADMIN_PASSWORD }}" + DB_NAME="movie_analyst" - # Upload inventory - scp -i ~/.ssh/vm_ssh_key -o StrictHostKeyChecking=no \ - ./ansible/inventory.ini \ - ${JUMP_USER}@${JUMP_HOST}:/home/${JUMP_USER}/ansible-setup/inventory.ini + # echo "=== Final Variables ===" + # echo "DB_HOST: $DB_HOST" + # echo "JUMP_HOST: $JUMP_HOST" + # echo "JUMP_USER: $JUMP_USER" - # Verify upload - ssh -i ~/.ssh/vm_ssh_key -o StrictHostKeyChecking=no ${JUMP_USER}@${JUMP_HOST} \ - "ls -la /home/${JUMP_USER}/ansible-setup/" + ./ansible/deploy-api-jumpbox-to-vms.sh "$DB_HOST" "$DB_USER" "$DB_PASS" "$DB_NAME" "$JUMP_HOST" "$JUMP_USER" + + - name: Deploy Frontend + run: | + chmod +x ./ansible/deploy-frontend.yml + echo "Using values:" + echo "APP_SERVICE_NAME=$APP_SERVICE_NAME" + echo "RESOURCE_GROUP_NAME=$RESOURCE_GROUP_NAME" + echo "LB_API_URL=$LB_API_URL" + ansible-playbook ansible/deploy-frontend.yml -i localhost, + env: + APP_SERVICE_NAME: ${{ env.APP_SERVICE_NAME }} + RESOURCE_GROUP_NAME: ${{ env.RESOURCE_GROUP_NAME }} + LB_API_URL: ${{ env.LB_API_URL }} + + - name: 🌐 Show frontend URL + run: | + echo "βœ… Your frontend is deployed and available at:" + echo 
"https://${APP_SERVICE_NAME}.azurewebsites.net" + env: + APP_SERVICE_NAME: ${{ env.APP_SERVICE_NAME }} diff --git a/.gitignore b/.gitignore index ae3e17f..a7c4300 100644 --- a/.gitignore +++ b/.gitignore @@ -65,3 +65,4 @@ infra/.terraform.lock.hcl *node_modules/* docs/query.txt infra/import-resources.bat +docs/tests.txt diff --git a/README.md b/README.md index 9966c13..08e3c03 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,13 @@
- - # Azure Full Stack Automation + Azure Cloud Logo +

Cloud Web App Deployment on Azure

- - # πŸ“— Table of Contents - [πŸ“– About the Project](#about-project) - - [🌦️ Cloud Diagram](#cloud-diagram) - - [🌦️ Azure Deployment](#azure-deployment) - - [🌦️ Estimated Cost](#estimated-cost) - [πŸ›  Built With](#built-with) - [Tech Stack](#tech-stack) - [Key Features](#key-features) @@ -20,535 +15,281 @@ - [πŸ’» Getting Started](#getting-started) - [Prerequisites](#prerequisites) - [Setup](#setup) - - [Install](#install) - - [Usage](#usage) - - [Run tests](#run-tests) + - [Provision Infrastructure (Terraform)](#terraform) + - [Configure Services (Ansible)](#ansible) - [Deployment](#deployment) +- [πŸ”§ Customizing Variables](#custom-variables) +- [☁️ Remote Terraform State in Azure](#️remote-terraform-state-in-azure) - [πŸ‘₯ Authors](#authors) - [πŸ”­ Future Features](#future-features) - [🀝 Contributing](#contributing) -- [⭐️ show your support](#support) +- [⭐️ Show your support](#support) - [πŸ™ Acknowledgements](#acknowledgements) - [❓ FAQ](#faq) - [πŸ“ License](#license) - - -# πŸ“– Azure Full Stack Automation - -**Azure Full Stack Automation** is a project to deploy a full stack application (frontend and backend) to Azure using Terraform for infrastructure provisioning, Ansible for configuration management, and CI/CD pipelines for automated deployment. - -## 🌦️ Cloud Diagram -![architecture diagram](https://github.com/user-attachments/assets/2133893e-3ed1-4f2f-b36f-73754dbdfc31) - -## Azure Deployment -![Recursos 1](https://github.com/user-attachments/assets/04af5b7b-5872-432d-8289-10d62402f937) - -![Recursos 2](https://github.com/user-attachments/assets/2f3a0567-e8d2-4e05-86e9-4486ddfa3a9d) +--- -## Azure Monthly Estimated Cost +# πŸ“– Azure Cloud Web App Deployment -![monthly cost](https://github.com/user-attachments/assets/d64b68ee-6dc5-40c5-85f3-34e761b437bc) +This project automates the provisioning and deployment of a scalable cloud application on Microsoft Azure. 
It uses **Terraform** to provision the infrastructure and **Ansible** to configure services such as a backend API running on 2 VMs behind a Load Balancer, connected to an **Azure MySQL** database, along with a frontend deployed on **Azure Web App**. +

(back to top)

+## πŸ›  Built With +### Tech Stack -## πŸ›  Built With
Infrastructure as Code
+
Configuration Management
+
- CI/CD + Cloud Platform
-## πŸ›  Terraform Modules Overview -This project uses a modular Terraform architecture with the following components: - -
- Network Module (`./modules/mysql-database`) - -#### Virtual Network (VNet) -- **CIDR Block**: `10.0.0.0/16` -- **Subnets**: - - **Backend Subnet** (`10.0.2.0/24`): - - Hosts application servers - - Associated with backend NSG - - Connected to NAT Gateway - - **Database Subnet** (`10.0.3.0/24`): - - Isolated subnet for database services - - Microsoft.Storage service endpoints enabled - - Restricted access to backend subnet only - - -#### Network Security Groups (NSGs) -- **Backend NSG**: - - Allows SSH access from any IP (port 22) - - Permits internal HTTP traffic on port 8080 - - Default Azure rules for outbound traffic -- **Database NSG**: - - Restricts MySQL access (port 3306) to backend subnet only - - Explicitly denies all other inbound traffic - - Implements zero-trust model for database layer - -#### NAT Gateway (Standard SKU) -- **Features**: - - Provides outbound internet connectivity for backend resources - - Uses static public IP address - - Deployed in availability zone 1 - - 4-minute idle timeout (minimum for cost optimization) -- **Environment Awareness**: - - Currently deployed in all environments (commented conditional logic available) - -
- -
- MySQL Database Module (`./modules/mysql-database`) - - #### MySQL Flexible Server -- **Environment-Aware Configuration**: - - **Production**: GP_Standard_D2ds_v4 SKU with 256GB storage - - **Non-Production**: B_Standard_B1ms SKU with 20GB storage (free-tier eligible) -- **Authentication**: - - Custom administrator username/password - - MySQL 8.0.21 version -- **Storage**: - - UTF8MB4 charset with Unicode collation - - Auto-growing storage (up to 16TB) - -#### Database Instance -- Pre-configured `movie_analyst` database -- Optimized character set for multilingual content -- Proper collation for case-insensitive searches - -#### Network Integration -- Private Endpoint connectivity -- Isolated within database subnet -- DNS integration via Private DNS Zone -
- -
- Load Balancer Module (`./modules/load-balancer`) - -#### Azure Load Balancer (Standard SKU) -- **Frontend Configuration**: - - Static public IP address (Standard SKU) - - Listens on port 80 for HTTP traffic -- **Backend Pool**: - - Contains 2 backend VMs for high availability - - Auto-registers VM network interfaces -- **Health Probes**: - - HTTP probe checking `/health` endpoint on port 8080 - - 15-second interval for responsiveness -- **Load Balancing Rules**: - - Port 80 β†’ 8080 forwarding - - TCP protocol for optimal performance - - Health probe integration - -#### Virtual Machine Infrastructure -- **Backend VMs**: - - 2 Ubuntu 18.04 LTS instances (Standard_B1ls) - - Each with: - - Dynamic private IP in backend subnet - - Basic SKU public IP (dynamic) - - 30GB standard OS disk -- **Control VM**: - - Ubuntu 22.04 LTS instance - - Static public IP (Standard SKU) - - Used for management/administration -
- -
- Monitoring Module (`./modules/monitoring`) - -#### Log Analytics Workspace -- **SKU**: PerGB2018 (First 5GB/month free) -- **Retention**: 30 days (free tier maximum) -- **Features**: - - Centralized log collection - - Basic metrics storage - - Resource-agnostic logging - -#### Diagnostic Settings -- **Free Tier Configuration**: - - Minimal metrics collection - - Load balancer basic health metrics - - No additional storage costs -- **Enhanced Configuration**: - - Full metrics collection - - Comprehensive log capture - - All categories enabled -
- -
- App Service Module (`./modules/app-service`) - -#### App Service Plan -- **Tier**: Free (F1 SKU) -- **OS**: Linux -- **Scaling**: Manual (single instance) -- **Compute**: Shared infrastructure - -#### Web Application -- **Runtime**: Node.js 14 LTS -- **Configuration**: - - AlwaysOn disabled (Free tier limitation) - - System-assigned managed identity - - Custom application settings -- **Networking**: - - Integrated with Load Balancer backend - - Automatic HTTPS redirection -
- -### Tech Stack
Database
-Infrastructure + Deployment Targets
- - ### Key Features -- **Infrastructure as Code**: Entire Azure infrastructure defined and managed using Terraform -- **Team remote State**: Terraform State Management in Azure -- **Modular Architecture**: Separate Terraform modules for networking, database, load balancing, and monitoring -- **Environment Separation**: Support for multiple environments (dev, qa, staging, prod) using Terraform workspaces -- **CI/CD Pipeline**: Automated deployment process for both frontend and backend components -- **Monitoring Integration**: Built-in Azure monitoring for the deployed application -- **Configuration Management**: Use of Ansible for automated configuration and deployment. +- πŸ”§ Automated infrastructure provisioning with Terraform +- πŸ“¦ Service configuration and app deployment using Ansible +- 🐘 MySQL database initialized via Ansible using `ansible/files/mysql/movie_db.sql` +- 🌐 Scalable API on 2 Azure VMs behind a Load Balancer +- πŸ’Ύ Managed Azure MySQL integration +- πŸš€ Web frontend deployed using Azure Web App +- πŸ” Service Principal authentication using Client Secret +- βš™οΈ Fully automated deployment workflow with CI/CD integration +- πŸ—οΈ End-to-end Terraform deployment from scratch included in workflow +- 🧹 Automated environment teardown using `terraform-destroy.yml` +- πŸ’Έ Uses Azure Web App **F1 Free Tier** for cost-effective deployment +

(back to top)

- +--- ## πŸš€ Live Demo -- [Live Demo Link](https://your-azure-app-url.azurewebsites.net) +- [Frontend Web App](https://softdefault-movies-app.azurewebsites.net/) +- [API Endpoint (behind Load Balancer)](http://your-lb-ip-or-dns)

(back to top)

+--- + ## πŸ’» Getting Started -To get a local copy up and running, follow these steps. +To get a local copy up and running, follow these instructions. ### Prerequisites -Before you begin, ensure you have the following installed: -- Terraform (>= 1.0.0) -- Azure CLI -- Ansible (>= 2.9) - -## Ansible Configuration - -This project uses Ansible for automated configuration management and application deployment across all infrastructure components. - -### Playbook Structure - -#### 1. Infrastructure Setup (`setup-infra.yml`) -- **Hosts**: All nodes (control + backend) -- **Purpose**: Baseline system configuration -- **Key Tasks**: - - Updates `/etc/hosts` for all nodes - - Configures ssh access from control node - - Sets up passwordless authentication - - Disables strict host checking for internal nodes - -#### 2. API Deployment (`deploy-api.yml`) -- **Hosts**: Backend nodes -- **Purpose**: Full application deployment -- **Key Tasks**: - - Installs system dependencies (Node.js, npm, MySQL client) - - Clones application repository - - Configures database connection - - Initializes MySQL database schema - - Sets up PM2 process manager - - Creates systemd service for automatic startup - -### Configuration Highlights - -1. **Secure Deployment**: - - Database credentials injected via variables - - Limited file permissions (config.js: 0640) - - No-log for sensitive database operations - - ssh strict host checking disabled only for internal nodes - -2. **Idempotent Operations**: - - Conditional database initialization - - Changed-when clauses for accurate reporting - - Atomic file operations - -3. **Environment Variables**: - ```yaml - mysql_config: - host: "{{ mysql_host }}" - user: "{{ mysql_user }}" - password: "{{ mysql_password }}" - database: "{{ mysql_database }}" - port: 3306 -Execution Workflow -First-Time Setup: +Most dependencies are installed automatically in the GitHub Actions workflow. 
However, for local development or debugging, ensure you have the following tools and credentials available: -
-``` -ansible-playbook -i inventory.ini deploy-api.yml --check --diff -``` -Tagged Execution: +- Terraform is initialized and executed within the workflow using predefined variables. +- The deployment includes: + - A MySQL database on Azure + - A Load Balancer with 2 backend VMs + - Network and security resources +- The entire infrastructure is provisioned from scratch via `terraform.yml`. -``` -ansible-playbook -i inventory.ini deploy-api.yml --tags "db,config" -``` +## βš™οΈ Configure Services (Ansible) -## Ansible Inventory Generation +Once the infrastructure is up, **Ansible playbooks** are automatically triggered within the same CI/CD workflow to: -This project automatically generates an Ansible inventory file (`inventory.ini`) from Terraform outputs, enabling seamless configuration management of provisioned VMs. +- Configure the VMs with the required packages +- Deploy the Node.js API to both backend nodes +- Apply application settings +- Validate MySQL schema creation and data population -### Inventory Generation Process +This configuration is handled through the `deploy-api.yml` GitHub Actions workflow. -The system creates a dynamic inventory using: -1. **Terraform Template File** (`inventory.tmpl`): - ```ini - [control] - ${control.name} ansible_host=${control.ip} ansible_user=${ssh _user} +## 🚒 Deployment - [nodes] - %{for node in nodes ~} - ${node.name} ansible_host=${node.ip} ansible_user=${ssh _user} - %{endfor ~} +- 🎯 **Trigger**: Every push or pull request to `main` kicks off a full deployment pipeline. +- 🌐 **API** is publicly reachable via the Load Balancer’s IP address. +- πŸ’» **Frontend** is deployed to Azure Web App using the **F1 Free Tier**. +- πŸ” **Secure integration** between services via environment variables and Azure-managed credentials. +- 🧨 A separate `terraform-destroy.yml` workflow is available to automatically destroy all infrastructure when needed. 
- [all:vars] - ansible_ssh _common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' - ansible_ssh _private_key_file=${ssh _private_key_path} -

(back to top)

-### Setup +--- +## πŸ”§ Customizing Variables -1. Clone the repository: -sh -git clone https://github.com/aljoveza/devops-rampup.git -cd devops-rampup +You can modify the following variables to adapt the deployment to your needs. These are defined in the Terraform configuration files: -Initialize Terraform: +## `infra/terraform.tfvars` -```sh -terraform init -``` -Create a Terraform workspace (for example, for QA environment): +```hcl +allowed_ssh_ip = "186.155.19.140/32" # IP allowed to access VMs via SSH +mysql_user = "mysqladmin" # MySQL admin user +mysql_admin_password = "Sec#reP@ssword123!" # MySQL admin password -```sh -terraform workspace new qa -``` -Install -Install Azure CLI and login: - -```sh -az login -``` -Install required Ansible roles: +variable "location" { + default = "westus2" # Azure region to deploy resources +} -```sh -ansible-galaxy install -r ansible/requirements.yml -``` -Usage -Plan the Terraform deployment: +variable "admin_username" { + default = "myadminuser" # Admin username for virtual machines +} -```sh -terraform plan -var-file=environments/qa.tfvars +variable "lb_api_port" { + default = 8080 # API port exposed by Load Balancer +} ``` -Apply the changes: +

(back to top)

+--- -```sh -terraform apply -var-file=environments/qa.tfvars -``` -Run Ansible playbook to configure servers: +## ☁️ Remote Terraform State in Azure -```sh -ansible-playbook ansible/setup.yml -i ansible/inventory/qa -``` -Run tests -Run infrastructure tests: +Terraform uses remote state storage to persist infrastructure state across executions and team members. -```sh -terraform validate -``` -Run application tests: +This project stores the Terraform state file (`terraform.tfstate`) securely in an **Azure Storage Account** using a backend configuration like the following: -```sh -cd frontend && npm test -cd ../backend && npm test +```hcl +terraform { + backend "azurerm" { + resource_group_name = "my-resource-group" + storage_account_name = "myterraformstate" + container_name = "tfstate" + key = "infrastructure.tfstate" + } +} ``` -

(back to top)

- - +--- ## πŸ‘₯ Authors + πŸ‘€ **Elio CortΓ©s** - GitHub: [@NeckerFree](https://github.com/NeckerFree) - Twitter: [@ElioCortesM](https://twitter.com/ElioCortesM) - LinkedIn: [elionelsoncortes](https://www.linkedin.com/in/elionelsoncortes/) -

(back to top)

-πŸ”­ Future Features -Auto-scaling: Implement auto-scaling for both frontend and backend components +

(back to top)

+ +--- -Blue-Green Deployment: Add support for blue-green deployments +## πŸ”­ Future Features -Enhanced Monitoring: Integrate Application Insights for deeper performance monitoring +- [ ] Add CI/CD pipeline with GitHub Actions +- [ ] Enable autoscaling for the API tier +- [ ] Implement managed identity-based DB auth -

(back to top)

+

(back to top)

+ +--- ## 🀝 Contributing + Contributions, issues, and feature requests are welcome! -Feel free to check the issues page. +Feel free to open an issue, or request features. + +

(back to top)

+ +--- + +## ⭐️ Show your support -

(back to top)

-⭐️ show your support -If you like this project, please give it a ⭐️ on GitHub! +If you like this project, please ⭐️ the repository and share it with others! -

(back to top)

+

(back to top)

+ +--- ## πŸ™ Acknowledgements -Hat tip to anyone whose code was used -Inspiration +- [Microsoft Azure documentation](https://learn.microsoft.com/en-us/azure/) +- [Ansible Azure Collection](https://galaxy.ansible.com/azure/azcollection) contributors +- [HashiCorp Terraform Modules](https://registry.terraform.io/) +- [devops-rampup](https://github.com/aljoveza/devops-rampup) β€” Backend & frontend prototype used as base for this project +- [EPAM DevOps Campus](https://campus.epam.com/en/training) β€” Cloud and DevOps learning program +- [ChatGPT](https://chatgpt.com/) β€” Assistance in automation, CI/CD, and documentation +- [DeepSeek](https://chat.deepseek.com/) β€” Assistance in automation, CI/CD, and documentation +

(back to top)

+ +--- + +## ❓ FAQ -etc +### πŸ” Where are secrets like passwords and keys stored? +Secrets are securely stored as GitHub Actions secrets and injected at runtime into the workflows. -

(back to top)

+### πŸ§ͺ Can I test changes before deploying to Azure? +Yes! You can test locally using `terraform plan` and `ansible-playbook` in dry-run mode before committing changes. -## ❓FAQ -How do I switch between environments? +### 🌎 Where is the infrastructure deployed? +By default, all resources are deployed to the `westus2` Azure region. You can change this in `infra/variables.tf`. -Use Terraform workspaces: terraform workspace select qa or terraform workspace select prod +### πŸ›  What if I want to destroy all resources? +You can run the `terraform-destroy.yml` GitHub Actions workflow to safely destroy the provisioned infrastructure. -Where are the database credentials stored? +### 🐘 How is the database created? +The Azure MySQL database is provisioned with Terraform and initialized using `movie_db.sql` from Ansible. -Database credentials are managed through Azure Key Vault and injected as environment variables during deployment. +### 🌐 What is the default URL for the frontend? +The frontend is hosted on Azure Web App. The exact URL depends on the generated Azure App Service name. Check the Azure Portal or output logs. -

(back to top)

+

(back to top)

+ +--- ## πŸ“ License -This project is MIT licensed. -

(back to top)

+This project is licensed under the [MIT License](./LICENSE). -Deploy Azure resources: -/infra/> terraform plan -/infra/> terraform apply - -when deploy completed: -Replace JUMPBOX_HOST with the value of jumpbox ansible_host in generated inventory.ini - -To validate jumbox folders or files connect from Powershell as admin an run: -ssh -i "$HOME/.ssh/vm_ssh_key" adminuser@4.154.243.88 - -In root folder bash run: - chmod +x deploy-ansible-from-local.sh - chmod +x deploy-db-from-local.sh - chmod +x deploy-api-from-local.sh - -# Copy files, install and configure ansible jumpbox -./ansible/deploy-ansible-from-local.sh -accept fingerprint (yes) - -expected: -PLAY RECAP ********************************************************************* -softqa-vm-api-0 : ok=1 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 -softqa-vm-api-1 : ok=1 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 -localhost : ok=3 changed=2 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 -Playbook executed successfully! - -Executed in GH main.yml workflow - - -# Copy files, install and configure database in VMs nodes -./ansible/deploy-db-from-local.sh - -expected: -PLAY RECAP ********************************************************************* -softqa-vm-api-0 : ok=7 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 -softqa-vm-api-1 : ok=7 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 -softqa-vm-api-0 : ok=7 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 -softqa-vm-api-1 : ok=7 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 -softqa-vm-api-1 : ok=7 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 -Playbook executed successfully! - -After execute GH actions workflow -execute \infra> terraform output -to get api URL: -lb_api_url = "http://4.155.207.109" - -and test it browser, postman or using curl \ No newline at end of file +

(back to top)

\ No newline at end of file diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg index 3bd35bc..66d7789 100644 --- a/ansible/ansible.cfg +++ b/ansible/ansible.cfg @@ -3,6 +3,7 @@ inventory = inventory.ini host_key_checking = False retry_files_enabled = False timeout = 60 +interpreter_python = auto_silent [ssh_connection] ssh_args = -o ControlMaster=auto -o ControlPersist=60s \ No newline at end of file diff --git a/ansible/api-setup.yml b/ansible/api-setup.yml index 92c5112..5294624 100644 --- a/ansible/api-setup.yml +++ b/ansible/api-setup.yml @@ -3,7 +3,7 @@ hosts: nodes become: yes vars: - app_path: /home/adminuser/ansible-setup/src/movie-analyst-api + admin_user: necker # Can be overridden with --extra-vars ansible_ssh_common_args: "-o StrictHostKeyChecking=no -o ConnectTimeout=30" ansible_ssh_retries: 3 ansible_ssh_private_key_file: "~/.ssh/vm_ssh_key" @@ -15,6 +15,10 @@ delay: 10 sleep: 5 + - name: Set app path based on admin_user + set_fact: + app_path: "/home/{{ admin_user }}/ansible-setup/src/movie-analyst-api" + - name: Show received DB environment variables debug: msg: @@ -30,65 +34,124 @@ - curl - gnupg - ca-certificates + - rsync state: present update_cache: yes + - name: Remove old Node.js and npm if installed + apt: + name: + - nodejs + - npm + state: absent + ignore_errors: yes + - name: Add NodeSource GPG key apt_key: url: https://deb.nodesource.com/gpgkey/nodesource.gpg.key state: present - - name: Add NodeSource repository + - name: Add Node.js 16.x repo apt_repository: - repo: "deb https://deb.nodesource.com/node_16.x {{ ansible_distribution_release }} main" + repo: "deb https://deb.nodesource.com/node_16.x {{ ansible_distribution_release | lower }} main" state: present filename: nodesource - update_cache: yes - - name: Install Node.js + - name: Install Node.js 16 and npm apt: name: nodejs state: present update_cache: yes + - name: Check installed Node and npm versions + shell: "node -v && npm -v" + register: node_versions + + - name: Show 
versions + debug: + var: node_versions.stdout_lines + - name: Ensure app directory exists file: path: "{{ app_path }}" state: directory mode: "0755" - owner: adminuser - group: adminuser + owner: "{{ admin_user }}" + group: "{{ admin_user }}" - - name: Synchronize API code - ansible.posix.synchronize: - src: "{{ api_source_path }}/" + - name: Copy API code + copy: + src: "{{ app_path }}/" dest: "{{ app_path }}" - recursive: yes - rsync_opts: - - "--rsh=ssh -i /home/adminuser/.ssh/vm_ssh_key -o StrictHostKeyChecking=no" - - "--exclude=node_modules" - delegate_to: localhost - - - name: Install npm dependencies - npm: - path: "{{ app_path }}" - state: present - production: yes + owner: "{{ admin_user }}" + group: "{{ admin_user }}" + mode: "0755" + + - name: πŸ“¦ Install dependencies with npm ci + command: npm ci + args: + chdir: "{{ app_path }}" + + - name: πŸ› οΈ Run database connectivity test + shell: > + node db-test.js + args: + chdir: "{{ app_path }}" + executable: /bin/bash + environment: + DB_HOST: "{{ db_host }}" + DB_USER: "{{ db_user }}" + DB_PASS: "{{ db_password }}" + DB_NAME: "{{ db_name }}" + register: db_test_result + ignore_errors: yes + + - name: Show database test results + debug: + var: db_test_result + + - name: Fail if database test failed + fail: + msg: "Database connectivity test failed. 
Error: {{ db_test_result.stderr | default(db_test_result.msg) }}" + when: db_test_result.rc != 0 + + - name: Stop movie-api service before running tests + systemd: + name: movie-api + state: stopped + ignore_errors: yes # In case service doesn't exist yet - name: Deploy systemd service template: - src: "/home/adminuser/ansible-setup/templates/movie-api.service.j2" + src: "{{ playbook_dir }}/templates/movie-api.service.j2" dest: /etc/systemd/system/movie-api.service - remote_src: true owner: root group: root mode: "0644" notify: restart movie-api + - name: πŸ§ͺ Run tests with npm test + command: npm test + args: + chdir: "{{ app_path }}" + register: test_results + ignore_errors: yes # Continue even if tests fail to ensure service is restarted + + - name: Show test results + debug: + var: test_results + + - name: Ensure movie-api service is running after tests + systemd: + name: movie-api + state: started + enabled: yes + daemon_reload: yes + handlers: - name: restart movie-api systemd: name: movie-api state: restarted enabled: yes - daemon_reload: yes + daemon_reload: yes \ No newline at end of file diff --git a/ansible/configure-jumpbox.sh b/ansible/configure-jumpbox.sh new file mode 100644 index 0000000..0282d2e --- /dev/null +++ b/ansible/configure-jumpbox.sh @@ -0,0 +1,66 @@ +#!/bin/bash +set -euo pipefail + +# === USO === +# ./configure-jumpbox.sh + +# === PARÁMETROS === +JUMP_HOST="$1" +JUMP_USER="$2" +SSH_KEY_CONTENT="$3" +ANSIBLE_DIR="$4" +REMOTE_DIR="$5" + +SSH_KEY_PATH="$HOME/.ssh/vm_ssh_key" + +# echo "πŸ“‘ Conectando al Jumpbox $JUMP_USER@$JUMP_HOST" +# echo "πŸ“ Subiendo archivos desde $ANSIBLE_DIR a $REMOTE_DIR" + +# === 1. Configurar SSH localmente con contenido del secreto === +echo "πŸ”‘ Escribiendo la clave SSH localmente..." +mkdir -p ~/.ssh +echo "$SSH_KEY_CONTENT" > "$SSH_KEY_PATH" +chmod 600 "$SSH_KEY_PATH" +echo -e "Host *\n\tStrictHostKeyChecking no\n\tUserKnownHostsFile /dev/null\n" > ~/.ssh/config + +# === 2. 
Crear carpeta remota en Jumpbox === +echo "πŸ“ Creando carpeta en el Jumpbox: $REMOTE_DIR" +ssh -i "$SSH_KEY_PATH" -o StrictHostKeyChecking=no "${JUMP_USER}@${JUMP_HOST}" \ + "mkdir -p ${REMOTE_DIR}" + +# === 3. Subir archivos Ansible al Jumpbox === +echo "⬆️ Subiendo archivos individuales..." +scp -i "$SSH_KEY_PATH" -o StrictHostKeyChecking=no \ + "${ANSIBLE_DIR}/setup-infra.yml" "${ANSIBLE_DIR}/inventory.ini" \ + "${ANSIBLE_DIR}/api-setup.yml" "${ANSIBLE_DIR}/db-setup.yml" \ + "${JUMP_USER}@${JUMP_HOST}:${REMOTE_DIR}/" + +echo "⬆️ Subiendo carpetas 'templates' y 'files'..." +scp -i "$SSH_KEY_PATH" -o StrictHostKeyChecking=no -r \ + "${ANSIBLE_DIR}/templates" "${ANSIBLE_DIR}/files" \ + "${JUMP_USER}@${JUMP_HOST}:${REMOTE_DIR}/" + +# === 4. Subir clave SSH privada a ~/.ssh/vm_ssh_key en Jumpbox === +echo "πŸ” Subiendo clave SSH privada a Jumpbox..." +scp -i "$SSH_KEY_PATH" -o StrictHostKeyChecking=no \ + "$SSH_KEY_PATH" \ + "${JUMP_USER}@${JUMP_HOST}:/home/${JUMP_USER}/.ssh/vm_ssh_key" + +ssh -i "$SSH_KEY_PATH" -o StrictHostKeyChecking=no \ + "${JUMP_USER}@${JUMP_HOST}" \ + "chmod 600 /home/${JUMP_USER}/.ssh/vm_ssh_key" + +# === 5. Ejecutar playbook desde el Jumpbox === +echo "πŸš€ Ejecutando playbook desde el Jumpbox..." +ssh -i "$SSH_KEY_PATH" -o StrictHostKeyChecking=no "${JUMP_USER}@${JUMP_HOST}" << EOF + set -e + cd ${REMOTE_DIR} + if ! command -v ansible-playbook &> /dev/null; then + echo "πŸ”§ Ansible no encontrado. Instalando..." + sudo apt update && sudo apt install ansible -y + fi + echo "πŸ“¦ Ejecutando playbook setup-infra.yml..." + ansible-playbook -i inventory.ini setup-infra.yml -e "ADMIN_USER=${JUMP_USER}" +EOF + +echo "βœ… Playbook ejecutado exitosamente." 
diff --git a/ansible/db-setup.yml b/ansible/db-setup.yml index 5d4c69e..d87e948 100644 --- a/ansible/db-setup.yml +++ b/ansible/db-setup.yml @@ -3,7 +3,7 @@ hosts: mysql_server become: yes vars: - mysql_script_path: "/home/adminuser/movie_db.sql" + mysql_script_path: "/home/{{ admin_user }}/movie_db.sql" remote_mysql_script_path: "/etc/ansible/mysql/movie_db.sql" tasks: @@ -24,26 +24,66 @@ loop: - { key: "DB_HOST", value: "{{ db_host }}" } - { key: "DB_USER", value: "{{ db_user }}" } - - { key: "DB_PASS", value: "{{ db_password }}" } + - { key: "DB_PASS", value: "{{ db_pass }}" } - { key: "DB_NAME", value: "{{ db_name }}" } - no_log: true + no_log: false - - name: Ensure MySQL scripts directory exists + - name: Ensure MySQL directory exists file: - path: "/etc/ansible/mysql" + path: /etc/ansible/mysql state: directory + owner: root + group: root mode: "0755" - name: Copy SQL script to MySQL directory copy: - src: "{{ mysql_script_path }}" - dest: "{{ remote_mysql_script_path }}" + src: "{{ playbook_dir }}/files/mysql/movie_db.sql" + dest: /etc/ansible/mysql/movie_db.sql + owner: root + group: root mode: "0644" + + - name: Handle MySQL database creation + block: + - name: Attempt to create database + command: > + mysql -h "{{ db_host }}" -u "{{ db_user }}" -p"{{ db_pass }}" + -e "CREATE DATABASE IF NOT EXISTS {{ db_name }}; SELECT 'success' as status;" + register: create_db + no_log: false + ignore_errors: yes + changed_when: false # We'll handle changes manually + + - name: Check if MySQL database exists + command: > + mysql -h "{{ db_host }}" -u "{{ db_user }}" -p"{{ db_pass }}" + -e "SHOW DATABASES LIKE '{{ db_name }}';" + register: db_check + no_log: false + ignore_errors: yes + changed_when: false + + - name: Validate database creation + fail: + msg: "Failed to create database '{{ db_name }}'. Connection or permissions issue." 
+ when: > + create_db is defined and db_check is defined and + ('success' not in (create_db.stdout | default(''))) and + ((db_check.stdout | default('')).find(db_name) == -1) + no_log: false + + rescue: + - name: Display database creation error + debug: + msg: "Database creation error: {{ create_db.stderr | default('Unknown error') }}" + no_log: false + - name: Load MySQL initialization script (run once) run_once: true shell: | - mysql -h {{ db_host }} -u{{ db_user }} -p'{{ db_password }}' {{ db_name }} < {{ remote_mysql_script_path }} + mysql -h {{ db_host }} -u{{ db_user }} -p'{{ db_pass }}' {{ db_name }} < {{ remote_mysql_script_path }} args: executable: /bin/bash changed_when: false @@ -52,7 +92,7 @@ - name: Verify database initialization run_once: true shell: | - mysql -h {{ db_host }} -u{{ db_user }} -p'{{ db_password }}' {{ db_name }} -e "SHOW TABLES;" + mysql -h "{{ db_host }}" -u"{{ db_user }}" -p"{{ db_pass }}" "{{ db_name }}" -e "SHOW TABLES;" register: db_check changed_when: false when: db_load_result.rc == 0 @@ -61,4 +101,13 @@ run_once: true fail: msg: "Database initialization failed with exit code {{ db_load_result.rc }}. Check the SQL script for errors." 
- when: db_load_result.rc != 0 or db_check is skipped \ No newline at end of file + when: db_load_result.rc != 0 or db_check is skipped + + - name: Validate that 'movies' table exists and has rows + run_once: true + shell: | + mysql -h "{{ db_host }}" -u"{{ db_user }}" -p"{{ db_pass }}" "{{ db_name }}" -e \ + "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='{{ db_name }}' AND table_name='movies'; \ + SELECT COUNT(*) FROM movies;" + register: movies_table_check + changed_when: false diff --git a/ansible/deploy-ansible-from-local.sh b/ansible/deploy-ansible-from-local.sh index ac98880..e1a302d 100644 --- a/ansible/deploy-ansible-from-local.sh +++ b/ansible/deploy-ansible-from-local.sh @@ -1,8 +1,9 @@ #!/bin/bash +##!/bin/bash + # === CONFIGURACIΓ“N === -# === STEP 0: Read JUMP_HOST and JUMP_USER from inventory === -INVENTORY_FILE="./ansible/inventory.ini" +INVENTORY_FILE="inventory.ini" JUMP_HOST=$(awk '/^\[control\]/ {getline; match($0, /ansible_host=([^ ]+)/, m); print m[1]}' "$INVENTORY_FILE") JUMP_USER=$(awk '/^\[control\]/ {getline; match($0, /ansible_user=([^ ]+)/, m); print m[1]}' "$INVENTORY_FILE") if [ -z "$JUMP_USER" ] || [ -z "$JUMP_HOST" ]; then @@ -14,81 +15,17 @@ SSH_KEY_LOCAL="$HOME/.ssh/vm_ssh_key" REMOTE_DIR="/home/${JUMP_USER}/ansible-setup" SETUP_PLAYBOOK_FILE="setup-infra.yml" -# === PASO 1: Crear directorio remoto en la jumphost === -echo "[1/5] Creando directorio remoto en la jumphost..." -ssh -i ${SSH_KEY_LOCAL} ${JUMP_USER}@${JUMP_HOST} "mkdir -p ${REMOTE_DIR}" - -# === PASO 2: Subir playbook corregido a la jumphost === -echo "[2/5] Subiendo playbook corregido a la jumphost..." 
-cat > /tmp/temp-setup-infra.yml <<'EOF' ---- -- name: Configure infrastructure - hosts: all - gather_facts: false - tasks: - - name: Ensure all nodes are in /etc/hosts - become: yes - ansible.builtin.lineinfile: - path: /etc/hosts - line: "{{ hostvars[item].ansible_host | default(item) }} {{ item }}" - state: present - loop: "{{ groups['all'] }}" - -- name: Configure control node ssh - hosts: control - gather_facts: false - vars: - ssh_private_key_path: "/home/adminuser/.ssh/id_rsa" - tasks: - - name: Create ~/.ssh directory - ansible.builtin.file: - path: ~/.ssh - state: directory - mode: "0700" - - - name: Update SSH config for node* hosts - ansible.builtin.blockinfile: - path: ~/.ssh/config - block: | - Host node* - StrictHostKeyChecking no - UserKnownHostsFile /dev/null - User adminuser - IdentityFile {{ ssh_private_key_path }} - marker: "# {mark} ANSIBLE MANAGED BLOCK - NODE CONFIG" - create: yes -EOF - -scp -i ${SSH_KEY_LOCAL} /tmp/temp-setup-infra.yml ${JUMP_USER}@${JUMP_HOST}:${REMOTE_DIR}/${SETUP_PLAYBOOK_FILE} -rm /tmp/temp-setup-infra.yml - -# === PASO 3: Generar inventory.ini corregido en la jumphost === -echo "[3/5] Generando inventory.ini corregido en la jumphost..." 
-ssh -i ${SSH_KEY_LOCAL} ${JUMP_USER}@${JUMP_HOST} << 'EOF' -cat > /home/adminuser/ansible-setup/inventory.ini < /dev/null; then diff --git a/ansible/deploy-api-from-local.sh b/ansible/deploy-api-jumpbox-to-vms.sh similarity index 58% rename from ansible/deploy-api-from-local.sh rename to ansible/deploy-api-jumpbox-to-vms.sh index 73165bd..4fa3f6f 100644 --- a/ansible/deploy-api-from-local.sh +++ b/ansible/deploy-api-jumpbox-to-vms.sh @@ -6,35 +6,21 @@ DB_HOST=${DB_HOST:-$1} DB_USER=${DB_USER:-$2} DB_PASS=${DB_PASS:-$3} DB_NAME=${DB_NAME:-$4} -: "${DB_HOST:?Missing DB_HOST}" -: "${DB_USER:?Missing DB_USER}" -: "${DB_PASS:?Missing DB_PASS}" -: "${DB_NAME:?Missing DB_NAME}" -if [[ -z "$DB_HOST" || -z "$DB_USER" || -z "$DB_PASS" || -z "$DB_NAME" ]]; then - echo "❌ Missing required DB variables" - exit 1 -fi +JUMP_HOST=${JUMP_HOST:-$5} +JUMP_USER=${JUMP_USER:-$6} # === CONFIG === # === STEP 0: Read JUMP_HOST and JUMP_USER from inventory === INVENTORY_FILE="./ansible/inventory.ini" -JUMP_HOST=$(awk '/^\[control\]/ {getline; match($0, /ansible_host=([^ ]+)/, m); print m[1]}' "$INVENTORY_FILE") -JUMP_USER=$(awk '/^\[control\]/ {getline; match($0, /ansible_user=([^ ]+)/, m); print m[1]}' "$INVENTORY_FILE") -if [ -z "$JUMP_USER" ] || [ -z "$JUMP_HOST" ]; then - echo "❌ Could not parse JUMP_USER or JUMP_HOST from inventory.ini" - exit 1 -fi + SSH_KEY_LOCAL="$HOME/.ssh/vm_ssh_key" REMOTE_DIR="/home/${JUMP_USER}/ansible-setup" API_SRC_LOCAL="./src/movie-analyst-api" API_SRC_REMOTE="${REMOTE_DIR}/src/movie-analyst-api" -API_PLAYBOOK_LOCAL="./ansible/api-setup.yml" -TEST_PLAYBOOK_LOCAL="./ansible/connection-test.yml" TEMPLATE_LOCAL="./ansible/templates/movie-api.service.j2" TEMPLATE_REMOTE="${REMOTE_DIR}/templates/movie-api.service.j2" -INVENTORY_LOCAL="./ansible/inventory.ini" -INVENTORY_REMOTE="${REMOTE_DIR}/inventory.ini" + # === STEP 0: Validate jumpbox configuration === if [ -z "$JUMP_USER" ] || [ -z "$JUMP_HOST" ]; then @@ -42,15 +28,15 @@ if [ -z "$JUMP_USER" ] || [ -z 
"$JUMP_HOST" ]; then exit 1 fi -# === STEP 1: Upload playbook, test connection and inventory === -echo "[1/4] Uploading playbook and inventory..." -scp -i "${SSH_KEY_LOCAL}" "${API_PLAYBOOK_LOCAL}" "${TEST_PLAYBOOK_LOCAL}" "${INVENTORY_LOCAL}" "${JUMP_USER}@${JUMP_HOST}:${REMOTE_DIR}/" - # === STEP 2: Copy API source code === echo "[2/4] Copying API source code..." ssh -i "$SSH_KEY_LOCAL" "$JUMP_USER@$JUMP_HOST" "mkdir -p ${REMOTE_DIR}/src" scp -i "$SSH_KEY_LOCAL" -r "$API_SRC_LOCAL" "$JUMP_USER@$JUMP_HOST:${REMOTE_DIR}/src/" +# === STEP 3: Upload systemd template === +echo "[3/4] Uploading systemd template..." +ssh -i "${SSH_KEY_LOCAL}" "${JUMP_USER}@${JUMP_HOST}" "mkdir -p ${REMOTE_DIR}/templates" +scp -i "${SSH_KEY_LOCAL}" "${TEMPLATE_LOCAL}" "${JUMP_USER}@${JUMP_HOST}:${TEMPLATE_REMOTE}" # === STEP 3.5: Preload known_hosts in the jumpbox to avoid host key verification === echo "[3.5/4] Adding backend VM keys to known_hosts on the jumpbox..." @@ -71,10 +57,10 @@ fi # === STEP 4: Execute playbook remotely === echo "[4/4] Executing playbook from the jump host..." 
echo "πŸ”Ž Verificando envs en jumpbox:" -echo "DB_HOST=${DB_HOST}" -echo "DB_USER=${DB_USER}" -echo "DB_PASS=${DB_PASS}" -echo "DB_NAME=${DB_NAME}" +# echo "DB_HOST=${DB_HOST}" +# echo "DB_USER=${DB_USER}" +# echo "DB_PASS=${DB_PASS}" +# echo "DB_NAME=${DB_NAME}" ssh -i "${SSH_KEY_LOCAL}" "${JUMP_USER}@${JUMP_HOST}" < }); // Start server -const port = process.env.PORT || 8080; -app.listen(port, '0.0.0.0', () => +const PORT = process.env.PORT || 8080; // 8080 como valor por defecto +app.listen(PORT, () => { - console.log(`Server running on port ${port}`); + console.log(`API listening on port ${PORT}`); }); + module.exports = app; \ No newline at end of file diff --git a/src/movie-analyst-ui/package-lock.json b/src/movie-analyst-ui/package-lock.json index 2189b4d..a834230 100644 --- a/src/movie-analyst-ui/package-lock.json +++ b/src/movie-analyst-ui/package-lock.json @@ -9,6 +9,7 @@ "version": "1.0.0", "license": "ISC", "dependencies": { + "dotenv": "^16.4.5", "ejs": "^2.5.7", "express": "^4.15.4", "superagent": "^3.6.0" @@ -452,6 +453,17 @@ "node": ">=0.3.1" } }, + "node_modules/dotenv": { + "version": "16.6.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", + "integrity": "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, "node_modules/dunder-proto": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", diff --git a/src/movie-analyst-ui/package.json b/src/movie-analyst-ui/package.json index c3e19c2..4dfd606 100644 --- a/src/movie-analyst-ui/package.json +++ b/src/movie-analyst-ui/package.json @@ -3,7 +3,8 @@ "version": "1.0.0", "description": "ui for the movie analyst website....im totally not ripping this off.......", "scripts": { - "test": "mocha" + "test": "mocha", + "start": "node server.js" }, "repository": { "type": "git", @@ -16,6 +17,7 @@ }, 
"homepage": "https://github.com/juan-ruiz/movie-analyst-ui#readme", "dependencies": { + "dotenv": "^16.4.5", "ejs": "^2.5.7", "express": "^4.15.4", "superagent": "^3.6.0" @@ -23,4 +25,4 @@ "devDependencies": { "mocha": "^11.7.1" } -} +} \ No newline at end of file diff --git a/src/movie-analyst-ui/server.js b/src/movie-analyst-ui/server.js index 8e5d3aa..7e15d10 100644 --- a/src/movie-analyst-ui/server.js +++ b/src/movie-analyst-ui/server.js @@ -1,77 +1,89 @@ -// Declare our dependencies -var express = require('express'); -var request = require('superagent'); +require('dotenv').config(); +const express = require('express'); +const path = require('path'); +const request = require('superagent'); // o puedes usar fetch si prefieres +const app = express(); -// Create our express app -var app = express(); - -// Set the view engine to use EJS as well as set the default views directory +// Set EJS as templating engine app.set('view engine', 'ejs'); -app.set('views', __dirname + '/public/views/'); +app.set('views', path.join(__dirname, 'views')); -// This tells Express out of which directory to serve static assets like CSS and images -app.use(express.static(__dirname + '/public')); +// Serve static assets if needed +// app.use(express.static(path.join(__dirname, 'public'))); -let backend_url = process.env.BACKEND_URL || "localhost:3000" +let backend_url = process.env.BACKEND_URL || "http://localhost:8080"; -// The homepage route of our application does not interface with the MovieAnalyst API and is always accessible. We won’t use the getAccessToken middleware here. We’ll simply render the index.ejs view. -app.get('/', function(req, res){ +// Home +app.get('/', (req, res) => +{ res.render('index'); -}) +}); -// For the movies route, we’ll call the getAccessToken middleware to ensure we have an access token. 
If we do have a valid access_token, we’ll make a request with the superagent library and we’ll be sure to add our access_token in an Authorization header before making the request to our API. -// Once the request is sent out, our API will validate that the access_token has the right scope to request the /movies resource and if it does, will return the movie data. We’ll take this movie data, and pass it alongside our movies.ejs template for rendering -app.get('/movies', function(req, res){ +// Movies +app.get('/movies', (req, res) => +{ request - .get(`http://${backend_url}/movies`) - .end(function(err, data) { - if(data.status == 403){ - res.send(403, '403 Forbidden'); - } else { - var movies = data.body; - res.render('movies', { movies: movies} ); + .get(`${backend_url}/movies`) + .end((err, data) => + { + if (err || data.status === 403) + { + res.status(403).send('403 Forbidden'); + } else + { + res.render('movies', { movies: data.body }); } - }) -}) + }); +}); -// The process will be the same for the remaining routes. We’ll make sure to get the acess_token first and then make the request to our API to get the data. -// The key difference on the authors route, is that for our client, we’re naming the route /authors, but our API endpoint is /reviewers. Our route on the client does not have to match the API endpoint route. 
-app.get('/authors', function(req, res){ +// Authors +app.get('/authors', (req, res) => +{ request - .get(`http://${backend_url}/reviewers`) - .set('Authorization', 'Bearer ' + req.access_token) - .end(function(err, data) { - if(data.status == 403){ - res.send(403, '403 Forbidden'); - } else { - var authors = data.body; - res.render('authors', {authors : authors}); + .get(`${backend_url}/reviewers`) + .end((err, data) => + { + if (err || data.status === 403) + { + res.status(403).send('403 Forbidden'); + } else + { + res.render('authors', { authors: data.body }); } - }) -}) + }); +}); -app.get('/publications', function(req, res){ +// Publications +app.get('/publications', (req, res) => +{ request - .get(`http://${backend_url}/publications`) - .end(function(err, data) { - if(data.status == 403){ - res.send(403, '403 Forbidden'); - } else { - var publications = data.body; - res.render('publications', {publications : publications}); + .get(`${backend_url}/publications`) + .end((err, data) => + { + if (err || data.status === 403) + { + res.status(403).send('403 Forbidden'); + } else + { + res.render('publications', { publications: data.body }); } - }) -}) + }); +}); -// We’ve added the pending route, but calling this route from the MovieAnalyst Website will always result in a 403 Forbidden error as this client does not have the admin scope required to get the data. 
-app.get('/pending', function(req, res){ +// Pending (admin-only) +app.get('/pending', (req, res) => +{ request - .get(`http://${backend_url}/pending`) - .end(function(err, data) { - if(data.status == 403){ - res.send(403, '403 Forbidden'); - } - }) -}) + .get(`${backend_url}/pending`) + .end((err, data) => + { + res.status(403).send('403 Forbidden'); // Always forbidden + }); +}); -app.listen(process.env.PORT || 3030); \ No newline at end of file +// Start server +const PORT = process.env.PORT || 8080; +app.listen(PORT, () => +{ + console.log(`Frontend listening on port ${PORT}`); +}); diff --git a/src/movie-analyst-ui/public/views/authors.ejs b/src/movie-analyst-ui/views/authors.ejs similarity index 100% rename from src/movie-analyst-ui/public/views/authors.ejs rename to src/movie-analyst-ui/views/authors.ejs diff --git a/src/movie-analyst-ui/public/views/index.ejs b/src/movie-analyst-ui/views/index.ejs similarity index 100% rename from src/movie-analyst-ui/public/views/index.ejs rename to src/movie-analyst-ui/views/index.ejs diff --git a/src/movie-analyst-ui/public/views/movies.ejs b/src/movie-analyst-ui/views/movies.ejs similarity index 100% rename from src/movie-analyst-ui/public/views/movies.ejs rename to src/movie-analyst-ui/views/movies.ejs diff --git a/src/movie-analyst-ui/public/views/publications.ejs b/src/movie-analyst-ui/views/publications.ejs similarity index 100% rename from src/movie-analyst-ui/public/views/publications.ejs rename to src/movie-analyst-ui/views/publications.ejs