diff --git a/03-Azure/01-03-Infrastructure/05_Azure_VMware_Solution/Lab/info/AVS Labs Credentials - Example.xlsx b/03-Azure/01-03-Infrastructure/05_Azure_VMware_Solution/Lab/info/AVS Labs Credentials - Example.xlsx index 3393264f1..cf15bbc18 100644 Binary files a/03-Azure/01-03-Infrastructure/05_Azure_VMware_Solution/Lab/info/AVS Labs Credentials - Example.xlsx and b/03-Azure/01-03-Infrastructure/05_Azure_VMware_Solution/Lab/info/AVS Labs Credentials - Example.xlsx differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/.gitignore b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/.gitignore index 17f186751..ca0a3c7c5 100644 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/.gitignore +++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/.gitignore @@ -1 +1,77 @@ -NOTES.md \ No newline at end of file +# contain sensitive data +misc/ +**/mis/ +NOTES.md + +gghack.yaml +ggfabric.yaml +adbping-job.yaml +connping-job.yaml +resources/infra/terraform/user_credentials.json +resources/infra/terraform/mhodaa-sp-credentials.json +resources/infra/terraform/*-sp-credentials.json +resources/infra/terraform/user-photos/ +resources/gg-bigdata-build +resources/scripts/adbping.sh + +# ignore helm chart temp folder +resources/infra/terraform/.helm/ + +# exclude the terrafom files and folders which should not be commited to git following best practces, located under the folder resources and all subfolder +resources/**/terraform.tfstate +resources/**/terraform.tfstate.backup +resources/**/.terraform +resources/**/crash.log +resources/**/override.tf +resources/**/terraform.tfvars +resources/**/terraform.tfvars.json +resources/**/terraform.rc +resources/**/terraform.d +# resources/**/terraform +# resources/**/modules +resources/**/providers +resources/**/workspace + +# Terraform Files +resources/**/*.tfstate +resources/**/*.tfstate.* +resources/**/*.tfplan +resources/**/*.tfplan.* +resources/**/.terraform/ +resources/**/.terraform.lock.hcl + +# Variable files (may contain sensitive data) +resources/**/terraform.tfvars +resources/**/*.auto.tfvars +resources/**/*.auto.tfvars.json + +# Override files +resources/**/override.tf +resources/**/override.tf.json +resources/**/*_override.tf +resources/**/*_override.tf.json + +# CLI configuration files +resources/**/.terraformrc +resources/**/terraform.rc + +# IDE files +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# Log files +resources/**/*.log +resources/**/crash.log +resources/**/crash.*.log + +# Ignore any .tfvars files that are generated automatically +resources/**/**/*.auto.tfvars + +# Exclude Oracle GoldenGate BigData Docker images and extracted files +resources/gg-bigdata-build/V1043090-01.zip +resources/gg-bigdata-build/extracted/ +resources/gg-bigdata-build/oracle-docker-images/ +resources/gg-bigdata-build/oracle-docker-images/.git* \ No newline at end of file diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/Challenges/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/Challenges/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/README.md index eae4267cb..2828fe44b 100644 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/README.md +++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/README.md @@ -1,49 +1,455 @@ -# Microhack - Intro To Oracle DB Migration to Azure +ο»Ώ![ODAA microhack logo](media/logo_ODAA_microhack_1900x300.jpg) -## Important Notice +# πŸš€ Microhack - Oracle Database @ 
Azure (ODAA) -This project is currently under development and is subject to change until the first official release, which is expected by the end of 2024. Please note that all content, including instructions and resources, may be updated or modified as the project progresses. +## πŸ“– Introduction +This intro-level microhack (hackathon) helps you gain hands-on experience with Oracle Database@Azure (ODAA). -## Introduction +### What is Oracle Database at Azure +Oracle Database@Azure (ODAA) is the joint Oracle–Microsoft managed service that delivers different Database services - see [ODAA deployed Azure regions](https://apexadb.oracle.com/ords/r/dbexpert/multicloud-capabilities/multicloud-regions?session=412943632928469) running on Oracle infrastructure colocated in Azure regions while exposing native Azure management, networking, billing, integration with Azure Key Vault, Entra ID or Azure Sentinel. This microhack targets the first-tier partner solution play focused on Autonomous Database because Microsoft designates ODAA as a strategic, co-sell priority workload; the exercises give partner architects the end-to-end skillsβ€”subscription linking, delegated networking, hybrid connectivity, and performance validationβ€”needed to confidently deliver that priority scenario for customers with Oracle-related workloads in Azure. -This intro level microhack (hackathon) will help you get hands-on experience migrating Oracle databases from on-premises to different Azure Services. +### What You Will Learn in the MicroHack +You will learn how to create and configure an Autonomous Database Shared of the offered Oracle Database@Azure services, how to deploy an Autonomous Database instance inside an Azure delegated subnet, update network security group (NSG) and DNS settings to enable connectivity from a simulated on-premises environment, and measure network performance to the Oracle Autonomous Database instance. To make the microhack more realistic, we will deploy the Application layer (AKS) and the Data layer (ODAA) in two different subscriptions to simulate a hub-and-spoke architecture. The following picture shows the high-level architecture of the microhack. + +![ODAA microhack architecture](media/overivew%20deployment.png) + +Furthermore, we will address the integration of ODAA into the existing Azure native services and how to use GoldenGate for migrations to ODAA and integration into Azure Fabric. + + +## What is VNet Peering? + +In our deployed scenario, we created in advance a VNet peering between the AKS VNet and the ADB VNet, which is required so the Kubernetes workloads can communicate privately and directly with the database. 
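Because the peering is already provisioned for you, no action is needed here; if you want to inspect it yourself, the following Azure CLI sketch shows one way to do so (the resource group and VNet names follow the aks-userXX pattern used throughout this guide and are placeholders for your assigned environment):

```bash
# Minimal sketch, assuming you are logged in with the assigned user and that the
# AKS VNet lives in resource group aks-userXX (replace XX with your user number).
# A healthy peering reports peeringState = Connected.
az network vnet peering list \
  --resource-group aks-userXX \
  --vnet-name aks-userXX \
  --output table

# Inspect a single peering in more detail (the peering name is a placeholder)
az network vnet peering show \
  --resource-group aks-userXX \
  --vnet-name aks-userXX \
  --name <peering-name> \
  --query "{remoteVnet:remoteVirtualNetwork.id, state:peeringState, allowVnetAccess:allowVirtualNetworkAccess}" \
  --output jsonc
```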
+ +### Architecture Diagram + +The following diagram shows how VNet peering connects the AKS cluster to the Oracle Autonomous Database: + +```mermaid +flowchart TB + subgraph AKS_SUB[Azure Subscription AKS] + subgraph AKS_RG[Resource Group: aks-userXX] + subgraph AKS_VNET[VNet: aks-userXX 10.0.0.0/16] + subgraph AKS_SUBNET[Subnet: aks 10.0.0.0/23] + AKS[AKS Cluster] + end + DNS[Private DNS Zones] + end + end + end + + subgraph ODAA_SUB[Azure Subscription ODAA] + subgraph ODAA_RG[Resource Group: odaa-userXX] + subgraph ODAA_VNET[VNet: odaa-userXX 192.168.0.0/16] + subgraph ODAA_SUBNET[Delegated Subnet 192.168.0.0/24] + ADB[Oracle Autonomous Database] + end + end + NSG[NSG: Allow 10.0.0.0/16] + end + end + + AKS_VNET <-->|VNet Peering| ODAA_VNET + AKS -.->|SQL Queries| ADB + DNS -.->|Resolves hostname| ADB + + style AKS_SUB fill:#0078D4,color:#fff + style AKS_RG fill:#50E6FF,color:#000 + style AKS_VNET fill:#7FBA00,color:#fff + style AKS_SUBNET fill:#98FB98,color:#000 + style ODAA_SUB fill:#0078D4,color:#fff + style ODAA_RG fill:#50E6FF,color:#000 + style ODAA_VNET fill:#7FBA00,color:#fff + style ODAA_SUBNET fill:#98FB98,color:#000 + style ADB fill:#C74634,color:#fff + style AKS fill:#FFB900,color:#000 + style DNS fill:#50E6FF,color:#000 + style NSG fill:#F25022,color:#fff +``` + +### What does VNet peering mean in detail + +| Concept | Description | +|---------|-------------| +| **VNet isolation by default** | The AKS nodes run in one VNet and ADB sits in another; without peering, those address spaces are completely isolated and pods cannot reach the database IPs at all. | +| **Private, internal traffic** | Peering lets both VNets exchange traffic over private IPs only, as if they were one network. No public IPs, no internet exposure, no extra gateways are needed. | +| **Low latency, high bandwidth path** | Application-database calls stay on the cloud backbone, which is crucial for chatty OLTP workloads and for predictable performance. | +| **Simple routing model** | With peering, standard system routes know how to reach the other VNet's CIDR; you avoid managing separate VPNs, user-defined routes, or NAT just to reach the DB. | +| **Granular security with NSGs** | Even with peering in place, NSGs on subnets/NICs still control which AKS node subnets and ports (for example, 1521/2484) can reach ADB, giving you a simple but secure pattern. | + +**In summary:** The peering is what turns two isolated networks (AKS and ADB) into a securely connected, private application-database path, which the scenario depends on for the workloads to function. + +## Mapping between Azure and OCI + +### Azure Resource Hierarchy Diagram + +The following diagram shows how Azure organizes resources, mapped to our Terraform deployment: + +```mermaid +flowchart TB + subgraph TENANT[Azure Tenant - Entra ID Directory] + direction TB + USERS[Users and Groups
mh-odaa-user-grp] + + subgraph SUB_AKS[Subscription: AKS] + subgraph RG_AKS[Resource Group: aks-userXX] + VNET_AKS[VNet: aks-userXX
10.0.0.0/16] + AKS_CLUSTER[AKS Cluster] + LOG[Log Analytics] + DNS_ZONES[Private DNS Zones] + end + end + + subgraph SUB_ODAA[Subscription: ODAA] + subgraph RG_ODAA[Resource Group: odaa-userXX] + VNET_ODAA[VNet: odaa-userXX
192.168.0.0/16] + ADB[Oracle ADB] + end + end + end + + USERS --> SUB_AKS + USERS --> SUB_ODAA + VNET_AKS <-.->|VNet Peering| VNET_ODAA + + style TENANT fill:#0078D4,color:#fff + style USERS fill:#FFB900,color:#000 + style SUB_AKS fill:#50E6FF,color:#000 + style SUB_ODAA fill:#50E6FF,color:#000 + style RG_AKS fill:#7FBA00,color:#fff + style RG_ODAA fill:#7FBA00,color:#fff + style VNET_AKS fill:#98FB98,color:#000 + style VNET_ODAA fill:#98FB98,color:#000 + style AKS_CLUSTER fill:#FFB900,color:#000 + style LOG fill:#50E6FF,color:#000 + style DNS_ZONES fill:#50E6FF,color:#000 + style ADB fill:#C74634,color:#fff +``` + +> **Learn more:** [Azure resource organization](https://learn.microsoft.com/en-us/azure/cloud-adoption-framework/ready/azure-setup-guide/organize-resources) + +### Comparison Table: Azure vs OCI + +| Azure Concept | Description | OCI Equivalent | +|---------------|-------------|----------------| +| **Tenant** | Top-level identity boundary (Entra ID directory: users, groups, apps) | **Tenancy** (root container with identity domain/compartments) | +| **Subscription** | Billing + deployment boundary; holds resource groups and resources | **Tenancy + Compartments** with cost-tracking tags | +| **Resource Group** | Logical container for related resources; used for lifecycle, RBAC, policy, and tagging scope | **Compartment** (logical container for access control and organization) | +| **Region** | Geographic area containing one or more datacenters | **Region** | +| **Availability Zone** | Physically separate datacenter within a region | **Availability Domain** | + +### Hierarchy Comparison + +``` +Azure: Tenant --> Subscription --> Resource Group --> Resource +OCI: Tenancy --> Compartment (nested) --> Resource +``` + +> **Note:** OCI compartments are closer to Azure resource groups + some subscription-scope concepts. + +### Networking Concepts + +| Azure | Description | OCI Equivalent | +|-------|-------------|----------------| +| **Virtual Network (VNet)** | A private network in Azure where you place resources (VMs, databases, etc.), similar to an on-premises LAN in the cloud | **Virtual Cloud Network (VCN)** | +| **Subnet** | A segment inside a VNet that groups resources and defines their IP range and routing boundaries | **Subnet** | +| **Network Security Group (NSG)** | A set of inbound/outbound rules that allow or block traffic to subnets or individual NICs, acting like a basic stateful firewall | **Security List / NSG** | +| **VNet Peering** | Connects two VNets so they can communicate using private IPs | **Local/Remote Peering** | ## Learning Objectives -In this microhack you will solve a common challenge for companies migrating to the cloud: migrating Oracle databases to Azure. The application using the database is a sample e-commerce [application](https://github.com/pzinsta/pizzeria) written in JavaScript. It will be configured to use Oracle Database Express Edition [Oracle XE]. - -The participants will learn how to: - -1. Perform a pre-migration assessment of the databases looking at size, database engine type, database version, etc. -1. Use offline tools to copy the databases to Azure OSS databases -1. Use the Azure Database Migration Service to perform an online migration (if applicable) -1. Do cutover and validation to ensure the application is working properly with the new configuration -1. Use a private endpoint for Azure OSS databases instead of a public IP address for the database -1. 
Configure a read replica for the Azure OSS databases - -## Challenges -- Challenge 0: **[Pre-requisites - Setup Environment and Prerequisites!](Student/00-prereqs.md)** - - Prepare your environment to run the sample application -- Challenge 1: **[Discovery and assessment](Student/01-discovery.md)** - - Discover and assess the application's PostgreSQL/MySQL/Oracle databases -- Challenge 2: Oracle to IaaS migration -- Challenge 3: Oracle to PaaS migration -- Challenge 4: Oracle to Azure OCI migration -- Challenge 5: Oracle to Oracle Database on Azure migration - -## Prerequisites - -- Access to an Azure subscription with Owner access - - If you don't have one, [Sign Up for Azure HERE](https://azure.microsoft.com/en-us/free/) - - Familiarity with Azure Cloud Shell -- [**Visual Studio Code**](https://code.visualstudio.com/) (optional) - -## Repository Contents -- `../Coach` - - [Lecture presentation](Coach/OSS-DB-What-the-Hack-Lecture.pptx?raw=true) with short presentations to introduce each challenge - - Example solutions and coach tips to the challenges (If you're a student, don't cheat yourself out of an education!) -- `../Student/Resources` - - Pizzeria application environment setup +- Understand how to onboard securely to Azure and prepare an account for Oracle Database@Azure administration. +- Learn the sequence for purchasing and linking an Oracle Database@Azure subscription with Oracle Cloud Infrastructure. +- Deploy an Autonomous Database instance inside an Azure network architecture and the required preparations. +- Apply required networking and DNS configurations to enable hybrid connectivity between Azure Kubernetes Service and Oracle Database@Azure resources. +- Operate the provided tooling (Helm, GoldenGate, Data Pump, SQL*Plus) to simulate data replication scenarios and measure connectivity performance. + +## πŸ“‹ Prerequisites + +- PowerShell Terminal +- πŸ”§ Install Azure CLI +- βš“ Install kubectl +- Install Helm +- Install git and clone this repo by following the instructions in [Clone Partial Repository](docs/clone-partial-repo.md) + +## 🎯 Challenges + +### Challenge 0: Set Up Your User Account + +The goal is to ensure your Azure account is ready for administrative work in the remaining challenges. + +> [!IMPORTANT] Before using the AZ command line in your preferred GUI or CLI, please make sure to log out of any previous session by running the command: +> +>```powershell +>az logout +>``` + +You will receive a user and password for your account from your microhack coach. You must change this password during the initial registration. + +Start by browsing to the Azure Portal https://portal.azure.com. + +Open a private browser session or create your own browser profile to sign in with the credentials you received, and register multi-factor authentication. + +As a first check, you have to verify if the two resource groups for the hackathon are created via the Azure Portal https://portal.azure.com. + +#### Actions + +* Enable multi-factor authentication (MFA) +* Log in to the Azure portal with the assigned user +* Verify if the ODAA and AKS resource groups including resources are available +* Verify the user's roles + +#### Success Criteria + +* Download the Microsoft Authenticator app on your mobile phone +* Enable MFA for a successful login +* Check if the resource groups for AKS and ODAA are available and contain the resources via the Azure Portal https://portal.azure.com +* Check if the assigned user has the required roles in both resource groups. 
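If you prefer the command line for these checks, the sketch below shows one way to confirm the resource groups and your role assignments (it assumes you have already signed in as the assigned user with `az login`; the aks-userXX / odaa-userXX names are placeholders for your assigned resource groups):

```bash
# Minimal sketch -- replace XX with your assigned user number.

# Confirm both resource groups exist and see what they contain
az group show --name aks-userXX --output table
az group show --name odaa-userXX --output table
az resource list --resource-group aks-userXX --output table
az resource list --resource-group odaa-userXX --output table

# List the roles held by the signed-in user on each resource group
USER_ID=$(az ad signed-in-user show --query id --output tsv)
az role assignment list --assignee "$USER_ID" --resource-group aks-userXX --output table
az role assignment list --assignee "$USER_ID" --resource-group odaa-userXX --output table
```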
+ +#### Learning Resources + +* [Sign in to the Azure portal](https://azure.microsoft.com/en-us/get-started/azure-portal) +* [Set up Microsoft Entra multi-factor authentication](https://learn.microsoft.com/azure/active-directory/authentication/howto-mfa-userdevicesettings) +* [Groups and roles in Azure](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/oaagroupsroles.htm) + +#### Solution + +* Challenge 0: [Set Up Your User Account](./walkthrough/setup-user-account/setup-user-account.md) + +### Challenge 1: Create an Oracle Database@Azure (ODAA) Subscription + +> [!NOTE] +> **This is a theoretical challenge only.** No action is required from participants aside from reading the content. The ODAA subscription has already been created for you to save time. + +Review the Oracle Database@Azure service offer, the required Azure resource providers, and the role of the OCI tenancy. By the end you should understand how an Azure subscription links to Oracle Cloud so database services can be created. Please consider that Challenge 1 is already realized for you to save time and is therefore a purely theoretical challenge. + +#### Actions + +* Move to the ODAA marketplace side. The purchasing is already done, but check out the implementation of ODAA on the Azure side. +* Check if the required Azure resource providers are enabled + +#### Success Criteria + +* Find the Oracle Database at Azure Service in the Azure Portal +* Make yourself familiar with the available services of ODAA and how to purchase ODAA + +#### Learning Resources + +* [ODAA in Azure an overview](https://www.oracle.com/cloud/azure/oracle-database-at-azure/) +* [Enhanced Networking for ODAA](https://learn.microsoft.com/en-us/azure/oracle/oracle-db/oracle-database-network-plan) + +#### Solution + +* Challenge 1: [Create an Oracle Database@Azure (ODAA) Subscription](./walkthrough/create-odaa-subscription/create-odaa-subscription.md) + +### Challenge 2: Create an Oracle Database@Azure (ODAA) Autonomous Database (ADB) Instance + +Walk through the delegated subnet prerequisites, select the assigned resource group, and deploy the Autonomous Database instance with the standard parameters supplied in the guide. Completion is confirmed when the database instance shows a healthy state in the portal. + +#### Actions + +* Verify that a delegated subnet of the upcoming ADB deployment is available + +> [!IMPORTANT] +> +> Setup the ADB exactly with the following settings: +> +> **ADB Deployment Settings:** +> 1. Workload type: **OLTP** +> 2. Database version: **23ai** +> 3. ECPU Count: **2** +> 4. Compute auto scaling: **off** +> 5. Storage: **20 GB** +> 6. Storage autoscaling: **off** +> 7. Backup retention period in days: **1 day** +> 8. Administrator password: (do not use '!' inside your password) +> 9. License type: **License included** +> 10. Oracle database edition: **Enterprise Edition** + +After you started the ADB deployment please clone the Github repository. 
Instructions are listed in the challenge 2 at the end of the ADB deployment section - see **IMPORTANT: While you are waiting for the ADB creation** + +#### Success Criteria + +* Delegated Subnet is available +* ADB Shared is successfully deployed + +#### Learning Resources + +* [How to provision an Oracle ADB in Azure](https://learn.microsoft.com/en-us/azure/oracle/oracle-db/oracle-database-provision-autonomous-database) +* [Deploy an ADB in Azure](https://docs.oracle.com/en/solutions/deploy-autonomous-database-db-at-azure/index.html) + +#### Solution + +* Challenge 2: [Create an Oracle Database@Azure (ODAA) Autonomous Database (ADB) Instance](./walkthrough/create-odaa-adb/create-odaa-adb.md) + +### Challenge 3: Update the Oracle ADB NSG and DNS + +Update the Network Security Group to allow traffic from the AKS environment and register the Oracle private endpoints in the AKS Private DNS zones. Validate connectivity from AKS after both security and DNS changes are applied. + +#### Actions + +* Set the NSG of the CIDR on the OCI side, to allow Ingress from the AKS on the ADB +* Extract the ODAA "Database private URL" (FQDN) and "Database private IP" and assign them to the "Azure Private DNS Zones" linked to the AKS VNet. + +#### DNS Configuration Diagram + +The following diagram shows how Private DNS enables AKS pods to resolve the Oracle ADB hostname: + +```mermaid +flowchart TB + subgraph AKS_SUB["Azure Subscription: AKS"] + subgraph AKS_RG["Resource Group: aks-userXX"] + subgraph VNET["VNet: aks-userXX
10.0.0.0/16"] + POD["πŸ“¦ Pod"] + end + LINK["πŸ”— VNet Link"] + subgraph DNS_ZONE["Private DNS Zone
adb.eu-paris-1.oraclecloud.com"] + A_RECORD["A Record
Name: abc123
IP: 192.168.0.10"] + end + end + end + + subgraph ODAA_SUB["Azure Subscription: ODAA"] + ADB["πŸ—„οΈ Oracle ADB
━━━━━━━━━━━━━━━━━
Database private URL:
abc123.adb.eu-paris-1...
Database private IP:
192.168.0.10"] + end + + ADB -.->|"Copy URL & IP"| A_RECORD + VNET --- LINK + LINK --- DNS_ZONE + + style AKS_SUB fill:#0078D4,color:#fff + style ODAA_SUB fill:#0078D4,color:#fff + style DNS_ZONE fill:#50E6FF,color:#000 + style A_RECORD fill:#FFB900,color:#000 + style ADB fill:#C74634,color:#fff + style VNET fill:#7FBA00,color:#fff +``` + +**Steps:** + +1. **Copy** the Database private URL and IP from the Azure Portal (ODAA ADB resource) +2. **Create an A record** in the Private DNS Zone with the hostname pointing to the private IP +3. **Pods in AKS** resolve the FQDN via the VNet-linked Private DNS Zone + +#### Success Criteria + +* Set the NSG of the CIDR on the OCI side, to allow ingress from AKS to the ADB +* DNS is set up correctly. + +> [!CAUTION] +> **Without a working DNS the next Challenge will fail.** Make sure DNS resolution is properly configured before proceeding. + +#### Learning Resources + +* [Network security groups overview](https://learn.microsoft.com/azure/virtual-network/network-security-groups-overview), +* [Private DNS zones in Azure](https://learn.microsoft.com/azure/dns/private-dns-privatednszone), +* [Oracle Database@Azure networking guidance](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/network.htm) + +#### Solution + +* Challenge 3: [Update the Oracle ADB NSG and DNS](./walkthrough/update-odaa-nsg-dns/update-odaa-nsg-dns.md) + +### Challenge 4: Simulate the On-Premises Environment + +Deploy the pre-built Helm chart into AKS to install the sample Oracle database, Data Pump job, GoldenGate services, and Instant Client. Manage the shared secrets carefully and verify that data flows from the source schema into the Autonomous Database target schema. + +#### Architecture Diagram + +The following diagram shows the components deployed via Helm and the data replication flow: + +```mermaid +flowchart TB + subgraph AKS_SUB["Azure Subscription: AKS"] + subgraph AKS["AKS Cluster (Namespace: microhacks)"] + subgraph HELM["Helm Chart: goldengate-microhack-sample"] + DB["πŸ—„οΈ Oracle 23ai Free
(Source DB)
Schema: SH"] + OGG["⚑ GoldenGate
(CDC Replication)"] + IC["πŸ’» Instant Client
(SQL*Plus)"] + JUP["πŸ““ Jupyter Notebook
(CPAT)"] + PUMP["πŸ“¦ Data Pump Job
(Initial Load)"] + end + SECRETS["πŸ” K8s Secrets
ogg-admin-secret
db-admin-secret"] + INGRESS["🌐 NGINX Ingress"] + end + end + + subgraph ODAA_SUB["Azure Subscription: ODAA"] + ADB["πŸ—„οΈ Oracle ADB
(Target DB)
Schema: SH2"] + end + + SECRETS -.-> HELM + PUMP -->|"1️⃣ Initial Load
SH β†’ SH2"| ADB + OGG -->|"2️⃣ CDC Replication
(Real-time)"| ADB + IC -->|"SQL Queries"| DB + IC -->|"SQL Queries"| ADB + INGRESS -->|"Web UI"| OGG + INGRESS -->|"Web UI"| JUP + + style AKS_SUB fill:#0078D4,color:#fff + style ODAA_SUB fill:#0078D4,color:#fff + style HELM fill:#50E6FF,color:#000 + style DB fill:#C74634,color:#fff + style ADB fill:#C74634,color:#fff + style OGG fill:#FFB900,color:#000 + style SECRETS fill:#7FBA00,color:#fff +``` + +**Data Flow:** +1. **Data Pump** performs the initial bulk load of the SH schema to the SH2 schema in ADB +2. **GoldenGate** captures ongoing changes (CDC) and replicates them in near real-time +3. **Instant Client** provides SQL*Plus access to both source and target databases + +#### Actions + +* Deploy the AKS cluster with the responsible Pods, Jupyter notebook with CPAT, Oracle Instant Client and GoldenGate +* Verify AKS cluster deployment +* Check the connectivity from Instant Client to the ADB database and check if the SH schema from the 23ai Free Edition is migrated to the SH2 schema in the ADB +* Review the GoldenGate configuration + +#### Success Criteria + +* Successful AKS deployment with Pods +* Successful connection from the Instant Client to the ADB and source database +* Successful login to GoldenGate + +#### Learning Resources + +* [Connect to an AKS cluster using Azure CLI](https://learn.microsoft.com/azure/aks/learn/quick-kubernetes-deploy-cli), +* [Use Helm with AKS](https://learn.microsoft.com/azure/aks/kubernetes-helm), +* [Oracle GoldenGate Microservices overview](https://docs.oracle.com/en/middleware/goldengate/core/23/coredoc/), +* [Oracle Data Pump overview](https://docs.oracle.com/en/database/oracle/oracle-database/26/sutil/oracle-data-pump-overview.html) + +#### Solution + +* Challenge 4: [Simulate the On-Premises Environment](./walkthrough/onprem-ramp-up/onprem-ramp-up.md) + +--- + +### Challenge 5: Measure Network Performance to Your Oracle Database@Azure Autonomous Database + +Use the Instant Client pod to run the scripted SQL latency test against the Autonomous Database and collect the round-trip results. Optionally supplement the findings with the lightweight TCP probe to observe connection setup timing. 
+ +#### Actions +* Log in to the Instant Client and execute a first performance test from the AKS cluster against the deployed ADB + +#### Success Criteria +* Successful login to the ADB via the Instant Client +* Successful execution of the available performance scripts + +#### Learning Resources +* [Connect to Oracle Database@Azure using SQL*Plus](https://docs.oracle.com/en-us/iaas/autonomous-database-serverless/doc/connect-sqlplus-tls.html), +* [Diagnose metrics and logs for Oracle Database@Azure](https://learn.microsoft.com/en-us/azure/cloud-adoption-framework/scenarios/oracle-on-azure/oracle-manage-monitor-oracle-database-azure) + +#### Solution +* Challenge 5: [Measure Network Performance to Your Oracle Database@Azure Autonomous Database](./walkthrough/perf-test-odaa/perf-test-odaa.md) + + + ## Contributors +*To be added* + diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/Walkthrough/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/Walkthrough/README.md deleted file mode 100644 index 30404ce4c..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/Walkthrough/README.md +++ /dev/null @@ -1 +0,0 @@ -TODO \ No newline at end of file diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/DEBUG-ENTRAID-AUTH.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/DEBUG-ENTRAID-AUTH.md new file mode 100644 index 000000000..881774807 --- /dev/null +++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/DEBUG-ENTRAID-AUTH.md @@ -0,0 +1,461 @@ +# Debugging Entra ID Authentication to Oracle Autonomous Database + +## Error Received +``` +ORA-01017: invalid credential or not authorized; logon denied +``` + +## Step-by-Step Debugging Guide + +### 1. Token Analysis βœ… + +**Your Token Details:** +- **User (upn):** ga1@cptazure.org +- **Audience (aud):** 7d22ece1-dd60-4279-a911-4b7b95934f2e βœ… (matches app registration) +- **Tenant (tid):** f71980b2-590a-4de9-90d5-6fbc867da951 βœ… (matches configuration) +- **Issuer (iss):** https://login.microsoftonline.com/f71980b2-590a-4de9-90d5-6fbc867da951/v2.0 βœ… +- **Token Version:** 2.0 βœ… (correct - you set `accessTokenAcceptedVersion: 2`) +- **Scope (scp):** session:scope:connect βœ… +- **Roles:** 1314ae09-ccc6-4f59-b68b-3837ff44465b, fa80ec82-2110-4b45-be28-b3341bf19661 +- **Token Valid:** Yes (expires 10/17/2025 09:31:32) + +**Token appears valid! βœ…** + +--- + +### 2. 
Database Configuration Checks + +Connect to the database as ADMIN to verify configuration: + +```powershell +# Get pod name +$podInstanteClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] } + +# Connect to pod +kubectl exec -it -n microhacks $podInstanteClientName -- /bin/bash + +# Inside the pod, connect as ADMIN +sqlplus admin@'(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))' +``` + +#### Check 2.1: Verify Entra ID is Enabled + +```sql +-- Should show AZURE_AD +SELECT NAME, VALUE FROM V$PARAMETER WHERE NAME='identity_provider_type'; +``` + +**Expected Output:** +``` +NAME VALUE +------------------------------ ---------- +identity_provider_type AZURE_AD +``` + +#### Check 2.2: Verify User GA1 Exists and is Global + +```sql +-- Should show GLOBAL authentication +SELECT username, authentication_type, account_status, external_name +FROM dba_users +WHERE username = 'GA1'; +``` + +**Expected Output:** +``` +USERNAME AUTHENTI ACCOUNT_STATUS EXTERNAL_NAME +--------- --------- --------------- ------------------------------ +GA1 GLOBAL OPEN AZURE_USER=ga1@cptazure.org +``` + +⚠️ **CRITICAL CHECK:** The `EXTERNAL_NAME` must be exactly `AZURE_USER=ga1@cptazure.org` + +#### Check 2.3: Verify User Privileges + +```sql +-- GA1 must have CREATE SESSION privilege +SELECT * FROM dba_sys_privs WHERE grantee = 'GA1'; +``` + +**Expected Output:** +``` +GRANTEE PRIVILEGE ADMIN_OPTION +-------- --------------- ------------ +GA1 CREATE SESSION NO +``` + +#### Check 2.4: Verify Entra ID Configuration + +```sql +-- Check Azure AD configuration +SELECT + param_name, + param_value +FROM + dba_cloud_config +WHERE + param_name IN ('AZURE_TENANT_ID', 'AZURE_APPLICATION_ID', 'AZURE_APPLICATION_ID_URI') +ORDER BY + param_name; +``` + +**Expected Values:** +``` +PARAM_NAME PARAM_VALUE +-------------------------- -------------------------------------------------- +AZURE_TENANT_ID f71980b2-590a-4de9-90d5-6fbc867da951 +AZURE_APPLICATION_ID 7d22ece1-dd60-4279-a911-4b7b95934f2e +AZURE_APPLICATION_ID_URI https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e +``` + +#### Check 2.5: Verify Network ACLs + +```sql +-- Check if GA1 has network access to Entra ID endpoints +SELECT host, lower_port, upper_port, principal, privilege +FROM dba_host_aces +WHERE host LIKE 'login%' AND principal = 'GA1' +ORDER BY host, privilege; +``` + +**Expected Output:** +``` +HOST PRINCIPAL PRIVILEGE +------------------------- ---------- --------- +login.microsoftonline.com GA1 connect +login.microsoftonline.com GA1 resolve +login.windows.net GA1 connect +login.windows.net GA1 resolve +``` + +If missing, add them: + +```sql +BEGIN + DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE( + host => 'login.windows.net', + ace => xs$ace_type( + privilege_list => xs$name_list('connect','resolve'), + principal_name => 'GA1', + principal_type => xs_acl.ptype_db)); +END; +/ +COMMIT; + +BEGIN + DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE( + host => 'login.microsoftonline.com', + ace => xs$ace_type( + privilege_list => xs$name_list('connect','resolve'), + principal_name => 'GA1', + principal_type => xs_acl.ptype_db)); +END; +/ +COMMIT; +``` + +#### Check 2.6: Test Database Can Reach Entra ID + +```sql +-- Test HTTPS connectivity to Entra ID (as ADMIN) +SET SERVEROUTPUT ON SIZE 40000 
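-- The PL/SQL block below fetches the Entra ID signing-key (JWKS) document over HTTPS;
-- SET_WALLET('system:') points UTL_HTTP at the database's default trust store, and an
-- HTTP 200 response confirms the database can reach the endpoint used for token validation.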
+DECLARE + req UTL_HTTP.REQ; + resp UTL_HTTP.RESP; +BEGIN + UTL_HTTP.SET_WALLET(path => 'system:'); + req := UTL_HTTP.BEGIN_REQUEST('https://login.windows.net/common/discovery/keys'); + resp := UTL_HTTP.GET_RESPONSE(req); + DBMS_OUTPUT.PUT_LINE('HTTP response status code: ' || resp.status_code); + UTL_HTTP.END_RESPONSE(resp); +EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM); +END; +/ +``` + +**Expected Output:** `HTTP response status code: 200` + +--- + +### 3. Client Configuration Checks + +#### Check 3.1: Verify Token File Exists and is Readable + +```bash +# Inside the pod +ls -la /tmp/wallet/token.txt +cat /tmp/wallet/token.txt | wc -c # Should be ~1900 bytes (not empty!) +``` + +#### Check 3.2: Verify sqlnet.ora Configuration + +```bash +# Check sqlnet.ora content +cat /tmp/wallet/sqlnet.ora +``` + +**Expected Content:** +``` +WALLET_LOCATION = (SOURCE = (METHOD = file) (METHOD_DATA = (DIRECTORY="/tmp/wallet"))) +SSL_SERVER_DN_MATCH=ON +SQLNET.AUTHENTICATION_SERVICES= (TCPS,NTS) +NAMES.DIRECTORY_PATH= (TNSNAMES, EZCONNECT) +TOKEN_AUTH=OAUTH +TOKEN_LOCATION="/tmp/wallet/token.txt" +``` + +⚠️ **CRITICAL CHECKS:** +- `SSL_SERVER_DN_MATCH=ON` (for Entra ID connections) +- `TOKEN_AUTH=OAUTH` +- `TOKEN_LOCATION="/tmp/wallet/token.txt"` (correct path) + +#### Check 3.3: Verify TNS_ADMIN Environment Variable + +```bash +# Should point to /tmp/wallet +echo $TNS_ADMIN +``` + +If not set: +```bash +export TNS_ADMIN=/tmp/wallet +``` + +#### Check 3.4: Test Token is Valid and Not Expired + +```bash +# Check token expiry (you can decode it manually or check the exp claim) +# Your current token expires: 10/17/2025 09:31:32 +date +``` + +If expired, regenerate: + +```powershell +# On your local machine +az login --tenant "f71980b2-590a-4de9-90d5-6fbc867da951" +$token=az account get-access-token --scope "https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e/.default" --query accessToken -o tsv +$token | Out-File -FilePath .\misc\token.txt -Encoding ascii + +# Upload to pod +$podInstanteClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] } +kubectl cp ./misc/token.txt ${podInstanteClientName}:/tmp/wallet/token.txt -n microhacks +``` + +--- + +### 4. Database Alert Log and Trace Files + +#### Check 4.1: Check Oracle Alert Log + +If you have access to the database alert log (typically through Oracle Cloud Console): + +**Look for entries like:** +- `ORA-01017` with additional context +- `OAUTH` or `AZURE_AD` authentication failures +- Token validation errors +- Network connectivity issues to Entra ID endpoints + +**Location (on ADB):** Typically accessible through OCI Console β†’ Autonomous Database β†’ Performance Hub β†’ SQL Monitoring + +#### Check 4.2: Enable SQL*Net Tracing (if needed) + +```bash +# Add to sqlnet.ora temporarily for debugging +cat <<'EOF' >> /tmp/wallet/sqlnet.ora +TRACE_LEVEL_CLIENT=16 +TRACE_DIRECTORY_CLIENT=/tmp +TRACE_FILE_CLIENT=sqlnet_trace.log +EOF +``` + +Then retry connection and check `/tmp/sqlnet_trace.log` + +--- + +### 5. 
Common Issues and Solutions + +#### Issue 5.1: User Mapping Mismatch + +**Problem:** Database expects exact UPN from token + +**Solution:** +```sql +-- Recreate user with exact UPN from token +DROP USER GA1 CASCADE; +CREATE USER GA1 IDENTIFIED GLOBALLY AS 'AZURE_USER=ga1@cptazure.org'; +GRANT CREATE SESSION TO GA1; +``` + +⚠️ **The UPN in the token is:** `ga1@cptazure.org` + +#### Issue 5.2: Wrong Connection String + +**Problem:** Using wrong security settings + +**Current attempt:** +``` +(security=(ssl_server_dn_match=on)) +``` + +**Try with explicit token parameters:** +```bash +sqlplus /@'(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=yes)(TOKEN_AUTH=OAUTH)(TOKEN_LOCATION="/tmp/wallet/token.txt")))' +``` + +Or use sqlnet.ora settings and simpler connection: +```bash +# Ensure TNS_ADMIN is set +export TNS_ADMIN=/tmp/wallet + +# Try simple connection using tnsnames alias +sqlplus /@adbger_high +``` + +#### Issue 5.3: Token Encoding Issues + +**Problem:** Token file has wrong encoding or line breaks + +**Solution:** +```bash +# Check for line breaks or extra characters +od -c /tmp/wallet/token.txt | head -20 + +# Token should be ONE line, ASCII encoded +# If it has line breaks, fix it: +tr -d '\n\r' < /tmp/wallet/token.txt > /tmp/wallet/token_fixed.txt +mv /tmp/wallet/token_fixed.txt /tmp/wallet/token.txt +``` + +#### Issue 5.4: Missing App Role Assignment + +**Problem:** User not assigned to app roles in Entra ID + +**Check in Entra ID (Azure Portal):** +1. Go to Enterprise Applications β†’ adbger (7d22ece1-dd60-4279-a911-4b7b95934f2e) +2. Users and groups β†’ Check if ga1@cptazure.org is assigned +3. If using app roles, verify ga1 is assigned to correct role + +From your token, I see these role GUIDs: +- `1314ae09-ccc6-4f59-b68b-3837ff44465b` +- `fa80ec82-2110-4b45-be28-b3341bf19661` + +But your manifest only shows: +- `e9ea0527-85f2-4e84-9884-2ae95c4f5a17` (SH2_APP) + +⚠️ **POTENTIAL ISSUE:** Role GUIDs in token don't match manifest! + +--- + +### 6. Recommended Debugging Sequence + +**Step 1:** Verify database configuration (run all SQL checks above) + +**Step 2:** Verify token is current and properly formatted +```bash +# Inside pod +ls -la /tmp/wallet/token.txt +cat /tmp/wallet/token.txt | wc -c +# Should be ~1900 bytes +``` + +**Step 3:** Try simplified connection string +```bash +export TNS_ADMIN=/tmp/wallet +sqlplus /@adbger_high +``` + +**Step 4:** If still failing, check database logs via OCI Console + +**Step 5:** Verify Entra ID app role assignments match user + +--- + +### 7. 
Quick Diagnostic Commands + +Run these in sequence to generate a diagnostic report: + +```sql +-- Connect as ADMIN first +sqlplus admin@'(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))' + +SPOOL /tmp/entraid_diag.txt + +-- Identity Provider +SELECT NAME, VALUE FROM V$PARAMETER WHERE NAME='identity_provider_type'; + +-- User Configuration +SELECT username, authentication_type, account_status, external_name +FROM dba_users +WHERE username = 'GA1'; + +-- User Privileges +SELECT * FROM dba_sys_privs WHERE grantee = 'GA1'; + +-- Azure AD Config +SELECT param_name, param_value +FROM dba_cloud_config +WHERE param_name LIKE 'AZURE%' +ORDER BY param_name; + +-- Network ACLs +SELECT host, principal, privilege +FROM dba_host_aces +WHERE host LIKE 'login%' +ORDER BY host, principal, privilege; + +SPOOL OFF +EXIT +``` + +Then copy diagnostic file: +```bash +# From pod +cat /tmp/entraid_diag.txt +``` + +--- + +### 8. Expected Log Files for Review + +If the issue persists, check these log locations: + +**On Autonomous Database (via OCI Console):** +1. **Alert Log:** + - OCI Console β†’ Autonomous Database β†’ Performance Hub β†’ ASH Analytics + - Look for ORA-01017 entries around your connection time + +2. **Audit Trail:** + ```sql + SELECT timestamp, username, action_name, returncode, comment_text + FROM unified_audit_trail + WHERE username = 'GA1' + ORDER BY timestamp DESC + FETCH FIRST 10 ROWS ONLY; + ``` + +3. **External Authentication Logs:** + ```sql + SELECT * FROM v$diag_alert_ext + WHERE message_text LIKE '%AZURE%' OR message_text LIKE '%OAUTH%' + ORDER BY originating_timestamp DESC + FETCH FIRST 20 ROWS ONLY; + ``` + +**On Client (pod):** +- SQL*Net trace: `/tmp/sqlnet_trace.log` (if tracing enabled) +- SQL*Plus log: Check terminal output carefully + +--- + +## Most Likely Cause + +Based on your configuration, the most likely issues are: + +1. ⚠️ **App Role Mismatch:** The role GUIDs in your token don't match the app registration manifest +2. ⚠️ **User Mapping:** GA1 user external name might not exactly match the UPN in the token +3. ⚠️ **Network ACLs:** GA1 might not have network access to Entra ID endpoints + +**Start with running all SQL checks in Step 2 above!** diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/RESOLUTION-ENTRAID-AUTH.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/RESOLUTION-ENTRAID-AUTH.md new file mode 100644 index 000000000..6590bda1e --- /dev/null +++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/RESOLUTION-ENTRAID-AUTH.md @@ -0,0 +1,265 @@ +# Entra ID Authentication - RESOLUTION SUMMARY + +## πŸŽ‰ **STATUS: WORKING** βœ… + +**Date:** October 17, 2025 +**Authentication Method:** TOKEN_GLOBAL +**User:** ga1@cptazure.org β†’ GA1 + +--- + +## Issues Found and Fixed + +### 1. ❌ **Missing TOKEN Configuration in sqlnet.ora** +**Problem:** The `sqlnet.ora` file was missing TOKEN_AUTH and TOKEN_LOCATION parameters. + +**Solution:** Updated `/tmp/wallet/sqlnet.ora` to include: +``` +TOKEN_AUTH=OAUTH +TOKEN_LOCATION="/tmp/wallet/token.txt" +``` + +**Files Updated:** +- `c:\Users\chpinoto\workspace\msftmh\03-Azure\01-03-Infrastructure\10_Oracle_on_Azure\misc\wallet\sqlnet.ora` + +--- + +### 2. ❌ **Token File Had Line Breaks** +**Problem:** The token file contained a newline character (1 line break), which can cause parsing issues. 
+ +**Solution:** Removed line breaks from token file to make it a single line. + +**Verification:** +```bash +wc -l /tmp/wallet/token.txt # Should show: 0 +``` + +--- + +### 3. ❌ **Missing Network ACLs for GA1 User** +**Problem:** User GA1 had no network access control lists (ACLs) to reach Entra ID endpoints. + +**Solution:** Added ACLs for GA1 to access: +- `login.windows.net` (connect, resolve) +- `login.microsoftonline.com` (connect, resolve) + +**SQL Commands Used:** +```sql +BEGIN + DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE( + host => 'login.windows.net', + ace => xs$ace_type( + privilege_list => xs$name_list('connect','resolve'), + principal_name => 'GA1', + principal_type => xs_acl.ptype_db)); +END; +/ + +BEGIN + DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE( + host => 'login.microsoftonline.com', + ace => xs$ace_type( + privilege_list => xs$name_list('connect','resolve'), + principal_name => 'GA1', + principal_type => xs_acl.ptype_db)); +END; +/ +COMMIT; +``` + +--- + +### 4. ℹ️ **User External Name Case (Not an Issue)** +**Observation:** Oracle stores the external name in lowercase: `azure_user=ga1@cptazure.org` + +**Resolution:** This is Oracle's normal behavior and does NOT affect authentication. The matching is case-insensitive. + +--- + +## Working Configuration + +### Database Configuration βœ… +``` +Identity Provider Type: AZURE_AD +User: GA1 +Authentication Type: GLOBAL +External Name: azure_user=ga1@cptazure.org +Privileges: CREATE SESSION +Network ACLs: login.windows.net, login.microsoftonline.com +``` + +### Client Configuration βœ… +**sqlnet.ora** (`/tmp/wallet/sqlnet.ora`): +``` +WALLET_LOCATION = (SOURCE = (METHOD = file) (METHOD_DATA = (DIRECTORY="/tmp/wallet"))) +SSL_SERVER_DN_MATCH=ON +SQLNET.AUTHENTICATION_SERVICES= (TCPS,NTS) +NAMES.DIRECTORY_PATH= (TNSNAMES, EZCONNECT) +TOKEN_AUTH=OAUTH +TOKEN_LOCATION="/tmp/wallet/token.txt" +``` + +**Environment Variables:** +```bash +export TNS_ADMIN=/tmp/wallet +export LD_LIBRARY_PATH=/opt/oracle/instantclient_23_4 +export PATH=/opt/oracle/instantclient_23_4:$PATH +``` + +### Token Configuration βœ… +- **File:** `/tmp/wallet/token.txt` +- **Size:** 1783 bytes +- **Line Breaks:** 0 (single line) +- **Encoding:** ASCII +- **Token Type:** JWT (JSON Web Token) +- **Version:** 2.0 +- **Audience (aud):** 7d22ece1-dd60-4279-a911-4b7b95934f2e +- **Tenant (tid):** f71980b2-590a-4de9-90d5-6fbc867da951 +- **UPN:** ga1@cptazure.org +- **Scope:** session:scope:connect + +--- + +## Connection Test Results + +### Successful Connection βœ… +```bash +#!/bin/bash +export TNS_ADMIN=/tmp/wallet +export LD_LIBRARY_PATH=/opt/oracle/instantclient_23_4 +export PATH=/opt/oracle/instantclient_23_4:$PATH + +/opt/oracle/instantclient_23_4/sqlplus /@adbger_high +``` + +**Output:** +``` +SQL*Plus: Release 23.0.0.0.0 - Production +Connected to: +Oracle Database 23ai Enterprise Edition Release 23.0.0.0.0 + +USER: GA1 +CURRENT_USER: GA1 +AUTH_METHOD: TOKEN_GLOBAL +``` + +--- + +## How to Test + +### From PowerShell (Local Machine) +```powershell +# Get pod name +$podInstanteClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] } + +# Test authentication +kubectl exec -n microhacks $podInstanteClientName -- bash /tmp/test_entraid_auth.sh +``` + +### Inside the Pod +```bash +# Connect to pod +kubectl exec -it -n microhacks $podInstanteClientName -- /bin/bash + +# Set environment +export TNS_ADMIN=/tmp/wallet +export LD_LIBRARY_PATH=/opt/oracle/instantclient_23_4 +export 
PATH=/opt/oracle/instantclient_23_4:$PATH + +# Connect using Entra ID token +sqlplus /@adbger_high + +# Verify user +SQL> SELECT USER FROM DUAL; +# Should show: GA1 + +SQL> SELECT SYS_CONTEXT('USERENV', 'AUTHENTICATION_METHOD') FROM DUAL; +# Should show: TOKEN_GLOBAL +``` + +--- + +## Token Renewal + +The token expires after **60-90 minutes**. For production use, you need automated token refresh. + +### Manual Token Refresh (Testing Only) + +```powershell +# On local machine +az login --tenant "f71980b2-590a-4de9-90d5-6fbc867da951" +$token=az account get-access-token --scope "https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e/.default" --query accessToken -o tsv +$token | Out-File -FilePath .\misc\token.txt -Encoding ascii -NoNewline + +# Upload to pod +$podInstanteClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] } +kubectl cp ./misc/token.txt ${podInstanteClientName}:/tmp/wallet/token.txt -n microhacks +``` + +### ⭐ Automated Token Refresh (Production) + +**For production environments, see comprehensive token refresh strategies:** + +πŸ“– **[TOKEN-REFRESH-STRATEGIES.md](TOKEN-REFRESH-STRATEGIES.md)** + +Recommended approaches: +1. **Sidecar Container** - Automatic refresh every 45 minutes (RECOMMENDED) +2. **CronJob** - Kubernetes CronJob for periodic refresh +3. **Application-Level** - Token refresh built into your application +4. **Azure Key Vault + CSI Driver** - Enterprise solution with auto-sync + +The sidecar approach is recommended for Kubernetes deployments as it: +- βœ… Refreshes tokens automatically before expiration +- βœ… Uses Azure Workload Identity (no secrets in code) +- βœ… Requires no changes to application code +- βœ… Provides high availability with built-in retry logic + +--- + +## Files Created for Debugging + +1. **DEBUG-ENTRAID-AUTH.md** - Comprehensive debugging guide +2. **misc/diagnose.sql** - SQL diagnostic script +3. **misc/Run-EntraIDDiagnostics.ps1** - PowerShell diagnostic runner +4. **misc/db_diagnostics.sh** - Bash diagnostic script +5. **misc/db_diag_v2.sh** - Improved diagnostic script +6. **misc/fix_entraid.sh** - Script to fix configuration issues +7. **misc/test_entraid_auth.sh** - Authentication test script + +--- + +## Key Learnings + +1. **TOKEN_AUTH and TOKEN_LOCATION must be in sqlnet.ora** - Without these parameters, SQL*Plus won't use the token file. + +2. **Token must be a single line** - Line breaks in the token file can cause authentication failures. + +3. **Network ACLs are required** - The database user must have network access to Entra ID endpoints to validate tokens. + +4. **Case sensitivity in external names doesn't matter** - Oracle stores external names in lowercase, but matching is case-insensitive. + +5. **TNS_ADMIN must be set** - The environment variable must point to the wallet directory containing sqlnet.ora and tnsnames.ora. + +--- + +## Troubleshooting Future Issues + +If authentication stops working, check: + +1. **Token expiry:** Tokens expire after ~90 minutes +2. **Token format:** Must be single line, ASCII encoding +3. **Network ACLs:** Check `dba_host_aces` for GA1 principal +4. **sqlnet.ora:** Verify TOKEN_AUTH=OAUTH and TOKEN_LOCATION are set +5. 
**Environment:** Ensure TNS_ADMIN, LD_LIBRARY_PATH, and PATH are set correctly + +Run diagnostics: +```bash +kubectl exec -n microhacks $podInstanteClientName -- bash /tmp/db_diag_v2.sh +``` + +--- + +## References + +- Oracle Documentation: [Authenticating Microsoft Entra ID Users in Oracle Databases](https://docs.oracle.com/en/database/oracle/oracle-database/19/dbseg/authenticating-and-authorizing-microsoft-entra-id-ms-ei-users-oracle-databases-oracle-exadata.html) +- Autonomous Database: [Enable Microsoft Entra ID Authentication](https://docs.oracle.com/en/cloud/paas/autonomous-database/serverless/adbsb/autonomous-azure-ad-enable.html) diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/TOKEN-REFRESH-STRATEGIES.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/TOKEN-REFRESH-STRATEGIES.md new file mode 100644 index 000000000..1ab6de3f5 --- /dev/null +++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/TOKEN-REFRESH-STRATEGIES.md @@ -0,0 +1,575 @@ +# Token Refresh Strategies for Entra ID Authentication + +## Overview + +OAuth2 tokens from Entra ID typically expire after **60-90 minutes**. For production scenarios, you need an automated token refresh mechanism to maintain continuous database connectivity. + +--- + +## Recommended Approaches + +### ⭐ **Option 1: Sidecar Container with Token Refresh (RECOMMENDED for Production)** + +Deploy a sidecar container in your Kubernetes pod that automatically refreshes the token before expiration. + +#### Architecture +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ Kubernetes Pod β”‚ +β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Application β”‚ β”‚ Token Refresh β”‚ β”‚ +β”‚ β”‚ Container β”‚ β”‚ Sidecar β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ - Reads β”‚ β”‚ - Refreshes β”‚ β”‚ +β”‚ β”‚ token β”‚ β”‚ every 45min β”‚ β”‚ +β”‚ β”‚ - Connects β”‚ β”‚ - Uses MSI β”‚ β”‚ +β”‚ β”‚ to Oracle β”‚ β”‚ - Writes to β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ shared vol β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β–Όβ”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Shared Volume β”‚ β”‚ +β”‚ β”‚ /tmp/wallet/ β”‚ β”‚ +β”‚ β”‚ token.txt β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +#### Implementation + +**1. Create Token Refresh Script:** + +```bash +#!/bin/bash +# refresh-token.sh +# Automatically refreshes Entra ID token using Azure Managed Identity + +TENANT_ID="f71980b2-590a-4de9-90d5-6fbc867da951" +CLIENT_ID="7d22ece1-dd60-4279-a911-4b7b95934f2e" +SCOPE="https://cptazure.org/${CLIENT_ID}/.default" +TOKEN_FILE="/tmp/wallet/token.txt" +REFRESH_INTERVAL=2700 # 45 minutes (before 60-minute expiry) + +while true; do + echo "$(date): Refreshing token..." 
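  # The call below asks the Azure Instance Metadata Service (IMDS) at 169.254.169.254
  # for a managed-identity access token; the "Metadata: true" header is mandatory and
  # the endpoint is only reachable from inside Azure compute, so this loop must run on
  # an AKS node/pod that has a managed identity available.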
+ + # Get token using Managed Identity + TOKEN=$(curl -s "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=${SCOPE}" \ + -H "Metadata: true" \ + | jq -r .access_token) + + if [ "$TOKEN" != "null" ] && [ -n "$TOKEN" ]; then + echo -n "$TOKEN" > "$TOKEN_FILE" + chmod 600 "$TOKEN_FILE" + echo "$(date): Token refreshed successfully" + else + echo "$(date): ERROR - Failed to refresh token" + fi + + sleep $REFRESH_INTERVAL +done +``` + +**2. Create Sidecar Container Image:** + +```dockerfile +# Dockerfile.token-refresh +FROM mcr.microsoft.com/azure-cli:latest + +# Install jq for JSON parsing +RUN apk add --no-cache jq curl bash + +# Copy refresh script +COPY refresh-token.sh /usr/local/bin/refresh-token.sh +RUN chmod +x /usr/local/bin/refresh-token.sh + +# Run the refresh loop +CMD ["/usr/local/bin/refresh-token.sh"] +``` + +**3. Update Kubernetes Deployment:** + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: oracle-client + namespace: microhacks +spec: + template: + spec: + serviceAccountName: oracle-client-sa # With Azure Workload Identity + containers: + # Main application container + - name: app + image: your-oracle-client:latest + volumeMounts: + - name: wallet + mountPath: /tmp/wallet + env: + - name: TNS_ADMIN + value: "/tmp/wallet" + - name: ORACLE_HOME + value: "/opt/oracle/instantclient_23_4" + - name: LD_LIBRARY_PATH + value: "/opt/oracle/instantclient_23_4" + + # Token refresh sidecar + - name: token-refresh + image: your-registry/token-refresh:latest + volumeMounts: + - name: wallet + mountPath: /tmp/wallet + env: + - name: AZURE_CLIENT_ID + value: "7d22ece1-dd60-4279-a911-4b7b95934f2e" + - name: AZURE_TENANT_ID + value: "f71980b2-590a-4de9-90d5-6fbc867da951" + + volumes: + - name: wallet + emptyDir: {} +``` + +**4. Setup Azure Workload Identity:** + +```bash +# Create Azure Managed Identity +az identity create \ + --name oracle-token-refresh \ + --resource-group odaa \ + --location germanywestcentral + +# Get identity details +IDENTITY_CLIENT_ID=$(az identity show --name oracle-token-refresh --resource-group odaa --query clientId -o tsv) +IDENTITY_ID=$(az identity show --name oracle-token-refresh --resource-group odaa --query id -o tsv) + +# Grant permissions to get tokens for the app registration +az ad app permission grant \ + --id 7d22ece1-dd60-4279-a911-4b7b95934f2e \ + --api 7d22ece1-dd60-4279-a911-4b7b95934f2e \ + --scope session:scope:connect + +# Setup Workload Identity Federation +az identity federated-credential create \ + --name oracle-aks-federated \ + --identity-name oracle-token-refresh \ + --resource-group odaa \ + --issuer $(az aks show -n odaa -g odaa --query "oidcIssuerProfile.issuerUrl" -o tsv) \ + --subject "system:serviceaccount:microhacks:oracle-client-sa" + +# Create Kubernetes Service Account +kubectl create serviceaccount oracle-client-sa -n microhacks +kubectl annotate serviceaccount oracle-client-sa -n microhacks \ + azure.workload.identity/client-id=$IDENTITY_CLIENT_ID +``` + +--- + +### ⭐ **Option 2: CronJob-based Token Refresh (Simpler, Good for Testing)** + +Use Kubernetes CronJob to refresh the token periodically. 
+ +```yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: refresh-oracle-token + namespace: microhacks +spec: + schedule: "*/45 * * * *" # Every 45 minutes + jobTemplate: + spec: + template: + spec: + serviceAccountName: oracle-client-sa + containers: + - name: token-refresh + image: mcr.microsoft.com/azure-cli:latest + command: + - /bin/bash + - -c + - | + # Get token + TOKEN=$(az account get-access-token \ + --scope "https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e/.default" \ + --query accessToken -o tsv) + + # Update ConfigMap with new token + kubectl create configmap oracle-token \ + --from-literal=token=$TOKEN \ + --dry-run=client -o yaml | kubectl apply -f - + + # Restart pods to pick up new token + kubectl rollout restart deployment/oracle-client -n microhacks + restartPolicy: OnFailure +``` + +Then mount the token from ConfigMap: + +```yaml +volumes: +- name: token + configMap: + name: oracle-token +``` + +--- + +### ⭐ **Option 3: Application-Level Token Refresh (Best for Custom Apps)** + +Implement token refresh logic directly in your application. + +#### Python Example with Connection Pool + +```python +# oracle_entraid_client.py +import os +import time +import subprocess +import threading +from datetime import datetime, timedelta +import oracledb + +class EntraIDTokenManager: + def __init__(self, tenant_id, client_id, scope, token_file): + self.tenant_id = tenant_id + self.client_id = client_id + self.scope = scope + self.token_file = token_file + self.token_expiry = None + self.refresh_thread = None + self.running = False + + def get_token(self): + """Get new token from Entra ID using Azure CLI or Managed Identity""" + try: + # Try Managed Identity first + import requests + response = requests.get( + "http://169.254.169.254/metadata/identity/oauth2/token", + params={ + "api-version": "2018-02-01", + "resource": self.scope + }, + headers={"Metadata": "true"}, + timeout=5 + ) + if response.status_code == 200: + data = response.json() + return data['access_token'], data['expires_on'] + except: + pass + + # Fallback to Azure CLI + result = subprocess.run([ + 'az', 'account', 'get-access-token', + '--scope', self.scope, + '--query', 'accessToken', + '-o', 'tsv' + ], capture_output=True, text=True) + + if result.returncode == 0: + token = result.stdout.strip() + # Default expiry: 60 minutes + expiry = int(time.time()) + 3600 + return token, expiry + + raise Exception("Failed to get token") + + def refresh_token(self): + """Refresh token and write to file""" + token, expiry = self.get_token() + + # Write token to file (single line, no newline) + with open(self.token_file, 'w') as f: + f.write(token) + + os.chmod(self.token_file, 0o600) + self.token_expiry = datetime.fromtimestamp(int(expiry)) + + print(f"Token refreshed. 
Expires at: {self.token_expiry}") + + def start_refresh_loop(self): + """Start background thread to refresh token""" + self.running = True + self.refresh_thread = threading.Thread(target=self._refresh_loop, daemon=True) + self.refresh_thread.start() + + def _refresh_loop(self): + """Background loop to refresh token before expiry""" + while self.running: + try: + # Refresh token + self.refresh_token() + + # Calculate next refresh time (5 minutes before expiry) + if self.token_expiry: + time_until_expiry = (self.token_expiry - datetime.now()).total_seconds() + sleep_time = max(60, time_until_expiry - 300) # 5 min buffer + else: + sleep_time = 2700 # 45 minutes default + + print(f"Next token refresh in {sleep_time/60:.1f} minutes") + time.sleep(sleep_time) + + except Exception as e: + print(f"Error refreshing token: {e}") + time.sleep(60) # Retry after 1 minute + + def stop(self): + """Stop refresh loop""" + self.running = False + + +class OracleEntraIDConnection: + def __init__(self, dsn, token_manager): + self.dsn = dsn + self.token_manager = token_manager + self.pool = None + + def create_pool(self, min_connections=2, max_connections=10): + """Create connection pool""" + # Set TNS_ADMIN for wallet location + os.environ['TNS_ADMIN'] = '/tmp/wallet' + + # Create connection pool with external authentication + self.pool = oracledb.create_pool( + dsn=self.dsn, + min=min_connections, + max=max_connections, + externalauth=True # Use external authentication (token) + ) + + print(f"Connection pool created: {min_connections}-{max_connections} connections") + return self.pool + + def get_connection(self): + """Get connection from pool""" + if not self.pool: + raise Exception("Pool not created. Call create_pool() first.") + return self.pool.acquire() + + +# Usage Example +if __name__ == "__main__": + # Configuration + TENANT_ID = "f71980b2-590a-4de9-90d5-6fbc867da951" + CLIENT_ID = "7d22ece1-dd60-4279-a911-4b7b95934f2e" + SCOPE = f"https://cptazure.org/{CLIENT_ID}/.default" + TOKEN_FILE = "/tmp/wallet/token.txt" + DSN = "adbger_high" + + # Initialize token manager + token_mgr = EntraIDTokenManager(TENANT_ID, CLIENT_ID, SCOPE, TOKEN_FILE) + + # Get initial token + token_mgr.refresh_token() + + # Start automatic refresh + token_mgr.start_refresh_loop() + + # Create Oracle connection + oracle_conn = OracleEntraIDConnection(DSN, token_mgr) + pool = oracle_conn.create_pool(min_connections=2, max_connections=10) + + # Use connection + try: + conn = oracle_conn.get_connection() + cursor = conn.cursor() + cursor.execute("SELECT USER, SYS_CONTEXT('USERENV', 'AUTHENTICATION_METHOD') FROM DUAL") + result = cursor.fetchone() + print(f"Connected as: {result[0]}, Auth method: {result[1]}") + cursor.close() + conn.close() + finally: + token_mgr.stop() + pool.close() +``` + +--- + +### ⭐ **Option 4: Azure Key Vault with Periodic Sync (Enterprise)** + +Store and automatically sync tokens via Azure Key Vault. 
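+
+This option relies on the Azure Key Vault provider for the Secrets Store CSI driver being installed in the cluster, and periodic refresh of the mounted secret is typically driven by the add-on's secret rotation settings. A minimal sketch of enabling the add-on; cluster and resource group names are placeholders, and the flag names should be verified against your Azure CLI version:
+
+```powershell
+# Enable the Key Vault Secrets Store CSI driver add-on on AKS with secret rotation
+az aks enable-addons `
+  --addons azure-keyvault-secrets-provider `
+  --name <aks-cluster-name> `
+  --resource-group <resource-group> `
+  --enable-secret-rotation `
+  --rotation-poll-interval 45m
+
+# Verify the driver and provider pods are running (look for secrets-store-csi-driver / secrets-store-provider-azure)
+kubectl get pods -n kube-system
+```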
+ +```bash +# Store token in Key Vault +az keyvault secret set \ + --vault-name your-keyvault \ + --name oracle-entraid-token \ + --value "$TOKEN" + +# Use CSI driver to mount as volume +# The CSI driver can be configured to sync every X minutes +``` + +```yaml +apiVersion: v1 +kind: SecretProviderClass +metadata: + name: oracle-token-sync +spec: + provider: azure + parameters: + usePodIdentity: "false" + useVMManagedIdentity: "true" + keyvaultName: "your-keyvault" + objects: | + array: + - | + objectName: oracle-entraid-token + objectType: secret + objectVersion: "" + tenantId: "f71980b2-590a-4de9-90d5-6fbc867da951" + syncPeriod: "45m" # Auto-refresh every 45 minutes +``` + +--- + +## Comparison Matrix + +| Approach | Complexity | Reliability | Use Case | Token Refresh | +|----------|-----------|-------------|----------|---------------| +| **Sidecar Container** | Medium | ⭐⭐⭐⭐⭐ | Production apps in K8s | Automatic (45 min) | +| **CronJob** | Low | ⭐⭐⭐ | Testing, simple deployments | Every 45 min | +| **Application-Level** | Medium-High | ⭐⭐⭐⭐ | Custom applications | Application-controlled | +| **Key Vault + CSI** | High | ⭐⭐⭐⭐⭐ | Enterprise, multi-pod | CSI sync (configurable) | + +--- + +## Quick Implementation for Your Environment + +For your current AKS setup, I recommend **Option 1 (Sidecar Container)**. Here's a quick start: + +### Step 1: Create the Token Refresh Script + +Save this as `misc/refresh-token.sh`: + +```bash +#!/bin/bash +set -e + +TENANT_ID="${AZURE_TENANT_ID:-f71980b2-590a-4de9-90d5-6fbc867da951}" +CLIENT_ID="${AZURE_CLIENT_ID:-7d22ece1-dd60-4279-a911-4b7b95934f2e}" +SCOPE="https://cptazure.org/${CLIENT_ID}/.default" +TOKEN_FILE="/tmp/wallet/token.txt" +REFRESH_INTERVAL=${REFRESH_INTERVAL:-2700} # 45 minutes + +echo "Starting token refresh service..." +echo "Tenant: $TENANT_ID" +echo "Client: $CLIENT_ID" +echo "Refresh interval: $REFRESH_INTERVAL seconds" + +while true; do + echo "$(date '+%Y-%m-%d %H:%M:%S'): Refreshing token..." + + # Get token using Azure CLI with Managed Identity + TOKEN=$(az account get-access-token \ + --tenant "$TENANT_ID" \ + --scope "$SCOPE" \ + --query accessToken \ + --output tsv 2>&1) + + if [ $? -eq 0 ] && [ -n "$TOKEN" ]; then + # Write token without newline + echo -n "$TOKEN" > "$TOKEN_FILE" + chmod 600 "$TOKEN_FILE" + echo "$(date '+%Y-%m-%d %H:%M:%S'): βœ… Token refreshed successfully" + + # Decode and show expiry time + EXP=$(echo "$TOKEN" | cut -d'.' -f2 | base64 -d 2>/dev/null | grep -o '"exp":[0-9]*' | cut -d':' -f2) + if [ -n "$EXP" ]; then + EXPIRY_DATE=$(date -d "@$EXP" '+%Y-%m-%d %H:%M:%S' 2>/dev/null || echo "unknown") + echo "$(date '+%Y-%m-%d %H:%M:%S'): Token expires at: $EXPIRY_DATE" + fi + else + echo "$(date '+%Y-%m-%d %H:%M:%S'): ❌ ERROR - Failed to refresh token: $TOKEN" + fi + + echo "$(date '+%Y-%m-%d %H:%M:%S'): Sleeping for $REFRESH_INTERVAL seconds..." 
+ sleep $REFRESH_INTERVAL +done +``` + +### Step 2: Build and Push Sidecar Image + +```dockerfile +# misc/Dockerfile.token-refresh +FROM mcr.microsoft.com/azure-cli:2.55.0 + +# Install required tools +RUN apk add --no-cache coreutils bash + +# Copy refresh script +COPY refresh-token.sh /usr/local/bin/refresh-token.sh +RUN chmod +x /usr/local/bin/refresh-token.sh + +# Health check +HEALTHCHECK --interval=5m --timeout=10s --retries=3 \ + CMD test -f /tmp/wallet/token.txt && \ + test $(find /tmp/wallet/token.txt -mmin -60) || exit 1 + +CMD ["/usr/local/bin/refresh-token.sh"] +``` + +```powershell +# Build and push +cd misc +docker build -f Dockerfile.token-refresh -t .azurecr.io/token-refresh:latest . +docker push .azurecr.io/token-refresh:latest +``` + +### Step 3: Update Your Deployment + +Add the sidecar to your existing deployment - see the YAML example in Option 1 above. + +--- + +## Monitoring & Alerts + +Set up monitoring to alert when token refresh fails: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-alerts +data: + alerts.yml: | + groups: + - name: oracle-token + rules: + - alert: TokenRefreshFailed + expr: time() - oracle_token_last_refresh_timestamp > 3600 + for: 5m + annotations: + summary: "Oracle token hasn't been refreshed in 1 hour" +``` + +--- + +## Best Practices + +1. βœ… **Refresh before expiry** - Refresh 15 minutes before token expiration +2. βœ… **Use Managed Identity** - Avoid storing credentials in code/config +3. βœ… **Monitor refresh status** - Set up alerts for failed refreshes +4. βœ… **Handle failures gracefully** - Retry with exponential backoff +5. βœ… **Log token events** - Track refresh times and failures +6. βœ… **Single line tokens** - Always write tokens without newlines +7. βœ… **Secure storage** - Set file permissions to 600 (read/write for owner only) + +--- + +## Next Steps + +1. Choose the approach that fits your architecture +2. Implement token refresh automation +3. Set up monitoring and alerts +4. Test token expiry scenarios +5. Document the solution for your team + +Would you like help implementing any of these options? diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/clone-partial-repo.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/clone-partial-repo.md new file mode 100644 index 000000000..f1b27baee --- /dev/null +++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/clone-partial-repo.md @@ -0,0 +1,45 @@ +# Clone Partial Repository + +This guide shows how to clone only the "Oracle on Azure" project from the GIT repository without downloading the entire project history. + +> NOTE: During Challenge we did setup Azure CloudShell, feel free to use the Azure CloudShell to clone the repo. Alternative you can execute this commands from your local PC. + +## Quick Start + +```powershell +# Clone with sparse-checkout (recommended) +git clone --depth 1 --filter=blob:none --sparse https://github.com/cpinotossi/msftmh.git + +cd msftmh + +# Checkout only the Oracle on Azure folder +git sparse-checkout set 03-Azure/01-03-Infrastructure/10_Oracle_on_Azure +``` + +## What This Does + +- `--depth 1`: Downloads only the latest commit (shallow clone) +- `--filter=blob:none`: Downloads only necessary files, not all file versions +- `--sparse`: Enables sparse-checkout mode +- `git sparse-checkout set`: Specifies which folder to download + +## Switch to the right folder + +You'll have only the `10_Oracle_on_Azure` folder with its contents, saving bandwidth and disk space. 
+ +~~~powershell +cd 03-Azure/01-03-Infrastructure/10_Oracle_on_Azure +~~~ + +## Tips and Tricks + +### Customizing the Prompt + +The following PowerShell function customizes your prompt to show only the current folder name, making it easier to identify your location in the terminal. + +~~~powershell +function prompt { + $currentFolder = (Get-Item -Path ".\" -Verbose).Name + "PS $currentFolder> " +} +~~~ \ No newline at end of file diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/create.odaa.adb.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/create.odaa.adb.md new file mode 100644 index 000000000..dc1a5dbef --- /dev/null +++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/create.odaa.adb.md @@ -0,0 +1,37 @@ +# πŸš€ Oracle Database @ Azure (ODAA) - Deployment Scripts for ADB + +### πŸ” Login to Azure and set the right subscription + +~~~powershell +az login # choose your assigned user account for ex. user01@cptazure.org or the menu "Work or school account +az account show +az account set -s "ODAA" +# Register required providers for odaa +# TBD: Check if all are required +az provider register --namespace "Microsoft.Oracle" +az provider register --namespace "Microsoft.Baremetal" +az provider register --namespace "Microsoft.Network" +~~~ + +### 🌍 Define some environment variables + +~~~powershell +$prefix="odaa" +$postfix="1" +$location="francecentral" +$password = Read-Host -Prompt "Enter the shared password" +$cidr="10.0.0.0" +~~~ + +### πŸ—οΈ Create Azure Resources + +> ℹ️ **NOTE:** This would be created manually during the workshop. + +~~~bash +az deployment sub create -n $prefix -l $location -f ./resources/infra/bicep/odaa/main.bicep -p location=$location prefix=$prefix postfix=$postfix password=$password cidr=$cidr +# Verify the created resources, list all resource inside the resource group +az resource list -g $rgName -o table --query "[].{Name:name, Type:type}" +~~~ + +~~~text +~~~ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/deploy-adbping.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/deploy-adbping.md new file mode 100644 index 000000000..2cf634b74 --- /dev/null +++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/deploy-adbping.md @@ -0,0 +1,106 @@ +# Deploy Oracle ADB Ping Pod + +## Prerequisites + +- AKS cluster with access to Oracle Container Registry +- Oracle Container Registry credentials (if using private images) +- ODAA Autonomous Database connection details + +## Step 1: Create Oracle Container Registry Secret (if needed) + +```powershell +kubectl create secret docker-registry ocir-secret -n microhacks ` + --docker-server=container-registry.oracle.com ` + --docker-username='' ` + --docker-password='' ` + --docker-email='' +``` + +## Step 2: Deploy the Pod + +```powershell +kubectl apply -f resources/pods/oracle-adbping.yaml +``` + +Wait for the pod to be ready: + +```powershell +kubectl wait pod/oracle-adbping -n microhacks --for=condition=Ready --timeout=120s +``` + +## Step 3: Copy the adbping Script to the Pod + +```powershell +kubectl cp resources/scripts/adbping.sh microhacks/oracle-adbping:/home/oracle/adbping.sh +kubectl exec -n microhacks oracle-adbping -- chmod +x /home/oracle/adbping.sh +``` + +## Step 4: Run the ADB Ping Test + +```powershell +# Set your connection details +$ADB_HOST = "zeii0mxy.adb.eu-paris-1.oraclecloud.com" +$ADB_SERVICE = "gc2401553d1c7ab_adbuser01_high.adb.oraclecloud.com" +$ADB_USER = "admin" +$ADB_PASSWORD = Read-Host -Prompt "Enter the shared password" + +# 
Build connection string +$CONNECTION_STRING = "(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=$ADB_HOST))(connect_data=(service_name=$ADB_SERVICE))(security=(ssl_server_dn_match=no)))" + +# Execute the ping test +kubectl exec -n microhacks oracle-adbping -- /home/oracle/adbping.sh "$CONNECTION_STRING" "$ADB_USER" "$ADB_PASSWORD" 10 +``` + +## Step 5: Interactive Shell (Optional) + +For manual testing: + +```powershell +kubectl exec -it -n microhacks oracle-adbping -- /bin/bash + +# Inside the pod: +export TNS_CONN="(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=zeii0mxy.adb.eu-paris-1.oraclecloud.com))(connect_data=(service_name=gc2401553d1c7ab_adbuser01_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))" + +# Test with tnsping +tnsping "$TNS_CONN" + +# Test with sqlplus +sqlplus admin/<"Assigned Password">#@"$TNS_CONN" +``` + +## Cleanup + +```powershell +kubectl delete pod oracle-adbping -n microhacks +``` + +## Troubleshooting + +### Image Pull Issues + +If the pod fails to pull the image: + +1. Check the image pull secret: + ```powershell + kubectl get secret ocir-secret -n microhacks + ``` + +2. Use an alternative public image: + ```yaml + image: ghcr.io/gvenzl/oracle-instantclient:21 + # Remove imagePullSecrets section + ``` + +### Connection Failures + +1. Verify DNS resolution: + ```powershell + kubectl exec -n microhacks oracle-adbping -- nslookup zeii0mxy.adb.eu-paris-1.oraclecloud.com + ``` + +2. Check network connectivity: + ```powershell + kubectl exec -n microhacks oracle-adbping -- openssl s_client -connect zeii0mxy.adb.eu-paris-1.oraclecloud.com:1521 -brief + ``` + +3. Review NSG rules and VNet peering between AKS and ODAA subnets diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/import-oci-image-to-acr.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/import-oci-image-to-acr.md new file mode 100644 index 000000000..4ed4e7bc5 --- /dev/null +++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/import-oci-image-to-acr.md @@ -0,0 +1,160 @@ +# Importing Oracle Container Registry Images to Azure Container Registry + +This guide explains how to import Oracle GoldenGate images from Oracle Container Image Registry (OCIR) to Azure Container Registry (ACR). + +## Overview + +The `az acr import` command allows you to import container images from external registries directly into your Azure Container Registry without needing to pull and push the image locally. + +## Prerequisites + +1. **Azure CLI** installed and authenticated +2. **Azure Container Registry** (e.g., `odaamh.azurecr.io`) +3. **Oracle Cloud Infrastructure (OCI) credentials**: + - OCI username + - OCI Auth Token + - Tenancy namespace + +## Getting OCI Credentials + +### 1. Tenancy Namespace +Your tenancy namespace is visible in the Oracle Container Registry URL. For example: +- URL: `fra.ocir.io/frul1g8cgfam/pub_gg_micro_bigdata:23.4.0.24.06` +- Tenancy namespace: `frul1g8cgfam` + +### 2. OCI Username +Your OCI username format depends on your identity provider: +- **OCI IAM**: `` +- **Oracle Identity Cloud Service (IDCS)**: `oracleidentitycloudservice/` +- **Federated users**: `/` + +### 3. Auth Token +Generate an Auth Token in the OCI Console: +1. Sign in to Oracle Cloud Console +2. Click your profile icon β†’ **User Settings** +3. Under **Resources**, click **Auth Tokens** +4. Click **Generate Token** +5. Provide a description and click **Generate Token** +6. 
**Copy and save the token immediately** (it won't be shown again) + +## Import Command + +### Basic Syntax + +```powershell +az acr import ` + --name ` + --source //: ` + --image : ` + --username "/" ` + --password "" +``` + +### Example: Importing GoldenGate BigData Image + +```powershell +# Set the correct Azure subscription +az account set --subscription 09808f31-065f-4231-914d-776c2d6bbe34 + +# Import the image +az acr import ` + --name odaamh ` + --source fra.ocir.io/frul1g8cgfam/pub_gg_micro_bigdata:23.4.0.24.06 ` + --image goldengate/pub_gg_micro_bigdata:23.4.0.24.06 ` + --username "frul1g8cgfam/" ` + --password "" +``` + +### Using Environment Variables (Recommended for Security) + +```powershell +# Store credentials in environment variables +$env:OCI_USERNAME = "frul1g8cgfam/" +$env:OCI_AUTH_TOKEN = "" + +# Import using environment variables +az acr import ` + --name odaamh ` + --source fra.ocir.io/frul1g8cgfam/pub_gg_micro_bigdata:23.4.0.24.06 ` + --image goldengate/pub_gg_micro_bigdata:23.4.0.24.06 ` + --username $env:OCI_USERNAME ` + --password $env:OCI_AUTH_TOKEN +``` + +## Available Images to Import + +Based on the configuration in `ggfabric.yaml`, you may need to import: + +1. **BigData Image (23.4.0)**: + ``` + Source: fra.ocir.io/frul1g8cgfam/pub_gg_micro_bigdata:23.4.0.24.06 + Target: odaamh.azurecr.io/goldengate/pub_gg_micro_bigdata:23.4.0.24.06 + ``` + +2. **BigData Image (23.8.4)**: + ``` + Source: fra.ocir.io/frul1g8cgfam/pub_gg_micro_bigdata:23.8.4.25.08 + Target: odaamh.azurecr.io/goldengate/pub_gg_micro_bigdata:23.8.4.25.08 + ``` + +## Verification + +After importing, verify the image is available in your ACR: + +```powershell +# List all repositories +az acr repository list --name odaamh --output table + +# Show tags for a specific repository +az acr repository show-tags --name odaamh --repository goldengate/pub_gg_micro_bigdata --output table + +# Get image details +az acr repository show --name odaamh --image goldengate/pub_gg_micro_bigdata:23.4.0.24.06 +``` + +## Updating Kubernetes Deployments + +After importing, update your Helm values or Kubernetes manifests to use the ACR image: + +```yaml +image: + imageName: odaamh.azurecr.io/goldengate/pub_gg_micro_bigdata:23.4.0.24.06 +``` + +## Troubleshooting + +### 403 Forbidden Error +``` +Anonymous users are only allowed read access on public repos +``` +**Solution**: Ensure you're providing valid OCI credentials with `--username` and `--password` flags. 
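+
+If the import keeps failing with 403, verify the OCI credentials independently of `az acr import`, for example with a direct `docker login` against the source registry (this assumes a local Docker CLI; the registry host and tenancy namespace below follow the examples in this guide):
+
+```powershell
+# A successful login confirms the username format and Auth Token are valid
+"<oci-auth-token>" | docker login fra.ocir.io `
+  --username "frul1g8cgfam/<oci-username>" `
+  --password-stdin
+```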
+
+### Invalid Credentials
+**Solution**:
+- Verify your OCI username format matches your identity provider
+- Ensure the Auth Token is valid and not expired
+- Check that the tenancy namespace is correct
+
+### Subscription Not Found
+**Solution**: Set the correct Azure subscription:
+```powershell
+az account set --subscription <subscription-id>
+```
+
+### Image Not Found in Source Registry
+**Solution**:
+- Verify you have access to the OCI repository
+- Check that the image path and tag are correct
+- Ensure your OCI user has pull permissions for the repository
+
+## Additional Resources
+
+- [Azure Container Registry Import Documentation](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-import-images)
+- [Oracle Container Registry Documentation](https://docs.oracle.com/en-us/iaas/Content/Registry/home.htm)
+- [Managing Auth Tokens in OCI](https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/managingcredentials.htm)
+
+## Related Files
+
+- `ggfabric.yaml` - Helm values file containing image configurations
+- `resources/gg-bigdata-build/` - GoldenGate build resources
+- `resources/infra/` - Infrastructure deployment files
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/odaa-get-token.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/odaa-get-token.md
new file mode 100644
index 000000000..c1529eb05
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/odaa-get-token.md
@@ -0,0 +1,44 @@
+# How to retrieve the Oracle Autonomous Database connection string from ODAA
+
+To connect to the Oracle database, you will need the TNS connection string.
+
+## πŸ“ Retrieve the connection string via the Azure Portal from the ODAA ADB instance
+
+1. 🎯 Go to your Oracle Database in the Azure Portal: search for "adb" in the search bar on top.
+2. πŸ” Select "Oracle Database@Azure" from the search results.
+3. πŸ“‹ Select "Oracle Autonomous Database Service" from the left menu.
+4. πŸŽͺ Select your created ADB instance.
+5. πŸ”— Select "Connection" from the left menu.
+6. πŸ”’ Select the High profile with TLS Authentication = TLS and copy the connection string.
+
+## πŸ”§ Alternatively, you can use the Azure CLI to retrieve the connection string
+ +~~~powershell +# Prerequisites (if not already installed) +az extension add --name oracle-database + +$adbName="user02" # replace with your ADB name + +# Switch to the subscription where ODAA is deployed +$subODAA="sub-mhodaa" +az account set --subscription $subODAA + +$rgODAA="odaa-user02" # replace with your resource group name + +# Enable preview features for Oracle Database extension +az config set extension.dynamic_install_allow_preview=true +# Install Oracle Database extension if not already installed +az extension add --name oracle-database +# Retrieve TNS Connection string High profile (TCPS, tlsAuthentication = Server) +$trgConn=az oracle-database autonomous-database show -g $rgODAA -n $adbName --query "connectionStrings.profiles[?consumerGroup=='High' && protocol=='TCPS' && tlsAuthentication=='Server'].value | [0]" -o tsv + +echo $trgConn +~~~ + +Output should look similar to this: + +~~~text +(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=zeii0mxy.adb.eu-paris-1.oraclecloud.com))(connect_data=(service_name=gc2401553d1c7ab_adbuser01_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no))) +~~~ + + diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/ENDE MFA.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/ENDE MFA.png new file mode 100644 index 000000000..e14dc86a7 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/ENDE MFA.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA.png new file mode 100644 index 000000000..46a7aa706 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA1.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA1.png new file mode 100644 index 000000000..aee200cc4 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA1.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA2.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA2.png new file mode 100644 index 000000000..b638d7bbb Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA2.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA3.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA3.png new file mode 100644 index 000000000..4b35a6814 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA3.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA4.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA4.png new file mode 100644 index 000000000..6c72f08ff Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA4.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA5.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA5.png new file mode 100644 index 000000000..ec3c7bc1e Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA5.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA6.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA6.png new file mode 100644 index 000000000..a40d86872 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA6.png differ diff --git 
a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 0.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 0.png new file mode 100644 index 000000000..064db298b Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 0.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 1.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 1.png new file mode 100644 index 000000000..154aa9a79 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 1.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 2.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 2.png new file mode 100644 index 000000000..0536455fb Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 2.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 3.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 3.png new file mode 100644 index 000000000..a109f1393 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 3.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 4.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 4.png new file mode 100644 index 000000000..689dade7b Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 4.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 5.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 5.png new file mode 100644 index 000000000..1ab893d7c Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 5.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/create_browser_profile.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/create_browser_profile.png new file mode 100644 index 000000000..1a47fd43f Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/create_browser_profile.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/logo_ODAA_microhack_1900x300.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/logo_ODAA_microhack_1900x300.jpg new file mode 100644 index 000000000..9c7dd4659 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/logo_ODAA_microhack_1900x300.jpg differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/overivew deployment.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/overivew deployment.png new file mode 100644 index 000000000..cfb8acfc2 Binary files /dev/null and 
b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/overivew deployment.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available1.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available1.png new file mode 100644 index 000000000..3535f4d68 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available1.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available2.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available2.png new file mode 100644 index 000000000..046778e8d Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available2.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available3.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available3.png new file mode 100644 index 000000000..52f5d9249 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available3.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available4.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available4.png new file mode 100644 index 000000000..e82878970 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available4.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available5.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available5.png new file mode 100644 index 000000000..f749de832 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available5.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available6.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available6.png new file mode 100644 index 000000000..8859f5357 Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available6.png differ diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/README.md new file mode 100644 index 000000000..8e47e05a7 --- /dev/null +++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/README.md @@ -0,0 +1,192 @@ +# πŸš€ Oracle Database @ Azure (ODAA) - Deployment Scripts + +## βš™οΈ Install the Microhack Environment + +The following resources needs to be created before the workshop start. + +- πŸ“¦ Azure Resource Group +- βš“ Azure Kubernetes Service (AKS) +- 🌐 Install Ingress Controller NGINX on AKS + +> ⚠️ **IMPORTANT:** Make sure the CIDR of the created VNet is added to the Oracle NSG. 
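+
+A quick way to look up the CIDR that must be allowed on the Oracle side is to read the address space of the deployed VNet. A minimal sketch, assuming the `$prefix`/`$postfix` variables defined below and that the VNet shares the resource group name (as in the sample output further down):
+
+~~~powershell
+# Show the address space of the AKS VNet so it can be added to the Oracle NSG
+az network vnet show -g "$prefix$postfix" -n "$prefix$postfix" --query "addressSpace.addressPrefixes" -o tsv
+~~~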
+ +### πŸ“‹ Prerequisites + +- πŸ”§ install Azure CLI +- βš“ install kubectl +- πŸ“¦ install helm +- πŸ” install jq +- πŸ’» Scripts need to run on bash (Linux, MacOS, WSL2 on Windows) + +### πŸ” Login to Azure and set the right subscription + +~~~powershell +az login # choose your assigned user account for ex. user01@cptazure.org or the menu "Work or school account +az account show +az account set -s "" +az provider register --namespace Microsoft.ContainerService +az provider register --namespace Microsoft.Network +az provider register --namespace Microsoft.OperationalInsights +az provider register --namespace Microsoft.Compute +~~~ + +### 🌍 Define some environment variables + +~~~powershell +$prefix="team" +$postfix="1" +$location="francecentral" +~~~ + +### πŸ—οΈ Create Azure Resources + +> ℹ️ **NOTE:** Currently you will need to redo this steps for each Team environment. Make sure to change the postfix. + +~~~bash +az deployment sub create -n "$prefix$postfix" -l $location -f ./resources/infra/bicep/aks/main.bicep -p location=$location prefix=$prefix postfix=$postfix aksVmSize="Standard_D8ads_v6" cidr="10.11.0.0" +# Verify the created resources, list all resource inside the resource group +az resource list -g "$prefix$postfix" -o table --query "[].{Name:name, Type:type}" +~~~ + +~~~text +Name Type +------ ------------------------------------------ +odaa1 Microsoft.Network/virtualNetworks +odaa1 Microsoft.OperationalInsights/workspaces +odaa1 Microsoft.ContainerService/managedClusters +~~~ + +### βš“ Connect to AKS + +~~~powershell +# set the right subscription +az account set -s "sub-team0" +# login to aks +az aks get-credentials -g "aks-team0" -n "aks-team0" --overwrite-existing +# list namespaces +kubectl get namespaces # should show default, kube-system, kube-public +~~~ + +~~~text +NAME STATUS AGE +default Active 10m +gatekeeper-system Active 9m37s +kube-node-lease Active 10m +kube-public Active 10m +kube-system Active 10m +~~~ + +### 🌐 Install Ingress Controller + +🌟 An ingress controller on Azure Kubernetes Service (AKS) manages external access to services running inside your cluster. It acts as a gateway, routing HTTP and HTTPS traffic from outside the cluster to the appropriate internal services based on rules you define. This enables features like SSL termination, load balancing, and path-based routing, making it easier to securely expose and manage multiple applications within AKS. 
+ +~~~powershell +# Change directory to the scripts +# cd scripts/k8s_install/ +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +kubectl create namespace ingress-nginx +helm install nginx-quick ingress-nginx/ingress-nginx -n ingress-nginx +# patch health probe: +kubectl patch service nginx-quick-ingress-nginx-controller -n ingress-nginx -p '{\"metadata\":{\"annotations\":{\"service.beta.kubernetes.io/azure-load-balancer-health-probe-request-path\":\"/healthz\"}}}' +# verify if annotation is added +kubectl get service nginx-quick-ingress-nginx-controller -n ingress-nginx -o jsonpath='{.metadata.annotations}' | jq +kubectl get service --namespace ingress-nginx nginx-quick-ingress-nginx-controller --output wide +# get external IP of nginx controller, you maybe need to wait a few minutes until the IP is assigned +kubectl get service -n ingress-nginx -o jsonpath='{.items[*].status.loadBalancer.ingress[*].ip}' +~~~ + +### Setup Azure Fabric + +Based on https://learn.microsoft.com/en-us/fabric/data-engineering/tutorial-lakehouse-introduction#lakehouse-end-to-end-scenario + +1. https://app.fabric.microsoft.com/home?experience=power-bi +1. + +Sign in to your Power BI account and sign up for the free Microsoft Fabric trial. If you don't have a Power BI license, sign up for a Fabric free license and then you can start the Fabric trial. + +Build and implement an end-to-end lakehouse for your organization: + +Create a Fabric workspace. +Create a lakehouse. +Ingest data, transform data, and load it into the lakehouse. You can also explore the OneLake, one copy of your data across lakehouse mode and SQL analytics endpoint mode. +Connect to your lakehouse using the SQL analytics endpoint and Create a Power BI report using DirectLake to analyze sales data across different dimensions. +Optionally, you can orchestrate and schedule data ingestion and transformation flow with a pipeline. +Clean up resources by deleting the workspace and other items. + +#### Install Image GoldenGate for Distributed Applications and Analytics + +GoldenGate for Distributed Applications and Analytics v23.4.0.24.06 on Linux x86-64 + +Links: +- (Overview of all possible GG Download Images)[https://www.oracle.com/middleware/technologies/goldengate-downloads.html#] +- (Download Page for Application andf Analytics GG)[https://edelivery.oracle.com/ocom/faces/Downloads;jsessionid=ir4RtGq2ylyafl5mEIgKLVFghwS6M8qi1_-8fuPA1wyWxNb2EYUh!122914563?dlp_cid=1184745&rel_cid=1153160&auth_token=1761237128_MDA0ZDFkMjczNTYyNmU3YzE2YTFmZjJlZmQ3NTBjOWIxNjRlOGY3MGFhZDI0NzQyY2Y1Yjc3NThiMzBkZmUyMzo6b3NkY19vcmFjbGUuY29t#] + +Build your own image and push it to your private Azure Container Registry (ACR). + +~~~powershell +# switch to ACR subscription +az account set -s +# change to directory where Dockerfile is located +cd .\10_Oracle_on_Azure\misc\goldengate-temp +# build and push image to ACR +az acr build --registry odaamh --image goldengate/goldengate-oracle-bigdata:23.4.0.24.06 --file Dockerfile . 
+ +az acr repository list --name odaamh --output table + +~~~ + +### Attache ACR to AKS + +~~~powershell +# switch to ACR subscription +az account set -s +$acrId = az acr show --name odaamh --resource-group odaa --query "id" --output tsv + +az account set -s +az aks update --resource-group odaa1 --name odaa1 --attach-acr $acrId +~~~ + +## Tips and Tricks + +### VNet Peering between two subscriptions + +In case your odaa does run in a different tenant / subscription, you need to create a VNet Peering between the two VNet. + +~~~powershell +$postfixODAA = "2" +$postfixAKS = "1" +$subODAAName = "ODAA" +$subAKSName = "sub-1" + +az login -t "" +az account set -s $subODAAName + +# Peering AKS VNet to ODAA VNet +# We need to retrieve the subscription IDs first of the ODAA Vnet +az account set -s $subODAAName; +$subODAAId = az account show --query id -o tsv +# Now we need to login into the subscription where AKS is deployed +az login -t "" +az account set -s $subAKSName; +$subAKSId = az account show --query id -o tsv +az network vnet peering create --name AKS-to-ODAA -g "$prefix$postfixAKS" --vnet-name "$prefix$postfixAKS" --remote-vnet /subscriptions/$subODAAId/resourceGroups/"$prefix$postfixODAA"/providers/Microsoft.Network/virtualNetworks/"$prefix$postfixODAA" --allow-vnet-access +# Peering ODAA VNet to AKS VNet +az account set -s $subODAAName; +az network vnet peering create -n ODAA-to-AKS -g "$prefix$postfixODAA" --vnet-name "$prefix$postfixODAA" --remote-vnet /subscriptions/$subAKSId/resourceGroups/"$prefix$postfixAKS"/providers/Microsoft.Network/virtualNetworks/"$prefix$postfixAKS" --allow-vnet-access + + +# Verify peering on sububscription sub-cptdx-01 +az network vnet peering list -g "$prefix$postfixODAA" --vnet-name "$prefix$postfixODAA" -o table +az account set -s $subAKSName +az network vnet peering list -g "$prefix$postfixAKS" --vnet-name "$prefix$postfixAKS" -o table +~~~ + +### Validate Ingress controller in AKS + +~~~powershell +kubectl get service --namespace ingress-nginx nginx-quick-ingress-nginx-controller +# validate health probe +kubectl get service nginx-quick-ingress-nginx-controller -n ingress-nginx -o jsonpath='{.metadata.annotations}' + +~~~ \ No newline at end of file diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/README.md deleted file mode 100644 index 452569f55..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# Step-by-step Instructions how to Deploy Oracle Data Guard on Azure VMs - Terraform Automation - -## Overview - -This repository contains code to install and configure Oracle databases on Azure VM IaaS in an automated fashion. The scenario of two VMs in an Oracle Dataguard configuration, deployed through Terraform (TODO: and Ansible). - -For more information about how to install and configure Data Guard on an Azure virtual machine (VM) with CLI refer to the documentation [here](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/oracle-dataguard). - -__Important Note - Disclaimer__: The code of this repository is largely based on the Oracle Deployment Automation repository (lza-oracle), which can be found [here](https://github.com/Azure/lza-oracle). The goal of the Terraform automation scripts in this repository is primarily to facilitate the successful execution of the Microhack. 
The code in this repository is not intended for production use and should be used with caution. -At the lza-oracle repository, you can find the code for deploying Oracle databases on Azure VMs using different scenarios, such as single and Dataguard using Terraform, Bicept and Ansible. -If you are interested in deploying Oracle databases on Azure VMs, we recommend you to check the [lza-oracle](https://github.com/Azure/lza-oracle) repository. - -Note that Oracle licensing is not a part of this solution. Please verify that you have the necessary Oracle licenses to run Oracle software on Azure IaaS. - - -The above resources can be deployed using the sample Github action workflows provided in the repository. The workflows are designed to deploy the infrastructure and configure the Oracle database on the VMs. This is the recommended way to deploy the infrastructure and configure the Oracle database. Alternatively the infrastructure can be deployed using Azure CLI and the Oracle database can be configured using Ansible. - -Note that the code provided in this repository is for demonstration purposes only and should not be used in a production environment without thorough testing. - -## Prerequisites - -1. Azure Entra ID Tenant. -2. Minimum 1 subscription, for when deploying VMs. If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/en-us/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio) before you begin. -3. Azure CLI installed on your local machine. You can install Azure CLI from [here](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli). -4. Terraform installed on your local machine. You can install Terraform from [here](https://learn.hashicorp.com/tutorials/terraform/install-cli). - - -## 1. Authenticate Terraform to Azure - -To use Terraform commands against your Azure subscription, you must first authenticate Terraform to that subscription. [This doc](https://learn.microsoft.com/en-us/azure/developer/terraform/authenticate-to-azure?tabs=bash) describes how to authenticate Terraform to your Azure subscription. - -### 2. Create SSH Key - -To deploy Oracle Data Guard on the VMs, you can use **data_guard** module in this repo. The module is located on `terraform/data_guard` directory. - -Before using this module, you have to create your own ssh key to deploy and connect to the two virtual machines you will create. - -```bash -ssh-keygen -f ~/.ssh/mh-oracle-data-guard - -ls -lha ~/.ssh/ --rw------- 1 yourname staff 2.6K 8 17 2023 mh-oracle-data-guard --rw-r--r-- 1 yourname staff 589B 8 17 2023 mh-oracle-data-guard.pub -``` - -### 4. Define Variables - -Define the variables such as location and Resource Group name in the `global_variables.tf` file. For more reference on all variables you can set, see [variables description](variables.md) - -Next, you go to `terraform/data_guard` directory and create `fixtures.tfvars` file, then copy the contents of the ssh public key used for deploying virtual machines on Azure (~/.ssh/mh-oracle-data-guard.pub). - -This is a sample `fixtures.tfvars` file. - -```tf:fixtures.tfvars -ssh_key = "ssh-rsa xxxxxxxxxxxxxx=" -``` -### 5. Execute Terraform Commands -Execute below Terraform commands. When you deploy resources to Azure, you have to indicate `fixtures.tfvars` as a variable file, which contains the ssh public key. 
- -```bash - -$ terraform init - -$ terraform plan -var-file=fixtures.tfvars - -$ terraform apply -var-file=fixtures.tfvars -``` - -You can connect to the virtual machine with ssh private key. While deploying resources, a public ip address is generated and attached to the virtual machine, so that you can connect to the virtual machine with this IP address. The username is `oracle`, which is fixed in `terraform/data_guard/module.tf`. - -``` -$ ssh -i ~/.ssh/mh-oracle-data-guard oracle@ - - - -## Trademarks - -This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies. diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/LICENSE b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/LICENSE deleted file mode 100644 index 9e841e7a2..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ - MIT License - - Copyright (c) Microsoft Corporation. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/backend.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/backend.tf deleted file mode 100644 index 556a8c85c..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/backend.tf +++ /dev/null @@ -1,3 +0,0 @@ -terraform { - backend "local" {} -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/jit_rule.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/jit_rule.tf deleted file mode 100644 index 39ba4ff7d..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/jit_rule.tf +++ /dev/null @@ -1,91 +0,0 @@ -######################################################################################### -# # -# JIT Access Policy # -# # -######################################################################################### -data "azurerm_virtual_machine" "oracle_primary_vm" { - name = module.vm_primary.vm.name - resource_group_name = module.common_infrastructure.resource_group.name - - depends_on = [module.vm_primary, - module.storage_primary - ] -} - -data "azurerm_virtual_machine" "oracle_secondary_vm" { - name = module.vm_secondary.vm.name - resource_group_name = module.common_infrastructure.resource_group.name - - depends_on = [module.vm_secondary - , module.storage_secondary - ] -} - -resource "time_sleep" "wait_for_primary_vm_creation" { - create_duration = var.jit_wait_for_vm_creation - - depends_on = [data.azurerm_virtual_machine.oracle_primary_vm, - module.storage_primary - ] -} - -resource "time_sleep" "wait_for_secondary_vm_creation" { - create_duration = var.jit_wait_for_vm_creation - - depends_on = [data.azurerm_virtual_machine.oracle_secondary_vm - , module.storage_secondary - ] -} - - -resource "azapi_resource" "jit_ssh_policy_primary" { - count = module.vm_primary.database_server_count - name = "JIT-SSH-Policy-primary" - parent_id = "${module.common_infrastructure.resource_group.id}/providers/Microsoft.Security/locations/${module.common_infrastructure.resource_group.location}" - type = "Microsoft.Security/locations/jitNetworkAccessPolicies@2020-01-01" - schema_validation_enabled = false - body = jsonencode({ - "kind" : "Basic" - "properties" : { - "virtualMachines" : [{ - "id" : "/subscriptions/${module.common_infrastructure.current_subscription.subscription_id}/resourceGroups/${module.common_infrastructure.resource_group.name}/providers/Microsoft.Compute/virtualMachines/${module.vm_primary.vm.name}", - "ports" : [ - { - "number" : 22, - "protocol" : "TCP", - "allowedSourceAddressPrefix" : "*", - "maxRequestAccessDuration" : "PT3H" - } - ] - }] - } - }) - - depends_on = [time_sleep.wait_for_primary_vm_creation] -} - -resource "azapi_resource" "jit_ssh_policy_secondary" { - count = module.vm_secondary.database_server_count - name = "JIT-SSH-Policy-secondary" - parent_id = "${module.common_infrastructure.resource_group.id}/providers/Microsoft.Security/locations/${module.common_infrastructure.resource_group.location}" - type = 
"Microsoft.Security/locations/jitNetworkAccessPolicies@2020-01-01" - schema_validation_enabled = false - body = jsonencode({ - "kind" : "Basic" - "properties" : { - "virtualMachines" : [{ - "id" : "/subscriptions/${module.common_infrastructure.current_subscription.subscription_id}/resourceGroups/${module.common_infrastructure.resource_group.name}/providers/Microsoft.Compute/virtualMachines/${module.vm_secondary.vm.name}", - "ports" : [ - { - "number" : 22, - "protocol" : "TCP", - "allowedSourceAddressPrefix" : "*", - "maxRequestAccessDuration" : "PT3H" - } - ] - }] - } - }) - - depends_on = [time_sleep.wait_for_secondary_vm_creation] -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/module.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/module.tf deleted file mode 100644 index 2e94560c4..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/module.tf +++ /dev/null @@ -1,257 +0,0 @@ -data "azurerm_client_config" "current" {} - -module "common_infrastructure" { - source = "./modules/common_infrastructure" - - infrastructure = local.infrastructure - is_data_guard = true - is_diagnostic_settings_enabled = var.is_diagnostic_settings_enabled - diagnostic_target = var.diagnostic_target - tags = var.resourcegroup_tags - -} - -module "vm_primary" { - source = "./modules/compute" - - resource_group_name = module.common_infrastructure.created_resource_group_name - location = var.location - vm_name = "vm-primary-0" - public_key = var.ssh_key - sid_username = "oracle" - vm_sku = var.vm_sku - - vm_source_image_reference = var.vm_source_image_reference - aad_system_assigned_identity = true - public_ip_address_resource_id = module.network.db_server_puplic_ip_resources[0].id - - - is_diagnostic_settings_enabled = module.common_infrastructure.is_diagnostic_settings_enabled - diagnostic_target = module.common_infrastructure.diagnostic_target - storage_account_id = module.common_infrastructure.target_storage_account_id - storage_account_sas_token = module.common_infrastructure.target_storage_account_sas - log_analytics_workspace = module.common_infrastructure.log_analytics_workspace != null ? 
{ - id = module.common_infrastructure.log_analytics_workspace.id - name = module.common_infrastructure.log_analytics_workspace.name - } : null - data_collection_rules = module.common_infrastructure.data_collection_rules - eventhub_authorization_rule_id = module.common_infrastructure.eventhub_authorization_rule_id - partner_solution_id = module.common_infrastructure.partner_solution_id - tags = module.common_infrastructure.tags - db_subnet = module.network.db_subnet - - availability_zone = 1 - - - - vm_user_assigned_identity_id = var.vm_user_assigned_identity_id - - vm_os_disk = { - name = "osdisk-primary" - caching = "ReadWrite" - storage_account_type = "Premium_LRS" - disk_encryption_set_id = null - disk_size_gb = 128 - } - - role_assignments = { - role_assignment_1 = { - role_definition_id_or_name = "Virtual Machine Contributor" - principal_id = data.azurerm_client_config.current.object_id - skip_service_principal_aad_check = false - } - } - - role_assignments_nic = { - role_assignment_1 = { - role_definition_id_or_name = "Contributor" - principal_id = data.azurerm_client_config.current.object_id - skip_service_principal_aad_check = false - } - } - - vm_extensions = { - azure_monitor_agent = { - name = "vm-primary-azure-monitor-agent" - publisher = "Microsoft.Azure.Monitor" - type = "AzureMonitorLinuxAgent" - type_handler_version = "1.0" - auto_upgrade_minor_version = true - automatic_upgrade_enabled = true - settings = null - } - } - - depends_on = [module.network, module.common_infrastructure] -} - - -module "vm_secondary" { - source = "./modules/compute" - - resource_group_name = module.common_infrastructure.created_resource_group_name - location = var.location - vm_name = "vm-secondary-0" - public_key = var.ssh_key - sid_username = "oracle" - vm_sku = var.vm_sku - - vm_source_image_reference = var.vm_source_image_reference - vm_user_assigned_identity_id = var.vm_user_assigned_identity_id - aad_system_assigned_identity = true - public_ip_address_resource_id = module.network.db_server_puplic_ip_resources[1].id - - is_diagnostic_settings_enabled = module.common_infrastructure.is_diagnostic_settings_enabled - diagnostic_target = module.common_infrastructure.diagnostic_target - storage_account_id = module.common_infrastructure.target_storage_account_id - storage_account_sas_token = module.common_infrastructure.target_storage_account_sas - log_analytics_workspace = module.common_infrastructure.log_analytics_workspace != null ? 
{ - id = module.common_infrastructure.log_analytics_workspace.id - name = module.common_infrastructure.log_analytics_workspace.name - } : null - data_collection_rules = module.common_infrastructure.data_collection_rules - eventhub_authorization_rule_id = module.common_infrastructure.eventhub_authorization_rule_id - partner_solution_id = module.common_infrastructure.partner_solution_id - tags = module.common_infrastructure.tags - db_subnet = module.network.db_subnet - - - - vm_os_disk = { - name = "osdisk-secondary" - caching = "ReadWrite" - storage_account_type = "Premium_LRS" - disk_encryption_set_id = null - disk_size_gb = 128 - } - - role_assignments = { - role_assignment_1 = { - role_definition_id_or_name = "Virtual Machine Contributor" - principal_id = data.azurerm_client_config.current.object_id - skip_service_principal_aad_check = false - } - } - - vm_extensions = { - azure_monitor_agent = { - name = "vm-secondary-azure-monitor-agent" - publisher = "Microsoft.Azure.Monitor" - type = "AzureMonitorLinuxAgent" - type_handler_version = "1.1" - auto_upgrade_minor_version = true - automatic_upgrade_enabled = true - settings = null - } - } - #ToDo: Pending - # role_assignments_nic = { - # role_assignment_1 = { - # role_definition_id_or_name = "Contributor" - # principal_id = data.azurerm_client_config.current.object_id - # skip_service_principal_aad_check = false - # } - # } - - depends_on = [module.network, module.common_infrastructure] -} - -module "network" { - source = "./modules/network" - - resource_group = module.common_infrastructure.resource_group - is_data_guard = module.common_infrastructure.is_data_guard - is_diagnostic_settings_enabled = module.common_infrastructure.is_diagnostic_settings_enabled - diagnostic_target = module.common_infrastructure.diagnostic_target - storage_account_id = module.common_infrastructure.target_storage_account_id - log_analytics_workspace_id = try(module.common_infrastructure.log_analytics_workspace.id, "") - eventhub_authorization_rule_id = module.common_infrastructure.eventhub_authorization_rule_id - partner_solution_id = module.common_infrastructure.partner_solution_id - tags = module.common_infrastructure.tags - - - #ToDo: role_assignments_nic - # role_assignments_nic = { - # role_assignment_1 = { - # name = "Contributor" - # skip_service_principal_aad_check = false - # } - # } - - role_assignments_pip = { - role_assignment_1 = { - name = "Contributor" - skip_service_principal_aad_check = false - } - } - - role_assignments_nsg = { - role_assignment_1 = { - name = "Contributor" - skip_service_principal_aad_check = false - } - } - - role_assignments_vnet = { - role_assignment_1 = { - name = "Contributor" - skip_service_principal_aad_check = false - } - } - - role_assignments_subnet = { - role_assignment_1 = { - name = "Contributor" - skip_service_principal_aad_check = false - } - } -} - - -module "storage_primary" { - source = "./modules/storage" - - resource_group = module.common_infrastructure.resource_group - is_data_guard = module.common_infrastructure.is_data_guard - naming = "oracle-primary" - vm = module.vm_primary.vm - tags = module.common_infrastructure.tags - database_disks_options = { - data_disks = var.database_disks_options.data_disks - asm_disks = var.database_disks_options.asm_disks - redo_disks = var.database_disks_options.redo_disks - } - availability_zone = module.vm_primary.availability_zone - - role_assignments = { - role_assignment_1 = { - name = "Contributor" - skip_service_principal_aad_check = false - } - } -} - -module 
"storage_secondary" { - source = "./modules/storage" - - resource_group = module.common_infrastructure.resource_group - is_data_guard = module.common_infrastructure.is_data_guard - naming = "oracle-secondary" - vm = module.vm_secondary.vm - tags = module.common_infrastructure.tags - database_disks_options = { - data_disks = var.database_disks_options.data_disks - asm_disks = var.database_disks_options.asm_disks - redo_disks = var.database_disks_options.redo_disks - } - availability_zone = module.vm_secondary.availability_zone - - role_assignments = { - role_assignment_1 = { - name = "Contributor" - skip_service_principal_aad_check = false - } - } -} - - diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/infrastructure.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/infrastructure.tf deleted file mode 100644 index a91ed21d9..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/infrastructure.tf +++ /dev/null @@ -1,149 +0,0 @@ -######################################################################################### -# # -# Subscription # -# # -######################################################################################### -data "azurerm_subscription" "current" {} - -######################################################################################### -# # -# Resource Group # -# # -######################################################################################### -resource "azurerm_resource_group" "rg" { - count = local.resource_group_exists ? 0 : 1 - name = local.rg_name - location = var.infrastructure.region - tags = var.tags - - lifecycle { - ignore_changes = [ - tags - ] - } -} - -data "azurerm_resource_group" "rg" { - name = local.rg_name - - depends_on = [azurerm_resource_group.rg] -} - -######################################################################################### -# # -# Diagnostic Settings # -# # -######################################################################################### -resource "azurerm_storage_account" "diagnostic" { - count = var.is_diagnostic_settings_enabled ? 1 : 0 - name = "${local.prefix}diag${random_string.suffix.result}" - resource_group_name = data.azurerm_resource_group.rg.name - location = data.azurerm_resource_group.rg.location - tags = merge(local.tags, var.tags) - - account_tier = "Standard" - account_replication_type = "LRS" -} - -data "azurerm_storage_account" "diagnostic" { - count = var.is_diagnostic_settings_enabled ? 1 : 0 - name = azurerm_storage_account.diagnostic[count.index].name - resource_group_name = data.azurerm_resource_group.rg.name - - depends_on = [azurerm_storage_account.diagnostic] -} - -resource "random_string" "suffix" { - length = 14 - special = false - upper = false -} - -data "azurerm_storage_account_sas" "diagnostic" { - count = var.is_diagnostic_settings_enabled ? 
1 : 0 - connection_string = azurerm_storage_account.diagnostic[0].primary_connection_string - - resource_types { - service = false - container = true - object = true - } - - services { - blob = true - queue = false - table = true - file = false - } - - start = timestamp() - expiry = timeadd(timestamp(), "8766h") - - permissions { - read = false - write = true - delete = false - list = true - add = true - create = true - update = true - process = false - tag = false - filter = false - } -} - -resource "azurerm_log_analytics_workspace" "diagnostic" { - count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 1 : 0 - name = "${local.prefix}diag${random_string.suffix.result}" - resource_group_name = data.azurerm_resource_group.rg.name - location = data.azurerm_resource_group.rg.location - sku = "PerGB2018" - retention_in_days = 30 - tags = merge(local.tags, var.tags) -} - -data "azurerm_log_analytics_workspace" "diagnostic" { - count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 1 : 0 - name = "${local.prefix}diag${random_string.suffix.result}" - resource_group_name = data.azurerm_resource_group.rg.name - - depends_on = [azurerm_log_analytics_workspace.diagnostic] -} - -resource "azurerm_eventhub_namespace" "diagnostic" { - count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Event_Hubs" ? 1 : 0 - name = "${local.prefix}diag${random_string.suffix.result}" - resource_group_name = data.azurerm_resource_group.rg.name - location = data.azurerm_resource_group.rg.location - sku = "Standard" - capacity = 1 - tags = merge(local.tags, var.tags) -} - -resource "azurerm_eventhub_namespace_authorization_rule" "diagnostic" { - count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Event_Hubs" ? 1 : 0 - name = "${local.prefix}diag${random_string.suffix.result}" - namespace_name = azurerm_eventhub_namespace.diagnostic[0].name - resource_group_name = data.azurerm_resource_group.rg.name - listen = var.eventhub_permission.listen - send = var.eventhub_permission.send - manage = var.eventhub_permission.manage -} - -resource "azurerm_new_relic_monitor" "diagnostic" { - count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Partner_Solutions" ? 1 : 0 - name = "${local.prefix}diag${random_string.suffix.result}" - resource_group_name = data.azurerm_resource_group.rg.name - location = data.azurerm_resource_group.rg.location - plan { - effective_date = "2023-09-20T00:00:00Z" - } - - user { - email = var.logz_user.email - first_name = var.logz_user.first_name - last_name = var.logz_user.last_name - phone_number = var.logz_user.phone_number - } -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/monitoring_settings.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/monitoring_settings.tf deleted file mode 100644 index bfcf4f2da..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/monitoring_settings.tf +++ /dev/null @@ -1,127 +0,0 @@ - -#Data collection rules -resource "azurerm_monitor_data_collection_rule" "collection_rule_linux" { - count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 
1 : 0 - kind = "Linux" - location = var.infrastructure.region - name = "LinuxCollectionRule" - resource_group_name = local.rg_name - tags = var.tags - data_flow { - destinations = [data.azurerm_log_analytics_workspace.diagnostic[0].name] - output_stream = "Microsoft-Perf" - streams = ["Microsoft-Perf"] - transform_kql = "source" - } - data_flow { - destinations = [data.azurerm_log_analytics_workspace.diagnostic[0].name] - output_stream = "Microsoft-Syslog" - streams = ["Microsoft-Syslog"] - transform_kql = "source" - } - data_sources { - performance_counter { - counter_specifiers = ["Processor(*)\\% Processor Time", "Processor(*)\\% Idle Time", "Processor(*)\\% User Time", "Processor(*)\\% Nice Time", "Processor(*)\\% Privileged Time", "Processor(*)\\% IO Wait Time", "Processor(*)\\% Interrupt Time", "Processor(*)\\% DPC Time", "Memory(*)\\Available MBytes Memory", "Memory(*)\\% Available Memory", "Memory(*)\\Used Memory MBytes", "Memory(*)\\% Used Memory", "Memory(*)\\Pages/sec", "Memory(*)\\Page Reads/sec", "Memory(*)\\Page Writes/sec", "Memory(*)\\Available MBytes Swap", "Memory(*)\\% Available Swap Space", "Memory(*)\\Used MBytes Swap Space", "Memory(*)\\% Used Swap Space", "Process(*)\\Pct User Time", "Process(*)\\Pct Privileged Time", "Process(*)\\Used Memory", "Process(*)\\Virtual Shared Memory", "Logical Disk(*)\\% Free Inodes", "Logical Disk(*)\\% Used Inodes", "Logical Disk(*)\\Free Megabytes", "Logical Disk(*)\\% Free Space", "Logical Disk(*)\\% Used Space", "Logical Disk(*)\\Logical Disk Bytes/sec", "Logical Disk(*)\\Disk Read Bytes/sec", "Logical Disk(*)\\Disk Write Bytes/sec", "Logical Disk(*)\\Disk Transfers/sec", "Logical Disk(*)\\Disk Reads/sec", "Logical Disk(*)\\Disk Writes/sec", "Network(*)\\Total Bytes Transmitted", "Network(*)\\Total Bytes Received", "Network(*)\\Total Bytes", "Network(*)\\Total Packets Transmitted", "Network(*)\\Total Packets Received", "Network(*)\\Total Rx Errors", "Network(*)\\Total Tx Errors", "Network(*)\\Total Collisions", "System(*)\\Uptime", "System(*)\\Load1", "System(*)\\Load5", "System(*)\\Load15", "System(*)\\Users", "System(*)\\Unique Users", "System(*)\\CPUs"] - name = "perfCounterDataSource60" - sampling_frequency_in_seconds = 60 - streams = ["Microsoft-Perf"] - } - syslog { - facility_names = ["alert", "audit", "auth", "authpriv", "clock", "cron", "daemon", "ftp", "kern", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7", "lpr", "mail", "news", "nopri", "ntp", "syslog", "user", "uucp"] - log_levels = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency"] - name = "sysLogsDataSource-1688419672" - } - } - - - destinations { - - dynamic "log_analytics" { - for_each = local.law_destination_settings - iterator = dest - - content { - workspace_resource_id = dest.value.resource_id - name = dest.value.name - } - } - - dynamic "event_hub" { - for_each = local.eventhub_destination_settings - - content { - event_hub_id = each.value.resource_id - name = each.value.name - } - } - - dynamic "storage_blob" { - for_each = local.storage_account_destination_settings - - content { - storage_account_id = each.value.resource_id - container_name = each.value.container_name - name = each.value.name - } - } - } - - - depends_on = [data.azurerm_log_analytics_workspace.diagnostic] -} - -# Data collection rule for VM Insights -resource "azurerm_monitor_data_collection_rule" "collection_rule_vm_insights" { - count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 
1 : 0 - description = "Data collection rule for VM Insights." - location = var.infrastructure.region - name = "MSVMI-DataCollectionRuleVMInsights" - resource_group_name = local.rg_name - tags = var.tags - - data_flow { - destinations = ["VMInsightsPerf-Logs-Dest"] - streams = ["Microsoft-InsightsMetrics"] - } - data_flow { - destinations = ["VMInsightsPerf-Logs-Dest"] - streams = ["Microsoft-ServiceMap"] - } - data_sources { - extension { - extension_name = "DependencyAgent" - name = "DependencyAgentDataSource" - streams = ["Microsoft-ServiceMap"] - } - performance_counter { - counter_specifiers = ["\\VmInsights\\DetailedMetrics"] - name = "VMInsightsPerfCounters" - sampling_frequency_in_seconds = 60 - streams = ["Microsoft-InsightsMetrics"] - } - } - destinations { - log_analytics { - name = "VMInsightsPerf-Logs-Dest" - workspace_resource_id = data.azurerm_log_analytics_workspace.diagnostic[0].id - } - } - depends_on = [ - data.azurerm_log_analytics_workspace.diagnostic - ] -} - -data "azurerm_monitor_data_collection_rule" "collection_rule_linux" { - count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 1 : 0 - - name = azurerm_monitor_data_collection_rule.collection_rule_linux[0].name - resource_group_name = local.rg_name -} - - -data "azurerm_monitor_data_collection_rule" "collection_rule_vm_insights" { - count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 1 : 0 - - name = azurerm_monitor_data_collection_rule.collection_rule_vm_insights[0].name - resource_group_name = local.rg_name -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/outputs.tf deleted file mode 100644 index 9686b921e..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/outputs.tf +++ /dev/null @@ -1,98 +0,0 @@ -############################################################################### -# # -# Subscription # -# # -############################################################################### -output "current_subscription" { - value = data.azurerm_subscription.current -} - -############################################################################### -# # -# Resource Group # -# # -############################################################################### -output "resource_group" { - value = data.azurerm_resource_group.rg -} - -output "created_resource_group_id" { - description = "Created resource group ID" - value = data.azurerm_resource_group.rg.id -} - -output "created_resource_group_name" { - description = "Created resource group name" - value = data.azurerm_resource_group.rg.name -} - -output "created_resource_group_subscription_id" { - description = "Created resource group' subscription ID" - value = data.azurerm_resource_group.rg.id -} - -output "is_data_guard" { - description = "Whether the deployment is for Data Guard" - value = var.is_data_guard -} - -output "is_diagnostic_settings_enabled" { - description = "Whether diagnostic settings are enabled" - value = var.is_diagnostic_settings_enabled -} - -output "target_storage_account_id" { - description = "Storage account ID used for diagnostics" - value = var.is_diagnostic_settings_enabled ? 
data.azurerm_storage_account.diagnostic[0].id : "" -} - -output "target_storage_account_sas" { - description = "Storage account SAS used for diagnostics" - value = var.is_diagnostic_settings_enabled ? data.azurerm_storage_account_sas.diagnostic[0].sas : "" -} - -output "log_analytics_workspace" { - description = "Log Analytics workspace ID" - value = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? { - id = data.azurerm_log_analytics_workspace.diagnostic[0].id - name = data.azurerm_log_analytics_workspace.diagnostic[0].name - } : null -} - -output "eventhub_authorization_rule_id" { - description = "ID of an Event Hub authorization rule" - value = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Event_Hubs" ? azurerm_eventhub_namespace_authorization_rule.diagnostic[0].id : null -} - -output "partner_solution_id" { - description = "Partner solution ID" - value = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Partner_Solutions" ? azurerm_new_relic_monitor.diagnostic[0].id : null -} - -output "diagnostic_target" { - description = "The destination type of the diagnostic settings" - value = var.diagnostic_target -} - -output "availability_zone" { - description = "Availability zones" - value = var.availability_zone -} - -output "tags" { - description = "Tags applied to the resources" - value = var.tags -} - -output "data_collection_rules" { - value = (var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace") ? { - "${data.azurerm_monitor_data_collection_rule.collection_rule_linux[0].name}" = { - id = data.azurerm_monitor_data_collection_rule.collection_rule_linux[0].id - }, - "${data.azurerm_monitor_data_collection_rule.collection_rule_vm_insights[0].name}" = { - id = data.azurerm_monitor_data_collection_rule.collection_rule_vm_insights[0].id - } - - } : {} - -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/providers.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/providers.tf deleted file mode 100644 index ae8863f42..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/providers.tf +++ /dev/null @@ -1,24 +0,0 @@ -terraform { - required_version = ">=1.6.0" - required_providers { - azurerm = { - source = "hashicorp/azurerm" - version = ">=3.11.0, <4.0" - } - azapi = { - source = "Azure/azapi" - version = "=1.8.0" - } - } -} - -provider "azurerm" { - features { - resource_group { - prevent_deletion_if_contains_resources = true - } - virtual_machine { - delete_os_disk_on_deletion = true - } - } -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/resource_lock.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/resource_lock.tf deleted file mode 100644 index e15a22bd5..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/resource_lock.tf +++ /dev/null @@ -1,27 +0,0 @@ -resource "azurerm_management_lock" "subscription" { - count = length(var.subscription_locks) > 1 && length(try(var.subscription_locks.name, "")) > 0 ? 
1 : 0 - name = var.subscription_locks.name - scope = data.azurerm_subscription.current.id - lock_level = var.subscription_locks.type -} - -resource "azurerm_management_lock" "resource_group" { - count = length(var.resource_group_locks) > 1 && length(try(var.resource_group_locks.name, "")) > 0 ? 1 : 0 - name = var.resource_group_locks.name - scope = data.azurerm_resource_group.rg.id - lock_level = var.resource_group_locks.type - - depends_on = [azurerm_resource_group.rg] -} - -resource "azurerm_management_lock" "storage_account_diagnostic" { - count = (length(var.resource_group_locks) > 1 && length(try(var.resource_group_locks.name, "")) > 0 && var.is_diagnostic_settings_enabled ) ? 1 : 0 - name = var.resource_group_locks.name - scope = data.azurerm_storage_account.diagnostic[0].id - lock_level = var.resource_group_locks.type - - depends_on = [azurerm_resource_group.rg, data.azurerm_storage_account.diagnostic] -} - -#ToDo: Add more locks for other resources - diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/role_assignments.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/role_assignments.tf deleted file mode 100644 index 43603476f..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/role_assignments.tf +++ /dev/null @@ -1,17 +0,0 @@ -data "azurerm_client_config" "current" {} - -data "azurerm_role_definition" "builtin" { - for_each = var.role_assignments - name = each.value.name -} - -resource "azurerm_role_assignment" "assignment" { - for_each = var.role_assignments - role_definition_name = data.azurerm_role_definition.builtin[each.key].name - principal_id = data.azurerm_client_config.current.object_id - scope = try(each.value.scope, data.azurerm_subscription.current.id) - skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, null) - description = try(each.value.description, null) - condition = try(each.value.condition, null) - condition_version = try(each.value.condition_version, null) -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_global.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_global.tf deleted file mode 100644 index aa85b1c1f..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_global.tf +++ /dev/null @@ -1,95 +0,0 @@ -variable "infrastructure" {} - -variable "is_data_guard" { - description = "Whether Data Guard is enabled" - default = false -} - -variable "is_diagnostic_settings_enabled" { - description = "Whether diagnostic settings are enabled" - default = false -} - -variable "diagnostic_target" { - description = "The destination type of the diagnostic settings" - default = "Log_Analytics_Workspace" - validation { - condition = contains(["Log_Analytics_Workspace", "Storage_Account", "Event_Hubs", "Partner_Solutions"], var.diagnostic_target) - error_message = "Allowed values are Log_Analytics_Workspace, Storage_Account, Event_Hubs, Partner_Solutions" - } -} - -variable "eventhub_permission" { - description = "Authorization rule permissions for Event Hub" - default = { - listen = true - 
send = true - manage = true - } -} - -variable "log_destinations" { - type = map(object({ - type = string // E.g., "LogAnalytics", "EventHub", "StorageBlob" - resource_id = optional(string) // For Log Analytics, Event Hub, Storage Account - # workspace_id = optional(string) // For Log Analytics - # eventhub_id = optional(string) // For Event Hub - # storage_account_id = optional(string) // For Storage Account - container_name = optional(string) // For Blob container - name = string // Destination name within the DCR - })) - default = {} -} - - - - - -variable "logz_user" { - description = "Logz.io" - default = { - email = "user@example.com" - first_name = "Example" - last_name = "User" - phone_number = "+12313803556" - } -} - -variable "role_assignments" { - description = "Role assignments" - default = {} -} - -variable "subscription_locks" { - type = object({ - name = optional(string, "") - type = optional(string, "CanNotDelete") - }) - default = {} - validation { - condition = contains(["CanNotDelete", "ReadOnly"], var.subscription_locks.type) - error_message = "Lock type must be one of: CanNotDelete, ReadOnly." - } -} - -variable "resource_group_locks" { - type = object({ - name = optional(string, "") - type = optional(string, "CanNotDelete") - }) - default = {} - validation { - condition = contains(["CanNotDelete", "ReadOnly"], var.resource_group_locks.type) - error_message = "Lock type must be one of: CanNotDelete, ReadOnly." - } -} - -variable "availability_zone" { - description = "The availability zones of the resource" - default = null -} - -variable "tags" { - description = "Tags to be added to the resources" - default = {} -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_local.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_local.tf deleted file mode 100644 index f8a6cc21b..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_local.tf +++ /dev/null @@ -1,43 +0,0 @@ -locals { - resource_group_exists = length(try(var.infrastructure.resource_group.arm_id, "")) > 0 - // If resource ID is specified extract the resourcegroup name from it otherwise read it either from input of create using the naming convention - rg_name = local.resource_group_exists ? ( - try(split("/", var.infrastructure.resource_group.arm_id))[4]) : ( - length(var.infrastructure.resource_group.name) > 0 ? ( - var.infrastructure.resource_group.name) : ( - format("%s-%s-%s-%s-%s", - "rg", - local.prefix, - "demo", - var.infrastructure.region, - "001" - ) - ) - ) - - // Resource group - prefix = "oracle" - - - law_destination_settings = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? { Log_Analytics_Workspace = { - type = "Log_Analytics_Workspace" - resource_id = data.azurerm_log_analytics_workspace.diagnostic[0].id - name = data.azurerm_log_analytics_workspace.diagnostic[0].name - } } : {} - - storage_account_destination_settings = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Storage_Account" ? 
{ Storage_Account = { - type = "Storage_Account" - resource_id = data.azurerm_storage_account.diagnostic[0].id - container_name = data.azurerm_storage_account_sas.diagnostic[0].sas - name = data.azurerm_storage_account.diagnostic[0].name - } } : {} - - eventhub_destination_settings = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Event_Hubs" ? { Event_Hubs = { - type = "Event_Hubs" - resource_id = azurerm_eventhub_namespace_authorization_rule.diagnostic[0].id - name = azurerm_eventhub_namespace_authorization_rule.diagnostic[0].name - } } : {} - - - tags = {} -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/availability_set.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/availability_set.tf deleted file mode 100644 index 816c3fd28..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/availability_set.tf +++ /dev/null @@ -1,17 +0,0 @@ -resource "azurerm_availability_set" "oracle_vm" { - count = var.availability_zone == null ? 1 : 0 - name = "as-${count.index}" - location = var.location - resource_group_name = var.resource_group_name - - platform_fault_domain_count = 2 - -} - -data "azurerm_availability_set" "oracle_vm" { - count = var.availability_zone == null ? 1 : 0 - name = "as-${count.index}" - resource_group_name = var.resource_group_name - - depends_on = [azurerm_availability_set.oracle_vm] -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/data.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/data.tf deleted file mode 100644 index a526daaa6..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/data.tf +++ /dev/null @@ -1,14 +0,0 @@ -data "azurerm_virtual_machine" "oracle_vm_primary" { - name = module.avm-res-compute-virtualmachine[keys(local.vm_config_data_parameter)[0]].virtual_machine.name - resource_group_name = var.resource_group_name - - depends_on = [module.avm-res-compute-virtualmachine] -} - -data "azurerm_virtual_machine" "oracle_vms" { - for_each = { for vm in module.avm-res-compute-virtualmachine : vm.name => vm.virtual_machine } - name = each.value.name - resource_group_name = var.resource_group_name - - depends_on = [module.avm-res-compute-virtualmachine] -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/monitoring_settings.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/monitoring_settings.tf deleted file mode 100644 index 44ce990ec..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/monitoring_settings.tf +++ /dev/null @@ -1,10 +0,0 @@ -# Create Data Collection Rule Association for VM created -resource "azurerm_monitor_data_collection_rule_association" "dcra_vm_insights" { - # Create association for each data collection rule - for_each = { for key, val in var.data_collection_rules : key => val if(var.log_analytics_workspace != null && var.is_diagnostic_settings_enabled) } - - name = each.key - target_resource_id = data.azurerm_virtual_machine.oracle_vm_primary.id - 
data_collection_rule_id = each.value.id -} - diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/outputs.tf deleted file mode 100644 index 81eb5373c..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/outputs.tf +++ /dev/null @@ -1,32 +0,0 @@ -output "vm" { - value = data.azurerm_virtual_machine.oracle_vm_primary -} - -output "database_server_count" { - value = var.database_server_count -} - -output "availability_zone" { - value = var.availability_zone != null ? var.availability_zone : null -} - -output "oracle_vms" { - value = data.azurerm_virtual_machine.oracle_vms - sensitive = true -} - -output "vm_map_collection" { - value = { for vm in module.avm-res-compute-virtualmachine : vm.name => { - name = vm.name - id = vm.resource_id - public_ips = vm.public_ips - - } } - sensitive = false -} - - -output "vm_collection" { - value = flatten([for vm in module.avm-res-compute-virtualmachine : vm.name]) - sensitive = false -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/variable_global.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/variable_global.tf deleted file mode 100644 index cc53a8fe4..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/variable_global.tf +++ /dev/null @@ -1,366 +0,0 @@ -variable "database_server_count" { - description = "The number of database servers" - default = 1 - type = number -} - -variable "vm_name" { - description = "The name of the Oracle VM" - type = string -} - -# variable "resource_group" { -# description = "Details of the resource group" -# default = {} -# } - - -variable "resource_group_name" { - description = "Created resource group name" - type = string -} - -variable "location" { - description = "The location of the resource" - type = string -} - - - -variable "database" { - description = "Details of the database node" - type = object({ - use_DHCP = string - authentication = object({ - type = string - }) - }) - default = { - use_DHCP = true - authentication = { - type = "key" - } - } -} - -variable "nic_locks" { - type = object({ - name = optional(string, "") - type = optional(string, "CanNotDelete") - }) - default = {} - validation { - condition = contains(["CanNotDelete", "ReadOnly"], var.nic_locks.type) - error_message = "Lock type must be one of: CanNotDelete, ReadOnly." - } -} - -variable "aad_system_assigned_identity" { - description = "AAD system assigned identity" - type = bool -} - -variable "skip_service_principal_aad_check" { - type = bool - description = "If the principal_id is a newly provisioned `Service Principal` set this value to true to skip the Azure Active Directory check which may fail due to replication lag." 
- default = true -} - -variable "storage_account_id" { - description = "Storage account ID used for diagnostics" - type = string - default = null -} - -variable "storage_account_sas_token" { - description = "Storage account SAS token used for diagnostics" - type = string - default = null -} - -variable "log_analytics_workspace" { - type = object({ - id = string - name = string - }) - - description = "Log Analytics workspace" - default = null -} - -variable "eventhub_authorization_rule_id" { - description = "ID of an Event Hub authorization rule" - type = string - default = null -} - -variable "partner_solution_id" { - description = "Value of the partner solution ID" - default = null -} - -variable "is_diagnostic_settings_enabled" { - description = "Whether diagnostic settings are enabled" - default = false -} - -variable "diagnostic_target" { - description = "The destination type of the diagnostic settings" - default = "Log_Analytics_Workspace" - validation { - condition = contains(["Log_Analytics_Workspace", "Storage_Account", "Event_Hubs", "Partner_Solutions"], var.diagnostic_target) - error_message = "Allowed values are Log_Analytics_Workspace, Storage_Account, Event_Hubs, Partner_Solutions" - } -} - -variable "data_collection_rules" { - type = map(object({ - id = string - })) - description = "Data collection rules" - default = {} -} - -# variable "role_assignments" { -# description = "Role assignments" -# default = {} -# } - -variable "role_assignments" { - type = map(object({ - role_definition_id_or_name = string - principal_id = optional(string) - condition = optional(string) - condition_version = optional(string) - description = optional(string) - skip_service_principal_aad_check = optional(bool, true) - delegated_managed_identity_resource_id = optional(string) - } - )) - default = {} -} - -variable "vm_lock" { - type = object({ - name = optional(string, null) - kind = optional(string, "None") - }) - default = {} - description = < { - name = ipconfig.name - private_ip_subnet_resource_id = ipconfig.subnet_id - create_public_ip_address = ipconfig.create_public_ip_address - public_ip_address_resource_id = ipconfig.public_ip_address_resource_id - public_ip_address_name = ipconfig.create_public_ip_address ? "${var.vm_name}-pip" : "" - private_ip_address_allocation = ipconfig.private_ip_address_allocation - is_primary_ipconfiguration = ipconfig.primary - private_ip_address = var.database.use_DHCP ? 
ipconfig.nic_ips[0] : "" - } - } - - # role_assignments_nic_parameter = {for key, value in var.role_assignments_nic : key => { - # principal_id = value.principal_id - # role_definition_id_or_name = value.role_definition_id_or_name - # assign_to_child_public_ip_addresses = true - # skip_service_principal_aad_check = value.skip_service_principal_aad_check - # } - - - - vm_default_config_data = { - "vm-0" = { - name = var.vm_name - os_type = "Linux" - generate_admin_password_or_ssh_key = false - enable_auth_password = local.enable_auth_password - admin_username = var.sid_username - admin_ssh_keys = { - username = var.sid_username - public_key = var.public_key - } - source_image_reference = var.vm_source_image_reference - virtualmachine_sku_size = var.vm_sku - os_disk = var.vm_os_disk - availability_zone = var.availability_zone - enable_telemetry = var.enable_telemetry - user_assigned_identity_id = var.vm_user_assigned_identity_id - role_assignments = var.role_assignments - skip_service_principal_aad_check = var.skip_service_principal_aad_check - - #Network Interfaces - network_interfaces = { - - network_interface_1 = { - name = "oraclevmnic-${var.vm_name}" - location = var.location - resource_group_name = var.resource_group_name - tags = merge(local.tags, var.tags) - accelerated_networking_enabled = true - - ip_configurations = local.network_interface_ipconfigs - - #ToDo: role_assignments_nic_parameter - # role_assignments = { - # role_assignment_1 = { - # role_definition_id_or_name = "Contributor" - # principal_id = data.azurerm_client_config.current.object_id - # skip_service_principal_aad_check = var.skip_service_principal_aad_check - # } - # } - - - } - } - } - } - - - # Variable with the data to create the Oracle VM - vm_config_data_parameter = merge(var.vm_config_data, local.vm_default_config_data) - - -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/vm.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/vm.tf deleted file mode 100644 index 6596fe074..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/vm.tf +++ /dev/null @@ -1,45 +0,0 @@ -######################################################################################### -# # -# Virtual Machine # -# # -######################################################################################### - - -module "avm-res-compute-virtualmachine" { - source = "Azure/avm-res-compute-virtualmachine/azurerm" - version = "0.17.0" - for_each = local.vm_config_data_parameter - - - name = each.value.name - location = var.location - resource_group_name = var.resource_group_name - os_type = each.value.os_type - - generate_admin_password_or_ssh_key = each.value.generate_admin_password_or_ssh_key - disable_password_authentication = !each.value.enable_auth_password #!local.enable_auth_password #should be true - admin_username = each.value.admin_username - admin_ssh_keys = [each.value.admin_ssh_keys] - source_image_reference = each.value.source_image_reference - sku_size = each.value.virtualmachine_sku_size - os_disk = each.value.os_disk - extensions = var.vm_extensions - network_interfaces = each.value.network_interfaces - - - zone = each.value.availability_zone - availability_set_resource_id = var.availability_zone == null ? 
data.azurerm_availability_set.oracle_vm[0].id : null - tags = merge(local.tags, var.tags) - - - - managed_identities = { - system_assigned = var.aad_system_assigned_identity - user_assigned_resource_ids = [each.value.user_assigned_identity_id] - } - - role_assignments = each.value.role_assignments -} - - - diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/diagnostic_settings.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/diagnostic_settings.tf deleted file mode 100644 index 46e6a34d2..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/diagnostic_settings.tf +++ /dev/null @@ -1,145 +0,0 @@ - -#ToDo: Should be replicated on VM Module -# resource "azurerm_monitor_diagnostic_setting" "nic" { -# count = var.is_diagnostic_settings_enabled ? 1 : 0 -# name = "nic-${count.index}-diag" -# target_resource_id = azurerm_network_interface.oracle_db[count.index].id -# storage_account_id = var.diagnostic_target == "Storage_Account" ? var.storage_account_id : null -# log_analytics_workspace_id = var.diagnostic_target == "Log_Analytics_Workspace" ? var.log_analytics_workspace_id : null -# eventhub_authorization_rule_id = var.diagnostic_target == "Event_Hubs" ? var.eventhub_authorization_rule_id : null -# partner_solution_id = var.diagnostic_target == "Partner_Solutions" ? var.partner_solution_id : null - -# metric { -# category = "AllMetrics" -# retention_policy { -# enabled = false -# } -# } -# } - -resource "azurerm_monitor_diagnostic_setting" "nsg" { - count = var.is_diagnostic_settings_enabled ? 1 : 0 - name = "nsg" - target_resource_id = azurerm_network_security_group.blank.id - storage_account_id = var.diagnostic_target == "Storage_Account" ? var.storage_account_id : null - log_analytics_workspace_id = var.diagnostic_target == "Log_Analytics_Workspace" ? var.log_analytics_workspace_id : null - eventhub_authorization_rule_id = var.diagnostic_target == "Event_Hubs" ? var.eventhub_authorization_rule_id : null - partner_solution_id = var.diagnostic_target == "Partner_Solutions" ? var.partner_solution_id : null - - dynamic "enabled_log" { - for_each = toset(data.azurerm_monitor_diagnostic_categories.nsg[count.index].log_category_types) - content { - category = enabled_log.value - retention_policy { - enabled = false - } - } - } -} - -#ToDo: It does not work -# resource "azurerm_monitor_diagnostic_setting" "pip" { -# count = var.is_diagnostic_settings_enabled ? var.is_data_guard ? 2 : 1 : 0 -# name = "pip" -# target_resource_id = azurerm_public_ip.vm_pip[count.index].id -# storage_account_id = var.diagnostic_target == "Storage_Account" ? var.storage_account_id : null -# log_analytics_workspace_id = var.diagnostic_target == "Log_Analytics_Workspace" ? var.log_analytics_workspace_id : null -# eventhub_authorization_rule_id = var.diagnostic_target == "Event_Hubs" ? var.eventhub_authorization_rule_id : null -# partner_solution_id = var.diagnostic_target == "Partner_Solutions" ? 
var.partner_solution_id : null - -# dynamic "enabled_log" { -# for_each = toset(data.azurerm_monitor_diagnostic_categories.pip[count.index].log_category_types) -# content { -# category = enabled_log.value -# retention_policy { -# enabled = false -# } -# } -# } - -# metric { -# category = "AllMetrics" -# retention_policy { -# enabled = false -# } -# } -# } - -resource "azurerm_monitor_diagnostic_setting" "vnet" { - count = var.is_diagnostic_settings_enabled ? 1 : 0 - name = "vnet" - target_resource_id = data.azurerm_virtual_network.vnet_oracle[count.index].id - storage_account_id = var.diagnostic_target == "Storage_Account" ? var.storage_account_id : null - log_analytics_workspace_id = var.diagnostic_target == "Log_Analytics_Workspace" ? var.log_analytics_workspace_id : null - eventhub_authorization_rule_id = var.diagnostic_target == "Event_Hubs" ? var.eventhub_authorization_rule_id : null - partner_solution_id = var.diagnostic_target == "Partner_Solutions" ? var.partner_solution_id : null - - dynamic "enabled_log" { - for_each = toset(data.azurerm_monitor_diagnostic_categories.vnet[count.index].log_category_types) - content { - category = enabled_log.value - retention_policy { - enabled = false - } - } - } - - metric { - category = "AllMetrics" - retention_policy { - enabled = false - } - } -} - -# data "azurerm_monitor_diagnostic_categories" "nic" { -# count = var.is_diagnostic_settings_enabled ? 1 : 0 -# resource_id = data.azurerm_network_interface.nic[count.index].id -# } - -data "azurerm_monitor_diagnostic_categories" "nsg" { - count = var.is_diagnostic_settings_enabled ? 1 : 0 - resource_id = data.azurerm_network_security_group.nsg[count.index].id -} - -data "azurerm_monitor_diagnostic_categories" "pip" { - count = var.is_diagnostic_settings_enabled ? 1 : 0 - resource_id = data.azurerm_public_ip.pip[count.index].id -} - -data "azurerm_monitor_diagnostic_categories" "vnet" { - count = var.is_diagnostic_settings_enabled ? 1 : 0 - resource_id = data.azurerm_virtual_network.vnet[count.index].id -} - -# data "azurerm_network_interface" "nic" { -# count = var.is_data_guard ? 2 : 1 -# name = "oraclevmnic-${count.index}" -# resource_group_name = var.resource_group.name - -# depends_on = [azurerm_network_interface.oracle_db] -# } - -data "azurerm_network_security_group" "nsg" { - count = 1 - name = "blank" - resource_group_name = var.resource_group.name - - depends_on = [azurerm_network_security_group.blank] -} - -data "azurerm_public_ip" "pip" { - count = var.is_data_guard ? 
2 : 1 - name = "vmpip-${count.index}" - resource_group_name = var.resource_group.name - - depends_on = [azurerm_public_ip.vm_pip] -} - -data "azurerm_virtual_network" "vnet" { - count = 1 - name = local.vnet_oracle_name - resource_group_name = var.resource_group.name - - depends_on = [module.vnet] -} \ No newline at end of file diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/nsg.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/nsg.tf deleted file mode 100644 index d1d7dab5b..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/nsg.tf +++ /dev/null @@ -1,26 +0,0 @@ -######################################################################################### -# # -# Network Security Group # -# # -######################################################################################### -resource "azurerm_network_security_group" "blank" { - name = "blank" - location = var.resource_group.location - resource_group_name = var.resource_group.name - - tags = merge(local.tags, var.tags) -} - -resource "azurerm_subnet_network_security_group_association" "ssh" { - subnet_id = data.azurerm_subnet.subnet_oracle[0].id - network_security_group_id = azurerm_network_security_group.blank.id -} - -data "azurerm_network_security_group" "blank" { - name = "blank" - resource_group_name = var.resource_group.name - - depends_on = [azurerm_network_security_group.blank] -} - - diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/outputs.tf deleted file mode 100644 index 49b5ba02a..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/outputs.tf +++ /dev/null @@ -1,20 +0,0 @@ -############################################################################### -# # -# Network # -# # -############################################################################### -output "network_location" { - value = data.azurerm_virtual_network.vnet_oracle[0].location -} - -output "db_subnet" { - value = data.azurerm_subnet.subnet_oracle[0] -} - -output "db_server_puplic_ip" { - value = azurerm_public_ip.vm_pip[0].ip_address -} - -output "db_server_puplic_ip_resources" { - value = azurerm_public_ip.vm_pip -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/pip.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/pip.tf deleted file mode 100644 index 5c9c31d39..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/pip.tf +++ /dev/null @@ -1,24 +0,0 @@ -######################################################################################### -# # -# Public IPs # -# # -######################################################################################### - -resource "azurerm_public_ip" "vm_pip" { - count = var.is_data_guard ? 
2 : 1 - name = "vmpip-${count.index}" - location = var.resource_group.location - resource_group_name = var.resource_group.name - allocation_method = "Static" - sku = "Standard" - - tags = merge(local.tags, var.tags) -} - -data "azurerm_public_ip" "vm_pip" { - count = var.is_data_guard ? 2 : 1 - name = "vmpip-${count.index}" - resource_group_name = var.resource_group.name - - depends_on = [azurerm_public_ip.vm_pip] -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/providers.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/providers.tf deleted file mode 100644 index c9561f7e8..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/providers.tf +++ /dev/null @@ -1,24 +0,0 @@ -terraform { - required_version = ">=1.2" - required_providers { - azurerm = { - source = "hashicorp/azurerm" - version = ">=3.11.0, <4.0" - } - azapi = { - source = "Azure/azapi" - version = "=1.8.0" - } - } -} - -provider "azurerm" { - features { - resource_group { - prevent_deletion_if_contains_resources = true - } - virtual_machine { - delete_os_disk_on_deletion = true - } - } -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/resource_lock.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/resource_lock.tf deleted file mode 100644 index 3e83d1603..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/resource_lock.tf +++ /dev/null @@ -1,27 +0,0 @@ - -resource "azurerm_management_lock" "nsg" { - count = length(var.nsg_locks) > 1 && length(try(var.nsg_locks.name, "")) > 0 ? 1 : 0 - name = var.nsg_locks.name - scope = data.azurerm_network_security_group.blank.id - lock_level = var.nsg_locks.type - - depends_on = [azurerm_network_security_group.blank] -} - -resource "azurerm_management_lock" "vnet" { - count = length(var.vnet_locks) > 1 && length(try(var.vnet_locks.name, "")) > 0 ? 1 : 0 - name = var.vnet_locks.name - scope = data.azurerm_virtual_network.vnet_oracle[0].id - lock_level = var.vnet_locks.type - - depends_on = [data.azurerm_virtual_network.vnet_oracle] -} - -resource "azurerm_management_lock" "subnet" { - count = length(var.subnet_locks) > 1 && length(try(var.subnet_locks.name, "")) > 0 ? 
1 : 0 - name = var.subnet_locks.name - scope = data.azurerm_subnet.subnet_oracle[0].id - lock_level = var.subnet_locks.type - - depends_on = [data.azurerm_subnet.subnet_oracle] -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/role_assignments.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/role_assignments.tf deleted file mode 100644 index 93654a6ee..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/role_assignments.tf +++ /dev/null @@ -1,82 +0,0 @@ -data "azurerm_client_config" "current" {} - -# data "azurerm_role_definition" "nic" { -# for_each = var.role_assignments_nic -# name = each.value.name -# } - -data "azurerm_role_definition" "pip" { - for_each = var.role_assignments_pip - name = each.value.name -} - -data "azurerm_role_definition" "nsg" { - for_each = var.role_assignments_nsg - name = each.value.name -} - -data "azurerm_role_definition" "vnet" { - for_each = var.role_assignments_vnet - name = each.value.name -} - -data "azurerm_role_definition" "subnet" { - for_each = var.role_assignments_subnet - name = each.value.name -} - - -# resource "azurerm_role_assignment" "nic" { -# for_each = var.role_assignments_nic -# role_definition_name = data.azurerm_role_definition.nic[each.key].name -# principal_id = data.azurerm_client_config.current.object_id -# scope = try(each.value.scope, data.azurerm_network_interface.oracle_db[0].id) -# skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false) -# description = try(each.value.description, null) -# condition = try(each.value.condition, null) -# condition_version = try(each.value.condition_version, null) -# } - -resource "azurerm_role_assignment" "pip" { - for_each = var.role_assignments_pip - role_definition_name = data.azurerm_role_definition.pip[each.key].name - principal_id = data.azurerm_client_config.current.object_id - scope = try(each.value.scope, data.azurerm_public_ip.vm_pip[0].id) - skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false) - description = try(each.value.description, null) - condition = try(each.value.condition, null) - condition_version = try(each.value.condition_version, null) -} - -resource "azurerm_role_assignment" "nsg" { - for_each = var.role_assignments_nsg - role_definition_name = data.azurerm_role_definition.nsg[each.key].name - principal_id = data.azurerm_client_config.current.object_id - scope = try(each.value.scope, data.azurerm_network_security_group.blank.id) - skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false) - description = try(each.value.description, null) - condition = try(each.value.condition, null) - condition_version = try(each.value.condition_version, null) -} - -resource "azurerm_role_assignment" "vnet" { - for_each = var.role_assignments_vnet - role_definition_name = data.azurerm_role_definition.vnet[each.key].name - principal_id = data.azurerm_client_config.current.object_id - scope = try(each.value.scope, data.azurerm_virtual_network.vnet_oracle[0].id) - skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false) - description = try(each.value.description, null) - condition = try(each.value.condition, null) - condition_version = try(each.value.condition_version, null) -} - -resource "azurerm_role_assignment" "subnet" { - 
for_each = var.role_assignments_subnet - role_definition_name = data.azurerm_role_definition.subnet[each.key].name - principal_id = data.azurerm_client_config.current.object_id - scope = try(each.value.scope, data.azurerm_subnet.subnet_oracle[0].id) - skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false) - description = try(each.value.description, null) - condition = try(each.value.condition, null) - condition_version = try(each.value.condition_version, null) -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_global.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_global.tf deleted file mode 100644 index 12a43dedb..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_global.tf +++ /dev/null @@ -1,128 +0,0 @@ -variable "resource_group" { - type = object({ - name = string - location = string - id = string - }) - description = "Details of the resource group" - default = null -} - -variable "diagnostic_target" { - type = string - description = "The destination type of the diagnostic settings" - default = "Log_Analytics_Workspace" - validation { - condition = contains(["Log_Analytics_Workspace", "Storage_Account", "Event_Hubs", "Partner_Solutions"], var.diagnostic_target) - error_message = "Allowed values are Log_Analytics_Workspace, Storage_Account, Event_Hubs, Partner_Solutions" - } -} - -variable "storage_account_id" { - description = "Storage account ID used for diagnostics" - type = string - default = null -} - -variable "log_analytics_workspace_id" { - description = "Log Analytics workspace ID" - type = string - default = null -} - -variable "eventhub_authorization_rule_id" { - description = "ID of an Event Hub authorization rule" - type = string - default = null -} - -variable "partner_solution_id" { - type = string - description = "Value of the partner solution ID" - default = null -} - -variable "is_diagnostic_settings_enabled" { - type = bool - description = "Whether diagnostic settings are enabled" - default = false -} - -variable "role_assignments_pip" { - type = map(object({ - name = string - })) - description = "Role assignments scoped to the public IP address" -} - -variable "role_assignments_nsg" { - type = map(object({ - name = string - })) - description = "Role assignments scoped to the network security group" - default = {} -} - -variable "role_assignments_vnet" { - type = map(object({ - name = string - })) - description = "Role assignments scoped to the virtual network" - default = {} -} - -variable "role_assignments_subnet" { - type = map(object({ - name = string - })) - description = "Role assignments scoped to the subnet" - default = {} -} - -variable "nsg_locks" { - type = object({ - name = optional(string, "") - type = optional(string, "CanNotDelete") - }) - default = {} - validation { - condition = contains(["CanNotDelete", "ReadOnly"], var.nsg_locks.type) - error_message = "Lock type must be one of: CanNotDelete, ReadOnly." - } -} - -variable "vnet_locks" { - type = object({ - name = optional(string, "") - type = optional(string, "CanNotDelete") - }) - default = {} - validation { - condition = contains(["CanNotDelete", "ReadOnly"], var.vnet_locks.type) - error_message = "Lock type must be one of: CanNotDelete, ReadOnly." 
- } -} - -variable "subnet_locks" { - type = object({ - name = optional(string, "") - type = optional(string, "CanNotDelete") - }) - default = {} - validation { - condition = contains(["CanNotDelete", "ReadOnly"], var.subnet_locks.type) - error_message = "Lock type must be one of: CanNotDelete, ReadOnly." - } -} - -variable "is_data_guard" { - type = bool - description = "Whether Data Guard is enabled" - default = false -} - -variable "tags" { - type = map(any) - description = "Tags to be added to the resources" - default = {} -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_local.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_local.tf deleted file mode 100644 index 1d32d1016..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_local.tf +++ /dev/null @@ -1,13 +0,0 @@ -locals { - vnet_oracle_name = "vnet1" - database_subnet_name = "subnet1" - vnet_oracle_addr = "10.0.0.0/16" - database_subnet_prefix = "10.0.0.0/24" - - vnet_oracle_arm_id = try(local.vnet_oracle_name.arm_id, "") - vnet_oracle_exists = length(local.vnet_oracle_arm_id) > 0 - subnet_oracle_arm_id = try(local.database_subnet_name.arm_id, "") - subnet_oracle_exists = length(local.subnet_oracle_arm_id) > 0 - - tags = {} -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/vnet_main.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/vnet_main.tf deleted file mode 100644 index 9c7548304..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/vnet_main.tf +++ /dev/null @@ -1,37 +0,0 @@ -module "vnet" { - source = "Azure/avm-res-network-virtualnetwork/azurerm" - version = "0.1.3" - - resource_group_name = var.resource_group.name - vnet_location = var.resource_group.location - vnet_name = local.vnet_oracle_name - virtual_network_address_space = [local.vnet_oracle_addr] - subnets = { - subnet1 = { - address_prefixes = [local.database_subnet_prefix] - azurerm_network_security_group = { - id = azurerm_network_security_group.blank.id - } - } - } - - tags = merge(local.tags, var.tags) -} - - -data "azurerm_virtual_network" "vnet_oracle" { - count = local.vnet_oracle_exists ? 0 : 1 - name = local.vnet_oracle_name - resource_group_name = var.resource_group.name - - depends_on = [module.vnet] -} - -data "azurerm_subnet" "subnet_oracle" { - count = local.subnet_oracle_exists ? 
0 : 1 - name = local.database_subnet_name - resource_group_name = var.resource_group.name - virtual_network_name = data.azurerm_virtual_network.vnet_oracle[count.index].name - - depends_on = [module.vnet] -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/data_disk.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/data_disk.tf deleted file mode 100644 index 18d437046..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/data_disk.tf +++ /dev/null @@ -1,83 +0,0 @@ -resource "azurerm_managed_disk" "data_disk" { - count = length(local.data_disks) - name = "${var.naming}-data-${count.index}" - location = var.resource_group.location - resource_group_name = var.resource_group.name - storage_account_type = var.disk_type - create_option = local.data_disks[count.index].create_option - disk_size_gb = local.data_disks[count.index].disk_size_gb - zone = var.availability_zone - - tags = merge(local.tags, var.tags) -} - -resource "azurerm_managed_disk" "asm_disk" { - count = length(local.asm_disks) - name = "${var.naming}-asm-${count.index}" - location = var.resource_group.location - resource_group_name = var.resource_group.name - storage_account_type = var.disk_type - create_option = local.asm_disks[count.index].create_option - disk_size_gb = local.asm_disks[count.index].disk_size_gb - zone = var.availability_zone - - tags = merge(local.tags, var.tags) -} - -resource "azurerm_managed_disk" "redo_disk" { - count = length(local.redo_disks) - name = "${var.naming}-redo-${count.index}" - location = var.resource_group.location - resource_group_name = var.resource_group.name - storage_account_type = var.disk_type - create_option = local.redo_disks[count.index].create_option - disk_size_gb = local.redo_disks[count.index].disk_size_gb - zone = var.availability_zone - - tags = merge(local.tags, var.tags) -} - -resource "azurerm_virtual_machine_data_disk_attachment" "data_disk_attachment" { - count = length(local.data_disks) - managed_disk_id = azurerm_managed_disk.data_disk[count.index].id - virtual_machine_id = var.vm.id - caching = local.data_disks[count.index].caching - write_accelerator_enabled = local.data_disks[count.index].write_accelerator_enabled - lun = local.data_disks[count.index].lun -} - -resource "azurerm_virtual_machine_data_disk_attachment" "asm_disk_attachment" { - count = length(local.asm_disks) - managed_disk_id = azurerm_managed_disk.asm_disk[count.index].id - virtual_machine_id = var.vm.id - caching = local.asm_disks[count.index].caching - write_accelerator_enabled = local.asm_disks[count.index].write_accelerator_enabled - lun = local.asm_disks[count.index].lun -} - -resource "azurerm_virtual_machine_data_disk_attachment" "redo_disk_attachment" { - count = length(local.redo_disks) - managed_disk_id = azurerm_managed_disk.redo_disk[count.index].id - virtual_machine_id = var.vm.id - caching = local.redo_disks[count.index].caching - write_accelerator_enabled = local.redo_disks[count.index].write_accelerator_enabled - lun = local.redo_disks[count.index].lun -} - -data "azurerm_managed_disk" "data_disk" { - count = length(local.data_disks) - name = azurerm_managed_disk.data_disk[count.index].name - resource_group_name = var.resource_group.name -} - -data "azurerm_managed_disk" "asm_disk" { - count = length(local.asm_disks) - name = 
azurerm_managed_disk.asm_disk[count.index].name - resource_group_name = var.resource_group.name -} - -data "azurerm_managed_disk" "redo_disk" { - count = length(local.redo_disks) - name = azurerm_managed_disk.redo_disk[count.index].name - resource_group_name = var.resource_group.name -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/outputs.tf deleted file mode 100644 index 5a50b7284..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/outputs.tf +++ /dev/null @@ -1,24 +0,0 @@ -output "data_disks" { - value = local.data_disks -} - -output "asm_disks" { - value = local.asm_disks -} - -output "redo_disks" { - value = local.redo_disks -} - - -output "data_disks_resource" { - value = data.azurerm_managed_disk.data_disk -} - -output "asm_disks_resource" { - value = data.azurerm_managed_disk.asm_disk -} - -output "redo_disks_resource" { - value = data.azurerm_managed_disk.redo_disk -} \ No newline at end of file diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/providers.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/providers.tf deleted file mode 100644 index c9561f7e8..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/providers.tf +++ /dev/null @@ -1,24 +0,0 @@ -terraform { - required_version = ">=1.2" - required_providers { - azurerm = { - source = "hashicorp/azurerm" - version = ">=3.11.0, <4.0" - } - azapi = { - source = "Azure/azapi" - version = "=1.8.0" - } - } -} - -provider "azurerm" { - features { - resource_group { - prevent_deletion_if_contains_resources = true - } - virtual_machine { - delete_os_disk_on_deletion = true - } - } -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/resource_lock.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/resource_lock.tf deleted file mode 100644 index cb4c1be3f..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/resource_lock.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "azurerm_management_lock" "data_disk" { - count = length(var.data_disk_locks) > 1 && length(try(var.data_disk_locks.name, "")) > 1 ? 
1 : 0 - name = var.data_disk_locks.name - scope = data.azurerm_managed_disk.data_disk[0].id - lock_level = var.data_disk_locks.type - - depends_on = [azurerm_managed_disk.data_disk] -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/role_assignments.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/role_assignments.tf deleted file mode 100644 index d7aff7956..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/role_assignments.tf +++ /dev/null @@ -1,17 +0,0 @@ -data "azurerm_client_config" "current" {} - -data "azurerm_role_definition" "builtin" { - for_each = var.role_assignments - name = each.value.name -} - -resource "azurerm_role_assignment" "assignment" { - for_each = var.role_assignments - role_definition_name = data.azurerm_role_definition.builtin[each.key].name - principal_id = data.azurerm_client_config.current.object_id - scope = try(each.value.scope, data.azurerm_managed_disk.data_disk[0].id) - skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false) - description = try(each.value.description, null) - condition = try(each.value.condition, null) - condition_version = try(each.value.condition_version, null) -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_global.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_global.tf deleted file mode 100644 index 69ee40511..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_global.tf +++ /dev/null @@ -1,92 +0,0 @@ -variable "naming" { - description = "Defines the names for the resources" -} - -variable "vm" { - description = "Virtual machine name" -} - -variable "resource_group" { - description = "Details of the resource group" - default = {} -} - -variable "disk_type" { - description = "The type of the storage account" - default = "Premium_LRS" - validation { - condition = contains(["Standard_LRS", "StandardSSD_ZRS", "Premium_LRS", "PremiumV2_LRS", "Premium_ZRS", "StandardSSD_LRS", "UltraSSD_LRS"], var.disk_type) - error_message = "Allowed values are Standard_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS, StandardSSD_LRS, UltraSSD_LRS" - } -} - -variable "database_disks_options" { - description = "Details of the database node" - default = { - data_disks = [ - { - count = 1 - caching = "ReadOnly" - create_option = "Empty" - disk_size_gb = 1024 - lun = 20 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - } - ], - asm_disks = [ - { - count = 1 - caching = "ReadOnly" - create_option = "Empty" - disk_size_gb = 1024 - lun = 10 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - } - ] - redo_disks = [ - { - count = 1 - caching = "None" - create_option = "Empty" - disk_size_gb = 1024 - lun = 60 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - } - ] - } -} - -variable "role_assignments" { - description = "Role assignments" - default = {} -} - -variable "data_disk_locks" { - type = object({ - name = optional(string, "") - type = optional(string, "CanNotDelete") - }) - default = {} - validation { - condition = contains(["CanNotDelete", "ReadOnly"], var.data_disk_locks.type) - 
error_message = "Lock type must be one of: CanNotDelete, ReadOnly." - } -} - -variable "availability_zone" { - description = "The availability zones of the resource" - default = null -} - -variable "is_data_guard" { - description = "Whether Data Guard is enabled" - default = false -} - -variable "tags" { - description = "Tags to be added to the resources" - default = {} -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_local.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_local.tf deleted file mode 100644 index 6a03a5998..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_local.tf +++ /dev/null @@ -1,51 +0,0 @@ -locals { - data_disks = flatten( - [ - for disk in var.database_disks_options.data_disks : [ - for i in range(0, disk.count) : { - name = "${var.vm.name}-datadisk${i}" - caching = disk.caching - create_option = disk.create_option - disk_size_gb = disk.disk_size_gb - lun = disk.lun + i - managed_disk_type = disk.disk_type - storage_account_type = disk.disk_type - write_accelerator_enabled = disk.write_accelerator_enabled - } - ] - ] - ) - asm_disks = flatten( - [ - for disk in var.database_disks_options.asm_disks : [ - for i in range(0, disk.count) : { - name = "${var.vm.name}-asmdisk${i}" - caching = disk.caching - create_option = disk.create_option - disk_size_gb = disk.disk_size_gb - lun = disk.lun + i - managed_disk_type = disk.disk_type - storage_account_type = disk.disk_type - write_accelerator_enabled = disk.write_accelerator_enabled - } - ] - ] - ) - redo_disks = flatten( - [ - for disk in var.database_disks_options.redo_disks : [ - for i in range(0, disk.count) : { - name = "${var.vm.name}-redodisk${i}" - caching = disk.caching - create_option = disk.create_option - disk_size_gb = disk.disk_size_gb - lun = disk.lun + i - managed_disk_type = disk.disk_type - storage_account_type = disk.disk_type - write_accelerator_enabled = disk.write_accelerator_enabled - } - ] - ] - ) - tags = {} -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/outputs.tf deleted file mode 100644 index 2c94f297a..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/outputs.tf +++ /dev/null @@ -1,70 +0,0 @@ -# ############################################################################### -# # # -# # Resource Group # -# # # -# ############################################################################### -output "resource_group" { - value = module.common_infrastructure.resource_group -} - -# output "created_resource_group_id" { -# description = "Created resource group ID" -# value = module.common_infrastructure.resource_group.id -# } - -# output "created_resource_group_name" { -# description = "Created resource group name" -# value = module.common_infrastructure.resource_group.name -# } - -# output "created_resource_group_subscription_id" { -# description = "Created resource group' subscription ID" -# value = module.common_infrastructure.resource_group.id -# } - -# output "created_resource_group_tags" { -# description = "Created resource group tags" -# value = module.common_infrastructure.tags -# } - -# 
############################################################################### -# # # -# # Network # -# # # -# ############################################################################### -# output "network_location" { -# value = module.network.network_location -# } - -# output "db_subnet" { -# value = module.network.db_subnet -# } - -# ############################################################################### -# # # -# # Storage # -# # # -# ############################################################################### -# output "database_data_disks_primary" { -# value = module.storage_primary.data_disks -# } - -# output "database_asm_disks_primary" { -# value = module.storage_primary.asm_disks -# } - -# output "database_redo_disks_primary" { -# value = module.storage_primary.redo_disks -# } - -# output "database_data_disks_secondary" { -# value = module.storage_secondary.data_disks -# } - -# output "database_asm_disks_secondary" { -# value = module.storage_secondary.asm_disks -# } - -# output "database_redo_disks_secondary" { -# value = module.storage_secondary.redo_disks -# } diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/providers.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/providers.tf deleted file mode 100644 index a6c69adcb..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/providers.tf +++ /dev/null @@ -1,29 +0,0 @@ -terraform { - required_version = ">=1.7.0" - required_providers { - azurerm = { - source = "hashicorp/azurerm" - version = ">=3.11.0, <4.0" - } - azapi = { - source = "Azure/azapi" - version = ">=1.8.0" - } - } -} - -provider "azurerm" { - skip_provider_registration = true - features { - resource_group { - prevent_deletion_if_contains_resources = true - } - virtual_machine { - delete_os_disk_on_deletion = true - } - } -} - -provider "azapi" { - use_oidc = true -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/resources.telemetry.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/resources.telemetry.tf deleted file mode 100644 index 0a320c3cc..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/resources.telemetry.tf +++ /dev/null @@ -1,15 +0,0 @@ -# The following random id is created once per module instantiation and is appended to the teleletry deployment name -resource "random_id" "telem" { - count = local.disable_telemetry ? 0 : 1 - byte_length = 4 -} - -# This is the core module telemetry deployment that is only created if telemetry is enabled. -# It is deployed to the default subscription -resource "azurerm_subscription_template_deployment" "telemetry_core" { - count = local.telem_core_deployment_enabled ? 
1 : 0 - provider = azurerm - name = local.telem_core_arm_deployment_name - location = var.location - template_content = local.telem_arm_subscription_template_content -} \ No newline at end of file diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/transform.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/transform.tf deleted file mode 100644 index 17780f880..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/transform.tf +++ /dev/null @@ -1,39 +0,0 @@ -locals { - infrastructure = { - region = coalesce(var.location, try(var.infrastructure.region, "")) - resource_group = { - name = try( - coalesce( - var.resourcegroup_name, - try(var.infrastructure.resource_group.name, "") - ), - "" - ) - } - vnet = { - name = try( - coalesce( - local.vnet_oracle_name, - try(var.infrastructure.vnet.name, "") - ), - "" - ) - } - subnet = { - name = try( - coalesce( - local.database_subnet_name, - try(var.infrastructure.subnet.name, "") - ), - "" - ) - } - tags = try( - coalesce( - var.resourcegroup_tags, - try(var.infrastructure.tags, {}) - ), - {} - ) - } -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables.md deleted file mode 100644 index 8bc43b1eb..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables.md +++ /dev/null @@ -1,173 +0,0 @@ -# Terraform Variable Explanations - -1. **[Common Parameters](#common-parameters)** - - - [`location`](#location) - - [`resourcegroup_name`](#resourcegroup_name) - - [`resourcegroup_tags`](#resourcegroup_tags) - - [`is_diagnostic_settings_enabled`](#is_diagnostic_settings_enabled) - - [`diagnostic_target`](#diagnostic_target) - - [`infrastructure`](#infrastructure) - -2. **[Virtual Machine Parameters](#virtual-machine-parameters)** - - - [`ssh_key`](#ssh_key) - - [`vm_sku`](#vm_sku) - - [`vm_source_image_reference`](#vm_source_image_reference) - - [`vm_os_disk`](#vm_os_disk) - -3. **[Database Parameters](#database-parameters)** - - [`database`](#database) - - [`database_disks_options`](#database_disks_options) - - [`database_db_nic_ips`](#database_db_nic_ips) - -### `location` - -- **Description:** Defines the Azure location where the resources will be deployed. -- **Type:** String -- **Default Value:** "eastus" - -### `resourcegroup_name` - -- **Description:** If defined, this variable specifies the name of the resource group into which the resources will be deployed. -- **Default Value:** "" - -### `resourcegroup_tags` - -- **Description:** Tags to be added to the resource group. -- **Default Value:** {} - -### `is_diagnostic_settings_enabled` - -- **Description:** Whether diagnostic settings are enabled. -- **Default Value:** false - -### `diagnostic_target` - -- **Description:** The destination type of the diagnostic settings. Allowed values are "Log_Analytics_Workspace," "Storage_Account," "Event_Hubs," or "Partner_Solutions." -- **Default Value:** "Log_Analytics_Workspace" - -### `infrastructure` - -- **Description:** Details of the Azure infrastructure to deploy the SAP landscape into. 
-- **Default Value:** {} - -## Virtual Machine Parameters - -### `ssh_key` - -- **Description:** Value of the SSH public key to be used for the virtual machines. - -### `vm_sku` - -- **Description:** The SKU of the virtual machine. -- **Default Value:** "Standard_D4s_v3" - -### `vm_source_image_reference` - -- **Description:** The source image reference of the virtual machine. -- **Default Value:** - ```hcl - { - publisher = "Oracle" - offer = "Oracle-Linux" - sku = "79-gen2" - version = "7.9.36" - } - ``` - -### `vm_os_disk` - -- **Description:** Details of the OS disk, including name, caching, storage account type, disk encryption set, and disk size. -- **Default Value:** - ```hcl - { - name = "osdisk" - caching = "ReadWrite" - storage_account_type = "Premium_LRS" - disk_encryption_set_id = null - disk_size_gb = 128 - } - ``` - -## Database Parameters - -### `database` - -- **Description:** Details of the database node, including options such as DHCP, authentication type, and data disks. -- **Default Value:** - ```hcl - { - use_DHCP = true - authentication = { - type = "key" - } - data_disks = [ - { - count = 1 - caching = "ReadOnly" - create_option = "Empty" - disk_size_gb = 1024 - lun = 0 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - }, - { - count = 1 - caching = "None" - create_option = "Empty" - disk_size_gb = 1024 - lun = 1 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - } - ] - } - ``` - -### `database_disks_options` - -- **Description:** Details of the database node's disk options, including data disks, ASM disks, and redo disks. -- **Default Value:** - ```hcl - { - data_disks = [ - { - count = 1 - caching = "ReadOnly" - create_option = "Empty" - disk_size_gb = 1024 - lun = 20 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - } - ], - asm_disks = [ - { - count = 1 - caching = "ReadOnly" - create_option = "Empty" - disk_size_gb = 1024 - lun = 10 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - } - ] - redo_disks = [ - { - count = 1 - caching = "None" - create_option = "Empty" - disk_size_gb = 1024 - lun = 60 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - } - ] - } - ``` - -### `database_db_nic_ips` - -- **Description:** If provided, the database tier virtual machines will be configured using the specified IPs. 
-- **Default Value:** [""] diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_global.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_global.tf deleted file mode 100644 index 0843270a1..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_global.tf +++ /dev/null @@ -1,184 +0,0 @@ -######################################################################################### -# Common parameters # -######################################################################################### -variable "location" { - description = "Defines the Azure location where the resources will be deployed" - type = string - default = "germanywestcentral" -} - -variable "resourcegroup_name" { - description = "If defined, the name of the resource group into which the resources will be deployed" - default = "rg-mh-oracle4" -} - -variable "resourcegroup_tags" { - description = "tags to be added to the resource group" - default = {} -} - -variable "is_diagnostic_settings_enabled" { - description = "Whether diagnostic settings are enabled" - default = false -} - -variable "diagnostic_target" { - description = "The destination type of the diagnostic settings" - default = "Log_Analytics_Workspace" - validation { - condition = contains(["Log_Analytics_Workspace", "Storage_Account", "Event_Hubs", "Partner_Solutions"], var.diagnostic_target) - error_message = "Allowed values are Log_Analytics_Workspace, Storage_Account, Event_Hubs, Partner_Solutions" - } -} - -variable "infrastructure" { - description = "Details of the Azure infrastructure to deploy the SAP landscape into" - default = {} -} - -variable "disable_telemetry" { - type = bool - description = "If set to true, will disable telemetry for the module. See https://aka.ms/alz-terraform-module-telemetry." 
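As a sketch of how a deployer might override the root `data_guard` defaults documented above, the following hypothetical `terraform.tfvars` supplies the inputs that appear to have no defaults in the variable declarations (`ssh_key`, `vm_user_assigned_identity_id`) plus a few optional overrides. Every value is a placeholder; tfvars files can hold sensitive values and are normally kept out of source control.

```hcl
# terraform.tfvars (sample only - all values are placeholders)
location           = "germanywestcentral"
resourcegroup_name = "rg-mh-oracle-demo"
resourcegroup_tags = { owner = "microhack" }

# Inputs without defaults
ssh_key                      = "ssh-rsa AAAA... user@example"
vm_user_assigned_identity_id = "/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/<identity-name>"

# Optional overrides
vm_sku            = "Standard_D4s_v5"
disable_telemetry = true
```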
- default = false -} -######################################################################################### -# Virtual Machine parameters # -######################################################################################### -variable "ssh_key" { - description = "value of the ssh public key to be used for the virtual machines" -} - -variable "vm_sku" { - description = "The SKU of the virtual machine" - default = "Standard_D4s_v5" -} - -variable "vm_source_image_reference" { - description = "The source image reference of the virtual machine" - default = { - publisher = "Oracle" - offer = "oracle-database-19-3" - sku = "oracle-database-19-0904" - version = "latest" - } -} - -variable "vm_os_disk" { - description = "Details of the OS disk" - default = { - name = "osdisk" - caching = "ReadWrite" - storage_account_type = "Premium_LRS" - disk_encryption_set_id = null - disk_size_gb = 128 - } -} - -variable "vm_user_assigned_identity_id" { - description = "The ID of the user assigned identity to be used for the virtual machine" -} - -variable "jit_wait_for_vm_creation" { - description = "The duration to wait for the virtual machine to be created before creating the JIT policy" - default = "60s" -} - -variable "vm_extensions" { - description = "The extensions to be added to the virtual machine" - type = map(object({ - name = string - publisher = string - type = string - type_handler_version = string - auto_upgrade_minor_version = optional(bool) - automatic_upgrade_enabled = optional(bool) - failure_suppression_enabled = optional(bool, false) - settings = optional(string) - protected_settings = optional(string) - provision_after_extensions = optional(list(string), []) - tags = optional(map(any)) - protected_settings_from_key_vault = optional(object({ - secret_url = string - source_vault_id = string - })) - })) - default = {} -} - - -######################################################################################### -# Database parameters # -######################################################################################### -variable "database" { - description = "Details of the database node" - default = { - use_DHCP = true - authentication = { - type = "key" - } - data_disks = [ - { - count = 1 - caching = "ReadOnly" - create_option = "Empty" - disk_size_gb = 1024 - lun = 0 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - }, - { - count = 1 - caching = "None" - create_option = "Empty" - disk_size_gb = 1024 - lun = 1 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - } - ] - } -} - -variable "database_disks_options" { - description = "Details of the database node" - default = { - data_disks = [ - { - count = 1 - caching = "ReadOnly" - create_option = "Empty" - disk_size_gb = 1024 - lun = 1 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - } - ], - asm_disks = [ - { - count = 1 - caching = "ReadOnly" - create_option = "Empty" - disk_size_gb = 1024 - lun = 0 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - } - ] - redo_disks = [ - { - count = 1 - caching = "None" - create_option = "Empty" - disk_size_gb = 1024 - lun = 2 - disk_type = "Premium_LRS" - write_accelerator_enabled = false - } - ] - } -} - -variable "database_db_nic_ips" { - description = "If provided, the database tier virtual machines will be configured using the specified IPs" - default = [""] -} diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_local.tf 
b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_local.tf deleted file mode 100644 index 124d5e8e0..000000000 --- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_local.tf +++ /dev/null @@ -1,46 +0,0 @@ -locals { - vnet_oracle_name = "vnet1" - database_subnet_name = "subnet1" - disable_telemetry = var.disable_telemetry - telem_core_puid = "440d81eb-6657-4a7d-ad93-c7e9cc09e5da" - empty_string = "" - telem_random_hex = can(random_id.telem[0].hex) ? random_id.telem[0].hex : local.empty_string -} - - -# This constructs the ARM deployment name that is used for the telemetry. -# We shouldn't ever hit the 64 character limit but use substr just in case -locals { - telem_core_arm_deployment_name = substr( - format( - "pid-%s_%s", - local.telem_core_puid, - local.telem_random_hex, - ), - 0, - 64 - ) -} - -locals { - telem_arm_subscription_template_content = <
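The locals above assemble the telemetry ARM deployment name from the module PUID and the random hex suffix, trimming to 64 characters as the original comment notes; the heredoc holding the ARM template body is truncated in this diff and is not reconstructed here. The snippet below is a self-contained sketch of just that naming logic, with a placeholder standing in for the generated `random_id` value.

```hcl
# Sketch of the telemetry deployment-name construction shown above
# (the ARM template heredoc itself is truncated in the diff and omitted).
locals {
  example_puid       = "440d81eb-6657-4a7d-ad93-c7e9cc09e5da"
  example_random_hex = "a1b2c3d4" # stands in for random_id.telem[0].hex (4 bytes -> 8 hex chars)

  # format() yields "pid-<puid>_<hex>"; substr() guards the 64-character
  # limit on Azure deployment names.
  example_deployment_name = substr(
    format("pid-%s_%s", local.example_puid, local.example_random_hex),
    0,
    64
  )
}

output "example_deployment_name" {
  # "pid-440d81eb-6657-4a7d-ad93-c7e9cc09e5da_a1b2c3d4" (49 chars, so substr is a no-op)
  value = local.example_deployment_name
}
```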