diff --git a/03-Azure/01-03-Infrastructure/05_Azure_VMware_Solution/Lab/info/AVS Labs Credentials - Example.xlsx b/03-Azure/01-03-Infrastructure/05_Azure_VMware_Solution/Lab/info/AVS Labs Credentials - Example.xlsx
index 3393264f1..cf15bbc18 100644
Binary files a/03-Azure/01-03-Infrastructure/05_Azure_VMware_Solution/Lab/info/AVS Labs Credentials - Example.xlsx and b/03-Azure/01-03-Infrastructure/05_Azure_VMware_Solution/Lab/info/AVS Labs Credentials - Example.xlsx differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/.gitignore b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/.gitignore
index 17f186751..ca0a3c7c5 100644
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/.gitignore
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/.gitignore
@@ -1 +1,77 @@
-NOTES.md
\ No newline at end of file
+# these contain sensitive data
+misc/
+**/misc/
+NOTES.md
+
+gghack.yaml
+ggfabric.yaml
+adbping-job.yaml
+connping-job.yaml
+resources/infra/terraform/user_credentials.json
+resources/infra/terraform/mhodaa-sp-credentials.json
+resources/infra/terraform/*-sp-credentials.json
+resources/infra/terraform/user-photos/
+resources/gg-bigdata-build
+resources/scripts/adbping.sh
+
+# ignore helm chart temp folder
+resources/infra/terraform/.helm/
+
+# Exclude the Terraform files and folders that should not be committed to git following best practices, located under the resources folder and all subfolders
+resources/**/terraform.tfstate
+resources/**/terraform.tfstate.backup
+resources/**/.terraform
+resources/**/crash.log
+resources/**/override.tf
+resources/**/terraform.tfvars
+resources/**/terraform.tfvars.json
+resources/**/terraform.rc
+resources/**/terraform.d
+# resources/**/terraform
+# resources/**/modules
+resources/**/providers
+resources/**/workspace
+
+# Terraform Files
+resources/**/*.tfstate
+resources/**/*.tfstate.*
+resources/**/*.tfplan
+resources/**/*.tfplan.*
+resources/**/.terraform/
+resources/**/.terraform.lock.hcl
+
+# Variable files (may contain sensitive data)
+resources/**/terraform.tfvars
+resources/**/*.auto.tfvars
+resources/**/*.auto.tfvars.json
+
+# Override files
+resources/**/override.tf
+resources/**/override.tf.json
+resources/**/*_override.tf
+resources/**/*_override.tf.json
+
+# CLI configuration files
+resources/**/.terraformrc
+resources/**/terraform.rc
+
+# IDE files
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# Log files
+resources/**/*.log
+resources/**/crash.log
+resources/**/crash.*.log
+
+# Ignore any .tfvars files that are generated automatically
+resources/**/**/*.auto.tfvars
+
+# Exclude Oracle GoldenGate BigData Docker images and extracted files
+resources/gg-bigdata-build/V1043090-01.zip
+resources/gg-bigdata-build/extracted/
+resources/gg-bigdata-build/oracle-docker-images/
+resources/gg-bigdata-build/oracle-docker-images/.git*
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/Challenges/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/Challenges/README.md
deleted file mode 100644
index e69de29bb..000000000
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/README.md
index eae4267cb..2828fe44b 100644
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/README.md
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/README.md
@@ -1,49 +1,455 @@
-# Microhack - Intro To Oracle DB Migration to Azure
-## Important Notice
+# Microhack - Oracle Database @ Azure (ODAA)
-This project is currently under development and is subject to change until the first official release, which is expected by the end of 2024. Please note that all content, including instructions and resources, may be updated or modified as the project progresses.
+## Introduction
+This intro-level microhack (hackathon) helps you gain hands-on experience with Oracle Database@Azure (ODAA).
-## Introduction
+### What is Oracle Database@Azure?
+Oracle Database@Azure (ODAA) is the joint Oracle and Microsoft managed service that delivers Oracle database services (see the [ODAA deployed Azure regions](https://apexadb.oracle.com/ords/r/dbexpert/multicloud-capabilities/multicloud-regions?session=412943632928469)) running on Oracle infrastructure colocated in Azure regions, while exposing native Azure management, networking, billing, and integration with Azure Key Vault, Entra ID, and Azure Sentinel. This microhack targets the first-tier partner solution play focused on Autonomous Database, because Microsoft designates ODAA as a strategic, co-sell priority workload; the exercises give partner architects the end-to-end skills (subscription linking, delegated networking, hybrid connectivity, and performance validation) needed to confidently deliver that priority scenario for customers with Oracle-related workloads in Azure.
-This intro level microhack (hackathon) will help you get hands-on experience migrating Oracle databases from on-premises to different Azure Services.
+### What You Will Learn in the Microhack
+You will learn how to create and configure an Autonomous Database Shared instance, one of the offered Oracle Database@Azure services; how to deploy the Autonomous Database instance inside an Azure delegated subnet; how to update network security group (NSG) and DNS settings to enable connectivity from a simulated on-premises environment; and how to measure network performance to the Oracle Autonomous Database instance. To make the microhack more realistic, we deploy the application layer (AKS) and the data layer (ODAA) in two different subscriptions to simulate a hub-and-spoke architecture. The following picture shows the high-level architecture of the microhack.
+
+
+
+Furthermore, we will address the integration of ODAA with existing Azure-native services, how to use GoldenGate for migrations to ODAA, and the integration with Azure Fabric.
+
+
+## What is VNet Peering?
+
+In our deployed scenario, we created a VNet peering in advance between the AKS VNet and the ADB VNet; this is required so that the Kubernetes workloads can communicate privately and directly with the database.
+
+### Architecture Diagram
+
+The following diagram shows how VNet peering connects the AKS cluster to the Oracle Autonomous Database:
+
+```mermaid
+flowchart TB
+ subgraph AKS_SUB[Azure Subscription AKS]
+ subgraph AKS_RG[Resource Group: aks-userXX]
+ subgraph AKS_VNET[VNet: aks-userXX 10.0.0.0/16]
+ subgraph AKS_SUBNET[Subnet: aks 10.0.0.0/23]
+ AKS[AKS Cluster]
+ end
+ DNS[Private DNS Zones]
+ end
+ end
+ end
+
+ subgraph ODAA_SUB[Azure Subscription ODAA]
+ subgraph ODAA_RG[Resource Group: odaa-userXX]
+ subgraph ODAA_VNET[VNet: odaa-userXX 192.168.0.0/16]
+ subgraph ODAA_SUBNET[Delegated Subnet 192.168.0.0/24]
+ ADB[Oracle Autonomous Database]
+ end
+ end
+ NSG[NSG: Allow 10.0.0.0/16]
+ end
+ end
+
+ AKS_VNET <-->|VNet Peering| ODAA_VNET
+ AKS -.->|SQL Queries| ADB
+ DNS -.->|Resolves hostname| ADB
+
+ style AKS_SUB fill:#0078D4,color:#fff
+ style AKS_RG fill:#50E6FF,color:#000
+ style AKS_VNET fill:#7FBA00,color:#fff
+ style AKS_SUBNET fill:#98FB98,color:#000
+ style ODAA_SUB fill:#0078D4,color:#fff
+ style ODAA_RG fill:#50E6FF,color:#000
+ style ODAA_VNET fill:#7FBA00,color:#fff
+ style ODAA_SUBNET fill:#98FB98,color:#000
+ style ADB fill:#C74634,color:#fff
+ style AKS fill:#FFB900,color:#000
+ style DNS fill:#50E6FF,color:#000
+ style NSG fill:#F25022,color:#fff
+```
+
+### What VNet peering means in detail
+
+| Concept | Description |
+|---------|-------------|
+| **VNet isolation by default** | The AKS nodes run in one VNet and ADB sits in another; without peering, those address spaces are completely isolated and pods cannot reach the database IPs at all. |
+| **Private, internal traffic** | Peering lets both VNets exchange traffic over private IPs only, as if they were one network. No public IPs, no internet exposure, no extra gateways are needed. |
+| **Low latency, high bandwidth path** | Application-database calls stay on the cloud backbone, which is crucial for chatty OLTP workloads and for predictable performance. |
+| **Simple routing model** | With peering, standard system routes know how to reach the other VNet's CIDR; you avoid managing separate VPNs, user-defined routes, or NAT just to reach the DB. |
+| **Granular security with NSGs** | Even with peering in place, NSGs on subnets/NICs still control which AKS node subnets and ports (for example, 1521/2484) can reach ADB, giving you a simple but secure pattern. |
+
+**In summary:** The peering is what turns two isolated networks (AKS and ADB) into a securely connected, private application-database path, which the scenario depends on for the workloads to function.
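+
+As a rough illustration, such a peering is created with two `az network vnet peering create` calls, one per direction. This is a sketch only: the resource group, VNet names, and subscription IDs are placeholders, and in this microhack the peering is already provisioned for you.
+
+```bash
+# Peer the AKS VNet to the ODAA VNet (names and IDs are placeholders)
+az network vnet peering create \
+  --name aks-to-odaa \
+  --resource-group aks-userXX \
+  --vnet-name aks-userXX \
+  --remote-vnet "/subscriptions/<odaa-sub-id>/resourceGroups/odaa-userXX/providers/Microsoft.Network/virtualNetworks/odaa-userXX" \
+  --allow-vnet-access
+
+# Repeat in the opposite direction so traffic can flow both ways
+az network vnet peering create \
+  --name odaa-to-aks \
+  --resource-group odaa-userXX \
+  --vnet-name odaa-userXX \
+  --remote-vnet "/subscriptions/<aks-sub-id>/resourceGroups/aks-userXX/providers/Microsoft.Network/virtualNetworks/aks-userXX" \
+  --allow-vnet-access
+```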
+
+## Mapping between Azure and OCI
+
+### Azure Resource Hierarchy Diagram
+
+The following diagram shows how Azure organizes resources, mapped to our Terraform deployment:
+
+```mermaid
+flowchart TB
+ subgraph TENANT[Azure Tenant - Entra ID Directory]
+ direction TB
+        USERS[Users and Groups<br/>mh-odaa-user-grp]
+
+ subgraph SUB_AKS[Subscription: AKS]
+ subgraph RG_AKS[Resource Group: aks-userXX]
+                VNET_AKS[VNet: aks-userXX<br/>10.0.0.0/16]
+ AKS_CLUSTER[AKS Cluster]
+ LOG[Log Analytics]
+ DNS_ZONES[Private DNS Zones]
+ end
+ end
+
+ subgraph SUB_ODAA[Subscription: ODAA]
+ subgraph RG_ODAA[Resource Group: odaa-userXX]
+                VNET_ODAA[VNet: odaa-userXX<br/>192.168.0.0/16]
+ ADB[Oracle ADB]
+ end
+ end
+ end
+
+ USERS --> SUB_AKS
+ USERS --> SUB_ODAA
+ VNET_AKS <-.->|VNet Peering| VNET_ODAA
+
+ style TENANT fill:#0078D4,color:#fff
+ style USERS fill:#FFB900,color:#000
+ style SUB_AKS fill:#50E6FF,color:#000
+ style SUB_ODAA fill:#50E6FF,color:#000
+ style RG_AKS fill:#7FBA00,color:#fff
+ style RG_ODAA fill:#7FBA00,color:#fff
+ style VNET_AKS fill:#98FB98,color:#000
+ style VNET_ODAA fill:#98FB98,color:#000
+ style AKS_CLUSTER fill:#FFB900,color:#000
+ style LOG fill:#50E6FF,color:#000
+ style DNS_ZONES fill:#50E6FF,color:#000
+ style ADB fill:#C74634,color:#fff
+```
+
+> **Learn more:** [Azure resource organization](https://learn.microsoft.com/en-us/azure/cloud-adoption-framework/ready/azure-setup-guide/organize-resources)
+
+### Comparison Table: Azure vs OCI
+
+| Azure Concept | Description | OCI Equivalent |
+|---------------|-------------|----------------|
+| **Tenant** | Top-level identity boundary (Entra ID directory: users, groups, apps) | **Tenancy** (root container with identity domain/compartments) |
+| **Subscription** | Billing + deployment boundary; holds resource groups and resources | **Tenancy + Compartments** with cost-tracking tags |
+| **Resource Group** | Logical container for related resources; used for lifecycle, RBAC, policy, and tagging scope | **Compartment** (logical container for access control and organization) |
+| **Region** | Geographic area containing one or more datacenters | **Region** |
+| **Availability Zone** | Physically separate datacenter within a region | **Availability Domain** |
+
+### Hierarchy Comparison
+
+```
+Azure: Tenant --> Subscription --> Resource Group --> Resource
+OCI: Tenancy --> Compartment (nested) --> Resource
+```
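+
+This hierarchy is visible in every Azure resource ID. For example, an ODAA Autonomous Database resource ID (all names here are placeholders) embeds the subscription, resource group, provider, and resource:
+
+```
+/subscriptions/<subscription-id>/resourceGroups/odaa-userXX/providers/Oracle.Database/autonomousDatabases/<adb-name>
+```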
+
+> **Note:** OCI compartments map most closely to Azure resource groups, combined with some subscription-scope concepts.
+
+### Networking Concepts
+
+| Azure | Description | OCI Equivalent |
+|-------|-------------|----------------|
+| **Virtual Network (VNet)** | A private network in Azure where you place resources (VMs, databases, etc.), similar to an on-premises LAN in the cloud | **Virtual Cloud Network (VCN)** |
+| **Subnet** | A segment inside a VNet that groups resources and defines their IP range and routing boundaries | **Subnet** |
+| **Network Security Group (NSG)** | A set of inbound/outbound rules that allow or block traffic to subnets or individual NICs, acting like a basic stateful firewall | **Security List / NSG** |
+| **VNet Peering** | Connects two VNets so they can communicate using private IPs | **Local/Remote Peering** |
## Learning Objectives
-In this microhack you will solve a common challenge for companies migrating to the cloud: migrating Oracle databases to Azure. The application using the database is a sample e-commerce [application](https://github.com/pzinsta/pizzeria) written in JavaScript. It will be configured to use Oracle Database Express Edition [Oracle XE].
-
-The participants will learn how to:
-
-1. Perform a pre-migration assessment of the databases looking at size, database engine type, database version, etc.
-1. Use offline tools to copy the databases to Azure OSS databases
-1. Use the Azure Database Migration Service to perform an online migration (if applicable)
-1. Do cutover and validation to ensure the application is working properly with the new configuration
-1. Use a private endpoint for Azure OSS databases instead of a public IP address for the database
-1. Configure a read replica for the Azure OSS databases
-
-## Challenges
-- Challenge 0: **[Pre-requisites - Setup Environment and Prerequisites!](Student/00-prereqs.md)**
- - Prepare your environment to run the sample application
-- Challenge 1: **[Discovery and assessment](Student/01-discovery.md)**
- - Discover and assess the application's PostgreSQL/MySQL/Oracle databases
-- Challenge 2: Oracle to IaaS migration
-- Challenge 3: Oracle to PaaS migration
-- Challenge 4: Oracle to Azure OCI migration
-- Challenge 5: Oracle to Oracle Database on Azure migration
-
-## Prerequisites
-
-- Access to an Azure subscription with Owner access
- - If you don't have one, [Sign Up for Azure HERE](https://azure.microsoft.com/en-us/free/)
- - Familiarity with Azure Cloud Shell
-- [**Visual Studio Code**](https://code.visualstudio.com/) (optional)
-
-## Repository Contents
-- `../Coach`
- - [Lecture presentation](Coach/OSS-DB-What-the-Hack-Lecture.pptx?raw=true) with short presentations to introduce each challenge
- - Example solutions and coach tips to the challenges (If you're a student, don't cheat yourself out of an education!)
-- `../Student/Resources`
- - Pizzeria application environment setup
+- Understand how to onboard securely to Azure and prepare an account for Oracle Database@Azure administration.
+- Learn the sequence for purchasing and linking an Oracle Database@Azure subscription with Oracle Cloud Infrastructure.
+- Deploy an Autonomous Database instance inside an Azure network architecture, including the required preparations.
+- Apply required networking and DNS configurations to enable hybrid connectivity between Azure Kubernetes Service and Oracle Database@Azure resources.
+- Operate the provided tooling (Helm, GoldenGate, Data Pump, SQL*Plus) to simulate data replication scenarios and measure connectivity performance.
+
+## Prerequisites
+
+- PowerShell Terminal
+- Install Azure CLI
+- Install kubectl
+- Install Helm
+- Install git and clone this repo by following the instructions in [Clone Partial Repository](docs/clone-partial-repo.md)
+
+## Challenges
+
+### Challenge 0: Set Up Your User Account
+
+The goal is to ensure your Azure account is ready for administrative work in the remaining challenges.
+
+> [!IMPORTANT]
+> Before using the az command line in your preferred GUI or CLI, please make sure to log out of any previous session by running the command:
+>
+>```powershell
+>az logout
+>```
+
+You will receive a username and password for your account from your microhack coach. You must change this password during the initial registration.
+
+Start by browsing to the Azure Portal https://portal.azure.com.
+
+Open a private browser session or create your own browser profile, sign in with the credentials you received, and register for multi-factor authentication.
+
+As a first check, verify via the Azure Portal https://portal.azure.com that the two resource groups for the hackathon have been created.
+
+#### Actions
+
+* Enable multi-factor authentication (MFA)
+* Log in to the Azure portal with the assigned user
+* Verify that the ODAA and AKS resource groups, including their resources, are available
+* Verify the user's roles
+
+#### Success Criteria
+
+* Download the Microsoft Authenticator app on your mobile phone
+* Enable MFA for a successful login
+* Check via the Azure Portal https://portal.azure.com that the resource groups for AKS and ODAA are available and contain the expected resources
+* Check if the assigned user has the required roles in both resource groups.
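+
+A minimal CLI sketch for these checks, assuming you are logged in as the assigned user (names are placeholders):
+
+```bash
+# List the resource groups visible to your user; expect aks-userXX and odaa-userXX
+az group list --query "[].name" -o tsv
+
+# Show the role assignments for your user on one of the resource groups
+az role assignment list \
+  --assignee "userXX@<your-tenant-domain>" \
+  --resource-group aks-userXX \
+  --query "[].roleDefinitionName" -o tsv
+```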
+
+#### Learning Resources
+
+* [Sign in to the Azure portal](https://azure.microsoft.com/en-us/get-started/azure-portal)
+* [Set up Microsoft Entra multi-factor authentication](https://learn.microsoft.com/azure/active-directory/authentication/howto-mfa-userdevicesettings)
+* [Groups and roles in Azure](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/oaagroupsroles.htm)
+
+#### Solution
+
+* Challenge 0: [Set Up Your User Account](./walkthrough/setup-user-account/setup-user-account.md)
+
+### Challenge 1: Create an Oracle Database@Azure (ODAA) Subscription
+
+> [!NOTE]
+> **This is a theoretical challenge only.** No action is required from participants aside from reading the content. The ODAA subscription has already been created for you to save time.
+
+Review the Oracle Database@Azure service offer, the required Azure resource providers, and the role of the OCI tenancy. By the end you should understand how an Azure subscription links to Oracle Cloud so database services can be created. Please note that Challenge 1 has already been completed for you to save time and is therefore a purely theoretical challenge.
+
+#### Actions
+
+* Go to the ODAA Marketplace page. The purchase has already been made, but review how ODAA is implemented on the Azure side.
+* Check if the required Azure resource providers are enabled
+
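+A quick way to check the resource providers from the CLI; a sketch, where `Oracle.Database` is the provider namespace used by Oracle Database@Azure:
+
+```bash
+# Should print "Registered" in a subscription prepared for ODAA
+az provider show --namespace Oracle.Database --query registrationState -o tsv
+```
+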
+#### Success Criteria
+
+* Find the Oracle Database@Azure service in the Azure Portal
+* Familiarize yourself with the available ODAA services and how ODAA is purchased
+
+#### Learning Resources
+
+* [ODAA in Azure: an overview](https://www.oracle.com/cloud/azure/oracle-database-at-azure/)
+* [Enhanced Networking for ODAA](https://learn.microsoft.com/en-us/azure/oracle/oracle-db/oracle-database-network-plan)
+
+#### Solution
+
+* Challenge 1: [Create an Oracle Database@Azure (ODAA) Subscription](./walkthrough/create-odaa-subscription/create-odaa-subscription.md)
+
+### Challenge 2: Create an Oracle Database@Azure (ODAA) Autonomous Database (ADB) Instance
+
+Walk through the delegated subnet prerequisites, select the assigned resource group, and deploy the Autonomous Database instance with the standard parameters supplied in the guide. Completion is confirmed when the database instance shows a healthy state in the portal.
+
+#### Actions
+
+* Verify that a delegated subnet for the upcoming ADB deployment is available (see the CLI sketch below)
+
+> [!IMPORTANT]
+>
+> Set up the ADB exactly with the following settings:
+>
+> **ADB Deployment Settings:**
+> 1. Workload type: **OLTP**
+> 2. Database version: **23ai**
+> 3. ECPU Count: **2**
+> 4. Compute auto scaling: **off**
+> 5. Storage: **20 GB**
+> 6. Storage autoscaling: **off**
+> 7. Backup retention period in days: **1 day**
+> 8. Administrator password: (do not use '!' inside your password)
+> 9. License type: **License included**
+> 10. Oracle database edition: **Enterprise Edition**
+
+After you have started the ADB deployment, please clone the GitHub repository. Instructions are listed in Challenge 2 at the end of the ADB deployment section - see **IMPORTANT: While you are waiting for the ADB creation**.
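+
+A minimal sketch for the delegated-subnet check referenced in the Actions above (resource names are placeholders):
+
+```bash
+# A subnet delegated to Oracle Database@Azure should report the
+# Oracle.Database/networkAttachments delegation
+az network vnet subnet show \
+  --resource-group odaa-userXX \
+  --vnet-name odaa-userXX \
+  --name <delegated-subnet-name> \
+  --query "delegations[].serviceName" -o tsv
+```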
+
+#### Success Criteria
+
+* Delegated Subnet is available
+* ADB Shared is successfully deployed
+
+#### Learning Resources
+
+* [How to provision an Oracle ADB in Azure](https://learn.microsoft.com/en-us/azure/oracle/oracle-db/oracle-database-provision-autonomous-database)
+* [Deploy an ADB in Azure](https://docs.oracle.com/en/solutions/deploy-autonomous-database-db-at-azure/index.html)
+
+#### Solution
+
+* Challenge 2: [Create an Oracle Database@Azure (ODAA) Autonomous Database (ADB) Instance](./walkthrough/create-odaa-adb/create-odaa-adb.md)
+
+### Challenge 3: Update the Oracle ADB NSG and DNS
+
+Update the Network Security Group to allow traffic from the AKS environment and register the Oracle private endpoints in the AKS Private DNS zones. Validate connectivity from AKS after both security and DNS changes are applied.
+
+#### Actions
+
+* Update the NSG on the OCI side with the AKS CIDR to allow ingress from AKS to the ADB
+* Extract the ODAA "Database private URL" (FQDN) and "Database private IP" and assign them to the "Azure Private DNS Zones" linked to the AKS VNet.
+
+#### DNS Configuration Diagram
+
+The following diagram shows how Private DNS enables AKS pods to resolve the Oracle ADB hostname:
+
+```mermaid
+flowchart TB
+ subgraph AKS_SUB["Azure Subscription: AKS"]
+ subgraph AKS_RG["Resource Group: aks-userXX"]
+        subgraph VNET["VNet: aks-userXX<br/>10.0.0.0/16"]
+            POD["Pod"]
+        end
+        LINK["VNet Link"]
+        subgraph DNS_ZONE["Private DNS Zone<br/>adb.eu-paris-1.oraclecloud.com"]
+            A_RECORD["A Record<br/>Name: abc123<br/>IP: 192.168.0.10"]
+        end
+ end
+ end
+
+ subgraph ODAA_SUB["Azure Subscription: ODAA"]
+        ADB["Oracle ADB<br/>Database private URL:<br/>abc123.adb.eu-paris-1...<br/>Database private IP:<br/>192.168.0.10"]
+ end
+
+ ADB -.->|"Copy URL & IP"| A_RECORD
+ VNET --- LINK
+ LINK --- DNS_ZONE
+
+ style AKS_SUB fill:#0078D4,color:#fff
+ style ODAA_SUB fill:#0078D4,color:#fff
+ style DNS_ZONE fill:#50E6FF,color:#000
+ style A_RECORD fill:#FFB900,color:#000
+ style ADB fill:#C74634,color:#fff
+ style VNET fill:#7FBA00,color:#fff
+```
+
+**Steps:**
+
+1. **Copy** the Database private URL and IP from the Azure Portal (ODAA ADB resource)
+2. **Create an A record** in the Private DNS Zone with the hostname pointing to the private IP
+3. **Pods in AKS** resolve the FQDN via the VNet-linked Private DNS Zone
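+
+The A record from step 2 can also be created from the CLI; a sketch, assuming the zone and record names shown in the diagram (your actual hostname and IP will differ):
+
+```bash
+# Create the A record for the ADB private endpoint in the Private DNS Zone
+az network private-dns record-set a add-record \
+  --resource-group aks-userXX \
+  --zone-name adb.eu-paris-1.oraclecloud.com \
+  --record-set-name abc123 \
+  --ipv4-address 192.168.0.10
+```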
+
+#### Success Criteria
+
+* The NSG on the OCI side is updated with the AKS CIDR to allow ingress from AKS to the ADB
+* DNS is set up correctly.
+
+> [!CAUTION]
+> **Without a working DNS the next Challenge will fail.** Make sure DNS resolution is properly configured before proceeding.
+
+#### Learning Resources
+
+* [Network security groups overview](https://learn.microsoft.com/azure/virtual-network/network-security-groups-overview)
+* [Private DNS zones in Azure](https://learn.microsoft.com/azure/dns/private-dns-privatednszone)
+* [Oracle Database@Azure networking guidance](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/network.htm)
+
+#### Solution
+
+* Challenge 3: [Update the Oracle ADB NSG and DNS](./walkthrough/update-odaa-nsg-dns/update-odaa-nsg-dns.md)
+
+### Challenge 4: Simulate the On-Premises Environment
+
+Deploy the pre-built Helm chart into AKS to install the sample Oracle database, Data Pump job, GoldenGate services, and Instant Client. Manage the shared secrets carefully and verify that data flows from the source schema into the Autonomous Database target schema.
+
+#### Architecture Diagram
+
+The following diagram shows the components deployed via Helm and the data replication flow:
+
+```mermaid
+flowchart TB
+ subgraph AKS_SUB["Azure Subscription: AKS"]
+ subgraph AKS["AKS Cluster (Namespace: microhacks)"]
+ subgraph HELM["Helm Chart: goldengate-microhack-sample"]
+            DB["Oracle 23ai Free<br/>(Source DB)<br/>Schema: SH"]
+            OGG["GoldenGate<br/>(CDC Replication)"]
+            IC["Instant Client<br/>(SQL*Plus)"]
+            JUP["Jupyter Notebook<br/>(CPAT)"]
+            PUMP["Data Pump Job<br/>(Initial Load)"]
+        end
+        SECRETS["K8s Secrets<br/>ogg-admin-secret<br/>db-admin-secret"]
+        INGRESS["NGINX Ingress"]
+ end
+ end
+
+ subgraph ODAA_SUB["Azure Subscription: ODAA"]
+        ADB["Oracle ADB<br/>(Target DB)<br/>Schema: SH2"]
+ end
+
+ SECRETS -.-> HELM
+    PUMP -->|"1️⃣ Initial Load<br/>SH → SH2"| ADB
+    OGG -->|"2️⃣ CDC Replication<br/>(Real-time)"| ADB
+ IC -->|"SQL Queries"| DB
+ IC -->|"SQL Queries"| ADB
+ INGRESS -->|"Web UI"| OGG
+ INGRESS -->|"Web UI"| JUP
+
+ style AKS_SUB fill:#0078D4,color:#fff
+ style ODAA_SUB fill:#0078D4,color:#fff
+ style HELM fill:#50E6FF,color:#000
+ style DB fill:#C74634,color:#fff
+ style ADB fill:#C74634,color:#fff
+ style OGG fill:#FFB900,color:#000
+ style SECRETS fill:#7FBA00,color:#fff
+```
+
+**Data Flow:**
+1. **Data Pump** performs the initial bulk load of the SH schema to the SH2 schema in ADB
+2. **GoldenGate** captures ongoing changes (CDC) and replicates them in near real-time
+3. **Instant Client** provides SQL*Plus access to both source and target databases
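+
+The deployment itself is a single Helm release; a sketch under the naming assumptions visible in the diagram (release `ogghack`, chart `goldengate-microhack-sample`, namespace `microhacks` - the exact chart location, values, and secrets are provided in the walkthrough):
+
+```bash
+# Fetch AKS credentials, then install the chart into the microhacks namespace
+az aks get-credentials --resource-group aks-userXX --name <aks-cluster-name>
+helm install ogghack ./goldengate-microhack-sample \
+  --namespace microhacks --create-namespace
+
+# Watch the pods come up
+kubectl get pods -n microhacks -w
+```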
+
+#### Actions
+
+* Deploy the sample workload to the AKS cluster: the required pods, including the Jupyter notebook with CPAT, the Oracle Instant Client, and GoldenGate
+* Verify AKS cluster deployment
+* Check the connectivity from the Instant Client to the ADB database and verify that the SH schema from the 23ai Free edition has been migrated to the SH2 schema in the ADB
+* Review the GoldenGate configuration
+
+#### Success Criteria
+
+* Successful deployment to AKS with all pods running
+* Successful connection from the Instant Client to the ADB and source database
+* Successful login to GoldenGate
+
+#### Learning Resources
+
+* [Connect to an AKS cluster using Azure CLI](https://learn.microsoft.com/azure/aks/learn/quick-kubernetes-deploy-cli)
+* [Use Helm with AKS](https://learn.microsoft.com/azure/aks/kubernetes-helm)
+* [Oracle GoldenGate Microservices overview](https://docs.oracle.com/en/middleware/goldengate/core/23/coredoc/)
+* [Oracle Data Pump overview](https://docs.oracle.com/en/database/oracle/oracle-database/26/sutil/oracle-data-pump-overview.html)
+
+#### Solution
+
+* Challenge 4: [Simulate the On-Premises Environment](./walkthrough/onprem-ramp-up/onprem-ramp-up.md)
+
+---
+
+### Challenge 5: Measure Network Performance to Your Oracle Database@Azure Autonomous Database
+
+Use the Instant Client pod to run the scripted SQL latency test against the Autonomous Database and collect the round-trip results. Optionally supplement the findings with the lightweight TCP probe to observe connection setup timing.
+
+#### Actions
+* Log in to the Instant Client and execute a first performance test from the AKS cluster against the deployed ADB
+
+#### Success Criteria
+* Successful login to the ADB via the Instant Client
+* Successful execution of the available performance scripts
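+
+A minimal latency probe along these lines can be run from the Instant Client pod; a sketch, where the connect string is a placeholder you replace with your own ADB TNS alias and credentials:
+
+```bash
+# Time 10 round trips of a trivial query; each run also includes connection setup
+export TNS_ADMIN=/tmp/wallet
+for i in $(seq 1 10); do
+  start=$(date +%s%N)
+  echo "SELECT 1 FROM DUAL;" | sqlplus -s admin/"<password>"@<tns-alias> > /dev/null
+  end=$(date +%s%N)
+  echo "run $i: $(( (end - start) / 1000000 )) ms"
+done
+```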
+
+#### Learning Resources
+* [Connect to Oracle Database@Azure using SQL*Plus](https://docs.oracle.com/en-us/iaas/autonomous-database-serverless/doc/connect-sqlplus-tls.html)
+* [Diagnose metrics and logs for Oracle Database@Azure](https://learn.microsoft.com/en-us/azure/cloud-adoption-framework/scenarios/oracle-on-azure/oracle-manage-monitor-oracle-database-azure)
+
+#### Solution
+* Challenge 5: [Measure Network Performance to Your Oracle Database@Azure Autonomous Database](./walkthrough/perf-test-odaa/perf-test-odaa.md)
+
+
+
## Contributors
+*To be added*
+
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/Walkthrough/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/Walkthrough/README.md
deleted file mode 100644
index 30404ce4c..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/Walkthrough/README.md
+++ /dev/null
@@ -1 +0,0 @@
-TODO
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/DEBUG-ENTRAID-AUTH.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/DEBUG-ENTRAID-AUTH.md
new file mode 100644
index 000000000..881774807
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/DEBUG-ENTRAID-AUTH.md
@@ -0,0 +1,461 @@
+# Debugging Entra ID Authentication to Oracle Autonomous Database
+
+## Error Received
+```
+ORA-01017: invalid credential or not authorized; logon denied
+```
+
+## Step-by-Step Debugging Guide
+
+### 1. Token Analysis ✅
+
+**Your Token Details:**
+- **User (upn):** ga1@cptazure.org
+- **Audience (aud):** 7d22ece1-dd60-4279-a911-4b7b95934f2e ✅ (matches app registration)
+- **Tenant (tid):** f71980b2-590a-4de9-90d5-6fbc867da951 ✅ (matches configuration)
+- **Issuer (iss):** https://login.microsoftonline.com/f71980b2-590a-4de9-90d5-6fbc867da951/v2.0 ✅
+- **Token Version:** 2.0 ✅ (correct - you set `accessTokenAcceptedVersion: 2`)
+- **Scope (scp):** session:scope:connect ✅
+- **Roles:** 1314ae09-ccc6-4f59-b68b-3837ff44465b, fa80ec82-2110-4b45-be28-b3341bf19661
+- **Token Valid:** Yes (expires 10/17/2025 09:31:32)
+
+**Token appears valid! ✅**
+
+---
+
+### 2. Database Configuration Checks
+
+Connect to the database as ADMIN to verify configuration:
+
+```powershell
+# Get pod name
+$podInstanteClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] }
+
+# Connect to pod
+kubectl exec -it -n microhacks $podInstanteClientName -- /bin/bash
+
+# Inside the pod, connect as ADMIN
+sqlplus admin@'(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))'
+```
+
+#### Check 2.1: Verify Entra ID is Enabled
+
+```sql
+-- Should show AZURE_AD
+SELECT NAME, VALUE FROM V$PARAMETER WHERE NAME='identity_provider_type';
+```
+
+**Expected Output:**
+```
+NAME VALUE
+------------------------------ ----------
+identity_provider_type AZURE_AD
+```
+
+#### Check 2.2: Verify User GA1 Exists and is Global
+
+```sql
+-- Should show GLOBAL authentication
+SELECT username, authentication_type, account_status, external_name
+FROM dba_users
+WHERE username = 'GA1';
+```
+
+**Expected Output:**
+```
+USERNAME AUTHENTI ACCOUNT_STATUS EXTERNAL_NAME
+--------- --------- --------------- ------------------------------
+GA1 GLOBAL OPEN AZURE_USER=ga1@cptazure.org
+```
+
+⚠️ **CRITICAL CHECK:** The `EXTERNAL_NAME` must be exactly `AZURE_USER=ga1@cptazure.org`
+
+#### Check 2.3: Verify User Privileges
+
+```sql
+-- GA1 must have CREATE SESSION privilege
+SELECT * FROM dba_sys_privs WHERE grantee = 'GA1';
+```
+
+**Expected Output:**
+```
+GRANTEE PRIVILEGE ADMIN_OPTION
+-------- --------------- ------------
+GA1 CREATE SESSION NO
+```
+
+#### Check 2.4: Verify Entra ID Configuration
+
+```sql
+-- Check Azure AD configuration
+SELECT
+ param_name,
+ param_value
+FROM
+ dba_cloud_config
+WHERE
+ param_name IN ('AZURE_TENANT_ID', 'AZURE_APPLICATION_ID', 'AZURE_APPLICATION_ID_URI')
+ORDER BY
+ param_name;
+```
+
+**Expected Values:**
+```
+PARAM_NAME PARAM_VALUE
+-------------------------- --------------------------------------------------
+AZURE_TENANT_ID f71980b2-590a-4de9-90d5-6fbc867da951
+AZURE_APPLICATION_ID 7d22ece1-dd60-4279-a911-4b7b95934f2e
+AZURE_APPLICATION_ID_URI https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e
+```
+
+#### Check 2.5: Verify Network ACLs
+
+```sql
+-- Check if GA1 has network access to Entra ID endpoints
+SELECT host, lower_port, upper_port, principal, privilege
+FROM dba_host_aces
+WHERE host LIKE 'login%' AND principal = 'GA1'
+ORDER BY host, privilege;
+```
+
+**Expected Output:**
+```
+HOST PRINCIPAL PRIVILEGE
+------------------------- ---------- ---------
+login.microsoftonline.com GA1 connect
+login.microsoftonline.com GA1 resolve
+login.windows.net GA1 connect
+login.windows.net GA1 resolve
+```
+
+If missing, add them:
+
+```sql
+BEGIN
+ DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(
+ host => 'login.windows.net',
+ ace => xs$ace_type(
+ privilege_list => xs$name_list('connect','resolve'),
+ principal_name => 'GA1',
+ principal_type => xs_acl.ptype_db));
+END;
+/
+COMMIT;
+
+BEGIN
+ DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(
+ host => 'login.microsoftonline.com',
+ ace => xs$ace_type(
+ privilege_list => xs$name_list('connect','resolve'),
+ principal_name => 'GA1',
+ principal_type => xs_acl.ptype_db));
+END;
+/
+COMMIT;
+```
+
+#### Check 2.6: Test Database Can Reach Entra ID
+
+```sql
+-- Test HTTPS connectivity to Entra ID (as ADMIN)
+SET SERVEROUTPUT ON SIZE 40000
+DECLARE
+ req UTL_HTTP.REQ;
+ resp UTL_HTTP.RESP;
+BEGIN
+ UTL_HTTP.SET_WALLET(path => 'system:');
+ req := UTL_HTTP.BEGIN_REQUEST('https://login.windows.net/common/discovery/keys');
+ resp := UTL_HTTP.GET_RESPONSE(req);
+ DBMS_OUTPUT.PUT_LINE('HTTP response status code: ' || resp.status_code);
+ UTL_HTTP.END_RESPONSE(resp);
+EXCEPTION
+ WHEN OTHERS THEN
+ DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
+END;
+/
+```
+
+**Expected Output:** `HTTP response status code: 200`
+
+---
+
+### 3. Client Configuration Checks
+
+#### Check 3.1: Verify Token File Exists and is Readable
+
+```bash
+# Inside the pod
+ls -la /tmp/wallet/token.txt
+cat /tmp/wallet/token.txt | wc -c # Should be ~1900 bytes (not empty!)
+```
+
+#### Check 3.2: Verify sqlnet.ora Configuration
+
+```bash
+# Check sqlnet.ora content
+cat /tmp/wallet/sqlnet.ora
+```
+
+**Expected Content:**
+```
+WALLET_LOCATION = (SOURCE = (METHOD = file) (METHOD_DATA = (DIRECTORY="/tmp/wallet")))
+SSL_SERVER_DN_MATCH=ON
+SQLNET.AUTHENTICATION_SERVICES= (TCPS,NTS)
+NAMES.DIRECTORY_PATH= (TNSNAMES, EZCONNECT)
+TOKEN_AUTH=OAUTH
+TOKEN_LOCATION="/tmp/wallet/token.txt"
+```
+
+⚠️ **CRITICAL CHECKS:**
+- `SSL_SERVER_DN_MATCH=ON` (for Entra ID connections)
+- `TOKEN_AUTH=OAUTH`
+- `TOKEN_LOCATION="/tmp/wallet/token.txt"` (correct path)
+
+#### Check 3.3: Verify TNS_ADMIN Environment Variable
+
+```bash
+# Should point to /tmp/wallet
+echo $TNS_ADMIN
+```
+
+If not set:
+```bash
+export TNS_ADMIN=/tmp/wallet
+```
+
+#### Check 3.4: Test Token is Valid and Not Expired
+
+```bash
+# Check token expiry (you can decode it manually or check the exp claim)
+# Your current token expires: 10/17/2025 09:31:32
+date
+```
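+
+If you want to read the expiry directly from the token, you can decode the JWT payload; a sketch, assuming `base64`, `awk`, and `jq` are available in the pod:
+
+```bash
+# Extract the payload (2nd JWT segment), fix base64url padding, read the exp claim
+EXP=$(cut -d. -f2 /tmp/wallet/token.txt | tr '_-' '/+' \
+  | awk '{ while (length($0) % 4) $0 = $0 "="; print }' \
+  | base64 -d | jq -r .exp)
+date -u -d "@${EXP}"   # token expiry in UTC
+```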
+
+If expired, regenerate:
+
+```powershell
+# On your local machine
+az login --tenant "f71980b2-590a-4de9-90d5-6fbc867da951"
+$token=az account get-access-token --scope "https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e/.default" --query accessToken -o tsv
+$token | Out-File -FilePath .\misc\token.txt -Encoding ascii -NoNewline
+
+# Upload to pod
+$podInstanteClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] }
+kubectl cp ./misc/token.txt ${podInstanteClientName}:/tmp/wallet/token.txt -n microhacks
+```
+
+---
+
+### 4. Database Alert Log and Trace Files
+
+#### Check 4.1: Check Oracle Alert Log
+
+If you have access to the database alert log (typically through Oracle Cloud Console):
+
+**Look for entries like:**
+- `ORA-01017` with additional context
+- `OAUTH` or `AZURE_AD` authentication failures
+- Token validation errors
+- Network connectivity issues to Entra ID endpoints
+
+**Location (on ADB):** Typically accessible through OCI Console → Autonomous Database → Performance Hub → SQL Monitoring
+
+#### Check 4.2: Enable SQL*Net Tracing (if needed)
+
+```bash
+# Add to sqlnet.ora temporarily for debugging
+cat <<'EOF' >> /tmp/wallet/sqlnet.ora
+TRACE_LEVEL_CLIENT=16
+TRACE_DIRECTORY_CLIENT=/tmp
+TRACE_FILE_CLIENT=sqlnet_trace.log
+EOF
+```
+
+Then retry the connection and check `/tmp/sqlnet_trace.log`.
+
+---
+
+### 5. Common Issues and Solutions
+
+#### Issue 5.1: User Mapping Mismatch
+
+**Problem:** Database expects exact UPN from token
+
+**Solution:**
+```sql
+-- Recreate user with exact UPN from token
+DROP USER GA1 CASCADE;
+CREATE USER GA1 IDENTIFIED GLOBALLY AS 'AZURE_USER=ga1@cptazure.org';
+GRANT CREATE SESSION TO GA1;
+```
+
+⚠️ **The UPN in the token is:** `ga1@cptazure.org`
+
+#### Issue 5.2: Wrong Connection String
+
+**Problem:** Using wrong security settings
+
+**Current attempt:**
+```
+(security=(ssl_server_dn_match=on))
+```
+
+**Try with explicit token parameters:**
+```bash
+sqlplus /@'(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=yes)(TOKEN_AUTH=OAUTH)(TOKEN_LOCATION="/tmp/wallet/token.txt")))'
+```
+
+Or use sqlnet.ora settings and simpler connection:
+```bash
+# Ensure TNS_ADMIN is set
+export TNS_ADMIN=/tmp/wallet
+
+# Try simple connection using tnsnames alias
+sqlplus /@adbger_high
+```
+
+#### Issue 5.3: Token Encoding Issues
+
+**Problem:** Token file has wrong encoding or line breaks
+
+**Solution:**
+```bash
+# Check for line breaks or extra characters
+od -c /tmp/wallet/token.txt | head -20
+
+# Token should be ONE line, ASCII encoded
+# If it has line breaks, fix it:
+tr -d '\n\r' < /tmp/wallet/token.txt > /tmp/wallet/token_fixed.txt
+mv /tmp/wallet/token_fixed.txt /tmp/wallet/token.txt
+```
+
+#### Issue 5.4: Missing App Role Assignment
+
+**Problem:** User not assigned to app roles in Entra ID
+
+**Check in Entra ID (Azure Portal):**
+1. Go to Enterprise Applications → adbger (7d22ece1-dd60-4279-a911-4b7b95934f2e)
+2. Users and groups → Check if ga1@cptazure.org is assigned
+3. If using app roles, verify ga1 is assigned to the correct role
+
+From your token, I see these role GUIDs:
+- `1314ae09-ccc6-4f59-b68b-3837ff44465b`
+- `fa80ec82-2110-4b45-be28-b3341bf19661`
+
+But your manifest only shows:
+- `e9ea0527-85f2-4e84-9884-2ae95c4f5a17` (SH2_APP)
+
+⚠️ **POTENTIAL ISSUE:** Role GUIDs in token don't match manifest!
+
+---
+
+### 6. Recommended Debugging Sequence
+
+**Step 1:** Verify database configuration (run all SQL checks above)
+
+**Step 2:** Verify token is current and properly formatted
+```bash
+# Inside pod
+ls -la /tmp/wallet/token.txt
+cat /tmp/wallet/token.txt | wc -c
+# Should be ~1900 bytes
+```
+
+**Step 3:** Try simplified connection string
+```bash
+export TNS_ADMIN=/tmp/wallet
+sqlplus /@adbger_high
+```
+
+**Step 4:** If still failing, check database logs via OCI Console
+
+**Step 5:** Verify Entra ID app role assignments match user
+
+---
+
+### 7. Quick Diagnostic Commands
+
+Run these in sequence to generate a diagnostic report:
+
+```sql
+-- Connect as ADMIN first
+sqlplus admin@'(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))'
+
+SPOOL /tmp/entraid_diag.txt
+
+-- Identity Provider
+SELECT NAME, VALUE FROM V$PARAMETER WHERE NAME='identity_provider_type';
+
+-- User Configuration
+SELECT username, authentication_type, account_status, external_name
+FROM dba_users
+WHERE username = 'GA1';
+
+-- User Privileges
+SELECT * FROM dba_sys_privs WHERE grantee = 'GA1';
+
+-- Azure AD Config
+SELECT param_name, param_value
+FROM dba_cloud_config
+WHERE param_name LIKE 'AZURE%'
+ORDER BY param_name;
+
+-- Network ACLs
+SELECT host, principal, privilege
+FROM dba_host_aces
+WHERE host LIKE 'login%'
+ORDER BY host, principal, privilege;
+
+SPOOL OFF
+EXIT
+```
+
+Then copy diagnostic file:
+```bash
+# From pod
+cat /tmp/entraid_diag.txt
+```
+
+---
+
+### 8. Expected Log Files for Review
+
+If the issue persists, check these log locations:
+
+**On Autonomous Database (via OCI Console):**
+1. **Alert Log:**
+   - OCI Console → Autonomous Database → Performance Hub → ASH Analytics
+ - Look for ORA-01017 entries around your connection time
+
+2. **Audit Trail:**
+ ```sql
+   SELECT event_timestamp, dbusername, action_name, return_code
+   FROM unified_audit_trail
+   WHERE dbusername = 'GA1'
+   ORDER BY event_timestamp DESC
+   FETCH FIRST 10 ROWS ONLY;
+ ```
+
+3. **External Authentication Logs:**
+ ```sql
+ SELECT * FROM v$diag_alert_ext
+ WHERE message_text LIKE '%AZURE%' OR message_text LIKE '%OAUTH%'
+ ORDER BY originating_timestamp DESC
+ FETCH FIRST 20 ROWS ONLY;
+ ```
+
+**On Client (pod):**
+- SQL*Net trace: `/tmp/sqlnet_trace.log` (if tracing enabled)
+- SQL*Plus log: Check terminal output carefully
+
+---
+
+## Most Likely Cause
+
+Based on your configuration, the most likely issues are:
+
+1. ⚠️ **App Role Mismatch:** The role GUIDs in your token don't match the app registration manifest
+2. ⚠️ **User Mapping:** GA1 user external name might not exactly match the UPN in the token
+3. ⚠️ **Network ACLs:** GA1 might not have network access to Entra ID endpoints
+
+**Start with running all SQL checks in Step 2 above!**
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/RESOLUTION-ENTRAID-AUTH.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/RESOLUTION-ENTRAID-AUTH.md
new file mode 100644
index 000000000..6590bda1e
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/RESOLUTION-ENTRAID-AUTH.md
@@ -0,0 +1,265 @@
+# Entra ID Authentication - RESOLUTION SUMMARY
+
+## **STATUS: WORKING** ✅
+
+**Date:** October 17, 2025
+**Authentication Method:** TOKEN_GLOBAL
+**User:** ga1@cptazure.org → GA1
+
+---
+
+## Issues Found and Fixed
+
+### 1. ❌ **Missing TOKEN Configuration in sqlnet.ora**
+**Problem:** The `sqlnet.ora` file was missing TOKEN_AUTH and TOKEN_LOCATION parameters.
+
+**Solution:** Updated `/tmp/wallet/sqlnet.ora` to include:
+```
+TOKEN_AUTH=OAUTH
+TOKEN_LOCATION="/tmp/wallet/token.txt"
+```
+
+**Files Updated:**
+- `c:\Users\chpinoto\workspace\msftmh\03-Azure\01-03-Infrastructure\10_Oracle_on_Azure\misc\wallet\sqlnet.ora`
+
+---
+
+### 2. ❌ **Token File Had Line Breaks**
+**Problem:** The token file contained a newline character (1 line break), which can cause parsing issues.
+
+**Solution:** Removed line breaks from token file to make it a single line.
+
+**Verification:**
+```bash
+wc -l /tmp/wallet/token.txt # Should show: 0
+```
+
+---
+
+### 3. ❌ **Missing Network ACLs for GA1 User**
+**Problem:** User GA1 had no network access control lists (ACLs) to reach Entra ID endpoints.
+
+**Solution:** Added ACLs for GA1 to access:
+- `login.windows.net` (connect, resolve)
+- `login.microsoftonline.com` (connect, resolve)
+
+**SQL Commands Used:**
+```sql
+BEGIN
+ DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(
+ host => 'login.windows.net',
+ ace => xs$ace_type(
+ privilege_list => xs$name_list('connect','resolve'),
+ principal_name => 'GA1',
+ principal_type => xs_acl.ptype_db));
+END;
+/
+
+BEGIN
+ DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(
+ host => 'login.microsoftonline.com',
+ ace => xs$ace_type(
+ privilege_list => xs$name_list('connect','resolve'),
+ principal_name => 'GA1',
+ principal_type => xs_acl.ptype_db));
+END;
+/
+COMMIT;
+```
+
+---
+
+### 4. ℹ️ **User External Name Case (Not an Issue)**
+**Observation:** Oracle stores the external name in lowercase: `azure_user=ga1@cptazure.org`
+
+**Resolution:** This is Oracle's normal behavior and does NOT affect authentication. The matching is case-insensitive.
+
+---
+
+## Working Configuration
+
+### Database Configuration ✅
+```
+Identity Provider Type: AZURE_AD
+User: GA1
+Authentication Type: GLOBAL
+External Name: azure_user=ga1@cptazure.org
+Privileges: CREATE SESSION
+Network ACLs: login.windows.net, login.microsoftonline.com
+```
+
+### Client Configuration ✅
+**sqlnet.ora** (`/tmp/wallet/sqlnet.ora`):
+```
+WALLET_LOCATION = (SOURCE = (METHOD = file) (METHOD_DATA = (DIRECTORY="/tmp/wallet")))
+SSL_SERVER_DN_MATCH=ON
+SQLNET.AUTHENTICATION_SERVICES= (TCPS,NTS)
+NAMES.DIRECTORY_PATH= (TNSNAMES, EZCONNECT)
+TOKEN_AUTH=OAUTH
+TOKEN_LOCATION="/tmp/wallet/token.txt"
+```
+
+**Environment Variables:**
+```bash
+export TNS_ADMIN=/tmp/wallet
+export LD_LIBRARY_PATH=/opt/oracle/instantclient_23_4
+export PATH=/opt/oracle/instantclient_23_4:$PATH
+```
+
+### Token Configuration ✅
+- **File:** `/tmp/wallet/token.txt`
+- **Size:** 1783 bytes
+- **Line Breaks:** 0 (single line)
+- **Encoding:** ASCII
+- **Token Type:** JWT (JSON Web Token)
+- **Version:** 2.0
+- **Audience (aud):** 7d22ece1-dd60-4279-a911-4b7b95934f2e
+- **Tenant (tid):** f71980b2-590a-4de9-90d5-6fbc867da951
+- **UPN:** ga1@cptazure.org
+- **Scope:** session:scope:connect
+
+---
+
+## Connection Test Results
+
+### Successful Connection ✅
+```bash
+#!/bin/bash
+export TNS_ADMIN=/tmp/wallet
+export LD_LIBRARY_PATH=/opt/oracle/instantclient_23_4
+export PATH=/opt/oracle/instantclient_23_4:$PATH
+
+/opt/oracle/instantclient_23_4/sqlplus /@adbger_high
+```
+
+**Output:**
+```
+SQL*Plus: Release 23.0.0.0.0 - Production
+Connected to:
+Oracle Database 23ai Enterprise Edition Release 23.0.0.0.0
+
+USER: GA1
+CURRENT_USER: GA1
+AUTH_METHOD: TOKEN_GLOBAL
+```
+
+---
+
+## How to Test
+
+### From PowerShell (Local Machine)
+```powershell
+# Get pod name
+$podInstanteClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] }
+
+# Test authentication
+kubectl exec -n microhacks $podInstanteClientName -- bash /tmp/test_entraid_auth.sh
+```
+
+### Inside the Pod
+```bash
+# Connect to pod
+kubectl exec -it -n microhacks $podInstanteClientName -- /bin/bash
+
+# Set environment
+export TNS_ADMIN=/tmp/wallet
+export LD_LIBRARY_PATH=/opt/oracle/instantclient_23_4
+export PATH=/opt/oracle/instantclient_23_4:$PATH
+
+# Connect using Entra ID token
+sqlplus /@adbger_high
+
+# Verify user
+SQL> SELECT USER FROM DUAL;
+# Should show: GA1
+
+SQL> SELECT SYS_CONTEXT('USERENV', 'AUTHENTICATION_METHOD') FROM DUAL;
+# Should show: TOKEN_GLOBAL
+```
+
+---
+
+## Token Renewal
+
+The token expires after **60-90 minutes**. For production use, you need automated token refresh.
+
+### Manual Token Refresh (Testing Only)
+
+```powershell
+# On local machine
+az login --tenant "f71980b2-590a-4de9-90d5-6fbc867da951"
+$token=az account get-access-token --scope "https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e/.default" --query accessToken -o tsv
+$token | Out-File -FilePath .\misc\token.txt -Encoding ascii -NoNewline
+
+# Upload to pod
+$podInstanteClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] }
+kubectl cp ./misc/token.txt ${podInstanteClientName}:/tmp/wallet/token.txt -n microhacks
+```
+
+### ✅ Automated Token Refresh (Production)
+
+**For production environments, see comprehensive token refresh strategies:**
+
+**[TOKEN-REFRESH-STRATEGIES.md](TOKEN-REFRESH-STRATEGIES.md)**
+
+Recommended approaches:
+1. **Sidecar Container** - Automatic refresh every 45 minutes (RECOMMENDED)
+2. **CronJob** - Kubernetes CronJob for periodic refresh
+3. **Application-Level** - Token refresh built into your application
+4. **Azure Key Vault + CSI Driver** - Enterprise solution with auto-sync
+
+The sidecar approach is recommended for Kubernetes deployments as it:
+- ✅ Refreshes tokens automatically before expiration
+- ✅ Uses Azure Workload Identity (no secrets in code)
+- ✅ Requires no changes to application code
+- ✅ Provides high availability with built-in retry logic
+
+---
+
+## Files Created for Debugging
+
+1. **DEBUG-ENTRAID-AUTH.md** - Comprehensive debugging guide
+2. **misc/diagnose.sql** - SQL diagnostic script
+3. **misc/Run-EntraIDDiagnostics.ps1** - PowerShell diagnostic runner
+4. **misc/db_diagnostics.sh** - Bash diagnostic script
+5. **misc/db_diag_v2.sh** - Improved diagnostic script
+6. **misc/fix_entraid.sh** - Script to fix configuration issues
+7. **misc/test_entraid_auth.sh** - Authentication test script
+
+---
+
+## Key Learnings
+
+1. **TOKEN_AUTH and TOKEN_LOCATION must be in sqlnet.ora** - Without these parameters, SQL*Plus won't use the token file.
+
+2. **Token must be a single line** - Line breaks in the token file can cause authentication failures.
+
+3. **Network ACLs are required** - The database user must have network access to Entra ID endpoints to validate tokens.
+
+4. **Case sensitivity in external names doesn't matter** - Oracle stores external names in lowercase, but matching is case-insensitive.
+
+5. **TNS_ADMIN must be set** - The environment variable must point to the wallet directory containing sqlnet.ora and tnsnames.ora.
+
+---
+
+## Troubleshooting Future Issues
+
+If authentication stops working, check:
+
+1. **Token expiry:** Tokens expire after ~90 minutes
+2. **Token format:** Must be single line, ASCII encoding
+3. **Network ACLs:** Check `dba_host_aces` for GA1 principal
+4. **sqlnet.ora:** Verify TOKEN_AUTH=OAUTH and TOKEN_LOCATION are set
+5. **Environment:** Ensure TNS_ADMIN, LD_LIBRARY_PATH, and PATH are set correctly
+
+Run diagnostics:
+```bash
+kubectl exec -n microhacks $podInstanteClientName -- bash /tmp/db_diag_v2.sh
+```
+
+---
+
+## References
+
+- Oracle Documentation: [Authenticating Microsoft Entra ID Users in Oracle Databases](https://docs.oracle.com/en/database/oracle/oracle-database/19/dbseg/authenticating-and-authorizing-microsoft-entra-id-ms-ei-users-oracle-databases-oracle-exadata.html)
+- Autonomous Database: [Enable Microsoft Entra ID Authentication](https://docs.oracle.com/en/cloud/paas/autonomous-database/serverless/adbsb/autonomous-azure-ad-enable.html)
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/TOKEN-REFRESH-STRATEGIES.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/TOKEN-REFRESH-STRATEGIES.md
new file mode 100644
index 000000000..1ab6de3f5
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/TOKEN-REFRESH-STRATEGIES.md
@@ -0,0 +1,575 @@
+# Token Refresh Strategies for Entra ID Authentication
+
+## Overview
+
+OAuth2 tokens from Entra ID typically expire after **60-90 minutes**. For production scenarios, you need an automated token refresh mechanism to maintain continuous database connectivity.
+
+---
+
+## Recommended Approaches
+
+### ✅ **Option 1: Sidecar Container with Token Refresh (RECOMMENDED for Production)**
+
+Deploy a sidecar container in your Kubernetes pod that automatically refreshes the token before expiration.
+
+#### Architecture
+```
+┌─────────────────────────────────────────┐
+│ Kubernetes Pod                          │
+│                                         │
+│  ┌──────────────┐  ┌─────────────────┐  │
+│  │ Application  │  │ Token Refresh   │  │
+│  │ Container    │  │ Sidecar         │  │
+│  │              │  │                 │  │
+│  │ - Reads      │  │ - Refreshes     │  │
+│  │   token      │  │   every 45min   │  │
+│  │ - Connects   │  │ - Uses MSI      │  │
+│  │   to Oracle  │  │ - Writes to     │  │
+│  │              │  │   shared vol    │  │
+│  └──────────────┘  └─────────────────┘  │
+│         │                  │            │
+│         └─────────┬────────┘            │
+│                   │                     │
+│          ┌────────▼────────┐            │
+│          │  Shared Volume  │            │
+│          │  /tmp/wallet/   │            │
+│          │    token.txt    │            │
+│          └─────────────────┘            │
+└─────────────────────────────────────────┘
+```
+
+#### Implementation
+
+**1. Create Token Refresh Script:**
+
+```bash
+#!/bin/bash
+# refresh-token.sh
+# Automatically refreshes Entra ID token using Azure Managed Identity
+
+TENANT_ID="f71980b2-590a-4de9-90d5-6fbc867da951"
+CLIENT_ID="7d22ece1-dd60-4279-a911-4b7b95934f2e"
+RESOURCE="https://cptazure.org/${CLIENT_ID}"  # app ID URI; IMDS expects this as 'resource' (no /.default suffix)
+TOKEN_FILE="/tmp/wallet/token.txt"
+REFRESH_INTERVAL=2700 # 45 minutes (before 60-minute expiry)
+
+while true; do
+ echo "$(date): Refreshing token..."
+
+ # Get token using Managed Identity
+    TOKEN=$(curl -s "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=${RESOURCE}" \
+ -H "Metadata: true" \
+ | jq -r .access_token)
+
+ if [ "$TOKEN" != "null" ] && [ -n "$TOKEN" ]; then
+ echo -n "$TOKEN" > "$TOKEN_FILE"
+ chmod 600 "$TOKEN_FILE"
+ echo "$(date): Token refreshed successfully"
+ else
+ echo "$(date): ERROR - Failed to refresh token"
+ fi
+
+ sleep $REFRESH_INTERVAL
+done
+```
+
+**2. Create Sidecar Container Image:**
+
+```dockerfile
+# Dockerfile.token-refresh
+FROM mcr.microsoft.com/azure-cli:latest
+
+# Install jq for JSON parsing
+RUN apk add --no-cache jq curl bash
+
+# Copy refresh script
+COPY refresh-token.sh /usr/local/bin/refresh-token.sh
+RUN chmod +x /usr/local/bin/refresh-token.sh
+
+# Run the refresh loop
+CMD ["/usr/local/bin/refresh-token.sh"]
+```
+
+**3. Update Kubernetes Deployment:**
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: oracle-client
+ namespace: microhacks
+spec:
+ template:
+ spec:
+ serviceAccountName: oracle-client-sa # With Azure Workload Identity
+ containers:
+ # Main application container
+ - name: app
+ image: your-oracle-client:latest
+ volumeMounts:
+ - name: wallet
+ mountPath: /tmp/wallet
+ env:
+ - name: TNS_ADMIN
+ value: "/tmp/wallet"
+ - name: ORACLE_HOME
+ value: "/opt/oracle/instantclient_23_4"
+ - name: LD_LIBRARY_PATH
+ value: "/opt/oracle/instantclient_23_4"
+
+ # Token refresh sidecar
+ - name: token-refresh
+ image: your-registry/token-refresh:latest
+ volumeMounts:
+ - name: wallet
+ mountPath: /tmp/wallet
+ env:
+ - name: AZURE_CLIENT_ID
+ value: "7d22ece1-dd60-4279-a911-4b7b95934f2e"
+ - name: AZURE_TENANT_ID
+ value: "f71980b2-590a-4de9-90d5-6fbc867da951"
+
+ volumes:
+ - name: wallet
+ emptyDir: {}
+```
+
+**4. Setup Azure Workload Identity:**
+
+```bash
+# Create Azure Managed Identity
+az identity create \
+ --name oracle-token-refresh \
+ --resource-group odaa \
+ --location germanywestcentral
+
+# Get identity details
+IDENTITY_CLIENT_ID=$(az identity show --name oracle-token-refresh --resource-group odaa --query clientId -o tsv)
+IDENTITY_ID=$(az identity show --name oracle-token-refresh --resource-group odaa --query id -o tsv)
+
+# Grant permissions to get tokens for the app registration
+az ad app permission grant \
+ --id 7d22ece1-dd60-4279-a911-4b7b95934f2e \
+ --api 7d22ece1-dd60-4279-a911-4b7b95934f2e \
+ --scope session:scope:connect
+
+# Setup Workload Identity Federation
+az identity federated-credential create \
+ --name oracle-aks-federated \
+ --identity-name oracle-token-refresh \
+ --resource-group odaa \
+ --issuer $(az aks show -n odaa -g odaa --query "oidcIssuerProfile.issuerUrl" -o tsv) \
+ --subject "system:serviceaccount:microhacks:oracle-client-sa"
+
+# Create Kubernetes Service Account
+kubectl create serviceaccount oracle-client-sa -n microhacks
+kubectl annotate serviceaccount oracle-client-sa -n microhacks \
+ azure.workload.identity/client-id=$IDENTITY_CLIENT_ID
+```
+
+---
+
+### ✅ **Option 2: CronJob-based Token Refresh (Simpler, Good for Testing)**
+
+Use Kubernetes CronJob to refresh the token periodically.
+
+```yaml
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: refresh-oracle-token
+ namespace: microhacks
+spec:
+  schedule: "*/45 * * * *" # runs at minutes 0 and 45 of each hour (~45-minute cadence)
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ serviceAccountName: oracle-client-sa
+ containers:
+ - name: token-refresh
+ image: mcr.microsoft.com/azure-cli:latest
+ command:
+ - /bin/bash
+ - -c
+ - |
+ # Get token
+ TOKEN=$(az account get-access-token \
+ --scope "https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e/.default" \
+ --query accessToken -o tsv)
+
+ # Update ConfigMap with new token
+ kubectl create configmap oracle-token \
+ --from-literal=token=$TOKEN \
+ --dry-run=client -o yaml | kubectl apply -f -
+
+ # Restart pods to pick up new token
+ kubectl rollout restart deployment/oracle-client -n microhacks
+ restartPolicy: OnFailure
+```
+
+Then mount the token from ConfigMap:
+
+```yaml
+volumes:
+- name: token
+ configMap:
+ name: oracle-token
+```
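+
+To land the token at the path sqlnet.ora expects, the application container can project the ConfigMap key onto the file with a `subPath` mount; a sketch:
+
+```yaml
+# In the application container spec: mount only the token key as a file
+volumeMounts:
+- name: token
+  mountPath: /tmp/wallet/token.txt
+  subPath: token
+```
+
+Note that `subPath` mounts are not updated in place when the ConfigMap changes, which is why the CronJob above restarts the deployment after each refresh.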
+
+---
+
+### ✅ **Option 3: Application-Level Token Refresh (Best for Custom Apps)**
+
+Implement token refresh logic directly in your application.
+
+#### Python Example with Connection Pool
+
+```python
+# oracle_entraid_client.py
+import os
+import time
+import subprocess
+import threading
+from datetime import datetime, timedelta
+import oracledb
+
+class EntraIDTokenManager:
+ def __init__(self, tenant_id, client_id, scope, token_file):
+ self.tenant_id = tenant_id
+ self.client_id = client_id
+ self.scope = scope
+ self.token_file = token_file
+ self.token_expiry = None
+ self.refresh_thread = None
+ self.running = False
+
+ def get_token(self):
+ """Get new token from Entra ID using Azure CLI or Managed Identity"""
+ try:
+ # Try Managed Identity first
+ import requests
+ response = requests.get(
+ "http://169.254.169.254/metadata/identity/oauth2/token",
+ params={
+ "api-version": "2018-02-01",
+ "resource": self.scope
+ },
+ headers={"Metadata": "true"},
+ timeout=5
+ )
+ if response.status_code == 200:
+ data = response.json()
+ return data['access_token'], data['expires_on']
+        except Exception:
+            pass  # IMDS unavailable; fall back to Azure CLI below
+
+ # Fallback to Azure CLI
+ result = subprocess.run([
+ 'az', 'account', 'get-access-token',
+ '--scope', self.scope,
+ '--query', 'accessToken',
+ '-o', 'tsv'
+ ], capture_output=True, text=True)
+
+ if result.returncode == 0:
+ token = result.stdout.strip()
+ # Default expiry: 60 minutes
+ expiry = int(time.time()) + 3600
+ return token, expiry
+
+ raise Exception("Failed to get token")
+
+ def refresh_token(self):
+ """Refresh token and write to file"""
+ token, expiry = self.get_token()
+
+ # Write token to file (single line, no newline)
+ with open(self.token_file, 'w') as f:
+ f.write(token)
+
+ os.chmod(self.token_file, 0o600)
+ self.token_expiry = datetime.fromtimestamp(int(expiry))
+
+ print(f"Token refreshed. Expires at: {self.token_expiry}")
+
+ def start_refresh_loop(self):
+ """Start background thread to refresh token"""
+ self.running = True
+ self.refresh_thread = threading.Thread(target=self._refresh_loop, daemon=True)
+ self.refresh_thread.start()
+
+ def _refresh_loop(self):
+ """Background loop to refresh token before expiry"""
+ while self.running:
+ try:
+ # Refresh token
+ self.refresh_token()
+
+ # Calculate next refresh time (5 minutes before expiry)
+ if self.token_expiry:
+ time_until_expiry = (self.token_expiry - datetime.now()).total_seconds()
+ sleep_time = max(60, time_until_expiry - 300) # 5 min buffer
+ else:
+ sleep_time = 2700 # 45 minutes default
+
+ print(f"Next token refresh in {sleep_time/60:.1f} minutes")
+ time.sleep(sleep_time)
+
+ except Exception as e:
+ print(f"Error refreshing token: {e}")
+ time.sleep(60) # Retry after 1 minute
+
+ def stop(self):
+ """Stop refresh loop"""
+ self.running = False
+
+
+class OracleEntraIDConnection:
+ def __init__(self, dsn, token_manager):
+ self.dsn = dsn
+ self.token_manager = token_manager
+ self.pool = None
+
+ def create_pool(self, min_connections=2, max_connections=10):
+ """Create connection pool"""
+ # Set TNS_ADMIN for wallet location
+ os.environ['TNS_ADMIN'] = '/tmp/wallet'
+
+ # Create connection pool with external authentication
+ self.pool = oracledb.create_pool(
+ dsn=self.dsn,
+ min=min_connections,
+ max=max_connections,
+ externalauth=True # Use external authentication (token)
+ )
+
+ print(f"Connection pool created: {min_connections}-{max_connections} connections")
+ return self.pool
+
+ def get_connection(self):
+ """Get connection from pool"""
+ if not self.pool:
+ raise Exception("Pool not created. Call create_pool() first.")
+ return self.pool.acquire()
+
+
+# Usage Example
+if __name__ == "__main__":
+ # Configuration
+ TENANT_ID = "f71980b2-590a-4de9-90d5-6fbc867da951"
+ CLIENT_ID = "7d22ece1-dd60-4279-a911-4b7b95934f2e"
+ SCOPE = f"https://cptazure.org/{CLIENT_ID}/.default"
+ TOKEN_FILE = "/tmp/wallet/token.txt"
+ DSN = "adbger_high"
+
+ # Initialize token manager
+ token_mgr = EntraIDTokenManager(TENANT_ID, CLIENT_ID, SCOPE, TOKEN_FILE)
+
+ # Get initial token
+ token_mgr.refresh_token()
+
+ # Start automatic refresh
+ token_mgr.start_refresh_loop()
+
+ # Create Oracle connection
+ oracle_conn = OracleEntraIDConnection(DSN, token_mgr)
+ pool = oracle_conn.create_pool(min_connections=2, max_connections=10)
+
+ # Use connection
+ try:
+ conn = oracle_conn.get_connection()
+ cursor = conn.cursor()
+ cursor.execute("SELECT USER, SYS_CONTEXT('USERENV', 'AUTHENTICATION_METHOD') FROM DUAL")
+ result = cursor.fetchone()
+ print(f"Connected as: {result[0]}, Auth method: {result[1]}")
+ cursor.close()
+ conn.close()
+ finally:
+ token_mgr.stop()
+ pool.close()
+```
+
+---
+
+### **Option 4: Azure Key Vault with Periodic Sync (Enterprise)**
+
+Store and automatically sync tokens via Azure Key Vault.
+
+```bash
+# Store token in Key Vault
+az keyvault secret set \
+ --vault-name your-keyvault \
+ --name oracle-entraid-token \
+ --value "$TOKEN"
+
+# Use CSI driver to mount as volume
+# The CSI driver can be configured to sync every X minutes
+```
+
+```yaml
+apiVersion: secrets-store.csi.x-k8s.io/v1
+kind: SecretProviderClass
+metadata:
+ name: oracle-token-sync
+spec:
+ provider: azure
+ parameters:
+ usePodIdentity: "false"
+ useVMManagedIdentity: "true"
+ keyvaultName: "your-keyvault"
+ objects: |
+ array:
+ - |
+ objectName: oracle-entraid-token
+ objectType: secret
+ objectVersion: ""
+    tenantId: "f71980b2-590a-4de9-90d5-6fbc867da951"
+  # Note: secret rotation is configured on the CSI driver itself
+  # (enableSecretRotation / rotationPollInterval), not per SecretProviderClass
+```
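+
+To consume the synced secret, reference the SecretProviderClass from a CSI volume in the pod spec (a sketch; the volume name is illustrative):
+
+```yaml
+volumes:
+- name: oracle-token
+  csi:
+    driver: secrets-store.csi.k8s.io
+    readOnly: true
+    volumeAttributes:
+      secretProviderClass: oracle-token-sync
+```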
+
+---
+
+## Comparison Matrix
+
+| Approach | Complexity | Reliability | Use Case | Token Refresh |
+|----------|-----------|-------------|----------|---------------|
+| **Sidecar Container** | Medium | ⭐⭐⭐⭐⭐ | Production apps in K8s | Automatic (45 min) |
+| **CronJob** | Low | ⭐⭐⭐ | Testing, simple deployments | Every 45 min |
+| **Application-Level** | Medium-High | ⭐⭐⭐⭐ | Custom applications | Application-controlled |
+| **Key Vault + CSI** | High | ⭐⭐⭐⭐⭐ | Enterprise, multi-pod | CSI sync (configurable) |
+
+---
+
+## Quick Implementation for Your Environment
+
+For the AKS setup used in this microhack, **Option 1 (Sidecar Container)** is the recommended approach. Here's a quick start:
+
+### Step 1: Create the Token Refresh Script
+
+Save this as `misc/refresh-token.sh`:
+
+```bash
+#!/bin/bash
+# (intentionally no "set -e": a failed token refresh should be retried, not kill the loop)
+
+TENANT_ID="${AZURE_TENANT_ID:-f71980b2-590a-4de9-90d5-6fbc867da951}"
+CLIENT_ID="${AZURE_CLIENT_ID:-7d22ece1-dd60-4279-a911-4b7b95934f2e}"
+SCOPE="https://cptazure.org/${CLIENT_ID}/.default"
+TOKEN_FILE="/tmp/wallet/token.txt"
+REFRESH_INTERVAL=${REFRESH_INTERVAL:-2700} # 45 minutes
+
+echo "Starting token refresh service..."
+echo "Tenant: $TENANT_ID"
+echo "Client: $CLIENT_ID"
+echo "Refresh interval: $REFRESH_INTERVAL seconds"
+
+while true; do
+ echo "$(date '+%Y-%m-%d %H:%M:%S'): Refreshing token..."
+
+ # Get token using Azure CLI with Managed Identity
+ TOKEN=$(az account get-access-token \
+ --tenant "$TENANT_ID" \
+ --scope "$SCOPE" \
+ --query accessToken \
+ --output tsv 2>&1)
+
+ if [ $? -eq 0 ] && [ -n "$TOKEN" ]; then
+ # Write token without newline
+ echo -n "$TOKEN" > "$TOKEN_FILE"
+ chmod 600 "$TOKEN_FILE"
+        echo "$(date '+%Y-%m-%d %H:%M:%S'): ✅ Token refreshed successfully"
+
+ # Decode and show expiry time
+ EXP=$(echo "$TOKEN" | cut -d'.' -f2 | base64 -d 2>/dev/null | grep -o '"exp":[0-9]*' | cut -d':' -f2)
+ if [ -n "$EXP" ]; then
+ EXPIRY_DATE=$(date -d "@$EXP" '+%Y-%m-%d %H:%M:%S' 2>/dev/null || echo "unknown")
+ echo "$(date '+%Y-%m-%d %H:%M:%S'): Token expires at: $EXPIRY_DATE"
+ fi
+ else
+        echo "$(date '+%Y-%m-%d %H:%M:%S'): ❌ ERROR - Failed to refresh token: $TOKEN"
+ fi
+
+ echo "$(date '+%Y-%m-%d %H:%M:%S'): Sleeping for $REFRESH_INTERVAL seconds..."
+ sleep $REFRESH_INTERVAL
+done
+```
+
+### Step 2: Build and Push Sidecar Image
+
+```dockerfile
+# misc/Dockerfile.token-refresh
+FROM mcr.microsoft.com/azure-cli:2.55.0
+
+# Install required tools
+RUN apk add --no-cache coreutils bash
+
+# Copy refresh script
+COPY refresh-token.sh /usr/local/bin/refresh-token.sh
+RUN chmod +x /usr/local/bin/refresh-token.sh
+
+# Health check
+HEALTHCHECK --interval=5m --timeout=10s --retries=3 \
+  CMD test -f /tmp/wallet/token.txt && \
+      test -n "$(find /tmp/wallet/token.txt -mmin -60)" || exit 1
+
+CMD ["/usr/local/bin/refresh-token.sh"]
+```
+
+```powershell
+# Build and push
+cd misc
+docker build -f Dockerfile.token-refresh -t <acr-name>.azurecr.io/token-refresh:latest .
+docker push <acr-name>.azurecr.io/token-refresh:latest
+```
+
+### Step 3: Update Your Deployment
+
+Add the sidecar to your existing deployment - see the YAML example in Option 1 above; a minimal sketch of the wiring follows below.
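+
+A minimal sketch of that wiring (container names, the ACR image, and the shared emptyDir are assumptions based on the script above):
+
+```yaml
+spec:
+  template:
+    spec:
+      serviceAccountName: oracle-client-sa
+      volumes:
+      - name: wallet
+        emptyDir: {}
+      containers:
+      - name: oracle-client            # your application container
+        volumeMounts:
+        - name: wallet
+          mountPath: /tmp/wallet
+      - name: token-refresh            # sidecar built in Step 2
+        image: <acr-name>.azurecr.io/token-refresh:latest
+        volumeMounts:
+        - name: wallet
+          mountPath: /tmp/wallet
+```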
+
+---
+
+## Monitoring & Alerts
+
+Set up monitoring to alert when token refresh fails:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: prometheus-alerts
+data:
+ alerts.yml: |
+ groups:
+ - name: oracle-token
+ rules:
+ - alert: TokenRefreshFailed
+ expr: time() - oracle_token_last_refresh_timestamp > 3600
+ for: 5m
+ annotations:
+ summary: "Oracle token hasn't been refreshed in 1 hour"
+```
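+
+> NOTE: The `oracle_token_last_refresh_timestamp` metric is not emitted automatically; the refresh job would need to export it, for example via a Pushgateway or a node-exporter textfile collector.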
+
+---
+
+## Best Practices
+
+1. ✅ **Refresh before expiry** - Refresh 15 minutes before token expiration
+2. ✅ **Use Managed Identity** - Avoid storing credentials in code/config
+3. ✅ **Monitor refresh status** - Set up alerts for failed refreshes
+4. ✅ **Handle failures gracefully** - Retry with exponential backoff
+5. ✅ **Log token events** - Track refresh times and failures
+6. ✅ **Single-line tokens** - Always write tokens without newlines
+7. ✅ **Secure storage** - Set file permissions to 600 (read/write for owner only)
+
+---
+
+## Next Steps
+
+1. Choose the approach that fits your architecture
+2. Implement token refresh automation
+3. Set up monitoring and alerts
+4. Test token expiry scenarios
+5. Document the solution for your team
+
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/clone-partial-repo.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/clone-partial-repo.md
new file mode 100644
index 000000000..f1b27baee
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/clone-partial-repo.md
@@ -0,0 +1,45 @@
+# Clone Partial Repository
+
+This guide shows how to clone only the "Oracle on Azure" project from the Git repository without downloading the entire project history.
+
+> NOTE: During the challenge we set up Azure Cloud Shell; feel free to use it to clone the repo. Alternatively, you can run these commands from your local PC.
+
+## Quick Start
+
+```powershell
+# Clone with sparse-checkout (recommended)
+git clone --depth 1 --filter=blob:none --sparse https://github.com/cpinotossi/msftmh.git
+
+cd msftmh
+
+# Checkout only the Oracle on Azure folder
+git sparse-checkout set 03-Azure/01-03-Infrastructure/10_Oracle_on_Azure
+```
+
+## What This Does
+
+- `--depth 1`: Downloads only the latest commit (shallow clone)
+- `--filter=blob:none`: Downloads only necessary files, not all file versions
+- `--sparse`: Enables sparse-checkout mode
+- `git sparse-checkout set`: Specifies which folder to download
+
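+If you later need additional folders, the sparse checkout can be extended at any time (requires Git 2.26+; the path below is only an example):
+
+```powershell
+git sparse-checkout add <another/folder/path>
+```
+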
+## Switch to the right folder
+
+You'll have only the `10_Oracle_on_Azure` folder with its contents, saving bandwidth and disk space.
+
+~~~powershell
+cd 03-Azure/01-03-Infrastructure/10_Oracle_on_Azure
+~~~
+
+## Tips and Tricks
+
+### Customizing the Prompt
+
+The following PowerShell function customizes your prompt to show only the current folder name, making it easier to identify your location in the terminal.
+
+~~~powershell
+function prompt {
+ $currentFolder = (Get-Item -Path ".\" -Verbose).Name
+ "PS $currentFolder> "
+}
+~~~
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/create.odaa.adb.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/create.odaa.adb.md
new file mode 100644
index 000000000..dc1a5dbef
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/create.odaa.adb.md
@@ -0,0 +1,37 @@
+# Oracle Database @ Azure (ODAA) - Deployment Scripts for ADB
+
+### Log in to Azure and set the right subscription
+
+~~~powershell
+az login # choose your assigned user account, e.g. user01@cptazure.org, or the "Work or school account" option
+az account show
+az account set -s "ODAA"
+# Register required providers for odaa
+# TBD: Check if all are required
+az provider register --namespace "Microsoft.Oracle"
+az provider register --namespace "Microsoft.Baremetal"
+az provider register --namespace "Microsoft.Network"
+~~~
+
+### Define some environment variables
+
+~~~powershell
+$prefix="odaa"
+$postfix="1"
+$location="francecentral"
+$password = Read-Host -Prompt "Enter the shared password"
+$cidr="10.0.0.0"
+~~~
+
+### Create Azure Resources
+
+> ℹ️ **NOTE:** These resources will be created manually during the workshop.
+
+~~~powershell
+az deployment sub create -n $prefix -l $location -f ./resources/infra/bicep/odaa/main.bicep -p location=$location prefix=$prefix postfix=$postfix password=$password cidr=$cidr
+# Verify the created resources, list all resources inside the resource group
+$rgName="$prefix$postfix" # assumption: name of the resource group created by the deployment
+az resource list -g $rgName -o table --query "[].{Name:name, Type:type}"
+~~~
+
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/deploy-adbping.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/deploy-adbping.md
new file mode 100644
index 000000000..2cf634b74
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/deploy-adbping.md
@@ -0,0 +1,106 @@
+# Deploy Oracle ADB Ping Pod
+
+## Prerequisites
+
+- AKS cluster with access to Oracle Container Registry
+- Oracle Container Registry credentials (if using private images)
+- ODAA Autonomous Database connection details
+
+## Step 1: Create Oracle Container Registry Secret (if needed)
+
+```powershell
+kubectl create secret docker-registry ocir-secret -n microhacks `
+  --docker-server=container-registry.oracle.com `
+  --docker-username='<oracle-account-email>' `
+  --docker-password='<oracle-auth-token>' `
+  --docker-email='<oracle-account-email>'
+```
+
+## Step 2: Deploy the Pod
+
+```powershell
+kubectl apply -f resources/pods/oracle-adbping.yaml
+```
+
+Wait for the pod to be ready:
+
+```powershell
+kubectl wait pod/oracle-adbping -n microhacks --for=condition=Ready --timeout=120s
+```
+
+## Step 3: Copy the adbping Script to the Pod
+
+```powershell
+kubectl cp resources/scripts/adbping.sh microhacks/oracle-adbping:/home/oracle/adbping.sh
+kubectl exec -n microhacks oracle-adbping -- chmod +x /home/oracle/adbping.sh
+```
+
+## Step 4: Run the ADB Ping Test
+
+```powershell
+# Set your connection details
+$ADB_HOST = "zeii0mxy.adb.eu-paris-1.oraclecloud.com"
+$ADB_SERVICE = "gc2401553d1c7ab_adbuser01_high.adb.oraclecloud.com"
+$ADB_USER = "admin"
+$ADB_PASSWORD = Read-Host -Prompt "Enter the shared password"
+
+# Build connection string
+$CONNECTION_STRING = "(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=$ADB_HOST))(connect_data=(service_name=$ADB_SERVICE))(security=(ssl_server_dn_match=no)))"
+
+# Execute the ping test
+kubectl exec -n microhacks oracle-adbping -- /home/oracle/adbping.sh "$CONNECTION_STRING" "$ADB_USER" "$ADB_PASSWORD" 10
+```
+
+## Step 5: Interactive Shell (Optional)
+
+For manual testing:
+
+```powershell
+kubectl exec -it -n microhacks oracle-adbping -- /bin/bash
+
+# Inside the pod:
+export TNS_CONN="(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=zeii0mxy.adb.eu-paris-1.oraclecloud.com))(connect_data=(service_name=gc2401553d1c7ab_adbuser01_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))"
+
+# Test with tnsping
+tnsping "$TNS_CONN"
+
+# Test with sqlplus
+sqlplus admin/"<assigned password>"@"$TNS_CONN"
+```
+
+## Cleanup
+
+```powershell
+kubectl delete pod oracle-adbping -n microhacks
+```
+
+## Troubleshooting
+
+### Image Pull Issues
+
+If the pod fails to pull the image:
+
+1. Check the image pull secret:
+ ```powershell
+ kubectl get secret ocir-secret -n microhacks
+ ```
+
+2. Use an alternative public image:
+ ```yaml
+ image: ghcr.io/gvenzl/oracle-instantclient:21
+ # Remove imagePullSecrets section
+ ```
+
+### Connection Failures
+
+1. Verify DNS resolution:
+ ```powershell
+ kubectl exec -n microhacks oracle-adbping -- nslookup zeii0mxy.adb.eu-paris-1.oraclecloud.com
+ ```
+
+2. Check network connectivity:
+ ```powershell
+ kubectl exec -n microhacks oracle-adbping -- openssl s_client -connect zeii0mxy.adb.eu-paris-1.oraclecloud.com:1521 -brief
+ ```
+
+3. Review NSG rules and VNet peering between AKS and ODAA subnets
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/import-oci-image-to-acr.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/import-oci-image-to-acr.md
new file mode 100644
index 000000000..4ed4e7bc5
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/import-oci-image-to-acr.md
@@ -0,0 +1,160 @@
+# Importing Oracle Container Registry Images to Azure Container Registry
+
+This guide explains how to import Oracle GoldenGate images from Oracle Container Image Registry (OCIR) to Azure Container Registry (ACR).
+
+## Overview
+
+The `az acr import` command allows you to import container images from external registries directly into your Azure Container Registry without needing to pull and push the image locally.
+
+## Prerequisites
+
+1. **Azure CLI** installed and authenticated
+2. **Azure Container Registry** (e.g., `odaamh.azurecr.io`)
+3. **Oracle Cloud Infrastructure (OCI) credentials**:
+ - OCI username
+ - OCI Auth Token
+ - Tenancy namespace
+
+## Getting OCI Credentials
+
+### 1. Tenancy Namespace
+Your tenancy namespace is visible in the Oracle Container Registry URL. For example:
+- URL: `fra.ocir.io/frul1g8cgfam/pub_gg_micro_bigdata:23.4.0.24.06`
+- Tenancy namespace: `frul1g8cgfam`
+
+### 2. OCI Username
+Your OCI username format depends on your identity provider:
+- **OCI IAM**: `<username>`
+- **Oracle Identity Cloud Service (IDCS)**: `oracleidentitycloudservice/<username>`
+- **Federated users**: `<identity-provider>/<username>`
+
+### 3. Auth Token
+Generate an Auth Token in the OCI Console:
+1. Sign in to Oracle Cloud Console
+2. Click your profile icon β **User Settings**
+3. Under **Resources**, click **Auth Tokens**
+4. Click **Generate Token**
+5. Provide a description and click **Generate Token**
+6. **Copy and save the token immediately** (it won't be shown again)
+
+## Import Command
+
+### Basic Syntax
+
+```powershell
+az acr import `
+  --name <acr-name> `
+  --source <source-registry>/<repository>:<tag> `
+  --image <target-repository>:<tag> `
+  --username "<tenancy-namespace>/<oci-username>" `
+  --password "<oci-auth-token>"
+```
+
+### Example: Importing GoldenGate BigData Image
+
+```powershell
+# Set the correct Azure subscription
+az account set --subscription 09808f31-065f-4231-914d-776c2d6bbe34
+
+# Import the image
+az acr import `
+  --name odaamh `
+  --source fra.ocir.io/frul1g8cgfam/pub_gg_micro_bigdata:23.4.0.24.06 `
+  --image goldengate/pub_gg_micro_bigdata:23.4.0.24.06 `
+  --username "frul1g8cgfam/<oci-username>" `
+  --password "<oci-auth-token>"
+```
+
+### Using Environment Variables (Recommended for Security)
+
+```powershell
+# Store credentials in environment variables
+$env:OCI_USERNAME = "frul1g8cgfam/<oci-username>"
+$env:OCI_AUTH_TOKEN = "<oci-auth-token>"
+
+# Import using environment variables
+az acr import `
+ --name odaamh `
+ --source fra.ocir.io/frul1g8cgfam/pub_gg_micro_bigdata:23.4.0.24.06 `
+ --image goldengate/pub_gg_micro_bigdata:23.4.0.24.06 `
+ --username $env:OCI_USERNAME `
+ --password $env:OCI_AUTH_TOKEN
+```
+
+## Available Images to Import
+
+Based on the configuration in `ggfabric.yaml`, you may need to import:
+
+1. **BigData Image (23.4.0)**:
+ ```
+ Source: fra.ocir.io/frul1g8cgfam/pub_gg_micro_bigdata:23.4.0.24.06
+ Target: odaamh.azurecr.io/goldengate/pub_gg_micro_bigdata:23.4.0.24.06
+ ```
+
+2. **BigData Image (23.8.4)**:
+ ```
+ Source: fra.ocir.io/frul1g8cgfam/pub_gg_micro_bigdata:23.8.4.25.08
+ Target: odaamh.azurecr.io/goldengate/pub_gg_micro_bigdata:23.8.4.25.08
+ ```
+
+## Verification
+
+After importing, verify the image is available in your ACR:
+
+```powershell
+# List all repositories
+az acr repository list --name odaamh --output table
+
+# Show tags for a specific repository
+az acr repository show-tags --name odaamh --repository goldengate/pub_gg_micro_bigdata --output table
+
+# Get image details
+az acr repository show --name odaamh --image goldengate/pub_gg_micro_bigdata:23.4.0.24.06
+```
+
+## Updating Kubernetes Deployments
+
+After importing, update your Helm values or Kubernetes manifests to use the ACR image:
+
+```yaml
+image:
+ imageName: odaamh.azurecr.io/goldengate/pub_gg_micro_bigdata:23.4.0.24.06
+```
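+
+> NOTE: If the ACR is attached to the AKS cluster (`az aks update --attach-acr`), no `imagePullSecrets` are required; the kubelet pulls with the cluster's managed identity.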
+
+## Troubleshooting
+
+### 403 Forbidden Error
+```
+Anonymous users are only allowed read access on public repos
+```
+**Solution**: Ensure you're providing valid OCI credentials with `--username` and `--password` flags.
+
+### Invalid Credentials
+**Solution**:
+- Verify your OCI username format matches your identity provider
+- Ensure the Auth Token is valid and not expired
+- Check that the tenancy namespace is correct
+
+### Subscription Not Found
+**Solution**: Set the correct Azure subscription:
+```powershell
+az account set --subscription <subscription-id>
+```
+
+### Image Not Found in Source Registry
+**Solution**:
+- Verify you have access to the OCI repository
+- Check that the image path and tag are correct
+- Ensure your OCI user has pull permissions for the repository
+
+## Additional Resources
+
+- [Azure Container Registry Import Documentation](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-import-images)
+- [Oracle Container Registry Documentation](https://docs.oracle.com/en-us/iaas/Content/Registry/home.htm)
+- [Managing Auth Tokens in OCI](https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/managingcredentials.htm)
+
+## Related Files
+
+- `ggfabric.yaml` - Helm values file containing image configurations
+- `resources/gg-bigdata-build/` - GoldenGate build resources
+- `resources/infra/` - Infrastructure deployment files
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/odaa-get-token.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/odaa-get-token.md
new file mode 100644
index 000000000..c1529eb05
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/docs/odaa-get-token.md
@@ -0,0 +1,44 @@
+# How to retrieve the Oracle Autonomous Database connection string from ODAA
+
+To connect to the Oracle Database you will need the TNS connection string.
+
+## Retrieve the connection string via the Azure portal from the ODAA ADB instance
+
+1. Go to the Azure portal and search for "adb" in the search bar on top.
+2. Select "Oracle Database@Azure" from the search results.
+3. Select "Oracle Autonomous Database Service" from the left menu.
+4. Select your created ADB instance.
+5. Select "Connection" from the left menu.
+6. Select the High profile with TLS authentication and copy the TLS connection string.
+
+## Alternatively, you can use the Azure CLI to retrieve the connection string.
+
+~~~powershell
+# Prerequisites (if not already installed)
+az extension add --name oracle-database
+
+$adbName="user02" # replace with your ADB name
+
+# Switch to the subscription where ODAA is deployed
+$subODAA="sub-mhodaa"
+az account set --subscription $subODAA
+
+$rgODAA="odaa-user02" # replace with your resource group name
+
+# Enable preview features for Oracle Database extension
+az config set extension.dynamic_install_allow_preview=true
+# Retrieve TNS Connection string High profile (TCPS, tlsAuthentication = Server)
+$trgConn=az oracle-database autonomous-database show -g $rgODAA -n $adbName --query "connectionStrings.profiles[?consumerGroup=='High' && protocol=='TCPS' && tlsAuthentication=='Server'].value | [0]" -o tsv
+
+echo $trgConn
+~~~
+
+Output should look similar to this:
+
+~~~text
+(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=zeii0mxy.adb.eu-paris-1.oraclecloud.com))(connect_data=(service_name=gc2401553d1c7ab_adbuser01_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))
+~~~
+
+
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/ENDE MFA.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/ENDE MFA.png
new file mode 100644
index 000000000..e14dc86a7
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/ENDE MFA.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA.png
new file mode 100644
index 000000000..46a7aa706
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA1.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA1.png
new file mode 100644
index 000000000..aee200cc4
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA1.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA2.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA2.png
new file mode 100644
index 000000000..b638d7bbb
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA2.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA3.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA3.png
new file mode 100644
index 000000000..4b35a6814
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA3.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA4.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA4.png
new file mode 100644
index 000000000..6c72f08ff
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA4.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA5.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA5.png
new file mode 100644
index 000000000..ec3c7bc1e
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA5.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA6.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA6.png
new file mode 100644
index 000000000..a40d86872
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/MFA6.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 0.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 0.png
new file mode 100644
index 000000000..064db298b
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 0.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 1.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 1.png
new file mode 100644
index 000000000..154aa9a79
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 1.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 2.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 2.png
new file mode 100644
index 000000000..0536455fb
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 2.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 3.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 3.png
new file mode 100644
index 000000000..a109f1393
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 3.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 4.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 4.png
new file mode 100644
index 000000000..689dade7b
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 4.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 5.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 5.png
new file mode 100644
index 000000000..1ab893d7c
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/conditional_access_issue/conditional access issue 5.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/create_browser_profile.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/create_browser_profile.png
new file mode 100644
index 000000000..1a47fd43f
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/create_browser_profile.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/logo_ODAA_microhack_1900x300.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/logo_ODAA_microhack_1900x300.jpg
new file mode 100644
index 000000000..9c7dd4659
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/logo_ODAA_microhack_1900x300.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/overivew deployment.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/overivew deployment.png
new file mode 100644
index 000000000..cfb8acfc2
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/overivew deployment.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available1.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available1.png
new file mode 100644
index 000000000..3535f4d68
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available1.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available2.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available2.png
new file mode 100644
index 000000000..046778e8d
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available2.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available3.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available3.png
new file mode 100644
index 000000000..52f5d9249
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available3.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available4.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available4.png
new file mode 100644
index 000000000..e82878970
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available4.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available5.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available5.png
new file mode 100644
index 000000000..f749de832
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available5.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available6.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available6.png
new file mode 100644
index 000000000..8859f5357
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/media/resource_group_check/first_check_rg_available6.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/README.md
new file mode 100644
index 000000000..8e47e05a7
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/README.md
@@ -0,0 +1,192 @@
+# Oracle Database @ Azure (ODAA) - Deployment Scripts
+
+## Install the Microhack Environment
+
+The following resources need to be created before the workshop starts.
+
+- Azure Resource Group
+- Azure Kubernetes Service (AKS)
+- NGINX ingress controller installed on AKS
+
+> ⚠️ **IMPORTANT:** Make sure the CIDR of the created VNet is added to the Oracle NSG.
+
+### Prerequisites
+
+- Install Azure CLI
+- Install kubectl
+- Install Helm
+- Install jq
+- Scripts need to run in a bash shell (Linux, macOS, WSL2 on Windows)
+
+### Log in to Azure and set the right subscription
+
+~~~powershell
+az login # choose your assigned user account, e.g. user01@cptazure.org, or the "Work or school account" option
+az account show
+az account set -s "<subscription-name>"
+az provider register --namespace Microsoft.ContainerService
+az provider register --namespace Microsoft.Network
+az provider register --namespace Microsoft.OperationalInsights
+az provider register --namespace Microsoft.Compute
+~~~
+
+### Define some environment variables
+
+~~~powershell
+$prefix="team"
+$postfix="1"
+$location="francecentral"
+~~~
+
+### Create Azure Resources
+
+> ℹ️ **NOTE:** Currently you will need to redo these steps for each team environment. Make sure to change the postfix.
+
+~~~powershell
+az deployment sub create -n "$prefix$postfix" -l $location -f ./resources/infra/bicep/aks/main.bicep -p location=$location prefix=$prefix postfix=$postfix aksVmSize="Standard_D8ads_v6" cidr="10.11.0.0"
+# Verify the created resources, list all resources inside the resource group
+az resource list -g "$prefix$postfix" -o table --query "[].{Name:name, Type:type}"
+~~~
+
+~~~text
+Name Type
+------ ------------------------------------------
+odaa1 Microsoft.Network/virtualNetworks
+odaa1 Microsoft.OperationalInsights/workspaces
+odaa1 Microsoft.ContainerService/managedClusters
+~~~
+
+### Connect to AKS
+
+~~~powershell
+# set the right subscription
+az account set -s "sub-team0"
+# login to aks
+az aks get-credentials -g "aks-team0" -n "aks-team0" --overwrite-existing
+# list namespaces
+kubectl get namespaces # should show default, kube-system, kube-public
+~~~
+
+~~~text
+NAME STATUS AGE
+default Active 10m
+gatekeeper-system Active 9m37s
+kube-node-lease Active 10m
+kube-public Active 10m
+kube-system Active 10m
+~~~
+
+### Install Ingress Controller
+
+An ingress controller on Azure Kubernetes Service (AKS) manages external access to services running inside your cluster. It acts as a gateway, routing HTTP and HTTPS traffic from outside the cluster to the appropriate internal services based on rules you define. This enables features like SSL termination, load balancing, and path-based routing, making it easier to securely expose and manage multiple applications within AKS.
+
+~~~powershell
+# Change directory to the scripts
+# cd scripts/k8s_install/
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+kubectl create namespace ingress-nginx
+helm install nginx-quick ingress-nginx/ingress-nginx -n ingress-nginx
+# patch health probe:
+kubectl patch service nginx-quick-ingress-nginx-controller -n ingress-nginx -p '{\"metadata\":{\"annotations\":{\"service.beta.kubernetes.io/azure-load-balancer-health-probe-request-path\":\"/healthz\"}}}'
+# verify if annotation is added
+kubectl get service nginx-quick-ingress-nginx-controller -n ingress-nginx -o jsonpath='{.metadata.annotations}' | jq
+kubectl get service --namespace ingress-nginx nginx-quick-ingress-nginx-controller --output wide
+# get external IP of nginx controller, you maybe need to wait a few minutes until the IP is assigned
+kubectl get service -n ingress-nginx -o jsonpath='{.items[*].status.loadBalancer.ingress[*].ip}'
+~~~
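+
+Once the controller has an external IP, a minimal Ingress resource (service name and path are illustrative) routes traffic to a backend service:
+
+~~~yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: demo-ingress
+  namespace: default
+spec:
+  ingressClassName: nginx
+  rules:
+  - http:
+      paths:
+      - path: /demo
+        pathType: Prefix
+        backend:
+          service:
+            name: demo-service
+            port:
+              number: 80
+~~~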
+
+### Set up Microsoft Fabric
+
+Based on https://learn.microsoft.com/en-us/fabric/data-engineering/tutorial-lakehouse-introduction#lakehouse-end-to-end-scenario
+
+1. Open https://app.fabric.microsoft.com/home?experience=power-bi
+2. Sign in to your Power BI account and sign up for the free Microsoft Fabric trial. If you don't have a Power BI license, sign up for a Fabric free license and then you can start the Fabric trial.
+
+Build and implement an end-to-end lakehouse for your organization:
+
+- Create a Fabric workspace.
+- Create a lakehouse.
+- Ingest data, transform data, and load it into the lakehouse. You can also explore OneLake, one copy of your data across lakehouse mode and SQL analytics endpoint mode.
+- Connect to your lakehouse using the SQL analytics endpoint and create a Power BI report using DirectLake to analyze sales data across different dimensions.
+- Optionally, orchestrate and schedule data ingestion and transformation flows with a pipeline.
+- Clean up resources by deleting the workspace and other items.
+
+#### Install the GoldenGate for Distributed Applications and Analytics Image
+
+GoldenGate for Distributed Applications and Analytics v23.4.0.24.06 on Linux x86-64
+
+Links:
+- [Overview of all available GoldenGate download images](https://www.oracle.com/middleware/technologies/goldengate-downloads.html#)
+- [Download page for GoldenGate for Distributed Applications and Analytics](https://edelivery.oracle.com/ocom/faces/Downloads;jsessionid=ir4RtGq2ylyafl5mEIgKLVFghwS6M8qi1_-8fuPA1wyWxNb2EYUh!122914563?dlp_cid=1184745&rel_cid=1153160&auth_token=1761237128_MDA0ZDFkMjczNTYyNmU3YzE2YTFmZjJlZmQ3NTBjOWIxNjRlOGY3MGFhZDI0NzQyY2Y1Yjc3NThiMzBkZmUyMzo6b3NkY19vcmFjbGUuY29t#)
+
+Build your own image and push it to your private Azure Container Registry (ACR).
+
+~~~powershell
+# switch to ACR subscription
+az account set -s "<acr-subscription>"
+# change to directory where Dockerfile is located
+cd .\10_Oracle_on_Azure\misc\goldengate-temp
+# build and push image to ACR
+az acr build --registry odaamh --image goldengate/goldengate-oracle-bigdata:23.4.0.24.06 --file Dockerfile .
+
+az acr repository list --name odaamh --output table
+
+~~~
+
+### Attach ACR to AKS
+
+~~~powershell
+# switch to the ACR subscription
+az account set -s "<acr-subscription>"
+$acrId = az acr show --name odaamh --resource-group odaa --query "id" --output tsv
+
+# switch to the AKS subscription
+az account set -s "<aks-subscription>"
+az aks update --resource-group odaa1 --name odaa1 --attach-acr $acrId
+~~~
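+
+To verify that the cluster can actually pull from the registry, `az aks check-acr` runs a validation from inside the cluster:
+
+~~~powershell
+az aks check-acr --resource-group odaa1 --name odaa1 --acr odaamh.azurecr.io
+~~~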
+
+## Tips and Tricks
+
+### VNet Peering between two subscriptions
+
+If your ODAA environment runs in a different tenant/subscription, you need to create a VNet peering between the two VNets.
+
+~~~powershell
+$postfixODAA = "2"
+$postfixAKS = "1"
+$subODAAName = "ODAA"
+$subAKSName = "sub-1"
+
+az login -t "<tenant-id>"
+az account set -s $subODAAName
+
+# Peering AKS VNet to ODAA VNet
+# We need to retrieve the subscription IDs first of the ODAA Vnet
+az account set -s $subODAAName;
+$subODAAId = az account show --query id -o tsv
+# Now we need to login into the subscription where AKS is deployed
+az login -t "<tenant-id>"
+az account set -s $subAKSName;
+$subAKSId = az account show --query id -o tsv
+az network vnet peering create --name AKS-to-ODAA -g "$prefix$postfixAKS" --vnet-name "$prefix$postfixAKS" --remote-vnet /subscriptions/$subODAAId/resourceGroups/"$prefix$postfixODAA"/providers/Microsoft.Network/virtualNetworks/"$prefix$postfixODAA" --allow-vnet-access
+# Peering ODAA VNet to AKS VNet
+az account set -s $subODAAName;
+az network vnet peering create -n ODAA-to-AKS -g "$prefix$postfixODAA" --vnet-name "$prefix$postfixODAA" --remote-vnet /subscriptions/$subAKSId/resourceGroups/"$prefix$postfixAKS"/providers/Microsoft.Network/virtualNetworks/"$prefix$postfixAKS" --allow-vnet-access
+
+
+# Verify peering on the ODAA subscription
+az network vnet peering list -g "$prefix$postfixODAA" --vnet-name "$prefix$postfixODAA" -o table
+az account set -s $subAKSName
+az network vnet peering list -g "$prefix$postfixAKS" --vnet-name "$prefix$postfixAKS" -o table
+~~~
+
+### Validate Ingress controller in AKS
+
+~~~powershell
+kubectl get service --namespace ingress-nginx nginx-quick-ingress-nginx-controller
+# validate health probe
+kubectl get service nginx-quick-ingress-nginx-controller -n ingress-nginx -o jsonpath='{.metadata.annotations}'
+
+~~~
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/README.md
deleted file mode 100644
index 452569f55..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/README.md
+++ /dev/null
@@ -1,78 +0,0 @@
-# Step-by-step Instructions how to Deploy Oracle Data Guard on Azure VMs - Terraform Automation
-
-## Overview
-
-This repository contains code to install and configure Oracle databases on Azure VM IaaS in an automated fashion. The scenario of two VMs in an Oracle Dataguard configuration, deployed through Terraform (TODO: and Ansible).
-
-For more information about how to install and configure Data Guard on an Azure virtual machine (VM) with CLI refer to the documentation [here](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/oracle-dataguard).
-
-__Important Note - Disclaimer__: The code of this repository is largely based on the Oracle Deployment Automation repository (lza-oracle), which can be found [here](https://github.com/Azure/lza-oracle). The goal of the Terraform automation scripts in this repository is primarily to facilitate the successful execution of the Microhack. The code in this repository is not intended for production use and should be used with caution.
-At the lza-oracle repository, you can find the code for deploying Oracle databases on Azure VMs using different scenarios, such as single and Dataguard using Terraform, Bicept and Ansible.
-If you are interested in deploying Oracle databases on Azure VMs, we recommend you to check the [lza-oracle](https://github.com/Azure/lza-oracle) repository.
-
-Note that Oracle licensing is not a part of this solution. Please verify that you have the necessary Oracle licenses to run Oracle software on Azure IaaS.
-
-
-The above resources can be deployed using the sample Github action workflows provided in the repository. The workflows are designed to deploy the infrastructure and configure the Oracle database on the VMs. This is the recommended way to deploy the infrastructure and configure the Oracle database. Alternatively the infrastructure can be deployed using Azure CLI and the Oracle database can be configured using Ansible.
-
-Note that the code provided in this repository is for demonstration purposes only and should not be used in a production environment without thorough testing.
-
-## Prerequisites
-
-1. Azure Entra ID Tenant.
-2. Minimum 1 subscription, for when deploying VMs. If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/en-us/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio) before you begin.
-3. Azure CLI installed on your local machine. You can install Azure CLI from [here](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli).
-4. Terraform installed on your local machine. You can install Terraform from [here](https://learn.hashicorp.com/tutorials/terraform/install-cli).
-
-
-## 1. Authenticate Terraform to Azure
-
-To use Terraform commands against your Azure subscription, you must first authenticate Terraform to that subscription. [This doc](https://learn.microsoft.com/en-us/azure/developer/terraform/authenticate-to-azure?tabs=bash) describes how to authenticate Terraform to your Azure subscription.
-
-### 2. Create SSH Key
-
-To deploy Oracle Data Guard on the VMs, you can use **data_guard** module in this repo. The module is located on `terraform/data_guard` directory.
-
-Before using this module, you have to create your own ssh key to deploy and connect to the two virtual machines you will create.
-
-```bash
-ssh-keygen -f ~/.ssh/mh-oracle-data-guard
-
-ls -lha ~/.ssh/
--rw------- 1 yourname staff 2.6K 8 17 2023 mh-oracle-data-guard
--rw-r--r-- 1 yourname staff 589B 8 17 2023 mh-oracle-data-guard.pub
-```
-
-### 4. Define Variables
-
-Define the variables such as location and Resource Group name in the `global_variables.tf` file. For more reference on all variables you can set, see [variables description](variables.md)
-
-Next, you go to `terraform/data_guard` directory and create `fixtures.tfvars` file, then copy the contents of the ssh public key used for deploying virtual machines on Azure (~/.ssh/mh-oracle-data-guard.pub).
-
-This is a sample `fixtures.tfvars` file.
-
-```tf:fixtures.tfvars
-ssh_key = "ssh-rsa xxxxxxxxxxxxxx="
-```
-### 5. Execute Terraform Commands
-Execute below Terraform commands. When you deploy resources to Azure, you have to indicate `fixtures.tfvars` as a variable file, which contains the ssh public key.
-
-```bash
-
-$ terraform init
-
-$ terraform plan -var-file=fixtures.tfvars
-
-$ terraform apply -var-file=fixtures.tfvars
-```
-
-You can connect to the virtual machine with ssh private key. While deploying resources, a public ip address is generated and attached to the virtual machine, so that you can connect to the virtual machine with this IP address. The username is `oracle`, which is fixed in `terraform/data_guard/module.tf`.
-
-```
-$ ssh -i ~/.ssh/mh-oracle-data-guard oracle@
-
-
-
-## Trademarks
-
-This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies.
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/LICENSE b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/LICENSE
deleted file mode 100644
index 9e841e7a2..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
- MIT License
-
- Copyright (c) Microsoft Corporation.
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/backend.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/backend.tf
deleted file mode 100644
index 556a8c85c..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/backend.tf
+++ /dev/null
@@ -1,3 +0,0 @@
-terraform {
- backend "local" {}
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/jit_rule.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/jit_rule.tf
deleted file mode 100644
index 39ba4ff7d..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/jit_rule.tf
+++ /dev/null
@@ -1,91 +0,0 @@
-#########################################################################################
-# #
-# JIT Access Policy #
-# #
-#########################################################################################
-data "azurerm_virtual_machine" "oracle_primary_vm" {
- name = module.vm_primary.vm.name
- resource_group_name = module.common_infrastructure.resource_group.name
-
- depends_on = [module.vm_primary,
- module.storage_primary
- ]
-}
-
-data "azurerm_virtual_machine" "oracle_secondary_vm" {
- name = module.vm_secondary.vm.name
- resource_group_name = module.common_infrastructure.resource_group.name
-
- depends_on = [module.vm_secondary
- , module.storage_secondary
- ]
-}
-
-resource "time_sleep" "wait_for_primary_vm_creation" {
- create_duration = var.jit_wait_for_vm_creation
-
- depends_on = [data.azurerm_virtual_machine.oracle_primary_vm,
- module.storage_primary
- ]
-}
-
-resource "time_sleep" "wait_for_secondary_vm_creation" {
- create_duration = var.jit_wait_for_vm_creation
-
- depends_on = [data.azurerm_virtual_machine.oracle_secondary_vm
- , module.storage_secondary
- ]
-}
-
-
-resource "azapi_resource" "jit_ssh_policy_primary" {
- count = module.vm_primary.database_server_count
- name = "JIT-SSH-Policy-primary"
- parent_id = "${module.common_infrastructure.resource_group.id}/providers/Microsoft.Security/locations/${module.common_infrastructure.resource_group.location}"
- type = "Microsoft.Security/locations/jitNetworkAccessPolicies@2020-01-01"
- schema_validation_enabled = false
- body = jsonencode({
- "kind" : "Basic"
- "properties" : {
- "virtualMachines" : [{
- "id" : "/subscriptions/${module.common_infrastructure.current_subscription.subscription_id}/resourceGroups/${module.common_infrastructure.resource_group.name}/providers/Microsoft.Compute/virtualMachines/${module.vm_primary.vm.name}",
- "ports" : [
- {
- "number" : 22,
- "protocol" : "TCP",
- "allowedSourceAddressPrefix" : "*",
- "maxRequestAccessDuration" : "PT3H"
- }
- ]
- }]
- }
- })
-
- depends_on = [time_sleep.wait_for_primary_vm_creation]
-}
-
-resource "azapi_resource" "jit_ssh_policy_secondary" {
- count = module.vm_secondary.database_server_count
- name = "JIT-SSH-Policy-secondary"
- parent_id = "${module.common_infrastructure.resource_group.id}/providers/Microsoft.Security/locations/${module.common_infrastructure.resource_group.location}"
- type = "Microsoft.Security/locations/jitNetworkAccessPolicies@2020-01-01"
- schema_validation_enabled = false
- body = jsonencode({
- "kind" : "Basic"
- "properties" : {
- "virtualMachines" : [{
- "id" : "/subscriptions/${module.common_infrastructure.current_subscription.subscription_id}/resourceGroups/${module.common_infrastructure.resource_group.name}/providers/Microsoft.Compute/virtualMachines/${module.vm_secondary.vm.name}",
- "ports" : [
- {
- "number" : 22,
- "protocol" : "TCP",
- "allowedSourceAddressPrefix" : "*",
- "maxRequestAccessDuration" : "PT3H"
- }
- ]
- }]
- }
- })
-
- depends_on = [time_sleep.wait_for_secondary_vm_creation]
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/module.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/module.tf
deleted file mode 100644
index 2e94560c4..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/module.tf
+++ /dev/null
@@ -1,257 +0,0 @@
-data "azurerm_client_config" "current" {}
-
-module "common_infrastructure" {
- source = "./modules/common_infrastructure"
-
- infrastructure = local.infrastructure
- is_data_guard = true
- is_diagnostic_settings_enabled = var.is_diagnostic_settings_enabled
- diagnostic_target = var.diagnostic_target
- tags = var.resourcegroup_tags
-
-}
-
-module "vm_primary" {
- source = "./modules/compute"
-
- resource_group_name = module.common_infrastructure.created_resource_group_name
- location = var.location
- vm_name = "vm-primary-0"
- public_key = var.ssh_key
- sid_username = "oracle"
- vm_sku = var.vm_sku
-
- vm_source_image_reference = var.vm_source_image_reference
- aad_system_assigned_identity = true
- public_ip_address_resource_id = module.network.db_server_puplic_ip_resources[0].id
-
-
- is_diagnostic_settings_enabled = module.common_infrastructure.is_diagnostic_settings_enabled
- diagnostic_target = module.common_infrastructure.diagnostic_target
- storage_account_id = module.common_infrastructure.target_storage_account_id
- storage_account_sas_token = module.common_infrastructure.target_storage_account_sas
- log_analytics_workspace = module.common_infrastructure.log_analytics_workspace != null ? {
- id = module.common_infrastructure.log_analytics_workspace.id
- name = module.common_infrastructure.log_analytics_workspace.name
- } : null
- data_collection_rules = module.common_infrastructure.data_collection_rules
- eventhub_authorization_rule_id = module.common_infrastructure.eventhub_authorization_rule_id
- partner_solution_id = module.common_infrastructure.partner_solution_id
- tags = module.common_infrastructure.tags
- db_subnet = module.network.db_subnet
-
- availability_zone = 1
-
-
-
- vm_user_assigned_identity_id = var.vm_user_assigned_identity_id
-
- vm_os_disk = {
- name = "osdisk-primary"
- caching = "ReadWrite"
- storage_account_type = "Premium_LRS"
- disk_encryption_set_id = null
- disk_size_gb = 128
- }
-
- role_assignments = {
- role_assignment_1 = {
- role_definition_id_or_name = "Virtual Machine Contributor"
- principal_id = data.azurerm_client_config.current.object_id
- skip_service_principal_aad_check = false
- }
- }
-
- role_assignments_nic = {
- role_assignment_1 = {
- role_definition_id_or_name = "Contributor"
- principal_id = data.azurerm_client_config.current.object_id
- skip_service_principal_aad_check = false
- }
- }
-
- vm_extensions = {
- azure_monitor_agent = {
- name = "vm-primary-azure-monitor-agent"
- publisher = "Microsoft.Azure.Monitor"
- type = "AzureMonitorLinuxAgent"
- type_handler_version = "1.0"
- auto_upgrade_minor_version = true
- automatic_upgrade_enabled = true
- settings = null
- }
- }
-
- depends_on = [module.network, module.common_infrastructure]
-}
-
-
-module "vm_secondary" {
- source = "./modules/compute"
-
- resource_group_name = module.common_infrastructure.created_resource_group_name
- location = var.location
- vm_name = "vm-secondary-0"
- public_key = var.ssh_key
- sid_username = "oracle"
- vm_sku = var.vm_sku
-
- vm_source_image_reference = var.vm_source_image_reference
- vm_user_assigned_identity_id = var.vm_user_assigned_identity_id
- aad_system_assigned_identity = true
- public_ip_address_resource_id = module.network.db_server_puplic_ip_resources[1].id
-
- is_diagnostic_settings_enabled = module.common_infrastructure.is_diagnostic_settings_enabled
- diagnostic_target = module.common_infrastructure.diagnostic_target
- storage_account_id = module.common_infrastructure.target_storage_account_id
- storage_account_sas_token = module.common_infrastructure.target_storage_account_sas
- log_analytics_workspace = module.common_infrastructure.log_analytics_workspace != null ? {
- id = module.common_infrastructure.log_analytics_workspace.id
- name = module.common_infrastructure.log_analytics_workspace.name
- } : null
- data_collection_rules = module.common_infrastructure.data_collection_rules
- eventhub_authorization_rule_id = module.common_infrastructure.eventhub_authorization_rule_id
- partner_solution_id = module.common_infrastructure.partner_solution_id
- tags = module.common_infrastructure.tags
- db_subnet = module.network.db_subnet
-
-
-
- vm_os_disk = {
- name = "osdisk-secondary"
- caching = "ReadWrite"
- storage_account_type = "Premium_LRS"
- disk_encryption_set_id = null
- disk_size_gb = 128
- }
-
- role_assignments = {
- role_assignment_1 = {
- role_definition_id_or_name = "Virtual Machine Contributor"
- principal_id = data.azurerm_client_config.current.object_id
- skip_service_principal_aad_check = false
- }
- }
-
- vm_extensions = {
- azure_monitor_agent = {
- name = "vm-secondary-azure-monitor-agent"
- publisher = "Microsoft.Azure.Monitor"
- type = "AzureMonitorLinuxAgent"
- type_handler_version = "1.1"
- auto_upgrade_minor_version = true
- automatic_upgrade_enabled = true
- settings = null
- }
- }
- #ToDo: Pending
- # role_assignments_nic = {
- # role_assignment_1 = {
- # role_definition_id_or_name = "Contributor"
- # principal_id = data.azurerm_client_config.current.object_id
- # skip_service_principal_aad_check = false
- # }
- # }
-
- depends_on = [module.network, module.common_infrastructure]
-}
-
-module "network" {
- source = "./modules/network"
-
- resource_group = module.common_infrastructure.resource_group
- is_data_guard = module.common_infrastructure.is_data_guard
- is_diagnostic_settings_enabled = module.common_infrastructure.is_diagnostic_settings_enabled
- diagnostic_target = module.common_infrastructure.diagnostic_target
- storage_account_id = module.common_infrastructure.target_storage_account_id
- log_analytics_workspace_id = try(module.common_infrastructure.log_analytics_workspace.id, "")
- eventhub_authorization_rule_id = module.common_infrastructure.eventhub_authorization_rule_id
- partner_solution_id = module.common_infrastructure.partner_solution_id
- tags = module.common_infrastructure.tags
-
-
- #ToDo: role_assignments_nic
- # role_assignments_nic = {
- # role_assignment_1 = {
- # name = "Contributor"
- # skip_service_principal_aad_check = false
- # }
- # }
-
- role_assignments_pip = {
- role_assignment_1 = {
- name = "Contributor"
- skip_service_principal_aad_check = false
- }
- }
-
- role_assignments_nsg = {
- role_assignment_1 = {
- name = "Contributor"
- skip_service_principal_aad_check = false
- }
- }
-
- role_assignments_vnet = {
- role_assignment_1 = {
- name = "Contributor"
- skip_service_principal_aad_check = false
- }
- }
-
- role_assignments_subnet = {
- role_assignment_1 = {
- name = "Contributor"
- skip_service_principal_aad_check = false
- }
- }
-}
-
-
-module "storage_primary" {
- source = "./modules/storage"
-
- resource_group = module.common_infrastructure.resource_group
- is_data_guard = module.common_infrastructure.is_data_guard
- naming = "oracle-primary"
- vm = module.vm_primary.vm
- tags = module.common_infrastructure.tags
- database_disks_options = {
- data_disks = var.database_disks_options.data_disks
- asm_disks = var.database_disks_options.asm_disks
- redo_disks = var.database_disks_options.redo_disks
- }
- availability_zone = module.vm_primary.availability_zone
-
- role_assignments = {
- role_assignment_1 = {
- name = "Contributor"
- skip_service_principal_aad_check = false
- }
- }
-}
-
-module "storage_secondary" {
- source = "./modules/storage"
-
- resource_group = module.common_infrastructure.resource_group
- is_data_guard = module.common_infrastructure.is_data_guard
- naming = "oracle-secondary"
- vm = module.vm_secondary.vm
- tags = module.common_infrastructure.tags
- database_disks_options = {
- data_disks = var.database_disks_options.data_disks
- asm_disks = var.database_disks_options.asm_disks
- redo_disks = var.database_disks_options.redo_disks
- }
- availability_zone = module.vm_secondary.availability_zone
-
- role_assignments = {
- role_assignment_1 = {
- name = "Contributor"
- skip_service_principal_aad_check = false
- }
- }
-}
-
-
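Both storage module calls above forward the same database_disks_options object. A minimal sketch of its shape, limited to the two attributes the storage module's disk resources demonstrably read (create_option, disk_size_gb; see data_disk.tf further down), with illustrative sizes:

database_disks_options = {
  data_disks = [{ create_option = "Empty", disk_size_gb = 256 }]
  asm_disks  = [{ create_option = "Empty", disk_size_gb = 64 }]
  redo_disks = [{ create_option = "Empty", disk_size_gb = 128 }]
}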
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/infrastructure.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/infrastructure.tf
deleted file mode 100644
index a91ed21d9..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/infrastructure.tf
+++ /dev/null
@@ -1,149 +0,0 @@
-#########################################################################################
-# #
-# Subscription #
-# #
-#########################################################################################
-data "azurerm_subscription" "current" {}
-
-#########################################################################################
-# #
-# Resource Group #
-# #
-#########################################################################################
-resource "azurerm_resource_group" "rg" {
- count = local.resource_group_exists ? 0 : 1
- name = local.rg_name
- location = var.infrastructure.region
- tags = var.tags
-
- lifecycle {
- ignore_changes = [
- tags
- ]
- }
-}
-
-data "azurerm_resource_group" "rg" {
- name = local.rg_name
-
- depends_on = [azurerm_resource_group.rg]
-}
-
-#########################################################################################
-# #
-# Diagnostic Settings #
-# #
-#########################################################################################
-resource "azurerm_storage_account" "diagnostic" {
- count = var.is_diagnostic_settings_enabled ? 1 : 0
- name = "${local.prefix}diag${random_string.suffix.result}"
- resource_group_name = data.azurerm_resource_group.rg.name
- location = data.azurerm_resource_group.rg.location
- tags = merge(local.tags, var.tags)
-
- account_tier = "Standard"
- account_replication_type = "LRS"
-}
-
-data "azurerm_storage_account" "diagnostic" {
- count = var.is_diagnostic_settings_enabled ? 1 : 0
- name = azurerm_storage_account.diagnostic[count.index].name
- resource_group_name = data.azurerm_resource_group.rg.name
-
- depends_on = [azurerm_storage_account.diagnostic]
-}
-
-resource "random_string" "suffix" {
- length = 14
- special = false
- upper = false
-}
-
-data "azurerm_storage_account_sas" "diagnostic" {
- count = var.is_diagnostic_settings_enabled ? 1 : 0
- connection_string = azurerm_storage_account.diagnostic[0].primary_connection_string
-
- resource_types {
- service = false
- container = true
- object = true
- }
-
- services {
- blob = true
- queue = false
- table = true
- file = false
- }
-
- start = timestamp()
- expiry = timeadd(timestamp(), "8766h")
-
- permissions {
- read = false
- write = true
- delete = false
- list = true
- add = true
- create = true
- update = true
- process = false
- tag = false
- filter = false
- }
-}
-
-resource "azurerm_log_analytics_workspace" "diagnostic" {
- count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 1 : 0
- name = "${local.prefix}diag${random_string.suffix.result}"
- resource_group_name = data.azurerm_resource_group.rg.name
- location = data.azurerm_resource_group.rg.location
- sku = "PerGB2018"
- retention_in_days = 30
- tags = merge(local.tags, var.tags)
-}
-
-data "azurerm_log_analytics_workspace" "diagnostic" {
- count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 1 : 0
- name = "${local.prefix}diag${random_string.suffix.result}"
- resource_group_name = data.azurerm_resource_group.rg.name
-
- depends_on = [azurerm_log_analytics_workspace.diagnostic]
-}
-
-resource "azurerm_eventhub_namespace" "diagnostic" {
- count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Event_Hubs" ? 1 : 0
- name = "${local.prefix}diag${random_string.suffix.result}"
- resource_group_name = data.azurerm_resource_group.rg.name
- location = data.azurerm_resource_group.rg.location
- sku = "Standard"
- capacity = 1
- tags = merge(local.tags, var.tags)
-}
-
-resource "azurerm_eventhub_namespace_authorization_rule" "diagnostic" {
- count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Event_Hubs" ? 1 : 0
- name = "${local.prefix}diag${random_string.suffix.result}"
- namespace_name = azurerm_eventhub_namespace.diagnostic[0].name
- resource_group_name = data.azurerm_resource_group.rg.name
- listen = var.eventhub_permission.listen
- send = var.eventhub_permission.send
- manage = var.eventhub_permission.manage
-}
-
-resource "azurerm_new_relic_monitor" "diagnostic" {
- count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Partner_Solutions" ? 1 : 0
- name = "${local.prefix}diag${random_string.suffix.result}"
- resource_group_name = data.azurerm_resource_group.rg.name
- location = data.azurerm_resource_group.rg.location
- plan {
- effective_date = "2023-09-20T00:00:00Z"
- }
-
- user {
- email = var.logz_user.email
- first_name = var.logz_user.first_name
- last_name = var.logz_user.last_name
- phone_number = var.logz_user.phone_number
- }
-}
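All diagnostic resources in this file are gated by the same count pattern, so exactly one destination type is materialised per deployment. A hedged sketch of driving it from a root module (module path and all values are assumptions):

module "common_infrastructure" {
  source = "./modules/common_infrastructure"

  infrastructure = {
    region         = "westeurope"
    resource_group = { name = "rg-oracle-demo", arm_id = "" }
  }
  is_diagnostic_settings_enabled = true
  diagnostic_target              = "Log_Analytics_Workspace" # or Storage_Account / Event_Hubs / Partner_Solutions
  tags                           = { workload = "oracle" }
}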
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/monitoring_settings.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/monitoring_settings.tf
deleted file mode 100644
index bfcf4f2da..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/monitoring_settings.tf
+++ /dev/null
@@ -1,127 +0,0 @@
-
-#Data collection rules
-resource "azurerm_monitor_data_collection_rule" "collection_rule_linux" {
- count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 1 : 0
- kind = "Linux"
- location = var.infrastructure.region
- name = "LinuxCollectionRule"
- resource_group_name = local.rg_name
- tags = var.tags
- data_flow {
- destinations = [data.azurerm_log_analytics_workspace.diagnostic[0].name]
- output_stream = "Microsoft-Perf"
- streams = ["Microsoft-Perf"]
- transform_kql = "source"
- }
- data_flow {
- destinations = [data.azurerm_log_analytics_workspace.diagnostic[0].name]
- output_stream = "Microsoft-Syslog"
- streams = ["Microsoft-Syslog"]
- transform_kql = "source"
- }
- data_sources {
- performance_counter {
- counter_specifiers = ["Processor(*)\\% Processor Time", "Processor(*)\\% Idle Time", "Processor(*)\\% User Time", "Processor(*)\\% Nice Time", "Processor(*)\\% Privileged Time", "Processor(*)\\% IO Wait Time", "Processor(*)\\% Interrupt Time", "Processor(*)\\% DPC Time", "Memory(*)\\Available MBytes Memory", "Memory(*)\\% Available Memory", "Memory(*)\\Used Memory MBytes", "Memory(*)\\% Used Memory", "Memory(*)\\Pages/sec", "Memory(*)\\Page Reads/sec", "Memory(*)\\Page Writes/sec", "Memory(*)\\Available MBytes Swap", "Memory(*)\\% Available Swap Space", "Memory(*)\\Used MBytes Swap Space", "Memory(*)\\% Used Swap Space", "Process(*)\\Pct User Time", "Process(*)\\Pct Privileged Time", "Process(*)\\Used Memory", "Process(*)\\Virtual Shared Memory", "Logical Disk(*)\\% Free Inodes", "Logical Disk(*)\\% Used Inodes", "Logical Disk(*)\\Free Megabytes", "Logical Disk(*)\\% Free Space", "Logical Disk(*)\\% Used Space", "Logical Disk(*)\\Logical Disk Bytes/sec", "Logical Disk(*)\\Disk Read Bytes/sec", "Logical Disk(*)\\Disk Write Bytes/sec", "Logical Disk(*)\\Disk Transfers/sec", "Logical Disk(*)\\Disk Reads/sec", "Logical Disk(*)\\Disk Writes/sec", "Network(*)\\Total Bytes Transmitted", "Network(*)\\Total Bytes Received", "Network(*)\\Total Bytes", "Network(*)\\Total Packets Transmitted", "Network(*)\\Total Packets Received", "Network(*)\\Total Rx Errors", "Network(*)\\Total Tx Errors", "Network(*)\\Total Collisions", "System(*)\\Uptime", "System(*)\\Load1", "System(*)\\Load5", "System(*)\\Load15", "System(*)\\Users", "System(*)\\Unique Users", "System(*)\\CPUs"]
- name = "perfCounterDataSource60"
- sampling_frequency_in_seconds = 60
- streams = ["Microsoft-Perf"]
- }
- syslog {
- facility_names = ["alert", "audit", "auth", "authpriv", "clock", "cron", "daemon", "ftp", "kern", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7", "lpr", "mail", "news", "nopri", "ntp", "syslog", "user", "uucp"]
- log_levels = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency"]
- name = "sysLogsDataSource-1688419672"
- }
- }
-
-
- destinations {
-
- dynamic "log_analytics" {
- for_each = local.law_destination_settings
- iterator = dest
-
- content {
- workspace_resource_id = dest.value.resource_id
- name = dest.value.name
- }
- }
-
- dynamic "event_hub" {
- for_each = local.eventhub_destination_settings
-
- content {
-        event_hub_id = event_hub.value.resource_id
-        name         = event_hub.value.name
- }
- }
-
- dynamic "storage_blob" {
- for_each = local.storage_account_destination_settings
-
- content {
-        storage_account_id = storage_blob.value.resource_id
-        container_name     = storage_blob.value.container_name
-        name               = storage_blob.value.name
- }
- }
- }
-
-
- depends_on = [data.azurerm_log_analytics_workspace.diagnostic]
-}
-
-# Data collection rule for VM Insights
-resource "azurerm_monitor_data_collection_rule" "collection_rule_vm_insights" {
- count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 1 : 0
- description = "Data collection rule for VM Insights."
- location = var.infrastructure.region
- name = "MSVMI-DataCollectionRuleVMInsights"
- resource_group_name = local.rg_name
- tags = var.tags
-
- data_flow {
- destinations = ["VMInsightsPerf-Logs-Dest"]
- streams = ["Microsoft-InsightsMetrics"]
- }
- data_flow {
- destinations = ["VMInsightsPerf-Logs-Dest"]
- streams = ["Microsoft-ServiceMap"]
- }
- data_sources {
- extension {
- extension_name = "DependencyAgent"
- name = "DependencyAgentDataSource"
- streams = ["Microsoft-ServiceMap"]
- }
- performance_counter {
- counter_specifiers = ["\\VmInsights\\DetailedMetrics"]
- name = "VMInsightsPerfCounters"
- sampling_frequency_in_seconds = 60
- streams = ["Microsoft-InsightsMetrics"]
- }
- }
- destinations {
- log_analytics {
- name = "VMInsightsPerf-Logs-Dest"
- workspace_resource_id = data.azurerm_log_analytics_workspace.diagnostic[0].id
- }
- }
- depends_on = [
- data.azurerm_log_analytics_workspace.diagnostic
- ]
-}
-
-data "azurerm_monitor_data_collection_rule" "collection_rule_linux" {
- count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 1 : 0
-
- name = azurerm_monitor_data_collection_rule.collection_rule_linux[0].name
- resource_group_name = local.rg_name
-}
-
-
-data "azurerm_monitor_data_collection_rule" "collection_rule_vm_insights" {
- count = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? 1 : 0
-
- name = azurerm_monitor_data_collection_rule.collection_rule_vm_insights[0].name
- resource_group_name = local.rg_name
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/outputs.tf
deleted file mode 100644
index 9686b921e..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/outputs.tf
+++ /dev/null
@@ -1,98 +0,0 @@
-###############################################################################
-# #
-# Subscription #
-# #
-###############################################################################
-output "current_subscription" {
- value = data.azurerm_subscription.current
-}
-
-###############################################################################
-# #
-# Resource Group #
-# #
-###############################################################################
-output "resource_group" {
- value = data.azurerm_resource_group.rg
-}
-
-output "created_resource_group_id" {
- description = "Created resource group ID"
- value = data.azurerm_resource_group.rg.id
-}
-
-output "created_resource_group_name" {
- description = "Created resource group name"
- value = data.azurerm_resource_group.rg.name
-}
-
-output "created_resource_group_subscription_id" {
-  description = "Subscription ID of the created resource group"
-  value       = data.azurerm_subscription.current.subscription_id
-}
-
-output "is_data_guard" {
- description = "Whether the deployment is for Data Guard"
- value = var.is_data_guard
-}
-
-output "is_diagnostic_settings_enabled" {
- description = "Whether diagnostic settings are enabled"
- value = var.is_diagnostic_settings_enabled
-}
-
-output "target_storage_account_id" {
- description = "Storage account ID used for diagnostics"
- value = var.is_diagnostic_settings_enabled ? data.azurerm_storage_account.diagnostic[0].id : ""
-}
-
-output "target_storage_account_sas" {
- description = "Storage account SAS used for diagnostics"
- value = var.is_diagnostic_settings_enabled ? data.azurerm_storage_account_sas.diagnostic[0].sas : ""
-}
-
-output "log_analytics_workspace" {
- description = "Log Analytics workspace ID"
- value = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? {
- id = data.azurerm_log_analytics_workspace.diagnostic[0].id
- name = data.azurerm_log_analytics_workspace.diagnostic[0].name
- } : null
-}
-
-output "eventhub_authorization_rule_id" {
- description = "ID of an Event Hub authorization rule"
- value = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Event_Hubs" ? azurerm_eventhub_namespace_authorization_rule.diagnostic[0].id : null
-}
-
-output "partner_solution_id" {
- description = "Partner solution ID"
- value = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Partner_Solutions" ? azurerm_new_relic_monitor.diagnostic[0].id : null
-}
-
-output "diagnostic_target" {
- description = "The destination type of the diagnostic settings"
- value = var.diagnostic_target
-}
-
-output "availability_zone" {
- description = "Availability zones"
- value = var.availability_zone
-}
-
-output "tags" {
- description = "Tags applied to the resources"
- value = var.tags
-}
-
-output "data_collection_rules" {
- value = (var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace") ? {
- "${data.azurerm_monitor_data_collection_rule.collection_rule_linux[0].name}" = {
- id = data.azurerm_monitor_data_collection_rule.collection_rule_linux[0].id
- },
- "${data.azurerm_monitor_data_collection_rule.collection_rule_vm_insights[0].name}" = {
- id = data.azurerm_monitor_data_collection_rule.collection_rule_vm_insights[0].id
- }
-
- } : {}
-
-}
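Since log_analytics_workspace is null unless that target is selected, consumers should guard the attribute access the same way the root module does. A one-line sketch:

locals {
  law_id = try(module.common_infrastructure.log_analytics_workspace.id, "")
}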
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/providers.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/providers.tf
deleted file mode 100644
index ae8863f42..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/providers.tf
+++ /dev/null
@@ -1,24 +0,0 @@
-terraform {
- required_version = ">=1.6.0"
- required_providers {
- azurerm = {
- source = "hashicorp/azurerm"
- version = ">=3.11.0, <4.0"
- }
- azapi = {
- source = "Azure/azapi"
- version = "=1.8.0"
- }
- }
-}
-
-provider "azurerm" {
- features {
- resource_group {
- prevent_deletion_if_contains_resources = true
- }
- virtual_machine {
- delete_os_disk_on_deletion = true
- }
- }
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/resource_lock.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/resource_lock.tf
deleted file mode 100644
index e15a22bd5..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/resource_lock.tf
+++ /dev/null
@@ -1,27 +0,0 @@
-resource "azurerm_management_lock" "subscription" {
- count = length(var.subscription_locks) > 1 && length(try(var.subscription_locks.name, "")) > 0 ? 1 : 0
- name = var.subscription_locks.name
- scope = data.azurerm_subscription.current.id
- lock_level = var.subscription_locks.type
-}
-
-resource "azurerm_management_lock" "resource_group" {
- count = length(var.resource_group_locks) > 1 && length(try(var.resource_group_locks.name, "")) > 0 ? 1 : 0
- name = var.resource_group_locks.name
- scope = data.azurerm_resource_group.rg.id
- lock_level = var.resource_group_locks.type
-
- depends_on = [azurerm_resource_group.rg]
-}
-
-resource "azurerm_management_lock" "storage_account_diagnostic" {
- count = (length(var.resource_group_locks) > 1 && length(try(var.resource_group_locks.name, "")) > 0 && var.is_diagnostic_settings_enabled ) ? 1 : 0
- name = var.resource_group_locks.name
- scope = data.azurerm_storage_account.diagnostic[0].id
- lock_level = var.resource_group_locks.type
-
- depends_on = [azurerm_resource_group.rg, data.azurerm_storage_account.diagnostic]
-}
-
-#ToDo: Add more locks for other resources
-
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/role_assignments.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/role_assignments.tf
deleted file mode 100644
index 43603476f..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/role_assignments.tf
+++ /dev/null
@@ -1,17 +0,0 @@
-data "azurerm_client_config" "current" {}
-
-data "azurerm_role_definition" "builtin" {
- for_each = var.role_assignments
- name = each.value.name
-}
-
-resource "azurerm_role_assignment" "assignment" {
- for_each = var.role_assignments
- role_definition_name = data.azurerm_role_definition.builtin[each.key].name
- principal_id = data.azurerm_client_config.current.object_id
- scope = try(each.value.scope, data.azurerm_subscription.current.id)
- skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, null)
- description = try(each.value.description, null)
- condition = try(each.value.condition, null)
- condition_version = try(each.value.condition_version, null)
-}
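An illustrative value (key and description assumed) for the role_assignments map this file iterates; scope falls back to the current subscription when omitted, per the try() above:

role_assignments = {
  role_assignment_1 = {
    name        = "Reader"
    description = "Read-only access for the lab user"
    # scope omitted -> defaults to data.azurerm_subscription.current.id
  }
}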
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_global.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_global.tf
deleted file mode 100644
index aa85b1c1f..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_global.tf
+++ /dev/null
@@ -1,95 +0,0 @@
-variable "infrastructure" {}
-
-variable "is_data_guard" {
- description = "Whether Data Guard is enabled"
- default = false
-}
-
-variable "is_diagnostic_settings_enabled" {
- description = "Whether diagnostic settings are enabled"
- default = false
-}
-
-variable "diagnostic_target" {
- description = "The destination type of the diagnostic settings"
- default = "Log_Analytics_Workspace"
- validation {
- condition = contains(["Log_Analytics_Workspace", "Storage_Account", "Event_Hubs", "Partner_Solutions"], var.diagnostic_target)
- error_message = "Allowed values are Log_Analytics_Workspace, Storage_Account, Event_Hubs, Partner_Solutions"
- }
-}
-
-variable "eventhub_permission" {
- description = "Authorization rule permissions for Event Hub"
- default = {
- listen = true
- send = true
- manage = true
- }
-}
-
-variable "log_destinations" {
- type = map(object({
- type = string // E.g., "LogAnalytics", "EventHub", "StorageBlob"
- resource_id = optional(string) // For Log Analytics, Event Hub, Storage Account
- # workspace_id = optional(string) // For Log Analytics
- # eventhub_id = optional(string) // For Event Hub
- # storage_account_id = optional(string) // For Storage Account
- container_name = optional(string) // For Blob container
- name = string // Destination name within the DCR
- }))
- default = {}
-}
-
-
-
-
-
-variable "logz_user" {
-  description = "Contact details for the partner-solution monitor user (New Relic)"
- default = {
- email = "user@example.com"
- first_name = "Example"
- last_name = "User"
- phone_number = "+12313803556"
- }
-}
-
-variable "role_assignments" {
- description = "Role assignments"
- default = {}
-}
-
-variable "subscription_locks" {
- type = object({
- name = optional(string, "")
- type = optional(string, "CanNotDelete")
- })
- default = {}
- validation {
- condition = contains(["CanNotDelete", "ReadOnly"], var.subscription_locks.type)
- error_message = "Lock type must be one of: CanNotDelete, ReadOnly."
- }
-}
-
-variable "resource_group_locks" {
- type = object({
- name = optional(string, "")
- type = optional(string, "CanNotDelete")
- })
- default = {}
- validation {
- condition = contains(["CanNotDelete", "ReadOnly"], var.resource_group_locks.type)
- error_message = "Lock type must be one of: CanNotDelete, ReadOnly."
- }
-}
-
-variable "availability_zone" {
- description = "The availability zones of the resource"
- default = null
-}
-
-variable "tags" {
- description = "Tags to be added to the resources"
- default = {}
-}
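A minimal example (lock name illustrative) of enabling the resource-group lock through the object variable above:

resource_group_locks = {
  name = "lock-rg-oracle-demo"
  type = "CanNotDelete" # or "ReadOnly"
}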
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_local.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_local.tf
deleted file mode 100644
index f8a6cc21b..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/common_infrastructure/variables_local.tf
+++ /dev/null
@@ -1,43 +0,0 @@
-locals {
- resource_group_exists = length(try(var.infrastructure.resource_group.arm_id, "")) > 0
-  // If a resource ID is specified, extract the resource group name from it; otherwise read it from the input or create it using the naming convention
-  rg_name = local.resource_group_exists ? (
-    split("/", var.infrastructure.resource_group.arm_id)[4]) : (
- length(var.infrastructure.resource_group.name) > 0 ? (
- var.infrastructure.resource_group.name) : (
- format("%s-%s-%s-%s-%s",
- "rg",
- local.prefix,
- "demo",
- var.infrastructure.region,
- "001"
- )
- )
- )
-
- // Resource group
- prefix = "oracle"
-
-
- law_destination_settings = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Log_Analytics_Workspace" ? { Log_Analytics_Workspace = {
- type = "Log_Analytics_Workspace"
- resource_id = data.azurerm_log_analytics_workspace.diagnostic[0].id
- name = data.azurerm_log_analytics_workspace.diagnostic[0].name
- } } : {}
-
- storage_account_destination_settings = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Storage_Account" ? { Storage_Account = {
- type = "Storage_Account"
- resource_id = data.azurerm_storage_account.diagnostic[0].id
-    container_name = "diagnostics" # assumed container name; the source passed the SAS token here, which is not a container name
- name = data.azurerm_storage_account.diagnostic[0].name
- } } : {}
-
- eventhub_destination_settings = var.is_diagnostic_settings_enabled && var.diagnostic_target == "Event_Hubs" ? { Event_Hubs = {
- type = "Event_Hubs"
- resource_id = azurerm_eventhub_namespace_authorization_rule.diagnostic[0].id
- name = azurerm_eventhub_namespace_authorization_rule.diagnostic[0].name
- } } : {}
-
-
- tags = {}
-}
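The rg_name local extracts the resource-group name as element 4 of the "/"-split ARM ID. A worked example with an illustrative ID:

locals {
  # "/subscriptions/<sub>/resourceGroups/rg-oracle-demo" splits into
  # ["", "subscriptions", "<sub>", "resourceGroups", "rg-oracle-demo"]
  example_rg_name = split("/", "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-oracle-demo")[4] # => "rg-oracle-demo"
}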
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/availability_set.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/availability_set.tf
deleted file mode 100644
index 816c3fd28..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/availability_set.tf
+++ /dev/null
@@ -1,17 +0,0 @@
-resource "azurerm_availability_set" "oracle_vm" {
- count = var.availability_zone == null ? 1 : 0
- name = "as-${count.index}"
- location = var.location
- resource_group_name = var.resource_group_name
-
- platform_fault_domain_count = 2
-
-}
-
-data "azurerm_availability_set" "oracle_vm" {
- count = var.availability_zone == null ? 1 : 0
- name = "as-${count.index}"
- resource_group_name = var.resource_group_name
-
- depends_on = [azurerm_availability_set.oracle_vm]
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/data.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/data.tf
deleted file mode 100644
index a526daaa6..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/data.tf
+++ /dev/null
@@ -1,14 +0,0 @@
-data "azurerm_virtual_machine" "oracle_vm_primary" {
- name = module.avm-res-compute-virtualmachine[keys(local.vm_config_data_parameter)[0]].virtual_machine.name
- resource_group_name = var.resource_group_name
-
- depends_on = [module.avm-res-compute-virtualmachine]
-}
-
-data "azurerm_virtual_machine" "oracle_vms" {
- for_each = { for vm in module.avm-res-compute-virtualmachine : vm.name => vm.virtual_machine }
- name = each.value.name
- resource_group_name = var.resource_group_name
-
- depends_on = [module.avm-res-compute-virtualmachine]
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/monitoring_settings.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/monitoring_settings.tf
deleted file mode 100644
index 44ce990ec..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/monitoring_settings.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-# Create Data Collection Rule Association for VM created
-resource "azurerm_monitor_data_collection_rule_association" "dcra_vm_insights" {
- # Create association for each data collection rule
- for_each = { for key, val in var.data_collection_rules : key => val if(var.log_analytics_workspace != null && var.is_diagnostic_settings_enabled) }
-
- name = each.key
- target_resource_id = data.azurerm_virtual_machine.oracle_vm_primary.id
- data_collection_rule_id = each.value.id
-}
-
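The association above iterates var.data_collection_rules, whose shape matches the data_collection_rules output of the common_infrastructure module. An illustrative value (rule IDs assumed):

data_collection_rules = {
  LinuxCollectionRule = {
    id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-oracle-demo/providers/Microsoft.Insights/dataCollectionRules/LinuxCollectionRule"
  }
  "MSVMI-DataCollectionRuleVMInsights" = {
    id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-oracle-demo/providers/Microsoft.Insights/dataCollectionRules/MSVMI-DataCollectionRuleVMInsights"
  }
}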
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/outputs.tf
deleted file mode 100644
index 81eb5373c..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/outputs.tf
+++ /dev/null
@@ -1,32 +0,0 @@
-output "vm" {
- value = data.azurerm_virtual_machine.oracle_vm_primary
-}
-
-output "database_server_count" {
- value = var.database_server_count
-}
-
-output "availability_zone" {
- value = var.availability_zone != null ? var.availability_zone : null
-}
-
-output "oracle_vms" {
- value = data.azurerm_virtual_machine.oracle_vms
- sensitive = true
-}
-
-output "vm_map_collection" {
- value = { for vm in module.avm-res-compute-virtualmachine : vm.name => {
- name = vm.name
- id = vm.resource_id
- public_ips = vm.public_ips
-
- } }
- sensitive = false
-}
-
-
-output "vm_collection" {
- value = flatten([for vm in module.avm-res-compute-virtualmachine : vm.name])
- sensitive = false
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/variable_global.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/variable_global.tf
deleted file mode 100644
index cc53a8fe4..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/variable_global.tf
+++ /dev/null
@@ -1,366 +0,0 @@
-variable "database_server_count" {
- description = "The number of database servers"
- default = 1
- type = number
-}
-
-variable "vm_name" {
- description = "The name of the Oracle VM"
- type = string
-}
-
-# variable "resource_group" {
-# description = "Details of the resource group"
-# default = {}
-# }
-
-
-variable "resource_group_name" {
- description = "Created resource group name"
- type = string
-}
-
-variable "location" {
- description = "The location of the resource"
- type = string
-}
-
-
-
-variable "database" {
- description = "Details of the database node"
- type = object({
-    use_DHCP = bool
- authentication = object({
- type = string
- })
- })
- default = {
- use_DHCP = true
- authentication = {
- type = "key"
- }
- }
-}
-
-variable "nic_locks" {
- type = object({
- name = optional(string, "")
- type = optional(string, "CanNotDelete")
- })
- default = {}
- validation {
- condition = contains(["CanNotDelete", "ReadOnly"], var.nic_locks.type)
- error_message = "Lock type must be one of: CanNotDelete, ReadOnly."
- }
-}
-
-variable "aad_system_assigned_identity" {
- description = "AAD system assigned identity"
- type = bool
-}
-
-variable "skip_service_principal_aad_check" {
- type = bool
- description = "If the principal_id is a newly provisioned `Service Principal` set this value to true to skip the Azure Active Directory check which may fail due to replication lag."
- default = true
-}
-
-variable "storage_account_id" {
- description = "Storage account ID used for diagnostics"
- type = string
- default = null
-}
-
-variable "storage_account_sas_token" {
- description = "Storage account SAS token used for diagnostics"
- type = string
- default = null
-}
-
-variable "log_analytics_workspace" {
- type = object({
- id = string
- name = string
- })
-
- description = "Log Analytics workspace"
- default = null
-}
-
-variable "eventhub_authorization_rule_id" {
- description = "ID of an Event Hub authorization rule"
- type = string
- default = null
-}
-
-variable "partner_solution_id" {
- description = "Value of the partner solution ID"
- default = null
-}
-
-variable "is_diagnostic_settings_enabled" {
- description = "Whether diagnostic settings are enabled"
- default = false
-}
-
-variable "diagnostic_target" {
- description = "The destination type of the diagnostic settings"
- default = "Log_Analytics_Workspace"
- validation {
- condition = contains(["Log_Analytics_Workspace", "Storage_Account", "Event_Hubs", "Partner_Solutions"], var.diagnostic_target)
- error_message = "Allowed values are Log_Analytics_Workspace, Storage_Account, Event_Hubs, Partner_Solutions"
- }
-}
-
-variable "data_collection_rules" {
- type = map(object({
- id = string
- }))
- description = "Data collection rules"
- default = {}
-}
-
-# variable "role_assignments" {
-# description = "Role assignments"
-# default = {}
-# }
-
-variable "role_assignments" {
- type = map(object({
- role_definition_id_or_name = string
- principal_id = optional(string)
- condition = optional(string)
- condition_version = optional(string)
- description = optional(string)
- skip_service_principal_aad_check = optional(bool, true)
- delegated_managed_identity_resource_id = optional(string)
- }
- ))
- default = {}
-}
-
-variable "vm_lock" {
- type = object({
- name = optional(string, null)
- kind = optional(string, "None")
- })
- default = {}
-  description = "Lock to apply to the virtual machine." # heredoc description elided in the source diff
-}
-
-# (further variable declarations elided in the source diff)
-
-locals {
-  # NOTE: the collection name "var.ip_configurations" is an assumption; the
-  # original declaration was lost in the truncated span above.
-  network_interface_ipconfigs = { for ipconfig in var.ip_configurations : ipconfig.name => {
- name = ipconfig.name
- private_ip_subnet_resource_id = ipconfig.subnet_id
- create_public_ip_address = ipconfig.create_public_ip_address
- public_ip_address_resource_id = ipconfig.public_ip_address_resource_id
- public_ip_address_name = ipconfig.create_public_ip_address ? "${var.vm_name}-pip" : ""
- private_ip_address_allocation = ipconfig.private_ip_address_allocation
- is_primary_ipconfiguration = ipconfig.primary
- private_ip_address = var.database.use_DHCP ? ipconfig.nic_ips[0] : ""
- }
- }
-
- # role_assignments_nic_parameter = {for key, value in var.role_assignments_nic : key => {
- # principal_id = value.principal_id
- # role_definition_id_or_name = value.role_definition_id_or_name
- # assign_to_child_public_ip_addresses = true
- # skip_service_principal_aad_check = value.skip_service_principal_aad_check
- # }
-
-
-
- vm_default_config_data = {
- "vm-0" = {
- name = var.vm_name
- os_type = "Linux"
- generate_admin_password_or_ssh_key = false
- enable_auth_password = local.enable_auth_password
- admin_username = var.sid_username
- admin_ssh_keys = {
- username = var.sid_username
- public_key = var.public_key
- }
- source_image_reference = var.vm_source_image_reference
- virtualmachine_sku_size = var.vm_sku
- os_disk = var.vm_os_disk
- availability_zone = var.availability_zone
- enable_telemetry = var.enable_telemetry
- user_assigned_identity_id = var.vm_user_assigned_identity_id
- role_assignments = var.role_assignments
- skip_service_principal_aad_check = var.skip_service_principal_aad_check
-
- #Network Interfaces
- network_interfaces = {
-
- network_interface_1 = {
- name = "oraclevmnic-${var.vm_name}"
- location = var.location
- resource_group_name = var.resource_group_name
- tags = merge(local.tags, var.tags)
- accelerated_networking_enabled = true
-
- ip_configurations = local.network_interface_ipconfigs
-
- #ToDo: role_assignments_nic_parameter
- # role_assignments = {
- # role_assignment_1 = {
- # role_definition_id_or_name = "Contributor"
- # principal_id = data.azurerm_client_config.current.object_id
- # skip_service_principal_aad_check = var.skip_service_principal_aad_check
- # }
- # }
-
-
- }
- }
- }
- }
-
-
- # Variable with the data to create the Oracle VM
- vm_config_data_parameter = merge(var.vm_config_data, local.vm_default_config_data)
-
-
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/vm.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/vm.tf
deleted file mode 100644
index 6596fe074..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/compute/vm.tf
+++ /dev/null
@@ -1,45 +0,0 @@
-#########################################################################################
-# #
-# Virtual Machine #
-# #
-#########################################################################################
-
-
-module "avm-res-compute-virtualmachine" {
- source = "Azure/avm-res-compute-virtualmachine/azurerm"
- version = "0.17.0"
- for_each = local.vm_config_data_parameter
-
-
- name = each.value.name
- location = var.location
- resource_group_name = var.resource_group_name
- os_type = each.value.os_type
-
- generate_admin_password_or_ssh_key = each.value.generate_admin_password_or_ssh_key
-  disable_password_authentication = !each.value.enable_auth_password # normally true; password auth only when explicitly enabled
- admin_username = each.value.admin_username
- admin_ssh_keys = [each.value.admin_ssh_keys]
- source_image_reference = each.value.source_image_reference
- sku_size = each.value.virtualmachine_sku_size
- os_disk = each.value.os_disk
- extensions = var.vm_extensions
- network_interfaces = each.value.network_interfaces
-
-
- zone = each.value.availability_zone
- availability_set_resource_id = var.availability_zone == null ? data.azurerm_availability_set.oracle_vm[0].id : null
- tags = merge(local.tags, var.tags)
-
-
-
- managed_identities = {
- system_assigned = var.aad_system_assigned_identity
- user_assigned_resource_ids = [each.value.user_assigned_identity_id]
- }
-
- role_assignments = each.value.role_assignments
-}
-
-
-
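A hedged sketch of invoking this compute module from the root for the secondary node (source path and values assumed; only a subset of inputs is shown, the rest appear in the vm_secondary block at the top of this diff):

module "vm_secondary" {
  source                       = "./modules/compute"
  vm_name                      = "vm-oracle-secondary"
  resource_group_name          = module.common_infrastructure.created_resource_group_name
  location                     = module.network.network_location
  database_server_count        = 1
  aad_system_assigned_identity = true
  # ... remaining inputs as in the vm_secondary block at the top of this diff
}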
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/diagnostic_settings.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/diagnostic_settings.tf
deleted file mode 100644
index 46e6a34d2..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/diagnostic_settings.tf
+++ /dev/null
@@ -1,145 +0,0 @@
-
-#ToDo: Should be replicated on VM Module
-# resource "azurerm_monitor_diagnostic_setting" "nic" {
-# count = var.is_diagnostic_settings_enabled ? 1 : 0
-# name = "nic-${count.index}-diag"
-# target_resource_id = azurerm_network_interface.oracle_db[count.index].id
-# storage_account_id = var.diagnostic_target == "Storage_Account" ? var.storage_account_id : null
-# log_analytics_workspace_id = var.diagnostic_target == "Log_Analytics_Workspace" ? var.log_analytics_workspace_id : null
-# eventhub_authorization_rule_id = var.diagnostic_target == "Event_Hubs" ? var.eventhub_authorization_rule_id : null
-# partner_solution_id = var.diagnostic_target == "Partner_Solutions" ? var.partner_solution_id : null
-
-# metric {
-# category = "AllMetrics"
-# retention_policy {
-# enabled = false
-# }
-# }
-# }
-
-resource "azurerm_monitor_diagnostic_setting" "nsg" {
- count = var.is_diagnostic_settings_enabled ? 1 : 0
- name = "nsg"
- target_resource_id = azurerm_network_security_group.blank.id
- storage_account_id = var.diagnostic_target == "Storage_Account" ? var.storage_account_id : null
- log_analytics_workspace_id = var.diagnostic_target == "Log_Analytics_Workspace" ? var.log_analytics_workspace_id : null
- eventhub_authorization_rule_id = var.diagnostic_target == "Event_Hubs" ? var.eventhub_authorization_rule_id : null
- partner_solution_id = var.diagnostic_target == "Partner_Solutions" ? var.partner_solution_id : null
-
- dynamic "enabled_log" {
- for_each = toset(data.azurerm_monitor_diagnostic_categories.nsg[count.index].log_category_types)
- content {
- category = enabled_log.value
- retention_policy {
- enabled = false
- }
- }
- }
-}
-
-#ToDo: It does not work
-# resource "azurerm_monitor_diagnostic_setting" "pip" {
-# count = var.is_diagnostic_settings_enabled ? var.is_data_guard ? 2 : 1 : 0
-# name = "pip"
-# target_resource_id = azurerm_public_ip.vm_pip[count.index].id
-# storage_account_id = var.diagnostic_target == "Storage_Account" ? var.storage_account_id : null
-# log_analytics_workspace_id = var.diagnostic_target == "Log_Analytics_Workspace" ? var.log_analytics_workspace_id : null
-# eventhub_authorization_rule_id = var.diagnostic_target == "Event_Hubs" ? var.eventhub_authorization_rule_id : null
-# partner_solution_id = var.diagnostic_target == "Partner_Solutions" ? var.partner_solution_id : null
-
-# dynamic "enabled_log" {
-# for_each = toset(data.azurerm_monitor_diagnostic_categories.pip[count.index].log_category_types)
-# content {
-# category = enabled_log.value
-# retention_policy {
-# enabled = false
-# }
-# }
-# }
-
-# metric {
-# category = "AllMetrics"
-# retention_policy {
-# enabled = false
-# }
-# }
-# }
-
-resource "azurerm_monitor_diagnostic_setting" "vnet" {
- count = var.is_diagnostic_settings_enabled ? 1 : 0
- name = "vnet"
- target_resource_id = data.azurerm_virtual_network.vnet_oracle[count.index].id
- storage_account_id = var.diagnostic_target == "Storage_Account" ? var.storage_account_id : null
- log_analytics_workspace_id = var.diagnostic_target == "Log_Analytics_Workspace" ? var.log_analytics_workspace_id : null
- eventhub_authorization_rule_id = var.diagnostic_target == "Event_Hubs" ? var.eventhub_authorization_rule_id : null
- partner_solution_id = var.diagnostic_target == "Partner_Solutions" ? var.partner_solution_id : null
-
- dynamic "enabled_log" {
- for_each = toset(data.azurerm_monitor_diagnostic_categories.vnet[count.index].log_category_types)
- content {
- category = enabled_log.value
- retention_policy {
- enabled = false
- }
- }
- }
-
- metric {
- category = "AllMetrics"
- retention_policy {
- enabled = false
- }
- }
-}
-
-# data "azurerm_monitor_diagnostic_categories" "nic" {
-# count = var.is_diagnostic_settings_enabled ? 1 : 0
-# resource_id = data.azurerm_network_interface.nic[count.index].id
-# }
-
-data "azurerm_monitor_diagnostic_categories" "nsg" {
- count = var.is_diagnostic_settings_enabled ? 1 : 0
- resource_id = data.azurerm_network_security_group.nsg[count.index].id
-}
-
-data "azurerm_monitor_diagnostic_categories" "pip" {
- count = var.is_diagnostic_settings_enabled ? 1 : 0
- resource_id = data.azurerm_public_ip.pip[count.index].id
-}
-
-data "azurerm_monitor_diagnostic_categories" "vnet" {
- count = var.is_diagnostic_settings_enabled ? 1 : 0
- resource_id = data.azurerm_virtual_network.vnet[count.index].id
-}
-
-# data "azurerm_network_interface" "nic" {
-# count = var.is_data_guard ? 2 : 1
-# name = "oraclevmnic-${count.index}"
-# resource_group_name = var.resource_group.name
-
-# depends_on = [azurerm_network_interface.oracle_db]
-# }
-
-data "azurerm_network_security_group" "nsg" {
- count = 1
- name = "blank"
- resource_group_name = var.resource_group.name
-
- depends_on = [azurerm_network_security_group.blank]
-}
-
-data "azurerm_public_ip" "pip" {
- count = var.is_data_guard ? 2 : 1
- name = "vmpip-${count.index}"
- resource_group_name = var.resource_group.name
-
- depends_on = [azurerm_public_ip.vm_pip]
-}
-
-data "azurerm_virtual_network" "vnet" {
- count = 1
- name = local.vnet_oracle_name
- resource_group_name = var.resource_group.name
-
- depends_on = [module.vnet]
-}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/nsg.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/nsg.tf
deleted file mode 100644
index d1d7dab5b..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/nsg.tf
+++ /dev/null
@@ -1,26 +0,0 @@
-#########################################################################################
-# #
-# Network Security Group #
-# #
-#########################################################################################
-resource "azurerm_network_security_group" "blank" {
- name = "blank"
- location = var.resource_group.location
- resource_group_name = var.resource_group.name
-
- tags = merge(local.tags, var.tags)
-}
-
-resource "azurerm_subnet_network_security_group_association" "ssh" {
- subnet_id = data.azurerm_subnet.subnet_oracle[0].id
- network_security_group_id = azurerm_network_security_group.blank.id
-}
-
-data "azurerm_network_security_group" "blank" {
- name = "blank"
- resource_group_name = var.resource_group.name
-
- depends_on = [azurerm_network_security_group.blank]
-}
-
-
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/outputs.tf
deleted file mode 100644
index 49b5ba02a..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/outputs.tf
+++ /dev/null
@@ -1,20 +0,0 @@
-###############################################################################
-# #
-# Network #
-# #
-###############################################################################
-output "network_location" {
- value = data.azurerm_virtual_network.vnet_oracle[0].location
-}
-
-output "db_subnet" {
- value = data.azurerm_subnet.subnet_oracle[0]
-}
-
-output "db_server_puplic_ip" {
- value = azurerm_public_ip.vm_pip[0].ip_address
-}
-
-output "db_server_puplic_ip_resources" {
- value = azurerm_public_ip.vm_pip
-}
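A small usage sketch (user name and key path assumed) that turns the public-IP output into a ready-made SSH command; note it keeps the source's "puplic" spelling of the output name:

output "ssh_primary" {
  value = "ssh -i ~/.ssh/id_rsa oracle@${module.network.db_server_puplic_ip}"
}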
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/pip.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/pip.tf
deleted file mode 100644
index 5c9c31d39..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/pip.tf
+++ /dev/null
@@ -1,24 +0,0 @@
-#########################################################################################
-# #
-# Public IPs #
-# #
-#########################################################################################
-
-resource "azurerm_public_ip" "vm_pip" {
- count = var.is_data_guard ? 2 : 1
- name = "vmpip-${count.index}"
- location = var.resource_group.location
- resource_group_name = var.resource_group.name
- allocation_method = "Static"
- sku = "Standard"
-
- tags = merge(local.tags, var.tags)
-}
-
-data "azurerm_public_ip" "vm_pip" {
- count = var.is_data_guard ? 2 : 1
- name = "vmpip-${count.index}"
- resource_group_name = var.resource_group.name
-
- depends_on = [azurerm_public_ip.vm_pip]
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/providers.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/providers.tf
deleted file mode 100644
index c9561f7e8..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/providers.tf
+++ /dev/null
@@ -1,24 +0,0 @@
-terraform {
- required_version = ">=1.2"
- required_providers {
- azurerm = {
- source = "hashicorp/azurerm"
- version = ">=3.11.0, <4.0"
- }
- azapi = {
- source = "Azure/azapi"
- version = "=1.8.0"
- }
- }
-}
-
-provider "azurerm" {
- features {
- resource_group {
- prevent_deletion_if_contains_resources = true
- }
- virtual_machine {
- delete_os_disk_on_deletion = true
- }
- }
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/resource_lock.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/resource_lock.tf
deleted file mode 100644
index 3e83d1603..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/resource_lock.tf
+++ /dev/null
@@ -1,27 +0,0 @@
-
-resource "azurerm_management_lock" "nsg" {
- count = length(var.nsg_locks) > 1 && length(try(var.nsg_locks.name, "")) > 0 ? 1 : 0
- name = var.nsg_locks.name
- scope = data.azurerm_network_security_group.blank.id
- lock_level = var.nsg_locks.type
-
- depends_on = [azurerm_network_security_group.blank]
-}
-
-resource "azurerm_management_lock" "vnet" {
- count = length(var.vnet_locks) > 1 && length(try(var.vnet_locks.name, "")) > 0 ? 1 : 0
- name = var.vnet_locks.name
- scope = data.azurerm_virtual_network.vnet_oracle[0].id
- lock_level = var.vnet_locks.type
-
- depends_on = [data.azurerm_virtual_network.vnet_oracle]
-}
-
-resource "azurerm_management_lock" "subnet" {
- count = length(var.subnet_locks) > 1 && length(try(var.subnet_locks.name, "")) > 0 ? 1 : 0
- name = var.subnet_locks.name
- scope = data.azurerm_subnet.subnet_oracle[0].id
- lock_level = var.subnet_locks.type
-
- depends_on = [data.azurerm_subnet.subnet_oracle]
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/role_assignments.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/role_assignments.tf
deleted file mode 100644
index 93654a6ee..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/role_assignments.tf
+++ /dev/null
@@ -1,82 +0,0 @@
-data "azurerm_client_config" "current" {}
-
-# data "azurerm_role_definition" "nic" {
-# for_each = var.role_assignments_nic
-# name = each.value.name
-# }
-
-data "azurerm_role_definition" "pip" {
- for_each = var.role_assignments_pip
- name = each.value.name
-}
-
-data "azurerm_role_definition" "nsg" {
- for_each = var.role_assignments_nsg
- name = each.value.name
-}
-
-data "azurerm_role_definition" "vnet" {
- for_each = var.role_assignments_vnet
- name = each.value.name
-}
-
-data "azurerm_role_definition" "subnet" {
- for_each = var.role_assignments_subnet
- name = each.value.name
-}
-
-
-# resource "azurerm_role_assignment" "nic" {
-# for_each = var.role_assignments_nic
-# role_definition_name = data.azurerm_role_definition.nic[each.key].name
-# principal_id = data.azurerm_client_config.current.object_id
-# scope = try(each.value.scope, data.azurerm_network_interface.oracle_db[0].id)
-# skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false)
-# description = try(each.value.description, null)
-# condition = try(each.value.condition, null)
-# condition_version = try(each.value.condition_version, null)
-# }
-
-resource "azurerm_role_assignment" "pip" {
- for_each = var.role_assignments_pip
- role_definition_name = data.azurerm_role_definition.pip[each.key].name
- principal_id = data.azurerm_client_config.current.object_id
- scope = try(each.value.scope, data.azurerm_public_ip.vm_pip[0].id)
- skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false)
- description = try(each.value.description, null)
- condition = try(each.value.condition, null)
- condition_version = try(each.value.condition_version, null)
-}
-
-resource "azurerm_role_assignment" "nsg" {
- for_each = var.role_assignments_nsg
- role_definition_name = data.azurerm_role_definition.nsg[each.key].name
- principal_id = data.azurerm_client_config.current.object_id
- scope = try(each.value.scope, data.azurerm_network_security_group.blank.id)
- skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false)
- description = try(each.value.description, null)
- condition = try(each.value.condition, null)
- condition_version = try(each.value.condition_version, null)
-}
-
-resource "azurerm_role_assignment" "vnet" {
- for_each = var.role_assignments_vnet
- role_definition_name = data.azurerm_role_definition.vnet[each.key].name
- principal_id = data.azurerm_client_config.current.object_id
- scope = try(each.value.scope, data.azurerm_virtual_network.vnet_oracle[0].id)
- skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false)
- description = try(each.value.description, null)
- condition = try(each.value.condition, null)
- condition_version = try(each.value.condition_version, null)
-}
-
-resource "azurerm_role_assignment" "subnet" {
- for_each = var.role_assignments_subnet
- role_definition_name = data.azurerm_role_definition.subnet[each.key].name
- principal_id = data.azurerm_client_config.current.object_id
- scope = try(each.value.scope, data.azurerm_subnet.subnet_oracle[0].id)
- skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false)
- description = try(each.value.description, null)
- condition = try(each.value.condition, null)
- condition_version = try(each.value.condition_version, null)
-}
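role_assignments_pip is the only one of these maps without a default (see variables_global.tf below), so callers must supply it; the root module at the top of this diff passes:

role_assignments_pip = {
  role_assignment_1 = {
    name                             = "Contributor"
    skip_service_principal_aad_check = false
  }
}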
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_global.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_global.tf
deleted file mode 100644
index 12a43dedb..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_global.tf
+++ /dev/null
@@ -1,128 +0,0 @@
-variable "resource_group" {
- type = object({
- name = string
- location = string
- id = string
- })
- description = "Details of the resource group"
- default = null
-}
-
-variable "diagnostic_target" {
- type = string
- description = "The destination type of the diagnostic settings"
- default = "Log_Analytics_Workspace"
- validation {
- condition = contains(["Log_Analytics_Workspace", "Storage_Account", "Event_Hubs", "Partner_Solutions"], var.diagnostic_target)
- error_message = "Allowed values are Log_Analytics_Workspace, Storage_Account, Event_Hubs, Partner_Solutions"
- }
-}
-
-variable "storage_account_id" {
- description = "Storage account ID used for diagnostics"
- type = string
- default = null
-}
-
-variable "log_analytics_workspace_id" {
- description = "Log Analytics workspace ID"
- type = string
- default = null
-}
-
-variable "eventhub_authorization_rule_id" {
- description = "ID of an Event Hub authorization rule"
- type = string
- default = null
-}
-
-variable "partner_solution_id" {
- type = string
- description = "Value of the partner solution ID"
- default = null
-}
-
-variable "is_diagnostic_settings_enabled" {
- type = bool
- description = "Whether diagnostic settings are enabled"
- default = false
-}
-
-variable "role_assignments_pip" {
- type = map(object({
- name = string
- }))
- description = "Role assignments scoped to the public IP address"
-}
-
-variable "role_assignments_nsg" {
- type = map(object({
- name = string
- }))
- description = "Role assignments scoped to the network security group"
- default = {}
-}
-
-variable "role_assignments_vnet" {
- type = map(object({
- name = string
- }))
- description = "Role assignments scoped to the virtual network"
- default = {}
-}
-
-variable "role_assignments_subnet" {
- type = map(object({
- name = string
- }))
- description = "Role assignments scoped to the subnet"
- default = {}
-}
-
-variable "nsg_locks" {
- type = object({
- name = optional(string, "")
- type = optional(string, "CanNotDelete")
- })
- default = {}
- validation {
- condition = contains(["CanNotDelete", "ReadOnly"], var.nsg_locks.type)
- error_message = "Lock type must be one of: CanNotDelete, ReadOnly."
- }
-}
-
-variable "vnet_locks" {
- type = object({
- name = optional(string, "")
- type = optional(string, "CanNotDelete")
- })
- default = {}
- validation {
- condition = contains(["CanNotDelete", "ReadOnly"], var.vnet_locks.type)
- error_message = "Lock type must be one of: CanNotDelete, ReadOnly."
- }
-}
-
-variable "subnet_locks" {
- type = object({
- name = optional(string, "")
- type = optional(string, "CanNotDelete")
- })
- default = {}
- validation {
- condition = contains(["CanNotDelete", "ReadOnly"], var.subnet_locks.type)
- error_message = "Lock type must be one of: CanNotDelete, ReadOnly."
- }
-}
-
-variable "is_data_guard" {
- type = bool
- description = "Whether Data Guard is enabled"
- default = false
-}
-
-variable "tags" {
- type = map(any)
- description = "Tags to be added to the resources"
- default = {}
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_local.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_local.tf
deleted file mode 100644
index 1d32d1016..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/variables_local.tf
+++ /dev/null
@@ -1,13 +0,0 @@
-locals {
- vnet_oracle_name = "vnet1"
- database_subnet_name = "subnet1"
- vnet_oracle_addr = "10.0.0.0/16"
- database_subnet_prefix = "10.0.0.0/24"
-
-  # NOTE: both locals below are plain strings, so the .arm_id lookups always
-  # fall back to "" and the *_exists flags are always false; the data sources
-  # in vnet_main.tf are therefore always created.
-  vnet_oracle_arm_id   = try(local.vnet_oracle_name.arm_id, "")
-  vnet_oracle_exists   = length(local.vnet_oracle_arm_id) > 0
-  subnet_oracle_arm_id = try(local.database_subnet_name.arm_id, "")
-  subnet_oracle_exists = length(local.subnet_oracle_arm_id) > 0
-
- tags = {}
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/vnet_main.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/vnet_main.tf
deleted file mode 100644
index 9c7548304..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/network/vnet_main.tf
+++ /dev/null
@@ -1,37 +0,0 @@
-module "vnet" {
- source = "Azure/avm-res-network-virtualnetwork/azurerm"
- version = "0.1.3"
-
- resource_group_name = var.resource_group.name
- vnet_location = var.resource_group.location
- vnet_name = local.vnet_oracle_name
- virtual_network_address_space = [local.vnet_oracle_addr]
- subnets = {
- subnet1 = {
- address_prefixes = [local.database_subnet_prefix]
- azurerm_network_security_group = {
- id = azurerm_network_security_group.blank.id
- }
- }
- }
-
- tags = merge(local.tags, var.tags)
-}
-
-
-data "azurerm_virtual_network" "vnet_oracle" {
- count = local.vnet_oracle_exists ? 0 : 1
- name = local.vnet_oracle_name
- resource_group_name = var.resource_group.name
-
- depends_on = [module.vnet]
-}
-
-data "azurerm_subnet" "subnet_oracle" {
- count = local.subnet_oracle_exists ? 0 : 1
- name = local.database_subnet_name
- resource_group_name = var.resource_group.name
- virtual_network_name = data.azurerm_virtual_network.vnet_oracle[count.index].name
-
- depends_on = [module.vnet]
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/data_disk.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/data_disk.tf
deleted file mode 100644
index 18d437046..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/data_disk.tf
+++ /dev/null
@@ -1,83 +0,0 @@
-resource "azurerm_managed_disk" "data_disk" {
- count = length(local.data_disks)
- name = "${var.naming}-data-${count.index}"
- location = var.resource_group.location
- resource_group_name = var.resource_group.name
- storage_account_type = var.disk_type
- create_option = local.data_disks[count.index].create_option
- disk_size_gb = local.data_disks[count.index].disk_size_gb
- zone = var.availability_zone
-
- tags = merge(local.tags, var.tags)
-}
-
-resource "azurerm_managed_disk" "asm_disk" {
- count = length(local.asm_disks)
- name = "${var.naming}-asm-${count.index}"
- location = var.resource_group.location
- resource_group_name = var.resource_group.name
- storage_account_type = var.disk_type
- create_option = local.asm_disks[count.index].create_option
- disk_size_gb = local.asm_disks[count.index].disk_size_gb
- zone = var.availability_zone
-
- tags = merge(local.tags, var.tags)
-}
-
-resource "azurerm_managed_disk" "redo_disk" {
- count = length(local.redo_disks)
- name = "${var.naming}-redo-${count.index}"
- location = var.resource_group.location
- resource_group_name = var.resource_group.name
- storage_account_type = var.disk_type
- create_option = local.redo_disks[count.index].create_option
- disk_size_gb = local.redo_disks[count.index].disk_size_gb
- zone = var.availability_zone
-
- tags = merge(local.tags, var.tags)
-}
-
-resource "azurerm_virtual_machine_data_disk_attachment" "data_disk_attachment" {
- count = length(local.data_disks)
- managed_disk_id = azurerm_managed_disk.data_disk[count.index].id
- virtual_machine_id = var.vm.id
- caching = local.data_disks[count.index].caching
- write_accelerator_enabled = local.data_disks[count.index].write_accelerator_enabled
- lun = local.data_disks[count.index].lun
-}
-
-resource "azurerm_virtual_machine_data_disk_attachment" "asm_disk_attachment" {
- count = length(local.asm_disks)
- managed_disk_id = azurerm_managed_disk.asm_disk[count.index].id
- virtual_machine_id = var.vm.id
- caching = local.asm_disks[count.index].caching
- write_accelerator_enabled = local.asm_disks[count.index].write_accelerator_enabled
- lun = local.asm_disks[count.index].lun
-}
-
-resource "azurerm_virtual_machine_data_disk_attachment" "redo_disk_attachment" {
- count = length(local.redo_disks)
- managed_disk_id = azurerm_managed_disk.redo_disk[count.index].id
- virtual_machine_id = var.vm.id
- caching = local.redo_disks[count.index].caching
- write_accelerator_enabled = local.redo_disks[count.index].write_accelerator_enabled
- lun = local.redo_disks[count.index].lun
-}
-
-data "azurerm_managed_disk" "data_disk" {
- count = length(local.data_disks)
- name = azurerm_managed_disk.data_disk[count.index].name
- resource_group_name = var.resource_group.name
-}
-
-data "azurerm_managed_disk" "asm_disk" {
- count = length(local.asm_disks)
- name = azurerm_managed_disk.asm_disk[count.index].name
- resource_group_name = var.resource_group.name
-}
-
-data "azurerm_managed_disk" "redo_disk" {
- count = length(local.redo_disks)
- name = azurerm_managed_disk.redo_disk[count.index].name
- resource_group_name = var.resource_group.name
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/outputs.tf
deleted file mode 100644
index 5a50b7284..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/outputs.tf
+++ /dev/null
@@ -1,24 +0,0 @@
-output "data_disks" {
- value = local.data_disks
-}
-
-output "asm_disks" {
- value = local.asm_disks
-}
-
-output "redo_disks" {
- value = local.redo_disks
-}
-
-
-output "data_disks_resource" {
- value = data.azurerm_managed_disk.data_disk
-}
-
-output "asm_disks_resource" {
- value = data.azurerm_managed_disk.asm_disk
-}
-
-output "redo_disks_resource" {
- value = data.azurerm_managed_disk.redo_disk
-}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/providers.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/providers.tf
deleted file mode 100644
index c9561f7e8..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/providers.tf
+++ /dev/null
@@ -1,24 +0,0 @@
-terraform {
- required_version = ">=1.2"
- required_providers {
- azurerm = {
- source = "hashicorp/azurerm"
- version = ">=3.11.0, <4.0"
- }
- azapi = {
- source = "Azure/azapi"
- version = "=1.8.0"
- }
- }
-}
-
-provider "azurerm" {
- features {
- resource_group {
- prevent_deletion_if_contains_resources = true
- }
- virtual_machine {
- delete_os_disk_on_deletion = true
- }
- }
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/resource_lock.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/resource_lock.tf
deleted file mode 100644
index cb4c1be3f..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/resource_lock.tf
+++ /dev/null
@@ -1,8 +0,0 @@
-resource "azurerm_management_lock" "data_disk" {
- count = length(var.data_disk_locks) > 1 && length(try(var.data_disk_locks.name, "")) > 1 ? 1 : 0
- name = var.data_disk_locks.name
- scope = data.azurerm_managed_disk.data_disk[0].id
- lock_level = var.data_disk_locks.type
-
- depends_on = [azurerm_managed_disk.data_disk]
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/role_assignments.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/role_assignments.tf
deleted file mode 100644
index d7aff7956..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/role_assignments.tf
+++ /dev/null
@@ -1,17 +0,0 @@
-data "azurerm_client_config" "current" {}
-
-data "azurerm_role_definition" "builtin" {
- for_each = var.role_assignments
- name = each.value.name
-}
-
-resource "azurerm_role_assignment" "assignment" {
- for_each = var.role_assignments
- role_definition_name = data.azurerm_role_definition.builtin[each.key].name
- principal_id = data.azurerm_client_config.current.object_id
- scope = try(each.value.scope, data.azurerm_managed_disk.data_disk[0].id)
- skip_service_principal_aad_check = try(each.value.skip_service_principal_aad_check, false)
- description = try(each.value.description, null)
- condition = try(each.value.condition, null)
- condition_version = try(each.value.condition_version, null)
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_global.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_global.tf
deleted file mode 100644
index 69ee40511..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_global.tf
+++ /dev/null
@@ -1,92 +0,0 @@
-variable "naming" {
- description = "Defines the names for the resources"
-}
-
-variable "vm" {
- description = "Virtual machine name"
-}
-
-variable "resource_group" {
- description = "Details of the resource group"
- default = {}
-}
-
-variable "disk_type" {
- description = "The type of the storage account"
- default = "Premium_LRS"
- validation {
- condition = contains(["Standard_LRS", "StandardSSD_ZRS", "Premium_LRS", "PremiumV2_LRS", "Premium_ZRS", "StandardSSD_LRS", "UltraSSD_LRS"], var.disk_type)
- error_message = "Allowed values are Standard_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS, StandardSSD_LRS, UltraSSD_LRS"
- }
-}
-
-variable "database_disks_options" {
- description = "Details of the database node"
- default = {
- data_disks = [
- {
- count = 1
- caching = "ReadOnly"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 20
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- }
- ],
- asm_disks = [
- {
- count = 1
- caching = "ReadOnly"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 10
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- }
- ]
- redo_disks = [
- {
- count = 1
- caching = "None"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 60
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- }
- ]
- }
-}
-
-variable "role_assignments" {
- description = "Role assignments"
- default = {}
-}
-
-variable "data_disk_locks" {
- type = object({
- name = optional(string, "")
- type = optional(string, "CanNotDelete")
- })
- default = {}
- validation {
- condition = contains(["CanNotDelete", "ReadOnly"], var.data_disk_locks.type)
- error_message = "Lock type must be one of: CanNotDelete, ReadOnly."
- }
-}
-
-variable "availability_zone" {
- description = "The availability zones of the resource"
- default = null
-}
-
-variable "is_data_guard" {
- description = "Whether Data Guard is enabled"
- default = false
-}
-
-variable "tags" {
- description = "Tags to be added to the resources"
- default = {}
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_local.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_local.tf
deleted file mode 100644
index 6a03a5998..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/modules/storage/variables_local.tf
+++ /dev/null
@@ -1,51 +0,0 @@
-locals {
- data_disks = flatten(
- [
- for disk in var.database_disks_options.data_disks : [
- for i in range(0, disk.count) : {
- name = "${var.vm.name}-datadisk${i}"
- caching = disk.caching
- create_option = disk.create_option
- disk_size_gb = disk.disk_size_gb
- lun = disk.lun + i
- managed_disk_type = disk.disk_type
- storage_account_type = disk.disk_type
- write_accelerator_enabled = disk.write_accelerator_enabled
- }
- ]
- ]
- )
- asm_disks = flatten(
- [
- for disk in var.database_disks_options.asm_disks : [
- for i in range(0, disk.count) : {
- name = "${var.vm.name}-asmdisk${i}"
- caching = disk.caching
- create_option = disk.create_option
- disk_size_gb = disk.disk_size_gb
- lun = disk.lun + i
- managed_disk_type = disk.disk_type
- storage_account_type = disk.disk_type
- write_accelerator_enabled = disk.write_accelerator_enabled
- }
- ]
- ]
- )
- redo_disks = flatten(
- [
- for disk in var.database_disks_options.redo_disks : [
- for i in range(0, disk.count) : {
- name = "${var.vm.name}-redodisk${i}"
- caching = disk.caching
- create_option = disk.create_option
- disk_size_gb = disk.disk_size_gb
- lun = disk.lun + i
- managed_disk_type = disk.disk_type
- storage_account_type = disk.disk_type
- write_accelerator_enabled = disk.write_accelerator_enabled
- }
- ]
- ]
- )
- tags = {}
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/outputs.tf
deleted file mode 100644
index 2c94f297a..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/outputs.tf
+++ /dev/null
@@ -1,70 +0,0 @@
-# ###############################################################################
-# # #
-# # Resource Group #
-# # #
-# ###############################################################################
-output "resource_group" {
- value = module.common_infrastructure.resource_group
-}
-
-# output "created_resource_group_id" {
-# description = "Created resource group ID"
-# value = module.common_infrastructure.resource_group.id
-# }
-
-# output "created_resource_group_name" {
-# description = "Created resource group name"
-# value = module.common_infrastructure.resource_group.name
-# }
-
-# output "created_resource_group_subscription_id" {
-# description = "Created resource group' subscription ID"
-# value = module.common_infrastructure.resource_group.id
-# }
-
-# output "created_resource_group_tags" {
-# description = "Created resource group tags"
-# value = module.common_infrastructure.tags
-# }
-
-# ###############################################################################
-# # #
-# # Network #
-# # #
-# ###############################################################################
-# output "network_location" {
-# value = module.network.network_location
-# }
-
-# output "db_subnet" {
-# value = module.network.db_subnet
-# }
-
-# ###############################################################################
-# # #
-# # Storage #
-# # #
-# ###############################################################################
-# output "database_data_disks_primary" {
-# value = module.storage_primary.data_disks
-# }
-
-# output "database_asm_disks_primary" {
-# value = module.storage_primary.asm_disks
-# }
-
-# output "database_redo_disks_primary" {
-# value = module.storage_primary.redo_disks
-# }
-
-# output "database_data_disks_secondary" {
-# value = module.storage_secondary.data_disks
-# }
-
-# output "database_asm_disks_secondary" {
-# value = module.storage_secondary.asm_disks
-# }
-
-# output "database_redo_disks_secondary" {
-# value = module.storage_secondary.redo_disks
-# }
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/providers.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/providers.tf
deleted file mode 100644
index a6c69adcb..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/providers.tf
+++ /dev/null
@@ -1,29 +0,0 @@
-terraform {
- required_version = ">=1.7.0"
- required_providers {
- azurerm = {
- source = "hashicorp/azurerm"
- version = ">=3.11.0, <4.0"
- }
- azapi = {
- source = "Azure/azapi"
- version = ">=1.8.0"
- }
- }
-}
-
-provider "azurerm" {
- skip_provider_registration = true
- features {
- resource_group {
- prevent_deletion_if_contains_resources = true
- }
- virtual_machine {
- delete_os_disk_on_deletion = true
- }
- }
-}
-
-provider "azapi" {
- use_oidc = true
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/resources.telemetry.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/resources.telemetry.tf
deleted file mode 100644
index 0a320c3cc..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/resources.telemetry.tf
+++ /dev/null
@@ -1,15 +0,0 @@
-# The following random id is created once per module instantiation and is appended to the telemetry deployment name
-resource "random_id" "telem" {
- count = local.disable_telemetry ? 0 : 1
- byte_length = 4
-}
-
-# This is the core module telemetry deployment that is only created if telemetry is enabled.
-# It is deployed to the default subscription
-resource "azurerm_subscription_template_deployment" "telemetry_core" {
- count = local.telem_core_deployment_enabled ? 1 : 0
- provider = azurerm
- name = local.telem_core_arm_deployment_name
- location = var.location
- template_content = local.telem_arm_subscription_template_content
-}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/transform.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/transform.tf
deleted file mode 100644
index 17780f880..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/transform.tf
+++ /dev/null
@@ -1,39 +0,0 @@
-locals {
- infrastructure = {
- region = coalesce(var.location, try(var.infrastructure.region, ""))
- resource_group = {
- name = try(
- coalesce(
- var.resourcegroup_name,
- try(var.infrastructure.resource_group.name, "")
- ),
- ""
- )
- }
- vnet = {
- name = try(
- coalesce(
- local.vnet_oracle_name,
- try(var.infrastructure.vnet.name, "")
- ),
- ""
- )
- }
- subnet = {
- name = try(
- coalesce(
- local.database_subnet_name,
- try(var.infrastructure.subnet.name, "")
- ),
- ""
- )
- }
- tags = try(
- coalesce(
- var.resourcegroup_tags,
- try(var.infrastructure.tags, {})
- ),
- {}
- )
- }
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables.md
deleted file mode 100644
index 8bc43b1eb..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables.md
+++ /dev/null
@@ -1,173 +0,0 @@
-# Terraform Variable Explanations
-
-1. **[Common Parameters](#common-parameters)**
-
- - [`location`](#location)
- - [`resourcegroup_name`](#resourcegroup_name)
- - [`resourcegroup_tags`](#resourcegroup_tags)
- - [`is_diagnostic_settings_enabled`](#is_diagnostic_settings_enabled)
- - [`diagnostic_target`](#diagnostic_target)
- - [`infrastructure`](#infrastructure)
-
-2. **[Virtual Machine Parameters](#virtual-machine-parameters)**
-
- - [`ssh_key`](#ssh_key)
- - [`vm_sku`](#vm_sku)
- - [`vm_source_image_reference`](#vm_source_image_reference)
- - [`vm_os_disk`](#vm_os_disk)
-
-3. **[Database Parameters](#database-parameters)**
- - [`database`](#database)
- - [`database_disks_options`](#database_disks_options)
- - [`database_db_nic_ips`](#database_db_nic_ips)
-
-### `location`
-
-- **Description:** Defines the Azure location where the resources will be deployed.
-- **Type:** String
-- **Default Value:** "eastus"
-
-### `resourcegroup_name`
-
-- **Description:** If defined, this variable specifies the name of the resource group into which the resources will be deployed.
-- **Default Value:** ""
-
-### `resourcegroup_tags`
-
-- **Description:** Tags to be added to the resource group.
-- **Default Value:** {}
-
-### `is_diagnostic_settings_enabled`
-
-- **Description:** Whether diagnostic settings are enabled.
-- **Default Value:** false
-
-### `diagnostic_target`
-
-- **Description:** The destination type of the diagnostic settings. Allowed values are "Log_Analytics_Workspace," "Storage_Account," "Event_Hubs," or "Partner_Solutions."
-- **Default Value:** "Log_Analytics_Workspace"
-
-### `infrastructure`
-
-- **Description:** Details of the Azure infrastructure to deploy the SAP landscape into.
-- **Default Value:** {}
-
-## Virtual Machine Parameters
-
-### `ssh_key`
-
-- **Description:** Value of the SSH public key to be used for the virtual machines.
-
-### `vm_sku`
-
-- **Description:** The SKU of the virtual machine.
-- **Default Value:** "Standard_D4s_v3"
-
-### `vm_source_image_reference`
-
-- **Description:** The source image reference of the virtual machine.
-- **Default Value:**
- ```hcl
- {
- publisher = "Oracle"
- offer = "Oracle-Linux"
- sku = "79-gen2"
- version = "7.9.36"
- }
- ```
-
-### `vm_os_disk`
-
-- **Description:** Details of the OS disk, including name, caching, storage account type, disk encryption set, and disk size.
-- **Default Value:**
- ```hcl
- {
- name = "osdisk"
- caching = "ReadWrite"
- storage_account_type = "Premium_LRS"
- disk_encryption_set_id = null
- disk_size_gb = 128
- }
- ```
-
-## Database Parameters
-
-### `database`
-
-- **Description:** Details of the database node, including options such as DHCP, authentication type, and data disks.
-- **Default Value:**
- ```hcl
- {
- use_DHCP = true
- authentication = {
- type = "key"
- }
- data_disks = [
- {
- count = 1
- caching = "ReadOnly"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 0
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- },
- {
- count = 1
- caching = "None"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 1
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- }
- ]
- }
- ```
-
-### `database_disks_options`
-
-- **Description:** Details of the database node's disk options, including data disks, ASM disks, and redo disks.
-- **Default Value:**
- ```hcl
- {
- data_disks = [
- {
- count = 1
- caching = "ReadOnly"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 20
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- }
- ],
- asm_disks = [
- {
- count = 1
- caching = "ReadOnly"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 10
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- }
- ]
- redo_disks = [
- {
- count = 1
- caching = "None"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 60
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- }
- ]
- }
- ```
-
-### `database_db_nic_ips`
-
-- **Description:** If provided, the database tier virtual machines will be configured using the specified IPs.
-- **Default Value:** [""]
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_global.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_global.tf
deleted file mode 100644
index 0843270a1..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_global.tf
+++ /dev/null
@@ -1,184 +0,0 @@
-#########################################################################################
-# Common parameters #
-#########################################################################################
-variable "location" {
- description = "Defines the Azure location where the resources will be deployed"
- type = string
- default = "germanywestcentral"
-}
-
-variable "resourcegroup_name" {
- description = "If defined, the name of the resource group into which the resources will be deployed"
- default = "rg-mh-oracle4"
-}
-
-variable "resourcegroup_tags" {
- description = "tags to be added to the resource group"
- default = {}
-}
-
-variable "is_diagnostic_settings_enabled" {
- description = "Whether diagnostic settings are enabled"
- default = false
-}
-
-variable "diagnostic_target" {
- description = "The destination type of the diagnostic settings"
- default = "Log_Analytics_Workspace"
- validation {
- condition = contains(["Log_Analytics_Workspace", "Storage_Account", "Event_Hubs", "Partner_Solutions"], var.diagnostic_target)
- error_message = "Allowed values are Log_Analytics_Workspace, Storage_Account, Event_Hubs, Partner_Solutions"
- }
-}
-
-variable "infrastructure" {
- description = "Details of the Azure infrastructure to deploy the SAP landscape into"
- default = {}
-}
-
-variable "disable_telemetry" {
- type = bool
- description = "If set to true, will disable telemetry for the module. See https://aka.ms/alz-terraform-module-telemetry."
- default = false
-}
-#########################################################################################
-# Virtual Machine parameters #
-#########################################################################################
-variable "ssh_key" {
- description = "value of the ssh public key to be used for the virtual machines"
-}
-
-variable "vm_sku" {
- description = "The SKU of the virtual machine"
- default = "Standard_D4s_v5"
-}
-
-variable "vm_source_image_reference" {
- description = "The source image reference of the virtual machine"
- default = {
- publisher = "Oracle"
- offer = "oracle-database-19-3"
- sku = "oracle-database-19-0904"
- version = "latest"
- }
-}
-
-variable "vm_os_disk" {
- description = "Details of the OS disk"
- default = {
- name = "osdisk"
- caching = "ReadWrite"
- storage_account_type = "Premium_LRS"
- disk_encryption_set_id = null
- disk_size_gb = 128
- }
-}
-
-variable "vm_user_assigned_identity_id" {
- description = "The ID of the user assigned identity to be used for the virtual machine"
-}
-
-variable "jit_wait_for_vm_creation" {
- description = "The duration to wait for the virtual machine to be created before creating the JIT policy"
- default = "60s"
-}
-
-variable "vm_extensions" {
- description = "The extensions to be added to the virtual machine"
- type = map(object({
- name = string
- publisher = string
- type = string
- type_handler_version = string
- auto_upgrade_minor_version = optional(bool)
- automatic_upgrade_enabled = optional(bool)
- failure_suppression_enabled = optional(bool, false)
- settings = optional(string)
- protected_settings = optional(string)
- provision_after_extensions = optional(list(string), [])
- tags = optional(map(any))
- protected_settings_from_key_vault = optional(object({
- secret_url = string
- source_vault_id = string
- }))
- }))
- default = {}
-}
-
-
-#########################################################################################
-# Database parameters #
-#########################################################################################
-variable "database" {
- description = "Details of the database node"
- default = {
- use_DHCP = true
- authentication = {
- type = "key"
- }
- data_disks = [
- {
- count = 1
- caching = "ReadOnly"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 0
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- },
- {
- count = 1
- caching = "None"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 1
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- }
- ]
- }
-}
-
-variable "database_disks_options" {
- description = "Details of the database node"
- default = {
- data_disks = [
- {
- count = 1
- caching = "ReadOnly"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 1
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- }
- ],
- asm_disks = [
- {
- count = 1
- caching = "ReadOnly"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 0
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- }
- ]
- redo_disks = [
- {
- count = 1
- caching = "None"
- create_option = "Empty"
- disk_size_gb = 1024
- lun = 2
- disk_type = "Premium_LRS"
- write_accelerator_enabled = false
- }
- ]
- }
-}
-
-variable "database_db_nic_ips" {
- description = "If provided, the database tier virtual machines will be configured using the specified IPs"
- default = [""]
-}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_local.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_local.tf
deleted file mode 100644
index 124d5e8e0..000000000
--- a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/environment_setup/terraform/data_guard/variables_local.tf
+++ /dev/null
@@ -1,46 +0,0 @@
-locals {
- vnet_oracle_name = "vnet1"
- database_subnet_name = "subnet1"
- disable_telemetry = var.disable_telemetry
- telem_core_puid = "440d81eb-6657-4a7d-ad93-c7e9cc09e5da"
- empty_string = ""
- telem_random_hex = can(random_id.telem[0].hex) ? random_id.telem[0].hex : local.empty_string
-}
-
-
-# This constructs the ARM deployment name that is used for the telemetry.
-# We shouldn't ever hit the 64 character limit but use substr just in case
-locals {
- telem_core_arm_deployment_name = substr(
- format(
- "pid-%s_%s",
- local.telem_core_puid,
- local.telem_random_hex,
- ),
- 0,
- 64
- )
-}
-
-locals {
- telem_arm_subscription_template_content = <<TEMPLATE
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/README.md
new file mode 100644
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/README.md
+```bash
+# Run with one-way TLS (no wallet required)
+docker run --rm adb-nettest adbping \
+  -u admin -p password -o \
+  -l "YourTNSConnectionString" \
+  -c java -t 3 -d 30
+
+# Run with Oracle wallet
+docker run --rm -v $(pwd)/wallet:/opt/oracle/wallet adb-nettest adbping \
+  -u admin -p password -s service_name -w /opt/oracle/wallet
+
+# Network diagnostics
+docker run --rm adb-nettest network-test all your-adb-host.oraclecloud.com
+```
+
+## Azure Container Registry Deployment
+
+```bash
+# Tag for ACR
+docker tag adb-nettest:latest your-registry.azurecr.io/adb-nettest:v2.1
+
+# Push to ACR
+docker push your-registry.azurecr.io/adb-nettest:v2.1
+```
+
+## Notes
+
+- This is the **final production version** incorporating all improvements
+- The adbping tool is **pre-extracted** and immediately available
+- All Oracle JDBC drivers are pre-configured in the CLASSPATH
+- For Kubernetes deployment, use the YAML files in the `resources\infra\k8s\` directory
+
+## About adbping Tool
+
+The adbping tool is Oracle's official connection and latency testing tool for Oracle Autonomous Database. It supports both wallet-based and one-way TLS connections with comprehensive performance metrics including percentile statistics.
+
+**Key Features:**
+
+- Multi-threaded performance testing
+- Java and SQL*Plus client support
+- Connection pooling statistics
+- CSV output for analysis
+- Custom SQL file execution
+- Comprehensive latency metrics (P90, P95, P99)
+
+For detailed adbping usage and options, run `docker run --rm adb-nettest adbping --help`
+
+adbping - ADB Ping and Latency Benchmarking Tool 1.0
+
+Synopsis:
+
+adbping -u -p -s -w -i -j -t -d -c -l
+
+Description:
+
+Connect and run a simple SQL and return the elapsed time statistics. SQL can be run in multiple concurrent threads and using various clients. As of version 1.0, we are supporting SQL*Plus and Java clients.
+
+Options:
+
+-u / --username : Username to connect to the DB. If the username option is not provided in the command line, the user will be prompted to enter the username.
+
+-p / --password : Password to connect to the DB. If the password is not provided in the command line, the user will be prompted to enter the password.
+
+-s / --service : Service name to connect. It can be any of the service names listed in the tnsnames.ora in the downloaded wallet. Example: dbname_tp/dbname_high
+
+-t / --threads : Optional. Number of concurrent threads to be used to connect to the DB. Depending on the client type, there will be 't' parallel connection attempts spawned. Defaults to 1.
+
+-d / --duration : Optional. Duration in seconds to run adbping. If not provided, adbping runs for one iteration of 't' parallel connections. Defaults to 5 secs.
+
+-n / --numconnections : Optional. Number of total connections to run. If numconnections is provided, the duration is set to a maximum of 60 minutes and the test executes until the total number of connections reaches numconnections or 60 minutes elapse, whichever is earlier.
+
+-c / --testclient : Optional. In version 1.0, adbping can run the connection benchmarking test using 'SQL*Plus' and 'Java' clients. Valid input values are sqlplus / java. Defaults to sqlplus.
+
+-e / --continueonerror : Optional. By default, the java client will abort the run when there is a pool error. The -e option lets the tool continue on error, doing a pool recycle in the case of a pool based test (default).
+
+-i / --instantclient : Optional. The instant client location is required if adbping needs to be run using a specific client version. This input is optional if adbping can use the 'sqlplus' available in the PATH.
+
+-j / --javahome : Optional. The javahome location is required if adbping needs to be run using a specific Java version. This input is optional if adbping can use the 'java' available in the PATH.
+
+-o / --onewaytls : Flag to indicate that we need to connect with a one-way TLS URL. Any wallet location provided will be a no-op since we will use the direct TLS URL to connect.
+
+-l / --tlsurl : Optional. TLS connection URL obtained from Autonomous Database console -> DB connection -> TLS. This is mandatory when the --onewaytls option is specified.
+
+-q / --customsqlfile : Optional. Custom SQL file to execute for the connection test. The SQL file can contain SQL statements or PL/SQL blocks. If not provided, the default connection test SQL 'select 1 from dual' will be executed. Sample SQL files can be found in the samples folder. Custom SQL files are executed as is with the specified client, so the script syntax should be compatible with the respective client used.
+
+-r / --trace : Optional. Enables finer debug tracing into the adbping.trc file.
+
+-f / --outputformat : Specify the format option csv to print the test results in a CSV format that can be used for result post-processing. The default is to print the test summary in the adbping standard format.
+
+-z / --jdbcoptions : Optional. Custom JDBC options can be provided on the input command line. Valid JDBC options are the following; one or more of these options can be provided as a comma separated list. Note: these options are for advanced users and we expect the user to be aware of the impact.
+
+a. INIT_POOL_SIZE - Initial pool size: if nothing is provided, defaults to threads count.
+
+b. MIN_POOL_SIZE - Min pool size: if nothing is provided, defaults to threads count.
+
+c. MAX_POOL_SIZE - Max pool size: if nothing is provided, defaults to threads count.
+
+d. JDBC_PREFETCH_SIZE -
+
+Example: -z 'INIT_POOL_SIZE=5,JDBC_PREFETCH_SIZE=25'
+
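+For result post-processing, the -f csv output format described above can be combined with the container invocation from the usage examples. A minimal sketch (image name, credentials, service name, and thread/duration values are placeholders):
+
+```bash
+# Capture adbping results as CSV for later analysis (placeholder values)
+docker run --rm -v "$(pwd)/wallet:/opt/oracle/wallet" adb-nettest \
+  adbping -u admin -p 'xxxxxxxx' -s mydb_tp -w /opt/oracle/wallet \
+  -c java -t 4 -d 60 -f csv > adbping-results.csv
+```
+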
+Example Usage
+
+Case 1
+
+Run one thread of SQL*Plus connections back to back for a duration of 10 seconds. If no test client (-c) option is specified, the tool uses "sqlplus" by default.
+If the -i / --instantclient option is not specified, the tool uses "sqlplus" from PATH; if there is no "sqlplus" in PATH, the tool errors out.
+
+$ ./adbping -u admin -p xxxxxxxx -w /home/opc/siraj/Wallet_db01 -c java -j /home/opc/siraj/jdk1.8.0_301 -s db01_low -d 30
++++Test Summary+++
+Test Client: java
+Number of concurrent threads: 1
+Duration (secs): 30
+SQL executed: select 1 from dual;
+Pass: 27079 Fail: 0
+Test start date: 2022-11-29 06:39:53.701708+00:00
+Test end date: 2022-11-29 06:40:25.940934+00:00
+Java connection pool Stats: Initsize:1, Maxsize:1, Pool setup time(ms):1713.559
+SQL Execution Time(ms) : Min:0.423 Max:26.32 Avg:1.05 Median:0.819 Perc90:1.683 Perc95:2.156 Perc99:4.513
+Connect + SQL Execution Time(ms) : Min:0.43 Max:26.34 Avg:1.07 Median:0.841 Perc90:1.698 Perc95:2.173 Perc99:4.528
+
+Interpretation of the results
+-----------------------------
+
+1. Pass/Fail count: Indicates the total number of connections passed/failed in the defined duration by the defined number of threads.
+
+2. SQL execution time: Time taken to just execute the SQL. Connection time not included.
+For sqlplus, this would be the elapsed time reported by sqlplus.
+
+3. Connect + SQL Execution Time: Time taken to connect and execute SQL.
+For sqlplus, this would be the time to connect and run the sql.
+For java, it would be time taken to getConnection() and execute the query.
+
+4. Java connection pool stats: Reports the time taken to set up the Java connection pool, along with its initial and max size.
+All query executions do a getConnection() and execute the SQL.
+
+5. Perc90, Perc95, Perc99: This is the percentile value indicating 90%, 95% or 99% of the latencies are below the respective value.
+
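+The percentile figures can also be recomputed offline from a run captured with -f csv. A minimal sketch using the nearest-rank method, assuming an input file with one latency value in milliseconds per line (the exact adbping CSV layout may differ, so extract the latency column first):
+
+# Nearest-rank P90/P95/P99 from one latency value (ms) per line
+sort -n latencies.txt | awk '
+  { v[NR] = $1 }
+  END {
+    n = split("90 95 99", ps, " ")
+    for (i = 1; i <= n; i++) {
+      idx = int((ps[i] / 100) * NR)        # rank = ceil(p/100 * N)
+      if (idx < (ps[i] / 100) * NR) idx++
+      if (idx < 1) idx = 1
+      printf "Perc%d: %s ms\n", ps[i], v[idx]
+    }
+  }'
+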
+Case 2
+
+Run 5 parallel threads of the JDBC client, repeating for a duration of 10 seconds.
+If the -c / --testclient java option is specified, the tool uses "java" from PATH.
+If the test needs to use a custom JDK, the Java home can be specified with the -j / --javahome option.
+
+
+
+$ ./adbping -u admin -p xxxxxxxx -i /home/opc/instantclient_18_5 -w /home/opc/siraj/Wallet -s db002_high -t 1 -j /home/opc/jdk1.8.0_231 -c java -t 10 -d 30
+
++++Test Summary+++
+Test Client: java
+Number of concurrent threads: 10
+Duration (secs): 30
+Custom SQL executed: /adbadmin/satish/adbping/test.sql
+Pass: 51709 Fail: 0
+Test start date: 2022-11-29 23:02:30.157091+00:00
+Test end date: 2022-11-29 23:03:18.777946+00:00
+Java connection pool Stats: Initsize:1, Maxsize:1, Pool setup time(ms):17177.584
+SQL Execution Time(ms) : Min:0.366 Max:31.755 Avg:0.515 Median:0.489 Perc90:0.596 Perc95:0.667 Perc99:1.023
+Connect + SQL Execution Time(ms) : Min:0.38 Max:31.766 Avg:0.537 Median:0.508 Perc90:0.626 Perc95:0.719 Perc99:1.078
+
+Case 3
+
+Run 5 parallel threads of the JDBC client, repeating for a duration of 10 seconds.
+The --onewaytls option, along with the --tlsurl option, can be used to run tests against the database using a one-way TLS URL.
+Reference docs:
+How to configure DBs for one-way TLS - https://docs.oracle.com/en/cloud/paas/autonomous-database/adbsa/support-tls-mtls-authentication.html#GUID-6A34B30A-3692-4D1F-8458-FD8F32736199
+How to set up ACLs for the DB - https://docs.oracle.com/en/cloud/paas/autonomous-database/adbsa/network-access-control-list-configure.html#GUID-B6389402-3F4D-45A2-A4DE-EAF1B31D8E50
+
+$ ./adbping -u admin -p xxxxxxxx --onewaytls --tlsurl '(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=adb.us-ashburn-1.oraclecloud.com))(connect_data=(service_name=tenant_db_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=yes)(ssl_server_cert_dn="CN=adwc.uscom-east-1.oraclecloud.com, OU=Oracle BMCS US, O=Oracle Corporation, L=Redwood City, ST=California, C=US")))' -t 1 -d 5 -c java -j /home/opc/siraj/jdk1.8.0_231
+
++++Test Summary+++
+Test Client: java
+Number of concurrent threads: 1
+Duration (secs): 5
+Custom SQL executed: select 1 from dual
+Pass: 8540 Fail: 0
+Test start date: 2022-11-29 23:02:30.157091+00:00
+Test end date: 2022-11-29 23:03:52.777946+00:00
+Java connection pool Stats: Initsize:1, Maxsize:1, Pool setup time(ms):17177.584
+SQL Execution Time(ms) : Min:0.366 Max:31.755 Avg:0.515 Median:0.489 Perc90:0.596 Perc95:0.667 Perc99:1.023
+Connect + SQL Execution Time(ms) : Min:0.38 Max:31.766 Avg:0.537 Median:0.508 Perc90:0.626 Perc95:0.719 Perc99:1.078
+
+Case 4
+
+Java client test using adbping on Windows machines.
+Only the Java client is allowed on Windows in this release of adbping.
+
+C:\temp> adbping.exe -u admin -p xxxxxxx -w C:\Users\opc\Wallet_iad_regional -c java -o -l "(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=adb.us-ashburn-1.oraclecloud.com))(connect_data=(service_name=tenant_db_low.adb.oraclecloud.com))(security=(ssl_server_dn_match=yes)))"
+
++++Test Summary+++
+Test Client: java
+Number of concurrent threads: 1
+Duration (secs): 5
+SQL executed: select 1 from dual;
+JDBC Options:NA
+Pass: 3052 Fail: 0
+Test start date: 2023-03-01 19:30:25.537090+00:00
+Test end date: 2023-03-01 19:30:39.307521+00:00
+Java connection pool Stats: Initsize:1, Maxsize:1, Pool setup time(ms):8048.760
+SQL Execution Time(ms) : Min:1.059 Max:27.683 Avg:1.457 Median:1.337 Perc90:1.63 Perc95:1.78 Perc99:2.911
+Connect + SQL Execution Time(ms) : Min:1.087 Max:27.711 Avg:1.539 Median:1.397 Perc90:1.776 Perc95:1.961 Perc99:3.54
+
+C:\temp> adbping.exe -u admin -p xxxxxx -s db_low -w C:\Users\opc\Wallet_iad_regional -c java
+
++++Test Summary+++
+Test Client: java
+Number of concurrent threads: 1
+Duration (secs): 5
+SQL executed: select 1 from dual;
+JDBC Options:NA
+Pass: 1756 Fail: 0
+Test start date: 2023-03-01 19:32:34.408112+00:00
+Test end date: 2023-03-01 19:32:47.922135+00:00
+Java connection pool Stats: Initsize:1, Maxsize:1, Pool setup time(ms):7852.751
+SQL Execution Time(ms) : Min:1.797 Max:211.033 Avg:2.62 Median:2.04 Perc90:2.311 Perc95:2.625 Perc99:7.547
+Connect + SQL Execution Time(ms) : Min:1.83 Max:211.118 Avg:2.733 Median:2.137 Perc90:2.471 Perc95:2.823 Perc99:7.67
+
+
+
+
+Attachments:
+2863450.1-ADBPING_LINUX.X64-adbping_Linux.X64_230127.zip
+2863450.1-ADBPING_MACOS-adbping_macOS_230130.zip
+2863450.1-ADBPING_LIN_WIN-adbping_Linux.X64_Windows_230301.zip
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/build.sh b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/build.sh
new file mode 100644
index 000000000..d59d2a765
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/build.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Build script for Oracle ADB Network Testing Container
+
+set -e
+
+# Configuration
+IMAGE_NAME="adb-nettest"
+VERSION="v2.1"
+REGISTRY="your-registry.azurecr.io"
+
+echo "π Building Oracle ADB Network Testing Container"
+echo "================================================="
+
+# Build the Docker image
+echo "π¦ Building image: ${IMAGE_NAME}:${VERSION}"
+docker build -t "${IMAGE_NAME}:latest" .
+docker build -t "${IMAGE_NAME}:${VERSION}" .
+
+echo "β
Build completed successfully!"
+echo ""
+
+# Test the image
+echo "π§ͺ Testing the built image..."
+docker run --rm "${IMAGE_NAME}:latest" adbping --help | head -5
+
+echo ""
+echo "π Next steps:"
+echo "1. Tag for your registry:"
+echo " docker tag ${IMAGE_NAME}:${VERSION} ${REGISTRY}/${IMAGE_NAME}:${VERSION}"
+echo ""
+echo "2. Push to registry:"
+echo " docker push ${REGISTRY}/${IMAGE_NAME}:${VERSION}"
+echo ""
+echo "3. Test the image:"
+echo " docker run --rm -it ${IMAGE_NAME}:latest bash"
+echo ""
+echo "β
Build script completed!"
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/entrypoint.sh b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/entrypoint.sh
new file mode 100644
index 000000000..ea5177a40
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/entrypoint.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+
+# Entrypoint script for ADB Network Testing Container
+set -e
+
+# Colors for output
+BLUE='\033[0;34m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m'
+
+print_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
+print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
+print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
+
+# Show banner
+show_banner() {
+ cat << 'EOF'
+ ____ ____ ____ _ _ _ _
+ / _ || _ \| _ \ | \ | | ___| |_ __ _____ _ __| | __
+| |_| || | | | |_) | | \| |/ _ \ __\ \ /\ / / _ \| '__| |/ /
+| _ || |_| | _ < | |\ | __/ |_ \ V V / (_) | | | <
+|_| |_||____/|_| \_\ |_| \_|\___|\__| \_/\_/ \___/|_| |_|\_\
+ _____ _ _
+|_ _|__ ___| |_(_)_ __ __ _
+ | |/ _ \/ __| __| | '_ \ / _` |
+ | | __/\__ \ |_| | | | | (_| |
+ |_|\___||___/\__|_|_| |_|\__, |
+ |___/
+EOF
+ echo
+ print_success "Oracle ADB Network Testing Container"
+ echo "Version: 1.0"
+ echo "Available tools: adbping, dig, ping, traceroute, nc, curl, wget"
+ echo
+}
+
+# Show help information
+show_help() {
+ cat << EOF
+Oracle ADB Network Testing Container
+
+USAGE:
+ # Run adbping tool directly
+ docker run --rm -v /path/to/wallet:/opt/oracle/wallet adb-nettest \\
+ adbping -u admin -p password -s service_name -w /opt/oracle/wallet
+
+ # Run network tests
+ docker run --rm adb-nettest network-test dns adb.us-ashburn-1.oraclecloud.com
+ docker run --rm adb-nettest network-test ping adb.us-ashburn-1.oraclecloud.com
+ docker run --rm adb-nettest network-test all adb.us-ashburn-1.oraclecloud.com
+
+ # Interactive shell
+ docker run --rm -it adb-nettest bash
+
+AVAILABLE COMMANDS:
+ adbping - Oracle ADB ping and latency benchmarking tool
+ network-test - Comprehensive network testing script
+ dig - DNS lookup utility
+ nslookup - DNS lookup utility
+ ping - ICMP ping utility
+ traceroute - Network route tracing
+ nc (netcat) - TCP/UDP connectivity testing
+ curl - HTTP/HTTPS client
+ wget - HTTP/HTTPS downloader
+ telnet - Telnet client
+ nmap - Network discovery and port scanning
+ tcpdump - Network packet analyzer
+
+ENVIRONMENT VARIABLES:
+ TNS_ADMIN - Oracle wallet location (default: /opt/oracle/wallet)
+ ORACLE_HOME - Oracle client home (default: /usr/lib/oracle/21/client64)
+ JAVA_HOME - Java home (default: /usr/lib/jvm/java-11-openjdk)
+
+VOLUME MOUNTS:
+ /opt/oracle/wallet - Mount your Oracle wallet here
+ /opt/adbping/sql - Mount custom SQL files here
+
+EXAMPLES:
+ # Test with wallet authentication
+ docker run --rm -v \$(pwd)/wallet:/opt/oracle/wallet adb-nettest \\
+ adbping -u admin -p MyPassword123 -s mydb_high -w /opt/oracle/wallet
+
+ # Test with one-way TLS
+ docker run --rm adb-nettest \\
+ adbping -u admin -p MyPassword123 --onewaytls \\
+ --tlsurl '(description=...your_tls_connection_string...)'
+
+ # Comprehensive network testing
+ docker run --rm adb-nettest network-test all adb.us-ashburn-1.oraclecloud.com
+
+ # Interactive troubleshooting
+ docker run --rm -it -v \$(pwd)/wallet:/opt/oracle/wallet adb-nettest bash
+
+EOF
+}
+
+# Main execution
+if [[ $# -eq 0 ]] || [[ "$1" == "--help" ]] || [[ "$1" == "-h" ]]; then
+ show_banner
+ show_help
+ exit 0
+fi
+
+# Check if wallet directory is mounted and has files
+if [[ -d "/opt/oracle/wallet" ]]; then
+ wallet_files=$(ls -la /opt/oracle/wallet 2>/dev/null | wc -l)
+    if [[ $wallet_files -gt 3 ]]; then  # more than the "total" line plus . and ..
+ print_success "Oracle wallet detected in /opt/oracle/wallet"
+ else
+ print_warning "Oracle wallet directory is empty. Mount your wallet to /opt/oracle/wallet for adbping tests"
+ fi
+else
+ print_warning "Oracle wallet directory not found. Mount your wallet to /opt/oracle/wallet for adbping tests"
+fi
+
+# Execute the provided command
+exec "$@"
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/network-test.sh b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/network-test.sh
new file mode 100644
index 000000000..9e3fc2f99
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/adbping/network-test.sh
@@ -0,0 +1,300 @@
+#!/bin/bash
+
+# Network Testing Script for Oracle ADB
+# This script provides comprehensive network testing capabilities
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Function to print colored output
+print_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
+print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
+print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
+print_error() { echo -e "${RED}[ERROR]${NC} $1"; }
+
+# Function to show usage
+show_usage() {
+ cat << EOF
+Network Testing Script for Oracle ADB
+
+Usage: $0 [OPTIONS] <test_type> <target> [port]
+
+Test Types:
+ dns - DNS resolution test using dig
+ ping - ICMP ping test
+ trace - Traceroute test
+ port - TCP port connectivity test
+ http - HTTP/HTTPS connectivity test
+ oracle - Oracle-specific connectivity test
+ all - Run all network tests
+
+Options:
+ -h, --help Show this help message
+ -v, --verbose Verbose output
+ -c, --count Number of ping/test iterations (default: 4)
+ -t, --timeout Timeout in seconds (default: 10)
+
+Examples:
+ $0 dns adb.us-ashburn-1.oraclecloud.com
+ $0 ping adb.us-ashburn-1.oraclecloud.com
+ $0 port adb.us-ashburn-1.oraclecloud.com 1521
+ $0 all adb.us-ashburn-1.oraclecloud.com
+ $0 oracle adb.us-ashburn-1.oraclecloud.com 1521
+
+EOF
+}
+
+# Default values
+COUNT=4
+TIMEOUT=10
+VERBOSE=false
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ -h|--help)
+ show_usage
+ exit 0
+ ;;
+ -v|--verbose)
+ VERBOSE=true
+ shift
+ ;;
+ -c|--count)
+ COUNT="$2"
+ shift 2
+ ;;
+ -t|--timeout)
+ TIMEOUT="$2"
+ shift 2
+ ;;
+ -*)
+ print_error "Unknown option: $1"
+ show_usage
+ exit 1
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+# Check if test type is provided
+if [[ $# -lt 1 ]]; then
+ print_error "Test type is required"
+ show_usage
+ exit 1
+fi
+
+TEST_TYPE="$1"
+TARGET="$2"
+PORT="$3"
+
+# Function to test DNS resolution
+test_dns() {
+ local host="$1"
+ print_info "Testing DNS resolution for: $host"
+
+ if dig +short "$host" > /dev/null 2>&1; then
+ print_success "DNS resolution successful"
+ if $VERBOSE; then
+ dig "$host"
+ else
+ dig +short "$host"
+ fi
+ else
+ print_error "DNS resolution failed"
+ return 1
+ fi
+}
+
+# Function to test ping connectivity
+test_ping() {
+ local host="$1"
+ print_info "Testing ICMP connectivity to: $host (count: $COUNT)"
+
+ if ping -c "$COUNT" -W "$TIMEOUT" "$host"; then
+ print_success "Ping test successful"
+ else
+ print_error "Ping test failed"
+ return 1
+ fi
+}
+
+# Function to test traceroute
+test_traceroute() {
+ local host="$1"
+ print_info "Running traceroute to: $host"
+
+ if command -v traceroute >/dev/null 2>&1; then
+ traceroute -w "$TIMEOUT" "$host"
+ else
+ print_warning "traceroute command not available"
+ return 1
+ fi
+}
+
+# Function to test TCP port connectivity
+test_port() {
+ local host="$1"
+ local port="$2"
+ print_info "Testing TCP connectivity to: $host:$port"
+
+ if timeout "$TIMEOUT" nc -z "$host" "$port" 2>/dev/null; then
+ print_success "Port $port is open on $host"
+ else
+ print_error "Port $port is closed or filtered on $host"
+ return 1
+ fi
+}
+
+# Function to test HTTP/HTTPS connectivity
+test_http() {
+ local url="$1"
+ print_info "Testing HTTP/HTTPS connectivity to: $url"
+
+ if curl -I -m "$TIMEOUT" -s "$url" >/dev/null 2>&1; then
+ print_success "HTTP/HTTPS connectivity successful"
+ if $VERBOSE; then
+ curl -I -m "$TIMEOUT" "$url"
+ fi
+ else
+ print_error "HTTP/HTTPS connectivity failed"
+ return 1
+ fi
+}
+
+# Function to test Oracle-specific connectivity
+test_oracle() {
+ local host="$1"
+ local port="${2:-1521}"
+
+ print_info "Testing Oracle connectivity to: $host:$port"
+
+ # Test basic TCP connectivity first
+ if ! test_port "$host" "$port"; then
+ return 1
+ fi
+
+ # Test TNS ping if available
+ if command -v tnsping >/dev/null 2>&1; then
+ print_info "Testing TNS connectivity"
+ if tnsping "$host:$port" > /dev/null 2>&1; then
+ print_success "TNS connectivity successful"
+ else
+ print_warning "TNS ping failed, but TCP connection is working"
+ fi
+ else
+ print_info "tnsping not available, skipping TNS test"
+ fi
+}
+
+# Function to run all tests
+test_all() {
+ local host="$1"
+ local failed_tests=0
+
+ print_info "Running comprehensive network tests for: $host"
+ echo "================================================"
+
+ # DNS test
+ echo
+ test_dns "$host" || ((failed_tests++))
+
+ # Ping test
+ echo
+ test_ping "$host" || ((failed_tests++))
+
+ # Traceroute test
+ echo
+ test_traceroute "$host" || ((failed_tests++))
+
+ # Common Oracle ports
+ local oracle_ports=(1521 1522 2484)
+ for port in "${oracle_ports[@]}"; do
+ echo
+ test_port "$host" "$port" || true # Don't count as failure
+ done
+
+ # Oracle-specific tests
+ echo
+ test_oracle "$host" || ((failed_tests++))
+
+ # HTTPS test (common for Oracle Cloud)
+ echo
+ test_http "https://$host" || true # Don't count as failure
+
+ echo
+ echo "================================================"
+ if [ $failed_tests -eq 0 ]; then
+ print_success "All critical tests passed!"
+ else
+ print_warning "$failed_tests critical tests failed"
+ fi
+
+ return $failed_tests
+}
+
+# Main execution logic
+case "$TEST_TYPE" in
+ dns)
+ if [[ -z "$TARGET" ]]; then
+ print_error "Hostname is required for DNS test"
+ exit 1
+ fi
+ test_dns "$TARGET"
+ ;;
+ ping)
+ if [[ -z "$TARGET" ]]; then
+ print_error "Hostname is required for ping test"
+ exit 1
+ fi
+ test_ping "$TARGET"
+ ;;
+ trace|traceroute)
+ if [[ -z "$TARGET" ]]; then
+ print_error "Hostname is required for traceroute test"
+ exit 1
+ fi
+ test_traceroute "$TARGET"
+ ;;
+ port)
+ if [[ -z "$TARGET" || -z "$PORT" ]]; then
+ print_error "Hostname and port are required for port test"
+ exit 1
+ fi
+ test_port "$TARGET" "$PORT"
+ ;;
+ http|https)
+ if [[ -z "$TARGET" ]]; then
+ print_error "URL is required for HTTP test"
+ exit 1
+ fi
+ test_http "$TARGET"
+ ;;
+ oracle)
+ if [[ -z "$TARGET" ]]; then
+ print_error "Hostname is required for Oracle test"
+ exit 1
+ fi
+ test_oracle "$TARGET" "$PORT"
+ ;;
+ all)
+ if [[ -z "$TARGET" ]]; then
+ print_error "Hostname is required for comprehensive test"
+ exit 1
+ fi
+ test_all "$TARGET"
+ ;;
+ *)
+ print_error "Unknown test type: $TEST_TYPE"
+ show_usage
+ exit 1
+ ;;
+esac
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/Dockerfile b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/Dockerfile
new file mode 100644
index 000000000..eee8d3672
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/Dockerfile
@@ -0,0 +1,89 @@
+# Oracle ADB Connping Testing Container
+# Uses rwloadsim tool (connping/ociping) for Oracle ADB latency testing
+FROM oraclelinux:8
+
+# Install system packages and network tools
+RUN dnf update -y && \
+ dnf install -y \
+ # Network diagnostic tools
+ bind-utils \
+ iputils \
+ net-tools \
+ nmap-ncat \
+ telnet \
+ traceroute \
+ tcpdump \
+ wget \
+ curl \
+ # Oracle prerequisites
+ oracle-instantclient-release-el8 \
+ unzip \
+ tar \
+ gzip \
+ && dnf clean all
+
+# Install Oracle Instant Client (23c targeted, as required in specifications)
+# Note: dnf may install 21c by default, so the installed version is detected below and the environment adjusted accordingly
+RUN dnf install -y \
+ oracle-instantclient-basic \
+ oracle-instantclient-sqlplus \
+ && dnf clean all
+
+# Detect installed Oracle client version
+RUN ORACLE_VERSION=$(ls /usr/lib/oracle/ | head -1) && \
+ echo "Detected Oracle Client version: $ORACLE_VERSION" && \
+ echo "export ORACLE_HOME=/usr/lib/oracle/${ORACLE_VERSION}/client64" >> /etc/profile.d/oracle.sh && \
+ echo "export LD_LIBRARY_PATH=/usr/lib/oracle/${ORACLE_VERSION}/client64/lib:\$LD_LIBRARY_PATH" >> /etc/profile.d/oracle.sh && \
+ echo "export PATH=/opt/rwloadsim:/usr/lib/oracle/${ORACLE_VERSION}/client64/bin:\$PATH" >> /etc/profile.d/oracle.sh
+
+# Set environment variables (will be overridden if different version detected)
+ENV ORACLE_HOME=/usr/lib/oracle/21/client64 \
+ TNS_ADMIN=/opt/oracle/wallet \
+ PATH="/usr/local/bin:/opt/rwloadsim:$PATH:/usr/lib/oracle/21/client64/bin" \
+ LD_LIBRARY_PATH="/usr/lib/oracle/21/client64/lib"
+
+# Create directories
+RUN mkdir -p /opt/connping /opt/oracle/wallet /opt/rwloadsim
+
+# Download and install rwloadsim v3.2.1
+RUN cd /tmp && \
+ wget -q https://github.com/oracle/rwloadsim/releases/download/v.3.2.1/rwloadsim-linux-x86_64-bin-3.2.1.tgz && \
+ tar -xzf rwloadsim-linux-x86_64-bin-3.2.1.tgz -C /opt/rwloadsim --strip-components=1 && \
+ rm -f rwloadsim-linux-x86_64-bin-3.2.1.tgz && \
+ echo "rwloadsim installed to /opt/rwloadsim"
+
+# Create symlinks to make rwloadsim and connping available in PATH
+# Detect Oracle version and link appropriate rwloadsim binary
+RUN ORACLE_VERSION=$(ls /usr/lib/oracle/ | head -1) && \
+ echo "Creating symlinks for Oracle Client version: $ORACLE_VERSION" && \
+ if [ -f "/opt/rwloadsim/rwloadsim${ORACLE_VERSION}" ]; then \
+ ln -sf "/opt/rwloadsim/rwloadsim${ORACLE_VERSION}" /opt/rwloadsim/rwloadsim; \
+ else \
+ ln -sf /opt/rwloadsim/rwloadsim21 /opt/rwloadsim/rwloadsim; \
+ fi && \
+ ln -sf /opt/rwloadsim/rwloadsim /usr/local/bin/rwloadsim && \
+ echo "Symlinks created: rwloadsim -> /opt/rwloadsim/rwloadsim" && \
+ echo '#!/bin/sh' > /usr/local/bin/connping && \
+ echo 'cd /opt/rwloadsim && exec /opt/rwloadsim/rwloadsim --pretend-gen-banner="RWP*Connect/OCIPing" -u connping.rwl "$@"' >> /usr/local/bin/connping && \
+ chmod +x /usr/local/bin/connping && \
+ echo "Created connping wrapper script"
+
+# Verify connping is available
+RUN ls -la /usr/local/bin/connping && ls -la /usr/local/bin/rwloadsim
+
+# Copy support scripts
+COPY entrypoint.sh /opt/connping/
+RUN chmod +x /opt/connping/*.sh
+
+# Create non-root user for security
+RUN groupadd -r connping && useradd -r -g connping connping && \
+ chown -R connping:connping /opt/connping /opt/oracle /opt/rwloadsim && \
+ chmod 755 /opt/rwloadsim/connping /opt/rwloadsim/rwloadsim
+
+# Switch to non-root user
+USER connping
+WORKDIR /opt/connping
+
+# Default entrypoint
+ENTRYPOINT ["/opt/connping/entrypoint.sh"]
+CMD ["--help"]
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/README.md
new file mode 100644
index 000000000..fb60ed383
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/README.md
@@ -0,0 +1,239 @@
+# Oracle ADB Connping Testing Container
+
+Connection and latency test tool for Oracle Autonomous Database using **rwloadsim** (connping/ociping) created by Oracle's Real World Performance team.
+
+## Overview
+
+This container provides the rwloadsim tool suite for testing Oracle ADB connection latency. The primary metric to monitor is **ociping**, which measures the connection latency to your Oracle Autonomous Database.
+
+**Based on:** [rwloadsim GitHub Repository](https://github.com/oracle/rwloadsim)
+**Version:** 3.2.1
+
+## Features
+
+✅ **Pre-installed rwloadsim**: Complete tool suite including connping and ociping
+✅ **Oracle Instant Client 23c**: Latest Oracle client with SQL*Plus
+✅ **Network testing tools**: dig, ping, traceroute, nc, curl, wget
+✅ **Security**: Runs as non-root user
+✅ **Kubernetes ready**: Pre-configured for AKS deployment
+✅ **ACR integrated**: Built for `odaamh.azurecr.io` registry
+
+## Essential Files
+
+- **`Dockerfile`** - Production Docker build with Oracle Client 23c and rwloadsim
+- **`entrypoint.sh`** - Container entrypoint script with help and diagnostics
+- **`build.sh`** - Build script for local Docker Desktop and ACR push
+
+## Quick Build & Test
+
+### Prerequisites
+
+- Docker Desktop installed and running
+- Azure CLI installed
+- Access to `odaamh` Azure Container Registry
+
+### Build Locally
+
+```bash
+# Navigate to the connping directory
+cd resources/infra/connping
+
+# Make the build script executable (Linux/Mac) or run in Git Bash (Windows)
+chmod +x build.sh entrypoint.sh
+./build.sh
+
+# Or build manually
+docker build -t connping:v1.0 .
+```
+
+### Test Locally
+
+```bash
+# Test connping is available
+docker run --rm connping:v1.0 connping --help
+
+# Test with interactive shell
+docker run --rm -it connping:v1.0 bash
+```
+
+## Push to Azure Container Registry
+
+```bash
+# Login to Azure and ACR
+az login
+az account set --subscription 09808f31-065f-4231-914d-776c2d6bbe34
+az acr login --name odaamh
+
+# Push the image
+docker push odaamh.azurecr.io/connping:v1.0
+docker push odaamh.azurecr.io/connping:latest
+
+# Verify
+az acr repository show --name odaamh --image connping:v1.0
+```
+
+## Production Usage
+
+### Basic Latency Test (One-way TLS)
+
+```bash
+docker run --rm odaamh.azurecr.io/connping:v1.0 \
+ connping -l 'admin/YourPassword@"(description= (retry_count=20)(retry_delay=3)\
+ (address=(protocol=tcps)(port=1521)(host=adb.eu-frankfurt-1.oraclecloud.com))\
+ (connect_data=(service_name=mydb_tp.adb.oraclecloud.com))\
+ (security=(ssl_server_dn_match=yes)))"' \
+ --period=300
+```
+
+### Latency Test with Wallet
+
+```bash
+docker run --rm \
+ -v $(pwd)/wallet:/opt/oracle/wallet \
+ odaamh.azurecr.io/connping:v1.0 \
+ connping -l admin/password@mydb_high --period=300
+```
+
+### Network Diagnostics
+
+```bash
+# DNS lookup
+docker run --rm odaamh.azurecr.io/connping:v1.0 \
+ dig adb.eu-frankfurt-1.oraclecloud.com
+
+# Ping test
+docker run --rm odaamh.azurecr.io/connping:v1.0 \
+ ping -c 10 adb.eu-frankfurt-1.oraclecloud.com
+
+# Interactive troubleshooting
+docker run --rm -it odaamh.azurecr.io/connping:v1.0 bash
+```
+
+## Kubernetes Deployment
+
+Deploy to AKS using the pre-configured YAML files:
+
+```powershell
+# Deploy namespace (if not already created)
+kubectl apply -f ..\k8s\namespace.yaml
+
+# Deploy connping pod
+kubectl apply -f ..\k8s\connping-deployment.yaml
+
+# For automated testing
+kubectl apply -f ..\k8s\connping-job.yaml
+```
+
+### Access the Pod
+
+```powershell
+# Get pod name
+$podName = kubectl get pods -n adb-perf-test -l app=connping -o jsonpath='{.items[0].metadata.name}'
+
+# Execute interactive shell
+kubectl exec -it $podName -n adb-perf-test -- /bin/bash
+
+# Run connping test
+kubectl exec -it $podName -n adb-perf-test -- connping -l "admin/pass@..." --period=300
+```
+
+## Understanding connping Output
+
+When you run connping, watch for the **ociping** metric:
+
+```
+ociping: 2.45ms <- This is the key metric to monitor
+```
+
+This represents the connection latency to your Oracle ADB instance.
+
+### Sample Output
+
+```
+RWL*Load Simulator Release 3.2.1.0 Production
+...
+ociping: 2.45ms
+sqlping: 3.21ms
+total connections: 1234
+successful: 1234
+failed: 0
+```
+
+## Tool Details
+
+### connping Command
+
+```bash
+connping -l <connection-string> [options]
+
+Options:
+ -l Connection string (user/pass@tns or full TNS descriptor)
+ --period Duration in seconds to run the test (default: 60)
+```
+
+### Environment Variables
+
+- `TNS_ADMIN` - Oracle wallet location (default: `/opt/oracle/wallet`)
+- `ORACLE_HOME` - Oracle client home (default: `/usr/lib/oracle/23/client64`)
+- `LD_LIBRARY_PATH` - Oracle library path
+
+## Setup Requirements for connping
+
+As per the original specifications:
+
+1. **VM/Container**: System that can connect to ADB with sqlplus installed ✅
+2. **rwloadsim Tool**: Downloaded from GitHub releases ✅
+3. **Environment Setup** (see the sketch after this list):
+   - PATH includes rwloadsim bin directory ✅
+   - LD_LIBRARY_PATH includes Oracle Client 23c ✅
+
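+For reference, a minimal manual setup of rwloadsim on a plain VM might look like the following sketch. The paths are assumptions based on the container layout above, not prescriptive values:
+
+```bash
+# Hypothetical manual setup (paths assumed; adjust to your installed client version)
+export ORACLE_HOME=/usr/lib/oracle/23/client64
+export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$LD_LIBRARY_PATH
+export PATH=/opt/rwloadsim:$ORACLE_HOME/bin:$PATH
+
+# Short 60-second latency test against a wallet-configured service
+connping -l admin/password@mydb_high --period=60
+```
+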
+## Comparison with adbping
+
+| Feature | adbping | connping (rwloadsim) |
+|---------|---------|----------------------|
+| **Source** | Oracle Support (MOS) | Oracle Real World Performance Team |
+| **Tool Type** | Standalone binary | Part of rwloadsim suite |
+| **Primary Metric** | Connection + SQL time | ociping (connection latency) |
+| **Client Support** | Java, SQL*Plus | SQL*Plus based |
+| **Oracle Client** | 21c | 23c |
+
+## Troubleshooting
+
+### Connection Issues
+
+```bash
+# Test DNS resolution
+docker run --rm odaamh.azurecr.io/connping:v1.0 \
+ dig adb.eu-frankfurt-1.oraclecloud.com
+
+# Test network connectivity
+docker run --rm odaamh.azurecr.io/connping:v1.0 \
+ ping -c 4 adb.eu-frankfurt-1.oraclecloud.com
+```
+
+### Wallet Issues
+
+```bash
+# Verify wallet is mounted correctly
+docker run --rm -v $(pwd)/wallet:/opt/oracle/wallet \
+ odaamh.azurecr.io/connping:v1.0 \
+ ls -la /opt/oracle/wallet
+```
+
+## Notes
+
+- This is the production version with Oracle Instant Client 23c
+- The rwloadsim tool is pre-installed and ready to use
+- For Kubernetes deployment, use the YAML files in `resources/infra/k8s/`
+- The container runs as a non-root user for security
+- One-way TLS connections do not require a wallet mount
+
+## Reference Documentation
+
+- **rwloadsim GitHub**: https://github.com/oracle/rwloadsim
+- **Release v3.2.1**: https://github.com/oracle/rwloadsim/releases/tag/v.3.2.1
+- **Oracle Real World Performance**: Oracle's performance engineering team
+
+## About rwloadsim
+
+The RWP*Load Simulator (rwloadsim) is a tool created by Oracle's Real World Performance team for simulating real-world workloads. The connping/ociping utilities within this suite provide accurate connection latency measurements to Oracle databases.
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/build.sh b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/build.sh
new file mode 100644
index 000000000..ab19fbe27
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/build.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+# Build script for Oracle ADB Connping Testing Container
+
+set -e
+
+# Configuration
+IMAGE_NAME="connping"
+VERSION="v1.0"
+ACR_NAME="odaamh"
+ACR_REGISTRY="${ACR_NAME}.azurecr.io"
+FULL_IMAGE_NAME="${ACR_REGISTRY}/${IMAGE_NAME}:${VERSION}"
+LATEST_IMAGE_NAME="${ACR_REGISTRY}/${IMAGE_NAME}:latest"
+
+echo "π Building Oracle ADB Connping Testing Container"
+echo "================================================="
+echo "Image: ${FULL_IMAGE_NAME}"
+echo ""
+
+# Build the Docker image
+echo "π¦ Building image locally with Docker Desktop..."
+docker build -t "${IMAGE_NAME}:${VERSION}" .
+docker build -t "${IMAGE_NAME}:latest" .
+
+echo "β
Local build completed successfully!"
+echo ""
+
+# Tag for ACR
+echo "π·οΈ Tagging images for Azure Container Registry..."
+docker tag "${IMAGE_NAME}:${VERSION}" "${FULL_IMAGE_NAME}"
+docker tag "${IMAGE_NAME}:latest" "${LATEST_IMAGE_NAME}"
+
+echo "β
Images tagged successfully!"
+echo ""
+
+# Test the image
+echo "π§ͺ Testing the built image..."
+docker run --rm "${IMAGE_NAME}:latest" connping --help || echo "Note: connping help displayed"
+
+echo ""
+echo "π Next steps to push to Azure Container Registry:"
+echo "================================================="
+echo ""
+echo "1. Login to ACR (if not already logged in):"
+echo " az login"
+echo " az account set --subscription 09808f31-065f-4231-914d-776c2d6bbe34"
+echo " az acr login --name ${ACR_NAME}"
+echo ""
+echo "2. Push images to ACR:"
+echo " docker push ${FULL_IMAGE_NAME}"
+echo " docker push ${LATEST_IMAGE_NAME}"
+echo ""
+echo "3. Verify the image in ACR:"
+echo " az acr repository show --name ${ACR_NAME} --image ${IMAGE_NAME}:${VERSION}"
+echo ""
+echo "4. Deploy to Kubernetes:"
+echo " kubectl apply -f ../k8s/namespace.yaml"
+echo " kubectl apply -f ../k8s/connping-deployment.yaml"
+echo ""
+echo "β
Build script completed!"
+echo ""
+echo "Local images created:"
+echo " - ${IMAGE_NAME}:${VERSION}"
+echo " - ${IMAGE_NAME}:latest"
+echo ""
+echo "ACR images ready to push:"
+echo " - ${FULL_IMAGE_NAME}"
+echo " - ${LATEST_IMAGE_NAME}"
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/entrypoint.sh b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/entrypoint.sh
new file mode 100644
index 000000000..e880e1dd5
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/connping/entrypoint.sh
@@ -0,0 +1,152 @@
+#!/bin/bash
+
+# Entrypoint script for Connping Network Testing Container
+set -e
+
+# Detect actual Oracle client version and update environment
+ORACLE_VERSION=$(ls /usr/lib/oracle/ 2>/dev/null | head -1)
+if [ -n "$ORACLE_VERSION" ]; then
+ export ORACLE_HOME=/usr/lib/oracle/${ORACLE_VERSION}/client64
+ export LD_LIBRARY_PATH=/usr/lib/oracle/${ORACLE_VERSION}/client64/lib:${LD_LIBRARY_PATH}
+ export PATH=/opt/rwloadsim:/usr/lib/oracle/${ORACLE_VERSION}/client64/bin:${PATH}
+fi
+
+# Colors for output
+BLUE='\033[0;34m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m'
+
+print_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
+print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
+print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
+
+# Show banner
+show_banner() {
+ cat << 'EOF'
+ ____ _
+ / ___|___ _ __ _ __ _ __ (_)_ __ __ _
+ | | / _ \| '_ \| '_ \| '_ \| | '_ \ / _` |
+ | |__| (_) | | | | | | | |_) | | | | | (_| |
+ \____\___/|_| |_|_| |_| .__/|_|_| |_|\__, |
+ |_| |___/
+ ____ _ _
+ | _ \ __ ___ _| | ___ __ _ ___(_)_ __ ___
+ | |_) |/ _` \ \ /\ / / |/ _ \ / _` / __| | '_ ` _ \
+ | _ <| (_| |\ V V /| | (_) | (_| \__ \ | | | | | |
+ |_| \_\\__,_| \_/\_/ |_|\___/ \__,_|___/_|_| |_| |_|
+
+EOF
+ echo
+ print_success "Oracle ADB Connping Testing Container (rwloadsim)"
+ echo "Version: 1.0"
+ echo "Available tools: connping, ociping, sqlplus, dig, ping, traceroute, nc, curl, wget"
+ echo
+}
+
+# Show help information
+show_help() {
+ cat << EOF
+Oracle ADB Connping Testing Container
+
+DESCRIPTION:
+ This container includes the rwloadsim tool suite (connping/ociping) created by
+ Oracle's Real World Performance team for testing Oracle ADB connection latency.
+
+ The primary metric to watch is 'ociping' which measures the connection latency.
+
+USAGE:
+ # Run connping with one-way TLS connection (recommended)
+ docker run --rm odaamh.azurecr.io/connping:v1.0 \\
+ connping -l 'admin/password@"(description=...)"' --period=300
+
+ # Run connping with wallet authentication
+ docker run --rm -v /path/to/wallet:/opt/oracle/wallet \\
+ odaamh.azurecr.io/connping:v1.0 \\
+ connping -l admin/password@service_name --period=300
+
+ # Network diagnostics
+ docker run --rm odaamh.azurecr.io/connping:v1.0 \\
+ ping -c 4 adb.eu-frankfurt-1.oraclecloud.com
+
+ # Interactive shell
+ docker run --rm -it odaamh.azurecr.io/connping:v1.0 bash
+
+AVAILABLE COMMANDS:
+ connping - Connection and latency testing tool (rwloadsim)
+ ociping - Part of rwloadsim suite
+ sqlplus - Oracle SQL*Plus client (23c)
+ dig, nslookup - DNS lookup utilities
+ ping - ICMP ping utility
+ traceroute - Network route tracing
+ nc (netcat) - TCP/UDP connectivity testing
+ curl, wget - HTTP/HTTPS clients
+
+ENVIRONMENT VARIABLES:
+ TNS_ADMIN - Oracle wallet location (default: /opt/oracle/wallet)
+ ORACLE_HOME - Oracle client home (default: /usr/lib/oracle/23/client64)
+ LD_LIBRARY_PATH - Oracle library path
+
+VOLUME MOUNTS:
+ /opt/oracle/wallet - Mount your Oracle wallet here (optional for one-way TLS)
+
+EXAMPLES:
+ # Basic latency test with one-way TLS (300 seconds duration)
+ connping -l 'admin/pass@"(description= (retry_count=20)(retry_delay=3)\\
+ (address=(protocol=tcps)(port=1521)(host=adb.eu-frankfurt-1.oraclecloud.com))\\
+ (connect_data=(service_name=db_tp.adb.oraclecloud.com))\\
+ (security=(ssl_server_dn_match=yes)))"' --period=300
+
+ # Test with wallet
+ connping -l admin/password@mydb_high --period=300
+
+ # Network connectivity test
+ ping -c 10 adb.eu-frankfurt-1.oraclecloud.com
+
+ # DNS resolution test
+ dig adb.eu-frankfurt-1.oraclecloud.com
+
+ # Interactive troubleshooting
+ docker run --rm -it -v \$(pwd)/wallet:/opt/oracle/wallet \\
+ odaamh.azurecr.io/connping:v1.0 bash
+
+IMPORTANT NOTES:
+ - Watch for the 'ociping' metric in the output - this is the key latency measurement
+ - The --period parameter specifies the test duration in seconds
+ - One-way TLS connections do not require a wallet
+ - For wallet-based connections, ensure TNS_ADMIN points to the wallet directory
+
+REFERENCE:
+ rwloadsim GitHub: https://github.com/oracle/rwloadsim
+ Version: 3.2.1
+
+EOF
+}
+
+# Main execution
+if [[ $# -eq 0 ]] || [[ "$1" == "--help" ]] || [[ "$1" == "-h" ]]; then
+ show_banner
+ show_help
+ exit 0
+fi
+
+# Check if wallet directory is mounted and has files
+if [[ -d "/opt/oracle/wallet" ]]; then
+ wallet_files=$(ls -A /opt/oracle/wallet 2>/dev/null | wc -l)
+ if [[ $wallet_files -gt 0 ]]; then
+ print_success "Oracle wallet detected in /opt/oracle/wallet"
+ else
+ print_info "Oracle wallet directory is empty. For wallet-based connections, mount your wallet to /opt/oracle/wallet"
+ fi
+else
+ print_info "For wallet-based connections, mount your wallet to /opt/oracle/wallet"
+fi
+
+# Verify connping is available
+if ! command -v connping &> /dev/null; then
+ print_warning "connping command not found in PATH. Checking /opt/rwloadsim/bin..."
+ export PATH="/opt/rwloadsim/bin:$PATH"
+fi
+
+# Execute the provided command
+exec "$@"
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/README.md
new file mode 100644
index 000000000..f875ed303
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/README.md
@@ -0,0 +1,64 @@
+# Kubernetes YAML Files
+
+This directory contains pre-configured Kubernetes YAML files for Oracle ADB performance testing.
+
+## Files Overview
+
+### `namespace.yaml`
+Creates the `adb-perf-test` namespace for organizing all performance testing resources.
+
+### `adbping-deployment.yaml`
+Creates a long-running deployment with the adbping container for interactive testing.
+
+### `adbping-job.yaml`
+Creates a Kubernetes Job for automated performance testing.
+
+⚠️ **Important**: Before using `adbping-job.yaml`, you must edit it and replace:
+- `YOUR_PASSWORD_HERE` with your actual Oracle ADB password
+- `YOUR_TNS_CONNECTION_STRING_HERE` with your actual TNS connection string from your ADB wallet
+
+### `connping-deployment.yaml`
+Creates a long-running deployment with the connping container (rwloadsim) for interactive testing.
+
+### `connping-job.yaml`
+Creates a Kubernetes Job for automated connping performance testing.
+
+⚠️ **Important**: Before using `connping-job.yaml`, you must edit it and replace:
+- `YOUR_PASSWORD_HERE` with your actual Oracle ADB password
+- `YOUR_TNS_CONNECTION_STRING_HERE` with your actual TNS connection string from your ADB
+
+## Usage
+
+```powershell
+# Deploy all adbping resources (from the project root directory)
+kubectl apply -f resources\infra\k8s\namespace.yaml
+kubectl apply -f resources\infra\k8s\adbping-deployment.yaml
+
+# For adbping automated testing (after editing the placeholders):
+kubectl apply -f resources\infra\k8s\adbping-job.yaml
+
+# Deploy connping resources
+kubectl apply -f resources\infra\k8s\connping-deployment.yaml
+
+# For connping automated testing (after editing the placeholders):
+kubectl apply -f resources\infra\k8s\connping-job.yaml
+```
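+
+To follow the output of an automated test run, tail the Job's logs. The job name below comes from `adbping-job.yaml`; check `connping-job.yaml` for the corresponding connping job name:
+
+```powershell
+kubectl logs -n adb-perf-test job/adbping-performance-test --follow
+```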
+
+### Access Interactive Pods
+
+```powershell
+# Get adbping pod name and access
+$podName = kubectl get pods -n adb-perf-test -l app=adbping -o jsonpath='{.items[0].metadata.name}'
+kubectl exec -it $podName -n adb-perf-test -- /bin/bash
+
+# Get connping pod name and access
+$podName = kubectl get pods -n adb-perf-test -l app=connping -o jsonpath='{.items[0].metadata.name}'
+kubectl exec -it $podName -n adb-perf-test -- /bin/bash
+```
+
+## Resource Cleanup
+
+```powershell
+# Delete all resources
+kubectl delete namespace adb-perf-test
+```
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/adbping-deployment.yaml b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/adbping-deployment.yaml
new file mode 100644
index 000000000..0bca05166
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/adbping-deployment.yaml
@@ -0,0 +1,32 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: adbping-deployment
+ namespace: adb-perf-test
+ labels:
+ app: adbping
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: adbping
+ template:
+ metadata:
+ labels:
+ app: adbping
+ spec:
+ containers:
+ - name: adbping
+ image: odaamh.azurecr.io/adb-nettest:v2.1
+ command: ["/bin/sleep"]
+ args: ["3600"]
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "100m"
+ limits:
+ memory: "512Mi"
+ cpu: "500m"
+ env:
+ - name: ORACLE_HOME
+ value: "/usr/local/oracle"
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/adbping-job.yaml b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/adbping-job.yaml
new file mode 100644
index 000000000..f38d72f17
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/adbping-job.yaml
@@ -0,0 +1,44 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: adbping-performance-test
+ namespace: adb-perf-test
+spec:
+ template:
+ spec:
+ containers:
+ - name: adbping-tester
+ image: odaamh.azurecr.io/adb-nettest:v2.1
+ command: ["/bin/bash"]
+ args:
+ - -c
+ - |
+          echo "🚀 Oracle ADB Performance Test"
+ echo "=============================="
+
+ # Replace these placeholder values with your actual values:
+ # - USER: Your Oracle ADB admin username
+ # - PASSWORD: Your Oracle ADB admin password
+ # - TNS: Your complete TNS connection string from the ADB wallet
+
+ USER="admin"
+ PASSWORD="YOUR_PASSWORD_HERE"
+ TNS="YOUR_TNS_CONNECTION_STRING_HERE"
+
+          echo "🚀 Starting performance test..."
+          echo "Threads: 3, Duration: 90 seconds"
+ echo ""
+
+ adbping -u "$USER" -p "$PASSWORD" -o -l "$TNS" -c java -t 3 -d 90
+
+ echo ""
+          echo "✅ Performance test completed!"
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "100m"
+ limits:
+ memory: "512Mi"
+ cpu: "500m"
+ restartPolicy: Never
+ backoffLimit: 3
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/connping-deployment.yaml b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/connping-deployment.yaml
new file mode 100644
index 000000000..50ee3dabf
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/connping-deployment.yaml
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: connping-deployment
+ namespace: adb-perf-test
+ labels:
+ app: connping
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: connping
+ template:
+ metadata:
+ labels:
+ app: connping
+ spec:
+ containers:
+ - name: connping
+ image: odaamh.azurecr.io/connping:v1.2
+ command: ["/bin/sleep"]
+ args: ["3600"]
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "100m"
+ limits:
+ memory: "512Mi"
+ cpu: "500m"
+ env:
+ - name: ORACLE_HOME
+ value: "/usr/lib/oracle/23/client64"
+ - name: TNS_ADMIN
+ value: "/opt/oracle/wallet"
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/namespace.yaml b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/namespace.yaml
new file mode 100644
index 000000000..55a0878a8
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/namespace.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: adb-perf-test
+ labels:
+ name: adb-perf-test
+ purpose: oracle-adb-performance-testing
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/network-test-pod.yaml b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/network-test-pod.yaml
new file mode 100644
index 000000000..4b93201d0
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/k8s/network-test-pod.yaml
@@ -0,0 +1,96 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: network-test-pod
+ namespace: default
+ labels:
+ app: network-test
+spec:
+ # Optional: set to true if you need to test hostNetwork performance
+ # hostNetwork: true
+ containers:
+ - name: net-tools
+ image: ubuntu:22.04
+ command: ["/bin/bash","-c"]
+ args:
+ - |
+ set -euo pipefail
+ export DEBIAN_FRONTEND=noninteractive
+ apt-get update && apt-get install -y --no-install-recommends \
+ sockperf iperf3 qperf iputils-ping dnsutils curl tcpdump netcat-openbsd nmap traceroute mtr-tiny ca-certificates jq wget \
+ && rm -rf /var/lib/apt/lists/*
+ printf '%s\n' \
+ '#!/usr/bin/env bash' \
+ 'if [ $# -lt 2 ]; then echo "Usage: tcpping host port [intervalSeconds]"; exit 1; fi' \
+ 'H="$1"; P="$2"; I="${3:-1}"' \
+ 'while true; do' \
+ ' START=$(date +%s%3N)' \
+        ' timeout 1 bash -c "</dev/tcp/$H/$P" 2>/dev/null' \
+ ' RC=$?' \
+ ' END=$(date +%s%3N); ELAPSED=$((END-START))' \
+ ' TS=$(date -Iseconds)' \
+ ' if [ $RC -eq 0 ]; then' \
+ ' echo "$TS OK $H $P ${ELAPSED}ms"' \
+ ' else' \
+ ' echo "$TS FAIL $H $P timeout"' \
+ ' fi' \
+ ' sleep $I' \
+ 'done' > /usr/local/bin/tcpping
+ chmod +x /usr/local/bin/tcpping
+ echo "Container ready"; sleep infinity
+ securityContext:
+ capabilities:
+ add: ["NET_ADMIN", "NET_RAW"]
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "250m"
+ limits:
+ memory: "512Mi"
+ cpu: "1"
+ ports:
+ - name: iperf3
+ containerPort: 5201
+ protocol: TCP
+ - name: qperf
+ containerPort: 19765
+ protocol: TCP
+ - name: sockperf-server
+ image: mellanox/sockperf:latest
+ args: ["sockperf", "server", "-p", "11111"] # default UDP server port
+ ports:
+ - name: sockperf
+ containerPort: 11111
+ protocol: UDP
+ resources:
+ requests:
+ memory: "64Mi"
+ cpu: "100m"
+ limits:
+ memory: "256Mi"
+ cpu: "500m"
+ restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: network-test-svc
+ namespace: default
+spec:
+ selector:
+ app: network-test
+ ports:
+ - name: sockperf
+ port: 11111
+ targetPort: 11111
+ protocol: UDP
+ - name: iperf3
+ port: 5201
+ targetPort: 5201
+ protocol: TCP
+ - name: qperf
+ port: 19765
+ targetPort: 19765
+ protocol: TCP
+ type: ClusterIP
+# tcpping script created via printf to avoid YAML heredoc parsing issues.
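+#
+# Example usage once the container reports ready (host and port are illustrative):
+#   kubectl apply -f network-test-pod.yaml
+#   kubectl exec -it network-test-pod -- tcpping adb.eu-frankfurt-1.oraclecloud.com 1521 2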
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/.env.example b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/.env.example
new file mode 100644
index 000000000..7eff3f392
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/.env.example
@@ -0,0 +1,8 @@
+# Copy this file to .env (in the same .devcontainer folder) and fill in your values
+# The .env file is gitignored and stays only on your machine / inside the container
+
+# Service Principal credentials (from Entra ID App Registration)
+export ARM_CLIENT_ID="your-service-principal-app-id"
+export ARM_CLIENT_SECRET="your-service-principal-secret"
+export ARM_TENANT_ID="your-tenant-id"
+export ARM_SUBSCRIPTION_ID="your-subscription-id"
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/.gitattributes b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/.gitattributes
new file mode 100644
index 000000000..f1f173ac6
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/.gitattributes
@@ -0,0 +1,4 @@
+# Ensure shell scripts and env files use Unix line endings
+*.sh text eol=lf
+.env text eol=lf
+.env.example text eol=lf
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/Dockerfile b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/Dockerfile
new file mode 100644
index 000000000..3d44c6e3d
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/Dockerfile
@@ -0,0 +1,22 @@
+FROM mcr.microsoft.com/devcontainers/base:ubuntu
+
+# Install additional tools
+RUN apt-get update && apt-get install -y \
+ jq \
+ curl \
+ unzip \
+ python3 \
+ python3-pip \
+ && rm -rf /var/lib/apt/lists/*
+
+# Install OCI CLI
+RUN pip3 install oci-cli --break-system-packages
+
+# Create workspace directory
+RUN mkdir -p /workspace
+
+# Create OCI config directory
+RUN mkdir -p /home/vscode/.oci
+
+# Set working directory
+WORKDIR /workspace
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/devcontainer.json b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/devcontainer.json
new file mode 100644
index 000000000..264356149
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/devcontainer.json
@@ -0,0 +1,30 @@
+{
+ "name": "Terraform Azure SP",
+ "build": {
+ "dockerfile": "Dockerfile"
+ },
+ "features": {
+ "ghcr.io/devcontainers/features/azure-cli:1": {},
+ "ghcr.io/devcontainers/features/terraform:1": {},
+ "ghcr.io/devcontainers/features/powershell:1": {}
+ },
+ "mounts": [
+ "source=${localEnv:USERPROFILE}/.oci,target=/home/vscode/.oci,type=bind,consistency=cached"
+ ],
+ "customizations": {
+ "vscode": {
+ "extensions": [
+ "hashicorp.terraform",
+ "ms-azuretools.vscode-azureterraform",
+ "redhat.vscode-yaml"
+ ],
+ "settings": {
+ "terraform.experimentalFeatures.validateOnSave": true,
+ "terminal.integrated.defaultProfile.linux": "pwsh"
+ }
+ }
+ },
+ "shutdownAction": "stopContainer",
+ "postCreateCommand": "pwsh -File .devcontainer/setup-profile.ps1 && echo 'PS1=\"\\W > \"' >> ~/.bashrc && echo 'alias tf=terraform' >> ~/.bashrc",
+ "postStartCommand": "pwsh -File .devcontainer/login-sp.ps1"
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/login-sp.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/login-sp.ps1
new file mode 100644
index 000000000..74596602e
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/login-sp.ps1
@@ -0,0 +1,59 @@
+#!/usr/bin/env pwsh
+# Login script for Azure Service Principal
+
+Write-Host "============================================" -ForegroundColor Cyan
+Write-Host "Logging in Azure CLI as Service Principal" -ForegroundColor Cyan
+Write-Host "============================================" -ForegroundColor Cyan
+
+# Source .env file if environment variables not already set
+if (-not $env:ARM_CLIENT_ID) {
+ $envFile = Join-Path $PSScriptRoot ".env"
+ if (Test-Path $envFile) {
+ Write-Host "Loading credentials from .devcontainer/.env..."
+ Get-Content $envFile | ForEach-Object {
+ if ($_ -match '^export\s+(\w+)="?([^"]*)"?$') {
+ [Environment]::SetEnvironmentVariable($Matches[1], $Matches[2], "Process")
+ }
+ elseif ($_ -match '^(\w+)="?([^"]*)"?$') {
+ [Environment]::SetEnvironmentVariable($Matches[1], $Matches[2], "Process")
+ }
+ }
+ }
+}
+
+if (-not $env:ARM_CLIENT_ID -or -not $env:ARM_CLIENT_SECRET -or -not $env:ARM_TENANT_ID) {
+ Write-Host "ERROR: Missing required environment variables!" -ForegroundColor Red
+ Write-Host ""
+ Write-Host "Create .devcontainer/.env from .devcontainer/.env.example:"
+ Write-Host " cp .devcontainer/.env.example .devcontainer/.env"
+ Write-Host " # Edit .env with your SP credentials"
+ Write-Host " # Then rebuild the container"
+ exit 1
+}
+
+# Login as service principal
+az login --service-principal `
+ --username $env:ARM_CLIENT_ID `
+ --password $env:ARM_CLIENT_SECRET `
+ --tenant $env:ARM_TENANT_ID `
+ --output none
+
+if ($LASTEXITCODE -ne 0) {
+ Write-Host "ERROR: Azure login failed!" -ForegroundColor Red
+ exit 1
+}
+
+# Set subscription if provided
+if ($env:ARM_SUBSCRIPTION_ID) {
+ az account set --subscription $env:ARM_SUBSCRIPTION_ID
+}
+
+Write-Host ""
+Write-Host "Logged in as Service Principal:" -ForegroundColor Green
+az account show --query "{name:name, user:user.name, type:user.type}" -o table
+
+Write-Host ""
+Write-Host "============================================" -ForegroundColor Cyan
+Write-Host "Dev Container Ready!" -ForegroundColor Cyan
+Write-Host "Use 'tf' as shortcut for 'terraform'" -ForegroundColor Yellow
+Write-Host "============================================" -ForegroundColor Cyan
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/login-sp.sh b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/login-sp.sh
new file mode 100644
index 000000000..deb93b07d
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/login-sp.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+set -e
+
+echo "============================================"
+echo "Logging in Azure CLI as Service Principal"
+echo "============================================"
+
+# Source .env file if environment variables not already set
+if [ -z "$ARM_CLIENT_ID" ] && [ -f ".devcontainer/.env" ]; then
+ echo "Loading credentials from .devcontainer/.env..."
+ source .devcontainer/.env
+fi
+
+if [ -z "$ARM_CLIENT_ID" ] || [ -z "$ARM_CLIENT_SECRET" ] || [ -z "$ARM_TENANT_ID" ]; then
+ echo "ERROR: Missing required environment variables!"
+ echo ""
+ echo "Create .devcontainer/.env from .devcontainer/.env.example:"
+ echo " cp .devcontainer/.env.example .devcontainer/.env"
+ echo " # Edit .env with your SP credentials"
+ echo " # Then rebuild the container"
+ exit 1
+fi
+
+# Login as service principal
+az login --service-principal \
+ --username "$ARM_CLIENT_ID" \
+ --password "$ARM_CLIENT_SECRET" \
+ --tenant "$ARM_TENANT_ID" \
+ --output none
+
+# Set subscription if provided
+if [ -n "$ARM_SUBSCRIPTION_ID" ]; then
+ az account set --subscription "$ARM_SUBSCRIPTION_ID"
+fi
+
+echo ""
+echo "β
Logged in as Service Principal:"
+az account show --query "{name:name, user:user.name, type:user.type}" -o table
+
+echo ""
+echo "β
Verifying Graph API access for authentication methods..."
+# Test that we can access auth methods API (will fail gracefully if no users exist yet)
+az rest --method GET --uri "https://graph.microsoft.com/v1.0/me" --query "displayName" -o tsv 2>/dev/null || echo "(SP doesn't have /me endpoint - this is expected)"
+
+echo ""
+echo "============================================"
+echo "Dev Container Ready!"
+echo "All az and terraform commands will run as SP"
+echo "============================================"
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/setup-profile.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/setup-profile.ps1
new file mode 100644
index 000000000..80b657dcd
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.devcontainer/setup-profile.ps1
@@ -0,0 +1,57 @@
+#!/usr/bin/env pwsh
+# Setup script for dev container - configures PowerShell profile and OCI
+
+$profilePath = $PROFILE
+$profileDir = Split-Path $profilePath -Parent
+
+# Create profile directory if it doesn't exist
+if (!(Test-Path $profileDir)) {
+ New-Item -Path $profileDir -ItemType Directory -Force | Out-Null
+}
+
+# Create or update PowerShell profile
+$profileContent = @'
+# Terraform alias
+Set-Alias tf terraform
+
+# Short prompt - shows only current folder name
+function prompt {
+ $folder = Split-Path -Leaf (Get-Location)
+ "$folder > "
+}
+'@
+
+Set-Content -Path $profilePath -Value $profileContent -Force
+
+Write-Host "PowerShell profile configured:" -ForegroundColor Green
+Write-Host " - 'tf' alias for terraform" -ForegroundColor Cyan
+Write-Host " - Short prompt (folder name only)" -ForegroundColor Cyan
+
+# ============================================================================
+# OCI CLI Configuration - Fix Windows path to Linux path
+# ============================================================================
+$ociConfigPath = "/home/vscode/.oci/config"
+$ociConfigLinux = "/home/vscode/.oci/config_linux"
+
+if (Test-Path $ociConfigPath) {
+ Write-Host "`nConfiguring OCI CLI..." -ForegroundColor Cyan
+
+ # Read the Windows config and fix the key_file path
+ $configContent = Get-Content $ociConfigPath -Raw
+
+ # Replace Windows-style paths with Linux paths
+ # Matches patterns like: key_file=C:\Users\...\filename.pem
+ $fixedContent = $configContent -replace 'key_file=.*\\([^\\]+\.pem)', 'key_file=/home/vscode/.oci/$1'
+
+ # Write the fixed config
+ Set-Content -Path $ociConfigLinux -Value $fixedContent -NoNewline
+
+ # Set OCI_CLI_CONFIG_FILE environment variable in profile
+ Add-Content -Path $profilePath -Value "`n# OCI CLI config with Linux paths`n`$env:OCI_CLI_CONFIG_FILE = '$ociConfigLinux'"
+
+ Write-Host " - OCI config fixed for Linux paths" -ForegroundColor Green
+ Write-Host " - Using: $ociConfigLinux" -ForegroundColor Gray
+} else {
+ Write-Host "`nOCI config not found at $ociConfigPath" -ForegroundColor Yellow
+ Write-Host " Mount your ~/.oci folder or run 'oci setup config'" -ForegroundColor Gray
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.gitignore b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.gitignore
new file mode 100644
index 000000000..5f85e1d3b
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/.gitignore
@@ -0,0 +1,70 @@
+# Terraform state files
+*.tfstate
+*.tfstate.*
+*.tfstate.backup
+
+# Crash log files
+crash.log
+crash.*.log
+
+# Terraform variable files (may contain sensitive data)
+*.tfvars
+*.tfvars.json
+
+# Override files
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
+
+# Terraform plan files
+*.tfplan
+
+# Terraform lock file (comment out if you want to commit it)
+.terraform.lock.hcl
+
+# Terraform provider cache
+.terraform/
+.terraform.tfstate.lock.info
+
+# Local terraform directories
+**/.terraform/*
+
+# Kubeconfig files (contain cluster credentials)
+**/kubeconfig*
+*.kubeconfig
+kubeconfig-*
+*-kubeconfig.yaml
+
+# Scripts logs directory
+scripts/logs/
+logs/
+
+# Temporary files
+*.tmp
+*.bak
+.DS_Store
+Thumbs.db
+
+# IDE and editor files
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# User credentials file (contains sensitive passwords)
+user_credentials.txt
+user_credentials.json
+**/user_credentials.json
+
+# Backup files
+*.backup
+
+# Dev Container secrets
+.env
+.devcontainer/.env
+
+# Members JSON (contains user object IDs)
+members.json
+**/members.json
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/README.md
new file mode 100644
index 000000000..1b5bab6e1
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/README.md
@@ -0,0 +1,549 @@
+# Oracle on Azure Microhack Terraform
+
+This Terraform project automates the provisioning of a scalable infrastructure for running Oracle on Azure, specifically tailored for a multi-user workshop (like a "MicroHack").
+
+It is designed to create multiple, isolated Azure Kubernetes Service (AKS) environments, one for each participant, while enabling connectivity to a central Oracle Database@Azure (ODAA) deployment.
+
+## Table of Contents
+
+* [Core Functionality](#core-functionality)
+* [Important Notes](#important)
+* [Architecture Diagrams](#azure-resource-topology)
+* [Prerequisites](#prerequisites)
+* [Azure Quotas and Limits](#azure-quotas-and-limits)
+* [Setup](#setup)
+ * [Pre-flight Validation](#pre-flight-validation-recommended)
+ * [Azure Authentication](#sign-in-and-set-the-active-subscription-repeat-for-each-subscription-that-will-host-resources)
+ * [Oracle SDN Registration](#register-the-oracle-sdn-appliance-preview-feature-on-every-hosting-subscription-so-odaa-networking-works)
+* [Configuration](#configuration)
+* [Post-Deployment Checks](#post-deployment-checks)
+* [Destroying the Environment](#destroying-the-environment)
+* [Troubleshooting](#troubleshooting)
+
+## Core Functionality
+
+1. **Multi-User Environment**: Provisions multiple isolated environments for a specified number of users (`user_count`). Each user gets their own Azure Kubernetes Service (AKS) cluster.
+2. **Subscription Management**: Distributes the deployments across up to five different Azure subscriptions in a round-robin fashion to manage resource allocation and quotas.
+3. **Identity and Access Management**:
+ * For each user, it creates a new Entra ID user with a generated password and adds them to an already existing security group.
+ * Assigns the necessary RBAC roles to these users so they can manage their AKS cluster and interact with the shared Oracle database resources.
+4. **Oracle Database@Azure (ODAA)**:
+ * Sets up a single, shared virtual network (VNet) in a dedicated subscription to host the Oracle database infrastructure.
+5. **Networking**:
+ * Creates a VNet for each AKS cluster.
+ * Establishes VNet peering between each user's AKS VNet and the central, shared ODAA VNet, allowing applications on AKS to communicate with the Oracle database.
+ * Configures Private DNS zones to resolve Oracle's service endpoints from within the AKS VNets.
+6. **Ingress Control**: Deploys an NGINX Ingress Controller into each AKS cluster using a Helm chart, enabling external access to applications.
+7. **Credential Management**: Generates a `user_credentials.json` file containing the initial login details for the newly created Entra ID users.
+
+## **IMPORTANT**
+
+- The Oracle Database@Azure service must be purchased via the Azure Marketplace; you cannot use an Azure subscription that is backed by Microsoft credits.
+- Microsoft operates an Azure tenant which can be used during the Microhack.
+- Setting up the environment can only be done by authorized personnel; reach out to your account team or the Microsoft Microhack organizers for assistance.
+- The following instructions are only relevant if you belong to the authorized team that is allowed to set up the Microhack environment.
+
+## Azure Resource Topology
+
+The following diagram illustrates the default topology created by this Terraform configuration. Multiple participant environments are created, but all are peered to a **single, shared ODAA VNet** located in the central ODAA subscription.
+
+```mermaid
+flowchart TD
+ subgraph Participant_Subscription_00["Participant Subscription 0"]
+ subgraph rgAKS_00["AKS Resource Group 0"]
+ vnetAKS_00["AKS VNet 0
10.0.0.0/16"]
+ aks_00["AKS Cluster 0"]
+ ingress_00["Ingress NGINX"]
+ vnetAKS_00 --> aks_00 --> ingress_00
+ end
+ end
+
+ subgraph Participant_Subscription_01["Participant Subscription 1"]
+ subgraph rgAKS_01["AKS Resource Group 1"]
+ vnetAKS_01["AKS VNet 1
10.1.0.0/16"]
+ aks_01["AKS Cluster 1"]
+ ingress_01["Ingress NGINX"]
+ vnetAKS_01 --> aks_01 --> ingress_01
+ end
+ end
+
+ subgraph Shared_ODAA_Subscription["Shared ODAA Subscription"]
+ subgraph rgODAA_shared["Shared ODAA Resource Group"]
+ direction TB
+ vnetODAA_shared["Shared ODAA VNet
192.168.0.0/16"]
+ adb_00[("ODAA-ADB 0")]
+ adb_01[("ODAA-ADB 1")]
+ vnetODAA_shared --> adb_00
+ vnetODAA_shared --> adb_01
+ end
+ end
+
+ vnetAKS_00 <-.-> |"VNet Peering"| vnetODAA_shared
+ vnetAKS_01 <-.-> |"VNet Peering"| vnetODAA_shared
+```
+
+With the default settings (`user_count = 1`), Terraform provisions a single AKS environment and peers its VNet to the shared ODAA VNet. If `user_count` is increased, the configuration creates additional AKS environments in the next available subscription slots, each peered to the same shared ODAA network.
+
+## Identity and Access Management
+
+The project's identity and access management (IAM) structure is designed to provide secure, role-based access for each participant. The following diagram illustrates how users, groups, and roles are interconnected:
+
+```mermaid
+flowchart TD
+ subgraph Entra_ID ["Entra ID Tenant"]
+ direction LR
+ subgraph Users
+ direction TB
+ user00["User 00 (mh00)"]
+ user01["User 01 (mh01)"]
+ end
+
+ group["Shared Deployment Group
(e.g., mh-odaa-user-grp)"]
+
+ user00 --> |Member Of| group
+ user01 --> |Member Of| group
+ end
+
+ subgraph Azure_Resources ["Azure Resources"]
+ direction LR
+ subgraph AKS_Resources ["AKS Resources (Per User)"]
+ aks_cluster_00["AKS Cluster 00"]
+ end
+
+ subgraph ODAA_Resources ["Shared ODAA Resources"]
+ odaa_rg["ODAA Resource Group"]
+ end
+ end
+
+ subgraph Oracle_Cloud_App ["Oracle Cloud Enterprise App"]
+ oci_app["OCI Service Principal"]
+ end
+
+ user00 --> |"AKS Cluster User
AKS RBAC Writer
Subscription Reader"| aks_cluster_00
+ user00 --> |"Oracle ADB Admin
Private DNS Zone Reader"| odaa_rg
+
+ group --> |"App Role Assignment
(e.g., 'User')"| oci_app
+```
+
+- **Entra ID Group**: A single security group is created to contain all workshop participants.
+- **User Accounts**: A unique Entra ID user is created for each participant and added to the shared group.
+- **Azure RBAC**: Each user is granted specific roles directly on their own resources (like their AKS cluster) and on the shared ODAA resources, following the principle of least privilege.
+- **Oracle Cloud Access**: The entire group is granted an App Role on the Oracle Cloud enterprise application, allowing all members to access Oracle Cloud services.
+
+## Subscription Distribution
+
+Terraform keeps the subscription assignments inside `locals.deployments`. Each participant is indexed (starting at zero) and mapped to one of the five `subscription_targets` by taking `index % length(subscription_targets)`. This round-robin pattern repeats once all five slots are used, so participant 5 returns to slot 0, participant 6 to slot 1, and so on. The diagram below visualizes the default `subscription_targets` list and how the first six participants are routed:
+
+```mermaid
+flowchart LR
+ subgraph Participants["Participants (index order)"]
+ direction TB
+ U0["user00
index 0"]
+ U1["user01
index 1"]
+ U2["user02
index 2"]
+ U3["user03
index 3"]
+ U4["user04
index 4"]
+ U5["user05
index 5"]
+ end
+
+ subgraph Slots["Round-robin slots"]
+ direction TB
+ S0["Slot 0"]
+ S1["Slot 1"]
+ S2["Slot 2"]
+ S3["Slot 3"]
+ S4["Slot 4"]
+ end
+
+ subgraph Subscriptions["subscription_targets"]
+ direction TB
+ Sub0["subscription id 0"]
+ Sub1["subscription id 1"]
+ Sub2["subscription id 2"]
+ Sub3["subscription id 3"]
+ Sub4["subscription id 4"]
+ end
+
+ U0 --> S0
+ U1 --> S1
+ U2 --> S2
+ U3 --> S3
+ U4 --> S4
+ U5 --> S0
+
+ S0 --> Sub0
+ S1 --> Sub1
+ S2 --> Sub2
+ S3 --> Sub3
+ S4 --> Sub4
+```
+
+Adding or reordering entries in `subscription_targets` immediately changes the destinations for all participants because Terraform recomputes the locals on the next plan/apply.
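+
+A minimal sketch of what such a round-robin mapping can look like in Terraform locals follows. The variable and attribute names are illustrative, not the project's exact code:
+
+```hcl
+# Hypothetical sketch of the round-robin assignment (names assumed)
+locals {
+  deployments = {
+    for i in range(var.user_count) : i => {
+      user_index      = i
+      subscription_id = var.subscription_targets[i % length(var.subscription_targets)]
+    }
+  }
+}
+```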
+
+## Azure Quotas and Limits
+
+> NOTE: The current architecture is set up to support 50 users in parallel.
+
+The maximum number of users we can support depends on two main factors:
+
+- vCPU quota for the VM family used by AKS clusters (default: Standard_D4as_v5) across all 5 subscriptions.
+- Number of eCPUs allowed for Oracle Autonomous Database instances in the linked OCI tenancy.
+
+On Azure we currently have a maximum of 100 vCPUs per subscription for the Standard_DASv5 family. Each AKS cluster with 2 nodes of size Standard_D4as_v5 consumes 8 vCPUs in total (2 nodes × 4 vCPUs), so we can support 12 users per subscription (12 × 8 = 96 vCPUs < 100 vCPUs). With 5 subscriptions available for the Microhack, the total maximum is 60 users (5 × 12 = 60).
+
+Screenshot of the current vCPU quota and usage for a single user:
+
+
+## OCI Limits
+
+The OCI tenancy supports a total of 128 eCPUs. Each Oracle Autonomous Database instance consumes 2 eCPUs, so the maximum user count on the OCI side is 64 users.
+
+Reference: [Oracle Database@Azure Service Limits and Quotas](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/quotas-service-limits.htm)
+
+
+
+Regarding delegated subnets, note that our Oracle subscription on the "pay-as-you-go" plan supports only 2 delegated subnets.
+
+## Prerequisites
+
+### Required Tools
+
+- Windows 10/11 host with PowerShell 7+
+- **Terraform** (>= 1.5.0) - Install via: `winget install Hashicorp.Terraform`
+- **Azure CLI** - Install via: `winget install Microsoft.AzureCLI`
+- **kubectl** - Install via: `winget install Kubernetes.kubectl`
+- **helm** - Install via: `winget install Helm.Helm`
+- **OCI CLI** - Install via: `winget install Oracle.OCI-CLI`
+
+All tools must be available in your `PATH`.
+
+### Service Principal (Required)
+
+**IMPORTANT:** This deployment requires a Service Principal with the necessary permissions. Azure CLI authentication as a regular user is not sufficient on its own: the deployment runs long enough that the user token would expire.
+
+**Create the Service Principal:**
+
+```powershell
+cd resources\infra\terraform\scripts
+.\create-service-principal.ps1 -OutputPath ..\mhodaa-sp-credentials.json
+```
+
+This script creates a service principal with:
+- **Azure RBAC roles**: Contributor and User Access Administrator on the mhteams and mhodaa management groups
+- **Entra ID directory roles**: User Administrator and Application Administrator
+- **Microsoft Graph API permissions**: User.ReadWrite.All and AppRoleAssignment.ReadWrite.All
+
+The credentials will be saved to `mhodaa-sp-credentials.json`.
+
+**Configure Terraform to use the Service Principal:**
+
+Add the credentials to your `terraform.tfvars`:
+
+```hcl
+client_id = "your-app-id-here"
+client_secret = "your-client-secret-here"
+```
+
+### Azure Permissions
+
+The Service Principal requires the following role assignments on every target subscription (including the shared ODAA subscription):
+- **Contributor** - For resource management
+- **User Access Administrator** - For RBAC role assignments
+
+Additionally, the Service Principal must have these Entra ID permissions:
+- **Directory Roles**: User Administrator, Application Administrator
+- **Microsoft Graph API Application Permissions**: User.ReadWrite.All, AppRoleAssignment.ReadWrite.All
+
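+For reference, the two subscription-level roles can be granted with `az role assignment create` (the IDs below are placeholders):
+
+```powershell
+az role assignment create --assignee <sp-app-id> --role "Contributor" --scope "/subscriptions/<subscription-id>"
+az role assignment create --assignee <sp-app-id> --role "User Access Administrator" --scope "/subscriptions/<subscription-id>"
+```
+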
+### Management Group Access
+
+- Read access to the **mhteams** management group for policy definitions and assignments
+
+### Oracle Cloud Integration
+
+- Oracle Cloud service principal (enterprise application) must be registered in your Entra ID tenant
+- Default object ID: `6240ab05-e243-48b2-9619-c3e3f53c6dca`
+- The service principal must have an enabled app role that workshop users can be assigned to
+
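+You can confirm the enterprise application exists in your tenant using the object ID above:
+
+```powershell
+az ad sp show --id 6240ab05-e243-48b2-9619-c3e3f53c6dca --query "{Name:displayName, ObjectId:id}" --output table
+```
+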
+### Resource Quotas
+
+Ensure adequate quotas in all target subscriptions:
+- **VM Quota**: Standard_DASv5Family (Standard_D4as_v5) - Each user requires 8 vCPUs
+- **Oracle eCPU**: Each Oracle Autonomous Database requires 2 eCPUs in the linked OCI tenancy
+- **Public IPs**: For AKS ingress controllers (1 per cluster)
+
+### Register the Oracle SDN appliance preview feature on every hosting subscription so ODAA networking works
+
+> NOTE: This may already be done for your subscriptions; check the feature state before registering.
+>
+> Reference: [Oracle Database@Azure Network Planning - Advanced Network Features](https://learn.microsoft.com/en-us/azure/oracle/oracle-db/oracle-database-network-plan#advanced-network-features)
+
+```powershell
+az feature register --namespace Microsoft.Baremetal --name EnableRotterdamSdnApplianceForOracle
+az feature register --namespace Microsoft.Network --name EnableRotterdamSdnApplianceForOracle
+az feature show --namespace Microsoft.Baremetal --name EnableRotterdamSdnApplianceForOracle --query properties.state
+az feature show --namespace Microsoft.Network --name EnableRotterdamSdnApplianceForOracle --query properties.state
+az provider register --namespace Microsoft.Baremetal
+az provider register --namespace Microsoft.Network
+```
+
+For bulk registration across many subscriptions use `scripts/register-oracle-sdn.ps1` after updating its subscription list.
+
+### Configure terraform.tfvars
+
+Populate `terraform.tfvars` with the values for your event:
+
+* `microhack_event_name` - Event identifier for tagging (e.g., "mhtest1")
+* `user_count` - Number of participants (each gets an isolated AKS environment)
+
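+A minimal `terraform.tfvars` might look like this; the values are illustrative, and the service principal variables are described above:
+
+```hcl
+microhack_event_name = "mhtest1"
+user_count           = 5
+client_id            = "your-app-id-here"
+client_secret        = "your-client-secret-here"
+```
+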
+### Configure users.json
+
+This project comes with a `users.json` file template. The `users.json` file is located in the terraform root directory (`resources\infra\terraform`) and contains participant information. It is a simple array of user profiles indexed by position (0, 1, 2, etc.):
+
+```json
+[
+ {
+ "given_name": "Peter",
+ "surname": "Parker",
+ "hero_name": "Spider-Man"
+ },
+ {
+ "given_name": "Bruce",
+ "surname": "Wayne",
+ "hero_name": "Batman"
+ }
+]
+```
+
+**Required fields per user:**
+
+* `given_name` - User's first name
+* `surname` - User's last name
+* `hero_name` - Optional friendly identifier for documentation
+
+> IMPORTANT: Ensure the file `users.json` contains at least as many entries as the `user_count` configured in `terraform.tfvars`. The deployment will fail if there are insufficient user profiles.
+
+### Deploy the Infrastructure
+
+Run all Terraform commands from `resources\infra\terraform`:
+
+```powershell
+# Initialize Terraform (download providers)
+terraform init
+
+# Validate configuration
+terraform validate
+
+# Preview changes
+terraform plan -out tfplan
+
+# Apply the plan
+terraform apply tfplan
+```
+
+**Important:** The `user_credentials.json` file contains sensitive initial passwords. Secure this file and distribute credentials through a secure channel (e.g., password manager, encrypted email).
+
+## Post-Deployment Checks
+
+After `terraform apply` completes successfully, verify your deployment using the following methods.
+
+### Review Terraform Outputs
+
+The most efficient verification method is using Terraform's output command:
+
+```powershell
+terraform output
+```
+
+**Key outputs to review:**
+
+* **`deployment_summary`**: Confirms the total number of AKS deployments and subscription distribution
+* **`aks_clusters`**: Lists cluster names, IDs, resource groups, and DNS zone information
+* **`vnet_peering_connections`**: Confirms VNet peering between AKS and ODAA networks
+* **`entra_id_deployment_group`**: Shows the shared security group details
+
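+To inspect a single output in script-friendly form (output names as listed above):
+
+```powershell
+terraform output -json deployment_summary | ConvertFrom-Json
+```
+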
+**Security reminder:** The `user_credentials.json` file contains initial passwords. Distribute securely and instruct users to change passwords on first login.
+
+### Verify AKS Cluster Access (Per-User)
+
+Test connectivity to a specific user's AKS cluster:
+
+```powershell
+# Example for user 'mh00'
+$rgName = "aks-user00"
+$aksName = "aks-user00"
+
+# Get cluster credentials
+az aks get-credentials --resource-group $rgName --name $aksName --overwrite-existing
+
+# Verify node status
+kubectl get nodes
+
+# Expected output: 2-3 nodes in Ready state
+```
+
+### Verify Ingress Controller
+
+Check the NGINX Ingress Controller deployment and external IP assignment:
+
+```powershell
+kubectl get services --namespace ingress-nginx
+```
+
+**Expected output:**
+
+```text
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
+ingress-nginx-controller LoadBalancer 10.0.123.456 20.123.45.67 80:31234/TCP,443:32101/TCP
+ingress-nginx-controller-admission   ClusterIP      10.0.78.9      <none>         443/TCP
+```
+
+The `EXTERNAL-IP` should be assigned (not `<pending>`). This may take 1-2 minutes after deployment.
+
+### Verify Oracle Database Connectivity (If Enabled)
+
+> NOTE: This verification applies only if you created Oracle Autonomous Databases and have already installed the on-prem simulation on AKS.
+
+```powershell
+# List Oracle Autonomous Databases in the shared resource group
+az oracle autonomous-database list \
+ --resource-group odaa-user00 \
+ --query "[].{Name:name, State:state, ComputeCount:computeCount}" \
+ --output table
+```
+
+All databases should show `State: AVAILABLE`.
+
+### Verify VNet Peering
+
+Check that VNet peering is established and active:
+
+```powershell
+# Example for user 'mh00'
+az network vnet peering list \
+ --resource-group aks-mh00 \
+ --vnet-name aks-mh00 \
+ --query "[].{Name:name, PeeringState:peeringState, RemoteVNet:remoteVirtualNetwork.id}" \
+ --output table
+```
+
+Expected `PeeringState`: **Connected**
+
+## Destroying the Environment
+
+### Pre-Destroy Checklist
+
+Before destroying the infrastructure, remove the Oracle databases first; otherwise `terraform destroy` will run into issues.
+
+```powershell
+# 1. Verify current state matches Azure reality
+terraform plan
+
+# 2. Destroy all resources
+terraform destroy
+
+# You will be prompted to confirm. Type 'yes' to proceed.
+```
+
+### Post-Destroy Verification
+
+After successful destruction, verify:
+
+```powershell
+# clean up ODAA resources
+.\scripts\cleanup-odaa-and-destroy.ps1
+
+# Check no resource groups remain
+az group list --query "[?starts_with(name, 'aks-mh') || starts_with(name, 'odaa-')].name" --output table
+
+# Verify no orphaned peerings
+az network vnet peering list --resource-group <resource-group> --vnet-name <vnet-name>
+
+# Check custom role definitions (if you want to remove them)
+az role definition list --custom-role-only true --query "[?starts_with(roleName, 'Oracle')].roleName" --output table
+```
+
+## Additional Resources
+
+* [Oracle Database@Azure Documentation](https://learn.microsoft.com/en-us/azure/oracle/oracle-db/)
+* [Oracle Database@Azure - Onboard and provision](https://learn.microsoft.com/en-us/azure/oracle/oracle-db/onboard-oracle-database)
+* [Oracle Database@Azure - Network Planning](https://learn.microsoft.com/en-us/azure/oracle/oracle-db/oracle-database-network-plan)
+* [Azure Kubernetes Service Documentation](https://learn.microsoft.com/en-us/azure/aks/)
+* [Terraform Azure Provider Documentation](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs)
+* [Oracle Database@Azure Service Limits](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/quotas-service-limits.htm)
+
+## OCI SSO
+
+For OCI-Azure SSO integration, refer to:
+[Terraform OCI Multicloud Azure Landing Zones](https://github.com/oci-landing-zones/terraform-oci-multicloud-azure)
+
+---
+
+## Appendix: Entra ID Group
+
+**IMPORTANT:** Before deploying, you must create an Entra ID security group for workshop participants. Terraform will add users to this existing group but will not create it.
+
+**Create the group using Azure CLI:**
+
+```powershell
+# Set the group name (must match the value in terraform.tfvars)
+$groupName = "mh-odaa-user-grp"
+
+# Create the security group
+az ad group create --display-name $groupName --mail-nickname $groupName --description "Security group for Oracle on Azure workshop participants"
+
+# Verify the group was created
+az ad group show --group $groupName --query "{Name:displayName, ObjectId:id}" --output table
+```
+
+> **Note:** The group name must match the `aks_deployment_group_name` value in your `terraform.tfvars` file (default: `mh-odaa-user-grp`).
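+
+After `terraform apply`, you can confirm that membership was populated:
+
+```powershell
+az ad group member list --group mh-odaa-user-grp --query "[].userPrincipalName" --output table
+```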
+
+## Appendix: Monitoring Oracle Autonomous Database Deployment
+
+### Via Azure Portal
+
+1. Navigate to the resource group
+2. Select "Deployments" under Settings
+3. View deployment status and operation details
+4. Check "Activity Log" for detailed progress (refreshes every ~15 seconds)
+
+### Via Azure CLI
+
+```powershell
+# Monitor deployment status
+az oracle autonomous-database show `
+  --resource-group <resource-group-name> `
+  --name <database-name> `
+  --query "{Name:name, State:state, Message:lifecycleDetails}" `
+  --output table
+
+# Watch for state changes
+az oracle autonomous-database list `
+  --resource-group <resource-group-name> `
+  --query "[].{Name:name, State:state, Updated:timeUpdated}" `
+  --output table
+```
+
+### Via OCI CLI
+
+```powershell
+# List resources created today in compartment
+$TODAY = (Get-Date).ToString("yyyy-MM-dd")
+$compartmentId = ""
+
+oci search resource structured-search \
+ --query-text "query all resources where timeCreated >= '${TODAY}T00:00:00Z' && compartmentId = '$compartmentId'" \
+ --query "data.items[*].{ResourceType:\`"resource-type\`",DisplayName:\`"display-name\`",State:\`"lifecycle-state\`"}" \
+ --output table
+```
+
+#### Example: Real Deployment Log Query
+
+```powershell
+# Get detailed creation timeline for a specific compartment
+$compartmentId = "ocid1.compartment.oc1..aaaaaaaayehuog6myqxudqejx3ddy6bzkr2f3dnjuuygs424taimn4av4wbq"
+oci search resource structured-search `
+  --query-text "query all resources where compartmentId = '$compartmentId' && timeCreated >= '2025-11-02T08:00:00Z'" `
+  --query "data.items[*].{ResourceType:\`"resource-type\`",DisplayName:\`"display-name\`",TimeCreated:\`"time-created\`"}" `
+  --output json | ConvertFrom-Json | Sort-Object TimeCreated
+```
+
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/CLOUDSHELL_SHARED_STORAGE.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/CLOUDSHELL_SHARED_STORAGE.md
new file mode 100644
index 000000000..5428ca4ec
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/CLOUDSHELL_SHARED_STORAGE.md
@@ -0,0 +1,165 @@
+# Shared Storage Account for Cloud Shell
+
+## Configuration
+
+Your Terraform is now configured to use the **shared storage account** `/subscriptions/09808f31-065f-4231-914d-776c2d6bbe34/resourceGroups/odaa/providers/Microsoft.Storage/storageAccounts/odaamh` for all users.
+
+### What Terraform Will Create
+
+✅ **Per-User File Shares** in the shared storage account:
+- `cloudshell-user00` (6 GB)
+- `cloudshell-user01` (6 GB)
+- `cloudshell-user02` (6 GB)
+- `cloudshell-user03` (6 GB)
+- `cloudshell-user04` (6 GB)
+
+✅ **RBAC Permissions** for each user:
+- **Storage Blob Data Contributor** on storage account `odaamh`
+- **Storage File Data SMB Share Contributor** on storage account `odaamh`
+- **Reader** on resource group `odaa`
+
+### Configuration in terraform.tfvars
+
+```hcl
+use_shared_cloudshell_storage = true
+shared_cloudshell_storage_account_id = "/subscriptions/09808f31-065f-4231-914d-776c2d6bbe34/resourceGroups/odaa/providers/Microsoft.Storage/storageAccounts/odaamh"
+shared_cloudshell_storage_account_name = "odaamh"
+shared_cloudshell_resource_group_name = "odaa"
+shared_cloudshell_subscription_id = "09808f31-065f-4231-914d-776c2d6bbe34"
+cloudshell_file_share_quota = 6
+```
+
+## Deployment
+
+```bash
+# Plan to see what will be created
+terraform plan
+
+# Apply to create file shares and RBAC assignments
+terraform apply
+```
+
+## What Gets Created
+
+```
+Storage Account: odaamh (existing - not created)
+└── File Shares (NEW):
+    ├── cloudshell-user00 (6 GB) → user00 has access
+    ├── cloudshell-user01 (6 GB) → user01 has access
+    ├── cloudshell-user02 (6 GB) → user02 has access
+    ├── cloudshell-user03 (6 GB) → user03 has access
+    └── cloudshell-user04 (6 GB) → user04 has access
+
+RBAC Assignments (NEW):
+├── user00 → Storage Blob Data Contributor on odaamh
+├── user00 → Storage File Data SMB Share Contributor on odaamh
+├── user00 → Reader on odaa RG
+├── user01 → (same permissions...)
+├── user02 → (same permissions...)
+├── user03 → (same permissions...)
+└── user04 → (same permissions...)
+```
+
+## User Setup Instructions
+
+After Terraform deployment, each user should:
+
+1. **Log in to Azure Portal** with their credentials (user00@cptazure.org, etc.)
+2. **Click Cloud Shell icon** (>_) in top navigation
+3. **Select environment**: Bash or PowerShell
+4. **Choose "Show advanced settings"**
+5. **Select subscription**: `09808f31-065f-4231-914d-776c2d6bbe34`
+6. **Select resource group**: `odaa`
+7. **Select storage account**: `odaamh`
+8. **Enter file share name**:
+ - user00 enters: `cloudshell-user00`
+ - user01 enters: `cloudshell-user01`
+ - user02 enters: `cloudshell-user02`
+ - user03 enters: `cloudshell-user03`
+ - user04 enters: `cloudshell-user04`
+9. **Click "Attach storage"**
+
+## View Outputs
+
+After deployment, check the outputs:
+
+```bash
+# View shared storage information
+terraform output shared_cloudshell_storage
+
+# View setup guide for users
+terraform output shared_cloudshell_setup_guide
+```
+
+## Security & Isolation
+
+✅ **Each user can only access their own file share** due to RBAC permissions
+✅ **Users cannot see or access other users' Cloud Shell files**
+✅ **Shared storage account reduces cost** compared to per-user storage accounts
+✅ **All users have the same setup experience** (same storage account/RG)
+
+## Cost
+
+**File Shares** (5 users × 6 GB each = 30 GB total):
+- Transaction Optimized tier: ~$6.00/month total
+- Blob Storage (Cloud Shell state): ~$0.25/month total
+
+**Estimated Total**: ~$6.25/month for all 5 users
+
+Compare to per-user storage accounts: ~$10/month (5 accounts × $2 each)
+
+**Savings**: ~$3.75/month (~38% cost reduction)
+
+## Troubleshooting
+
+### Issue: User can't see storage account in dropdown
+**Solution**: User needs Reader permission on the subscription or resource group (already configured via Terraform)
+
+### Issue: "Failed to attach storage" error
+**Check**:
+- Storage account `odaamh` has public network access enabled
+- User has correct RBAC roles (verify with `az role assignment list`)
+- File share exists with correct name
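+
+For the RBAC check, something like the following should list both storage roles for the user (UPN assumed from this workshop's naming):
+
+```powershell
+az role assignment list `
+  --assignee user00@cptazure.org `
+  --scope "/subscriptions/09808f31-065f-4231-914d-776c2d6bbe34/resourceGroups/odaa/providers/Microsoft.Storage/storageAccounts/odaamh" `
+  --output table
+```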
+
+### Issue: User selects wrong file share
+**Solution**: Each user MUST use their own file share (`cloudshell-userXX`). If they select another user's file share, they won't have access.
+
+## Advanced: Manual File Share Creation
+
+If you need to create file shares manually (without Terraform):
+
+```bash
+# Create file shares
+az storage share create --account-name odaamh --name cloudshell-user00 --quota 6
+az storage share create --account-name odaamh --name cloudshell-user01 --quota 6
+az storage share create --account-name odaamh --name cloudshell-user02 --quota 6
+az storage share create --account-name odaamh --name cloudshell-user03 --quota 6
+az storage share create --account-name odaamh --name cloudshell-user04 --quota 6
+
+# Get user object IDs (from Entra ID)
+USER00_ID=$(az ad user show --id user00@cptazure.org --query id -o tsv)
+USER01_ID=$(az ad user show --id user01@cptazure.org --query id -o tsv)
+# ... etc
+
+# Assign RBAC roles
+STORAGE_ID="/subscriptions/09808f31-065f-4231-914d-776c2d6bbe34/resourceGroups/odaa/providers/Microsoft.Storage/storageAccounts/odaamh"
+
+az role assignment create --assignee $USER00_ID --role "Storage Blob Data Contributor" --scope $STORAGE_ID
+az role assignment create --assignee $USER00_ID --role "Storage File Data SMB Share Contributor" --scope $STORAGE_ID
+# ... repeat for each user
+```
+
+## Related Files
+
+- `cloudshell-shared.tf` - Shared storage configuration logic
+- `variables.tf` - Cloud Shell variables
+- `terraform.tfvars` - Your current configuration
+- `terraform.tfvars.cloudshell.example` - Configuration examples
+
+## Next Steps
+
+1. **Review the configuration** in `terraform.tfvars` (already added)
+2. **Run `terraform plan`** to preview changes
+3. **Run `terraform apply`** to create file shares and RBAC
+4. **Distribute setup instructions** to users (from `terraform output shared_cloudshell_setup_guide`)
+5. **Test with one user first** before rolling out to all users
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/CLOUDSHELL_STORAGE.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/CLOUDSHELL_STORAGE.md
new file mode 100644
index 000000000..2fc9781a1
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/CLOUDSHELL_STORAGE.md
@@ -0,0 +1,283 @@
+# Azure Cloud Shell Storage for Users
+
+This configuration provides **per-user Azure Cloud Shell storage accounts** that can be provisioned via Terraform. Each user gets their own dedicated storage account with a pre-configured file share for Cloud Shell.
+
+## Important: Cloud Shell Limitations
+
+⚠️ **Azure Cloud Shell cannot be fully automated** because it requires **user-interactive first-time setup** in the Azure Portal or CLI. However, we can pre-provision the storage infrastructure that users will select during their first Cloud Shell launch.
+
+## Two Approaches
+
+### Approach 1: Create New Storage Accounts (Recommended)
+
+Pre-provision new storage accounts that users select during Cloud Shell setup.
+
+**Enable in `terraform.tfvars`:**
+```hcl
+create_cloudshell_storage = true
+cloudshell_storage_account_prefix = "csshell" # Optional: customize prefix
+cloudshell_file_share_quota = 6 # Optional: default is 6 GB
+```
+
+**What gets created:**
+- ✅ Resource group per user: `rg-cloudshell-user00`, `rg-cloudshell-user01`, etc.
+- ✅ Storage account per user: `csshellmh2025muc00xyz` (globally unique)
+- ✅ File share per user: `cloudshell-user00`, `cloudshell-user01`, etc.
+- ✅ RBAC permissions: Storage Blob Data Contributor, File Data SMB Share Contributor, RG Contributor
+
+**User Setup Process:**
+1. User logs into Azure Portal
+2. Clicks Cloud Shell icon (>_) in top navigation
+3. Selects Bash or PowerShell
+4. Chooses "Show advanced settings"
+5. Selects "Use existing" resources
+6. Picks their pre-created resource group and storage account
+7. Enters their pre-created file share name
+8. Clicks "Attach storage"
+
+### Approach 2: Use Existing Storage Accounts
+
+Reference storage accounts that already exist (managed outside Terraform).
+
+**Enable in `terraform.tfvars`:**
+```hcl
+use_existing_cloudshell_storage = true
+
+existing_cloudshell_storage_accounts = {
+ "0" = {
+ name = "existingstorageuser00"
+ resource_group_name = "existing-rg-user00"
+ }
+ "1" = {
+ name = "existingstorageuser01"
+ resource_group_name = "existing-rg-user01"
+ }
+ # Add more as needed for each user index
+}
+```
+
+**What happens:**
+- ✅ Terraform references existing storage accounts via data sources
+- ✅ No new storage resources are created
+- ✅ Outputs provide information about the existing accounts
+- ❌ RBAC assignments must be managed separately (not created by Terraform)
+
+## Deployment
+
+### Step 1: Enable Cloud Shell Storage
+
+Edit `terraform.tfvars`:
+```hcl
+# For NEW storage accounts:
+create_cloudshell_storage = true
+
+# OR for EXISTING storage accounts:
+use_existing_cloudshell_storage = true
+existing_cloudshell_storage_accounts = {
+ # ... your existing storage config
+}
+```
+
+### Step 2: Deploy
+
+```bash
+terraform init
+terraform plan
+terraform apply
+```
+
+### Step 3: Get Storage Information
+
+```bash
+# View Cloud Shell storage details (sensitive output)
+terraform output cloudshell_storage
+
+# View setup instructions for users
+terraform output cloudshell_setup_guide
+```
+
+### Step 4: Distribute Information to Users
+
+After deployment, provide each user with:
+- Their storage account name
+- Their resource group name
+- Their file share name
+- Setup instructions (from `cloudshell_setup_guide` output)
+
+## Storage Account Naming
+
+Storage accounts are named with this pattern:
+```
+{prefix}{event_name}{user_postfix}{random_suffix}
+```
+
+Example: `csshellmh2025muc00abc`
+- `csshell` = prefix (configurable via `cloudshell_storage_account_prefix`)
+- `mh2025muc` = event name (from `microhack_event_name`)
+- `00` = user postfix (user00, user01, etc.)
+- `abc` = random 3-char suffix (ensures global uniqueness)
+
+Maximum length: 24 characters (Azure limit for storage account names)
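+
+A quick way to sanity-check a candidate name against that pattern and limit (a PowerShell sketch; variable names are illustrative):
+
+```powershell
+$prefix    = "csshell"
+$eventName = "mh2025muc"
+$userIndex = 0
+$suffix    = -join ((97..122) | Get-Random -Count 3 | ForEach-Object { [char]$_ })  # 3 random a-z chars
+
+$name = "{0}{1}{2:D2}{3}" -f $prefix, $eventName, $userIndex, $suffix
+if ($name.Length -gt 24) { throw "'$name' exceeds Azure's 24-character storage account name limit" }
+$name  # e.g. csshellmh2025muc00abc
+```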
+
+## RBAC Permissions
+
+When creating new storage accounts, Terraform automatically grants each user:
+
+| Role | Scope | Purpose |
+|------|-------|---------|
+| **Storage Blob Data Contributor** | Storage Account | Full access to blobs (Cloud Shell state files) |
+| **Storage File Data SMB Share Contributor** | Storage Account | Full access to file share (Cloud Shell home directory) |
+| **Contributor** | Resource Group | Manage resources in their Cloud Shell RG |
+
+## Security Considerations
+
+### ✅ Implemented
+- HTTPS-only traffic enforced
+- TLS 1.2 minimum version
+- Per-user isolation (separate storage accounts)
+- Per-user RBAC (users only access their own storage)
+
+### ⚠️ Not Implemented (Optional Enhancements)
+- **Private Endpoints**: Storage accounts use public endpoints for Cloud Shell access
+- **Network Rules**: No IP restrictions by default
+- **Soft Delete**: Not configured (can be enabled if needed)
+
+## Cost Considerations
+
+**Storage Account Costs** (per user):
+- Storage Account: ~$0.00/month (no charge for account itself)
+- File Share (6 GB): ~$1.20/month (Transaction Optimized tier)
+- Blob Storage: ~$0.05/GB/month (Cloud Shell state files, typically < 1 GB)
+
+**Estimated Total**: ~$1.50-$2.00 per user per month
+
+## Customization
+
+### Change Storage Account Prefix
+```hcl
+cloudshell_storage_account_prefix = "myprefix" # Max 11 chars
+```
+
+### Increase File Share Size
+```hcl
+cloudshell_file_share_quota = 10 # GB (minimum 6)
+```
+
+### Change Storage Location
+Storage accounts are created in the same location as other user resources (controlled by `location` variable).
+
+## Terraform Outputs
+
+### `cloudshell_storage` (Sensitive)
+Contains detailed information for each user:
+```json
+{
+ "user00": {
+ "storage_account_name": "csshellmh2025muc00abc",
+ "storage_account_id": "/subscriptions/.../storageAccounts/...",
+ "resource_group_name": "rg-cloudshell-user00",
+ "file_share_name": "cloudshell-user00",
+ "location": "francecentral",
+ "primary_access_key": "...",
+ "connection_string": "...",
+ "setup_instructions": "..."
+ }
+}
+```
+
+### `cloudshell_setup_guide`
+User-friendly setup instructions with step-by-step guidance.
+
+### `existing_cloudshell_storage` (when using existing accounts)
+Information about referenced existing storage accounts.
+
+## Troubleshooting
+
+### Issue: Storage account name too long
+**Solution**: Use shorter prefix (max 11 chars) or shorter event name
+
+### Issue: User can't see storage account in dropdown
+**Solution**: Verify user has Reader permissions on the resource group
+
+### Issue: "Failed to attach storage"
+**Solution**: Check that:
+- Storage account has public network access enabled
+- User has required RBAC roles
+- File share exists and has correct name
+
+### Issue: Want to use existing storage but Terraform tries to create new
+**Solution**: Ensure `create_cloudshell_storage = false` and `use_existing_cloudshell_storage = true`
+
+## Advanced: Manual Storage Account Creation
+
+If you prefer to create storage accounts outside Terraform:
+
+### PowerShell Script Example
+```powershell
+$users = 0..4 # user00 to user04
+$location = "francecentral"
+$prefix = "csshell"
+
+foreach ($i in $users) {
+ $userPostfix = "{0:D2}" -f $i
+ $rgName = "rg-cloudshell-user$userPostfix"
+ $storageAccountName = "${prefix}mh2025muc$userPostfix$(Get-Random -Maximum 999)"
+ $fileShareName = "cloudshell-user$userPostfix"
+
+ # Create resource group
+ az group create --name $rgName --location $location
+
+ # Create storage account
+ az storage account create `
+ --name $storageAccountName `
+ --resource-group $rgName `
+ --location $location `
+ --sku Standard_LRS `
+ --kind StorageV2 `
+ --https-only true `
+ --min-tls-version TLS1_2
+
+ # Create file share
+ az storage share create `
+ --name $fileShareName `
+ --account-name $storageAccountName `
+ --quota 6
+
+ Write-Host "Created: $storageAccountName in $rgName"
+}
+```
+
+Then reference in `terraform.tfvars`:
+```hcl
+use_existing_cloudshell_storage = true
+existing_cloudshell_storage_accounts = {
+ "0" = { name = "csshellmh2025muc00123", resource_group_name = "rg-cloudshell-user00" }
+ "1" = { name = "csshellmh2025muc01456", resource_group_name = "rg-cloudshell-user01" }
+ # ... etc
+}
+```
+
+## Best Practices
+
+1. **Enable Cloud Shell storage for training/workshop scenarios** where users need persistent shell environments
+2. **Use existing storage accounts** if you already have a separate storage provisioning process
+3. **Document storage account names** in user credentials file (consider extending output to include in credentials JSON)
+4. **Test Cloud Shell setup** with one user before rolling out to all users
+5. **Consider lifecycle policies** for cleaning up old Cloud Shell files if storage costs become a concern
+
+## Related Files
+
+- `cloudshell-storage.tf` - Main Cloud Shell storage provisioning logic
+- `cloudshell-existing.tf` - Data sources for existing storage accounts
+- `variables.tf` - Cloud Shell configuration variables
+- `terraform.tfvars` - Enable/configure Cloud Shell storage here
+
+## Future Enhancements
+
+Potential improvements (not currently implemented):
+- [ ] Private endpoint configuration for storage accounts
+- [ ] Network rules to restrict access by IP
+- [ ] Soft delete configuration for file shares
+- [ ] Azure Policy assignments for storage compliance
+- [ ] Automatic Cloud Shell profile customization (bashrc, PS profile)
+- [ ] Pre-install common tools in Cloud Shell environment
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/DEPLOYMENT_GUIDE.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/DEPLOYMENT_GUIDE.md
new file mode 100644
index 000000000..75d68d0f4
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/DEPLOYMENT_GUIDE.md
@@ -0,0 +1,388 @@
+# Deployment Guide - Oracle on Azure Infrastructure
+
+## Overview
+
+This Terraform configuration provisions isolated AKS environments across multiple Azure subscriptions with Oracle Database@Azure networking integration. The infrastructure supports **unlimited users** by using a two-step deployment process.
+
+## Architecture Changes (v2.0)
+
+**Previous limitation:** Helm provider constraint limited deployment to exactly 5 users (1:1 subscription-to-cluster mapping)
+
+**New approach:**
+- Terraform manages infrastructure (AKS clusters, networking, RBAC)
+- PowerShell script deploys ingress controllers post-provisioning
+- **No limit on user count** - scale to 10, 20, 50+ users
+
+## Prerequisites
+
+### Required Tools
+- **Terraform** v1.x or higher
+- **Azure CLI** (for authentication only)
+- **Helm** v3.x or higher
+- **kubectl** CLI
+
+### Authentication
+```powershell
+# Login to Azure
+az login
+
+# Set the subscription context (if needed)
+az account set --subscription <subscription-id>
+```
+
+## Configuration
+
+### 1. Configure Variables
+
+Edit `terraform.tfvars`:
+
+```hcl
+# Number of users to provision (no limit!)
+user_count = 10 # Change to any number
+
+# Microhack event name
+microhack_event_name = "mhtest1"
+
+# Azure subscriptions for round-robin deployment
+subscription_targets = [
+ { subscription_id = "556f9b63-...", tenant_id = "f71980b2-..." }, # Slot 0
+ { subscription_id = "a0844269-...", tenant_id = "f71980b2-..." }, # Slot 1
+ { subscription_id = "b1658f1f-...", tenant_id = "f71980b2-..." }, # Slot 2
+ { subscription_id = "9aa72379-...", tenant_id = "f71980b2-..." }, # Slot 3
+ { subscription_id = "98525264-...", tenant_id = "f71980b2-..." }, # Slot 4
+]
+
+# ODAA subscription
+odaa_subscription_id = "4aecf0e8-..."
+odaa_tenant_id = "f71980b2-..."
+
+# Service principal credentials (for ODAA operations)
+client_id = "8a9f736e-..."
+client_secret = "aW18Q~..."
+```
+
+### 2. User Scaling Guidelines
+
+- **5 subscriptions**: Supports 5, 10, 15, 20... users (multiples of 5 recommended)
+- **10 subscriptions**: Supports 10, 20, 30... users
+- Users are distributed round-robin across subscriptions (see the sketch after this list)
+- Each user gets an isolated AKS cluster with dedicated networking
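+
+The slot assignment is plain modulo arithmetic:
+
+```powershell
+# Map each user index to a subscription slot (round-robin across 5 subscriptions)
+$subscriptionCount = 5
+$userCount = 12
+foreach ($i in 0..($userCount - 1)) {
+    "user{0:D2} -> subscription slot {1}" -f $i, ($i % $subscriptionCount)
+}
+```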
+
+## Deployment Steps
+
+### Step 1: Initialize Terraform
+
+```powershell
+terraform init
+```
+
+### Step 2: Review Plan
+
+```powershell
+terraform plan
+```
+
+Expected resources (for 10 users):
+- 10 AKS clusters
+- 10 VNets with peering to ODAA shared network
+- 40 Private DNS zones (4 per cluster)
+- 10 Log Analytics workspaces
+- 10 Entra ID users with RBAC assignments
+- 1 shared ODAA network
+
+### Step 3: Apply Infrastructure
+
+```powershell
+terraform apply -auto-approve
+```
+
+**Duration:** ~15-20 minutes for 10 clusters
+
+### Step 4: Deploy Ingress Controllers
+
+After Terraform completes successfully, run the deployment script:
+
+```powershell
+.\scripts\deploy-ingress-controllers.ps1
+```
+
+**What it does:**
+- Reads cluster kubeconfig directly from Terraform output (no `az login` needed)
+- Deploys ingress-nginx v4.14.0 to each cluster
+- Configures Azure Load Balancer health probes
+- Verifies deployment success
+- Provides detailed progress output
+
+**Duration:** ~2-3 minutes for 10 clusters
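+
+Per cluster, the Helm invocation is roughly equivalent to the following (a sketch, not the script's exact argument list):
+
+```powershell
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx `
+  --version 4.14.0 `
+  --namespace ingress-nginx --create-namespace `
+  --set controller.service.annotations."service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path"=/healthz
+```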
+
+**Example output:**
+```
+╔══════════════════════════════════════════════════════════════════════════════╗
+║                                                                              ║
+║                 AKS Ingress Controller Deployment Automation                 ║
+║                                                                              ║
+╚══════════════════════════════════════════════════════════════════════════════╝
+
+==> Checking prerequisites...
+    ✓ Helm CLI found: v3.15.0
+    ✓ kubectl found
+    ✓ Terraform found
+
+==> Reading Terraform outputs...
+    ✓ Found 10 AKS cluster(s)
+
+==> Processing 10 cluster(s)...
+
+  Processing cluster: user00
+    ✓ Testing cluster connectivity...
+    ✓ Connected to cluster
+    ✓ Adding/updating ingress-nginx helm repository...
+    ✓ Deploying ingress-nginx v4.14.0...
+    ✓ Ingress controller deployed successfully
+    ✓ Verifying deployment...
+    ✓ Found 1 running pod(s) in namespace 'ingress-nginx'
+
+  [... 9 more clusters ...]
+
+────────────────────────────────────────────────────────────────────────────────
+Deployment Summary:
+────────────────────────────────────────────────────────────────────────────────
+
+  Total clusters processed: 10
+  Successful: 10
+
+✓ All ingress controllers deployed successfully!
+```
+
+## Advanced Usage
+
+### Custom Helm Version
+
+```powershell
+.\scripts\deploy-ingress-controllers.ps1 -HelmVersion "4.15.0"
+```
+
+### Custom Namespace
+
+```powershell
+.\scripts\deploy-ingress-controllers.ps1 -Namespace "my-ingress"
+```
+
+### Uninstall Ingress Controllers
+
+```powershell
+.\scripts\deploy-ingress-controllers.ps1 -Uninstall
+```
+
+## Verification
+
+### Check AKS Clusters
+
+```powershell
+# View all clusters
+terraform output aks_clusters
+
+# Get specific cluster info
+terraform output -json aks_clusters | ConvertFrom-Json | Select-Object -ExpandProperty user00
+```
+
+### Verify Ingress Controllers
+
+```powershell
+# Connect to a cluster (example: user00)
+az aks get-credentials --name aks-user00 --resource-group aks-user00 --overwrite-existing
+
+# Check ingress controller pods
+kubectl get pods -n ingress-nginx
+
+# Check ingress controller service
+kubectl get svc -n ingress-nginx
+```
+
+### View User Credentials
+
+```powershell
+# Display all user credentials (sensitive output)
+terraform output -json entra_id_deployment_users
+
+# View credentials file
+Get-Content user_credentials.txt
+```
+
+## Scaling Operations
+
+### Scale Up (Add Users)
+
+1. Update `terraform.tfvars`:
+ ```hcl
+ user_count = 15 # Increase from 10 to 15
+ ```
+
+2. Apply changes:
+ ```powershell
+ terraform apply -auto-approve
+ ```
+
+3. Deploy ingress to new clusters:
+ ```powershell
+ .\scripts\deploy-ingress-controllers.ps1
+ ```
+
+### Scale Down (Remove Users)
+
+1. Update `terraform.tfvars`:
+ ```hcl
+ user_count = 8 # Decrease from 10 to 8
+ ```
+
+2. Apply changes:
+ ```powershell
+ terraform apply -auto-approve
+ ```
+
+The script automatically handles removed clusters.
+
+## Cleanup
+
+### Remove Ingress Controllers Only
+
+```powershell
+.\scripts\deploy-ingress-controllers.ps1 -Uninstall
+```
+
+### Destroy All Infrastructure
+
+```powershell
+terraform destroy -auto-approve
+```
+
+**Warning:** This will delete:
+- All AKS clusters
+- All VNets and peerings
+- All Entra ID users
+- All RBAC assignments
+- ODAA shared network (if no other resources depend on it)
+
+## Outputs
+
+### Available Terraform Outputs
+
+```powershell
+# All AKS clusters with details
+terraform output aks_clusters
+
+# ODAA shared network info
+terraform output odaa_network
+
+# VNet peering connections
+terraform output vnet_peering_connections
+
+# Deployment summary
+terraform output deployment_summary
+
+# User credentials (sensitive)
+terraform output entra_id_deployment_users
+
+# Kubeconfigs for automation (sensitive)
+terraform output aks_kubeconfigs
+```
+
+## Troubleshooting
+
+### Terraform Issues
+
+**Problem:** "Permission denied" errors during apply
+```powershell
+# Solution: Verify Azure CLI authentication
+az account show
+az account list
+```
+
+**Problem:** "Provider not found" errors
+```powershell
+# Solution: Reinitialize Terraform
+terraform init -upgrade
+```
+
+### Script Issues
+
+**Problem:** "Helm not found"
+```powershell
+# Solution: Install Helm
+winget install Helm.Helm
+```
+
+**Problem:** "Cannot connect to cluster"
+```powershell
+# Solution: Verify AKS cluster is running
+az aks show --name aks-user00 --resource-group aks-user00 --query provisioningState
+```
+
+**Problem:** Script fails on specific cluster
+```powershell
+# Solution: Check terraform output
+terraform output -json aks_kubeconfigs | ConvertFrom-Json | Select-Object -ExpandProperty user00
+
+# Manually deploy to that cluster
+$env:KUBECONFIG = "path-to-kubeconfig.yaml"
+helm upgrade --install nginx-quick ingress-nginx/ingress-nginx --version 4.14.0 --namespace ingress-nginx --create-namespace
+```
+
+## Security Notes
+
+### Sensitive Files (Excluded from Git)
+
+The `.gitignore` file excludes:
+- `*.tfvars` (contains subscription IDs and secrets)
+- `*.tfstate` (contains full infrastructure state)
+- `kubeconfig*` (contains cluster credentials)
+- `user_credentials.txt` (contains user passwords)
+- `scripts/logs/` (may contain sensitive output)
+
+**Important:** Never commit these files to version control!
+
+### Kubeconfig Handling
+
+The deployment script:
+- Creates temporary kubeconfig files in `$env:TEMP`
+- Uses unique filenames to avoid conflicts
+- Automatically deletes temp files after use
+- Never writes kubeconfig to the repo directory
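+
+That pattern looks roughly like this (a sketch; the exact shape of the `aks_kubeconfigs` output is assumed):
+
+```powershell
+# Materialize one cluster's kubeconfig in a uniquely named temp file, then clean up
+$kubeconfigs = terraform output -json aks_kubeconfigs | ConvertFrom-Json
+$tempPath = Join-Path $env:TEMP ("kubeconfig-user00-{0}.yaml" -f [guid]::NewGuid())
+try {
+    $kubeconfigs.user00 | Out-File -FilePath $tempPath -Encoding utf8
+    $env:KUBECONFIG = $tempPath
+    kubectl get nodes
+}
+finally {
+    Remove-Item $tempPath -ErrorAction SilentlyContinue  # never leave credentials on disk
+}
+```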
+
+## Architecture Diagram
+
+```
+┌──────────────────────────────────────────────────────────────────────┐
+│                            Terraform Apply                           │
+│          Provisions: AKS Clusters, VNets, RBAC, DNS, Users           │
+└───────────────────────────────────┬──────────────────────────────────┘
+                                    │
+                                    ▼
+                    terraform output aks_kubeconfigs
+                                    │
+                                    ▼
+┌──────────────────────────────────────────────────────────────────────┐
+│                    deploy-ingress-controllers.ps1                    │
+│          Reads kubeconfig from Terraform → Deploys Helm charts       │
+└───────────────────────────────────┬──────────────────────────────────┘
+                                    │
+                    ┌───────────────┴───────────────┐
+                    ▼                               ▼
+            ┌─────────────────┐             ┌─────────────────┐
+            │   AKS user00    │     ...     │   AKS user09    │
+            │ + ingress-nginx │             │ + ingress-nginx │
+            └─────────────────┘             └─────────────────┘
+```
+
+## Support
+
+For issues or questions:
+1. Check the troubleshooting section above
+2. Review Terraform/Helm logs in `scripts/logs/`
+3. Validate Azure permissions and quotas
+4. Consult the SCALING_PROPOSAL.md document for architecture details
+
+## License
+
+This project is licensed under the MIT License.
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/IMPLEMENTATION_SUMMARY_NAMESPACE_RBAC.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/IMPLEMENTATION_SUMMARY_NAMESPACE_RBAC.md
new file mode 100644
index 000000000..b9a9a5055
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/IMPLEMENTATION_SUMMARY_NAMESPACE_RBAC.md
@@ -0,0 +1,170 @@
+# Implementation Summary: Namespace-Based RBAC & Access Rights Review
+
+## Completed Tasks ✅
+
+### 1. RBAC Access Rights Documentation
+**File Created:** `docs/RBAC_SUMMARY.md`
+
+Comprehensive documentation summarizing access rights for each user including:
+- **Azure Subscription Level:**
+ - AKS Subscription: Reader role + Cluster User role (kubeconfig access)
+ - ODAA Subscription: Per-user RG-scoped ADB Administrator role
+ - Storage Account: Per-user file share access for Cloud Shell
+
+- **Network Isolation:**
+ - Per-user AKS VNet (10.0.0.0/16 each)
+ - Per-user ODAA VNet (192.168.0.0/16 each)
+ - 1:1 VNet peering per user
+
+- **Current Limitation Identified:**
+ - Users had cluster-wide "Azure Kubernetes Service RBAC Writer" role
+ - Could deploy to ANY namespace (default, kube-system, etc.)
+ - **This has now been fixed** (see below)
+
+### 2. Namespace-Based Access Control Implementation
+**Files Modified:**
+- `versions.tf` - Added kubernetes provider (~> 2.30)
+- `modules/aks/variables.tf` - Added deployment_user_principal_name and deployment_user_name
+- `modules/aks/main.tf` - Added kubernetes provider requirement, **REMOVED** cluster-wide RBAC Writer
+- `modules/aks/kubernetes-rbac.tf` - **NEW** - Implements namespace restrictions
+- `modules/aks/outputs.tf` - Updated to remove rbac_writer_assignment reference
+- `main.tf` - Updated all 5 aks_slot module calls with new variables
+
+**Architecture Implemented:**
+```
+Before (Insecure):
+User → Azure RBAC Writer (cluster-wide) → Access to ALL namespaces ❌
+
+After (Secure):
+User → Azure Cluster User Role (kubeconfig only)
+  └─ Kubernetes RoleBinding (microhack namespace) → Access to microhack ONLY ✅
+```
+
+**Components Created:**
+1. **Namespace:** `microhack` - Dedicated namespace for user workloads
+2. **Role:** `microhack-deployer` - Permissions for deploying applications
+3. **RoleBinding:** `{user}-microhack-binding` - Binds user to role in namespace
+
+**Permissions Granted (within microhack namespace only):**
+- Pods, Services, Deployments, ReplicaSets, StatefulSets, DaemonSets
+- ConfigMaps, Secrets, PersistentVolumeClaims, ServiceAccounts
+- Jobs, CronJobs
+- Ingresses, NetworkPolicies
+- HorizontalPodAutoscalers, PodDisruptionBudgets
+- Events (read-only), Roles/RoleBindings (read-only)
+
+**Permissions DENIED:**
+- ❌ Access to other namespaces (default, kube-system, etc.)
+- ❌ Creating new namespaces
+- ❌ Viewing cluster-wide resources (nodes, ClusterRoles, etc.)
+- ❌ Modifying RBAC settings
+
+### 3. Implementation Approach
+
+Due to Terraform limitations with the Kubernetes provider in modules instantiated via `for_each`, this is implemented using **null_resource with kubectl commands** instead of Kubernetes provider resources. This approach:
+- Works with multiple AKS clusters (5 clusters, one per user)
+- Applies YAML manifests using kubectl after cluster creation
+- Triggers re-application when YAML content or cluster changes
+- Uses PowerShell for Windows compatibility
+
+**Alternative Considered:**
+- kubernetes provider resources (kubernetes_namespace, kubernetes_role, kubernetes_role_binding)
+- **Issue:** Provider configuration in modules incompatible with for_each
+- **Workaround:** Used null_resource + kubectl instead
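+
+Per cluster, the provisioner effectively runs commands along these lines (a sketch; the manifest file names are hypothetical):
+
+```powershell
+az aks get-credentials --resource-group rg-aks-user00 --name aks-user00 --overwrite-existing
+kubectl apply -f namespace-microhack.yaml        # Namespace manifest (hypothetical file name)
+kubectl apply -f role-microhack-deployer.yaml    # Role + RoleBinding manifests (hypothetical)
+```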
+
+### 4. Documentation Created
+**File Created:** `docs/NAMESPACE_RBAC.md` (comprehensive 500+ line guide)
+
+Includes:
+- Architecture diagrams (before/after comparison)
+- Component descriptions (Namespace, Role, RoleBinding)
+- Azure RBAC vs Kubernetes RBAC explanation
+- Deployment steps
+- Testing & validation procedures
+- User guide with examples
+- Troubleshooting section
+- Security considerations
+
+## Validation ✅
+
+```bash
+terraform init # Success - kubernetes provider v2.38.0 installed
+terraform validate # Success - Configuration is valid
+```
+
+## Security Improvements
+
+| Aspect | Before | After |
+|--------|--------|-------|
+| **AKS Namespace Access** | ALL namespaces | microhack ONLY |
+| **Azure RBAC** | AKS RBAC Writer (cluster-wide) | Cluster User (kubeconfig only) |
+| **Kubernetes RBAC** | None (inherited from Azure) | Namespace-scoped Role + RoleBinding |
+| **Can Create Namespaces** | ✅ Yes | ❌ No |
+| **Can Access kube-system** | ✅ Yes | ❌ No |
+| **Blast Radius** | Entire cluster | Single namespace |
+
+## Next Steps (Deployment)
+
+When ready to deploy:
+
+```powershell
+# 1. Review planned changes
+terraform plan
+
+# 2. Apply configuration
+terraform apply
+
+# 3. Verify namespace creation (for each user's cluster)
+kubectl get namespace microhack --context aks-user00
+kubectl get role -n microhack --context aks-user00
+kubectl get rolebinding -n microhack --context aks-user00
+
+# 4. Test access restrictions
+# As user00: Try to deploy to microhack (should work)
+kubectl create deployment nginx --image=nginx -n microhack
+
+# As user00: Try to deploy to default (should fail)
+kubectl create deployment nginx --image=nginx -n default
+# Expected: Error - User "user00@cptazure.org" cannot create resource "deployments"
+```
+
+## File Locations
+
+- **RBAC Summary:** `docs/RBAC_SUMMARY.md`
+- **Namespace RBAC Guide:** `docs/NAMESPACE_RBAC.md`
+- **Implementation:** `modules/aks/kubernetes-rbac.tf`
+- **Provider Config:** `versions.tf`, `modules/aks/main.tf`
+
+## User Impact
+
+**For Each User (e.g., user00@cptazure.org):**
+
+✅ **Can Now:**
+- Deploy applications to `microhack` namespace
+- Create pods, services, deployments in `microhack`
+- View logs and events in `microhack`
+- Use kubectl freely within their namespace
+
+❌ **Can No Longer:**
+- Deploy to default, kube-system, or other namespaces
+- Create new namespaces
+- View or modify cluster-level resources
+- Accidentally break system components
+
+**Migration Path:**
+Users must add `-n microhack` to kubectl commands or set default namespace:
+```bash
+kubectl config set-context --current --namespace=microhack
+```
+
+## Summary
+
+Successfully implemented namespace-based RBAC restrictions that:
+1. ✅ Reviewed and documented current access rights per user
+2. ✅ Created "microhack" namespace as default in each AKS cluster
+3. ✅ Restricted users to ONLY deploy within the microhack namespace
+4. ✅ Removed insecure cluster-wide RBAC Writer assignment
+5. ✅ Validated configuration with terraform init and terraform validate
+6. ✅ Created comprehensive documentation for users and administrators
+
+The infrastructure is now ready for deployment with enhanced security boundaries.
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/NAMESPACE_RBAC.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/NAMESPACE_RBAC.md
new file mode 100644
index 000000000..8879e9817
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/NAMESPACE_RBAC.md
@@ -0,0 +1,654 @@
+# Kubernetes Namespace-Based Access Control (RBAC)
+
+## Overview
+
+This document describes the namespace-based RBAC implementation that restricts users to the `microhack` namespace within their AKS clusters. This security model ensures users can only deploy and manage workloads in a specific namespace, preventing unauthorized access to cluster-wide resources.
+
+---
+
+## Architecture
+
+### Previous Model (Insecure - Removed)
+
+```
+User (user00@cptazure.org)
+  └─ Azure RBAC: "Azure Kubernetes Service RBAC Writer" (Cluster-wide)
+      └─ Full access to ALL namespaces
+          ├─ default ✅ (can deploy)
+          ├─ kube-system ✅ (can deploy)
+          ├─ microhack ✅ (can deploy)
+          └─ ANY custom namespace ✅ (can create and deploy)
+```
+
+**Problem:** Users had unrestricted access to the entire cluster, including system namespaces.
+
+### Current Model (Secure - Implemented)
+
+```
+User (user00@cptazure.org)
+  ├─ Azure RBAC: "Azure Kubernetes Service Cluster User Role"
+  │   └─ Allows getting kubeconfig (kubectl access) ✅
+  │
+  └─ Kubernetes RBAC: RoleBinding in "microhack" namespace
+      └─ Role: "microhack-deployer"
+          └─ ONLY access to "microhack" namespace
+              ├─ default ❌ (cannot deploy)
+              ├─ kube-system ❌ (cannot deploy)
+              ├─ microhack ✅ (can deploy - ONLY THIS)
+              └─ OTHER namespaces ❌ (cannot create or access)
+```
+
+**Solution:** Users can ONLY interact with the `microhack` namespace. All other namespaces are inaccessible.
+
+---
+
+## Components
+
+### 1. Namespace: `microhack`
+
+**File:** `modules/aks/kubernetes-rbac.tf`
+
+```hcl
+resource "kubernetes_namespace" "microhack" {
+ metadata {
+ name = "microhack"
+ labels = {
+ name = "microhack"
+ environment = "training"
+ managed-by = "terraform"
+ purpose = "user-workloads"
+ }
+ }
+}
+```
+
+**Purpose:**
+- Dedicated namespace for all user workload deployments
+- Isolated from system namespaces (kube-system, default, etc.)
+- Managed by Terraform for consistency across all AKS clusters
+
+**Properties:**
+- Name: `microhack`
+- Created in: Each AKS cluster (aks-user00 through aks-user04)
+- Lifecycle: Managed by Terraform (created with cluster, destroyed with cluster)
+
+### 2. Role: `microhack-deployer`
+
+**File:** `modules/aks/kubernetes-rbac.tf`
+
+**Permissions Granted (within `microhack` namespace only):**
+
+| Resource Type | API Group | Verbs | Description |
+|---------------|-----------|-------|-------------|
+| **Pods** | "" (core) | get, list, watch, create, update, patch, delete | Full CRUD on pods |
+| **Services** | "" (core) | get, list, watch, create, update, patch, delete | Full CRUD on services |
+| **ConfigMaps** | "" (core) | get, list, watch, create, update, patch, delete | Full CRUD on config maps |
+| **Secrets** | "" (core) | get, list, watch, create, update, patch, delete | Full CRUD on secrets |
+| **PVCs** | "" (core) | get, list, watch, create, update, patch, delete | Full CRUD on persistent volume claims |
+| **ServiceAccounts** | "" (core) | get, list, watch, create, update, patch, delete | Full CRUD on service accounts |
+| **Deployments** | apps | get, list, watch, create, update, patch, delete | Full CRUD on deployments |
+| **ReplicaSets** | apps | get, list, watch, create, update, patch, delete | Full CRUD on replica sets |
+| **StatefulSets** | apps | get, list, watch, create, update, patch, delete | Full CRUD on stateful sets |
+| **DaemonSets** | apps | get, list, watch, create, update, patch, delete | Full CRUD on daemon sets |
+| **Jobs** | batch | get, list, watch, create, update, patch, delete | Full CRUD on jobs |
+| **CronJobs** | batch | get, list, watch, create, update, patch, delete | Full CRUD on cron jobs |
+| **Ingresses** | networking.k8s.io | get, list, watch, create, update, patch, delete | Full CRUD on ingresses |
+| **NetworkPolicies** | networking.k8s.io | get, list, watch, create, update, patch, delete | Full CRUD on network policies |
+| **HorizontalPodAutoscalers** | autoscaling | get, list, watch, create, update, patch, delete | Full CRUD on HPAs |
+| **PodDisruptionBudgets** | policy | get, list, watch, create, update, patch, delete | Full CRUD on PDBs |
+| **Events** | "" (core) | get, list, watch | Read-only access to events |
+| **Roles** | rbac.authorization.k8s.io | get, list, watch | Read-only access to roles |
+| **RoleBindings** | rbac.authorization.k8s.io | get, list, watch | Read-only access to role bindings |
+
+**What Users CAN Do:**
+- ✅ Deploy applications (deployments, pods, services)
+- ✅ Create and manage configurations (configmaps, secrets)
+- ✅ Set up ingress for external access
+- ✅ Configure autoscaling (HPA)
+- ✅ Run batch jobs and cron jobs
+- ✅ Create stateful applications (StatefulSets)
+- ✅ View logs and events for troubleshooting
+- ✅ Manage network policies within the namespace
+
+**What Users CANNOT Do:**
+- ❌ Access other namespaces (default, kube-system, etc.)
+- ❌ Create new namespaces
+- ❌ View or modify cluster-wide resources (ClusterRoles, ClusterRoleBindings)
+- ❌ Modify node settings or cluster configuration
+- ❌ Access other users' resources (each user has their own cluster)
+- ❌ Create PersistentVolumes (only PersistentVolumeClaims)
+- ❌ Modify RBAC settings (cannot escalate privileges)
+
+### 3. RoleBinding: `{user}-microhack-binding`
+
+**File:** `modules/aks/kubernetes-rbac.tf`
+
+**Purpose:** Binds the user's Entra ID identity to the `microhack-deployer` Role within the `microhack` namespace.
+
+**Example:**
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: user00-microhack-binding
+ namespace: microhack
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: microhack-deployer
+subjects:
+- kind: User
+ name: user00@cptazure.org # Entra ID UPN
+ apiGroup: rbac.authorization.k8s.io
+```
+
+**Binding Details:**
+- **User:** Entra ID principal name (e.g., `user00@cptazure.org`)
+- **Role:** `microhack-deployer`
+- **Scope:** `microhack` namespace only
+- **Result:** User can perform all actions defined in the Role, but ONLY within `microhack` namespace
+
+---
+
+## Azure RBAC vs Kubernetes RBAC
+
+### Understanding the Two Layers
+
+| Aspect | Azure RBAC | Kubernetes RBAC |
+|--------|------------|-----------------|
+| **Scope** | Azure resources (AKS cluster, subscriptions, RGs) | Kubernetes objects (pods, services, namespaces) |
+| **Manages Access To** | Getting kubeconfig, viewing cluster in Azure Portal | Deploying workloads, creating resources in cluster |
+| **Assignment Level** | Subscription, Resource Group, or Cluster | Cluster-wide (ClusterRole) or Namespace (Role) |
+| **Identity Provider** | Entra ID (Azure AD) | Kubernetes (mapped from Entra ID) |
+| **Configuration** | Terraform azurerm_role_assignment | Terraform kubernetes_role_binding |
+
+### Our Implementation
+
+#### Azure RBAC (Kept)
+- **Role:** `Azure Kubernetes Service Cluster User Role`
+- **Scope:** AKS Cluster (e.g., aks-user00)
+- **Purpose:** Allows user to run `az aks get-credentials` to obtain kubeconfig
+- **Result:** User can connect to cluster with kubectl
+
+#### Kubernetes RBAC (New)
+- **Role:** `microhack-deployer` (custom Kubernetes Role)
+- **Scope:** `microhack` namespace only
+- **Purpose:** Defines what user can do INSIDE the cluster
+- **Result:** User can deploy workloads only to `microhack` namespace
+
+**Why Both Are Needed:**
+1. **Azure RBAC** gets you INTO the cluster (kubeconfig)
+2. **Kubernetes RBAC** determines what you can DO inside the cluster
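+
+From a user's shell, the two layers look like this:
+
+```powershell
+# Layer 1 - Azure RBAC: fetch the kubeconfig
+az aks get-credentials --resource-group rg-aks-user00 --name aks-user00
+
+# Layer 2 - Kubernetes RBAC: what the identity may do inside the cluster
+kubectl auth can-i create deployments -n microhack   # yes
+kubectl auth can-i create deployments -n default     # no
+```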
+
+---
+
+## Deployment
+
+### Files Modified
+
+1. **versions.tf**
+ - Added `kubernetes` provider requirement (~> 2.30)
+
+2. **modules/aks/variables.tf**
+ - Added `deployment_user_principal_name` (UPN for RoleBinding)
+ - Added `deployment_user_name` (short name for labels)
+
+3. **modules/aks/kubernetes-rbac.tf** (NEW)
+ - Kubernetes provider configuration
+ - `kubernetes_namespace.microhack` resource
+ - `kubernetes_role.microhack_deployer` resource
+ - `kubernetes_role_binding.user_microhack` resource
+
+4. **modules/aks/main.tf**
+ - **REMOVED:** `azurerm_role_assignment.aks_rbac_writer` (cluster-wide access)
+ - Added extensive comments explaining why it was removed
+
+5. **modules/aks/outputs.tf**
+ - Removed `rbac_writer_assignment` from outputs
+ - Added comment explaining replacement with Kubernetes RBAC
+
+6. **main.tf**
+ - Updated all 5 `module.aks_slot_*` calls to pass new variables:
+ * `deployment_user_principal_name`
+ * `deployment_user_name`
+
+### Deployment Steps
+
+```bash
+# 1. Initialize Terraform (downloads kubernetes provider)
+terraform init
+
+# 2. Validate configuration
+terraform validate
+
+# 3. Review planned changes
+terraform plan
+
+# 4. Apply changes (creates namespace + RBAC in all clusters)
+terraform apply
+
+# 5. Verify namespace creation
+kubectl get namespace microhack
+
+# 6. Verify RBAC configuration
+kubectl get role -n microhack
+kubectl get rolebinding -n microhack
+```
+
+---
+
+## Testing & Validation
+
+### 1. Verify Namespace Exists
+
+```bash
+# Connect to user's AKS cluster
+az login -u user00@cptazure.org
+az aks get-credentials --resource-group rg-aks-user00 --name aks-user00
+
+# Check namespace
+kubectl get namespace microhack
+# Expected output:
+# NAME STATUS AGE
+# microhack Active 10m
+```
+
+### 2. Test Namespace Access - ALLOWED
+
+```bash
+# Deploy to microhack namespace (should SUCCEED)
+kubectl create deployment nginx --image=nginx -n microhack
+kubectl get pods -n microhack
+
+# Create a service (should SUCCEED)
+kubectl expose deployment nginx --port=80 --type=ClusterIP -n microhack
+kubectl get svc -n microhack
+
+# View logs (should SUCCEED)
+kubectl logs deployment/nginx -n microhack
+
+# Clean up
+kubectl delete deployment nginx -n microhack
+kubectl delete service nginx -n microhack
+```
+
+### 3. Test Default Namespace Access - DENIED
+
+```bash
+# Try to deploy to default namespace (should FAIL)
+kubectl create deployment nginx --image=nginx -n default
+
+# Expected error:
+# Error from server (Forbidden): deployments.apps is forbidden:
+# User "user00@cptazure.org" cannot create resource "deployments"
+# in API group "apps" in the namespace "default"
+```
+
+### 4. Test Kube-System Access - DENIED
+
+```bash
+# Try to view pods in kube-system (should FAIL)
+kubectl get pods -n kube-system
+
+# Expected error:
+# Error from server (Forbidden): pods is forbidden:
+# User "user00@cptazure.org" cannot list resource "pods"
+# in API group "" in the namespace "kube-system"
+```
+
+### 5. Test Namespace Creation - DENIED
+
+```bash
+# Try to create a new namespace (should FAIL)
+kubectl create namespace test
+
+# Expected error:
+# Error from server (Forbidden): namespaces is forbidden:
+# User "user00@cptazure.org" cannot create resource "namespaces"
+# in API group "" at the cluster scope
+```
+
+### 6. Test Authorization with kubectl auth can-i
+
+```bash
+# Check microhack namespace permissions (should be "yes")
+kubectl auth can-i create pods -n microhack
+kubectl auth can-i create deployments -n microhack
+kubectl auth can-i create services -n microhack
+
+# Check default namespace permissions (should be "no")
+kubectl auth can-i create pods -n default
+kubectl auth can-i create deployments -n default
+
+# Check cluster-wide permissions (should be "no")
+kubectl auth can-i create namespaces
+kubectl auth can-i get nodes
+```
+
+**Expected Results:**
+```
+✅ kubectl auth can-i create pods -n microhack → yes
+✅ kubectl auth can-i create deployments -n microhack → yes
+❌ kubectl auth can-i create pods -n default → no
+❌ kubectl auth can-i create namespaces → no
+❌ kubectl auth can-i get nodes → no
+```
+
+---
+
+## User Guide
+
+### Getting Started
+
+1. **Log in to Azure:**
+ ```bash
+ az login -u user00@cptazure.org
+ ```
+
+2. **Get AKS credentials:**
+ ```bash
+ az aks get-credentials --resource-group rg-aks-user00 --name aks-user00
+ ```
+
+3. **Verify access:**
+ ```bash
+ kubectl get namespace microhack
+ ```
+
+### Deploying Applications
+
+**Always specify `-n microhack` or set default namespace:**
+
+```bash
+# Option 1: Specify namespace in every command
+kubectl create deployment myapp --image=nginx -n microhack
+kubectl get pods -n microhack
+
+# Option 2: Set microhack as default namespace (recommended)
+kubectl config set-context --current --namespace=microhack
+
+# Now all commands default to microhack
+kubectl create deployment myapp --image=nginx
+kubectl get pods
+```
+
+### Example Deployment
+
+```bash
+# Set default namespace
+kubectl config set-context --current --namespace=microhack
+
+# Deploy application
+kubectl create deployment webapp --image=nginx:latest --replicas=3
+
+# Expose service
+kubectl expose deployment webapp --port=80 --type=LoadBalancer
+
+# Check status
+kubectl get pods
+kubectl get svc
+
+# View logs
+kubectl logs deployment/webapp
+
+# Scale deployment
+kubectl scale deployment webapp --replicas=5
+
+# Clean up
+kubectl delete deployment webapp
+kubectl delete service webapp
+```
+
+### Using YAML Manifests
+
+Ensure `namespace: microhack` is specified in your YAML:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: myapp
+  namespace: microhack  # ← IMPORTANT
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: myapp
+ template:
+ metadata:
+ labels:
+ app: myapp
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: myapp-service
+  namespace: microhack  # ← IMPORTANT
+spec:
+ selector:
+ app: myapp
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 80
+ type: LoadBalancer
+```
+
+Apply the manifest:
+```bash
+kubectl apply -f deployment.yaml
+```
+
+---
+
+## Troubleshooting
+
+### Error: "Forbidden: User cannot create resource"
+
+**Symptom:**
+```
+Error from server (Forbidden): deployments.apps is forbidden:
+User "user00@cptazure.org" cannot create resource "deployments"
+in API group "apps" in the namespace "default"
+```
+
+**Cause:** User trying to deploy to a namespace other than `microhack`.
+
+**Solution:**
+1. Always specify `-n microhack` in commands
+2. Set default namespace: `kubectl config set-context --current --namespace=microhack`
+3. Ensure YAML manifests include `namespace: microhack`
+
+### Error: "Cannot get kubeconfig"
+
+**Symptom:**
+```
+az aks get-credentials fails with permission error
+```
+
+**Cause:** Azure RBAC not properly configured (missing Cluster User Role).
+
+**Solution:**
+Verify Azure RBAC assignment exists:
+```bash
+az role assignment list --assignee user00@cptazure.org \
+ --scope /subscriptions/{sub-id}/resourceGroups/{rg}/providers/Microsoft.ContainerService/managedClusters/aks-user00
+```
+
+Should see "Azure Kubernetes Service Cluster User Role" assigned.
+
+### Namespace Not Found
+
+**Symptom:**
+```
+Error from server (NotFound): namespaces "microhack" not found
+```
+
+**Cause:** Terraform not applied or namespace creation failed.
+
+**Solution:**
+```bash
+# Re-apply Terraform
+terraform apply
+
+# Verify namespace exists
+kubectl get namespace microhack
+```
+
+### Checking Your Permissions
+
+```bash
+# See all your permissions in microhack namespace
+kubectl auth can-i --list -n microhack
+
+# Check specific permission
+kubectl auth can-i create pods -n microhack
+kubectl auth can-i delete deployments -n microhack
+```
+
+---
+
+## Security Considerations
+
+### Why Namespace Restrictions?
+
+1. **Principle of Least Privilege:** Users only have access to resources they need
+2. **Blast Radius Limitation:** Mistakes confined to `microhack` namespace, not entire cluster
+3. **System Protection:** kube-system and other critical namespaces are protected
+4. **Multi-Tenancy:** Clear separation between users (each has own cluster + restricted namespace)
+
+### What's Protected?
+
+| Resource | Protection Level | Reason |
+|----------|-----------------|---------|
+| **kube-system namespace** | ❌ No access | Contains critical cluster components |
+| **default namespace** | ❌ No access | Prevent cluttering default namespace |
+| **Other namespaces** | ❌ No access | Users shouldn't see each other's work |
+| **Cluster-level resources** | ❌ No access | Nodes, ClusterRoles, PVs are admin-only |
+| **microhack namespace** | ✅ Full access | User's workspace for deployments |
+
+### Privilege Escalation Prevention
+
+Users CANNOT:
+- Modify their own RBAC (Role, RoleBinding are read-only)
+- Create ClusterRoles or ClusterRoleBindings
+- Bind to existing ClusterRoles
+- Create new namespaces
+- Access ServiceAccounts in other namespaces
+
+---
+
+## Comparison with Previous Model
+
+| Aspect | Previous (Insecure) | Current (Secure) |
+|--------|---------------------|------------------|
+| **Azure RBAC** | AKS RBAC Writer (cluster-wide) | Cluster User Role (kubeconfig only) |
+| **Kubernetes RBAC** | None (inherited from Azure) | Namespace-scoped Role + RoleBinding |
+| **Accessible Namespaces** | ALL (default, kube-system, etc.) | ONLY microhack |
+| **Can Create Namespaces** | ✅ Yes | ❌ No |
+| **Can Access kube-system** | ✅ Yes | ❌ No |
+| **Can Modify RBAC** | ✅ Yes (via Azure RBAC) | ❌ No (read-only) |
+| **Security Model** | Trust-based (admin access) | Zero-trust (explicit permissions) |
+| **Blast Radius** | Entire cluster | Single namespace |
+
+---
+
+## Maintenance & Updates
+
+### Adding New Permissions
+
+If users need additional permissions (e.g., access to Custom Resource Definitions):
+
+1. **Update the Role** in `modules/aks/kubernetes-rbac.tf`:
+ ```hcl
+ rule {
+ api_groups = ["example.com"]
+ resources = ["customresources"]
+ verbs = ["get", "list", "watch", "create", "update", "patch", "delete"]
+ }
+ ```
+
+2. **Apply changes:**
+ ```bash
+ terraform apply
+ ```
+
+### Creating Additional Namespaces
+
+If you need a second namespace for a different purpose:
+
+1. **Add new namespace resource:**
+ ```hcl
+ resource "kubernetes_namespace" "staging" {
+ metadata {
+ name = "staging"
+ }
+ }
+ ```
+
+2. **Create Role and RoleBinding for new namespace:**
+ ```hcl
+ resource "kubernetes_role" "staging_deployer" {
+ metadata {
+ name = "staging-deployer"
+ namespace = kubernetes_namespace.staging.metadata[0].name
+ }
+ # ... rules ...
+ }
+
+ resource "kubernetes_role_binding" "user_staging" {
+ metadata {
+ name = "${var.deployment_user_name}-staging-binding"
+ namespace = kubernetes_namespace.staging.metadata[0].name
+ }
+ # ... binding configuration ...
+ }
+ ```
+
+### Re-enabling Cluster-Wide Access (NOT RECOMMENDED)
+
+If absolutely necessary, uncomment the Azure RBAC Writer assignment in `modules/aks/main.tf`:
+
+```hcl
+resource "azurerm_role_assignment" "aks_rbac_writer" {
+ scope = azurerm_kubernetes_cluster.aks.id
+ role_definition_name = "Azure Kubernetes Service RBAC Writer"
+ principal_id = var.deployment_user_object_id
+}
+```
+
+⚠️ **WARNING:** This grants full cluster access and defeats the security model.
+
+---
+
+## Related Documentation
+
+- [RBAC_SUMMARY.md](./RBAC_SUMMARY.md) - Complete access rights per user
+- [VNET_ISOLATION_CORRECT.md](./VNET_ISOLATION_CORRECT.md) - Network isolation architecture
+- [CLOUDSHELL_SHARED_STORAGE.md](./CLOUDSHELL_SHARED_STORAGE.md) - Cloud Shell configuration
+- [Kubernetes RBAC Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
+- [Azure Kubernetes Service RBAC](https://learn.microsoft.com/en-us/azure/aks/azure-ad-rbac)
+
+---
+
+## Summary
+
+✅ **Users NOW have:**
+- Access to `microhack` namespace only
+- Full deployment capabilities within their namespace
+- Protection from accidental system modifications
+- Clear security boundaries
+
+❌ **Users NO LONGER have:**
+- Cluster-wide admin access
+- Ability to create/access other namespaces
+- Access to kube-system or other system namespaces
+- Ability to modify cluster-level resources
+
+This model balances **usability** (users can deploy applications freely) with **security** (restricted to safe boundaries).
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/RBAC_SUMMARY.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/RBAC_SUMMARY.md
new file mode 100644
index 000000000..c97165f64
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/RBAC_SUMMARY.md
@@ -0,0 +1,291 @@
+# RBAC Access Rights Summary - Per User
+
+This document provides a comprehensive overview of all access rights granted to a single user (e.g., user00) in the infrastructure.
+
+## Overview
+
+Each user receives isolated access to their own dedicated resources with **complete isolation** from other users. The architecture ensures users can only access their own AKS cluster, ODAA VNet, Oracle database resources, and Cloud Shell storage.
+
+---
+
+## User00 Example - Complete Access Rights
+
+### 1. Azure Kubernetes Service (AKS) Permissions
+
+**Cluster:** `aks-user00` (in subscription: depends on round-robin assignment)
+
+| Role | Scope | Permissions | Purpose |
+|------|-------|-------------|---------|
+| **Azure Kubernetes Service Cluster User Role** | AKS Cluster `aks-user00` | Get cluster credentials | Allows user to run `az aks get-credentials` to obtain kubeconfig |
+| **Azure Kubernetes Service RBAC Writer** | AKS Cluster `aks-user00` | Full Kubernetes RBAC write access | Allows user to deploy workloads, create namespaces, manage pods/services/deployments in ALL namespaces |
+| **Reader** | AKS Subscription | Read all resources | View resources in the subscription (VNets, AKS clusters, resource groups) |
+
+**What user CAN do in AKS:**
+- ✅ Get kubeconfig and connect to cluster
+- ✅ Create/delete/modify ANY namespace
+- ✅ Deploy workloads to ANY namespace
+- ✅ Create services, ingresses, secrets, configmaps
+- ✅ View cluster nodes, pods, events
+- ✅ Install helm charts anywhere in the cluster
+- ✅ Run kubectl commands with admin-like privileges
+
+**What user CANNOT do in AKS:**
+- ❌ Access other users' AKS clusters (different subscriptions/clusters)
+- ❌ Modify cluster-level settings (node pools, networking, upgrades)
+- ❌ Create new AKS clusters
+
+---
+
+### 2. Oracle Database@Azure (ODAA) Permissions
+
+**ODAA VNet:** `odaa-user00` (in ODAA subscription: `4aecf0e8-2fe2-4187-bc93-0356bd2676f5`)
+**Resource Group:** `odaa-user00`
+
+| Role | Scope | Permissions | Purpose |
+|------|-------|-------------|---------|
+| **Oracle.Database Autonomous Database Administrator** | Resource Group `odaa-user00` | Full admin on ADB resources | Create, delete, modify Oracle Autonomous Databases in their RG |
+| **Private DNS Zone Reader** (custom) | ODAA Subscription | Read Private DNS Zones | View Oracle DNS zones for connectivity |
+| **Oracle Subscriptions Manager Reader** (custom) | ODAA Subscription | Read Oracle subscription details | View Oracle subscription information |
+| **Private DNS Zone Contributor** | Oracle Private DNS Zones | Manage DNS records | Create DNS records for ADB connectivity (4 zones) |
+
+**What user CAN do in ODAA:**
+- ✅ Create/delete/manage Oracle Autonomous Databases in their own RG (`odaa-user00`)
+- ✅ View Oracle subscription details
+- ✅ View Private DNS zones
+- ✅ Create DNS A records in Oracle Private DNS zones
+- ✅ Access their ODAA VNet resources
+
+**What user CANNOT do in ODAA:**
+- ❌ Access other users' ODAA resource groups (`odaa-user01`, `odaa-user02`, etc.)
+- ❌ Create/delete/modify other users' databases
+- ❌ View or access other users' ODAA VNets (network isolation)
+- ❌ Modify ODAA subscription settings
+
+---
+
+### 3. Cloud Shell Storage Permissions
+
+**Storage Account:** `odaamh` (shared by all users)
+**File Share:** `cloudshell-user00` (dedicated to user00)
+**Resource Group:** `odaa`
+
+| Role | Scope | Permissions | Purpose |
+|------|-------|-------------|---------|
+| **Storage Blob Data Contributor** | Storage Account `odaamh` | Read/write/delete blobs | Access Cloud Shell state files and blobs |
+| **Storage File Data SMB Share Contributor** | Storage Account `odaamh` | Full access to file shares | Access their Cloud Shell home directory |
+| **Reader** | Resource Group `odaa` | Read resources | View storage account in Azure Portal during Cloud Shell setup |
+
+**What user CAN do with Cloud Shell Storage:**
+- ✅ Read/write files in their file share (`cloudshell-user00`)
+- ✅ Store Cloud Shell state and profile
+- ✅ Access their home directory in Cloud Shell
+- ✅ View storage account details
+
+**What user CANNOT do:**
+- ❌ Access other users' file shares (`cloudshell-user01`, `cloudshell-user02`, etc.)
+- ❌ Delete or modify the storage account
+- ❌ View or access other users' Cloud Shell files
+
+---
+
+### 4. Entra ID / Azure AD Permissions
+
+**User Account:** `user00@cptazure.org`
+**Group Membership:** `mh-odaa-user-grp` (all users)
+
+| Permission | Scope | Purpose |
+|------------|-------|---------|
+| **User** (standard) | Entra ID Tenant | Standard user account permissions |
+| **Group Member** | `mh-odaa-user-grp` | Access to Oracle Cloud Infrastructure Console (app role assigned) |
+
+**What user CAN do:**
+- ✅ Log in to Azure Portal
+- ✅ Access assigned subscriptions and resources
+- ✅ Use Azure CLI / PowerShell with their credentials
+- ✅ Access Oracle Cloud Infrastructure Console (via group membership)
+
+**What user CANNOT do:**
+- ❌ Create new users or modify other users
+- ❌ Manage group memberships
+- ❌ Modify Entra ID settings
+
+---
+
+## Network Isolation
+
+### VNet Architecture (user00)
+
+**AKS VNet:** `vnet-aks-user00` (CIDR: `10.0.0.0/16`)
+- Subnet: `snet-aks-user00` (`10.0.0.0/23`)
+- Service CIDR: `172.16.0.0/24` (unique per user)
+
+**ODAA VNet:** `odaa-user00` (CIDR: `192.168.0.0/16`)
+- Subnet: `snet-odaa-user00` (`192.168.0.0/24`)
+
+**Peering:** `aks-user00 ↔ odaa-user00` (1:1 peering)
+
+**Isolation:**
+- ✅ user00's AKS VNet is peered ONLY to user00's ODAA VNet
+- ✅ NO peering to other users' VNets
+- ✅ All users use same CIDR ranges (isolation by VNet boundaries, not CIDR)
+- ✅ Network traffic cannot cross between users
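+
+A quick CLI check of the 1:1 peering (illustrative; resource group and VNet names follow the conventions above):
+
+```bash
+# Should list exactly one peering, whose remote VNet is odaa-user00
+az network vnet peering list \
+  --resource-group rg-aks-user00 \
+  --vnet-name vnet-aks-user00 \
+  --query "[].{name:name, remote:remoteVirtualNetwork.id}" -o table
+```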
+
+---
+
+## Security Summary
+
+### ✅ What Each User Has ACCESS To
+
+| Resource Type | Scope | Access Level |
+|---------------|-------|--------------|
+| **AKS Cluster** | Their own cluster only | Full RBAC Writer (admin-like) |
+| **AKS Subscription** | Read-only | View resources |
+| **ODAA Resource Group** | Their own RG only | Full ADB Administrator |
+| **ODAA VNet** | Their own VNet only | Network access via peering |
+| **Private DNS Zones** | ODAA subscription | Contributor (create records) |
+| **Cloud Shell Storage** | Shared storage account | Access only their file share |
+| **Oracle Subscription** | ODAA subscription | Read-only (subscription details) |
+
+### ❌ What Each User Does NOT Have Access To
+
+| Resource Type | Restriction |
+|---------------|-------------|
+| **Other Users' AKS Clusters** | Complete isolation - different subscriptions/clusters |
+| **Other Users' ODAA RGs** | No permissions - RBAC scoped to own RG only |
+| **Other Users' ODAA VNets** | Network isolation - no peering to other users |
+| **Other Users' Databases** | Cannot view, modify, or delete |
+| **Other Users' Cloud Shell** | Cannot access file shares or files |
+| **Cluster Admin Operations** | Cannot modify node pools, cluster settings |
+| **Subscription Management** | Cannot create resources outside assigned scopes |
+
+---
+
+## Comparison: Current vs Previous Architecture
+
+### Previous (Shared ODAA VNet)
+
+| Issue | Risk Level |
+|-------|-----------|
+| All users in same ODAA VNet | **HIGH** - Network access to all databases |
+| Group-level ADB Admin on shared RG | **HIGH** - user00 could delete user01's database |
+| Same ODAA resource group | **MEDIUM** - Resource confusion/conflicts |
+
+### Current (Per-User Isolation)
+
+| Improvement | Benefit |
+|-------------|---------|
+| Per-user ODAA VNets | **Complete network isolation** |
+| Per-user ODAA RGs | **Complete resource isolation** |
+| Per-user RBAC scoping | **Cannot access other users' databases** |
+| Per-user file shares | **Cloud Shell data isolation** |
+
+---
+
+## Permission Hierarchy
+
+```
+user00@cptazure.org
+│
+├── AKS Subscription (Round-robin assigned)
+│   ├── AKS Cluster: aks-user00
+│   │   ├── Cluster User Role ✅
+│   │   └── RBAC Writer ✅ (FULL ACCESS TO ALL NAMESPACES)
+│   └── Subscription Reader ✅
+│
+├── ODAA Subscription (4aecf0e8-2fe2-4187-bc93-0356bd2676f5)
+│   ├── Resource Group: odaa-user00
+│   │   └── ADB Administrator ✅ (scoped to this RG only)
+│   ├── Private DNS Zones (4 zones)
+│   │   └── Private DNS Zone Contributor ✅
+│   └── Subscription Level
+│       ├── Private DNS Zone Reader ✅
+│       └── Oracle Subscriptions Manager Reader ✅
+│
+└── Cloud Shell Storage Subscription (09808f31-065f-4231-914d-776c2d6bbe34)
+    └── Storage Account: odaamh
+        ├── Storage Blob Data Contributor ✅
+        ├── Storage File Data SMB Share Contributor ✅
+        └── Resource Group Reader ✅
+```
+
+---
+
+## Current Limitation: AKS Namespace Access
+
+⚠️ **Users currently have FULL access to ALL namespaces in their AKS cluster**
+
+The `Azure Kubernetes Service RBAC Writer` role grants:
+- Full admin access to the entire cluster
+- Can create/delete ANY namespace
+- Can deploy to ANY namespace
+- Can modify/delete ANY resource
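+
+This is easy to confirm from a user session (assuming a kubeconfig obtained via `az aks get-credentials`):
+
+```bash
+# Both return "yes" while the RBAC Writer role is assigned
+kubectl auth can-i create namespaces
+kubectl auth can-i delete deployments --all-namespaces
+```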
+
+### Recommendation: Implement Namespace Restrictions
+
+See **Namespace-Based Access Control** section in the implementation plan below for details on how to restrict users to specific namespaces (e.g., `microhack` namespace only).
+
+---
+
+## Cost Per User (Approximate Monthly)
+
+| Resource | Cost |
+|----------|------|
+| AKS Cluster (3 nodes) | ~$200-300 |
+| ODAA VNet | Free |
+| VNet Peering | ~$5 |
+| Cloud Shell Storage (6 GB share) | ~$1.20 |
+| Oracle Database (if created) | Variable |
+| **Total (without ADB)** | **~$206-306/month** |
+
+---
+
+## Testing User Access
+
+### Verify AKS Access
+
+```bash
+# Login as user00
+az login -u user00@cptazure.org
+
+# Get AKS credentials (should work)
+az aks get-credentials --resource-group rg-aks-user00 --name aks-user00
+
+# Deploy to cluster (should work)
+kubectl create namespace test
+kubectl run nginx --image=nginx -n test
+
+# Try to access user01's cluster (should fail)
+az aks get-credentials --resource-group rg-aks-user01 --name aks-user01
+# Error: User does not have access
+```
+
+### Verify ODAA Access
+
+```bash
+# List databases in own RG (should work)
+az oracle-database autonomous-database list --resource-group odaa-user00
+
+# Try to list databases in user01's RG (should fail)
+az oracle-database autonomous-database list --resource-group odaa-user01
+# Error: Authorization failed
+```
+
+### Verify Cloud Shell Isolation
+
+```bash
+# Access own file share (works)
+# Cloud Shell will mount cloudshell-user00
+
+# Try to access another user's files (blocked by RBAC)
+az storage file list --account-name odaamh --share-name cloudshell-user01
+# Error: This request is not authorized
+```
+
+---
+
+## Related Documentation
+
+- `VNET_ISOLATION_CORRECT.md` - VNet isolation architecture
+- `RBAC_ANALYSIS.md` - Detailed RBAC analysis and security assessment
+- `CLOUDSHELL_SHARED_STORAGE.md` - Cloud Shell storage configuration
+- `NAMESPACE_RBAC.md` - **NEW** - Namespace-based access control (to be created)
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/configure-oci-app-role.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/configure-oci-app-role.md
new file mode 100644
index 000000000..a1b03b281
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/configure-oci-app-role.md
@@ -0,0 +1,45 @@
+# Configuring App Roles for OCI Confidential Application
+
+## Overview
+
+The "Oracle Cloud Infrastructure Console" Enterprise Application in Entra ID enables Single Sign-On (SSO) between Azure and OCI.
+
+When configured correctly, users authenticate through Entra ID and, via SCIM provisioning, are seamlessly granted access to OCI resources without needing separate OCI credentials.
+
+
+## Find your Enterprise Application on the Azure Portal
+
+1. Navigate to the [Azure Portal](https://portal.azure.com)
+2. Go to **Microsoft Entra ID** → **Enterprise applications**
+3. In the search box, type **"Oracle Cloud"**
+4. Select **Oracle Cloud Infrastructure Console** from the results
+
+
+
+
+## Find your corresponding OCI confidential Application on the OCI Console (Web Portal)
+
+Log into the OCI console via https://cloud.oracle.com/.
+
+
+
+Click on the Hamburger menu (three horizontal lines) in the top left corner, then navigate to **Identity & Security** → **Security** → **Domains**.
+
+
+
+Select your domain; in our case it is the "Default" domain.
+
+
+
+Select "Confidetial Applications" tab to see the list of confidential applications.
+Select the once called "cptazure.org confidential application".
+
+
+
+Inside the confidential application, select "OAuth configuration" and then "Edit OAuth configuration":
+
+
+
+Scroll down to the "App Roles" section, click on "Add App Role", select the appropriate app role (typically **Identity Domain Administrator**), then click **Save Changes**.
+
+
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/entra-id-enterprise-application.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/entra-id-enterprise-application.png
new file mode 100644
index 000000000..8584dae44
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/entra-id-enterprise-application.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-app-role-assignment.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-app-role-assignment.png
new file mode 100644
index 000000000..35faae0f2
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-app-role-assignment.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-confidential-application.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-confidential-application.png
new file mode 100644
index 000000000..c179c0ccb
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-confidential-application.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-console-start-page.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-console-start-page.png
new file mode 100644
index 000000000..4704f3988
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-console-start-page.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-domains-overview.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-domains-overview.png
new file mode 100644
index 000000000..b7f5c6764
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-domains-overview.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-identity-security-menu.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-identity-security-menu.png
new file mode 100644
index 000000000..912889508
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-identity-security-menu.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-oauth-configuration.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-oauth-configuration.png
new file mode 100644
index 000000000..6c5b57d89
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/docs/media/configure-oci-app-role/oci-oauth-configuration.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/README.md
new file mode 100644
index 000000000..3e1194400
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/README.md
@@ -0,0 +1,155 @@
+# Identity Management (Terraform)
+
+This folder contains a **separate Terraform configuration** for managing Entra ID users
+and group memberships independently from the main infrastructure.
+
+## Design Principles
+
+1. **Create Once** - Users and group memberships are created once and never modified
+2. **Password Rotation Only** - After initial creation, only passwords can be rotated
+3. **Single Output File** - `user_credentials.json` contains both object IDs and passwords
+4. **No Race Conditions** - `ignore_changes = all` prevents Azure AD eventual consistency issues (see the sketch below)
+
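+A minimal sketch of the pattern behind principle 4 (hypothetical resource names; the real definitions live in `../modules/entra-id`):
+
+```hcl
+resource "random_password" "initial" {
+  length = 16
+}
+
+resource "azuread_user" "example" {
+  user_principal_name = "user00@cptazure.org"
+  display_name        = "Peter Parker"
+  password            = random_password.initial.result
+
+  lifecycle {
+    # Never reconcile drift after creation; Azure AD eventual
+    # consistency would otherwise produce spurious diffs.
+    ignore_changes = all
+  }
+}
+```
+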
+## How It Works
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│ INITIAL DEPLOYMENT (run once)                                   │
+├─────────────────────────────────────────────────────────────────┤
+│ 1. azuread_user         - Creates users with initial password   │
+│ 2. time_sleep           - Waits for Azure AD propagation (90s)  │
+│ 3. azuread_group_member - Adds users to security group          │
+│ 4. local_file           - Exports user_credentials.json         │
+│                                                                 │
+│ After this, azuread_user and azuread_group_member have          │
+│ ignore_changes = all, so they're never touched again.           │
+└─────────────────────────────────────────────────────────────────┘
+                               │
+                               ▼
+┌─────────────────────────────────────────────────────────────────┐
+│ PASSWORD ROTATION (run before each event)                       │
+├─────────────────────────────────────────────────────────────────┤
+│ 1. random_password - Generates new passwords (keeper trigger)   │
+│ 2. null_resource   - Calls `az ad user update --password`       │
+│ 3. local_file      - Updates user_credentials.json              │
+│                                                                 │
+│ The azuread_user resource is NOT modified - password update     │
+│ happens via Azure CLI local-exec, avoiding race conditions.     │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+## Output File Format
+
+`user_credentials.json` (single consolidated file):
+
+```json
+{
+ "generated_at": "2025-11-29T10:00:00Z",
+ "password_rotation_trigger": "event-december-2025",
+ "microhack_event_name": "mh2025muc",
+ "user_count": 20,
+ "group": {
+ "object_id": "5fbc2654-d343-401a-be86-08327fe66ec2",
+ "display_name": "mh-odaa-user-grp"
+ },
+ "users": {
+ "user00": {
+ "object_id": "abc12345-...",
+ "user_principal_name": "user00@cptazure.org",
+ "display_name": "Peter Parker",
+ "password": "xK9mNp2qR4"
+ },
+ "user01": { ... }
+ }
+}
+```
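+
+For example, to pull one participant's credentials out of the file (PowerShell, matching the workflow below):
+
+```powershell
+$creds = Get-Content ./user_credentials.json | ConvertFrom-Json
+$creds.users.user00.user_principal_name   # user00@cptazure.org
+$creds.users.user00.password              # current password
+```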
+
+## Workflow
+
+### 1. Initial Setup (Run Once)
+
+```powershell
+cd identity
+terraform init
+terraform apply
+```
+
+This creates:
+- Users (user00 through userN)
+- Group membership in `mh-odaa-user-grp`
+- Exports `user_credentials.json`
+
+### 2. Deploy Main Infrastructure
+
+```powershell
+cd .. # Back to main terraform folder
+terraform apply -var="use_external_identity=true"
+```
+
+The main configuration reads the exported `user_credentials.json` (in the terraform root folder) for object IDs.
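+
+Under the hood this is a `local_file` data source plus `jsondecode` (simplified from the root `main.tf`; `user00_id` is just an illustrative local):
+
+```hcl
+data "local_file" "identity" {
+  filename = "${path.root}/user_credentials.json"
+}
+
+locals {
+  identity_data = jsondecode(data.local_file.identity.content)
+  user00_id     = local.identity_data.users["user00"].object_id
+}
+```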
+
+### 3. Password Rotation (Before Each Event)
+
+```powershell
+cd identity
+# Update terraform.tfvars: user_reset_trigger = "event-december-2025"
+terraform apply
+```
+
+Or use the helper script:
+```powershell
+.\scripts\rotate-passwords.ps1 -Phase start -EventName "december-workshop"
+```
+
+### 4. Revoke Access (After Each Event)
+
+```powershell
+.\scripts\rotate-passwords.ps1 -Phase end
+```
+
+## Variables
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `user_count` | 1 | Number of users to create |
+| `tenant_id` | (required) | Azure AD tenant ID |
+| `entra_user_principal_domain` | `cptazure.org` | User principal name domain |
+| `azuread_propagation_wait_seconds` | 90 | Seconds to wait for AD propagation (first run only) |
+| `user_reset_trigger` | `disabled` | Change to rotate passwords and reset MFA |
+
+## Troubleshooting
+
+### First Run Fails with Race Condition
+
+If the initial deployment fails with "Provider produced inconsistent result":
+
+1. **Wait and retry** - Azure AD may need more time
+ ```powershell
+ terraform apply -var="azuread_propagation_wait_seconds=180"
+ ```
+
+2. **Import orphaned resources** - If group member exists but state is inconsistent
+ ```powershell
+ $userId = (az ad user show --id "user00@cptazure.org" --query id -o tsv)
+ $groupId = (az ad group show --group "mh-odaa-user-grp" --query id -o tsv)
+ terraform import 'module.entra_id_users.azuread_group_member.aks_deployment_users[\"0\"]' "${groupId}/member/${userId}"
+ ```
+
+### Subsequent Runs Should Be Safe
+
+After the first successful run:
+- `azuread_user` has `ignore_changes = all` → no modifications
+- `azuread_group_member` has `ignore_changes = all` → no modifications
+- Password rotation uses `az ad user update` via local-exec → no Terraform state issues
+
+## Security Notes
+
+⚠️ **IMPORTANT**:
+- `user_credentials.json` contains passwords - **do not commit to git!**
+- Rotate passwords after each event to revoke participant access
+- The `password_rotation_trigger` value is logged - use descriptive names for audit trail
+
+## Related Documentation
+
+- [DEPLOYMENT_GUIDE.md](../docs/DEPLOYMENT_GUIDE.md) - Full deployment instructions
+- [RBAC_SUMMARY.md](../docs/RBAC_SUMMARY.md) - RBAC configuration details
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/main.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/main.tf
new file mode 100644
index 000000000..291798d42
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/main.tf
@@ -0,0 +1,206 @@
+# ===============================================================================
+# Entra ID User Management - Separate Terraform Configuration
+# ===============================================================================
+# This configuration manages Entra ID users and group memberships separately
+# from the main infrastructure deployment. This separation:
+# - Avoids Azure AD eventual consistency race conditions
+# - Allows users to fully propagate before infrastructure deployment
+# - Enables faster iterations on infrastructure changes
+# ===============================================================================
+
+locals {
+ default_prefix = "user"
+
+ user_indices = range(var.user_count)
+
+ deployment_users = {
+ for idx in local.user_indices :
+ tostring(idx) => {
+ identifier = lower(format("%s%02d", local.default_prefix, idx))
+ }
+ }
+
+ shared_deployment_group = {
+ name = "mh-odaa-user-grp"
+ description = "Security group with rights to deploy applications to the Oracle AKS cluster"
+ }
+
+ common_tags = {
+ Project = var.microhack_event_name
+ ManagedBy = "Terraform"
+ Component = "Identity"
+ }
+}
+
+# ===============================================================================
+# Entra ID Users and Group Membership
+# ===============================================================================
+
+module "entra_id_users" {
+ source = "../modules/entra-id"
+
+ providers = {
+ azuread = azuread
+ }
+
+ aks_deployment_group_name = local.shared_deployment_group.name
+ aks_deployment_group_description = local.shared_deployment_group.description
+ tenant_id = var.tenant_id
+ user_principal_domain = var.entra_user_principal_domain
+ users = local.deployment_users
+ azuread_propagation_wait_seconds = var.azuread_propagation_wait_seconds
+ user_reset_trigger = var.user_reset_trigger
+
+ tags = merge(local.common_tags, {
+ AKSDeploymentGroup = local.shared_deployment_group.name
+ })
+}
+
+# ===============================================================================
+# Consolidated User Credentials Export
+# ===============================================================================
+# Single JSON file containing:
+# - Group information (object_id, display_name)
+# - User information (object_id, user_principal_name, display_name, password)
+#
+# This file is consumed by:
+# - Main infrastructure Terraform (reads object IDs for RBAC)
+# - Event organizers (distribute credentials to participants)
+#
+# Output location: terraform/user_credentials.json (parent folder, not identity/)
+# ===============================================================================
+
+locals {
+ # Output to parent folder (terraform root) for easy access
+ # path.root is identity/, so we go up one level
+ user_credentials_output_path = "${path.root}/../user_credentials.json"
+}
+
+resource "local_file" "user_credentials" {
+ filename = local.user_credentials_output_path
+ content = jsonencode({
+ generated_at = timestamp()
+ user_reset_trigger = var.user_reset_trigger
+ microhack_event_name = var.microhack_event_name
+ user_count = var.user_count
+
+ group = {
+ object_id = module.entra_id_users.group_object_id
+ display_name = local.shared_deployment_group.name
+ }
+
+ users = {
+ for idx in local.user_indices :
+ format("%s%02d", local.default_prefix, idx) => {
+ object_id = module.entra_id_users.user_object_ids[tostring(idx)]
+ user_principal_name = module.entra_id_users.user_principal_names[tostring(idx)]
+ display_name = module.entra_id_users.user_credentials[tostring(idx)].display_name
+ password = module.entra_id_users.user_credentials[tostring(idx)].initial_password
+ }
+ }
+ })
+
+ # Format the JSON file after creation for better readability
+ provisioner "local-exec" {
+ command = "Get-Content '${local.user_credentials_output_path}' | ConvertFrom-Json | ConvertTo-Json -Depth 10 | Set-Content '${local.user_credentials_output_path}' -Encoding UTF8"
+ interpreter = ["pwsh", "-Command"]
+ }
+}
+
+# ===============================================================================
+# MFA Reset for Workshop Users
+# ===============================================================================
+# This null_resource triggers MFA reset when mfa_reset_trigger changes.
+# It removes all MFA authentication methods (except password) from users,
+# allowing new workshop attendees to register their own MFA on first login.
+#
+# Required: UserAuthenticationMethod.ReadWrite.All permission on service principal
+# Alternative: Run scripts/reset-user-mfa.ps1 manually as Authentication Administrator
+# ===============================================================================
+
+resource "null_resource" "mfa_reset" {
+ count = var.user_reset_trigger != "disabled" ? 1 : 0
+
+ triggers = {
+ user_reset_trigger = var.user_reset_trigger
+ user_count = var.user_count
+ }
+
+ # Depends on users being created first
+ depends_on = [module.entra_id_users]
+
+ provisioner "local-exec" {
+ command = <<-EOT
+ $ErrorActionPreference = "Continue"
+ $users = @(${join(",", [for idx in local.user_indices : format("'%s%02d@%s'", local.default_prefix, idx, var.entra_user_principal_domain)])})
+
+ Write-Host "========================================" -ForegroundColor Cyan
+ Write-Host "MFA Reset - Trigger: ${var.user_reset_trigger}" -ForegroundColor Cyan
+ Write-Host "========================================" -ForegroundColor Cyan
+ Write-Host "Users to process: $($users.Count)" -ForegroundColor Yellow
+
+ $successCount = 0
+ $errorCount = 0
+ $noMfaCount = 0
+
+ foreach ($upn in $users) {
+ Write-Host "`nProcessing: $upn" -ForegroundColor Cyan
+
+ try {
+ # Get all authentication methods
+ $methodsJson = az rest --method GET --uri "https://graph.microsoft.com/v1.0/users/$upn/authentication/methods" 2>&1
+
+ if ($LASTEXITCODE -ne 0) {
+ Write-Host " WARN: Cannot access auth methods (permission issue) - $methodsJson" -ForegroundColor Yellow
+ $errorCount++
+ continue
+ }
+
+ $methods = $methodsJson | ConvertFrom-Json
+ $mfaMethods = $methods.value | Where-Object { $_.'@odata.type' -ne '#microsoft.graph.passwordAuthenticationMethod' }
+
+ if ($mfaMethods.Count -eq 0) {
+ Write-Host " No MFA methods registered" -ForegroundColor Gray
+ $noMfaCount++
+ continue
+ }
+
+ foreach ($method in $mfaMethods) {
+ $methodType = $method.'@odata.type' -replace '#microsoft.graph.', ''
+ $methodId = $method.id
+
+ $deleteUri = switch ($methodType) {
+ "phoneAuthenticationMethod" { "https://graph.microsoft.com/v1.0/users/$upn/authentication/phoneMethods/$methodId" }
+ "microsoftAuthenticatorAuthenticationMethod" { "https://graph.microsoft.com/v1.0/users/$upn/authentication/microsoftAuthenticatorMethods/$methodId" }
+ "softwareOathAuthenticationMethod" { "https://graph.microsoft.com/v1.0/users/$upn/authentication/softwareOathMethods/$methodId" }
+ "fido2AuthenticationMethod" { "https://graph.microsoft.com/v1.0/users/$upn/authentication/fido2Methods/$methodId" }
+ "emailAuthenticationMethod" { "https://graph.microsoft.com/v1.0/users/$upn/authentication/emailMethods/$methodId" }
+ default { $null }
+ }
+
+ if ($deleteUri) {
+ az rest --method DELETE --uri $deleteUri 2>&1 | Out-Null
+ if ($LASTEXITCODE -eq 0) {
+ Write-Host " Removed: $methodType" -ForegroundColor Green
+ }
+ }
+ }
+ $successCount++
+ } catch {
+ Write-Host " ERROR: $_" -ForegroundColor Red
+ $errorCount++
+ }
+ }
+
+ Write-Host "`n========================================" -ForegroundColor Cyan
+ Write-Host "MFA Reset Complete" -ForegroundColor Cyan
+ Write-Host "Processed: $successCount | No MFA: $noMfaCount | Errors: $errorCount" -ForegroundColor Yellow
+
+ if ($errorCount -gt 0) {
+ Write-Host "`nNote: Permission errors are expected if service principal lacks" -ForegroundColor Yellow
+ Write-Host "UserAuthenticationMethod.ReadWrite.All. Run reset-user-mfa.ps1 manually." -ForegroundColor Yellow
+ }
+ EOT
+ interpreter = ["pwsh", "-Command"]
+ }
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/outputs.tf
new file mode 100644
index 000000000..cdf10a083
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/outputs.tf
@@ -0,0 +1,36 @@
+# ===============================================================================
+# Entra ID User Management - Outputs
+# ===============================================================================
+# These outputs are used by the main infrastructure Terraform configuration
+# to assign RBAC roles to the created users.
+# ===============================================================================
+
+output "user_object_ids" {
+ description = "Map of username to Azure AD object ID for all created users"
+ value = module.entra_id_users.user_object_ids
+}
+
+output "user_principal_names" {
+ description = "Map of username to user principal name (UPN) for all created users"
+ value = module.entra_id_users.user_principal_names
+}
+
+output "group_object_id" {
+ description = "Object ID of the Entra ID group containing all deployment users"
+ value = module.entra_id_users.group_object_id
+}
+
+output "user_credentials_file" {
+ description = "Path to the JSON file containing user credentials (in terraform root folder)"
+ value = local_file.user_credentials.filename
+}
+
+output "user_count" {
+ description = "Number of users created"
+ value = var.user_count
+}
+
+output "microhack_event_name" {
+ description = "Event name used for this deployment"
+ value = var.microhack_event_name
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/providers.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/providers.tf
new file mode 100644
index 000000000..0997069af
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/providers.tf
@@ -0,0 +1,9 @@
+# ===============================================================================
+# Entra ID User Management - Provider Configuration
+# ===============================================================================
+
+provider "azuread" {
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/users.json b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/users.json
new file mode 100644
index 000000000..92058219c
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/users.json
@@ -0,0 +1,52 @@
+[
+ {"given_name": "Peter", "surname": "Parker", "hero_name": "Spider-Man"},
+ {"given_name": "Bruce", "surname": "Wayne", "hero_name": "Batman"},
+ {"given_name": "Diana", "surname": "Prince", "hero_name": "Wonder Woman"},
+ {"given_name": "Clark", "surname": "Kent", "hero_name": "Superman"},
+ {"given_name": "Barry", "surname": "Allen", "hero_name": "The Flash"},
+ {"given_name": "Natasha", "surname": "Romanoff", "hero_name": "Black Widow"},
+ {"given_name": "Tony", "surname": "Stark", "hero_name": "Iron Man"},
+ {"given_name": "Carol", "surname": "Danvers", "hero_name": "Captain Marvel"},
+ {"given_name": "Stephen", "surname": "Strange", "hero_name": "Doctor Strange"},
+ {"given_name": "Wanda", "surname": "Maximoff", "hero_name": "Scarlet Witch"},
+ {"given_name": "T'Challa", "surname": "Udaku", "hero_name": "Black Panther"},
+ {"given_name": "Shuri", "surname": "Udaku", "hero_name": "Shuri"},
+ {"given_name": "Sam", "surname": "Wilson", "hero_name": "Falcon"},
+ {"given_name": "Scott", "surname": "Lang", "hero_name": "Ant-Man"},
+ {"given_name": "Ororo", "surname": "Munroe", "hero_name": "Storm"},
+ {"given_name": "Hal", "surname": "Jordan", "hero_name": "Green Lantern"},
+ {"given_name": "Arthur", "surname": "Curry", "hero_name": "Aquaman"},
+ {"given_name": "Victor", "surname": "Stone", "hero_name": "Cyborg"},
+ {"given_name": "Billy", "surname": "Batson", "hero_name": "Shazam"},
+ {"given_name": "Barbara", "surname": "Gordon", "hero_name": "Batgirl"},
+ {"given_name": "Kamala", "surname": "Khan", "hero_name": "Ms. Marvel"},
+ {"given_name": "Kate", "surname": "Bishop", "hero_name": "Hawkeye"},
+ {"given_name": "Jessica", "surname": "Jones", "hero_name": "Jewel"},
+ {"given_name": "Matt", "surname": "Murdock", "hero_name": "Daredevil"},
+ {"given_name": "Luke", "surname": "Cage", "hero_name": "Power Man"},
+ {"given_name": "Jean", "surname": "Grey", "hero_name": "Phoenix"},
+ {"given_name": "Logan", "surname": "Howlett", "hero_name": "Wolverine"},
+ {"given_name": "Remy", "surname": "LeBeau", "hero_name": "Gambit"},
+ {"given_name": "Raven", "surname": "Darkholme", "hero_name": "Mystique"},
+ {"given_name": "Scott", "surname": "Summers", "hero_name": "Cyclops"},
+ {"given_name": "Charles", "surname": "Xavier", "hero_name": "Professor X"},
+ {"given_name": "Kurt", "surname": "Wagner", "hero_name": "Nightcrawler"},
+ {"given_name": "Kitty", "surname": "Pryde", "hero_name": "Shadowcat"},
+ {"given_name": "Piotr", "surname": "Rasputin", "hero_name": "Colossus"},
+ {"given_name": "Betsy", "surname": "Braddock", "hero_name": "Psylocke"},
+ {"given_name": "Monica", "surname": "Rambeau", "hero_name": "Spectrum"},
+ {"given_name": "Jennifer", "surname": "Walters", "hero_name": "She-Hulk"},
+ {"given_name": "Marc", "surname": "Spector", "hero_name": "Moon Knight"},
+ {"given_name": "Janet", "surname": "Van Dyne", "hero_name": "Wasp"},
+ {"given_name": "Hope", "surname": "Van Dyne", "hero_name": "Wasp (Hope)"},
+ {"given_name": "Miles", "surname": "Morales", "hero_name": "Spider-Man (Miles)"},
+ {"given_name": "Gwen", "surname": "Stacy", "hero_name": "Ghost-Spider"},
+ {"given_name": "Eddie", "surname": "Brock", "hero_name": "Venom"},
+ {"given_name": "Felicia", "surname": "Hardy", "hero_name": "Black Cat"},
+ {"given_name": "Stephen", "surname": "Grant", "hero_name": "Mr. Knight"},
+ {"given_name": "Marcella", "surname": "Fury", "hero_name": "Agent Fury"},
+ {"given_name": "Nick", "surname": "Fury", "hero_name": "Nick Fury"},
+ {"given_name": "Maria", "surname": "Hill", "hero_name": "Maria Hill"},
+ {"given_name": "Phil", "surname": "Coulson", "hero_name": "Agent Coulson"},
+ {"given_name": "Daisy", "surname": "Johnson", "hero_name": "Quake"}
+]
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/variables.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/variables.tf
new file mode 100644
index 000000000..9fcb23eac
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/variables.tf
@@ -0,0 +1,114 @@
+# ===============================================================================
+# Entra ID User Management - Variable Definitions
+# ===============================================================================
+
+variable "microhack_event_name" {
+ description = "Name of the microhack event, used for resource tagging"
+ type = string
+ default = "mh2025muc"
+}
+
+variable "user_count" {
+ description = "Number of isolated user environments to provision"
+ type = number
+ default = 1
+
+ validation {
+ condition = var.user_count >= 1
+ error_message = "At least one user environment must be provisioned."
+ }
+}
+
+variable "tenant_id" {
+ description = "Azure AD tenant ID for service principal authentication"
+ type = string
+
+ validation {
+ condition = can(regex("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", var.tenant_id))
+ error_message = "The tenant_id must be a valid GUID/UUID format."
+ }
+}
+
+variable "client_id" {
+ description = "The Client ID (Application ID) for the Service Principal"
+ type = string
+
+ validation {
+ condition = var.client_id != null && var.client_id != "" && can(regex("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", var.client_id))
+ error_message = "The client_id must be a valid GUID/UUID format."
+ }
+}
+
+variable "client_secret" {
+ description = "The Client Secret for the Service Principal"
+ type = string
+ sensitive = true
+
+ validation {
+ condition = var.client_secret != null && var.client_secret != "" && length(var.client_secret) > 0
+ error_message = "The client_secret must be provided and cannot be empty."
+ }
+}
+
+variable "entra_user_principal_domain" {
+ description = "Domain suffix used to construct Entra user principal names"
+ type = string
+ default = "cptazure.org"
+}
+
+# ===============================================================================
+# Azure AD Workarounds
+# ===============================================================================
+
+variable "azuread_propagation_wait_seconds" {
+ description = <<-EOT
+ Wait time in seconds for Azure AD changes to propagate before adding group membership.
+ Set to 0 to disable wait.
+
+ WORKAROUND: Azure AD has eventual consistency issues with group membership operations.
+ GitHub Issue: https://github.com/hashicorp/terraform-provider-azuread/issues/1810
+
+ Recommended values:
+ - Small tenants: 90-180 seconds
+ - Medium tenants: 180-300 seconds
+ - Large tenants (5000+ users): Up to 48-72 hours reported
+
+ Since users are in a separate Terraform run, you can use a lower value (60-90s)
+ because the infrastructure deployment will occur later anyway.
+ EOT
+ type = number
+ default = 90
+
+ validation {
+ condition = var.azuread_propagation_wait_seconds >= 0
+ error_message = "azuread_propagation_wait_seconds must be 0 or greater."
+ }
+}
+
+variable "user_reset_trigger" {
+ description = <<-EOT
+ Change this value to reset ALL users for the next workshop event.
+ This performs TWO operations:
+ 1. Rotates passwords - generates new random passwords for all users
+ 2. Resets MFA - removes all registered MFA methods (authenticator apps, phone, etc.)
+
+ Set to "disabled" to skip both operations.
+
+ Examples:
+ - Use a date: "2025-11-29" (reset before each event)
+ - Use an event name: "workshop-december-2025"
+ - Use "disabled" to skip reset
+
+ Workflow:
+ 1. After event ends: change to "post-event-X" and apply (revokes access)
+ 2. Before next event: change to "event-Y" and apply (new credentials)
+ 3. Distribute new user_credentials.json to participants
+ 4. Attendees register their own MFA on first Azure login
+
+ IMPORTANT for MFA reset: Requires UserAuthenticationMethod.ReadWrite.All
+ permission on service principal, OR run scripts/reset-user-mfa.ps1 manually
+ as Authentication Administrator.
+ EOT
+ type = string
+ default = "disabled"
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/versions.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/versions.tf
new file mode 100644
index 000000000..38ba39236
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/identity/versions.tf
@@ -0,0 +1,26 @@
+# ===============================================================================
+# Entra ID User Management - Terraform Version Constraints
+# ===============================================================================
+
+terraform {
+ required_version = ">= 1.0"
+
+ required_providers {
+ azuread = {
+ source = "hashicorp/azuread"
+ version = "~> 2.53"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = "~> 3.1"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = "~> 2.0"
+ }
+ time = {
+ source = "hashicorp/time"
+ version = "~> 0.9"
+ }
+ }
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/main.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/main.tf
new file mode 100644
index 000000000..316e931ad
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/main.tf
@@ -0,0 +1,874 @@
+# ===============================================================================
+# Terraform Configuration for Oracle on Azure Infrastructure
+# ===============================================================================
+# This configuration provisions multiple isolated AKS environments across up to
+# five subscriptions, together with per-user Oracle Database@Azure networking.
+# Kubernetes users, credentials, and role assignments are created per workspace,
+# and each user receives an isolated ODAA VNet with no peering between users.
+# ===============================================================================
+
+# ===============================================================================
+# Local Values
+# ===============================================================================
+
+locals {
+ default_location = var.location
+ default_prefix = "user"
+ default_aks_vm_size = var.aks_vm_size
+ default_aks_os_disk_type = var.aks_os_disk_type
+ default_fqdn_odaa_fra = var.fqdn_odaa_fra
+ default_fqdn_odaa_app_fra = var.fqdn_odaa_app_fra
+ default_fqdn_odaa_par = var.fqdn_odaa_par
+ default_fqdn_odaa_app_par = var.fqdn_odaa_app_par
+
+ default_aks_cidr_base = var.aks_cidr_base
+ default_service_cidr = var.aks_service_cidr
+ default_odaa_cidr_base = var.odaa_cidr_base
+
+ common_tags = {
+ Project = local.microhack_event_name
+ ManagedBy = "Terraform"
+ }
+
+ subscription_targets = var.subscription_targets
+ subscription_target_count = length(local.subscription_targets)
+ user_indices = range(var.user_count)
+
+ deployments = {
+ for idx in local.user_indices :
+ tostring(idx) => {
+ index = idx # originates from the user count.
+ provider_index = idx % local.subscription_target_count # round robin assignment to subscription slots
+ subscription_id = local.subscription_targets[idx % local.subscription_target_count].subscription_id # round robin assignment to subscription id
+ tenant_id = var.tenant_id # single tenant ID for all deployments
+ postfix = format("%02d", idx)
+ prefix = local.default_prefix
+ location = local.default_location
+ aks_cidr = "10.0.0.0" # Same CIDR for all users (isolated by VNet, no cross-user peering)
+ aks_service_cidr = "172.${16 + floor(idx / 256)}.${idx % 256}.0/24" # Unique service CIDR in 172.16.0.0/12 to avoid 10.0.0.0/8 and 192.168.0.0/16 overlaps
+ aks_vm_size = local.default_aks_vm_size
+ aks_os_disk_type = local.default_aks_os_disk_type
+ odaa_cidr = local.default_odaa_cidr_base
+ fqdn_odaa_fra = local.default_fqdn_odaa_fra
+ fqdn_odaa_app_fra = local.default_fqdn_odaa_app_fra
+ fqdn_odaa_par = local.default_fqdn_odaa_par
+ fqdn_odaa_app_par = local.default_fqdn_odaa_app_par
+ name = format("%s%02d", local.default_prefix, idx)
+ user_identifier = lower(format("%s%02d", local.default_prefix, idx))
+ }
+ }
+
+ aks_deployments_by_slot = {
+ for idx in range(5) :
+ tostring(idx) => {
+ for key, deployment in local.deployments :
+ key => deployment if deployment.provider_index == idx
+ }
+ }
+
+ deployment_names = [for deployment in values(local.deployments) : deployment.name]
+
+ shared_deployment_group = {
+ name = "mh-odaa-user-grp"
+ description = "Security group with rights to deploy applications to the Oracle AKS cluster"
+ }
+
+ deployment_users = {
+ for key, deployment in local.deployments :
+ key => {
+ identifier = deployment.user_identifier
+ }
+ }
+}
+
+# ===============================================================================
+# Identity Configuration
+# ===============================================================================
+# User identities are managed separately in the identity/ folder to avoid
+# Azure AD eventual consistency issues. This configuration reads from the
+# identity/user_credentials.json file which contains user object IDs, UPNs,
+# group information, passwords, and event metadata.
+#
+# Workflow:
+# 1. Run 'terraform apply' in identity/ folder to create/manage users
+# 2. Run 'terraform apply' here to deploy infrastructure
+# ===============================================================================
+
+locals {
+  identity_file_path = var.identity_file_path != null ? var.identity_file_path : abspath("${path.root}/user_credentials.json")
+}
+
+data "local_file" "identity" {
+ filename = local.identity_file_path
+}
+
+locals {
+ # Parse the identity file
+ identity_data = jsondecode(data.local_file.identity.content)
+
+ # Event name from identity file (single source of truth)
+ # Falls back to variable if not present in file (backwards compatibility)
+ microhack_event_name = try(local.identity_data.microhack_event_name, var.microhack_event_name)
+
+ # User object IDs (map from index key to object ID)
+ deployment_user_object_ids = {
+ for idx in local.user_indices :
+ tostring(idx) => local.identity_data.users[format("user%02d", idx)].object_id
+ }
+
+ # User principal names
+ deployment_user_principal_names = {
+ for idx in local.user_indices :
+ tostring(idx) => local.identity_data.users[format("user%02d", idx)].user_principal_name
+ }
+
+ # Group information
+ identity_group_object_id = local.identity_data.group.object_id
+ identity_group_display_name = local.identity_data.group.display_name
+}
+
+# ===============================================================================
+# Oracle Cloud Enterprise App Access
+# ===============================================================================
+
+data "azuread_service_principal" "oracle_cloud" {
+ count = var.oracle_cloud_service_principal_object_id == null ? 0 : 1
+ object_id = var.oracle_cloud_service_principal_object_id
+}
+
+data "azurerm_subscription" "odaa" {
+ subscription_id = var.odaa_subscription_id
+}
+
+locals {
+ oracle_cloud_service_principal = var.oracle_cloud_service_principal_object_id == null ? null : try(data.azuread_service_principal.oracle_cloud[0], null)
+
+ oracle_cloud_app_roles = local.oracle_cloud_service_principal == null ? [] : [
+ for role in local.oracle_cloud_service_principal.app_roles : role
+ if role.enabled
+ ]
+
+ oracle_cloud_app_role_id_from_value = (
+ local.oracle_cloud_service_principal == null ? null : (
+ var.oracle_cloud_service_principal_app_role_value == null ? null : (
+ contains(keys(local.oracle_cloud_service_principal.app_role_ids), var.oracle_cloud_service_principal_app_role_value) ?
+ local.oracle_cloud_service_principal.app_role_ids[var.oracle_cloud_service_principal_app_role_value] :
+ try(([
+ for role in local.oracle_cloud_app_roles : role.id
+ if role.value == var.oracle_cloud_service_principal_app_role_value
+ ])[0], null)
+ )
+ )
+ )
+
+ oracle_cloud_app_role_id_by_display_name = local.oracle_cloud_service_principal == null ? null : try(([
+ for role in local.oracle_cloud_app_roles : role.id
+ if lower(role.display_name) == "user"
+ ])[0], null)
+
+ oracle_cloud_app_role_default_id = local.oracle_cloud_service_principal == null ? null : try(local.oracle_cloud_app_roles[0].id, null)
+
+ oracle_cloud_app_role_id = local.oracle_cloud_service_principal == null ? null : (
+ local.oracle_cloud_app_role_id_from_value != null ?
+ local.oracle_cloud_app_role_id_from_value : (
+ local.oracle_cloud_app_role_id_by_display_name != null ?
+ local.oracle_cloud_app_role_id_by_display_name :
+ local.oracle_cloud_app_role_default_id
+ )
+ )
+}
+
+# Commented out - App role assignment already exists in Azure AD (created manually)
+# resource "azuread_app_role_assignment" "oracle_cloud_group" {
+# count = local.oracle_cloud_app_role_id == null ? 0 : 1
+#
+# resource_object_id = local.oracle_cloud_service_principal.object_id
+# principal_object_id = local.identity_group_object_id
+# app_role_id = local.oracle_cloud_app_role_id
+#
+# lifecycle {
+# precondition {
+# condition = local.oracle_cloud_app_role_id != null
+# error_message = "Unable to determine an app role ID for the Oracle Cloud service principal. Ensure it exposes an enabled app role (for example 'User') or set 'oracle_cloud_service_principal_app_role_value' accordingly."
+# }
+# }
+# }
+
+# ===============================================================================
+# Role Assignments for Shared ODAA Resources
+# ===============================================================================
+
+resource "azurerm_role_assignment" "odaa_autonomous_database_admin_per_user" {
+ for_each = local.deployments
+
+ provider = azurerm.odaa
+ scope = local.odaa_modules[each.key].resource_group_id
+ role_definition_name = "Oracle.Database Autonomous Database Administrator"
+ principal_id = local.deployment_user_object_ids[each.key]
+ description = "Grants ${each.value.name} exclusive admin permissions for their Oracle Autonomous Database resources in resource group ${local.odaa_modules[each.key].resource_group_name}."
+}
+
+resource "azurerm_role_definition" "private_dns_zone_reader" {
+ name = "custom-private-dns-zone-reader"
+ scope = data.azurerm_subscription.odaa.id
+ description = "Allows read-only access to Private DNS Zones."
+
+ permissions {
+ actions = [
+ "Microsoft.Network/privateDnsZones/read",
+ "Microsoft.Network/privateDnsZones/*/read"
+ ]
+ }
+
+ assignable_scopes = [
+ data.azurerm_subscription.odaa.id
+ ]
+}
+
+resource "azurerm_role_assignment" "odaa_private_dns_zone_reader_group" {
+ provider = azurerm.odaa
+ scope = data.azurerm_subscription.odaa.id
+ role_definition_id = azurerm_role_definition.private_dns_zone_reader.role_definition_resource_id
+ principal_id = local.identity_group_object_id
+ description = "Grants ${local.identity_group_display_name} read access to Private DNS Zones across subscription ${data.azurerm_subscription.odaa.display_name}."
+}
+
+
+
+resource "azurerm_role_assignment" "odaa_subscription_manager_reader_group" {
+ provider = azurerm.odaa
+ scope = data.azurerm_subscription.odaa.id
+ role_definition_id = azurerm_role_definition.oracle_subscriptions_manager_reader.role_definition_resource_id
+ principal_id = local.identity_group_object_id
+ description = "Grants ${local.identity_group_display_name} read access to Oracle Subscription resources across subscription ${data.azurerm_subscription.odaa.display_name}."
+}
+
+# ===============================================================================
+# AKS Deployments per Subscription Slot
+# ===============================================================================
+
+module "aks_slot_0" {
+ for_each = local.aks_deployments_by_slot["0"]
+ source = "./modules/aks"
+
+ providers = {
+ azurerm = azurerm.aks_deployment_slot_0
+ }
+
+ prefix = each.value.prefix
+ postfix = each.value.postfix
+ location = each.value.location
+ cidr = each.value.aks_cidr
+ service_cidr = each.value.aks_service_cidr
+ aks_vm_size = each.value.aks_vm_size
+ os_disk_type = each.value.aks_os_disk_type
+ deployment_user_object_id = local.deployment_user_object_ids[each.key]
+ subscription_id = each.value.subscription_id
+ fqdn_odaa_fra = each.value.fqdn_odaa_fra
+ fqdn_odaa_app_fra = each.value.fqdn_odaa_app_fra
+ fqdn_odaa_par = each.value.fqdn_odaa_par
+ fqdn_odaa_app_par = each.value.fqdn_odaa_app_par
+ enabled_odaa_regions = var.enabled_odaa_regions
+
+ tags = merge(local.common_tags, {
+ AKSDeployment = each.value.name
+ })
+}
+
+module "aks_slot_1" {
+ for_each = local.aks_deployments_by_slot["1"]
+ source = "./modules/aks"
+
+ providers = {
+ azurerm = azurerm.aks_deployment_slot_1
+ }
+
+ prefix = each.value.prefix
+ postfix = each.value.postfix
+ location = each.value.location
+ cidr = each.value.aks_cidr
+ service_cidr = each.value.aks_service_cidr
+ aks_vm_size = each.value.aks_vm_size
+ os_disk_type = each.value.aks_os_disk_type
+ deployment_user_object_id = local.deployment_user_object_ids[each.key]
+ subscription_id = each.value.subscription_id
+ fqdn_odaa_fra = each.value.fqdn_odaa_fra
+ fqdn_odaa_app_fra = each.value.fqdn_odaa_app_fra
+ fqdn_odaa_par = each.value.fqdn_odaa_par
+ fqdn_odaa_app_par = each.value.fqdn_odaa_app_par
+ enabled_odaa_regions = var.enabled_odaa_regions
+
+ tags = merge(local.common_tags, {
+ AKSDeployment = each.value.name
+ })
+}
+
+module "aks_slot_2" {
+ for_each = local.aks_deployments_by_slot["2"]
+ source = "./modules/aks"
+
+ providers = {
+ azurerm = azurerm.aks_deployment_slot_2
+ }
+
+ prefix = each.value.prefix
+ postfix = each.value.postfix
+ location = each.value.location
+ cidr = each.value.aks_cidr
+ service_cidr = each.value.aks_service_cidr
+ aks_vm_size = each.value.aks_vm_size
+ os_disk_type = each.value.aks_os_disk_type
+ deployment_user_object_id = local.deployment_user_object_ids[each.key]
+ subscription_id = each.value.subscription_id
+ fqdn_odaa_fra = each.value.fqdn_odaa_fra
+ fqdn_odaa_app_fra = each.value.fqdn_odaa_app_fra
+ fqdn_odaa_par = each.value.fqdn_odaa_par
+ fqdn_odaa_app_par = each.value.fqdn_odaa_app_par
+ enabled_odaa_regions = var.enabled_odaa_regions
+
+ tags = merge(local.common_tags, {
+ AKSDeployment = each.value.name
+ })
+}
+
+module "aks_slot_3" {
+ for_each = local.aks_deployments_by_slot["3"]
+ source = "./modules/aks"
+
+ providers = {
+ azurerm = azurerm.aks_deployment_slot_3
+ }
+
+ prefix = each.value.prefix
+ postfix = each.value.postfix
+ location = each.value.location
+ cidr = each.value.aks_cidr
+ service_cidr = each.value.aks_service_cidr
+ aks_vm_size = each.value.aks_vm_size
+ os_disk_type = each.value.aks_os_disk_type
+ deployment_user_object_id = local.deployment_user_object_ids[each.key]
+ subscription_id = each.value.subscription_id
+ fqdn_odaa_fra = each.value.fqdn_odaa_fra
+ fqdn_odaa_app_fra = each.value.fqdn_odaa_app_fra
+ fqdn_odaa_par = each.value.fqdn_odaa_par
+ fqdn_odaa_app_par = each.value.fqdn_odaa_app_par
+ enabled_odaa_regions = var.enabled_odaa_regions
+
+ tags = merge(local.common_tags, {
+ AKSDeployment = each.value.name
+ })
+}
+
+module "aks_slot_4" {
+ for_each = local.aks_deployments_by_slot["4"]
+ source = "./modules/aks"
+
+ providers = {
+ azurerm = azurerm.aks_deployment_slot_4
+ }
+
+ prefix = each.value.prefix
+ postfix = each.value.postfix
+ location = each.value.location
+ cidr = each.value.aks_cidr
+ service_cidr = each.value.aks_service_cidr
+ aks_vm_size = each.value.aks_vm_size
+ os_disk_type = each.value.aks_os_disk_type
+ deployment_user_object_id = local.deployment_user_object_ids[each.key]
+ subscription_id = each.value.subscription_id
+ fqdn_odaa_fra = each.value.fqdn_odaa_fra
+ fqdn_odaa_app_fra = each.value.fqdn_odaa_app_fra
+ fqdn_odaa_par = each.value.fqdn_odaa_par
+ fqdn_odaa_app_par = each.value.fqdn_odaa_app_par
+ enabled_odaa_regions = var.enabled_odaa_regions
+
+ tags = merge(local.common_tags, {
+ AKSDeployment = each.value.name
+ })
+}
+
+locals {
+ aks_modules = merge(
+ module.aks_slot_0,
+ module.aks_slot_1,
+ module.aks_slot_2,
+ module.aks_slot_3,
+ module.aks_slot_4,
+ )
+}
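+
+# Because each slot module applies for_each over a disjoint subset of
+# deployment keys, merge() yields one map keyed by deployment key.
+# Illustrative lookup (hypothetical key "3"):
+#   local.aks_modules["3"].vnet_id  # VNet ID for deployment "3", regardless
+#                                   # of which slot module created it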
+
+# ===============================================================================
+# Ingress Controller Deployment
+# ===============================================================================
+# Ingress controllers are now deployed via the deploy-ingress-controllers.ps1
+# script after `terraform apply` completes. This approach removes the Helm
+# provider constraint and allows scaling to any number of users.
+#
+# To deploy ingress controllers:
+# .\scripts\deploy-ingress-controllers.ps1
+#
+# To uninstall ingress controllers:
+# .\scripts\deploy-ingress-controllers.ps1 -Uninstall
+# ===============================================================================
+
+# ===============================================================================
+# Per-User Oracle Database@Azure Networks (Isolated)
+# ===============================================================================
+# Each user gets their own ODAA VNet (192.168.0.0/16), isolated by VNet
+# boundaries. No peering is created between ODAA VNets, so users are fully
+# isolated from one another.
+
+module "odaa_slot_0" {
+ source = "./modules/odaa"
+
+ for_each = local.aks_deployments_by_slot["0"]
+
+ providers = {
+ azurerm = azurerm.odaa
+ }
+
+ prefix = each.value.prefix
+ postfix = each.value.postfix
+ location = each.value.location
+ cidr = each.value.odaa_cidr
+ password = null
+ create_autonomous_database = false
+
+ tags = merge(local.common_tags, {
+ ODAAFor = each.value.name
+ UserIndex = each.value.index
+ })
+}
+
+module "odaa_slot_1" {
+ source = "./modules/odaa"
+
+ for_each = local.aks_deployments_by_slot["1"]
+
+ providers = {
+ azurerm = azurerm.odaa
+ }
+
+ prefix = each.value.prefix
+ postfix = each.value.postfix
+ location = each.value.location
+ cidr = each.value.odaa_cidr
+ password = null
+ create_autonomous_database = false
+
+ tags = merge(local.common_tags, {
+ ODAAFor = each.value.name
+ UserIndex = each.value.index
+ })
+}
+
+module "odaa_slot_2" {
+ source = "./modules/odaa"
+
+ for_each = local.aks_deployments_by_slot["2"]
+
+ providers = {
+ azurerm = azurerm.odaa
+ }
+
+ prefix = each.value.prefix
+ postfix = each.value.postfix
+ location = each.value.location
+ cidr = each.value.odaa_cidr
+ password = null
+ create_autonomous_database = false
+
+ tags = merge(local.common_tags, {
+ ODAAFor = each.value.name
+ UserIndex = each.value.index
+ })
+}
+
+module "odaa_slot_3" {
+ source = "./modules/odaa"
+
+ for_each = local.aks_deployments_by_slot["3"]
+
+ providers = {
+ azurerm = azurerm.odaa
+ }
+
+ prefix = each.value.prefix
+ postfix = each.value.postfix
+ location = each.value.location
+ cidr = each.value.odaa_cidr
+ password = null
+ create_autonomous_database = false
+
+ tags = merge(local.common_tags, {
+ ODAAFor = each.value.name
+ UserIndex = each.value.index
+ })
+}
+
+module "odaa_slot_4" {
+ source = "./modules/odaa"
+
+ for_each = local.aks_deployments_by_slot["4"]
+
+ providers = {
+ azurerm = azurerm.odaa
+ }
+
+ prefix = each.value.prefix
+ postfix = each.value.postfix
+ location = each.value.location
+ cidr = each.value.odaa_cidr
+ password = null
+ create_autonomous_database = false
+
+ tags = merge(local.common_tags, {
+ ODAAFor = each.value.name
+ UserIndex = each.value.index
+ })
+}
+
+# ===============================================================================
+# ODAA Module Outputs Map
+# ===============================================================================
+# Maps each deployment key to its corresponding ODAA module output
+
+locals {
+ odaa_modules = merge(
+ module.odaa_slot_0,
+ module.odaa_slot_1,
+ module.odaa_slot_2,
+ module.odaa_slot_3,
+ module.odaa_slot_4
+ )
+}
+
+# ===============================================================================
+# Deterministic Suffix for ADB Names
+# ===============================================================================
+# Creates a human-readable suffix using airport code + creation date (e.g., par251102)
+# This ensures uniqueness while providing context about location and deployment time
+
+# Capture creation timestamp once (stable across future applies)
+resource "null_resource" "adb_creation_time" {
+ triggers = {
+ timestamp = timestamp()
+ }
+
+ lifecycle {
+ ignore_changes = [triggers]
+ }
+}
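+
+# Idiom note: timestamp() normally changes on every apply, but storing it in a
+# trigger and marking triggers with ignore_changes pins the value captured on
+# the first apply in state, so later applies reuse the original timestamp.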
+
+locals {
+ # Sanitize event name for OCI (alphanumeric only, max 8 chars)
+ sanitized_event_name = lower(replace(replace(replace(
+ substr(local.microhack_event_name, 0, 8),
+ "-", ""), "_", ""), ".", ""))
+
+ # Map Azure regions to IATA airport codes
+ location_to_airport_code = {
+ "francecentral" = "par" # Paris
+ "germanywestcentral" = "fra" # Frankfurt
+ }
+
+ # Get airport code for current location (fallback to first 3 chars if not found)
+ airport_code = lookup(
+ local.location_to_airport_code,
+ lower(var.location),
+    substr(replace(lower(var.location), "/[^a-z]/", ""), 0, 3)
+ )
+
+ # Create deterministic suffix: airport code + YYMMDD (e.g., par251102)
+ adb_descriptive_suffix = {
+ for key, deployment in local.deployments : key => lower(format("%s%s",
+ local.airport_code,
+ formatdate("YYMMDD", null_resource.adb_creation_time.triggers.timestamp)
+ ))
+ }
+}
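+
+# Worked example (hypothetical values): with location = "francecentral" and a
+# first apply on 2025-11-02, airport_code = "par" and every deployment key maps
+# to the suffix "par251102".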
+
+# ===============================================================================
+# Oracle Autonomous Databases
+# ===============================================================================
+# Creates ADB instances for each deployment. All ADBs are created in parallel.
+
+resource "azurerm_oracle_autonomous_database" "user" {
+ for_each = var.create_oracle_database ? local.deployments : {}
+
+ name = lower(format("%s%s%s%s",
+ local.sanitized_event_name,
+ each.value.prefix,
+ each.value.postfix,
+ local.adb_descriptive_suffix[each.key]
+ ))
+
+ display_name = lower(format("%s%s%s%s",
+ var.microhack_event_name,
+ each.value.prefix,
+ each.value.postfix,
+ local.adb_descriptive_suffix[each.key]
+ ))
+
+ resource_group_name = local.odaa_modules[each.key].resource_group_name
+ location = var.location
+
+ admin_password = var.adb_admin_password
+ allowed_ips = []
+ auto_scaling_enabled = false
+ auto_scaling_for_storage_enabled = false
+ backup_retention_period_in_days = 1
+ character_set = "AL32UTF8"
+ compute_count = 2
+ compute_model = "ECPU"
+ customer_contacts = ["maik.sandmann@gmx.net"]
+ data_storage_size_in_tbs = 1
+ db_version = "23ai"
+ db_workload = "OLTP"
+ license_model = "BringYourOwnLicense"
+ mtls_connection_required = false
+ national_character_set = "AL16UTF16"
+ subnet_id = local.odaa_modules[each.key].subnet_id
+ virtual_network_id = local.odaa_modules[each.key].vnet_id
+
+ tags = merge(local.common_tags, {
+ AKSDeployment = each.value.name
+ ODAAFor = each.value.name
+ })
+
+ depends_on = [
+ module.odaa_slot_0,
+ module.odaa_slot_1,
+ module.odaa_slot_2,
+ module.odaa_slot_3,
+ module.odaa_slot_4
+ ]
+}
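+
+# Naming example (hypothetical values): event name "odaamh25", prefix "user",
+# postfix "01" and suffix "par251102" produce the ADB name
+# "odaamh25user01par251102" (lowercase, alphanumeric only).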
+
+# ===============================================================================
+# VNet Peering Between AKS and Shared ODAA Network
+# ===============================================================================
+
+module "vnet_peering_slot_0" {
+ for_each = local.aks_deployments_by_slot["0"]
+ source = "./modules/vnet-peering"
+
+ providers = {
+ azurerm.aks = azurerm.aks_deployment_slot_0
+ azurerm.odaa = azurerm.odaa
+ }
+
+ aks_vnet_id = local.aks_modules[each.key].vnet_id
+ aks_vnet_name = local.aks_modules[each.key].vnet_name
+ aks_resource_group = local.aks_modules[each.key].resource_group_name
+ odaa_vnet_id = module.odaa_slot_0[each.key].vnet_id
+ odaa_vnet_name = module.odaa_slot_0[each.key].vnet_name
+ odaa_resource_group = module.odaa_slot_0[each.key].resource_group_name
+ odaa_subscription_id = var.odaa_subscription_id
+ peering_suffix = each.value.name
+
+ tags = merge(local.common_tags, {
+ AKSDeployment = each.value.name
+ PeeringFor = each.value.name
+ })
+}
+
+module "vnet_peering_slot_1" {
+ for_each = local.aks_deployments_by_slot["1"]
+ source = "./modules/vnet-peering"
+
+ providers = {
+ azurerm.aks = azurerm.aks_deployment_slot_1
+ azurerm.odaa = azurerm.odaa
+ }
+
+ aks_vnet_id = local.aks_modules[each.key].vnet_id
+ aks_vnet_name = local.aks_modules[each.key].vnet_name
+ aks_resource_group = local.aks_modules[each.key].resource_group_name
+ odaa_vnet_id = module.odaa_slot_1[each.key].vnet_id
+ odaa_vnet_name = module.odaa_slot_1[each.key].vnet_name
+ odaa_resource_group = module.odaa_slot_1[each.key].resource_group_name
+ odaa_subscription_id = var.odaa_subscription_id
+ peering_suffix = each.value.name
+
+ tags = merge(local.common_tags, {
+ AKSDeployment = each.value.name
+ PeeringFor = each.value.name
+ })
+}
+
+module "vnet_peering_slot_2" {
+ for_each = local.aks_deployments_by_slot["2"]
+ source = "./modules/vnet-peering"
+
+ providers = {
+ azurerm.aks = azurerm.aks_deployment_slot_2
+ azurerm.odaa = azurerm.odaa
+ }
+
+ aks_vnet_id = local.aks_modules[each.key].vnet_id
+ aks_vnet_name = local.aks_modules[each.key].vnet_name
+ aks_resource_group = local.aks_modules[each.key].resource_group_name
+ odaa_vnet_id = module.odaa_slot_2[each.key].vnet_id
+ odaa_vnet_name = module.odaa_slot_2[each.key].vnet_name
+ odaa_resource_group = module.odaa_slot_2[each.key].resource_group_name
+ odaa_subscription_id = var.odaa_subscription_id
+ peering_suffix = each.value.name
+
+ tags = merge(local.common_tags, {
+ AKSDeployment = each.value.name
+ PeeringFor = each.value.name
+ })
+}
+
+module "vnet_peering_slot_3" {
+ for_each = local.aks_deployments_by_slot["3"]
+ source = "./modules/vnet-peering"
+
+ providers = {
+ azurerm.aks = azurerm.aks_deployment_slot_3
+ azurerm.odaa = azurerm.odaa
+ }
+
+ aks_vnet_id = local.aks_modules[each.key].vnet_id
+ aks_vnet_name = local.aks_modules[each.key].vnet_name
+ aks_resource_group = local.aks_modules[each.key].resource_group_name
+ odaa_vnet_id = module.odaa_slot_3[each.key].vnet_id
+ odaa_vnet_name = module.odaa_slot_3[each.key].vnet_name
+ odaa_resource_group = module.odaa_slot_3[each.key].resource_group_name
+ odaa_subscription_id = var.odaa_subscription_id
+ peering_suffix = each.value.name
+
+ tags = merge(local.common_tags, {
+ AKSDeployment = each.value.name
+ PeeringFor = each.value.name
+ })
+}
+
+module "vnet_peering_slot_4" {
+ for_each = local.aks_deployments_by_slot["4"]
+ source = "./modules/vnet-peering"
+
+ providers = {
+ azurerm.aks = azurerm.aks_deployment_slot_4
+ azurerm.odaa = azurerm.odaa
+ }
+
+ aks_vnet_id = local.aks_modules[each.key].vnet_id
+ aks_vnet_name = local.aks_modules[each.key].vnet_name
+ aks_resource_group = local.aks_modules[each.key].resource_group_name
+ odaa_vnet_id = module.odaa_slot_4[each.key].vnet_id
+ odaa_vnet_name = module.odaa_slot_4[each.key].vnet_name
+ odaa_resource_group = module.odaa_slot_4[each.key].resource_group_name
+ odaa_subscription_id = var.odaa_subscription_id
+ peering_suffix = each.value.name
+
+ tags = merge(local.common_tags, {
+ AKSDeployment = each.value.name
+ PeeringFor = each.value.name
+ })
+}
+
+locals {
+ vnet_peering_modules = merge(
+ module.vnet_peering_slot_0,
+ module.vnet_peering_slot_1,
+ module.vnet_peering_slot_2,
+ module.vnet_peering_slot_3,
+ module.vnet_peering_slot_4,
+ )
+}
+
+# ===============================================================================
+# Outputs
+# ===============================================================================
+
+output "aks_clusters" {
+ description = "Information about all AKS clusters deployed"
+ value = {
+ for key, deployment in local.deployments : deployment.name => {
+ cluster_id = local.aks_modules[key].aks_cluster_id
+ cluster_name = local.aks_modules[key].aks_cluster_name
+ vnet_id = local.aks_modules[key].vnet_id
+ vnet_name = local.aks_modules[key].vnet_name
+ resource_group_name = local.aks_modules[key].resource_group_name
+ dns_zones = local.aks_modules[key].dns_zones
+ }
+ }
+}
+
+output "odaa_networks" {
+ description = "Information about the per-user ODAA networks"
+ value = {
+ for key, deployment in local.deployments : deployment.name => {
+ resource_group_name = local.odaa_modules[key].resource_group_name
+ resource_group_id = local.odaa_modules[key].resource_group_id
+ vnet_id = local.odaa_modules[key].vnet_id
+ vnet_name = local.odaa_modules[key].vnet_name
+ subnet_id = local.odaa_modules[key].subnet_id
+ }
+ }
+}
+
+output "odaa_autonomous_databases" {
+ description = "Oracle Autonomous Databases provisioned for each deployment"
+ value = {
+ for key, deployment in local.deployments : deployment.name => (
+ var.create_oracle_database && contains(keys(azurerm_oracle_autonomous_database.user), key) ?
+ {
+ id = azurerm_oracle_autonomous_database.user[key].id
+ name = azurerm_oracle_autonomous_database.user[key].name
+ display_name = azurerm_oracle_autonomous_database.user[key].display_name
+ resource_group_name = local.odaa_modules[key].resource_group_name
+ descriptive_suffix = local.adb_descriptive_suffix[key]
+ } : null
+ )
+ }
+}
+
+output "entra_id_deployment_group" {
+ description = "Information about the Entra ID deployment group"
+ value = {
+ for key, deployment in local.deployments : deployment.name => {
+ object_id = local.identity_group_object_id
+ display_name = local.identity_group_display_name
+ mail_nickname = local.shared_deployment_group.name
+ }
+ }
+}
+
+output "vnet_peering_connections" {
+ description = "Information about all VNet peering connections"
+ value = {
+ for key, deployment in local.deployments : deployment.name => {
+ aks_to_odaa_peering_id = local.vnet_peering_modules[key].aks_to_odaa_peering_id
+ odaa_to_aks_peering_id = local.vnet_peering_modules[key].odaa_to_aks_peering_id
+ }
+ }
+}
+
+output "deployment_summary" {
+ description = "Summary of all deployments"
+ value = {
+ total_aks_deployments = length(local.deployments)
+ deployment_names = local.deployment_names
+ odaa_subscription_id = var.odaa_subscription_id
+ identity_file_path = local.identity_file_path
+ entra_group_display_names = {
+ for key, deployment in local.deployments : deployment.name => local.identity_group_display_name
+ }
+ }
+}
+
+output "aks_kubeconfigs" {
+ description = "Kubeconfig files for all AKS clusters (for deployment automation only)"
+ value = {
+ for key, deployment in local.deployments : deployment.name => {
+ kubeconfig_raw = local.aks_modules[key].aks_cluster_kube_config_raw
+ cluster_name = local.aks_modules[key].aks_cluster_name
+ resource_group_name = local.aks_modules[key].resource_group_name
+ subscription_id = deployment.subscription_id
+ }
+ }
+ sensitive = true
+}
+
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/image copy.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/image copy.png
new file mode 100644
index 000000000..f8410bdb3
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/image copy.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/image.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/image.png
new file mode 100644
index 000000000..8d96e82a5
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/image.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/imagecopy.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/imagecopy.png
new file mode 100644
index 000000000..f3827b57d
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/imagecopy.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/oci_delegated_subnets_limit.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/oci_delegated_subnets_limit.png
new file mode 100644
index 000000000..b69db154e
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/media/oci_delegated_subnets_limit.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/aks/main.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/aks/main.tf
new file mode 100644
index 000000000..1053d8626
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/aks/main.tf
@@ -0,0 +1,375 @@
+# ===============================================================================
+# AKS Module - Main Configuration
+# ===============================================================================
+# This module creates an Azure Kubernetes Service cluster with supporting
+# infrastructure including virtual network, Log Analytics workspace, and
+# managed identity configuration.
+# ===============================================================================
+
+terraform {
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = "~> 4.0"
+ }
+ azapi = {
+ source = "azure/azapi"
+ version = "~> 2.0"
+ }
+ }
+}
+
+# ===============================================================================
+# Resource Group
+# ===============================================================================
+
+resource "azurerm_resource_group" "aks" {
+ name = "aks-${var.prefix}${var.postfix}"
+ location = var.location
+ tags = var.tags
+}
+
+locals {
+ # Azure automatically creates a managed node resource group named
+ # `MC__` for agent pools. Matching that
+ # pattern lets the service own the group lifecycle, so deleting the cluster
+ # tears down the node resource group without manual cleanup.
+ cluster_name = "aks-${var.prefix}${var.postfix}"
+ node_resource_group_name = "MC_${azurerm_resource_group.aks.name}_${local.cluster_name}"
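+
+  # Naming example (hypothetical): prefix = "user1", postfix = "a" gives
+  # cluster_name = "aks-user1a" and node resource group "MC_aks-user1a_aks-user1a".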
+
+ # Normalize enabled regions to lowercase for comparison
+ enabled_regions = [for r in var.enabled_odaa_regions : lower(r)]
+
+ # All possible DNS zone configurations
+ all_private_dns_configs = {
+ odaa_fra = {
+ zone_name = var.fqdn_odaa_fra
+ link_name = "aks-pdns-link-odaa-fra"
+ region = "frankfurt"
+ }
+ odaa_app_fra = {
+ zone_name = var.fqdn_odaa_app_fra
+ link_name = "aks-pdns-link-odaa-app-fra"
+ region = "frankfurt"
+ }
+ odaa_par = {
+ zone_name = var.fqdn_odaa_par
+ link_name = "aks-pdns-link-odaa-par"
+ region = "paris"
+ }
+ odaa_app_par = {
+ zone_name = var.fqdn_odaa_app_par
+ link_name = "aks-pdns-link-odaa-app-par"
+ region = "paris"
+ }
+ }
+
+ # Filter to only enabled regions
+ private_dns_configs = {
+ for key, config in local.all_private_dns_configs :
+ key => config if contains(local.enabled_regions, config.region)
+ }
+}
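+
+# Filter example: with enabled_odaa_regions = ["paris"], only the odaa_par and
+# odaa_app_par entries survive, so no Frankfurt DNS zones or VNet links are
+# created for this cluster.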
+
+# ===============================================================================
+# Log Analytics Workspace
+# ===============================================================================
+
+resource "azurerm_log_analytics_workspace" "aks" {
+ name = "aks-${var.prefix}${var.postfix}"
+ location = azurerm_resource_group.aks.location
+ resource_group_name = azurerm_resource_group.aks.name
+ sku = "PerGB2018"
+ retention_in_days = 30
+ tags = var.tags
+}
+
+# ===============================================================================
+# Virtual Network
+# ===============================================================================
+
+resource "azurerm_virtual_network" "aks" {
+ name = "aks-${var.prefix}${var.postfix}"
+ location = azurerm_resource_group.aks.location
+ resource_group_name = azurerm_resource_group.aks.name
+ address_space = ["${var.cidr}/16"]
+ tags = var.tags
+}
+
+resource "azurerm_subnet" "aks" {
+ name = "aks"
+ resource_group_name = azurerm_resource_group.aks.name
+ virtual_network_name = azurerm_virtual_network.aks.name
+ address_prefixes = ["${var.cidr}/23"]
+}
+
+# ===============================================================================
+# AKS Cluster
+# ===============================================================================
+
+resource "azurerm_kubernetes_cluster" "aks" {
+ name = local.cluster_name
+ location = azurerm_resource_group.aks.location
+ resource_group_name = azurerm_resource_group.aks.name
+ dns_prefix = "${var.prefix}${var.postfix}"
+ kubernetes_version = "1.32.6"
+ node_resource_group = local.node_resource_group_name
+ sku_tier = "Free"
+
+ # Network Profile
+ network_profile {
+ network_plugin = "azure"
+ network_policy = "azure"
+ dns_service_ip = cidrhost(var.service_cidr, 10) # deterministic CoreDNS VIP within the service CIDR
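+    # e.g. (hypothetical) service_cidr = "10.2.0.0/16" -> cidrhost(..., 10) = "10.2.0.10"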
+ service_cidr = var.service_cidr
+ load_balancer_sku = "standard"
+ outbound_type = "loadBalancer"
+ ip_versions = ["IPv4"]
+ }
+
+ # Default Node Pool (System)
+ default_node_pool {
+ name = "agentpool"
+ node_count = 2
+ vm_size = var.aks_vm_size
+ os_disk_size_gb = 128
+ os_disk_type = var.os_disk_type
+ vnet_subnet_id = azurerm_subnet.aks.id
+ max_pods = 30
+ type = "VirtualMachineScaleSets"
+ auto_scaling_enabled = false
+ orchestrator_version = "1.32.6"
+ node_public_ip_enabled = false
+
+ os_sku = "Ubuntu"
+ kubelet_disk_type = "OS"
+
+ upgrade_settings {
+ max_surge = "1"
+ drain_timeout_in_minutes = 0
+ node_soak_duration_in_minutes = 0
+ }
+ }
+
+ # Identity
+ identity {
+ type = "SystemAssigned"
+ }
+
+ # Auto Scaler Profile
+ auto_scaler_profile {
+ balance_similar_node_groups = false
+ expander = "random"
+ max_graceful_termination_sec = "600"
+ max_node_provisioning_time = "15m"
+ max_unready_nodes = 3
+ max_unready_percentage = 45
+ new_pod_scale_up_delay = "0s"
+ scale_down_delay_after_add = "10m"
+ scale_down_delay_after_delete = "10s"
+ scale_down_delay_after_failure = "3m"
+ scale_down_unneeded = "10m"
+ scale_down_unready = "20m"
+ scale_down_utilization_threshold = "0.5"
+ scan_interval = "10s"
+ skip_nodes_with_local_storage = false
+ skip_nodes_with_system_pods = true
+ }
+
+ # Workload Identity
+ workload_identity_enabled = true
+ oidc_issuer_enabled = true
+
+ # Monitor Metrics
+ monitor_metrics {
+ annotations_allowed = null
+ labels_allowed = null
+ }
+
+ # Add-ons
+ azure_policy_enabled = true
+
+ oms_agent {
+ log_analytics_workspace_id = azurerm_log_analytics_workspace.aks.id
+ msi_auth_for_monitoring_enabled = true
+ }
+
+ # Storage Profile
+ storage_profile {
+ blob_driver_enabled = false
+ disk_driver_enabled = true
+ file_driver_enabled = true
+ snapshot_controller_enabled = true
+ }
+
+ # Maintenance Configuration
+ maintenance_window_auto_upgrade {
+ frequency = "Weekly"
+ interval = 1
+ duration = 4
+ day_of_week = "Sunday"
+ start_time = "00:00"
+ utc_offset = "+01:00"
+ }
+
+ maintenance_window_node_os {
+ frequency = "Weekly"
+ interval = 1
+ duration = 4
+ day_of_week = "Sunday"
+ start_time = "00:00"
+ utc_offset = "+01:00"
+ }
+
+ tags = var.tags
+
+ depends_on = [
+ azurerm_subnet.aks
+ ]
+}
+
+# ===============================================================================
+# RBAC Role Assignments for Deployment Group
+# ===============================================================================
+
+# Azure Kubernetes Service Cluster User Role - allows getting cluster credentials
+resource "azurerm_role_assignment" "aks_cluster_user" {
+ scope = azurerm_kubernetes_cluster.aks.id
+ role_definition_name = "Azure Kubernetes Service Cluster User Role"
+ principal_id = var.deployment_user_object_id
+ description = "Allows the deployment user to get cluster credentials for ${azurerm_kubernetes_cluster.aks.name}"
+}
+
+# Azure Kubernetes Service RBAC Writer - allows full cluster access
+resource "azurerm_role_assignment" "aks_rbac_writer" {
+ scope = azurerm_kubernetes_cluster.aks.id
+ role_definition_name = "Azure Kubernetes Service RBAC Writer"
+ principal_id = var.deployment_user_object_id
+ description = "Allows the deployment user to deploy Kubernetes workloads in ${azurerm_kubernetes_cluster.aks.name}"
+}
+
+# Reader role for visibility into the AKS resource group
+resource "azurerm_role_assignment" "resource_group_reader" {
+ scope = azurerm_resource_group.aks.id
+ role_definition_name = "Reader"
+ principal_id = var.deployment_user_object_id
+ description = "Allows the deployment user to view resources in resource group ${azurerm_resource_group.aks.name}"
+}
+
+# ===============================================================================
+# ACR Pull Access
+# ===============================================================================
+
+# Grant the AKS cluster's kubelet managed identity pull access to the shared ACR
+resource "azurerm_role_assignment" "acr_pull" {
+  # NOTE: the scope below is the hardcoded resource ID of the shared microhack ACR (odaamh)
+  scope                            = "/subscriptions/09808f31-065f-4231-914d-776c2d6bbe34/resourceGroups/odaa/providers/Microsoft.ContainerRegistry/registries/odaamh"
+ role_definition_name = "AcrPull"
+ principal_id = azurerm_kubernetes_cluster.aks.kubelet_identity[0].object_id
+ skip_service_principal_aad_check = true
+ description = "Allows AKS cluster ${azurerm_kubernetes_cluster.aks.name} to pull images from odaamh ACR"
+}
+
+# ===============================================================================
+# User Node Pool
+# ===============================================================================
+
+resource "azurerm_kubernetes_cluster_node_pool" "user" {
+ name = "userpool"
+ kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id
+ vm_size = var.aks_vm_size
+ node_count = 1
+ os_disk_size_gb = 128
+ os_disk_type = var.os_disk_type
+ vnet_subnet_id = azurerm_subnet.aks.id
+ max_pods = 30
+ auto_scaling_enabled = true
+ min_count = 1
+ max_count = 2
+ orchestrator_version = "1.32.6"
+ node_public_ip_enabled = false
+ os_type = "Linux"
+ os_sku = "Ubuntu"
+ mode = "User"
+ kubelet_disk_type = "OS"
+
+ upgrade_settings {
+ max_surge = "10%"
+ drain_timeout_in_minutes = 0
+ node_soak_duration_in_minutes = 0
+ }
+
+ tags = var.tags
+}
+
+# ===============================================================================
+# Private DNS Zones (Integrated DNS)
+# ===============================================================================
+
+# Private DNS zones for Oracle endpoints (FRA and PAR base/app domains)
+resource "azurerm_private_dns_zone" "odaa" {
+ for_each = local.private_dns_configs
+
+ name = each.value.zone_name
+ resource_group_name = azurerm_resource_group.aks.name
+ tags = var.tags
+}
+
+resource "azurerm_private_dns_zone_virtual_network_link" "odaa" {
+ for_each = local.private_dns_configs
+
+ name = each.value.link_name
+ resource_group_name = azurerm_resource_group.aks.name
+ private_dns_zone_name = azurerm_private_dns_zone.odaa[each.key].name
+ virtual_network_id = azurerm_virtual_network.aks.id
+ registration_enabled = false
+ tags = var.tags
+}
+
+resource "azurerm_role_assignment" "private_dns_contributor_odaa" {
+ for_each = local.private_dns_configs
+
+ scope = azurerm_private_dns_zone.odaa[each.key].id
+ role_definition_name = "Private DNS Zone Contributor"
+ principal_id = var.deployment_user_object_id
+ description = "Allows the deployment user to manage private DNS zone ${azurerm_private_dns_zone.odaa[each.key].name}"
+}
+
+# ===============================================================================
+# Container Network Observability Logs
+# ===============================================================================
+# Enable Container Network Observability logs using AzAPI provider.
+# This feature provides Layer 3/4/7 network traffic visibility using eBPF/Cilium.
+#
+# Requirements:
+# - Cilium data plane (note: this cluster sets network_policy = "azure", which
+#   is not the Cilium data plane; switch to Azure CNI powered by Cilium first)
+# - Azure Monitor agent (configured via oms_agent block)
+# - Log Analytics workspace (already provisioned)
+#
+# Note: Commented out to avoid conflicts with in-progress node pool operations.
+# Uncomment and apply after initial cluster creation completes.
+# Reference: https://learn.microsoft.com/en-us/azure/aks/container-network-observability-logs
+# ===============================================================================
+
+# resource "azapi_update_resource" "enable_container_network_logs" {
+# type = "Microsoft.ContainerService/managedClusters@2024-05-01"
+# resource_id = azurerm_kubernetes_cluster.aks.id
+#
+# body = {
+# properties = {
+# networkProfile = {
+# advancedNetworking = {
+# observability = {
+# enabled = true
+# }
+# }
+# }
+# }
+# }
+#
+# depends_on = [
+# azurerm_kubernetes_cluster.aks,
+# azurerm_kubernetes_cluster_node_pool.user
+# ]
+# }
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/aks/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/aks/outputs.tf
new file mode 100644
index 000000000..41bb07ed0
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/aks/outputs.tf
@@ -0,0 +1,111 @@
+# ===============================================================================
+# AKS Module - Outputs
+# ===============================================================================
+
+output "aks_cluster_id" {
+ description = "The ID of the AKS cluster"
+ value = azurerm_kubernetes_cluster.aks.id
+}
+
+output "aks_cluster_name" {
+ description = "The name of the AKS cluster"
+ value = azurerm_kubernetes_cluster.aks.name
+}
+
+output "aks_cluster_fqdn" {
+ description = "The FQDN of the AKS cluster"
+ value = azurerm_kubernetes_cluster.aks.fqdn
+}
+
+output "aks_cluster_private_fqdn" {
+ description = "The private FQDN of the AKS cluster"
+ value = azurerm_kubernetes_cluster.aks.private_fqdn
+}
+
+output "aks_cluster_kube_config" {
+ description = "The Kubernetes configuration for the AKS cluster"
+ value = azurerm_kubernetes_cluster.aks.kube_config
+ sensitive = true
+}
+
+output "aks_cluster_kube_config_raw" {
+ description = "The raw kubeconfig for the AKS cluster"
+ value = azurerm_kubernetes_cluster.aks.kube_config_raw
+ sensitive = true
+}
+
+output "resource_group_name" {
+ description = "The name of the resource group"
+ value = azurerm_resource_group.aks.name
+}
+
+output "resource_group_id" {
+ description = "The ID of the resource group"
+ value = azurerm_resource_group.aks.id
+}
+
+output "vnet_id" {
+ description = "The ID of the virtual network"
+ value = azurerm_virtual_network.aks.id
+}
+
+output "vnet_name" {
+ description = "The name of the virtual network"
+ value = azurerm_virtual_network.aks.name
+}
+
+output "subnet_id" {
+ description = "The ID of the AKS subnet"
+ value = azurerm_subnet.aks.id
+}
+
+output "log_analytics_workspace_id" {
+ description = "The ID of the Log Analytics workspace"
+ value = azurerm_log_analytics_workspace.aks.id
+}
+
+output "aks_identity_principal_id" {
+ description = "The principal ID of the AKS cluster managed identity"
+ value = azurerm_kubernetes_cluster.aks.identity[0].principal_id
+}
+
+output "aks_identity_tenant_id" {
+ description = "The tenant ID of the AKS cluster managed identity"
+ value = azurerm_kubernetes_cluster.aks.identity[0].tenant_id
+}
+
+output "rbac_assignments" {
+ description = "Information about RBAC role assignments"
+ value = {
+ cluster_user_assignment = azurerm_role_assignment.aks_cluster_user.id
+ rbac_writer_assignment = azurerm_role_assignment.aks_rbac_writer.id
+ resource_group_reader_assignment = azurerm_role_assignment.resource_group_reader.id
+ acr_pull_assignment = azurerm_role_assignment.acr_pull.id
+ private_dns_contributor = {
+ for key, assignment in azurerm_role_assignment.private_dns_contributor_odaa :
+ key => assignment.id
+ }
+ }
+}
+
+# ===============================================================================
+# DNS Outputs
+# ===============================================================================
+
+output "dns_zones" {
+ description = "Information about the private DNS zones created"
+ value = {
+ zones = {
+ for key, zone in azurerm_private_dns_zone.odaa :
+ key => {
+ id = zone.id
+ name = zone.name
+ }
+ }
+
+ links = {
+ for key, link in azurerm_private_dns_zone_virtual_network_link.odaa :
+ key => link.id
+ }
+ }
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/aks/variables.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/aks/variables.tf
new file mode 100644
index 000000000..f1d2db056
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/aks/variables.tf
@@ -0,0 +1,101 @@
+# ===============================================================================
+# AKS Module - Variables
+# ===============================================================================
+
+variable "prefix" {
+ description = "The prefix for resource names"
+ type = string
+}
+
+variable "postfix" {
+ description = "The postfix for resource names"
+ type = string
+ default = ""
+}
+
+variable "location" {
+ description = "The Azure region where resources will be deployed"
+ type = string
+}
+
+variable "cidr" {
+ description = "The CIDR block for the virtual network"
+ type = string
+}
+
+variable "service_cidr" {
+ description = "Service CIDR for the AKS cluster"
+ type = string
+}
+
+variable "aks_vm_size" {
+ description = "The VM size for AKS node pools"
+ type = string
+ default = "Standard_D8ds_v6"
+}
+
+variable "os_disk_type" {
+ description = "The OS disk type for AKS node pools (Ephemeral or Managed)"
+ type = string
+ default = "Ephemeral"
+
+ validation {
+ condition = contains(["Ephemeral", "Managed"], var.os_disk_type)
+ error_message = "os_disk_type must be either 'Ephemeral' or 'Managed'."
+ }
+}
+
+variable "deployment_user_object_id" {
+ description = "The object ID of the Entra ID user that should have deployment access to AKS"
+ type = string
+}
+
+variable "subscription_id" {
+ description = "The subscription hosting the AKS resources"
+ type = string
+}
+
+# ===============================================================================
+# DNS Configuration Variables
+# ===============================================================================
+
+variable "fqdn_odaa_par" {
+ description = "The FQDN for Oracle Database on Autonomous Azure"
+ type = string
+ default = ""
+}
+
+variable "fqdn_odaa_app_par" {
+ description = "The FQDN for Oracle Database on Autonomous Azure applications"
+ type = string
+ default = ""
+}
+
+variable "fqdn_odaa_fra" {
+ description = "The FQDN for Oracle Database on Autonomous Azure"
+ type = string
+ default = ""
+}
+
+variable "fqdn_odaa_app_fra" {
+ description = "The FQDN for Oracle Database on Autonomous Azure applications"
+ type = string
+ default = ""
+}
+
+variable "tags" {
+ description = "A mapping of tags to assign to the resources"
+ type = map(string)
+ default = {}
+}
+
+variable "enabled_odaa_regions" {
+ description = "List of ODAA regions to create Private DNS zones for. Valid values: 'paris', 'frankfurt'"
+ type = list(string)
+ default = ["paris"]
+
+ validation {
+ condition = alltrue([for r in var.enabled_odaa_regions : contains(["paris", "frankfurt"], lower(r))])
+ error_message = "enabled_odaa_regions must only contain 'paris' and/or 'frankfurt'."
+ }
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/entra-id/main.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/entra-id/main.tf
new file mode 100644
index 000000000..b349c44d4
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/entra-id/main.tf
@@ -0,0 +1,191 @@
+# ===============================================================================
+# Entra ID Module - Main Configuration
+# ===============================================================================
+# This module creates an Entra ID security group for AKS deployment access
+# and assigns appropriate RBAC roles for Kubernetes deployment operations.
+# ===============================================================================
+
+terraform {
+  required_providers {
+    azuread = {
+      source  = "hashicorp/azuread"
+      version = "~> 2.0"
+    }
+    random = {
+      source  = "hashicorp/random"
+      version = "~> 3.0"
+    }
+    # time and null are used by time_sleep and null_resource below
+    time = {
+      source  = "hashicorp/time"
+      version = "~> 0.9"
+    }
+    null = {
+      source  = "hashicorp/null"
+      version = "~> 3.0"
+    }
+  }
+}
+
+# Get current Azure AD client configuration
+data "azuread_client_config" "current" {}
+
+locals {
+ user_principal_domain = lower(trimspace(coalesce(var.user_principal_domain, "not-set.local")))
+ user_catalog_entries = jsondecode(file("${path.root}/users.json"))
+
+ # Create a map of user configs using index-based lookup instead of identifier
+ user_configs = {
+ for key, user in var.users :
+ key => {
+ identifier = lower(user.identifier)
+ index = tonumber(key) # Convert string key to number
+ catalog_entry = (
+ tonumber(key) < length(local.user_catalog_entries) ?
+ local.user_catalog_entries[tonumber(key)] :
+ null
+ )
+ }
+ }
+
+ # Check if we have enough entries in users.json
+ users_json_count = length(local.user_catalog_entries)
+ required_count = length(var.users)
+
+ user_definitions = {
+ for key, cfg in local.user_configs :
+ key => {
+ identifier = cfg.identifier
+ display_name = cfg.catalog_entry != null ? "${cfg.catalog_entry.given_name} ${cfg.catalog_entry.surname}" : upper(cfg.identifier)
+ user_principal_name = format("%s@%s", cfg.identifier, local.user_principal_domain)
+ mail_nickname = cfg.identifier
+ given_name = cfg.catalog_entry != null ? cfg.catalog_entry.given_name : upper(cfg.identifier)
+ surname = cfg.catalog_entry != null ? cfg.catalog_entry.surname : "User"
+ employee_id = cfg.catalog_entry != null ? cfg.catalog_entry.hero_name : null
+ }
+ }
+}
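+
+# Assumed users.json shape (illustrative; only these fields are read here):
+#   [
+#     { "given_name": "Ada", "surname": "Lovelace", "hero_name": "Enchantress" },
+#     ...
+#   ]
+# Entry i in the array is matched to the user whose map key is "i".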
+
+resource "random_password" "aks_deployment_users" {
+ for_each = local.user_definitions
+
+ length = var.user_password_length
+ min_upper = 1
+ min_lower = 1
+ min_numeric = var.user_password_min_numeric
+ min_special = var.user_password_include_special ? 1 : 0
+ special = var.user_password_include_special
+ override_special = var.user_password_special_characters
+
+ # Keepers trigger password regeneration when changed
+ # Change user_reset_trigger to rotate all passwords
+ keepers = {
+ reset_trigger = var.user_reset_trigger
+ user_id = each.key
+ }
+
+ lifecycle {
+ precondition {
+ condition = var.user_password_length >= 8
+ error_message = "Entra ID passwords must be at least 8 characters long."
+ }
+
+ precondition {
+ condition = var.user_password_min_numeric > 0 || var.user_password_include_special
+ error_message = "Entra ID passwords must contain at least three character categories (uppercase, lowercase, numeric or special). Keep numeric characters or enable special characters."
+ }
+ }
+}
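+
+# Rotation sketch (hypothetical value): changing var.user_reset_trigger, e.g.
+# from "disabled" to "2025-06-reset", alters the keepers map, so every password
+# is regenerated on the next apply and pushed by null_resource.update_passwords.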
+
+# ===============================================================================
+# Entra ID Group for AKS Deployment
+# ===============================================================================
+# This group must already exist in Entra ID. Terraform will only add users to it
+# and assign roles to it. The group is not created or managed by Terraform.
+# ===============================================================================
+
+data "azuread_group" "aks_deployment" {
+ display_name = var.aks_deployment_group_name
+ security_enabled = true
+}
+
+resource "azuread_user" "aks_deployment_users" {
+ for_each = local.user_definitions
+
+ display_name = each.value.display_name
+ user_principal_name = each.value.user_principal_name
+ mail_nickname = each.value.mail_nickname
+ given_name = each.value.given_name
+ surname = each.value.surname
+ employee_id = each.value.employee_id
+ password = random_password.aks_deployment_users[each.key].result
+ force_password_change = true
+ account_enabled = true
+
+ lifecycle {
+ # CRITICAL: After initial creation, never modify the user resource.
+ # This prevents Azure AD eventual consistency issues on subsequent applies.
+ # Password updates are handled separately via null_resource.update_passwords
+ ignore_changes = all
+
+ precondition {
+ condition = local.users_json_count >= local.required_count
+ error_message = format("users.json contains %d entries but %d users are being deployed. Please add more entries to users.json.", local.users_json_count, local.required_count)
+ }
+ precondition {
+ condition = var.user_principal_domain != null && trimspace(var.user_principal_domain) != ""
+ error_message = "The user_principal_domain variable must be set to a valid domain."
+ }
+ }
+}
+
+# Wait for Azure AD eventual consistency after user creation (conditionally applied)
+# Only created if azuread_propagation_wait_seconds > 0
+# GitHub Issue: https://github.com/hashicorp/terraform-provider-azuread/issues/1810
+# Community reports show 60-90s delays are commonly needed; some environments require 180-300s or, in extreme cases, 48-72 hours
+# Azure AD has no SLA for Graph API replication times
+resource "time_sleep" "wait_for_user_propagation" {
+ count = var.azuread_propagation_wait_seconds > 0 ? 1 : 0
+ depends_on = [azuread_user.aks_deployment_users]
+
+ create_duration = "${var.azuread_propagation_wait_seconds}s"
+}
+
+resource "azuread_group_member" "aks_deployment_users" {
+ for_each = azuread_user.aks_deployment_users
+
+ group_object_id = data.azuread_group.aks_deployment.object_id
+ member_object_id = each.value.object_id
+
+ depends_on = [
+ azuread_user.aks_deployment_users,
+ time_sleep.wait_for_user_propagation
+ ]
+
+ lifecycle {
+ ignore_changes = all
+ }
+}
+
+# ===============================================================================
+# Password Rotation via Azure CLI
+# ===============================================================================
+# Since azuread_user has ignore_changes = all, we use a null_resource with
+# local-exec to update passwords when password_rotation_trigger changes.
+# This avoids touching the user resource and prevents race conditions.
+# ===============================================================================
+
+resource "null_resource" "update_passwords" {
+ for_each = local.user_definitions
+
+ # Trigger when user reset is requested
+ triggers = {
+ reset_trigger = var.user_reset_trigger
+ password_hash = sha256(random_password.aks_deployment_users[each.key].result)
+ user_upn = each.value.user_principal_name
+ }
+
+ # Update password via Azure CLI (works with service principal)
+ provisioner "local-exec" {
+ command = "az ad user update --id '${each.value.user_principal_name}' --password '${random_password.aks_deployment_users[each.key].result}' --force-change-password-next-sign-in true"
+ interpreter = ["pwsh", "-Command"]
+ on_failure = continue # Don't fail if user doesn't exist yet (first run)
+ }
+
+ depends_on = [
+ azuread_user.aks_deployment_users,
+ azuread_group_member.aks_deployment_users
+ ]
+}
+
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/entra-id/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/entra-id/outputs.tf
new file mode 100644
index 000000000..9e55f6868
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/entra-id/outputs.tf
@@ -0,0 +1,48 @@
+# ===============================================================================
+# Entra ID Module - Outputs
+# ===============================================================================
+
+output "group_object_id" {
+ description = "The object ID of the AKS deployment group"
+ value = data.azuread_group.aks_deployment.object_id
+}
+
+output "group_display_name" {
+ description = "The display name of the AKS deployment group"
+ value = data.azuread_group.aks_deployment.display_name
+}
+
+output "group_mail_nickname" {
+ description = "The mail nickname of the AKS deployment group"
+ value = data.azuread_group.aks_deployment.mail_nickname
+}
+
+output "user_credentials" {
+ description = "Initial credentials for the users created for this deployment group"
+ value = {
+ for key, user in azuread_user.aks_deployment_users :
+ key => {
+ display_name = user.display_name
+ user_principal_name = user.user_principal_name
+ initial_password = random_password.aks_deployment_users[key].result
+ }
+ }
+ sensitive = true
+}
+
+output "user_object_ids" {
+ description = "Object IDs for each user created by this module"
+ value = {
+ for key, user in azuread_user.aks_deployment_users :
+ key => user.object_id
+ }
+}
+
+output "user_principal_names" {
+ description = "User principal names for each created user"
+ value = {
+ for key, user in azuread_user.aks_deployment_users :
+ key => user.user_principal_name
+ }
+}
+
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/entra-id/variables.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/entra-id/variables.tf
new file mode 100644
index 000000000..5446dbf66
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/entra-id/variables.tf
@@ -0,0 +1,78 @@
+# ===============================================================================
+# Entra ID Module - Variables
+# ===============================================================================
+
+variable "aks_deployment_group_name" {
+ description = "Name of the Entra ID group for AKS deployment access"
+ type = string
+}
+
+variable "aks_deployment_group_description" {
+ description = "Description of the Entra ID group for AKS deployment access"
+ type = string
+ default = "Security group with rights to deploy applications to the Oracle AKS cluster"
+}
+
+variable "tenant_id" {
+ description = "Tenant ID where the Entra ID group should be created"
+ type = string
+}
+
+variable "user_principal_domain" {
+ description = "Optional domain used to construct user principal names when not supplied in the user catalog"
+ type = string
+ default = null
+}
+
+variable "users" {
+ description = "Map of user definitions keyed by deployment identifier"
+ type = map(object({
+ identifier = string
+ }))
+}
+
+variable "tags" {
+ description = "A mapping of tags to assign to the resources"
+ type = map(string)
+ default = {}
+}
+
+variable "user_password_length" {
+ description = "Length of the generated user passwords."
+ type = number
+ default = 12
+}
+
+variable "user_password_include_special" {
+ description = "Set to true to include special characters in generated passwords."
+ type = bool
+ default = false
+}
+
+variable "user_password_special_characters" {
+ description = "Special characters to use when user_password_include_special is true."
+ type = string
+ default = "!#$%&*()-_=+[]{}"
+}
+
+variable "user_password_min_numeric" {
+ description = "Minimum numeric characters in the generated user passwords."
+ type = number
+ default = 1
+}
+
+variable "azuread_propagation_wait_seconds" {
+ description = "Wait time in seconds for Azure AD changes to propagate before adding group membership. Set to 0 to disable wait."
+ type = number
+ default = 180
+}
+
+variable "user_reset_trigger" {
+ description = <<-EOT
+ Change this value to reset passwords for all users.
+ The passwords will be regenerated when this value changes.
+ Set to "disabled" to skip password rotation.
+ EOT
+ type = string
+ default = "disabled"
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/odaa/main.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/odaa/main.tf
new file mode 100644
index 000000000..77df560c6
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/odaa/main.tf
@@ -0,0 +1,111 @@
+# ===============================================================================
+# ODAA Module - Main Configuration
+# ===============================================================================
+# This module creates Oracle Database@Azure (ODAA) infrastructure
+# including virtual network, subnet with Oracle delegation, and autonomous database.
+# ===============================================================================
+
+# ===============================================================================
+# Resource Group
+# ===============================================================================
+
+resource "azurerm_resource_group" "odaa" {
+ name = "odaa-${var.prefix}${var.postfix}"
+ location = var.location
+ tags = var.tags
+}
+
+# ===============================================================================
+# Virtual Network
+# ===============================================================================
+
+resource "azurerm_virtual_network" "odaa" {
+ name = "odaa-${var.prefix}${var.postfix}"
+ location = azurerm_resource_group.odaa.location
+ resource_group_name = azurerm_resource_group.odaa.name
+ address_space = ["${var.cidr}/16"]
+ tags = var.tags
+}
+
+# ===============================================================================
+# Subnet with Oracle Delegation
+# ===============================================================================
+
+resource "azurerm_subnet" "odaa" {
+ name = "odaa-${var.prefix}${var.postfix}"
+ resource_group_name = azurerm_resource_group.odaa.name
+ virtual_network_name = azurerm_virtual_network.odaa.name
+ address_prefixes = ["${var.cidr}/24"]
+
+ delegation {
+ name = "oracle-delegation"
+ service_delegation {
+ name = "Oracle.Database/networkAttachments"
+ actions = [
+ "Microsoft.Network/virtualNetworks/subnets/action"
+ ]
+ }
+ }
+
+  # Create a replacement subnet before destroying this one, so replacements
+  # don't leave Oracle databases without a subnet
+ lifecycle {
+ create_before_destroy = true
+ }
+}
+
+# ===============================================================================
+# Provider Requirements
+# ===============================================================================
+terraform {
+  required_providers {
+    azurerm = {
+      source  = "hashicorp/azurerm"
+      version = "~> 4.0"
+    }
+  }
+}
+
+# ===============================================================================
+# Oracle Autonomous Database
+# ===============================================================================
+
+locals {
+ raw_autonomous_database_name = lower("odaa${var.prefix}${var.postfix}")
+ # Oracle Autonomous DB name must be alphanumeric; strip common separators.
+ autonomous_database_name = replace(
+ replace(
+ replace(
+ replace(local.raw_autonomous_database_name, "-", ""),
+ "_", ""),
+ ".", ""),
+ " ", "")
+}
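+
+# Sanitization example (hypothetical): prefix = "user-1", postfix = ".a" yields
+# the raw name "odaauser-1.a", which becomes "odaauser1a" after stripping separators.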
+
+resource "azurerm_oracle_autonomous_database" "autonomous" {
+ count = var.create_autonomous_database ? 1 : 0
+
+ name = local.autonomous_database_name
+ resource_group_name = azurerm_resource_group.odaa.name
+ location = azurerm_resource_group.odaa.location
+ display_name = local.autonomous_database_name
+
+ admin_password = var.password
+ allowed_ips = []
+ auto_scaling_enabled = false
+ auto_scaling_for_storage_enabled = false
+ backup_retention_period_in_days = 1
+ character_set = "AL32UTF8"
+ compute_count = 2
+ compute_model = "ECPU"
+ customer_contacts = ["maik.sandmann@gmx.net"]
+ data_storage_size_in_tbs = 1
+ db_version = "23ai"
+ db_workload = "OLTP"
+ license_model = "BringYourOwnLicense"
+ mtls_connection_required = false
+ national_character_set = "AL16UTF16"
+ subnet_id = azurerm_subnet.odaa.id
+ virtual_network_id = azurerm_virtual_network.odaa.id
+
+ tags = var.tags
+
+ depends_on = [
+ azurerm_subnet.odaa
+ ]
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/odaa/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/odaa/outputs.tf
new file mode 100644
index 000000000..b9672b6a0
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/odaa/outputs.tf
@@ -0,0 +1,38 @@
+# ===============================================================================
+# ODAA Module - Outputs
+# ===============================================================================
+
+output "resource_group_name" {
+ description = "The name of the ODAA resource group"
+ value = azurerm_resource_group.odaa.name
+}
+
+output "resource_group_id" {
+ description = "The ID of the ODAA resource group"
+ value = azurerm_resource_group.odaa.id
+}
+
+output "vnet_id" {
+ description = "The ID of the ODAA virtual network"
+ value = azurerm_virtual_network.odaa.id
+}
+
+output "vnet_name" {
+ description = "The name of the ODAA virtual network"
+ value = azurerm_virtual_network.odaa.name
+}
+
+output "subnet_id" {
+ description = "The ID of the ODAA subnet"
+ value = azurerm_subnet.odaa.id
+}
+
+output "adb_id" {
+ description = "The ID of the Oracle Autonomous Database"
+ value = length(azurerm_oracle_autonomous_database.autonomous) > 0 ? azurerm_oracle_autonomous_database.autonomous[0].id : null
+}
+
+output "adb_name" {
+ description = "The name of the Oracle Autonomous Database"
+ value = length(azurerm_oracle_autonomous_database.autonomous) > 0 ? azurerm_oracle_autonomous_database.autonomous[0].name : null
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/odaa/variables.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/odaa/variables.tf
new file mode 100644
index 000000000..c4af98345
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/odaa/variables.tf
@@ -0,0 +1,53 @@
+# ===============================================================================
+# ODAA Module - Variables
+# ===============================================================================
+
+variable "prefix" {
+ description = "The prefix for resource names"
+ type = string
+}
+
+variable "postfix" {
+ description = "The postfix for resource names"
+ type = string
+ default = ""
+}
+
+variable "location" {
+ description = "The Azure region where resources will be deployed"
+ type = string
+}
+
+variable "cidr" {
+ description = "The CIDR block for the virtual network"
+ type = string
+}
+
+variable "password" {
+ description = "The admin password for the Oracle Autonomous Database"
+ type = string
+ sensitive = true
+ default = null
+  # Cross-variable references in validation blocks require Terraform >= 1.9.
+  # coalesce() guards against null: Terraform's && / || do not short-circuit,
+  # so length(var.password) would fail whenever the password is unset.
+  validation {
+    condition = var.create_autonomous_database ? (
+      length(coalesce(var.password, "")) >= 12 &&
+      length(coalesce(var.password, "")) <= 30
+    ) : (
+      trimspace(coalesce(var.password, "")) == ""
+    )
+    error_message = "Provide an admin password (12-30 characters) when the Oracle Autonomous Database is enabled."
+  }
+}
+
+variable "tags" {
+ description = "A mapping of tags to assign to the resources"
+ type = map(string)
+ default = {}
+}
+
+variable "create_autonomous_database" {
+ description = "Controls whether the Oracle Autonomous Database is provisioned in this deployment."
+ type = bool
+ default = false
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/vnet-peering/main.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/vnet-peering/main.tf
new file mode 100644
index 000000000..b8d33f542
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/vnet-peering/main.tf
@@ -0,0 +1,64 @@
+# ===============================================================================
+# VNet Peering Module - Main Configuration
+# ===============================================================================
+# This module creates bidirectional VNet peering between AKS and ODAA networks
+# across different Azure subscriptions.
+# ===============================================================================
+
+terraform {
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = "~> 4.0"
+ configuration_aliases = [azurerm.aks, azurerm.odaa]
+ }
+ }
+}
+
+# ===============================================================================
+# VNet Peering: AKS to ODAA
+# ===============================================================================
+
+resource "azurerm_virtual_network_peering" "aks_to_odaa" {
+ provider = azurerm.aks
+ name = var.peering_suffix != "" ? "peer-aks-to-odaa-${var.peering_suffix}" : "peer-aks-to-odaa"
+ resource_group_name = var.aks_resource_group
+ virtual_network_name = var.aks_vnet_name
+ remote_virtual_network_id = var.odaa_vnet_id
+
+ # Peering settings
+ allow_virtual_network_access = true
+ allow_forwarded_traffic = true
+ allow_gateway_transit = false
+ use_remote_gateways = false
+
+ # Peering can be destroyed independently of subnets
+ # This prevents blocking subnet deletion
+ lifecycle {
+ create_before_destroy = false
+ }
+}
+
+# ===============================================================================
+# VNet Peering: ODAA to AKS
+# ===============================================================================
+
+resource "azurerm_virtual_network_peering" "odaa_to_aks" {
+ provider = azurerm.odaa
+ name = var.peering_suffix != "" ? "peer-odaa-to-aks-${var.peering_suffix}" : "peer-odaa-to-aks"
+ resource_group_name = var.odaa_resource_group
+ virtual_network_name = var.odaa_vnet_name
+ remote_virtual_network_id = var.aks_vnet_id
+
+ # Peering settings
+ allow_virtual_network_access = true
+ allow_forwarded_traffic = true
+ allow_gateway_transit = false
+ use_remote_gateways = false
+
+  # create_before_destroy = false is Terraform's default, made explicit here:
+  # the peering is destroyed before any replacement is created, so a stale
+  # duplicate peering never lingers to block subnet or VNet deletion.
+  lifecycle {
+    create_before_destroy = false
+  }
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/vnet-peering/outputs.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/vnet-peering/outputs.tf
new file mode 100644
index 000000000..2a8b6a326
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/vnet-peering/outputs.tf
@@ -0,0 +1,31 @@
+# ===============================================================================
+# VNet Peering Module - Outputs
+# ===============================================================================
+
+output "aks_to_odaa_peering_id" {
+ description = "The ID of the AKS to ODAA VNet peering"
+ value = azurerm_virtual_network_peering.aks_to_odaa.id
+}
+
+output "odaa_to_aks_peering_id" {
+ description = "The ID of the ODAA to AKS VNet peering"
+ value = azurerm_virtual_network_peering.odaa_to_aks.id
+}
+
+output "aks_vnet_info" {
+ description = "Information about the AKS virtual network"
+ value = {
+ id = var.aks_vnet_id
+ name = var.aks_vnet_name
+ resource_group = var.aks_resource_group
+ }
+}
+
+output "odaa_vnet_info" {
+ description = "Information about the ODAA virtual network"
+ value = {
+ id = var.odaa_vnet_id
+ name = var.odaa_vnet_name
+ resource_group = var.odaa_resource_group
+ }
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/vnet-peering/variables.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/vnet-peering/variables.tf
new file mode 100644
index 000000000..67215ba5d
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/modules/vnet-peering/variables.tf
@@ -0,0 +1,50 @@
+# ===============================================================================
+# VNet Peering Module - Variables
+# ===============================================================================
+
+variable "aks_vnet_id" {
+ description = "The resource ID of the AKS virtual network"
+ type = string
+}
+
+variable "aks_vnet_name" {
+ description = "The name of the AKS virtual network"
+ type = string
+}
+
+variable "aks_resource_group" {
+ description = "The name of the AKS resource group"
+ type = string
+}
+
+variable "odaa_vnet_id" {
+ description = "The resource ID of the ODAA virtual network"
+ type = string
+}
+
+variable "odaa_vnet_name" {
+ description = "The name of the ODAA virtual network"
+ type = string
+}
+
+variable "odaa_resource_group" {
+ description = "The name of the ODAA resource group"
+ type = string
+}
+
+variable "odaa_subscription_id" {
+ description = "The subscription ID where ODAA resources are deployed"
+ type = string
+}
+
+variable "peering_suffix" {
+ description = "Suffix to add to peering names for uniqueness across deployments"
+ type = string
+ default = ""
+}
+
+variable "tags" {
+ description = "A mapping of tags to assign to the resources"
+ type = map(string)
+ default = {}
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/policies.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/policies.tf
new file mode 100644
index 000000000..d825bd126
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/policies.tf
@@ -0,0 +1,56 @@
+# ===============================================================================
+# Azure Policy Definitions and Assignments
+# ===============================================================================
+# This file defines Azure Policy artifacts to control Oracle Autonomous Database
+# deployments across the "mhteams" management group.
+#
+# Current restriction: Location only (francecentral, germanywestcentral)
+# ===============================================================================
+
+data "azurerm_management_group" "mhteams" {
+ name = "mhteams"
+}
+
+locals {
+ oracle_autonomous_database_policy_scope = data.azurerm_management_group.mhteams.id
+}
+
+resource "azurerm_policy_definition" "oracle_autonomous_database_restrictions" {
+ name = "oracle-autonomous-database-restrictions"
+ display_name = "Restrict Oracle Autonomous Database to Allowed Regions"
+  description         = "Ensures Oracle Autonomous Database deployments are only allowed in the francecentral and germanywestcentral regions."
+ management_group_id = data.azurerm_management_group.mhteams.id
+ policy_type = "Custom"
+ mode = "All"
+
+ metadata = jsonencode({
+ category = "Oracle Database@Azure"
+ version = "1.2.0"
+ })
+
+ policy_rule = jsonencode({
+ if = {
+ allOf = [
+ {
+ field = "type"
+ equals = "Oracle.Database/autonomousDatabases"
+ },
+ {
+ field = "location"
+          notIn = ["francecentral", "germanywestcentral"]
+ }
+ ]
+ }
+ then = {
+ effect = "Deny"
+ }
+ })
+}
+
+resource "azurerm_management_group_policy_assignment" "oracle_autonomous_database_restrictions" {
+ name = "odaa-adb-constraints"
+ display_name = azurerm_policy_definition.oracle_autonomous_database_restrictions.display_name
+ description = "Restricts Oracle Autonomous Database deployments to francecentral and germanywestcentral regions only."
+ management_group_id = data.azurerm_management_group.mhteams.id
+ policy_definition_id = azurerm_policy_definition.oracle_autonomous_database_restrictions.id
+}
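+
+# Verification sketch: after apply, confirm the assignment landed at the
+# management group scope (query shape assumed).
+#   az policy assignment list \
+#     --scope /providers/Microsoft.Management/managementGroups/mhteams \
+#     --query "[?name=='odaa-adb-constraints']"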
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/providers.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/providers.tf
new file mode 100644
index 000000000..9f96f4866
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/providers.tf
@@ -0,0 +1,311 @@
+# ===============================================================================
+# Provider Configuration
+# ===============================================================================
+# This file configures the Azure providers with appropriate feature flags
+# and authentication settings for the Oracle on Azure infrastructure.
+# ===============================================================================
+
+# Default provider (used for Entra ID and shared resources)
+provider "azurerm" {
+ subscription_id = var.odaa_subscription_id
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+ features {
+ resource_group {
+ prevent_deletion_if_contains_resources = false
+ }
+
+ key_vault {
+ purge_soft_delete_on_destroy = true
+ recover_soft_deleted_key_vaults = true
+ purge_soft_deleted_certificates_on_destroy = true
+ purge_soft_deleted_keys_on_destroy = true
+ purge_soft_deleted_secrets_on_destroy = true
+ }
+
+ virtual_machine {
+ delete_os_disk_on_deletion = true
+ skip_shutdown_and_force_delete = false
+ }
+
+ managed_disk {
+ expand_without_downtime = true
+ }
+
+ log_analytics_workspace {
+ permanently_delete_on_destroy = true
+ }
+ }
+}
+
+# Provider aliases for up to five AKS subscriptions. Terraform cannot declare
+# provider blocks with count/for_each, so each deployment slot needs its own
+# manually written alias (and a matching manual module instance).
+provider "azurerm" {
+ alias = "aks_deployment_slot_0"
+ subscription_id = var.subscription_targets[0].subscription_id
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+
+ features {
+ resource_group {
+ prevent_deletion_if_contains_resources = false
+ }
+
+ key_vault {
+ purge_soft_delete_on_destroy = true
+ recover_soft_deleted_key_vaults = true
+ purge_soft_deleted_certificates_on_destroy = true
+ purge_soft_deleted_keys_on_destroy = true
+ purge_soft_deleted_secrets_on_destroy = true
+ }
+
+ virtual_machine {
+ delete_os_disk_on_deletion = true
+ skip_shutdown_and_force_delete = false
+ }
+
+ managed_disk {
+ expand_without_downtime = true
+ }
+
+ log_analytics_workspace {
+ permanently_delete_on_destroy = true
+ }
+ }
+}
+
+provider "azurerm" {
+ alias = "aks_deployment_slot_1"
+ subscription_id = var.subscription_targets[1].subscription_id
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+
+ features {
+ resource_group {
+ prevent_deletion_if_contains_resources = false
+ }
+
+ key_vault {
+ purge_soft_delete_on_destroy = true
+ recover_soft_deleted_key_vaults = true
+ purge_soft_deleted_certificates_on_destroy = true
+ purge_soft_deleted_keys_on_destroy = true
+ purge_soft_deleted_secrets_on_destroy = true
+ }
+
+ virtual_machine {
+ delete_os_disk_on_deletion = true
+ skip_shutdown_and_force_delete = false
+ }
+
+ managed_disk {
+ expand_without_downtime = true
+ }
+
+ log_analytics_workspace {
+ permanently_delete_on_destroy = true
+ }
+ }
+}
+
+provider "azurerm" {
+ alias = "aks_deployment_slot_2"
+ subscription_id = var.subscription_targets[2].subscription_id
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+
+ features {
+ resource_group {
+ prevent_deletion_if_contains_resources = false
+ }
+
+ key_vault {
+ purge_soft_delete_on_destroy = true
+ recover_soft_deleted_key_vaults = true
+ purge_soft_deleted_certificates_on_destroy = true
+ purge_soft_deleted_keys_on_destroy = true
+ purge_soft_deleted_secrets_on_destroy = true
+ }
+
+ virtual_machine {
+ delete_os_disk_on_deletion = true
+ skip_shutdown_and_force_delete = false
+ }
+
+ managed_disk {
+ expand_without_downtime = true
+ }
+
+ log_analytics_workspace {
+ permanently_delete_on_destroy = true
+ }
+ }
+}
+
+provider "azurerm" {
+ alias = "aks_deployment_slot_3"
+ subscription_id = var.subscription_targets[3].subscription_id
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+
+ features {
+ resource_group {
+ prevent_deletion_if_contains_resources = false
+ }
+
+ key_vault {
+ purge_soft_delete_on_destroy = true
+ recover_soft_deleted_key_vaults = true
+ purge_soft_deleted_certificates_on_destroy = true
+ purge_soft_deleted_keys_on_destroy = true
+ purge_soft_deleted_secrets_on_destroy = true
+ }
+
+ virtual_machine {
+ delete_os_disk_on_deletion = true
+ skip_shutdown_and_force_delete = false
+ }
+
+ managed_disk {
+ expand_without_downtime = true
+ }
+
+ log_analytics_workspace {
+ permanently_delete_on_destroy = true
+ }
+ }
+}
+
+provider "azurerm" {
+ alias = "aks_deployment_slot_4"
+ subscription_id = var.subscription_targets[4].subscription_id
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+
+ features {
+ resource_group {
+ prevent_deletion_if_contains_resources = false
+ }
+
+ key_vault {
+ purge_soft_delete_on_destroy = true
+ recover_soft_deleted_key_vaults = true
+ purge_soft_deleted_certificates_on_destroy = true
+ purge_soft_deleted_keys_on_destroy = true
+ purge_soft_deleted_secrets_on_destroy = true
+ }
+
+ virtual_machine {
+ delete_os_disk_on_deletion = true
+ skip_shutdown_and_force_delete = false
+ }
+
+ managed_disk {
+ expand_without_downtime = true
+ }
+
+ log_analytics_workspace {
+ permanently_delete_on_destroy = true
+ }
+ }
+}
+
+# Provider for ODAA subscription (single subscription for all ODAA VNets)
+provider "azurerm" {
+ alias = "odaa"
+ subscription_id = var.odaa_subscription_id
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+
+ features {
+ resource_group {
+ prevent_deletion_if_contains_resources = false
+ }
+
+ key_vault {
+ purge_soft_delete_on_destroy = true
+ recover_soft_deleted_key_vaults = true
+ purge_soft_deleted_certificates_on_destroy = true
+ purge_soft_deleted_keys_on_destroy = true
+ purge_soft_deleted_secrets_on_destroy = true
+ }
+
+ virtual_machine {
+ delete_os_disk_on_deletion = true
+ skip_shutdown_and_force_delete = false
+ }
+
+ managed_disk {
+ expand_without_downtime = true
+ }
+
+ log_analytics_workspace {
+ permanently_delete_on_destroy = true
+ }
+ }
+}
+
+# ===============================================================================
+# End of Providers Configuration
+# ===============================================================================
+
+# provider "azapi" {
+# # AzAPI provider configuration
+# # This provider is used for Oracle Database on Azure resources
+# }
+
+provider "azuread" {
+ # AzureAD provider configuration
+ # This provider is used for managing Entra ID (Azure Active Directory) resources
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+}
+
+# AzureAD provider aliases per AKS deployment slot
+provider "azuread" {
+ alias = "aks_deployment_slot_0"
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+}
+
+provider "azuread" {
+ alias = "aks_deployment_slot_1"
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+}
+
+provider "azuread" {
+ alias = "aks_deployment_slot_2"
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+}
+
+provider "azuread" {
+ alias = "aks_deployment_slot_3"
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+}
+
+provider "azuread" {
+ alias = "aks_deployment_slot_4"
+ tenant_id = var.tenant_id
+ client_id = var.client_id
+ client_secret = var.client_secret
+}
+
+# Get current Azure client configuration
+data "azurerm_client_config" "current" {}
+
+# Get current subscription information
+data "azurerm_subscription" "current" {}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/roles.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/roles.tf
new file mode 100644
index 000000000..52a05b782
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/roles.tf
@@ -0,0 +1,24 @@
+# ===============================================================================
+# Custom Azure RBAC role definitions
+# ===============================================================================
+
+resource "azurerm_role_definition" "oracle_subscriptions_manager_reader" {
+ name = "Oracle Subscriptions Manager Reader"
+ scope = "/providers/Microsoft.Management/managementGroups/mhteams"
+
+ description = "Grants reader access to Oracle Database@Azure subscription resources."
+
+ permissions {
+ actions = [
+ "Oracle.Database/Locations/*/read",
+ "Oracle.Database/oracleSubscriptions/*/read",
+ "Oracle.Database/oracleSubscriptions/listCloudAccountDetails/action"
+ ]
+ not_actions = []
+ }
+
+ assignable_scopes = [
+ "/providers/Microsoft.Management/managementGroups/mhteams",
+ "/subscriptions/${data.azurerm_subscription.odaa.subscription_id}"
+ ]
+}
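+
+# Assignment sketch (the principal variable is an assumption; wire the custom
+# role to the lab users at the same management group scope):
+#
+# resource "azurerm_role_assignment" "odaa_reader" {
+#   scope              = "/providers/Microsoft.Management/managementGroups/mhteams"
+#   role_definition_id = azurerm_role_definition.oracle_subscriptions_manager_reader.role_definition_resource_id
+#   principal_id       = var.odaa_user_group_object_id # assumed variable
+# }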
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/add-mfa-permission.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/add-mfa-permission.ps1
new file mode 100644
index 000000000..a584f4802
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/add-mfa-permission.ps1
@@ -0,0 +1,76 @@
+<#
+.SYNOPSIS
+ Add UserAuthenticationMethod.ReadWrite.All permission to the service principal.
+
+.DESCRIPTION
+ This script adds the Microsoft Graph permission required to reset MFA for users.
+ Must be run by a Global Administrator to grant admin consent.
+
+.PARAMETER ClientId
+ The Application (Client) ID of the service principal
+
+.EXAMPLE
+ .\add-mfa-permission.ps1 -ClientId "8a9f736e-4eb2-4484-ae90-2493f57102b3"
+#>
+
+[CmdletBinding()]
+param(
+ [Parameter(Mandatory = $true)]
+ [string]$ClientId
+)
+
+$ErrorActionPreference = "Stop"
+
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host "Adding MFA Management Permission" -ForegroundColor Cyan
+Write-Host "========================================" -ForegroundColor Cyan
+
+# Microsoft Graph App ID (constant)
+$graphAppId = "00000003-0000-0000-c000-000000000000"
+
+# Permission IDs for Microsoft Graph
+# UserAuthenticationMethod.ReadWrite.All (Application)
+$permissionId = "50483e42-d915-4231-9639-7fdb7fd190e5"
+
+Write-Host "Service Principal: $ClientId" -ForegroundColor Yellow
+Write-Host "Permission: UserAuthenticationMethod.ReadWrite.All (Application)" -ForegroundColor Yellow
+Write-Host ""
+
+# Add the permission
+Write-Host "Step 1: Adding API permission..." -ForegroundColor Cyan
+try {
+    az ad app permission add `
+        --id $ClientId `
+        --api $graphAppId `
+        --api-permissions "$permissionId=Role"
+
+    # az is a native command and does not throw on failure; check the exit code.
+    if ($LASTEXITCODE -ne 0) { throw "az exited with code $LASTEXITCODE" }
+    Write-Host "  Permission added successfully" -ForegroundColor Green
+} catch {
+    Write-Host "  Permission may already exist or error: $_" -ForegroundColor Yellow
+}
+
+# Grant admin consent
+Write-Host "`nStep 2: Granting admin consent..." -ForegroundColor Cyan
+Write-Host " (This requires Global Administrator privileges)" -ForegroundColor Yellow
+try {
+    az ad app permission admin-consent --id $ClientId
+    if ($LASTEXITCODE -ne 0) { throw "az exited with code $LASTEXITCODE" }
+    Write-Host "  Admin consent granted successfully" -ForegroundColor Green
+} catch {
+    Write-Host "  ERROR: Could not grant admin consent." -ForegroundColor Red
+    Write-Host "  Please grant consent manually in Azure Portal:" -ForegroundColor Yellow
+    Write-Host "    1. Go to: https://portal.azure.com" -ForegroundColor White
+    Write-Host "    2. Navigate to: Entra ID > App registrations > $ClientId" -ForegroundColor White
+    Write-Host "    3. Click: API permissions" -ForegroundColor White
+    Write-Host "    4. Click: Grant admin consent for <your tenant>" -ForegroundColor White
+    exit 1
+}
+
+Write-Host "`n========================================" -ForegroundColor Cyan
+Write-Host "Permission Added Successfully!" -ForegroundColor Green
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host ""
+Write-Host "The service principal can now reset MFA for users." -ForegroundColor White
+Write-Host "To trigger MFA reset via Terraform:" -ForegroundColor White
+Write-Host " 1. Edit identity/terraform.tfvars" -ForegroundColor Gray
+Write-Host " 2. Set: mfa_reset_trigger = `"`"" -ForegroundColor Gray
+Write-Host " 3. Run: terraform apply" -ForegroundColor Gray
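+
+# Verification sketch: list the app's configured Graph permissions and confirm
+# the UserAuthenticationMethod.ReadWrite.All role ID appears in the output.
+#   az ad app permission list --id $ClientId --query "[].resourceAccess[].id"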
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/cleanup-odaa-and-destroy.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/cleanup-odaa-and-destroy.ps1
new file mode 100644
index 000000000..64bc80b9d
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/cleanup-odaa-and-destroy.ps1
@@ -0,0 +1,330 @@
+<#
+.SYNOPSIS
+ Cleanup Oracle Database @ Azure (ODAA) resources with optional exclusions.
+
+.DESCRIPTION
+ This script deletes Oracle Autonomous Databases in Azure and their associated
+ OCI resources (subnets, NSGs, VCNs). You can exclude specific ADBs from deletion
+ by providing their Azure resource IDs or names.
+
+.PARAMETER ExcludeAdbIds
+ Array of Azure ADB resource IDs to exclude from deletion.
+ Example: "/subscriptions/.../providers/Oracle.Database/autonomousDatabases/adb-user00"
+
+.PARAMETER ExcludeAdbNames
+ Array of ADB names to exclude from deletion.
+ Example: @("adb-user00", "adb-user01")
+
+.PARAMETER WhatIf
+ Preview mode - shows what would be deleted without actually deleting.
+
+.PARAMETER SkipOciCleanup
+ Skip OCI resource cleanup (subnets, NSGs, VCNs). Only delete Azure ADBs.
+
+.EXAMPLE
+ # Delete all ADBs and OCI resources
+ .\cleanup-odaa-and-destroy.ps1
+
+.EXAMPLE
+ # Preview what would be deleted
+ .\cleanup-odaa-and-destroy.ps1 -WhatIf
+
+.EXAMPLE
+ # Exclude specific ADBs by name
+ .\cleanup-odaa-and-destroy.ps1 -ExcludeAdbNames @("adb-user00", "adb-user01")
+
+.EXAMPLE
+ # Exclude by Azure resource ID
+ .\cleanup-odaa-and-destroy.ps1 -ExcludeAdbIds @("/subscriptions/xxx/resourceGroups/rg-odaa-user00/providers/Oracle.Database/autonomousDatabases/adb-user00")
+
+.EXAMPLE
+ # Exclude and preview
+ .\cleanup-odaa-and-destroy.ps1 -ExcludeAdbNames @("adb-user00") -WhatIf
+#>
+
+[CmdletBinding(SupportsShouldProcess)]
+param(
+ [Parameter()]
+ [string[]]$ExcludeAdbIds = @(),
+
+ [Parameter()]
+ [string[]]$ExcludeAdbNames = @(),
+
+ [Parameter()]
+ [switch]$SkipOciCleanup
+)
+
+$ErrorActionPreference = "Continue"
+$compartmentId = "ocid1.compartment.oc1..aaaaaaaayehuog6myqxudqejx3ddy6bzkr2f3dnjuuygs424taimn4av4wbq"
+
+# ===============================================================================
+# Helper Functions
+# ===============================================================================
+
+function Get-AdbOciVcnId {
+ <#
+ .SYNOPSIS
+ Extract the OCI VCN ID associated with an Azure ADB by querying its details.
+ #>
+ param([string]$AdbName, [string]$ResourceGroup)
+
+ try {
+ $adbDetails = az oracle-database autonomous-database show `
+ --name $AdbName `
+ --resource-group $ResourceGroup `
+ --query "properties" -o json 2>$null | ConvertFrom-Json
+
+ # The subnetId in Azure maps to an OCI subnet, which belongs to a VCN
+ # We'll extract info from the OCI URL or naming convention
+ if ($adbDetails.ociUrl) {
+ return $adbDetails.ociUrl
+ }
+ return $null
+ } catch {
+ return $null
+ }
+}
+
+# ===============================================================================
+# Main Script
+# ===============================================================================
+
+Write-Host ""
+Write-Host "======================================================================" -ForegroundColor Cyan
+Write-Host " ODAA CLEANUP - Oracle Database @ Azure Destroyer " -ForegroundColor Cyan
+Write-Host "======================================================================" -ForegroundColor Cyan
+Write-Host ""
+
+if ($WhatIfPreference -or $PSBoundParameters.ContainsKey('WhatIf')) {
+ Write-Host " [PREVIEW MODE] No resources will be deleted" -ForegroundColor Yellow
+ Write-Host ""
+}
+
+# Set subscription
+Write-Host "Setting subscription to sub-mhodaa..." -ForegroundColor Gray
+az account set -s sub-mhodaa
+
+# ===============================================================================
+# Step 1: List and Filter ADB Instances
+# ===============================================================================
+
+Write-Host "`n[Step 1] Discovering Azure ADB instances..." -ForegroundColor Cyan
+$adbQuery = '[].{name:name, resourceGroup:resourceGroup, id:id, provisioningState:properties.provisioningState, lifecycleState:properties.lifecycleState}'
+$adbJson = az oracle-database autonomous-database list --query $adbQuery -o json 2>$null
+$adbInstances = @()
+if ($adbJson) {
+ $adbInstances = $adbJson | ConvertFrom-Json
+}
+
+if (-not $adbInstances -or $adbInstances.Count -eq 0) {
+ Write-Host " No ADB instances found." -ForegroundColor Yellow
+} else {
+ Write-Host " Found $($adbInstances.Count) ADB instance(s)" -ForegroundColor White
+}
+
+# Separate into delete and exclude lists
+$adbsToDelete = @()
+$adbsToKeep = @()
+$excludedVcnPatterns = @()
+
+foreach ($Instance in $adbInstances) {
+ $shouldExclude = $false
+ $excludeReason = ""
+
+ # Check exclusion by ID
+ if ($ExcludeAdbIds -contains $Instance.id) {
+ $shouldExclude = $true
+ $excludeReason = "ID in exclusion list"
+ }
+
+ # Check exclusion by name
+ if ($ExcludeAdbNames -contains $Instance.name) {
+ $shouldExclude = $true
+ $excludeReason = "Name in exclusion list"
+ }
+
+ if ($shouldExclude) {
+ $adbsToKeep += $Instance
+ $excludedVcnPatterns += $Instance.name # Use ADB name to match OCI resources
+ Write-Host " [EXCLUDE] $($Instance.name) - $excludeReason" -ForegroundColor Yellow
+ } else {
+ $adbsToDelete += $Instance
+ Write-Host " [DELETE] $($Instance.name) ($($Instance.lifecycleState))" -ForegroundColor Red
+ }
+}
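+# Note: exclusion checks above use exact name/ID matches, while the OCI cleanup
+# in Step 3 reuses each excluded ADB name as a wildcard pattern ("*name*"), so
+# excluding "adb-user0" also protects OCI resources named for "adb-user00".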
+
+Write-Host ""
+Write-Host " Summary: $($adbsToDelete.Count) to delete, $($adbsToKeep.Count) excluded" -ForegroundColor White
+
+# ===============================================================================
+# Step 2: Delete ADB Instances (excluding protected ones)
+# ===============================================================================
+
+Write-Host "`n[Step 2] Deleting Azure ADB instances..." -ForegroundColor Cyan
+
+foreach ($Instance in $adbsToDelete) {
+ if ($WhatIfPreference -or $PSBoundParameters.ContainsKey('WhatIf')) {
+ Write-Host " [WOULD DELETE] $($Instance.name) in $($Instance.resourceGroup)" -ForegroundColor Magenta
+ } else {
+ Write-Host " Deleting: $($Instance.name) (this may take several minutes)..." -ForegroundColor White
+ Write-Host " Command: az oracle-database autonomous-database delete --name $($Instance.name) --resource-group $($Instance.resourceGroup) --yes" -ForegroundColor Gray
+
+ $deleteResult = az oracle-database autonomous-database delete `
+ --name $Instance.name `
+ --resource-group $Instance.resourceGroup `
+ --yes --verbose 2>&1
+
+ if ($LASTEXITCODE -eq 0) {
+ Write-Host " [OK] Successfully deleted $($Instance.name)" -ForegroundColor Green
+ } else {
+ Write-Host " [FAIL] Failed to delete $($Instance.name): $deleteResult" -ForegroundColor Red
+ }
+ }
+}
+
+# ===============================================================================
+# Step 3: OCI Resource Cleanup (with exclusions)
+# ===============================================================================
+
+if ($SkipOciCleanup) {
+ Write-Host "`n[Step 3] Skipping OCI cleanup (--SkipOciCleanup specified)" -ForegroundColor Yellow
+} else {
+ Write-Host "`n[Step 3] Cleaning up OCI resources..." -ForegroundColor Cyan
+
+ if ($excludedVcnPatterns.Count -gt 0) {
+ Write-Host " Protected patterns (will skip): $($excludedVcnPatterns -join ', ')" -ForegroundColor Yellow
+ }
+
+ # -------------------------------------------------------------------------
+ # 3a: Delete Subnets (excluding those matching protected ADBs)
+ # -------------------------------------------------------------------------
+ Write-Host "`n [3a] Processing OCI Subnets..." -ForegroundColor Cyan
+ $subnetQuery = 'data[].{id:id, displayName:"display-name", vcnId:"vcn-id"}'
+ $subnetJson = oci network subnet list --compartment-id $compartmentId --all --query $subnetQuery 2>$null
+ $subnets = @()
+ if ($subnetJson) {
+ $subnets = $subnetJson | ConvertFrom-Json
+ }
+
+ foreach ($subnet in $subnets) {
+ $subnetName = $subnet.displayName
+ $shouldSkip = $false
+
+ # Check if subnet name contains any excluded ADB pattern
+ foreach ($pattern in $excludedVcnPatterns) {
+ if ($subnetName -like "*$pattern*") {
+ $shouldSkip = $true
+ break
+ }
+ }
+
+ if ($shouldSkip) {
+ Write-Host " [SKIP] Subnet: $subnetName (protected)" -ForegroundColor Yellow
+ } elseif ($WhatIfPreference -or $PSBoundParameters.ContainsKey('WhatIf')) {
+ Write-Host " [WOULD DELETE] Subnet: $subnetName" -ForegroundColor Magenta
+ } else {
+ Write-Host " Deleting Subnet: $subnetName" -ForegroundColor White
+ oci network subnet delete --subnet-id $subnet.id --force --wait-for-state TERMINATED 2>$null
+ if ($LASTEXITCODE -eq 0) {
+ Write-Host " [OK] Deleted" -ForegroundColor Green
+ } else {
+ Write-Host " [WARN] Failed (may be in use)" -ForegroundColor Yellow
+ }
+ }
+ }
+
+ # -------------------------------------------------------------------------
+ # 3b: Delete NSGs (excluding those matching protected ADBs)
+ # -------------------------------------------------------------------------
+ Write-Host "`n [3b] Processing OCI Network Security Groups..." -ForegroundColor Cyan
+ $nsgQuery = 'data[].{id:id, displayName:"display-name", vcnId:"vcn-id"}'
+ $nsgJson = oci network nsg list --compartment-id $compartmentId --all --query $nsgQuery 2>$null
+ $nsgs = @()
+ if ($nsgJson) {
+ $nsgs = $nsgJson | ConvertFrom-Json
+ }
+
+ foreach ($nsg in $nsgs) {
+ $nsgName = $nsg.displayName
+ $shouldSkip = $false
+
+ foreach ($pattern in $excludedVcnPatterns) {
+ if ($nsgName -like "*$pattern*") {
+ $shouldSkip = $true
+ break
+ }
+ }
+
+ if ($shouldSkip) {
+ Write-Host " [SKIP] NSG: $nsgName (protected)" -ForegroundColor Yellow
+ } elseif ($WhatIfPreference -or $PSBoundParameters.ContainsKey('WhatIf')) {
+ Write-Host " [WOULD DELETE] NSG: $nsgName" -ForegroundColor Magenta
+ } else {
+ Write-Host " Deleting NSG: $nsgName" -ForegroundColor White
+ oci network nsg delete --nsg-id $nsg.id --force --wait-for-state TERMINATED 2>$null
+ if ($LASTEXITCODE -eq 0) {
+ Write-Host " [OK] Deleted" -ForegroundColor Green
+ } else {
+ Write-Host " [WARN] Failed (may have VNICs attached)" -ForegroundColor Yellow
+ }
+ }
+ }
+
+ # -------------------------------------------------------------------------
+ # 3c: Delete VCNs (excluding those matching protected ADBs)
+ # -------------------------------------------------------------------------
+ Write-Host "`n [3c] Processing OCI VCNs..." -ForegroundColor Cyan
+ $vcnQuery = 'data[].{id:id, displayName:"display-name"}'
+ $vcnJson = oci network vcn list --compartment-id $compartmentId --all --query $vcnQuery 2>$null
+ $vcns = @()
+ if ($vcnJson) {
+ $vcns = $vcnJson | ConvertFrom-Json
+ }
+
+ foreach ($vcn in $vcns) {
+ $vcnName = $vcn.displayName
+ $shouldSkip = $false
+
+ foreach ($pattern in $excludedVcnPatterns) {
+ if ($vcnName -like "*$pattern*") {
+ $shouldSkip = $true
+ break
+ }
+ }
+
+ if ($shouldSkip) {
+ Write-Host " [SKIP] VCN: $vcnName (protected)" -ForegroundColor Yellow
+ } elseif ($WhatIfPreference -or $PSBoundParameters.ContainsKey('WhatIf')) {
+ Write-Host " [WOULD DELETE] VCN: $vcnName" -ForegroundColor Magenta
+ } else {
+ Write-Host " Deleting VCN: $vcnName" -ForegroundColor White
+ oci network vcn delete --vcn-id $vcn.id --force --wait-for-state TERMINATED 2>$null
+ if ($LASTEXITCODE -eq 0) {
+ Write-Host " [OK] Deleted" -ForegroundColor Green
+ } else {
+ Write-Host " [WARN] Failed (may have dependencies)" -ForegroundColor Yellow
+ }
+ }
+ }
+}
+
+# ===============================================================================
+# Summary
+# ===============================================================================
+
+Write-Host ""
+Write-Host "======================================================================" -ForegroundColor Green
+Write-Host " CLEANUP COMPLETE " -ForegroundColor Green
+Write-Host "======================================================================" -ForegroundColor Green
+Write-Host ""
+Write-Host " ADBs deleted: $($adbsToDelete.Count)" -ForegroundColor White
+Write-Host " ADBs excluded: $($adbsToKeep.Count)" -ForegroundColor Yellow
+
+if ($adbsToKeep.Count -gt 0) {
+ Write-Host "`n Preserved ADBs:" -ForegroundColor Yellow
+ foreach ($kept in $adbsToKeep) {
+ Write-Host " - $($kept.name) ($($kept.resourceGroup))" -ForegroundColor Yellow
+ }
+}
+
+Write-Host ""
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/deploy-base-pods.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/deploy-base-pods.ps1
new file mode 100644
index 000000000..428925a14
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/deploy-base-pods.ps1
@@ -0,0 +1,255 @@
+<#
+.SYNOPSIS
+ Deploy ingress-nginx and K8s manifests to all AKS clusters.
+
+.DESCRIPTION
+ This script deploys ingress-nginx Helm chart and Kubernetes manifests
+ to all AKS clusters from Terraform output. You can exclude specific
+ clusters from deployment. The script automatically starts stopped clusters
+ and stops them again after deployment to save costs.
+
+.PARAMETER ExcludeAksClusters
+ Array of AKS cluster names or resource IDs to exclude from deployment.
+ Example: @("aks-user00", "aks-user01")
+ Example: @("/subscriptions/.../managedClusters/aks-user00")
+
+.PARAMETER CleanupNamespace
+ Name of the namespace to delete before deploying manifests.
+ If the namespace exists, it will be deleted along with all its resources.
+    Default: "microhacks"
+
+.PARAMETER SkipNamespaceCleanup
+ Skip the namespace cleanup step.
+
+.PARAMETER KeepRunning
+ Do not stop clusters after deployment (keep them running even if they were originally stopped).
+
+.PARAMETER ShowDebug
+ Enable debug output for troubleshooting.
+
+.EXAMPLE
+ # Deploy to all clusters
+ .\deploy-base-pods.ps1
+
+.EXAMPLE
+ # Exclude specific clusters by name
+ .\deploy-base-pods.ps1 -ExcludeAksClusters @("aks-user00", "aks-user01")
+
+.EXAMPLE
+ # Exclude by resource ID
+ .\deploy-base-pods.ps1 -ExcludeAksClusters @("/subscriptions/xxx/resourceGroups/aks-user00/providers/Microsoft.ContainerService/managedClusters/aks-user00")
+
+.EXAMPLE
+ # Skip namespace cleanup
+ .\deploy-base-pods.ps1 -SkipNamespaceCleanup
+
+.EXAMPLE
+ # Keep clusters running after deployment
+ .\deploy-base-pods.ps1 -KeepRunning
+
+.EXAMPLE
+ # Exclude clusters with debug output
+ .\deploy-base-pods.ps1 -ExcludeAksClusters @("aks-user00") -ShowDebug
+#>
+
+param(
+ [Parameter()]
+ [string[]]$ExcludeAksClusters = @(),
+
+ [Parameter()]
+ [string]$CleanupNamespace = "microhacks",
+
+ [switch]$SkipNamespaceCleanup,
+
+ [switch]$KeepRunning,
+
+ [switch]$ShowDebug
+)
+
+$ErrorActionPreference = "Continue"
+$K8sPath = "$PSScriptRoot\k8s"
+
+# Helper function to check if cluster should be excluded
+function Test-ClusterExcluded {
+ param([string]$ClusterName, [string]$ResourceGroup, [string]$SubscriptionId)
+
+ foreach ($exclusion in $ExcludeAksClusters) {
+ # Check if exclusion matches cluster name
+ if ($exclusion -eq $ClusterName) {
+ return $true
+ }
+ # Check if exclusion matches resource ID pattern
+ if ($exclusion -match "/managedClusters/$ClusterName$") {
+ return $true
+ }
+ }
+ return $false
+}
+
+# Show exclusions if any
+if ($ExcludeAksClusters.Count -gt 0) {
+ Write-Host "Excluding clusters: $($ExcludeAksClusters -join ', ')" -ForegroundColor Yellow
+ Write-Host ""
+}
+
+# Update Helm repositories
+Write-Host "Updating Helm repositories..." -ForegroundColor Cyan
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx 2>&1 | Out-Null
+helm repo update 2>&1 | Out-Null
+Write-Host "Helm repositories updated.`n" -ForegroundColor Green
+
+# Get kubeconfigs from Terraform output
+$tfOutput = terraform output -json aks_kubeconfigs | ConvertFrom-Json
+
+# Deploy to each cluster
+foreach ($property in $tfOutput.PSObject.Properties) {
+ $userKey = $property.Name
+ $aksClusterName = $property.Value.cluster_name
+ $resourceGroup = $property.Value.resource_group_name
+ $subscriptionId = $property.Value.subscription_id
+
+ # Check if cluster should be excluded
+ if (Test-ClusterExcluded -ClusterName $aksClusterName -ResourceGroup $resourceGroup -SubscriptionId $subscriptionId) {
+ Write-Host "$aksClusterName SKIPPED (excluded)" -ForegroundColor Yellow
+ continue
+ }
+
+ # Check AKS power state
+ Write-Host "$aksClusterName checking power state... " -NoNewline
+ $powerState = az aks show --name $aksClusterName --resource-group $resourceGroup --subscription $subscriptionId --query "powerState.code" -o tsv 2>$null
+ $wasOriginallyStopped = $false
+
+ if ($powerState -eq "Stopped") {
+ $wasOriginallyStopped = $true
+ Write-Host "STOPPED - starting cluster..." -ForegroundColor Yellow
+ $null = az aks start --name $aksClusterName --resource-group $resourceGroup --subscription $subscriptionId 2>&1
+ if ($LASTEXITCODE -ne 0) {
+ Write-Host "$aksClusterName FAILED to start cluster" -ForegroundColor Red
+ continue
+ }
+ Write-Host "$aksClusterName cluster started successfully" -ForegroundColor Green
+
+ # Wait for nodes to become Ready after starting
+ Write-Host "$aksClusterName waiting for nodes to be Ready... " -NoNewline
+ $maxWaitSeconds = 300
+ $waitInterval = 10
+ $elapsed = 0
+ $nodesReady = $false
+
+ # Get fresh kubeconfig after start
+ $kubeconfig = az aks get-credentials --name $aksClusterName --resource-group $resourceGroup --subscription $subscriptionId --file - 2>$null
+ $tempKubeconfig = [System.IO.Path]::GetTempFileName()
+ $kubeconfig | Out-File -FilePath $tempKubeconfig -Encoding utf8
+
+ while ($elapsed -lt $maxWaitSeconds -and -not $nodesReady) {
+ Start-Sleep -Seconds $waitInterval
+ $elapsed += $waitInterval
+
+ # Check if all nodes are Ready
+ $notReadyNodes = kubectl get nodes --kubeconfig $tempKubeconfig --no-headers 2>$null | Where-Object { $_ -notmatch '\sReady\s' }
+ if ($null -eq $notReadyNodes -or $notReadyNodes.Count -eq 0) {
+ $nodesReady = $true
+ } else {
+ Write-Host "." -NoNewline
+ }
+ }
+
+ if ($nodesReady) {
+ Write-Host " Ready" -ForegroundColor Green
+ } else {
+ Write-Host " TIMEOUT (continuing anyway)" -ForegroundColor Yellow
+ }
+ } else {
+ Write-Host "Running" -ForegroundColor Green
+ # Use kubeconfig from Terraform output
+ $kubeconfig = $property.Value.kubeconfig_raw
+ $tempKubeconfig = [System.IO.Path]::GetTempFileName()
+ $kubeconfig | Out-File -FilePath $tempKubeconfig -Encoding utf8
+ }
+
+ # Deploy ingress-nginx
+ Write-Host "$aksClusterName ingress-nginx " -NoNewline
+
+ $helmArgs = @(
+ "upgrade", "--install", "ingress-nginx", "ingress-nginx",
+ "--repo", "https://kubernetes.github.io/ingress-nginx",
+ "--namespace", "ingress-nginx", "--create-namespace",
+ "--set", "controller.livenessProbe.httpGet.path=/healthz",
+ "--set", "controller.readinessProbe.httpGet.path=/healthz",
+ "--set", "controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz",
+ "--set", "defaultBackend.enabled=false",
+ "--kubeconfig", $tempKubeconfig,
+ "--wait", "--timeout", "5m"
+ )
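+    # The dots inside the load-balancer annotation key are escaped so Helm's
+    # --set parser reads the key as one literal string instead of nested keys.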
+
+ if ($ShowDebug) {
+ Write-Host "`n[DEBUG] Helm command: helm $($helmArgs -join ' ')" -ForegroundColor Gray
+ $output = & helm $helmArgs 2>&1
+ Write-Host $output -ForegroundColor Gray
+ $success = $LASTEXITCODE -eq 0
+ } else {
+ $null = & helm $helmArgs 2>&1
+ $success = $LASTEXITCODE -eq 0
+ }
+
+ Write-Host $(if ($success) { "OK" } else { "FAILED" }) -ForegroundColor $(if ($success) { "Green" } else { "Red" })
+
+ # Cleanup namespace if it exists
+ if (-not $SkipNamespaceCleanup) {
+ Write-Host "$aksClusterName namespace/$CleanupNamespace " -NoNewline
+
+ # Check if namespace exists
+ $nsExists = kubectl get namespace $CleanupNamespace --kubeconfig $tempKubeconfig 2>&1
+ if ($LASTEXITCODE -eq 0) {
+ # Namespace exists, delete it
+ if ($ShowDebug) {
+ Write-Host "`n[DEBUG] Namespace exists, deleting..." -ForegroundColor Gray
+ $output = kubectl delete namespace $CleanupNamespace --kubeconfig $tempKubeconfig --wait=true 2>&1
+ Write-Host $output -ForegroundColor Gray
+ $success = $LASTEXITCODE -eq 0
+ } else {
+ $null = kubectl delete namespace $CleanupNamespace --kubeconfig $tempKubeconfig --wait=true 2>&1
+ $success = $LASTEXITCODE -eq 0
+ }
+ Write-Host $(if ($success) { "DELETED" } else { "DELETE FAILED" }) -ForegroundColor $(if ($success) { "Yellow" } else { "Red" })
+ } else {
+ Write-Host "NOT EXISTS (skip cleanup)" -ForegroundColor Gray
+ }
+ }
+
+ # Deploy K8s manifests
+ $yamlFiles = Get-ChildItem -Path $K8sPath -Filter "*.yaml" |
+ Where-Object { $_.Name -notlike "*-job.yaml" } |
+ Sort-Object Name
+
+ foreach ($yamlFile in $yamlFiles) {
+ Write-Host "$aksClusterName $($yamlFile.Name) " -NoNewline
+
+ if ($ShowDebug) {
+ Write-Host "`n[DEBUG] kubectl apply -f $($yamlFile.FullName)" -ForegroundColor Gray
+ $output = kubectl apply -f $yamlFile.FullName --kubeconfig $tempKubeconfig 2>&1
+ Write-Host $output -ForegroundColor Gray
+ $success = $LASTEXITCODE -eq 0
+ } else {
+ $null = kubectl apply -f $yamlFile.FullName --kubeconfig $tempKubeconfig 2>&1
+ $success = $LASTEXITCODE -eq 0
+ }
+
+ Write-Host $(if ($success) { "OK" } else { "FAILED" }) -ForegroundColor $(if ($success) { "Green" } else { "Red" })
+ }
+
+ Remove-Item $tempKubeconfig -Force
+
+ # Stop cluster if it was originally stopped (to save costs)
+ if ($wasOriginallyStopped -and -not $KeepRunning) {
+ Write-Host "$aksClusterName stopping cluster (was originally stopped)... " -NoNewline
+ $null = az aks stop --name $aksClusterName --resource-group $resourceGroup --subscription $subscriptionId 2>&1
+ if ($LASTEXITCODE -eq 0) {
+ Write-Host "STOPPED" -ForegroundColor Yellow
+ } else {
+ Write-Host "FAILED to stop" -ForegroundColor Red
+ }
+ }
+}
+
+Write-Host "`nDeployment complete!" -ForegroundColor Green
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/deploy-two-phase.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/deploy-two-phase.ps1
new file mode 100644
index 000000000..458ff2a28
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/deploy-two-phase.ps1
@@ -0,0 +1,256 @@
+<#
+.SYNOPSIS
+ Two-phase deployment script for Oracle on Azure MicroHack infrastructure.
+
+.DESCRIPTION
+ This script deploys the infrastructure in two phases to avoid Azure AD
+ eventual consistency issues:
+
+ Phase 1: Create Entra ID users and group memberships (identity folder)
+ Phase 2: Deploy AKS clusters and ODAA resources (main folder)
+
+ By separating user creation from infrastructure deployment, we allow
+ Azure AD time to propagate the user objects before they're used in
+ RBAC role assignments.
+
+.PARAMETER Phase
+ Which phase to run: 'identity', 'infrastructure', or 'all' (default).
+ - identity: Only create users in Entra ID
+ - infrastructure: Only deploy AKS/ODAA (requires identity phase first)
+ - all: Run both phases sequentially
+
+.PARAMETER Destroy
+ If specified, destroys resources instead of creating them.
+
+.PARAMETER Plan
+ If specified, only runs terraform plan without applying.
+
+.PARAMETER AutoApprove
+ If specified, skips confirmation prompts (uses -auto-approve).
+
+.PARAMETER PropagationWait
+ Additional wait time (seconds) between phases. Default: 60.
+ The identity module already waits 90 seconds internally.
+
+.EXAMPLE
+ # Full deployment (both phases)
+ .\deploy-two-phase.ps1
+
+.EXAMPLE
+ # Only create users
+ .\deploy-two-phase.ps1 -Phase identity
+
+.EXAMPLE
+ # Only deploy infrastructure (users must exist)
+ .\deploy-two-phase.ps1 -Phase infrastructure
+
+.EXAMPLE
+ # Destroy everything (infrastructure first, then identity)
+ .\deploy-two-phase.ps1 -Destroy
+
+.NOTES
+ Author: Generated for Oracle on Azure MicroHack
+ Requires: Terraform 1.0+, Azure CLI logged in
+#>
+
+[CmdletBinding()]
+param(
+ [ValidateSet('identity', 'infrastructure', 'all')]
+ [string]$Phase = 'all',
+
+ [switch]$Destroy,
+
+ [switch]$Plan,
+
+ [switch]$AutoApprove,
+
+ [int]$PropagationWait = 60
+)
+
+$ErrorActionPreference = 'Stop'
+$ScriptRoot = $PSScriptRoot
+$TerraformRoot = Split-Path $ScriptRoot -Parent
+$IdentityFolder = Join-Path $TerraformRoot 'identity'
+$InfrastructureFolder = $TerraformRoot
+
+function Write-Phase {
+ param([string]$Message, [string]$Color = 'Cyan')
+ Write-Host "`n$('=' * 80)" -ForegroundColor $Color
+ Write-Host $Message -ForegroundColor $Color
+ Write-Host "$('=' * 80)`n" -ForegroundColor $Color
+}
+
+function Write-Step {
+ param([string]$Message)
+ Write-Host ">>> $Message" -ForegroundColor Yellow
+}
+
+function Invoke-Terraform {
+ param(
+ [string]$WorkingDirectory,
+ [string]$Command,
+ [string[]]$Arguments
+ )
+
+ Push-Location $WorkingDirectory
+ try {
+ $allArgs = @($Command) + $Arguments
+ Write-Host "terraform $($allArgs -join ' ')" -ForegroundColor Gray
+ & terraform @allArgs
+ if ($LASTEXITCODE -ne 0) {
+ throw "Terraform $Command failed with exit code $LASTEXITCODE"
+ }
+ }
+ finally {
+ Pop-Location
+ }
+}
+
+function Deploy-Identity {
+ Write-Phase "PHASE 1: ENTRA ID IDENTITY MANAGEMENT"
+
+ if (!(Test-Path $IdentityFolder)) {
+ throw "Identity folder not found: $IdentityFolder"
+ }
+
+ Write-Step "Initializing Terraform in identity folder..."
+ Invoke-Terraform -WorkingDirectory $IdentityFolder -Command 'init'
+
+ if ($Plan) {
+ Write-Step "Planning identity changes..."
+ Invoke-Terraform -WorkingDirectory $IdentityFolder -Command 'plan'
+ return
+ }
+
+ $applyArgs = @()
+ if ($AutoApprove) { $applyArgs += '-auto-approve' }
+
+ Write-Step "Applying identity configuration..."
+ Invoke-Terraform -WorkingDirectory $IdentityFolder -Command 'apply' -Arguments $applyArgs
+
+ Write-Host "`nIdentity deployment complete!" -ForegroundColor Green
+ Write-Host "User credentials saved to: $IdentityFolder\user_credentials.json"
+ Write-Host "Identity outputs saved to: $IdentityFolder\identity_outputs.json"
+}
+
+function Destroy-Identity {
+ Write-Phase "DESTROYING: ENTRA ID IDENTITY RESOURCES" -Color Red
+
+ if (!(Test-Path $IdentityFolder)) {
+ Write-Warning "Identity folder not found, skipping: $IdentityFolder"
+ return
+ }
+
+ $destroyArgs = @()
+ if ($AutoApprove) { $destroyArgs += '-auto-approve' }
+
+ Write-Step "Destroying identity resources..."
+ Invoke-Terraform -WorkingDirectory $IdentityFolder -Command 'destroy' -Arguments $destroyArgs
+}
+
+function Deploy-Infrastructure {
+ Write-Phase "PHASE 2: AKS AND ODAA INFRASTRUCTURE"
+
+ # Check that identity outputs exist
+ $identityOutputs = Join-Path $IdentityFolder 'identity_outputs.json'
+ if (!(Test-Path $identityOutputs)) {
+ throw @"
+Identity outputs not found at: $identityOutputs
+
+Please run the identity phase first:
+ .\deploy-two-phase.ps1 -Phase identity
+
+Or run both phases:
+ .\deploy-two-phase.ps1 -Phase all
+"@
+ }
+
+ Write-Step "Initializing Terraform in infrastructure folder..."
+ Invoke-Terraform -WorkingDirectory $InfrastructureFolder -Command 'init'
+
+ if ($Plan) {
+ Write-Step "Planning infrastructure changes..."
+ Invoke-Terraform -WorkingDirectory $InfrastructureFolder -Command 'plan' -Arguments @('-var=use_external_identity=true')
+ return
+ }
+
+ $applyArgs = @('-var=use_external_identity=true')
+ if ($AutoApprove) { $applyArgs += '-auto-approve' }
+
+ Write-Step "Applying infrastructure configuration..."
+ Invoke-Terraform -WorkingDirectory $InfrastructureFolder -Command 'apply' -Arguments $applyArgs
+
+ Write-Host "`nInfrastructure deployment complete!" -ForegroundColor Green
+}
+
+function Destroy-Infrastructure {
+ Write-Phase "DESTROYING: AKS AND ODAA INFRASTRUCTURE" -Color Red
+
+ $destroyArgs = @('-var=use_external_identity=true')
+ if ($AutoApprove) { $destroyArgs += '-auto-approve' }
+
+ Write-Step "Destroying infrastructure resources..."
+ Invoke-Terraform -WorkingDirectory $InfrastructureFolder -Command 'destroy' -Arguments $destroyArgs
+}
+
+# Main execution
+try {
+ if ($Destroy) {
+ # Destroy in reverse order: infrastructure first, then identity
+ if ($Phase -eq 'all' -or $Phase -eq 'infrastructure') {
+ Destroy-Infrastructure
+ }
+ if ($Phase -eq 'all' -or $Phase -eq 'identity') {
+ Destroy-Identity
+ }
+ Write-Phase "DESTROY COMPLETE" -Color Green
+ }
+ else {
+ # Deploy in order: identity first, then infrastructure
+ if ($Phase -eq 'all' -or $Phase -eq 'identity') {
+ Deploy-Identity
+ }
+
+ if ($Phase -eq 'all') {
+ Write-Phase "WAITING FOR AZURE AD PROPAGATION"
+ Write-Host "Waiting $PropagationWait additional seconds for Azure AD to propagate..."
+ Write-Host "(The identity module already waited 90 seconds internally)"
+
+ for ($i = $PropagationWait; $i -gt 0; $i -= 10) {
+ Write-Host " $i seconds remaining..." -ForegroundColor Gray
+ Start-Sleep -Seconds ([Math]::Min(10, $i))
+ }
+ Write-Host "Propagation wait complete.`n" -ForegroundColor Green
+ }
+
+ if ($Phase -eq 'all' -or $Phase -eq 'infrastructure') {
+ Deploy-Infrastructure
+ }
+
+ Write-Phase "DEPLOYMENT COMPLETE" -Color Green
+
+ if ($Phase -eq 'all') {
+ Write-Host @"
+
+Summary:
+--------
+- Users created and added to group: mh-odaa-user-grp
+- User credentials: identity\user_credentials.json
+- AKS clusters deployed with RBAC roles assigned
+- VNet peering established between AKS and ODAA networks
+
+Next steps:
+-----------
+1. Distribute user credentials to participants
+2. Deploy ingress controllers: .\scripts\deploy-base-pods.ps1
+3. Optionally create Oracle databases: terraform apply -var="create_autonomous_database=true" -var="use_external_identity=true"
+
+"@
+ }
+ }
+}
+catch {
+ Write-Host "`nERROR: $_" -ForegroundColor Red
+ Write-Host $_.ScriptStackTrace -ForegroundColor DarkGray
+ exit 1
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/enable-container-network-logs.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/enable-container-network-logs.ps1
new file mode 100644
index 000000000..0a7667989
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/enable-container-network-logs.ps1
@@ -0,0 +1,83 @@
+# Enable Container Network Observability Logs on AKS clusters
+param(
+    [switch]$ShowDebug
+)
+
+$ErrorActionPreference = "Continue"
+
+Write-Host "Enabling Container Network Observability Logs on AKS clusters..." -ForegroundColor Cyan
+Write-Host ""
+
+# Get AKS cluster information from Terraform output
+$tfOutput = terraform output -json aks_clusters | ConvertFrom-Json
+
+if (-not $tfOutput) {
+ Write-Host "Error: No AKS clusters found in Terraform output" -ForegroundColor Red
+ exit 1
+}
+
+# Process each AKS cluster
+foreach ($property in $tfOutput.PSObject.Properties) {
+ $clusterName = $property.Name
+ $clusterInfo = $property.Value
+
+ Write-Host "Processing cluster: $clusterName" -ForegroundColor Yellow
+ Write-Host " Resource Group: $($clusterInfo.resource_group_name)" -ForegroundColor Gray
+ Write-Host " Subscription: $($clusterInfo.subscription_id)" -ForegroundColor Gray
+
+ # Set the subscription context
+ Write-Host " Setting subscription context..." -ForegroundColor Gray
+ az account set -s $clusterInfo.subscription_id 2>&1 | Out-Null
+
+ if ($LASTEXITCODE -ne 0) {
+ Write-Host " [FAILED] Could not set subscription context" -ForegroundColor Red
+ continue
+ }
+
+ # Check if monitoring addon is enabled
+ Write-Host " Checking monitoring addon status..." -ForegroundColor Gray
+ $monitoringStatus = az aks show -n $clusterInfo.name -g $clusterInfo.resource_group_name --query "addonProfiles.omsagent.enabled" -o tsv 2>&1
+
+ if ($monitoringStatus -ne "true") {
+ Write-Host " [SKIPPED] Monitoring addon not enabled (required for Container Network Logs)" -ForegroundColor Yellow
+ continue
+ }
+
+ # Enable Advanced Container Networking Services with Container Network Logs
+ Write-Host " Enabling ACNS and Container Network Logs..." -ForegroundColor Cyan
+
+ $enableCmd = @(
+ "aks", "update",
+ "--enable-acns",
+ "--enable-container-network-logs",
+ "-g", $clusterInfo.resource_group_name,
+ "-n", $clusterInfo.name
+ )
+
+    if ($ShowDebug) {
+ Write-Host " [DEBUG] Command: az $($enableCmd -join ' ')" -ForegroundColor Gray
+ $output = & az $enableCmd 2>&1
+ Write-Host $output -ForegroundColor Gray
+ $success = $LASTEXITCODE -eq 0
+ } else {
+ $null = & az $enableCmd 2>&1
+ $success = $LASTEXITCODE -eq 0
+ }
+
+ if ($success) {
+ Write-Host " [SUCCESS] Container Network Logs enabled" -ForegroundColor Green
+ } else {
+ Write-Host " [FAILED] Could not enable Container Network Logs" -ForegroundColor Red
+ }
+
+ Write-Host ""
+}
+
+Write-Host "Container Network Observability Logs configuration complete!" -ForegroundColor Green
+Write-Host ""
+Write-Host "Next steps:" -ForegroundColor Cyan
+Write-Host " 1. Create ContainerNetworkLog CRDs to define what traffic to capture" -ForegroundColor Gray
+Write-Host " 2. View logs in Log Analytics workspace" -ForegroundColor Gray
+Write-Host " 3. Set up Grafana dashboards for visualization" -ForegroundColor Gray
+Write-Host ""
+Write-Host "Documentation: https://learn.microsoft.com/en-us/azure/aks/container-network-observability-logs" -ForegroundColor Gray
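+Write-Host ""
+Write-Host "Verification sketch (property path assumed for current API versions):" -ForegroundColor Gray
+Write-Host "  az aks show -g <resource-group> -n <cluster> --query networkProfile.advancedNetworking" -ForegroundColor Gray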
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/enable-tf-logging.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/enable-tf-logging.ps1
new file mode 100644
index 000000000..0fcbda283
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/enable-tf-logging.ps1
@@ -0,0 +1,31 @@
+#!/usr/bin/env pwsh
+# ===============================================================================
+# Enable Terraform Logging Script
+# ===============================================================================
+# This script enables detailed Terraform logging and redirects output to a logs
+# folder for troubleshooting and debugging purposes.
+# ===============================================================================
+
+# Create logs directory if it doesn't exist
+$logsDir = Join-Path -Path $PSScriptRoot -ChildPath "logs"
+if (-not (Test-Path -Path $logsDir)) {
+ New-Item -ItemType Directory -Path $logsDir -Force | Out-Null
+ Write-Host "Created logs directory: $logsDir" -ForegroundColor Green
+} else {
+ Write-Host "Logs directory already exists: $logsDir" -ForegroundColor Yellow
+}
+
+# Set Terraform logging environment variables
+$timestamp = Get-Date -Format "yyyyMMdd_HHmmss"
+$logFile = Join-Path -Path $logsDir -ChildPath "terraform_$timestamp.log"
+
+$env:TF_LOG = "TRACE"
+$env:TF_LOG_PATH = $logFile
+
+Write-Host "`nTerraform logging enabled:" -ForegroundColor Cyan
+Write-Host " TF_LOG = $env:TF_LOG" -ForegroundColor White
+Write-Host " TF_LOG_PATH = $env:TF_LOG_PATH" -ForegroundColor White
+Write-Host "`nRun your Terraform commands now. Logs will be written to:" -ForegroundColor Green
+Write-Host " $logFile" -ForegroundColor White
+Write-Host "`nTo disable logging, close this PowerShell session or run:" -ForegroundColor Yellow
+Write-Host " Remove-Item Env:\TF_LOG; Remove-Item Env:\TF_LOG_PATH" -ForegroundColor Gray
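+
+# Note: $env: assignments are process-wide, so terraform commands run later in
+# this same PowerShell session pick up TF_LOG/TF_LOG_PATH automatically.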
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/k8s/deployments.yaml b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/k8s/deployments.yaml
new file mode 100644
index 000000000..e26d3834e
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/k8s/deployments.yaml
@@ -0,0 +1,75 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: adb-perf-test
+ labels:
+ name: adb-perf-test
+ purpose: oracle-adb-performance-testing
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: adbping-deployment
+ namespace: adb-perf-test
+ labels:
+ app: adbping
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: adbping
+ template:
+ metadata:
+ labels:
+ app: adbping
+ spec:
+ containers:
+ - name: adbping
+ image: odaamh.azurecr.io/adb-nettest:v2.1
+ command: ["/bin/sleep"]
+ args: ["3600"]
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "100m"
+ limits:
+ memory: "512Mi"
+ cpu: "500m"
+ env:
+ - name: ORACLE_HOME
+ value: "/usr/local/oracle"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: connping-deployment
+ namespace: adb-perf-test
+ labels:
+ app: connping
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: connping
+ template:
+ metadata:
+ labels:
+ app: connping
+ spec:
+ containers:
+ - name: connping
+ image: odaamh.azurecr.io/connping:v1.2
+ command: ["/bin/sleep"]
+ args: ["3600"]
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "100m"
+ limits:
+ memory: "512Mi"
+ cpu: "500m"
+ env:
+ - name: ORACLE_HOME
+ value: "/usr/lib/oracle/23/client64"
+ - name: TNS_ADMIN
+ value: "/opt/oracle/wallet"
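+# Usage sketch: the containers idle on sleep so testers can exec in, e.g.
+#   kubectl -n adb-perf-test exec -it deploy/adbping-deployment -- /bin/sh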
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/k8s/network-test-pod.yaml b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/k8s/network-test-pod.yaml
new file mode 100644
index 000000000..4b93201d0
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/k8s/network-test-pod.yaml
@@ -0,0 +1,96 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: network-test-pod
+ namespace: default
+ labels:
+ app: network-test
+spec:
+ # Optional: set to true if you need to test hostNetwork performance
+ # hostNetwork: true
+ containers:
+ - name: net-tools
+ image: ubuntu:22.04
+ command: ["/bin/bash","-c"]
+ args:
+ - |
+ set -euo pipefail
+ export DEBIAN_FRONTEND=noninteractive
+ apt-get update && apt-get install -y --no-install-recommends \
+ sockperf iperf3 qperf iputils-ping dnsutils curl tcpdump netcat-openbsd nmap traceroute mtr-tiny ca-certificates jq wget \
+ && rm -rf /var/lib/apt/lists/*
+ printf '%s\n' \
+ '#!/usr/bin/env bash' \
+ 'if [ $# -lt 2 ]; then echo "Usage: tcpping host port [intervalSeconds]"; exit 1; fi' \
+ 'H="$1"; P="$2"; I="${3:-1}"' \
+ 'while true; do' \
+ ' START=$(date +%s%3N)' \
+      ' timeout 1 bash -c "</dev/tcp/$H/$P" 2>/dev/null' \
+ ' RC=$?' \
+ ' END=$(date +%s%3N); ELAPSED=$((END-START))' \
+ ' TS=$(date -Iseconds)' \
+ ' if [ $RC -eq 0 ]; then' \
+ ' echo "$TS OK $H $P ${ELAPSED}ms"' \
+ ' else' \
+ ' echo "$TS FAIL $H $P timeout"' \
+ ' fi' \
+ ' sleep $I' \
+ 'done' > /usr/local/bin/tcpping
+ chmod +x /usr/local/bin/tcpping
+ echo "Container ready"; sleep infinity
+ securityContext:
+ capabilities:
+ add: ["NET_ADMIN", "NET_RAW"]
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "250m"
+ limits:
+ memory: "512Mi"
+ cpu: "1"
+ ports:
+ - name: iperf3
+ containerPort: 5201
+ protocol: TCP
+ - name: qperf
+ containerPort: 19765
+ protocol: TCP
+ - name: sockperf-server
+ image: mellanox/sockperf:latest
+ args: ["sockperf", "server", "-p", "11111"] # default UDP server port
+ ports:
+ - name: sockperf
+ containerPort: 11111
+ protocol: UDP
+ resources:
+ requests:
+ memory: "64Mi"
+ cpu: "100m"
+ limits:
+ memory: "256Mi"
+ cpu: "500m"
+ restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: network-test-svc
+ namespace: default
+spec:
+ selector:
+ app: network-test
+ ports:
+ - name: sockperf
+ port: 11111
+ targetPort: 11111
+ protocol: UDP
+ - name: iperf3
+ port: 5201
+ targetPort: 5201
+ protocol: TCP
+ - name: qperf
+ port: 19765
+ targetPort: 19765
+ protocol: TCP
+ type: ClusterIP
+# tcpping script created via printf to avoid YAML heredoc parsing issues.
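+# Example usage once the pod is Running (host/port values are placeholders):
+#   kubectl exec -it network-test-pod -c net-tools -- tcpping <adb-private-endpoint-fqdn> 1522
+#   kubectl exec -it network-test-pod -c net-tools -- iperf3 -s    # then iperf3 -c <this-pod-ip> -p 5201 from a peer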
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/microhack.status.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/microhack.status.ps1
new file mode 100644
index 000000000..c691780d1
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/microhack.status.ps1
@@ -0,0 +1,134 @@
+<#
+.SYNOPSIS
+ Checks the status of Private DNS Zones A records on AKS VNets for the MicroHack environment.
+
+.DESCRIPTION
+ This script iterates through all subscription targets defined in terraform.tfvars,
+ finds Private DNS Zones linked to AKS VNets, and shows which ones have A records added.
+
+.EXAMPLE
+ .\scripts\microhack.status.ps1
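+
+.EXAMPLE
+    # The script also emits the result objects, so they can be filtered, e.g. to list zones still missing A records:
+    .\scripts\microhack.status.ps1 | Where-Object { -not $_.HasARecords }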
+#>
+
+# Subscription targets from terraform.tfvars
+$subscriptionTargets = @(
+ @{ Id = "556f9b63-ebc9-4c7e-8437-9a05aa8cdb25"; Name = "sub-mh0" },
+ @{ Id = "a0844269-41ae-442c-8277-415f1283d422"; Name = "sub-mh1" },
+ @{ Id = "b1658f1f-33e5-4e48-9401-f66ba5e64cce"; Name = "sub-mh2" },
+ @{ Id = "9aa72379-2067-4948-b51c-de59f4005d04"; Name = "sub-mh3" },
+ @{ Id = "98525264-1eb4-493f-983d-16a330caa7f6"; Name = "sub-mh4" }
+)
+
+Write-Host ""
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host "MicroHack Private DNS Zone Status Check" -ForegroundColor Cyan
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host ""
+
+$results = @()
+
+foreach ($sub in $subscriptionTargets) {
+ Write-Host "Checking subscription: $($sub.Name) ($($sub.Id))" -ForegroundColor Yellow
+
+ # Set the subscription context
+ az account set --subscription $sub.Id 2>$null
+ if ($LASTEXITCODE -ne 0) {
+ Write-Host " [ERROR] Failed to set subscription context" -ForegroundColor Red
+ continue
+ }
+
+ # Get all Private DNS Zones in the subscription
+ $dnsZonesJson = az network private-dns zone list --query "[].{name:name, resourceGroup:resourceGroup}" -o json 2>$null
+ $dnsZones = $dnsZonesJson | ConvertFrom-Json
+
+ if (-not $dnsZones -or $dnsZones.Count -eq 0) {
+ Write-Host " No Private DNS Zones found" -ForegroundColor Gray
+ continue
+ }
+
+ foreach ($zone in $dnsZones) {
+ # Get A records for this zone
+ $aRecordsJson = az network private-dns record-set a list --resource-group $zone.resourceGroup --zone-name $zone.name --query "[].{name:name, fqdn:fqdn, ttl:ttl, ipAddresses:aRecords[].ipv4Address}" -o json 2>$null
+ $aRecords = $aRecordsJson | ConvertFrom-Json
+
+ $hasARecords = $false
+ if ($aRecords -and $aRecords.Count -gt 0) {
+ $hasARecords = $true
+ }
+
+ # Get VNet links to determine if this is an AKS-related zone
+ $vnetLinksJson = az network private-dns link vnet list --resource-group $zone.resourceGroup --zone-name $zone.name --query "[].{name:name, vnetId:virtualNetwork.id}" -o json 2>$null
+ $vnetLinks = $vnetLinksJson | ConvertFrom-Json
+
+ $linkedToAks = $false
+ $linkedVnets = @()
+ foreach ($link in $vnetLinks) {
+ if ($link.vnetId -match "aks-user\d+") {
+ $linkedToAks = $true
+ $vnetName = ($link.vnetId -split "/")[-1]
+ $linkedVnets += $vnetName
+ }
+ }
+
+ if ($linkedToAks) {
+ if ($hasARecords) {
+ $status = "[YES] Has A Records"
+ $statusColor = "Green"
+ } else {
+ $status = "[NO] No A Records"
+ $statusColor = "Red"
+ }
+
+ Write-Host " [$($zone.resourceGroup)] $($zone.name)" -ForegroundColor White -NoNewline
+ Write-Host " - $status" -ForegroundColor $statusColor
+
+ if ($hasARecords) {
+ foreach ($record in $aRecords) {
+ $ips = $record.ipAddresses -join ", "
+ Write-Host " -> $($record.name): $ips" -ForegroundColor Gray
+ }
+ }
+
+ $aRecordCount = 0
+ $aRecordStr = ""
+ if ($aRecords) {
+ $aRecordCount = $aRecords.Count
+ $aRecordItems = @()
+ foreach ($r in $aRecords) {
+ $ipStr = $r.ipAddresses -join ","
+ $aRecordItems += "$($r.name): $ipStr"
+ }
+ $aRecordStr = $aRecordItems -join "; "
+ }
+
+ $results += [PSCustomObject]@{
+ Subscription = $sub.Name
+ SubscriptionId = $sub.Id
+ ResourceGroup = $zone.resourceGroup
+ ZoneName = $zone.name
+ HasARecords = $hasARecords
+ ARecordCount = $aRecordCount
+ LinkedVNets = $linkedVnets -join ", "
+ ARecords = $aRecordStr
+ }
+ }
+ }
+ Write-Host ""
+}
+
+# Summary
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host "Summary" -ForegroundColor Cyan
+Write-Host "========================================" -ForegroundColor Cyan
+
+$withRecords = @($results | Where-Object { $_.HasARecords }).Count
+$withoutRecords = @($results | Where-Object { -not $_.HasARecords }).Count
+$total = $results.Count
+
+Write-Host "Total Private DNS Zones linked to AKS VNets: $total" -ForegroundColor White
+Write-Host " With A Records: $withRecords" -ForegroundColor Green
+Write-Host " Without A Records: $withoutRecords" -ForegroundColor Red
+Write-Host ""
+
+# Return results for further processing if needed
+$results
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/register-oracle-sdn.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/register-oracle-sdn.ps1
new file mode 100644
index 000000000..6f824c602
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/register-oracle-sdn.ps1
@@ -0,0 +1,124 @@
+<#
+.SYNOPSIS
+Registers the Oracle SDN appliance preview features and re-registers resource providers
+for a list of subscriptions.
+
+.DESCRIPTION
+This script ensures that each target subscription has the
+EnableRotterdamSdnApplianceForOracle feature enabled for both
+Microsoft.Baremetal and Microsoft.Network namespaces. It waits for feature
+registration to complete before re-registering the providers, following Azure
+preview feature best practices.
+
+.PARAMETER Subscriptions
+Array of subscription IDs to process.
+
+.EXAMPLE
+PS> ./register-oracle-sdn.ps1
+
+.EXAMPLE
+PS> ./register-oracle-sdn.ps1 -Subscriptions @("<subscription-id-1>", "<subscription-id-2>")
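+
+.EXAMPLE
+PS> ./register-oracle-sdn.ps1 -PollingIntervalSeconds 60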
+#>
+param (
+ [string[]]
+ $Subscriptions = @(
+ "4aecf0e8-2fe2-4187-bc93-0356bd2676f5", # sub-mhodaa
+ "556f9b63-ebc9-4c7e-8437-9a05aa8cdb25", # sub-t0
+ "a0844269-41ae-442c-8277-415f1283d422", # sub-t1
+ "b1658f1f-33e5-4e48-9401-f66ba5e64cce", # sub-t2
+ "9aa72379-2067-4948-b51c-de59f4005d04", # sub-t3
+ "98525264-1eb4-493f-983d-16a330caa7f6" # sub-t4
+ ),
+
+ [int]
+ $PollingIntervalSeconds = 30
+)
+
+$features = @(
+ @{ Namespace = "Microsoft.Baremetal"; Name = "EnableRotterdamSdnApplianceForOracle" },
+ @{ Namespace = "Microsoft.Network"; Name = "EnableRotterdamSdnApplianceForOracle" }
+)
+
+$providers = @(
+ "Microsoft.Baremetal",
+ "Microsoft.Network",
+ "Microsoft.Compute",
+ "Oracle.Database"
+)
+
+function Write-Section {
+ param (
+ [string] $Message
+ )
+
+ Write-Host "=== $Message ===" -ForegroundColor Cyan
+}
+
+function Register-Feature {
+ param (
+ [string] $Namespace,
+ [string] $Name
+ )
+
+ az feature register --namespace $Namespace --name $Name --only-show-errors | Out-Null
+}
+
+function Get-FeatureState {
+ param (
+ [string] $Namespace,
+ [string] $Name
+ )
+
+ az feature show --namespace $Namespace --name $Name --query properties.state --output tsv
+}
+
+function Register-Provider {
+ param (
+ [string] $Namespace
+ )
+
+ az provider register --namespace $Namespace --only-show-errors | Out-Null
+}
+
+foreach ($subscription in $Subscriptions) {
+ Write-Section "Processing subscription $subscription"
+
+ az account set --subscription $subscription | Out-Null
+
+ foreach ($feature in $features) {
+ Write-Host "Registering feature $($feature.Namespace)/$($feature.Name)..."
+ Register-Feature -Namespace $feature.Namespace -Name $feature.Name
+ }
+
+ Write-Host "Waiting for feature registration to complete..."
+ do {
+ Start-Sleep -Seconds $PollingIntervalSeconds
+
+ $states = @{}
+ foreach ($feature in $features) {
+ $state = Get-FeatureState -Namespace $feature.Namespace -Name $feature.Name
+ $states["$($feature.Namespace)/$($feature.Name)"] = $state
+ }
+
+ $statusLine = $states.GetEnumerator() | ForEach-Object { "{0}: {1}" -f $_.Key, $_.Value } | Sort-Object
+ Write-Host " $(($statusLine -join '; '))"
+
+ $allRegistered = $states.Values -notcontains "Registering" -and $states.Values -notcontains "Pending"
+ } while (-not $allRegistered)
+
+    if (($states.Values | Where-Object { $_ -ne "Registered" }).Count -gt 0) {
+ Write-Warning "One or more features failed to register for subscription $subscription."
+ Write-Warning "Skipping provider re-registration for this subscription."
+ continue
+ }
+
+ foreach ($provider in $providers) {
+ Write-Host "Re-registering provider $provider..."
+ Register-Provider -Namespace $provider
+ }
+
+ Write-Host "Completed feature setup for $subscription" -ForegroundColor Green
+ Write-Host
+}
+
+Write-Section "All subscriptions processed"
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/reset-user-mfa.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/reset-user-mfa.ps1
new file mode 100644
index 000000000..d3f2fcb39
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/reset-user-mfa.ps1
@@ -0,0 +1,188 @@
+<#
+.SYNOPSIS
+ Reset MFA authentication methods for workshop users to prepare for new attendees.
+
+.DESCRIPTION
+ This script removes all MFA authentication methods (except password) from workshop users,
+ allowing new attendees to set up their own MFA on first login.
+
+ Required permissions for the executing identity:
+ - UserAuthenticationMethod.ReadWrite.All (Application permission)
+ - Or run as a user with Authentication Administrator or Privileged Authentication Administrator role
+
+.PARAMETER UserPrefix
+ The prefix for user accounts (e.g., "user" for user00, user01, etc.)
+
+.PARAMETER Domain
+ The domain suffix for user accounts (e.g., "cptazure.org")
+
+.PARAMETER UserCount
+ Number of users to reset (e.g., 25 for user00-user24)
+
+.PARAMETER IdentityFile
+ Path to user_credentials.json to get user list (alternative to UserPrefix/UserCount)
+
+.EXAMPLE
+ .\reset-user-mfa.ps1 -UserPrefix "user" -Domain "cptazure.org" -UserCount 25
+
+.EXAMPLE
+ .\reset-user-mfa.ps1 -IdentityFile "..\identity\user_credentials.json"
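+
+.EXAMPLE
+    # Spot-check one user's remaining methods afterwards (the same Graph endpoint this script queries; the UPN is an example):
+    az rest --method GET --uri "https://graph.microsoft.com/v1.0/users/user00@cptazure.org/authentication/methods"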
+#>
+
+[CmdletBinding()]
+param(
+ [Parameter(ParameterSetName = "Manual")]
+ [string]$UserPrefix = "user",
+
+ [Parameter(ParameterSetName = "Manual")]
+ [string]$Domain = "cptazure.org",
+
+ [Parameter(ParameterSetName = "Manual")]
+ [int]$UserCount = 25,
+
+ [Parameter(ParameterSetName = "FromFile")]
+ [string]$IdentityFile
+)
+
+$ErrorActionPreference = "Stop"
+
+# Build user list
+$users = @()
+if ($IdentityFile) {
+ if (-not (Test-Path $IdentityFile)) {
+ Write-Error "Identity file not found: $IdentityFile"
+ exit 1
+ }
+ $identity = Get-Content $IdentityFile | ConvertFrom-Json
+ foreach ($userKey in $identity.users.PSObject.Properties.Name) {
+ $users += $identity.users.$userKey.user_principal_name
+ }
+} else {
+ for ($i = 0; $i -lt $UserCount; $i++) {
+ $userNum = $i.ToString("D2")
+ $users += "$UserPrefix$userNum@$Domain"
+ }
+}
+
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host "MFA Reset Script for Workshop Users" -ForegroundColor Cyan
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host "Users to process: $($users.Count)" -ForegroundColor Yellow
+Write-Host ""
+
+# Check Azure CLI login
+try {
+ $account = az account show 2>&1 | ConvertFrom-Json
+ Write-Host "Logged in as: $($account.user.name)" -ForegroundColor Green
+} catch {
+ Write-Error "Please login to Azure CLI first: az login"
+ exit 1
+}
+
+$successCount = 0
+$errorCount = 0
+$noMfaCount = 0
+
+foreach ($upn in $users) {
+ Write-Host "`nProcessing: $upn" -ForegroundColor Cyan
+
+ try {
+ # Get all authentication methods for the user
+ $methodsJson = az rest --method GET `
+ --uri "https://graph.microsoft.com/v1.0/users/$upn/authentication/methods" `
+ --output json 2>&1
+
+ if ($LASTEXITCODE -ne 0) {
+ Write-Host " ERROR: Failed to get auth methods - $methodsJson" -ForegroundColor Red
+ $errorCount++
+ continue
+ }
+
+ $methods = $methodsJson | ConvertFrom-Json
+ $mfaMethods = $methods.value | Where-Object {
+ $_.'@odata.type' -ne '#microsoft.graph.passwordAuthenticationMethod'
+ }
+
+ if ($mfaMethods.Count -eq 0) {
+ Write-Host " No MFA methods registered (only password)" -ForegroundColor Gray
+ $noMfaCount++
+ continue
+ }
+
+ Write-Host " Found $($mfaMethods.Count) MFA method(s) to remove:" -ForegroundColor Yellow
+
+ foreach ($method in $mfaMethods) {
+ $methodType = $method.'@odata.type' -replace '#microsoft.graph.', ''
+ $methodId = $method.id
+
+ Write-Host " - $methodType (ID: $methodId)" -ForegroundColor Gray
+
+ # Determine the correct endpoint for deletion based on method type
+ $deleteUri = switch ($methodType) {
+ "phoneAuthenticationMethod" {
+ "https://graph.microsoft.com/v1.0/users/$upn/authentication/phoneMethods/$methodId"
+ }
+ "microsoftAuthenticatorAuthenticationMethod" {
+ "https://graph.microsoft.com/v1.0/users/$upn/authentication/microsoftAuthenticatorMethods/$methodId"
+ }
+ "softwareOathAuthenticationMethod" {
+ "https://graph.microsoft.com/v1.0/users/$upn/authentication/softwareOathMethods/$methodId"
+ }
+ "fido2AuthenticationMethod" {
+ "https://graph.microsoft.com/v1.0/users/$upn/authentication/fido2Methods/$methodId"
+ }
+ "windowsHelloForBusinessAuthenticationMethod" {
+ "https://graph.microsoft.com/v1.0/users/$upn/authentication/windowsHelloForBusinessMethods/$methodId"
+ }
+ "emailAuthenticationMethod" {
+ "https://graph.microsoft.com/v1.0/users/$upn/authentication/emailMethods/$methodId"
+ }
+ "temporaryAccessPassAuthenticationMethod" {
+ "https://graph.microsoft.com/v1.0/users/$upn/authentication/temporaryAccessPassMethods/$methodId"
+ }
+ default { $null }
+ }
+
+ if ($deleteUri) {
+ try {
+ $deleteResult = az rest --method DELETE --uri $deleteUri 2>&1
+ if ($LASTEXITCODE -eq 0) {
+ Write-Host " Removed successfully" -ForegroundColor Green
+ } else {
+ Write-Host " Failed to remove: $deleteResult" -ForegroundColor Red
+ }
+ } catch {
+ Write-Host " Failed to remove: $_" -ForegroundColor Red
+ }
+ } else {
+ Write-Host " Unknown method type, skipping" -ForegroundColor Yellow
+ }
+ }
+
+ $successCount++
+
+ } catch {
+ Write-Host " ERROR: $_" -ForegroundColor Red
+ $errorCount++
+ }
+}
+
+Write-Host "`n========================================" -ForegroundColor Cyan
+Write-Host "MFA Reset Summary" -ForegroundColor Cyan
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host "Successfully processed: $successCount" -ForegroundColor Green
+Write-Host "No MFA registered: $noMfaCount" -ForegroundColor Gray
+Write-Host "Errors: $errorCount" -ForegroundColor $(if ($errorCount -gt 0) { "Red" } else { "Gray" })
+Write-Host ""
+
+if ($errorCount -gt 0) {
+ Write-Host "Some users had errors. Common causes:" -ForegroundColor Yellow
+ Write-Host " - Service principal needs UserAuthenticationMethod.ReadWrite.All permission" -ForegroundColor Yellow
+ Write-Host " - Or run this script as a user with Authentication Administrator role" -ForegroundColor Yellow
+ Write-Host ""
+ Write-Host "To add the permission to your service principal:" -ForegroundColor Cyan
+ Write-Host " 1. Go to Azure Portal > Entra ID > App registrations" -ForegroundColor White
+ Write-Host " 2. Find your app and go to API permissions" -ForegroundColor White
+ Write-Host " 3. Add: Microsoft Graph > Application > UserAuthenticationMethod.ReadWrite.All" -ForegroundColor White
+ Write-Host " 4. Grant admin consent" -ForegroundColor White
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/rotate-passwords.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/rotate-passwords.ps1
new file mode 100644
index 000000000..1eefe06bb
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/scripts/rotate-passwords.ps1
@@ -0,0 +1,201 @@
+<#
+.SYNOPSIS
+ Rotate passwords for all MicroHack users to revoke access after an event.
+
+.DESCRIPTION
+ This script automates the password rotation workflow by:
+ 1. Updating the password_rotation_trigger in terraform.tfvars
+ 2. Running terraform apply to regenerate all passwords
+ 3. Outputting the new credentials (or confirming access is revoked)
+
+ Use this script AFTER an event ends to immediately revoke participant access,
+ or BEFORE an event to generate fresh credentials.
+
+.PARAMETER TriggerValue
+ The new value for password_rotation_trigger.
+ If not specified, generates one based on current date/time.
+
+.PARAMETER Phase
+ Either "start" (before event) or "end" (after event to revoke access).
+ Default: "end"
+
+.PARAMETER EventName
+ Optional event name for the trigger value.
+ Example: -EventName "workshop-dec" generates "workshop-dec-end-20251129"
+
+.PARAMETER TfVarsPath
+ Path to terraform.tfvars file. Default: current directory.
+
+.PARAMETER AutoApprove
+ Skip terraform apply confirmation prompt.
+
+.PARAMETER SkipApply
+ Only update tfvars file, don't run terraform apply.
+
+.EXAMPLE
+ # After event ends - revoke all access immediately
+ .\rotate-passwords.ps1 -Phase end
+
+.EXAMPLE
+ # Before new event - generate fresh credentials
+ .\rotate-passwords.ps1 -Phase start -EventName "december-workshop"
+
+.EXAMPLE
+ # Custom trigger value
+ .\rotate-passwords.ps1 -TriggerValue "revoked-2025-11-29"
+
+.EXAMPLE
+ # Just update tfvars, apply manually later
+ .\rotate-passwords.ps1 -Phase end -SkipApply
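+
+.EXAMPLE
+    # Non-interactive rotation, e.g. from a scheduled pipeline
+    .\rotate-passwords.ps1 -Phase end -AutoApprove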
+
+.NOTES
+ This script modifies terraform.tfvars and runs terraform apply.
+ The password changes take effect immediately in Azure AD.
+#>
+
+[CmdletBinding()]
+param(
+ [string]$TriggerValue,
+
+ [ValidateSet('start', 'end')]
+ [string]$Phase = 'end',
+
+ [string]$EventName,
+
+ [string]$TfVarsPath = ".",
+
+ [switch]$AutoApprove,
+
+ [switch]$SkipApply
+)
+
+$ErrorActionPreference = 'Stop'
+
+# Resolve tfvars path
+$tfvarsFile = Join-Path (Resolve-Path $TfVarsPath) "terraform.tfvars"
+if (-not (Test-Path $tfvarsFile)) {
+ # Try identity subfolder
+ $tfvarsFile = Join-Path (Resolve-Path $TfVarsPath) "identity/terraform.tfvars"
+}
+if (-not (Test-Path $tfvarsFile)) {
+ throw "terraform.tfvars not found in $TfVarsPath or $TfVarsPath/identity"
+}
+
+$tfvarsDir = Split-Path $tfvarsFile -Parent
+
+# Generate trigger value if not provided
+if (-not $TriggerValue) {
+ $dateSuffix = Get-Date -Format "yyyyMMdd-HHmm"
+ if ($EventName) {
+ $TriggerValue = "$EventName-$Phase-$dateSuffix"
+ } else {
+ $TriggerValue = "$Phase-$dateSuffix"
+ }
+}
+
+Write-Host "`n" -NoNewline
+Write-Host "==============================================================" -ForegroundColor Cyan
+Write-Host "  PASSWORD ROTATION - $(if ($Phase -eq 'end') { 'REVOKE ACCESS' } else { 'NEW CREDENTIALS' })" -ForegroundColor Cyan
+Write-Host "==============================================================" -ForegroundColor Cyan
+Write-Host ""
+
+if ($Phase -eq 'end') {
+    Write-Host "  ⚠️  This will REVOKE ACCESS for all current participants!" -ForegroundColor Yellow
+ Write-Host " All existing passwords will be invalidated immediately." -ForegroundColor Yellow
+} else {
+    Write-Host "  ✅ This will generate FRESH CREDENTIALS for a new event." -ForegroundColor Green
+ Write-Host " New user_credentials.json will be created." -ForegroundColor Green
+}
+
+Write-Host ""
+Write-Host " Trigger value: " -NoNewline
+Write-Host $TriggerValue -ForegroundColor Magenta
+Write-Host " Config file: $tfvarsFile"
+Write-Host ""
+
+# Read current tfvars
+$content = Get-Content $tfvarsFile -Raw
+
+# Check if password_rotation_trigger exists
+if ($content -match 'password_rotation_trigger\s*=\s*"([^"]*)"') {
+ $oldValue = $Matches[1]
+ Write-Host " Current trigger: " -NoNewline
+ Write-Host $oldValue -ForegroundColor DarkGray
+
+ # Replace the value
+ $newContent = $content -replace '(password_rotation_trigger\s*=\s*)"[^"]*"', "`$1`"$TriggerValue`""
+} else {
+ Write-Host " Adding password_rotation_trigger to tfvars..." -ForegroundColor Yellow
+ $newContent = $content + "`n`npassword_rotation_trigger = `"$TriggerValue`"`n"
+}
+
+# Write updated tfvars
+$newContent | Set-Content $tfvarsFile -NoNewline
+Write-Host ""
+Write-Host " ✓ Updated terraform.tfvars" -ForegroundColor Green
+
+if ($SkipApply) {
+ Write-Host ""
+    Write-Host " Skipping terraform apply (re-run without -SkipApply to apply)" -ForegroundColor Yellow
+ Write-Host ""
+ Write-Host " To apply manually, run:" -ForegroundColor Cyan
+ Write-Host " cd $tfvarsDir" -ForegroundColor White
+ Write-Host " terraform apply" -ForegroundColor White
+ Write-Host ""
+ exit 0
+}
+
+# Confirm before apply (unless AutoApprove)
+if (-not $AutoApprove) {
+ Write-Host ""
+ $confirm = Read-Host " Apply now? (yes/no)"
+ if ($confirm -notmatch '^(y|yes)$') {
+ Write-Host ""
+ Write-Host " Aborted. The tfvars file has been updated." -ForegroundColor Yellow
+ Write-Host " Run 'terraform apply' in $tfvarsDir to complete." -ForegroundColor Yellow
+ exit 0
+ }
+}
+
+# Run terraform apply
+Write-Host ""
+Write-Host " Running terraform apply..." -ForegroundColor Cyan
+Write-Host " ---------------------------------------------------------" -ForegroundColor DarkGray
+
+Push-Location $tfvarsDir
+try {
+ $applyArgs = @('apply')
+ if ($AutoApprove) {
+ $applyArgs += '-auto-approve'
+ }
+
+ & terraform @applyArgs
+
+ if ($LASTEXITCODE -eq 0) {
+ Write-Host ""
+        Write-Host " ---------------------------------------------------------" -ForegroundColor DarkGray
+ Write-Host ""
+ if ($Phase -eq 'end') {
+            Write-Host "  ✅ ACCESS REVOKED - All previous passwords are now invalid!" -ForegroundColor Green
+ Write-Host ""
+ Write-Host " Participants from the previous event can no longer log in." -ForegroundColor White
+ } else {
+            Write-Host "  ✅ NEW CREDENTIALS GENERATED!" -ForegroundColor Green
+ Write-Host ""
+ $credFile = Join-Path $tfvarsDir "user_credentials.json"
+ if (Test-Path $credFile) {
+ Write-Host " Credentials saved to: $credFile" -ForegroundColor White
+ Write-Host ""
+ Write-Host " Distribute this file to your new participants." -ForegroundColor Cyan
+ }
+ }
+ } else {
+ Write-Host ""
+        Write-Host " ❌ Terraform apply failed!" -ForegroundColor Red
+ exit 1
+ }
+} finally {
+ Pop-Location
+}
+
+Write-Host ""
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/users.json b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/users.json
new file mode 100644
index 000000000..92058219c
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/users.json
@@ -0,0 +1,52 @@
+[
+ {"given_name": "Peter", "surname": "Parker", "hero_name": "Spider-Man"},
+ {"given_name": "Bruce", "surname": "Wayne", "hero_name": "Batman"},
+ {"given_name": "Diana", "surname": "Prince", "hero_name": "Wonder Woman"},
+ {"given_name": "Clark", "surname": "Kent", "hero_name": "Superman"},
+ {"given_name": "Barry", "surname": "Allen", "hero_name": "The Flash"},
+ {"given_name": "Natasha", "surname": "Romanoff", "hero_name": "Black Widow"},
+ {"given_name": "Tony", "surname": "Stark", "hero_name": "Iron Man"},
+ {"given_name": "Carol", "surname": "Danvers", "hero_name": "Captain Marvel"},
+ {"given_name": "Stephen", "surname": "Strange", "hero_name": "Doctor Strange"},
+ {"given_name": "Wanda", "surname": "Maximoff", "hero_name": "Scarlet Witch"},
+ {"given_name": "T'Challa", "surname": "Udaku", "hero_name": "Black Panther"},
+ {"given_name": "Shuri", "surname": "Udaku", "hero_name": "Shuri"},
+ {"given_name": "Sam", "surname": "Wilson", "hero_name": "Falcon"},
+ {"given_name": "Scott", "surname": "Lang", "hero_name": "Ant-Man"},
+ {"given_name": "Ororo", "surname": "Munroe", "hero_name": "Storm"},
+ {"given_name": "Hal", "surname": "Jordan", "hero_name": "Green Lantern"},
+ {"given_name": "Arthur", "surname": "Curry", "hero_name": "Aquaman"},
+ {"given_name": "Victor", "surname": "Stone", "hero_name": "Cyborg"},
+ {"given_name": "Billy", "surname": "Batson", "hero_name": "Shazam"},
+ {"given_name": "Barbara", "surname": "Gordon", "hero_name": "Batgirl"},
+ {"given_name": "Kamala", "surname": "Khan", "hero_name": "Ms. Marvel"},
+ {"given_name": "Kate", "surname": "Bishop", "hero_name": "Hawkeye"},
+ {"given_name": "Jessica", "surname": "Jones", "hero_name": "Jewel"},
+ {"given_name": "Matt", "surname": "Murdock", "hero_name": "Daredevil"},
+ {"given_name": "Luke", "surname": "Cage", "hero_name": "Power Man"},
+ {"given_name": "Jean", "surname": "Grey", "hero_name": "Phoenix"},
+ {"given_name": "Logan", "surname": "Howlett", "hero_name": "Wolverine"},
+ {"given_name": "Remy", "surname": "LeBeau", "hero_name": "Gambit"},
+ {"given_name": "Raven", "surname": "Darkholme", "hero_name": "Mystique"},
+ {"given_name": "Scott", "surname": "Summers", "hero_name": "Cyclops"},
+ {"given_name": "Charles", "surname": "Xavier", "hero_name": "Professor X"},
+ {"given_name": "Kurt", "surname": "Wagner", "hero_name": "Nightcrawler"},
+ {"given_name": "Kitty", "surname": "Pryde", "hero_name": "Shadowcat"},
+ {"given_name": "Piotr", "surname": "Rasputin", "hero_name": "Colossus"},
+ {"given_name": "Betsy", "surname": "Braddock", "hero_name": "Psylocke"},
+ {"given_name": "Monica", "surname": "Rambeau", "hero_name": "Spectrum"},
+ {"given_name": "Jennifer", "surname": "Walters", "hero_name": "She-Hulk"},
+ {"given_name": "Marc", "surname": "Spector", "hero_name": "Moon Knight"},
+ {"given_name": "Janet", "surname": "Van Dyne", "hero_name": "Wasp"},
+ {"given_name": "Hope", "surname": "Van Dyne", "hero_name": "Wasp (Hope)"},
+ {"given_name": "Miles", "surname": "Morales", "hero_name": "Spider-Man (Miles)"},
+ {"given_name": "Gwen", "surname": "Stacy", "hero_name": "Ghost-Spider"},
+ {"given_name": "Eddie", "surname": "Brock", "hero_name": "Venom"},
+ {"given_name": "Felicia", "surname": "Hardy", "hero_name": "Black Cat"},
+ {"given_name": "Stephen", "surname": "Grant", "hero_name": "Mr. Knight"},
+ {"given_name": "Marcella", "surname": "Fury", "hero_name": "Agent Fury"},
+ {"given_name": "Nick", "surname": "Fury", "hero_name": "Nick Fury"},
+ {"given_name": "Maria", "surname": "Hill", "hero_name": "Maria Hill"},
+ {"given_name": "Phil", "surname": "Coulson", "hero_name": "Agent Coulson"},
+ {"given_name": "Daisy", "surname": "Johnson", "hero_name": "Quake"}
+]
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/variables.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/variables.tf
new file mode 100644
index 000000000..e7f370ca2
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/variables.tf
@@ -0,0 +1,231 @@
+# ===============================================================================
+# Variable Definitions for Oracle on Azure Infrastructure
+# ===============================================================================
+
+variable "microhack_event_name" {
+ description = "Name of the microhack event. Auto-populated from identity/user_credentials.json if not specified."
+ type = string
+ default = null
+}
+
+# variable "prefix" {
+# description = "Prefix applied to resource names and user identifiers"
+# type = string
+# default = "mh"
+# }
+# ===============================================================================
+# Subscription Variables
+# ===============================================================================
+
+variable "odaa_subscription_id" {
+ description = "The Azure subscription ID for ODAA resources (single subscription for all ODAA VNets)"
+ type = string
+}
+
+variable "location" {
+ description = "The location to use for all resources"
+ type = string
+ default = "francecentral"
+
+ validation {
+ condition = contains(["francecentral", "germanywestcentral"], lower(trimspace(var.location)))
+ error_message = "location must be either 'francecentral' or 'germanywestcentral'."
+ }
+}
+
+variable "aks_cidr_base" {
+ description = "The base CIDR block for AKS deployments"
+ type = string
+ default = "10.0.0.0"
+}
+
+variable "aks_service_cidr" {
+ description = "The service CIDR used by all AKS clusters"
+ type = string
+ default = "172.16.0.0/16"
+}
+
+variable "odaa_cidr_base" {
+ description = "The base CIDR block for ODAA deployments"
+ type = string
+ default = "192.168.0.0"
+}
+
+variable "fqdn_odaa_fra" {
+ description = "The fully qualified domain name (FQDN) for the ODAA deployment"
+ type = string
+ default = "adb.eu-frankfurt-1.oraclecloud.com"
+}
+
+variable "fqdn_odaa_app_fra" {
+ description = "The fully qualified domain name (FQDN) for ODAA applications"
+ type = string
+ default = "adb.eu-frankfurt-1.oraclecloudapps.com"
+}
+
+variable "fqdn_odaa_app_par" {
+ description = "The fully qualified domain name (FQDN) for ODAA applications"
+ type = string
+ default = "adb.eu-paris-1.oraclecloudapps.com"
+}
+
+variable "fqdn_odaa_par" {
+ description = "The fully qualified domain name (FQDN) for the ODAA deployment"
+ type = string
+ default = "adb.eu-paris-1.oraclecloud.com"
+}
+
+variable "enabled_odaa_regions" {
+ description = "List of ODAA regions to create Private DNS zones for. Valid values: 'paris', 'frankfurt'"
+ type = list(string)
+ default = ["paris"]
+
+ validation {
+ condition = alltrue([for r in var.enabled_odaa_regions : contains(["paris", "frankfurt"], lower(r))])
+ error_message = "enabled_odaa_regions must only contain 'paris' and/or 'frankfurt'."
+ }
+}
+
+variable "aks_vm_size" {
+ description = "The VM size for AKS nodes"
+ type = string
+ default = "Standard_D4as_v5"
+}
+
+variable "aks_os_disk_type" {
+ description = "OS disk type for AKS node pools (Ephemeral or Managed)"
+ type = string
+ default = "Managed"
+
+ validation {
+ condition = contains(["Ephemeral", "Managed"], var.aks_os_disk_type)
+ error_message = "aks_os_disk_type must be either 'Ephemeral' or 'Managed'."
+ }
+}
+
+variable "oracle_cloud_service_principal_object_id" {
+ description = "Object ID of the Oracle Cloud Infrastructure Console enterprise application's service principal."
+ type = string
+ default = "6240ab05-e243-48b2-9619-c3e3f53c6dca"
+}
+
+variable "oracle_cloud_service_principal_app_role_value" {
+ description = "Optional app role value to assign when granting groups access to the Oracle Cloud service principal. Leave null to use the first available app role."
+ type = string
+ default = null
+}
+
+variable "entra_user_principal_domain" {
+ description = "Domain suffix for Entra user principal names (used by identity module)"
+ type = string
+ default = "cptazure.org"
+}
+
+# ===============================================================================
+# AKS Deployments Configuration
+# ===============================================================================
+
+variable "user_count" {
+ description = "Number of isolated user environments to provision"
+ type = number
+ default = 1
+
+ validation {
+ condition = var.user_count >= 1
+ error_message = "At least one user environment must be provisioned."
+ }
+}
+
+variable "subscription_targets" {
+ description = "Ordered list of subscriptions used for round-robin AKS deployment assignment"
+ type = list(object({
+ subscription_id = string
+ }))
+
+ validation {
+ condition = length(var.subscription_targets) >= 1 && length(var.subscription_targets) <= 5
+ error_message = "Provide between 1 and 5 subscription targets."
+ }
+}
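+
+# Example terraform.tfvars entry (subscription IDs are placeholders):
+#   subscription_targets = [
+#     { subscription_id = "00000000-0000-0000-0000-000000000000" },
+#     { subscription_id = "11111111-1111-1111-1111-111111111111" },
+#   ]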
+
+# ===============================================================================
+# Oracle Database Configuration
+# ===============================================================================
+
+variable "create_oracle_database" {
+ description = "Controls whether the Oracle Autonomous Database resources are provisioned."
+ type = bool
+ default = false
+}
+
+variable "adb_admin_password" {
+ description = "The admin password for the Oracle Autonomous Database (shared across all ODAA deployments)"
+ type = string
+ sensitive = true
+ default = null
+ validation {
+    condition = var.create_oracle_database ? (
+      length(coalesce(var.adb_admin_password, "")) >= 8 &&
+      length(coalesce(var.adb_admin_password, "")) <= 30
+    ) : (
+      trimspace(coalesce(var.adb_admin_password, "")) == ""
+    )
+    error_message = "ADB admin password must be provided (8-30 characters) when the Oracle Autonomous Database is enabled, and must be left unset when it is disabled."
+ }
+}
+
+variable "client_id" {
+ description = "The Client ID (Application ID) for the Service Principal. Required for authentication to Azure and Entra ID."
+ type = string
+
+ validation {
+ condition = var.client_id != null && var.client_id != "" && can(regex("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", var.client_id))
+ error_message = "The client_id must be a valid GUID/UUID format. Please provide the Service Principal's Application ID."
+ }
+}
+
+variable "client_secret" {
+ description = "The Client Secret for the Service Principal. Required for authentication to Azure and Entra ID."
+ type = string
+ sensitive = true
+
+ validation {
+ condition = var.client_secret != null && var.client_secret != "" && length(var.client_secret) > 0
+ error_message = "The client_secret must be provided and cannot be empty. Please provide the Service Principal's client secret."
+ }
+}
+
+# ===============================================================================
+# Tenant Configuration
+# ===============================================================================
+
+variable "tenant_id" {
+ description = "Azure AD tenant ID for service principal authentication. Used across all subscriptions."
+ type = string
+
+ validation {
+ condition = can(regex("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", var.tenant_id))
+ error_message = "The tenant_id must be a valid GUID/UUID format."
+ }
+}
+
+# ===============================================================================
+# Identity Configuration
+# ===============================================================================
+
+variable "identity_file_path" {
+ description = <<-EOT
+ Path to the user_credentials.json file generated by the identity/ Terraform
+ configuration. This file contains user object IDs, UPNs, group information,
+ and passwords.
+
+ Defaults to 'user_credentials.json' in the terraform root folder.
+
+ Workflow:
+ 1. Run 'terraform apply' in identity/ folder to create users
+ 2. Run 'terraform apply' here to deploy infrastructure
+ EOT
+ type = string
+ default = "user_credentials.json"
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/versions.tf b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/versions.tf
new file mode 100644
index 000000000..e189ad1cc
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/infra/terraform/versions.tf
@@ -0,0 +1,49 @@
+# ===============================================================================
+# Terraform Versions Configuration
+# ===============================================================================
+# This file defines the required Terraform version and provider constraints
+# for the Oracle on Azure infrastructure deployment.
+# ===============================================================================
+
+terraform {
+  required_version = ">= 1.9.0" # cross-variable validation (adb_admin_password references create_oracle_database) requires Terraform 1.9+
+
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = "~> 4.0"
+ }
+ azuread = {
+ source = "hashicorp/azuread"
+ version = "~> 2.0"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = "~> 3.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = "~> 3.0"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = "~> 2.5"
+ }
+ azapi = {
+ source = "azure/azapi"
+ version = "~> 2.0"
+ }
+ time = {
+ source = "hashicorp/time"
+ version = "~> 0.12"
+ }
+ }
+
+ # Uncomment and configure the backend for remote state storage
+ # backend "azurerm" {
+ # resource_group_name = "rg-terraform-state"
+ # storage_account_name = "saterraformstate"
+ # container_name = "tfstate"
+ # key = "oracle-on-azure.tfstate"
+ # }
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Check-AKSIngress.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Check-AKSIngress.ps1
new file mode 100644
index 000000000..7c51a6cd3
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Check-AKSIngress.ps1
@@ -0,0 +1,83 @@
+# Check AKS Ingress Configuration Script
+# This script helps diagnose your current AKS ingress setup
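+#
+# Example (resource names are placeholders; the defaults target the "odaa" RG/cluster):
+#   .\Check-AKSIngress.ps1 -ResourceGroup "odaa-team1" -AksName "ODAAteam1"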
+
+param(
+ [Parameter(Mandatory=$false)]
+ [string]$ResourceGroup = "odaa",
+
+ [Parameter(Mandatory=$false)]
+ [string]$AksName = "odaa",
+
+ [Parameter(Mandatory=$false)]
+ [string]$Namespace = "microhacks"
+)
+
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host "AKS Ingress Configuration Analysis" -ForegroundColor Cyan
+Write-Host "========================================" -ForegroundColor Cyan
+
+# Step 1: Check AKS addons
+Write-Host "`n1. Checking AKS addons..." -ForegroundColor Green
+$aksAddons = az aks show --name $AksName --resource-group $ResourceGroup --query "addonProfiles" -o json | ConvertFrom-Json
+
+if ($aksAddons.ingressApplicationGateway.enabled -eq $true) {
+    Write-Host "✅ Application Gateway Ingress Controller (AGIC) is enabled" -ForegroundColor Green
+ $agicConfig = $aksAddons.ingressApplicationGateway.config
+ Write-Host " Application Gateway: $($agicConfig.applicationGatewayName)" -ForegroundColor Yellow
+ Write-Host " Resource Group: $($agicConfig.applicationGatewayResourceGroup)" -ForegroundColor Yellow
+} else {
+    Write-Host "❌ Application Gateway Ingress Controller (AGIC) is NOT enabled" -ForegroundColor Red
+}
+
+# Step 2: Check for ingress controllers
+Write-Host "`n2. Checking for ingress controllers in cluster..." -ForegroundColor Green
+kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx -o wide
+kubectl get pods --all-namespaces -l app=ingress-appgw -o wide
+
+# Step 3: Check ingress classes
+Write-Host "`n3. Available ingress classes..." -ForegroundColor Green
+kubectl get ingressclass
+
+# Step 4: Check services of type LoadBalancer
+Write-Host "`n4. LoadBalancer services (public IPs)..." -ForegroundColor Green
+kubectl get services --all-namespaces -o wide | Where-Object { $_ -match "LoadBalancer" }
+
+# Step 5: Check existing ingress resources
+Write-Host "`n5. Existing ingress resources..." -ForegroundColor Green
+kubectl get ingress --all-namespaces -o wide
+
+# Step 6: Check NSG rules for AKS subnet
+Write-Host "`n6. Checking NSG rules for AKS subnet..." -ForegroundColor Green
+$aksDetails = az aks show --name $AksName --resource-group $ResourceGroup -o json | ConvertFrom-Json
+$aksSubnetId = $aksDetails.agentPoolProfiles[0].vnetSubnetId
+
+if ($aksSubnetId) {
+ $subnetDetails = az network vnet subnet show --ids $aksSubnetId -o json | ConvertFrom-Json
+ $nsgId = $subnetDetails.networkSecurityGroup.id
+
+ if ($nsgId) {
+ $nsgName = $nsgId.Split('/')[-1]
+ $nsgResourceGroup = $nsgId.Split('/')[4]
+
+ Write-Host " NSG Name: $nsgName" -ForegroundColor Yellow
+ Write-Host " NSG Resource Group: $nsgResourceGroup" -ForegroundColor Yellow
+
+ Write-Host "`n Inbound rules allowing HTTP/HTTPS:" -ForegroundColor Cyan
+ az network nsg rule list --nsg-name $nsgName --resource-group $nsgResourceGroup --query "[?direction=='Inbound' && (destinationPortRange=='80' || destinationPortRange=='443' || destinationPortRange=='*')].{Name:name, Priority:priority, Source:sourceAddressPrefix, DestPort:destinationPortRange, Access:access}" -o table
+ } else {
+        Write-Host "  ❌ No NSG associated with AKS subnet" -ForegroundColor Red
+ }
+} else {
+    Write-Host "  ❌ Could not determine AKS subnet" -ForegroundColor Red
+}
+
+Write-Host "`n========================================" -ForegroundColor Cyan
+Write-Host "Analysis Complete" -ForegroundColor Cyan
+Write-Host "========================================" -ForegroundColor Cyan
+
+Write-Host "`nRecommendations:" -ForegroundColor Yellow
+Write-Host "• If no ingress controller is found, install one:" -ForegroundColor White
+Write-Host "  - AGIC (recommended): az aks enable-addons --addons ingress-appgw" -ForegroundColor White
+Write-Host "  - NGINX: helm install ingress-nginx ingress-nginx/ingress-nginx" -ForegroundColor White
+Write-Host "• Ensure NSG allows inbound traffic on ports 80/443" -ForegroundColor White
+Write-Host "• Create ingress resources to route traffic to your services" -ForegroundColor White
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/DEPLOYMENT-SCRIPTS-README.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/DEPLOYMENT-SCRIPTS-README.md
new file mode 100644
index 000000000..6a8b1127a
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/DEPLOYMENT-SCRIPTS-README.md
@@ -0,0 +1,159 @@
+# ODAA MH Environment Deployment Scripts
+
+This directory contains PowerShell scripts to automate the deployment of ODAA MH environments on Azure, following the instructions from the original README.md.
+
+## Scripts Overview
+
+### 1. Deploy-ODAAMHEnv.ps1
+
+Main deployment script that automates all the manual steps from the original README.md:
+- Creates Azure Resource Group
+- Deploys AKS cluster using Bicep template
+- Installs NGINX Ingress Controller
+- Configures health probes and external access
+
+### 2. Deploy-MultipleEnvironments.ps1
+
+Batch deployment script for creating multiple team environments simultaneously with parallel processing capabilities.
+
+### 3. Manage-Environments.ps1
+
+Environment management script for cleanup, status checking, and maintenance operations.
+
+## Prerequisites
+
+Before running these scripts, ensure you have the following tools installed:
+- **Azure CLI** - https://docs.microsoft.com/en-us/cli/azure/install-azure-cli
+- **kubectl** - https://kubernetes.io/docs/tasks/tools/install-kubectl/
+- **helm** - https://helm.sh/docs/intro/install/
+- **jq** - https://stedolan.github.io/jq/download/
+- **PowerShell 5.1 or later**
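+
+A quick way to confirm the tools are on your PATH (a minimal sketch):
+
+```powershell
+foreach ($tool in "az", "kubectl", "helm", "jq") {
+    if (-not (Get-Command $tool -ErrorAction SilentlyContinue)) { Write-Warning "$tool not found on PATH" }
+}
+```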
+
+## Usage Examples
+
+### Single Environment Deployment
+
+```powershell
+# Deploy a single team environment
+.\Deploy-ODAAMHEnv.ps1 -ResourceGroupName "odaa-team1" -Prefix "ODAA" -Postfix "team1" -Location "germanywestcentral"
+
+# Deploy with custom subscription
+.\Deploy-ODAAMHEnv.ps1 -ResourceGroupName "odaa-prod" -Prefix "ODAA" -Postfix "prod" -Location "westeurope" -SubscriptionName "my-subscription"
+
+# Skip login (useful when already authenticated)
+.\Deploy-ODAAMHEnv.ps1 -ResourceGroupName "odaa-dev" -Prefix "ODAA" -Postfix "dev" -SkipLogin
+```
+
+### Multiple Environment Deployment
+
+```powershell
+# Deploy 5 predefined team environments
+.\Deploy-MultipleEnvironments.ps1 -PredefinedTeams 5 -BaseResourceGroupName "odaa" -BasePrefix "ODAA" -Location "germanywestcentral"
+
+# Deploy from configuration file
+.\Deploy-MultipleEnvironments.ps1 -ConfigFile ".\team-configs.csv"
+
+# Deploy with custom parallel job limit
+.\Deploy-MultipleEnvironments.ps1 -PredefinedTeams 3 -BaseResourceGroupName "odaa" -BasePrefix "ODAA" -MaxParallelJobs 2
+```
+
+### Environment Management
+
+```powershell
+# List all environments
+.\Manage-Environments.ps1 -Action List -ResourceGroupPattern "odaa-*"
+
+# Check status of specific environments
+.\Manage-Environments.ps1 -Action Status -ResourceGroupNames @("odaa-team1", "odaa-team2")
+
+# Clean up all team environments
+.\Manage-Environments.ps1 -Action Cleanup -ResourceGroupPattern "odaa-team*" -Confirm
+```
+
+## Configuration File Format
+
+When using the batch deployment script with a configuration file, use the following CSV format:
+
+```csv
+ResourceGroupName,Prefix,Postfix,Location
+odaa-team1,ODAA,team1,germanywestcentral
+odaa-team2,ODAA,team2,germanywestcentral
+odaa-team3,ODAA,team3,westeurope
+odaa-workshop,ODAA,ws,northeurope
+```
+
+## Script Parameters
+
+### Deploy-ODAAMHEnv.ps1
+
+| Parameter | Required | Default | Description |
+|-----------|----------|---------|-------------|
+| ResourceGroupName | Yes | - | Name of the Azure resource group |
+| Prefix | Yes | - | Prefix for Azure resource names |
+| Postfix | No | "" | Postfix for Azure resource names |
+| Location | No | "germanywestcentral" | Azure region |
+| SubscriptionName | No | "sub-cptdx-01" | Azure subscription name |
+| SkipPrerequisites | No | False | Skip prerequisite checks |
+| SkipLogin | No | False | Skip Azure login |
+
+### Deploy-MultipleEnvironments.ps1
+
+| Parameter | Required | Default | Description |
+|-----------|----------|---------|-------------|
+| ConfigFile | Yes* | - | Path to CSV configuration file |
+| PredefinedTeams | Yes* | - | Number of team environments (1-10) |
+| BaseResourceGroupName | Yes* | - | Base name for resource groups |
+| BasePrefix | Yes* | - | Base prefix for resources |
+| Location | No | "germanywestcentral" | Azure region |
+| SubscriptionName | No | "sub-cptdx-01" | Azure subscription name |
+| MaxParallelJobs | No | 3 | Maximum parallel deployments |
+
+*Provide either ConfigFile or the full PredefinedTeams parameter set (PredefinedTeams, BaseResourceGroupName, BasePrefix)
+
+## Important Notes
+
+1. **VNet CIDR Configuration**: After deployment, ensure that the CIDR of the created VNet is added to the Oracle NSG as mentioned in the original instructions.
+
+2. **External IP Assignment**: The NGINX ingress controller external IP may take a few minutes to be assigned. The script will wait and retry, but you can check manually later if needed:
+ ```powershell
+ kubectl get service -n ingress-nginx
+ ```
+
+3. **Resource Naming**: The scripts use the pattern `{Prefix}{Postfix}` for AKS cluster names and other resources.
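+   For example, `-Prefix "ODAA" -Postfix "team1"` yields an AKS cluster named `ODAAteam1`.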
+
+4. **Parallel Deployments**: When using the batch deployment script, be mindful of Azure subscription limits and quotas.
+
+5. **Authentication**: The scripts handle Azure authentication automatically, but you can skip the login step if you're already authenticated.
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Prerequisites Not Found**: Install the required tools (Azure CLI, kubectl, helm, jq) before running the scripts.
+
+2. **Authentication Errors**: Ensure you have proper permissions in the Azure subscription and that the subscription name is correct.
+
+3. **Resource Quota Limits**: Check your Azure subscription quotas if deployments fail due to resource limits.
+
+4. **Bicep Template Not Found**: Ensure you're running the scripts from the resources directory where the `infra/bicep/main.bicep` file is located.
+
+### Getting Help
+
+- Use the `-Verbose` parameter for detailed execution logs
+- Check the Azure portal for resource deployment status
+- Review AKS cluster logs if Kubernetes operations fail
+- Verify network connectivity and firewall settings
+
+## Cleanup
+
+To clean up deployed resources:
+
+```powershell
+# Delete a single resource group
+az group delete --name "odaa-team1" --yes --no-wait
+
+# Use the management script for bulk cleanup
+.\Manage-Environments.ps1 -Action Cleanup -ResourceGroupPattern "odaa-*"
+```
+
+**Warning**: Resource group deletion is irreversible and will remove all contained resources.
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-MultipleEnvironments.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-MultipleEnvironments.ps1
new file mode 100644
index 000000000..32dffc940
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-MultipleEnvironments.ps1
@@ -0,0 +1,360 @@
+#Requires -Version 5.1
+<#
+.SYNOPSIS
+ Batch deployment script for multiple ODAA MH environments
+
+.DESCRIPTION
+ This script allows you to deploy multiple ODAA MH environments at once.
+ It reads configuration from a CSV file or uses predefined configurations.
+
+.PARAMETER ConfigFile
+ Path to CSV configuration file with columns: ResourceGroupName, Prefix, Postfix, Location
+
+.PARAMETER PredefinedTeams
+ Number of predefined team environments to create (1-10)
+
+.PARAMETER BaseResourceGroupName
+ Base name for resource groups when using predefined teams
+
+.PARAMETER BasePrefix
+ Base prefix for resources when using predefined teams
+
+.PARAMETER Location
+ Azure region for all deployments
+
+.PARAMETER SubscriptionName
+ Azure subscription name
+
+.PARAMETER MaxParallelJobs
+ Maximum number of parallel deployments (default: 3)
+
+.EXAMPLE
+ .\Deploy-MultipleEnvironments.ps1 -PredefinedTeams 3 -BaseResourceGroupName "odaa" -BasePrefix "ODAA" -Location "germanywestcentral"
+
+.EXAMPLE
+ .\Deploy-MultipleEnvironments.ps1 -ConfigFile ".\team-configs.csv"
+
+.NOTES
+ Author: Generated for ODAA MH Workshop
+ This script creates multiple environments for workshop teams
+#>
+
+[CmdletBinding(DefaultParameterSetName = "Predefined")]
+param(
+ [Parameter(ParameterSetName = "ConfigFile", Mandatory = $true)]
+ [string]$ConfigFile,
+
+ [Parameter(ParameterSetName = "Predefined", Mandatory = $true)]
+ [ValidateRange(1, 10)]
+ [int]$PredefinedTeams,
+
+ [Parameter(ParameterSetName = "Predefined", Mandatory = $true)]
+ [string]$BaseResourceGroupName,
+
+ [Parameter(ParameterSetName = "Predefined", Mandatory = $true)]
+ [string]$BasePrefix,
+
+ [Parameter(Mandatory = $false)]
+ [string]$Location = "germanywestcentral",
+
+ [Parameter(Mandatory = $false)]
+ [string]$SubscriptionName = "sub-cptdx-01",
+
+ [Parameter(Mandatory = $false)]
+ [ValidateRange(1, 5)]
+ [int]$MaxParallelJobs = 3
+)
+
+# Function to write colored output
+function Write-ColorOutput {
+ param(
+ [string]$Message,
+ [string]$Color = "White"
+ )
+ Write-Host $Message -ForegroundColor $Color
+}
+
+# Function to create sample configuration file
+function New-SampleConfigFile {
+ $sampleFile = ".\sample-team-configs.csv"
+
+ $sampleContent = @"
+ResourceGroupName,Prefix,Postfix,Location
+odaa-team1,ODAA,team1,germanywestcentral
+odaa-team2,ODAA,team2,germanywestcentral
+odaa-team3,ODAA,team3,germanywestcentral
+odaa-team4,ODAA,team4,westeurope
+odaa-team5,ODAA,team5,westeurope
+"@
+
+ $sampleContent | Out-File -FilePath $sampleFile -Encoding UTF8
+ Write-ColorOutput "Sample configuration file created: $sampleFile" "Green"
+ Write-ColorOutput "Edit this file with your desired configurations and use -ConfigFile parameter" "Yellow"
+}
+
+# Function to get configurations
+function Get-DeploymentConfigurations {
+ if ($PSCmdlet.ParameterSetName -eq "ConfigFile") {
+ if (-not (Test-Path $ConfigFile)) {
+ Write-ColorOutput "Configuration file not found: $ConfigFile" "Red"
+ Write-ColorOutput "Creating a sample configuration file..." "Yellow"
+ New-SampleConfigFile
+ exit 1
+ }
+
+ try {
+ $configs = Import-Csv $ConfigFile
+ Write-ColorOutput "Loaded $($configs.Count) configurations from $ConfigFile" "Green"
+ return $configs
+ }
+ catch {
+ Write-ColorOutput "Failed to read configuration file: $_" "Red"
+ exit 1
+ }
+ }
+ else {
+ # Generate predefined team configurations
+ $configs = @()
+
+ for ($i = 1; $i -le $PredefinedTeams; $i++) {
+ $configs += [PSCustomObject]@{
+ ResourceGroupName = "$BaseResourceGroupName-team$i"
+ Prefix = $BasePrefix
+ Postfix = "team$i"
+ Location = $Location
+ }
+ }
+
+ Write-ColorOutput "Generated $($configs.Count) predefined team configurations" "Green"
+ return $configs
+ }
+}
+
+# Function to deploy single environment
+function Deploy-SingleEnvironment {
+ param(
+ [PSCustomObject]$Config,
+ [string]$SubscriptionName,
+ [int]$JobNumber
+ )
+
+ $jobName = "Deploy-Team-$($Config.Postfix)"
+ Write-ColorOutput "[$jobName] Starting deployment..." "Cyan"
+
+ $scriptPath = ".\Deploy-ODAAMHEnv.ps1"
+
+ if (-not (Test-Path $scriptPath)) {
+ Write-ColorOutput "[$jobName] Main deployment script not found: $scriptPath" "Red"
+ return $false
+ }
+
+ try {
+ $params = @{
+ ResourceGroupName = $Config.ResourceGroupName
+ Prefix = $Config.Prefix
+ Postfix = $Config.Postfix
+ Location = $Config.Location
+ SubscriptionName = $SubscriptionName
+ SkipLogin = $true
+ SkipPrerequisites = $true
+ }
+
+ # Execute deployment script
+ & $scriptPath @params
+
+ Write-ColorOutput "[$jobName] Deployment completed successfully!" "Green"
+ return $true
+ }
+ catch {
+ Write-ColorOutput "[$jobName] Deployment failed: $_" "Red"
+ return $false
+ }
+}
+
+# Function to display final summary
+function Show-FinalSummary {
+ param(
+ [array]$Results,
+ [array]$Configs
+ )
+
+    Write-ColorOutput ("`n" + "=" * 80) "Green"
+    Write-ColorOutput "BATCH DEPLOYMENT SUMMARY" "Green"
+    Write-ColorOutput ("=" * 80) "Green"
+
+ $successful = $Results | Where-Object { $_.Success -eq $true }
+ $failed = $Results | Where-Object { $_.Success -eq $false }
+
+ Write-ColorOutput "Total Deployments: $($Results.Count)" "White"
+ Write-ColorOutput "Successful: $($successful.Count)" "Green"
+ Write-ColorOutput "Failed: $($failed.Count)" "Red"
+
+ if ($successful.Count -gt 0) {
+ Write-ColorOutput "`nSuccessful Deployments:" "Green"
+ foreach ($result in $successful) {
+            Write-ColorOutput "  ✓ $($result.Config.ResourceGroupName) ($($result.Config.Postfix))" "Green"
+ }
+ }
+
+ if ($failed.Count -gt 0) {
+ Write-ColorOutput "`nFailed Deployments:" "Red"
+ foreach ($result in $failed) {
+            Write-ColorOutput "  ✗ $($result.Config.ResourceGroupName) ($($result.Config.Postfix))" "Red"
+ }
+ }
+
+ Write-ColorOutput "`nNext Steps:" "Yellow"
+ Write-ColorOutput "1. Verify all AKS clusters are accessible" "White"
+ Write-ColorOutput "2. Check NGINX ingress controller external IPs" "White"
+ Write-ColorOutput "3. Add VNet CIDRs to Oracle NSG" "White"
+ Write-ColorOutput "4. Distribute access credentials to teams" "White"
+
+    Write-ColorOutput ("=" * 80) "Green"
+}
+
+# Main execution
+function Main {
+ $startTime = Get-Date
+
+ Write-ColorOutput "Starting Batch ODAA MH Environment Deployment" "Green"
+ Write-ColorOutput "Subscription: $SubscriptionName" "White"
+ Write-ColorOutput "Max Parallel Jobs: $MaxParallelJobs" "White"
+ Write-ColorOutput ""
+
+ # Get deployment configurations
+ $configs = Get-DeploymentConfigurations
+
+ if ($configs.Count -eq 0) {
+ Write-ColorOutput "No configurations found to deploy" "Red"
+ exit 1
+ }
+
+ # Login to Azure once
+ Write-ColorOutput "Logging into Azure..." "Yellow"
+ try {
+ az login
+ az account set -s $SubscriptionName
+ Write-ColorOutput "Successfully logged into Azure!" "Green"
+ }
+ catch {
+ Write-ColorOutput "Failed to login to Azure: $_" "Red"
+ exit 1
+ }
+
+ # Display configurations to be deployed
+ Write-ColorOutput "`nConfigurations to deploy:" "Cyan"
+ $configs | Format-Table -AutoSize
+
+ $confirmation = Read-Host "Do you want to proceed with these deployments? (y/N)"
+ if ($confirmation -notmatch "^[Yy]") {
+ Write-ColorOutput "Deployment cancelled by user" "Yellow"
+ exit 0
+ }
+
+ # Execute deployments
+ $results = @()
+ $jobs = @()
+ $jobIndex = 0
+
+ foreach ($config in $configs) {
+ # Wait if we have reached max parallel jobs
+ while ($jobs.Count -ge $MaxParallelJobs) {
+ $completedJobs = $jobs | Where-Object { $_.State -eq "Completed" -or $_.State -eq "Failed" -or $_.State -eq "Stopped" }
+
+ if ($completedJobs.Count -gt 0) {
+ foreach ($job in $completedJobs) {
+ $jobResult = Receive-Job $job -Wait
+ $success = $job.State -eq "Completed"
+
+ $results += [PSCustomObject]@{
+ Config = $job.Config
+ Success = $success
+ JobName = $job.Name
+ }
+
+ Remove-Job $job
+ $jobs = $jobs | Where-Object { $_.Id -ne $job.Id }
+ }
+ }
+ else {
+ Start-Sleep 10
+ }
+ }
+
+ # Start new job
+ $jobIndex++
+ $jobName = "DeployJob-$jobIndex-$($config.Postfix)"
+
+ $job = Start-Job -Name $jobName -ScriptBlock {
+ param($Config, $SubscriptionName, $JobNumber, $ScriptPath)
+
+ try {
+ $params = @{
+ ResourceGroupName = $Config.ResourceGroupName
+ Prefix = $Config.Prefix
+ Postfix = $Config.Postfix
+ Location = $Config.Location
+ SubscriptionName = $SubscriptionName
+ SkipLogin = $true
+ SkipPrerequisites = $true
+ }
+
+ & $ScriptPath @params
+ return $true
+ }
+ catch {
+ Write-Error "Deployment failed: $_"
+ return $false
+ }
+ } -ArgumentList $config, $SubscriptionName, $jobIndex, ".\Deploy-ODAAMHEnv.ps1"
+
+ # Add config reference to job for later use
+ $job | Add-Member -MemberType NoteProperty -Name "Config" -Value $config
+ $jobs += $job
+
+ Write-ColorOutput "Started job: $jobName for $($config.ResourceGroupName)" "Cyan"
+ }
+
+ # Wait for remaining jobs to complete
+ Write-ColorOutput "Waiting for remaining deployments to complete..." "Yellow"
+
+ while ($jobs.Count -gt 0) {
+ $completedJobs = $jobs | Where-Object { $_.State -eq "Completed" -or $_.State -eq "Failed" -or $_.State -eq "Stopped" }
+
+ if ($completedJobs.Count -gt 0) {
+ foreach ($job in $completedJobs) {
+ $jobResult = Receive-Job $job -Wait
+ $success = $job.State -eq "Completed"
+
+ $results += [PSCustomObject]@{
+ Config = $job.Config
+ Success = $success
+ JobName = $job.Name
+ }
+
+ Remove-Job $job
+ $jobs = $jobs | Where-Object { $_.Id -ne $job.Id }
+ }
+ }
+ else {
+ Start-Sleep 10
+ }
+ }
+
+ # Show final summary
+ $endTime = Get-Date
+ $duration = $endTime - $startTime
+
+ Show-FinalSummary -Results $results -Configs $configs
+ Write-ColorOutput "`nBatch deployment completed in $($duration.ToString('hh\:mm\:ss'))" "Green"
+}
+
+# Execute main function
+try {
+ Main
+}
+catch {
+ Write-ColorOutput "Batch deployment failed: $_" "Red"
+ Write-ColorOutput "Stack trace: $($_.ScriptStackTrace)" "Red"
+ exit 1
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-ODAAMHEnv.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-ODAAMHEnv.ps1
new file mode 100644
index 000000000..3af5603b9
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-ODAAMHEnv.ps1
@@ -0,0 +1,399 @@
+#Requires -Version 5.1
+<#
+.SYNOPSIS
+ Deploys ODAA MH environment with Azure Kubernetes Service (AKS) and NGINX Ingress Controller
+
+.DESCRIPTION
+ This script automates the deployment of Azure resources for ODAA MH workshop environment.
+ It creates a resource group, deploys AKS cluster using Bicep template, and installs NGINX ingress controller.
+ The script allows multiple deployments with different names using configurable parameters.
+
+.PARAMETER ResourceGroupName
+ Name of the Azure resource group to create
+
+.PARAMETER Prefix
+ Prefix for naming Azure resources
+
+.PARAMETER Postfix
+ Postfix for naming Azure resources (useful for creating multiple environments)
+
+.PARAMETER Location
+ Azure region where resources will be deployed
+
+.PARAMETER SubscriptionName
+ Azure subscription name to use for deployment
+
+.PARAMETER SkipPrerequisites
+ Skip prerequisite checks (Azure CLI, kubectl, helm, jq)
+
+.PARAMETER SkipLogin
+ Skip Azure login process
+
+.EXAMPLE
+ .\Deploy-ODAAMHEnv.ps1 -ResourceGroupName "odaa-team1" -Prefix "ODAA" -Postfix "team1" -Location "germanywestcentral"
+
+.EXAMPLE
+ .\Deploy-ODAAMHEnv.ps1 -ResourceGroupName "odaa-team2" -Prefix "ODAA" -Postfix "team2" -Location "germanywestcentral" -SkipLogin
+
+.NOTES
+ Author: Generated for ODAA MH Workshop
+ Prerequisites: Azure CLI, kubectl, helm, jq
+ Note: Scripts originally designed for bash but adapted for PowerShell
+#>
+
+[CmdletBinding()]
+param(
+ [Parameter(Mandatory = $true)]
+ [string]$ResourceGroupName,
+
+ [Parameter(Mandatory = $true)]
+ [string]$Prefix,
+
+ [Parameter(Mandatory = $false)]
+ [string]$Postfix = "",
+
+ [Parameter(Mandatory = $false)]
+ [string]$Location = "germanywestcentral",
+
+ [Parameter(Mandatory = $false)]
+ [string]$SubscriptionName = "sub-cptdx-01",
+
+ [Parameter(Mandatory = $false)]
+ [switch]$SkipPrerequisites,
+
+ [Parameter(Mandatory = $false)]
+ [switch]$SkipLogin
+)
+
+# Global variables
+$ErrorActionPreference = "Stop"
+$ProgressPreference = "Continue"
+
+# Function to write colored output
+function Write-ColorOutput {
+ param(
+ [string]$Message,
+ [string]$Color = "White"
+ )
+ Write-Host $Message -ForegroundColor $Color
+}
+
+# Function to check if a command exists
+function Test-Command {
+ param([string]$Command)
+
+ try {
+ Get-Command $Command -ErrorAction Stop | Out-Null
+ return $true
+ }
+ catch {
+ return $false
+ }
+}
+
+# Function to check prerequisites
+function Test-Prerequisites {
+ Write-ColorOutput "Checking prerequisites..." "Yellow"
+
+ $prerequisites = @(
+ @{ Name = "Azure CLI"; Command = "az" },
+ @{ Name = "kubectl"; Command = "kubectl" },
+ @{ Name = "helm"; Command = "helm" },
+ @{ Name = "jq"; Command = "jq" }
+ )
+
+ $missing = @()
+
+ foreach ($prereq in $prerequisites) {
+ if (Test-Command $prereq.Command) {
+ Write-ColorOutput "β $($prereq.Name) is installed" "Green"
+ }
+ else {
+ Write-ColorOutput "β $($prereq.Name) is NOT installed" "Red"
+ $missing += $prereq.Name
+ }
+ }
+
+ if ($missing.Count -gt 0) {
+ Write-ColorOutput "Missing prerequisites: $($missing -join ', ')" "Red"
+ Write-ColorOutput "Please install the missing tools before running this script." "Red"
+
+ Write-ColorOutput "`nInstallation instructions:" "Yellow"
+ Write-ColorOutput "- Azure CLI: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli" "Cyan"
+ Write-ColorOutput "- kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/" "Cyan"
+ Write-ColorOutput "- helm: https://helm.sh/docs/intro/install/" "Cyan"
+ Write-ColorOutput "- jq: https://stedolan.github.io/jq/download/" "Cyan"
+
+ exit 1
+ }
+
+ Write-ColorOutput "All prerequisites are installed!" "Green"
+}
+
+# Function to login to Azure
+function Connect-AzureAccount {
+ Write-ColorOutput "Logging into Azure..." "Yellow"
+
+ try {
+ # Login with device code
+ az login
+
+ # Show current account
+ Write-ColorOutput "Current account information:" "Cyan"
+ az account show
+
+ # Set subscription
+ Write-ColorOutput "Setting subscription to: $SubscriptionName" "Yellow"
+ az account set -s $SubscriptionName
+
+ Write-ColorOutput "Successfully logged into Azure!" "Green"
+ }
+ catch {
+ Write-ColorOutput "Failed to login to Azure: $_" "Red"
+ exit 1
+ }
+}
+
+# Function to create Azure Resource Group
+function New-AzureResourceGroup {
+ param(
+ [string]$Name,
+ [string]$Location
+ )
+
+ Write-ColorOutput "Creating Azure Resource Group: $Name in $Location..." "Yellow"
+
+ try {
+ az group create -n $Name -l $Location
+ Write-ColorOutput "β Resource Group '$Name' created successfully!" "Green"
+ }
+ catch {
+ Write-ColorOutput "Failed to create resource group: $_" "Red"
+ exit 1
+ }
+}
+
+# Function to deploy Azure resources using Bicep
+function Deploy-AzureResources {
+ param(
+ [string]$ResourceGroupName,
+ [string]$Prefix,
+ [string]$Postfix,
+ [string]$Location
+ )
+
+ Write-ColorOutput "Deploying Azure resources using Bicep template..." "Yellow"
+
+ $bicepFile = "./infra/bicep/main.bicep"
+
+ if (-not (Test-Path $bicepFile)) {
+ Write-ColorOutput "Bicep file not found: $bicepFile" "Red"
+ Write-ColorOutput "Please make sure you're running this script from the resources directory." "Red"
+ exit 1
+ }
+
+ try {
+ az deployment group create -n $Prefix -g $ResourceGroupName -f $bicepFile -p location=$Location aksName=$Prefix postfix=$Postfix
+ Write-ColorOutput "β Azure resources deployed successfully!" "Green"
+
+ # List created resources
+ Write-ColorOutput "`nListing created resources:" "Cyan"
+ az resource list -g $ResourceGroupName -o table --query "[].{Name:name, Type:type}"
+
+ }
+ catch {
+ Write-ColorOutput "Failed to deploy Azure resources: $_" "Red"
+ exit 1
+ }
+}
+
+# Function to connect to AKS cluster
+function Connect-AKSCluster {
+ param(
+ [string]$ResourceGroupName,
+ [string]$AksName
+ )
+
+ Write-ColorOutput "Connecting to AKS cluster: $AksName..." "Yellow"
+
+ try {
+ # Get AKS credentials
+ az aks get-credentials -g $ResourceGroupName -n $AksName --overwrite-existing
+
+ # Verify connection by listing namespaces
+ Write-ColorOutput "Verifying AKS connection - listing namespaces:" "Cyan"
+ kubectl get namespaces
+
+ Write-ColorOutput "β Successfully connected to AKS cluster!" "Green"
+ }
+ catch {
+ Write-ColorOutput "Failed to connect to AKS cluster: $_" "Red"
+ exit 1
+ }
+}
+
+# Function to install NGINX Ingress Controller
+function Install-NginxIngressController {
+ Write-ColorOutput "Installing NGINX Ingress Controller..." "Yellow"
+
+ try {
+ # Add helm repository
+ Write-ColorOutput "Adding ingress-nginx helm repository..." "Cyan"
+ helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+ helm repo update
+
+ # Create namespace
+ Write-ColorOutput "Creating ingress-nginx namespace..." "Cyan"
+ kubectl create namespace ingress-nginx --dry-run=client -o yaml | kubectl apply -f -
+
+ # Install NGINX ingress controller
+ Write-ColorOutput "Installing NGINX ingress controller..." "Cyan"
+ helm install nginx-quick ingress-nginx/ingress-nginx -n ingress-nginx
+
+ # Wait for deployment to be ready
+ Write-ColorOutput "Waiting for NGINX controller to be ready..." "Yellow"
+ kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=300s
+
+ # Patch health probe
+ Write-ColorOutput "Patching health probe..." "Cyan"
+ kubectl patch service nginx-quick-ingress-nginx-controller -n ingress-nginx -p '{\"metadata\":{\"annotations\":{\"service.beta.kubernetes.io/azure-load-balancer-health-probe-request-path\":\"/healthz\"}}}'
+
+ # Verify annotation
+ Write-ColorOutput "Verifying health probe annotation:" "Cyan"
+ kubectl get service nginx-quick-ingress-nginx-controller -n ingress-nginx -o jsonpath='{.metadata.annotations}' | jq
+
+ # Show service details
+ Write-ColorOutput "NGINX Ingress Controller service details:" "Cyan"
+ kubectl get service --namespace ingress-nginx nginx-quick-ingress-nginx-controller --output wide
+
+ # Get external IP
+ Write-ColorOutput "Getting external IP of NGINX controller..." "Yellow"
+ $maxAttempts = 10
+ $attempt = 1
+
+ do {
+ Write-ColorOutput "Attempt $attempt/$maxAttempts - Waiting for external IP..." "Yellow"
+ $externalIP = kubectl get service -n ingress-nginx -o jsonpath='{range .items[*]} {.status.loadBalancer.ingress[*].ip} {"\n"} {end}'
+
+ if ($externalIP -and $externalIP.Trim()) {
+ Write-ColorOutput "β External IP obtained: $($externalIP.Trim())" "Green"
+ break
+ }
+
+ Start-Sleep 30
+ $attempt++
+ } while ($attempt -le $maxAttempts)
+
+ if (-not $externalIP -or -not $externalIP.Trim()) {
+ Write-ColorOutput "Warning: External IP not yet assigned. Check later with: kubectl get service -n ingress-nginx" "Yellow"
+ }
+
+ Write-ColorOutput "β NGINX Ingress Controller installed successfully!" "Green"
+ }
+ catch {
+ Write-ColorOutput "Failed to install NGINX Ingress Controller: $_" "Red"
+ exit 1
+ }
+}
+
+# Function to display deployment summary
+function Show-DeploymentSummary {
+ param(
+ [string]$ResourceGroupName,
+ [string]$AksName,
+ [string]$Location
+ )
+
+ Write-ColorOutput ("`n" + "=" * 80) "Green"
+ Write-ColorOutput "DEPLOYMENT SUMMARY" "Green"
+ Write-ColorOutput ("=" * 80) "Green"
+
+ Write-ColorOutput "Resource Group: $ResourceGroupName" "White"
+ Write-ColorOutput "AKS Cluster: $AksName" "White"
+ Write-ColorOutput "Location: $Location" "White"
+ Write-ColorOutput "Timestamp: $(Get-Date)" "White"
+
+ Write-ColorOutput "`nDeployed Resources:" "Cyan"
+ try {
+ az resource list -g $ResourceGroupName -o table --query "[].{Name:name, Type:type}"
+ }
+ catch {
+ Write-ColorOutput "Could not retrieve resource list" "Yellow"
+ }
+
+ Write-ColorOutput "`nNGINX Ingress Controller External IP:" "Cyan"
+ try {
+ $externalIP = kubectl get service -n ingress-nginx -o jsonpath='{range .items[*]} {.status.loadBalancer.ingress[*].ip} {"\n"} {end}'
+ if ($externalIP -and $externalIP.Trim()) {
+ Write-ColorOutput $externalIP.Trim() "White"
+ }
+ else {
+ Write-ColorOutput "Not yet assigned - check later" "Yellow"
+ }
+ }
+ catch {
+ Write-ColorOutput "Could not retrieve external IP" "Yellow"
+ }
+
+ Write-ColorOutput "`nIMPORTANT:" "Red"
+ Write-ColorOutput "Make sure the CIDR of the created VNet is added to the Oracle NSG." "Yellow"
+ Write-ColorOutput "="*80 "Green"
+}
+
+# Main execution
+function Main {
+ $startTime = Get-Date
+
+ Write-ColorOutput "Starting ODAA MH Environment Deployment" "Green"
+ Write-ColorOutput "Resource Group: $ResourceGroupName" "White"
+ Write-ColorOutput "Prefix: $Prefix" "White"
+ Write-ColorOutput "Postfix: $Postfix" "White"
+ Write-ColorOutput "Location: $Location" "White"
+ Write-ColorOutput ""
+
+ # Check prerequisites
+ if (-not $SkipPrerequisites) {
+ Test-Prerequisites
+ }
+ else {
+ Write-ColorOutput "Skipping prerequisite checks..." "Yellow"
+ }
+
+ # Login to Azure
+ if (-not $SkipLogin) {
+ Connect-AzureAccount
+ }
+ else {
+ Write-ColorOutput "Skipping Azure login..." "Yellow"
+ }
+
+ # Create resource group
+ New-AzureResourceGroup -Name $ResourceGroupName -Location $Location
+
+ # Deploy Azure resources
+ Deploy-AzureResources -ResourceGroupName $ResourceGroupName -Prefix $Prefix -Postfix $Postfix -Location $Location
+
+ # Connect to AKS
+ $aksName = $Prefix + $Postfix
+ Connect-AKSCluster -ResourceGroupName $ResourceGroupName -AksName $aksName
+
+ # Install NGINX Ingress Controller
+ Install-NginxIngressController
+
+ # Show deployment summary
+ Show-DeploymentSummary -ResourceGroupName $ResourceGroupName -AksName $aksName -Location $Location
+
+ $endTime = Get-Date
+ $duration = $endTime - $startTime
+ Write-ColorOutput "`nDeployment completed in $($duration.ToString('hh\:mm\:ss'))" "Green"
+}
+
+# Execute main function
+try {
+ Main
+}
+catch {
+ Write-ColorOutput "Deployment failed: $_" "Red"
+ Write-ColorOutput "Stack trace: $($_.ScriptStackTrace)" "Red"
+ exit 1
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-OracleHAEnvironment.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-OracleHAEnvironment.ps1
new file mode 100644
index 000000000..478db7d5c
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-OracleHAEnvironment.ps1
@@ -0,0 +1,5 @@
+<#
+The HA use case for Oracle Database @ Azure (ODAA) is currently in development and will be available in a future release.
+Please check back later for updates.
+#>
+# Placeholder for the future implementation of High Availability for Oracle Database @ Azure (ODAA)
+Write-Host "The ODAA HA use case is currently in development. Please check back later for updates." -ForegroundColor Yellow
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-TokenRefreshSidecar.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-TokenRefreshSidecar.ps1
new file mode 100644
index 000000000..389ffdfce
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Deploy-TokenRefreshSidecar.ps1
@@ -0,0 +1,210 @@
+# Deploy Token Refresh Sidecar Container
+# This script builds and deploys the token refresh sidecar to your AKS cluster
+
+param(
+ [Parameter(Mandatory=$false)]
+ [string]$ResourceGroup = "odaa",
+
+ [Parameter(Mandatory=$false)]
+ [string]$AcrName = "odaa",
+
+ [Parameter(Mandatory=$false)]
+ [string]$AksCluster = "odaa",
+
+ [Parameter(Mandatory=$false)]
+ [string]$Namespace = "microhacks",
+
+ [Parameter(Mandatory=$false)]
+ [string]$ImageTag = "latest",
+
+ [Parameter(Mandatory=$false)]
+ [string]$TenantId = "f71980b2-590a-4de9-90d5-6fbc867da951",
+
+ [Parameter(Mandatory=$false)]
+ [string]$ClientId = "7d22ece1-dd60-4279-a911-4b7b95934f2e"
+)
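+
+# Example invocation (illustrative values; the parameter defaults above target the workshop environment):
+#   .\Deploy-TokenRefreshSidecar.ps1 -ResourceGroup "odaa-team1" -AcrName "odaateam1" -AksCluster "odaa-team1" -ImageTag "v1"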
+
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host "Token Refresh Sidecar Deployment" -ForegroundColor Cyan
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host ""
+
+# Step 1: Get ACR login server
+Write-Host "Step 1: Getting ACR details..." -ForegroundColor Green
+$acrLoginServer = az acr show --name $AcrName --resource-group $ResourceGroup --query loginServer -o tsv
+
+if (!$acrLoginServer) {
+ Write-Host "ERROR: Could not find ACR: $AcrName" -ForegroundColor Red
+ exit 1
+}
+
+Write-Host "ACR Login Server: $acrLoginServer" -ForegroundColor Yellow
+
+# Step 2: Build Docker image
+Write-Host "`nStep 2: Building Docker image..." -ForegroundColor Green
+$imageName = "${acrLoginServer}/token-refresh:${ImageTag}"
+
+docker build -f .\resources\infra\Dockerfile.token-refresh -t $imageName .
+
+if ($LASTEXITCODE -ne 0) {
+ Write-Host "ERROR: Docker build failed" -ForegroundColor Red
+ exit 1
+}
+
+Write-Host "β
Image built: $imageName" -ForegroundColor Yellow
+
+# Step 3: Push to ACR
+Write-Host "`nStep 3: Pushing image to ACR..." -ForegroundColor Green
+
+# Login to ACR
+az acr login --name $AcrName
+
+# Push image
+docker push $imageName
+
+if ($LASTEXITCODE -ne 0) {
+ Write-Host "ERROR: Docker push failed" -ForegroundColor Red
+ exit 1
+}
+
+Write-Host "β
Image pushed to ACR" -ForegroundColor Yellow
+
+# Step 4: Attach ACR to AKS (if not already attached)
+Write-Host "`nStep 4: Ensuring AKS can pull from ACR..." -ForegroundColor Green
+az aks update --name $AksCluster --resource-group $ResourceGroup --attach-acr $AcrName
+
+# Step 5: Get AKS credentials
+Write-Host "`nStep 5: Getting AKS credentials..." -ForegroundColor Green
+az aks get-credentials --resource-group $ResourceGroup --name $AksCluster --overwrite-existing
+
+# Step 6: Create deployment YAML
+Write-Host "`nStep 6: Creating Kubernetes deployment..." -ForegroundColor Green
+
+$deploymentYaml = @"
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: oracle-client-with-token-refresh
+ namespace: $Namespace
+ labels:
+ app: oracle-client
+ component: database-client
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: oracle-client
+ template:
+ metadata:
+ labels:
+ app: oracle-client
+ azure.workload.identity/use: "true"
+ spec:
+ serviceAccountName: oracle-client-sa
+ containers:
+ # Main Oracle client container
+ - name: instantclient
+ image: container-registry.oracle.com/database/instantclient:23
+ command: ["/bin/bash"]
+ args: ["-c", "sleep infinity"]
+ env:
+ - name: TNS_ADMIN
+ value: "/tmp/wallet"
+ - name: ORACLE_HOME
+ value: "/opt/oracle/instantclient_23_4"
+ - name: LD_LIBRARY_PATH
+ value: "/opt/oracle/instantclient_23_4"
+ volumeMounts:
+ - name: wallet
+ mountPath: /tmp/wallet
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "100m"
+ limits:
+ memory: "512Mi"
+ cpu: "500m"
+
+ # Token refresh sidecar
+ - name: token-refresh
+ image: $imageName
+ env:
+ - name: AZURE_CLIENT_ID
+ value: "$ClientId"
+ - name: AZURE_TENANT_ID
+ value: "$TenantId"
+ - name: TOKEN_FILE
+ value: "/tmp/wallet/token.txt"
+ - name: REFRESH_INTERVAL
+ value: "2700" # 45 minutes
+ - name: LOG_LEVEL
+ value: "INFO"
+ volumeMounts:
+ - name: wallet
+ mountPath: /tmp/wallet
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "50m"
+ limits:
+ memory: "256Mi"
+ cpu: "200m"
+
+ volumes:
+ - name: wallet
+ emptyDir: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: oracle-client-sa
+ namespace: $Namespace
+ annotations:
+ azure.workload.identity/client-id: "$ClientId"
+ labels:
+ azure.workload.identity/use: "true"
+"@
+
+# Save YAML file
+$yamlFile = ".\resources\infra\oracle-client-deployment.yaml"
+$deploymentYaml | Out-File -FilePath $yamlFile -Encoding UTF8
+
+Write-Host "β
Deployment YAML created: $yamlFile" -ForegroundColor Yellow
+
+# Step 7: Apply deployment
+Write-Host "`nStep 7: Applying Kubernetes deployment..." -ForegroundColor Green
+
+kubectl apply -f $yamlFile
+
+if ($LASTEXITCODE -ne 0) {
+ Write-Host "ERROR: kubectl apply failed" -ForegroundColor Red
+ exit 1
+}
+
+Write-Host "β
Deployment applied successfully" -ForegroundColor Yellow
+
+# Step 8: Wait for pods to be ready
+Write-Host "`nStep 8: Waiting for pods to be ready..." -ForegroundColor Green
+kubectl wait --for=condition=ready pod -l app=oracle-client -n $Namespace --timeout=120s
+
+# Step 9: Display status
+Write-Host "`nStep 9: Checking deployment status..." -ForegroundColor Green
+Write-Host "`nPods:" -ForegroundColor Cyan
+kubectl get pods -n $Namespace -l app=oracle-client
+
+Write-Host "`nToken refresh logs (last 20 lines):" -ForegroundColor Cyan
+$podName = kubectl get pods -n $Namespace -l app=oracle-client -o jsonpath='{.items[0].metadata.name}'
+kubectl logs -n $Namespace $podName -c token-refresh --tail=20
+
+Write-Host "`n========================================" -ForegroundColor Cyan
+Write-Host "Deployment Complete!" -ForegroundColor Green
+Write-Host "========================================" -ForegroundColor Cyan
+Write-Host "`nTo test the connection:" -ForegroundColor Yellow
+Write-Host " kubectl exec -it -n $Namespace $podName -c instantclient -- bash" -ForegroundColor White
+Write-Host " # Inside the pod:" -ForegroundColor White
+Write-Host " export TNS_ADMIN=/tmp/wallet" -ForegroundColor White
+Write-Host " export LD_LIBRARY_PATH=/opt/oracle/instantclient_23_4" -ForegroundColor White
+Write-Host " sqlplus /@adbger_high" -ForegroundColor White
+Write-Host "`nTo view token refresh logs:" -ForegroundColor Yellow
+Write-Host " kubectl logs -n $Namespace $podName -c token-refresh -f" -ForegroundColor White
+Write-Host ""
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Enable-EntraSSO-OracleADB.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Enable-EntraSSO-OracleADB.ps1
new file mode 100644
index 000000000..7e937ad6a
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Enable-EntraSSO-OracleADB.ps1
@@ -0,0 +1,161 @@
+<#!
+.SYNOPSIS
+Automates Azure AD (Entra ID) setup to enable SSO (OIDC group-based) for an existing Oracle Autonomous Database (ADB).
+
+.DESCRIPTION
+Creates (idempotently):
+ 1. Security groups for DB role mapping (ODB_DBA, ODB_READWRITE, ODB_READONLY) and optional custom names.
+ 2. An App Registration configured to emit group claims in access & id tokens.
+ 3. A Service Principal for the app.
+ 4. Outputs a metadata JSON file with endpoints & group/object IDs for use in Oracle ADB configuration.
+
+This script only handles Azure side prerequisites. Oracle-side steps (executed via SQL*Plus / OCI) must use the produced metadata.
+
+.REQUIREMENTS
+ - Azure CLI (az) logged in with sufficient privileges (App Registration + Group + Graph Patch rights)
+ - Permission to call Microsoft Graph (Application.ReadWrite.All, Group.ReadWrite.All) if tenant restrictions apply
+
+.NOTES
+Tested with Azure CLI >= 2.62 (Graph v1.0). Adjust if your CLI differs.
+#>
+[CmdletBinding()]param(
+ [Parameter(Mandatory)] [string]$SubscriptionId,
+ [Parameter(Mandatory)] [string]$TenantId,
+ [string]$AppDisplayName = 'OracleADB-SSO',
+ [string]$IdentifierUri, # If blank a new api:// GUID URI is generated
+ [string]$RedirectUri = 'https://localhost/redirect',
+ [string]$GroupDbaName = 'ODB_DBA',
+ [string]$GroupReadWriteName = 'ODB_READWRITE',
+ [string]$GroupReadOnlyName = 'ODB_READONLY',
+ [string[]]$DbaMembers = @(), # User (objectId or UPN) list
+ [string[]]$ReadWriteMembers = @(),
+ [string[]]$ReadOnlyMembers = @(),
+ [string]$OutputMetadataFile = './oracle-adb-entra-metadata.json',
+ [switch]$SkipGroupCreation # If groups already exist
+)
+
+function Ensure-AzContext {
+ Write-Verbose 'Ensuring Azure CLI context.'
+ $acct = az account show --only-show-errors 2>$null | ConvertFrom-Json
+ if(-not $acct){
+ Write-Host 'Logging in...' -ForegroundColor Cyan
+ az login --only-show-errors | Out-Null
+ }
+ az account set -s $SubscriptionId --only-show-errors
+}
+
+function Get-GroupId($name){
+ $gid = az ad group list --display-name $name --query '[0].id' -o tsv 2>$null
+ return $gid
+}
+
+function Ensure-Group($name){
+ $gid = Get-GroupId $name
+ if($gid){ Write-Host "Group $name exists ($gid)"; return $gid }
+ if($SkipGroupCreation){ throw "Group $name not found and creation skipped." }
+ Write-Host "Creating group $name" -ForegroundColor Cyan
+ $gid = az ad group create --display-name $name --mail-nickname $name --query id -o tsv
+ return $gid
+}
+
+function Ensure-Members($groupName, [string[]]$members){
+ if(-not $members -or $members.Count -eq 0){ return }
+ $existing = az ad group member list --group $groupName --query '[].id' -o tsv | Sort-Object -Unique
+ foreach($m in $members){
+ $mid = $m
+ if($m -notmatch '^[0-9a-f-]{36}$'){
+ # Treat as UPN -> resolve objectId
+ $mid = az ad user show --id $m --query id -o tsv 2>$null
+ if(-not $mid){ Write-Warning "Could not resolve user $m"; continue }
+ }
+ if($existing -contains $mid){ continue }
+ Write-Host "Adding member $m to $groupName" -ForegroundColor DarkCyan
+ az ad group member add --group $groupName --member-id $mid --only-show-errors 2>$null
+ }
+}
+
+function Ensure-AppRegistration {
+ param([string]$name,[string]$redirect,[string]$identifierUri)
+ $app = az ad app list --display-name $name --query '[0]' -o json | ConvertFrom-Json
+ if(-not $app){
+ if(-not $identifierUri -or $identifierUri -eq ''){ $identifierUri = "api://$([guid]::NewGuid())" }
+ Write-Host "Creating App Registration $name" -ForegroundColor Cyan
+ $appJson = az ad app create `
+ --display-name $name `
+ --sign-in-audience AzureADMyOrg `
+ --identifier-uris $identifierUri `
+ --web-redirect-uris $redirect `
+ --enable-access-token-issuance true `
+ --enable-id-token-issuance true -o json | ConvertFrom-Json
+ $app = $appJson
+ }
+ else {
+ if($identifierUri){ Write-Verbose 'Identifier URI provided but app exists; skipping update.' }
+ }
+ # Ensure group claims
+ if($app.groupMembershipClaims -ne 'SecurityGroup'){
+ Write-Host 'Setting groupMembershipClaims=SecurityGroup' -ForegroundColor Cyan
+ az ad app update --id $app.appId --set groupMembershipClaims=SecurityGroup --only-show-errors | Out-Null
+ }
+ # Ensure optional group claims (access & id tokens)
+ $objectId = $app.id # Directory (object) id
+ Write-Host 'Ensuring optional claims for groups' -ForegroundColor Cyan
+ $claimsBody = @{ optionalClaims = @{ idToken = @(@{ name='groups' }); accessToken = @(@{ name='groups' }) } } | ConvertTo-Json -Depth 6
+ az rest --method PATCH --uri "https://graph.microsoft.com/v1.0/applications/$objectId" --headers 'Content-Type=application/json' --body $claimsBody --only-show-errors | Out-Null
+
+ # Ensure service principal
+ $sp = az ad sp list --filter "appId eq '$($app.appId)'" --query '[0]' -o json | ConvertFrom-Json
+ if(-not $sp){
+ Write-Host 'Creating Service Principal' -ForegroundColor Cyan
+ $sp = az ad sp create --id $app.appId -o json | ConvertFrom-Json
+ }
+ return $app
+}
+
+function Write-Metadata {
+ param($app,$groupMap)
+ $issuer = "https://login.microsoftonline.com/$TenantId/v2.0"
+ $metadata = [ordered]@{
+ generatedUtc = (Get-Date).ToUniversalTime().ToString('o')
+ tenantId = $TenantId
+ subscriptionId = $SubscriptionId
+ app = @{ displayName=$app.displayName; appId=$app.appId; identifierUris=$app.identifierUris; objectId=$app.id }
+ oidc = @{ issuer=$issuer; authorization_endpoint="https://login.microsoftonline.com/$TenantId/oauth2/v2.0/authorize"; token_endpoint="https://login.microsoftonline.com/$TenantId/oauth2/v2.0/token"; jwks_uri="https://login.microsoftonline.com/$TenantId/discovery/v2.0/keys" }
+ groups = $groupMap
+ guidance = @{ oracle_next_steps = @(
+ 'In ADB: map Entra ID group object IDs to database roles/users.',
+ 'Use DBMS_CLOUD_ADMIN.CREATE_CLOUD_USER or appropriate role mapping feature.',
+ 'Grant privileges based on ODB_DBA / ODB_READWRITE / ODB_READONLY mappings.',
+ 'Test: Acquire token (az account get-access-token --resource <identifier-uri>) and connect using an Oracle client supporting OAuth.'
+ ) }
+ }
+ $json = ($metadata | ConvertTo-Json -Depth 8)
+ Set-Content -Path $OutputMetadataFile -Value $json -Encoding UTF8
+ Write-Host "Metadata written to $OutputMetadataFile" -ForegroundColor Green
+}
+
+# ------------- MAIN -------------
+Ensure-AzContext
+
+$groupIds = @{}
+$gidDba = Ensure-Group $GroupDbaName; $groupIds[$GroupDbaName] = $gidDba
+$gidRw = Ensure-Group $GroupReadWriteName; $groupIds[$GroupReadWriteName] = $gidRw
+$gidRo = Ensure-Group $GroupReadOnlyName; $groupIds[$GroupReadOnlyName] = $gidRo
+
+Ensure-Members $GroupDbaName $DbaMembers
+Ensure-Members $GroupReadWriteName $ReadWriteMembers
+Ensure-Members $GroupReadOnlyName $ReadOnlyMembers
+
+$app = Ensure-AppRegistration -name $AppDisplayName -redirect $RedirectUri -identifierUri $IdentifierUri
+
+Write-Metadata -app $app -groupMap $groupIds
+
+Write-Host 'Done.' -ForegroundColor Green
+
+<#!
+EXAMPLE:
+ pwsh ./Enable-EntraSSO-OracleADB.ps1 -SubscriptionId '00000000-0000-0000-0000-000000000000' -TenantId '11111111-1111-1111-1111-111111111111' `
+ -DbaMembers user1@contoso.com -ReadWriteMembers user2@contoso.com -ReadOnlyMembers user3@contoso.com
+
+Then configure Oracle ADB using the produced oracle-adb-entra-metadata.json.
+#>
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Manage-Environments.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Manage-Environments.ps1
new file mode 100644
index 000000000..64f97985a
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/Manage-Environments.ps1
@@ -0,0 +1,390 @@
+#Requires -Version 5.1
+<#
+.SYNOPSIS
+ Management script for ODAA MH environments deployed on Azure
+
+.DESCRIPTION
+ This script provides management capabilities for ODAA MH environments including:
+ - List all deployed environments
+ - Check status of environments
+ - Cleanup/delete environments
+ - Get external IPs and connection information
+
+.PARAMETER Action
+ Action to perform: List, Status, Cleanup, GetInfo
+
+.PARAMETER ResourceGroupPattern
+ Pattern to match resource group names (supports wildcards)
+
+.PARAMETER ResourceGroupNames
+ Specific resource group names to operate on
+
+.PARAMETER SubscriptionName
+ Azure subscription name
+
+.PARAMETER Confirm
+ Skip confirmation prompts for destructive operations
+
+.EXAMPLE
+ .\Manage-Environments.ps1 -Action List
+
+.EXAMPLE
+ .\Manage-Environments.ps1 -Action Status -ResourceGroupNames @("odaa-team1", "odaa-team2")
+
+.EXAMPLE
+ .\Manage-Environments.ps1 -Action Cleanup -ResourceGroupPattern "odaa-team*" -Confirm
+
+.NOTES
+ Author: Generated for ODAA MH Workshop
+ This script helps manage multiple ODAA MH environments
+#>
+
+[CmdletBinding()]
+param(
+ [Parameter(Mandatory = $true)]
+ [ValidateSet("List", "Status", "Cleanup", "GetInfo")]
+ [string]$Action,
+
+ [Parameter(Mandatory = $false)]
+ [string]$ResourceGroupPattern,
+
+ [Parameter(Mandatory = $false)]
+ [string[]]$ResourceGroupNames,
+
+ [Parameter(Mandatory = $false)]
+ [string]$SubscriptionName = "sub-cptdx-01",
+
+ [Parameter(Mandatory = $false)]
+ [switch]$Confirm
+)
+
+# Function to write colored output
+function Write-ColorOutput {
+ param(
+ [string]$Message,
+ [string]$Color = "White"
+ )
+ Write-Host $Message -ForegroundColor $Color
+}
+
+# Function to get resource groups
+function Get-TargetResourceGroups {
+ try {
+ if ($ResourceGroupNames) {
+ $resourceGroups = @()
+ foreach ($rgName in $ResourceGroupNames) {
+ $rg = az group show --name $rgName --query "name" -o tsv 2>$null
+ if ($rg) {
+ $resourceGroups += $rg
+ }
+ else {
+ Write-ColorOutput "Resource group not found: $rgName" "Yellow"
+ }
+ }
+ return $resourceGroups
+ }
+ elseif ($ResourceGroupPattern) {
+ $allRgs = az group list --query "[].name" -o tsv
+ $matchingRgs = $allRgs | Where-Object { $_ -like $ResourceGroupPattern }
+ return $matchingRgs
+ }
+ else {
+ # List all resource groups
+ return az group list --query "[].name" -o tsv
+ }
+ }
+ catch {
+ Write-ColorOutput "Failed to get resource groups: $_" "Red"
+ return @()
+ }
+}
+
+# Function to list environments
+function Invoke-ListEnvironments {
+ Write-ColorOutput "Listing ODAA MH Environments..." "Yellow"
+
+ $resourceGroups = Get-TargetResourceGroups
+
+ if ($resourceGroups.Count -eq 0) {
+ Write-ColorOutput "No matching resource groups found" "Yellow"
+ return
+ }
+
+ $environments = @()
+
+ foreach ($rgName in $resourceGroups) {
+ try {
+ # Get AKS clusters in the resource group
+ $aksClusters = az aks list --resource-group $rgName --query "[].{Name:name, Status:powerState.code, Version:kubernetesVersion, Location:location}" -o json | ConvertFrom-Json
+
+ if ($aksClusters) {
+ foreach ($cluster in $aksClusters) {
+ $environments += [PSCustomObject]@{
+ ResourceGroup = $rgName
+ AKSCluster = $cluster.Name
+ Status = $cluster.Status
+ K8sVersion = $cluster.Version
+ Location = $cluster.Location
+ }
+ }
+ }
+ else {
+ # Check if resource group has any resources
+ $resources = az resource list --resource-group $rgName --query "length(@)" -o tsv
+ if ($resources -gt 0) {
+ $environments += [PSCustomObject]@{
+ ResourceGroup = $rgName
+ AKSCluster = "No AKS cluster"
+ Status = "Unknown"
+ K8sVersion = "N/A"
+ Location = "N/A"
+ }
+ }
+ }
+ }
+ catch {
+ Write-ColorOutput "Failed to get info for $rgName: $_" "Red"
+ }
+ }
+
+ if ($environments.Count -gt 0) {
+ Write-ColorOutput "`nFound $($environments.Count) environments:" "Green"
+ $environments | Format-Table -AutoSize
+ }
+ else {
+ Write-ColorOutput "No ODAA MH environments found" "Yellow"
+ }
+}
+
+# Function to check environment status
+function Invoke-StatusCheck {
+ Write-ColorOutput "Checking environment status..." "Yellow"
+
+ $resourceGroups = Get-TargetResourceGroups
+
+ if ($resourceGroups.Count -eq 0) {
+ Write-ColorOutput "No matching resource groups found" "Yellow"
+ return
+ }
+
+ foreach ($rgName in $resourceGroups) {
+ Write-ColorOutput ("`n" + "=" * 60) "Cyan"
+ Write-ColorOutput "Resource Group: $rgName" "Cyan"
+ Write-ColorOutput ("=" * 60) "Cyan"
+
+ try {
+ # Get AKS cluster info
+ $aksClusters = az aks list --resource-group $rgName --query "[].name" -o tsv
+
+ if ($aksClusters) {
+ foreach ($aksName in $aksClusters) {
+ Write-ColorOutput "`nAKS Cluster: $aksName" "White"
+
+ # Get credentials and check cluster
+ try {
+ az aks get-credentials --resource-group $rgName --name $aksName --overwrite-existing --only-show-errors
+
+ # Check cluster nodes
+ Write-ColorOutput "Cluster Nodes:" "Yellow"
+ kubectl get nodes --no-headers 2>$null | Format-Table
+
+ # Check namespaces
+ Write-ColorOutput "Namespaces:" "Yellow"
+ kubectl get namespaces --no-headers 2>$null | Format-Table
+
+ # Check NGINX ingress controller
+ Write-ColorOutput "NGINX Ingress Controller:" "Yellow"
+ $nginxService = kubectl get service -n ingress-nginx --no-headers 2>$null
+
+ if ($nginxService) {
+ $nginxService | Format-Table
+
+ # Get external IP
+ $externalIP = kubectl get service -n ingress-nginx -o jsonpath='{range .items[*]} {.status.loadBalancer.ingress[*].ip} {"\n"} {end}' 2>$null
+ if ($externalIP -and $externalIP.Trim()) {
+ Write-ColorOutput "External IP: $($externalIP.Trim())" "Green"
+ }
+ else {
+ Write-ColorOutput "External IP: Not assigned yet" "Yellow"
+ }
+ }
+ else {
+ Write-ColorOutput "NGINX Ingress Controller not found" "Red"
+ }
+ }
+ catch {
+ Write-ColorOutput "Failed to check AKS cluster: $_" "Red"
+ }
+ }
+ }
+ else {
+ Write-ColorOutput "No AKS clusters found in this resource group" "Yellow"
+
+ # List other resources
+ Write-ColorOutput "Other resources:" "Yellow"
+ az resource list --resource-group $rgName --query "[].{Name:name, Type:type}" -o table
+ }
+ }
+ catch {
+ Write-ColorOutput "Failed to get status for $rgName: $_" "Red"
+ }
+ }
+}
+
+# Function to get environment information
+function Invoke-GetInfo {
+ Write-ColorOutput "Getting environment connection information..." "Yellow"
+
+ $resourceGroups = Get-TargetResourceGroups
+
+ if ($resourceGroups.Count -eq 0) {
+ Write-ColorOutput "No matching resource groups found" "Yellow"
+ return
+ }
+
+ $connectionInfo = @()
+
+ foreach ($rgName in $resourceGroups) {
+ try {
+ $aksClusters = az aks list --resource-group $rgName --query "[].name" -o tsv
+
+ foreach ($aksName in $aksClusters) {
+ # Get credentials
+ az aks get-credentials --resource-group $rgName --name $aksName --overwrite-existing --only-show-errors
+
+ # Get external IP
+ $externalIP = kubectl get service -n ingress-nginx -o jsonpath='{range .items[*]} {.status.loadBalancer.ingress[*].ip} {"\n"} {end}' 2>$null
+
+ # Get cluster info
+ $clusterInfo = az aks show --resource-group $rgName --name $aksName --query "{fqdn:fqdn, version:kubernetesVersion}" -o json | ConvertFrom-Json
+
+ $connectionInfo += [PSCustomObject]@{
+ ResourceGroup = $rgName
+ AKSCluster = $aksName
+ ClusterFQDN = $clusterInfo.fqdn
+ K8sVersion = $clusterInfo.version
+ ExternalIP = if ($externalIP -and $externalIP.Trim()) { $externalIP.Trim() } else { "Not assigned" }
+ KubectlContext = kubectl config current-context 2>$null
+ }
+ }
+ }
+ catch {
+ Write-ColorOutput "Failed to get info for $rgName/$aksName: $_" "Red"
+ }
+ }
+
+ if ($connectionInfo.Count -gt 0) {
+ Write-ColorOutput "`nConnection Information:" "Green"
+ $connectionInfo | Format-Table -AutoSize -Wrap
+
+ Write-ColorOutput "`nConnection Commands:" "Yellow"
+ foreach ($info in $connectionInfo) {
+ Write-ColorOutput "# Connect to $($info.AKSCluster):" "Cyan"
+ Write-ColorOutput "az aks get-credentials --resource-group $($info.ResourceGroup) --name $($info.AKSCluster)" "White"
+ Write-ColorOutput ""
+ }
+ }
+ else {
+ Write-ColorOutput "No connection information found" "Yellow"
+ }
+}
+
+# Function to cleanup environments
+function Invoke-Cleanup {
+ Write-ColorOutput "Preparing to cleanup environments..." "Yellow"
+
+ $resourceGroups = Get-TargetResourceGroups
+
+ if ($resourceGroups.Count -eq 0) {
+ Write-ColorOutput "No matching resource groups found" "Yellow"
+ return
+ }
+
+ Write-ColorOutput "`nResource groups to be deleted:" "Red"
+ foreach ($rgName in $resourceGroups) {
+ Write-ColorOutput "- $rgName" "Red"
+ }
+
+ if (-not $Confirm) {
+ Write-ColorOutput "`nWARNING: This will permanently delete all resources in the above resource groups!" "Red"
+ $confirmation = Read-Host "Type 'DELETE' to confirm deletion"
+
+ if ($confirmation -ne "DELETE") {
+ Write-ColorOutput "Cleanup cancelled" "Yellow"
+ return
+ }
+ }
+
+ Write-ColorOutput "`nStarting cleanup..." "Yellow"
+ $failed = @()
+
+ foreach ($rgName in $resourceGroups) {
+ try {
+ Write-ColorOutput "Deleting resource group: $rgName" "Yellow"
+ az group delete --name $rgName --yes --no-wait
+ Write-ColorOutput "β Deletion initiated for: $rgName" "Green"
+ }
+ catch {
+ Write-ColorOutput "β Failed to delete: $rgName - $_" "Red"
+ $failed += $rgName
+ }
+ }
+
+ if ($failed.Count -eq 0) {
+ Write-ColorOutput "`nAll resource group deletions initiated successfully!" "Green"
+ Write-ColorOutput "Note: Actual deletion may take several minutes to complete." "Yellow"
+ }
+ else {
+ Write-ColorOutput "`nSome deletions failed:" "Red"
+ foreach ($failedRg in $failed) {
+ Write-ColorOutput "- $failedRg" "Red"
+ }
+ }
+}
+
+# Main execution
+function Main {
+ Write-ColorOutput "ODAA MH Environment Management" "Green"
+ Write-ColorOutput "Action: $Action" "White"
+ Write-ColorOutput "Subscription: $SubscriptionName" "White"
+ Write-ColorOutput ""
+
+ # Check Azure CLI authentication
+ try {
+ $currentAccount = az account show --query "name" -o tsv 2>$null
+ if (-not $currentAccount) {
+ Write-ColorOutput "Not authenticated with Azure CLI. Please run: az login" "Red"
+ exit 1
+ }
+
+ # Set subscription
+ az account set -s $SubscriptionName
+ Write-ColorOutput "Using subscription: $SubscriptionName" "Green"
+ }
+ catch {
+ Write-ColorOutput "Failed to set Azure subscription: $_" "Red"
+ exit 1
+ }
+
+ # Execute action
+ switch ($Action) {
+ "List" { Invoke-ListEnvironments }
+ "Status" { Invoke-StatusCheck }
+ "GetInfo" { Invoke-GetInfo }
+ "Cleanup" { Invoke-Cleanup }
+ default {
+ Write-ColorOutput "Unknown action: $Action" "Red"
+ exit 1
+ }
+ }
+}
+
+# Execute main function
+try {
+ Main
+}
+catch {
+ Write-ColorOutput "Management operation failed: $_" "Red"
+ Write-ColorOutput "Stack trace: $($_.ScriptStackTrace)" "Red"
+ exit 1
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/jwt.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/jwt.ps1
new file mode 100644
index 000000000..37f72e2a2
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/jwt.ps1
@@ -0,0 +1,65 @@
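+# Decodes a JWT (without validating the signature) and prints its header, payload,
+# timing claims, and a SHA-256 hash of the signature bytes.
+# Usage sketch (illustrative; any JWT string can be piped in):
+#   az account get-access-token --query accessToken -o tsv | .\jwt.ps1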
+param(
+ [Parameter(Mandatory,ValueFromPipeline,ValueFromPipelineByPropertyName)]
+ [string]$Jwt
+)
+
+begin {
+ function Convert-FromBase64Url {
+ param([string]$Value)
+ $Value = $Value.Replace('-','+').Replace('_','/')
+ switch ($Value.Length % 4) {
+ 2 { $Value += '==' }
+ 3 { $Value += '=' }
+ 0 { }
+ default { throw "Invalid Base64URL length." }
+ }
+ [Convert]::FromBase64String($Value)
+ }
+}
+
+process {
+ $Jwt = $Jwt.Trim()
+ $parts = $Jwt -split '\.'
+ if ($parts.Length -ne 3) {
+ throw "Not a valid JWT (expected 3 dot-separated parts)."
+ }
+
+ $headerJson = [Text.Encoding]::UTF8.GetString((Convert-FromBase64Url $parts[0]))
+ $payloadJson = [Text.Encoding]::UTF8.GetString((Convert-FromBase64Url $parts[1]))
+ $signatureBytes = Convert-FromBase64Url $parts[2]
+
+ $headerObj = $headerJson | ConvertFrom-Json
+ $payloadObj = $payloadJson | ConvertFrom-Json
+
+ Write-Host "`n=== JWT HEADER ===" -ForegroundColor Cyan
+ $headerObj | Format-List
+
+ Write-Host "=== JWT PAYLOAD ===" -ForegroundColor Cyan
+ $payloadObj | Format-List
+
+ Write-Host "=== TOKEN TIMING ===" -ForegroundColor Cyan
+ if ($payloadObj.iat) {
+ $issuedAt = [DateTimeOffset]::FromUnixTimeSeconds($payloadObj.iat).LocalDateTime
+ Write-Host "Issued At (iat): $issuedAt"
+ }
+ if ($payloadObj.nbf) {
+ $notBefore = [DateTimeOffset]::FromUnixTimeSeconds($payloadObj.nbf).LocalDateTime
+ Write-Host "Not Before (nbf): $notBefore"
+ }
+ if ($payloadObj.exp) {
+ $expires = [DateTimeOffset]::FromUnixTimeSeconds($payloadObj.exp).LocalDateTime
+ $remaining = $expires - [DateTime]::Now
+ Write-Host "Expires (exp): $expires"
+ if ($remaining.TotalSeconds -gt 0) {
+ Write-Host "Time Remaining: $($remaining.ToString('hh\:mm\:ss'))" -ForegroundColor Green
+ } else {
+ Write-Host "Status: EXPIRED" -ForegroundColor Red
+ }
+ }
+
+ Write-Host "`n=== SIGNATURE (SHA256) ===" -ForegroundColor Cyan
+ $sigHash = ([System.BitConverter]::ToString([System.Security.Cryptography.SHA256]::Create().ComputeHash($signatureBytes))).Replace('-','')
+ Write-Host $sigHash
+
+ Write-Host ""
+}
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/refresh-token.sh b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/refresh-token.sh
new file mode 100644
index 000000000..e53baf1bb
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/scripts/refresh-token.sh
@@ -0,0 +1,218 @@
+#!/bin/bash
+# Token Refresh Service for Oracle Entra ID Authentication
+# Runs as a sidecar container to automatically refresh OAuth tokens
+
+set -e
+
+# Configuration from environment variables
+TENANT_ID="${AZURE_TENANT_ID:-f71980b2-590a-4de9-90d5-6fbc867da951}"
+CLIENT_ID="${AZURE_CLIENT_ID:-7d22ece1-dd60-4279-a911-4b7b95934f2e}"
+SCOPE="https://cptazure.org/${CLIENT_ID}/.default"
+TOKEN_FILE="${TOKEN_FILE:-/tmp/wallet/token.txt}"
+REFRESH_INTERVAL="${REFRESH_INTERVAL:-2700}" # 45 minutes (2700 seconds)
+LOG_LEVEL="${LOG_LEVEL:-INFO}"
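+
+# Example standalone run (illustrative values; in the sidecar these come from the pod spec):
+#   TOKEN_FILE=/tmp/token.txt REFRESH_INTERVAL=600 ./refresh-token.sh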
+
+# Colors for logging
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+# Logging functions
+log_info() {
+ echo -e "${GREEN}[$(date '+%Y-%m-%d %H:%M:%S')] INFO:${NC} $1"
+}
+
+log_error() {
+ echo -e "${RED}[$(date '+%Y-%m-%d %H:%M:%S')] ERROR:${NC} $1" >&2
+}
+
+log_warn() {
+ echo -e "${YELLOW}[$(date '+%Y-%m-%d %H:%M:%S')] WARN:${NC} $1"
+}
+
+# Function to decode JWT and get expiry time
+get_token_expiry() {
+ local token=$1
+
+ # Extract payload (second part of JWT)
+ payload=$(echo "$token" | cut -d'.' -f2)
+
+ # Add padding if needed
+ padding_length=$((4 - ${#payload} % 4))
+ if [ $padding_length -ne 4 ]; then
+ payload="${payload}$(printf '=%.0s' $(seq 1 $padding_length))"
+ fi
+
+ # Decode and extract exp claim
+ exp=$(echo "$payload" | base64 -d 2>/dev/null | grep -o '"exp":[0-9]*' | cut -d':' -f2)
+
+ if [ -n "$exp" ]; then
+ echo "$exp"
+ else
+ echo "0"
+ fi
+}
+
+# Function to refresh token
+refresh_token() {
+ log_info "Attempting to refresh token..."
+
+ # Try to get token using Managed Identity first
+ local token=""
+ local use_method="unknown"
+
+ # Method 1: Try Managed Identity via IMDS endpoint
+ if command -v curl &> /dev/null; then
+ log_info "Trying Managed Identity (IMDS endpoint)..."
+ response=$(curl -sf -H "Metadata: true" \
+ "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=${SCOPE}" \
+ 2>/dev/null || echo "")
+
+ if [ -n "$response" ]; then
+ token=$(echo "$response" | grep -o '"access_token":"[^"]*"' | cut -d'"' -f4)
+ if [ -n "$token" ]; then
+ use_method="Managed Identity (IMDS)"
+ fi
+ fi
+ fi
+
+ # Method 2: Try Azure CLI with Managed Identity
+ if [ -z "$token" ] && command -v az &> /dev/null; then
+ log_info "Trying Azure CLI with Managed Identity..."
+ token=$(az account get-access-token \
+ --tenant "$TENANT_ID" \
+ --scope "$SCOPE" \
+ --query accessToken \
+ --output tsv 2>/dev/null || echo "")
+
+ if [ -n "$token" ]; then
+ use_method="Azure CLI (Managed Identity)"
+ fi
+ fi
+
+ # Method 3: Try Azure CLI with user authentication (fallback)
+ if [ -z "$token" ] && command -v az &> /dev/null; then
+ log_warn "Managed Identity failed, trying Azure CLI user authentication..."
+ token=$(az account get-access-token \
+ --scope "$SCOPE" \
+ --query accessToken \
+ --output tsv 2>/dev/null || echo "")
+
+ if [ -n "$token" ]; then
+ use_method="Azure CLI (User Auth)"
+ fi
+ fi
+
+ # Check if we got a token
+ if [ -z "$token" ] || [ "$token" == "null" ]; then
+ log_error "Failed to obtain token from any method"
+ return 1
+ fi
+
+ # Write token to file (single line, no newline)
+ echo -n "$token" > "$TOKEN_FILE"
+ chmod 600 "$TOKEN_FILE"
+
+ # Get token expiry
+ exp=$(get_token_expiry "$token")
+ if [ "$exp" -gt 0 ]; then
+ expiry_date=$(date -d "@$exp" '+%Y-%m-%d %H:%M:%S' 2>/dev/null || date -r "$exp" '+%Y-%m-%d %H:%M:%S' 2>/dev/null || echo "unknown")
+ time_until_expiry=$(( exp - $(date +%s) ))
+ minutes_until_expiry=$(( time_until_expiry / 60 ))
+
+ log_info "β
Token refreshed successfully using ${use_method}"
+ log_info "Token expires at: ${expiry_date} (in ${minutes_until_expiry} minutes)"
+ else
+ log_info "β
Token refreshed successfully using ${use_method}"
+ log_warn "Could not decode token expiry time"
+ fi
+
+ return 0
+}
+
+# Function to verify token file
+verify_token_file() {
+ if [ ! -f "$TOKEN_FILE" ]; then
+ log_error "Token file does not exist: $TOKEN_FILE"
+ return 1
+ fi
+
+ local file_size=$(wc -c < "$TOKEN_FILE")
+ local line_count=$(wc -l < "$TOKEN_FILE")
+
+ if [ "$file_size" -lt 100 ]; then
+ log_error "Token file is too small (${file_size} bytes)"
+ return 1
+ fi
+
+ if [ "$line_count" -gt 0 ]; then
+ log_warn "Token file has line breaks (${line_count} lines) - this may cause issues"
+ fi
+
+ log_info "Token file verification passed (${file_size} bytes, ${line_count} lines)"
+ return 0
+}
+
+# Main loop
+main() {
+ log_info "=========================================="
+ log_info "Token Refresh Service Starting"
+ log_info "=========================================="
+ log_info "Tenant ID: ${TENANT_ID}"
+ log_info "Client ID: ${CLIENT_ID}"
+ log_info "Scope: ${SCOPE}"
+ log_info "Token File: ${TOKEN_FILE}"
+ log_info "Refresh Interval: ${REFRESH_INTERVAL} seconds ($(($REFRESH_INTERVAL / 60)) minutes)"
+ log_info "=========================================="
+
+ # Initial token refresh
+ log_info "Performing initial token refresh..."
+ if ! refresh_token; then
+ log_error "Initial token refresh failed. Retrying in 60 seconds..."
+ sleep 60
+ fi
+
+ # Verify token file
+ verify_token_file
+
+ # Main refresh loop
+ local retry_count=0
+ local max_retries=3
+
+ while true; do
+ log_info "Next token refresh in $(($REFRESH_INTERVAL / 60)) minutes"
+ log_info "Sleeping for ${REFRESH_INTERVAL} seconds..."
+ sleep "$REFRESH_INTERVAL"
+
+ # Attempt to refresh token
+ if refresh_token; then
+ retry_count=0
+ verify_token_file
+ else
+ retry_count=$((retry_count + 1))
+ log_error "Token refresh failed (attempt ${retry_count}/${max_retries})"
+
+ if [ $retry_count -ge $max_retries ]; then
+ log_error "Max retries reached. Waiting full interval before next attempt."
+ retry_count=0
+ else
+ # Exponential backoff for retries
+ retry_delay=$((60 * retry_count))
+ log_warn "Retrying in ${retry_delay} seconds..."
+ sleep "$retry_delay"
+ fi
+ fi
+ done
+}
+
+# Handle signals for graceful shutdown
+cleanup() {
+ log_info "Received termination signal. Shutting down gracefully..."
+ exit 0
+}
+
+trap cleanup SIGTERM SIGINT
+
+# Start the service
+main
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/template/gghack.yaml b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/template/gghack.yaml
new file mode 100644
index 000000000..47bccd5b8
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/resources/template/gghack.yaml
@@ -0,0 +1,71 @@
+microhack:
+ user:
+### specify the name of an existing secret that contains the ogg admin username and password
+ogg:
+ adminSecret: ogg-admin-secret
+ usernameKey: oggusername
+ passwordKey: oggpassword
+ ## the name of an OGG deployment, typically "local", or ogg-east, ogg-west, ...
+ deploymentName: hacksync
+
+databases:
+ # value for source database (23ai free container) is calculated.
+ trgConn: ""
+
+ # for the demo env, don't change the schema names. SH already exists in ADB, so we create another one.
+ srcSchema: "SH"
+ trgSchema: "SH2"
+ dumpFile: "https://objectstorage.eu-frankfurt-1.oraclecloud.com/p/FRGSZDxFIT8uwY3bp_uhQm_8Cm1JL1VD8Wcau0kTZYZAyTGlvj1xzBHI1jXv2tC3/n/fre3ftc1iva4/b/export_bucket/o/SHBIG.dmp"
+ adminSecret:
+ secretName: db-admin-secret
+ srcAdminPwdKey: srcAdminPwd
+ trgAdminPwdKey: trgAdminPwd
+ srcGGUserNameKey: srcGGUserName
+ trgGGUserNameKey: trgGGUserName
+ srcGGPwdKey: srcGGPwd
+ trgGGPwdKey: trgGGPwd
+
+## requested sizes of some PVCs; beware that there can be many big trail files ...
+storage:
+ configSize: 1Gi
+ logSize: 10Gi
+ trailSize: 50Gi
+ storageClass: managed-csi
+
+image:
+ pullPolicy: IfNotPresent
+ #ggImageName: container-registry.oracle.com/goldengate/goldengate-oracle-free:23.8.0.25.04
+ ggImageName: fra.ocir.io/fre3ftc1iva4/pub_gg_micro_ora:23.4.1.24.05
+ ggdaaImageName: fra.ocir.io/fre3ftc1iva4/pub_gg_micro_bigdata:23.8.4.25.08
+ dbImageName: container-registry.oracle.com/database/free:23.26.0.0
+
+services:
+ ### You can choose to create an ingress in front of the service
+ ### with a virtual host name of ggate.
+ external:
+ ### set type to either ingress or none if you need something customized
+ type: ingress
+ ### typical ingressClasses are nginx and istio
+ ingressClass: nginx
+ ### uses default SSL certificate of gateway/controller or specify a custom tls-secret here
+ tlsSecretName: ggate-tls-secret
+ vhostName: gghack.xxx-xxx-xxx-xxx.nip.io # replace xxx-xxx-xxx-xxx with the public IP address of the ingress controller
+ internal:
+ type: ClusterIP
+ plainPort: 8080
+ sslPort: 8443
+
+resources:
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ limits:
+ cpu: "2"
+ memory: 16Gi
+
+gpu:
+ runOnGPU: false
+ toleration: nvidia.com/gpu
+ nodeSelector: VM.GPU.A10.1
+
+
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/azure-data-fabric-odaa/azure-data-fabric-odaa.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/azure-data-fabric-odaa/azure-data-fabric-odaa.md
new file mode 100644
index 000000000..13be01d71
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/azure-data-fabric-odaa/azure-data-fabric-odaa.md
@@ -0,0 +1,130 @@
+# 🧵 Challenge 8: (Optional) Integration of Azure Data Fabric with Oracle ADB
+
+[Back to workspace README](../../README.md)
+
+## 🎯 Overview
+
+This challenge focuses on integrating Oracle Autonomous Database (ADB) with Azure Data Fabric to create comprehensive data solutions that leverage both Oracle's database capabilities and Azure's data platform services.
+
+## Key Integration Areas
+
+1. **Data Connectivity**
+ - Configure secure connections between Oracle ADB and Azure Data Fabric
+ - Set up data source registrations
+
+2. **Data Pipeline Creation**
+ - Build data pipelines to move data between Oracle ADB and Azure services
+ - Implement real-time and batch data processing
+
+3. **Analytics and Reporting**
+ - Create reports and dashboards using Azure Data Fabric tools
+ - Leverage Power BI integration for visualization
+
+4. **Data Governance**
+ - Implement data lineage tracking
+ - Set up data quality monitoring
+
+## Deploy GoldenGate for Azure Fabric on AKS
+
+Log in to the AKS cluster where you want to deploy GoldenGate for Azure Fabric, if you have not already done so:
+
+~~~powershell
+# switch to the subscription where AKS is deployed
+$subAKS="sub-mh1" # replace with your AKS subscription name
+# Make sure your cli points to the AKS subscription
+az account set --subscription $subAKS
+# log in to your AKS cluster if not already done
+$rgAKS="aks-user01" # replace with your AKS resource group name
+$AKSClusterName="aks-user01" # replace with your AKS cluster name
+az aks get-credentials -g $rgAKS -n $AKSClusterName --overwrite-existing
+~~~
+
+Now get the external IP address of the ingress controller and replace the placeholder in the ggfabric.yaml file:
+
+~~~powershell
+# get the external IP of the ingress controller and strip spaces
+$EXTIP = (kubectl get service -n ingress-nginx -o jsonpath='{range .items[*]}{.status.loadBalancer.ingress[*].ip} {end}') -replace '\s', ''
+# create a copy of the template file
+cp resources/template/ggfabric.yaml .
+# replace the placeholder with the actual external IP
+(Get-Content ggfabric.yaml) -replace 'xxx-xxx-xxx-xxx', $EXTIP.Trim() | Set-Content ggfabric.yaml
+code ggfabric.yaml
+~~~
+
+The value of vhostName should look like this:
+
+~~~yaml
+ ### uses default SSL certificate of gateway/controller or specify a custom tls-secret here
+ tlsSecretName: ggate-tls-secret
+ vhostName: ggfabric.xxx-xxx-xxx-xxx.nip.io
+~~~
+
+## Install GoldenGate Pods
+
+Install all components via Helm:
+
+~~~powershell
+helm install oggfabric oggfree/goldengate-bigdata --values ggfabric.yaml -n microhacks
+~~~
+
+~~~text
+NAME: oggfabric
+LAST DEPLOYED: Wed Oct 22 18:31:24 2025
+NAMESPACE: microhacks
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+~~~
+
+~~~powershell
+# See deployment.apps/oggfabric-goldengate-bigdata
+kubectl describe deployment oggfabric-goldengate-bigdata -n microhacks
+# get our pod
+$podOGGfabric = kubectl get pods -n microhacks | Select-String 'oggfabric-goldengate-bigdata' | ForEach-Object { ($_ -split '\s+')[0] }
+# get details
+kubectl describe pod $podOGGfabric -n microhacks
+# Check used image:
+kubectl get deployment oggfabric-goldengate-bigdata -n microhacks -o jsonpath='{.spec.template.spec.containers[0].image}'
+# check used services
+kubectl get service --namespace ingress-nginx nginx-quick-ingress-nginx-controller --output wide
+# get external IP of nginx controller; you may need to wait a few minutes until the IP is assigned
+kubectl get service -n microhacks -o jsonpath='{.items[*].status.loadBalancer.ingress[*].ip}'
+~~~
+
+You can now access the GoldenGate Microservices UI at: `https://ggfabric.<EXTERNAL-IP>.nip.io` (e.g. `https://ggfabric.xxx.xxx.xxx.xxx.nip.io`)
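+
+A quick reachability check (a sketch; `-SkipCertificateCheck` requires PowerShell 7+ and is used here because the ingress serves a self-signed default certificate):
+
+~~~powershell
+Invoke-WebRequest "https://ggfabric.$EXTIP.nip.io" -SkipCertificateCheck | Select-Object StatusCode
+~~~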
+
+
+## Useful Resources
+
+- [Azure Data Fabric Documentation](https://docs.microsoft.com/en-us/azure/data-factory/)
+- [Oracle ADB Integration Guides](https://docs.oracle.com/en/cloud/paas/autonomous-database/)
+- [Hybrid Data Integration Patterns](https://docs.microsoft.com/en-us/azure/architecture/)
+
+## 💡 Tips and Tricks
+
+### Redeploy if things go wrong
+
+~~~powershell
+# login to aks
+az aks get-credentials -g $rgAKS -n $AKSClusterName --overwrite-existing
+# Uninstall the Helm release
+helm uninstall oggfabric -n microhacks
+~~~
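+
+Then reinstall with the corrected values file (the same command as the initial install above):
+
+~~~powershell
+helm install oggfabric oggfree/goldengate-bigdata --values ggfabric.yaml -n microhacks
+~~~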
+
+### Use a private Oracle Container Registry image
+
+~~~powershell
+# delete secret if already exist
+kubectl delete secret container-registry-secret -n microhacks
+
+# Prompt for the password that will be used for all three components
+$password = Read-Host -Prompt "Enter the shared password"
+
+kubectl create secret docker-registry container-registry-secret -n microhacks --docker-username=test@gmail.com --docker-password=$password --docker-server=container-registry.oracle.com
+
+[System.Text.Encoding]::UTF8.GetString([Convert]::FromBase64String(
+ (kubectl get secret container-registry-secret -n microhacks -o jsonpath="{.data.\.dockerconfigjson}")
+))
+~~~
+
+[Back to workspace README](../../README.md)
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/beaver-odaa.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/beaver-odaa.md
new file mode 100644
index 000000000..135b0d65c
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/beaver-odaa.md
@@ -0,0 +1,100 @@
+# 𦫠Challenge 5: Review data replication via Beaver
+
+[Back to workspace README](../../README.md)
+
+π CloudBeaver is a web-based database management tool that allows users to connect to, manage, and query various databases directly from a browser. In the context of Kubernetes, deploying CloudBeaver provides an easy-to-access interface for database administration tasks, such as browsing tables, running SQL queries, and managing database users, without needing to install local database clients. This is especially useful for teams working in cloud or containerized environments.
+
+~~~powershell
+# switch to the subscription where AKS is deployed
+$subAKS="sub-mh1" # replace with your AKS subscription name
+# Make sure your cli points to the AKS subscription
+az account set --subscription $subAKS
+# log in your AKS cluster if not already done
+$rgAKS="aks-user01" # replace with your AKS resource group name
+$AKSClusterName="aks-user01" # replace with your AKS cluster name
+az aks get-credentials -g $rgAKS -n $AKSClusterName --overwrite-existing
+
+# Deploy Cloud Beaver
+helm repo add avisto https://avistotelecom.github.io/charts/
+kubectl create namespace cloudbeaver
+helm install cloudbeaver avisto/cloudbeaver --version 1.0.1 -n cloudbeaver
+
+# get the external IP of the ingress controller and strip spaces
+$EXTIP = (kubectl get service -n ingress-nginx -o jsonpath='{range .items[*]}{.status.loadBalancer.ingress[*].ip} {end}') -replace '\s', ''
+
+echo "
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: cloudbeaver-ingress
+ namespace: cloudbeaver
+spec:
+ ingressClassName: nginx
+ rules:
+ - host: beaver.$EXTIP.nip.io
+ http:
+ paths:
+ - backend:
+ service:
+ name: cloudbeaver
+ port:
+ number: 8978
+ path: /
+ pathType: Prefix
+" | kubectl apply -f -
+
+# print the URL of the beaver ingress
+echo "You can access beaver under: https://beaver.${EXTIP}.nip.io"
+
+# validate access to beaver (uses httpie; alternatively open the URL in a browser)
+http GET https://beaver.${EXTIP}.nip.io --verbose
+~~~
+
+## π Create Connection to the AKS-local Oracle Database
+
+> β οΈ **IMPORTANT**: If you are using an Azure Network Security Group (NSG), make sure your client IP is allowed to access the AKS cluster where CloudBeaver is running.
+
+1. π Go to CloudBeaver and set the Password as follows:
+ - π€ User: cbadmin
+   - π Password: <"Assigned Password"> (replace with your assigned password)
+   - Click the "Next" button at the top of the page, then the "Finish" button.
+2. π Log in to CloudBeaver after you have set the password.
+3. βοΈ Select the cloud icon in the upper-left corner.
+4. β Select "New Connection"
+5. πΆ Select Oracle
+6. βοΈ Select Configuration option "URL" instead of "Manual"
+7. π Enter the following JDBC URL: `jdbc:oracle:thin:@ogghack-goldengate-microhack-sample-db23ai.microhacks:1521/freepdb1`
+8. π€ Set User name = system
+9. π Set Password = <"Assigned Password">
+10. πΎ Select check box "Save credentials for all users with access"
+11. π§ͺ Select "Test"
+12. β You should see the "Success" popup
+13. π― Select "Create" in the upper-right corner
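+
+If the connection test fails, you can check that the in-cluster database service is reachable at all (a minimal sketch using a throwaway busybox pod; the host and port are taken from the JDBC URL above):
+
+~~~powershell
+# run a temporary pod and probe the TCP port of the in-cluster Oracle service
+kubectl run netcheck -it --rm --restart=Never --image=busybox -n microhacks -- `
+  nc -zv ogghack-goldengate-microhack-sample-db23ai.microhacks 1521
+~~~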
+
+## π Create Connection to the ODAA ADB (work in progress)
+
+1. β Select the "New connection" Icon on the upper left corner.
+2. πΆ Select Oracle
+3. βοΈ Select Configuration option "URL" instead of "Manual"
+4. π§ Construct an easy connect string from your current connection string and the needed prefixes, similar to the following one: `jdbc:oracle:thin:@(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=gpdmotes.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_odaa2_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))`
+5. Set "Connection name" to "adb"
+6. π€ Set User name = admin
+7. π Set Password = <"Assigned Password">
+8. πΎ Select check box "Save credentials for all users with access"
+9. π§ͺ Select "Test Connection"
+10. β You should see the "Success" popup
+11. π― Select "Create" in the upper-right corner
+
+
+## π‘ Tips and Tricks
+
+### π Redeploy if things go wrong
+
+~~~powershell
+# login to aks
+az aks get-credentials -g $rgAKS -n $AKSClusterName --overwrite-existing
+# Uninstall the Helm release
+helm uninstall cloudbeaver -n cloudbeaver
+~~~
+
+[Back to workspace README](../../README.md)
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/media/Beaver1.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/media/Beaver1.jpg
new file mode 100644
index 000000000..8134d394a
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/media/Beaver1.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/media/Beaver2.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/media/Beaver2.jpg
new file mode 100644
index 000000000..d3db2e8b6
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/media/Beaver2.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/media/Beaver3.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/media/Beaver3.jpg
new file mode 100644
index 000000000..d3db2e8b6
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/beaver-odaa/media/Beaver3.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/create-odaa-adb.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/create-odaa-adb.md
new file mode 100644
index 000000000..3b12b324c
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/create-odaa-adb.md
@@ -0,0 +1,179 @@
+# π Challenge 2: Create Azure ODAA [Oracle Database@Azure] Database Resources
+
+[Back to workspace README](../../README.md)
+
+1. Registration of the Azure resource providers in Azure. In our case they are already registered, but you can verify the registration - see [Oracle Documentation: Oracle Database at Azure Network Enhancements](https://learn.microsoft.com/en-us/azure/oracle/oracle-db/oracle-database-network-plan)
+2. Check the availability of a VNet and delegated subnet for the deployment of the database.
+3. Deploy an Oracle ADB in Azure.
+ 1. Important: Choose the region FRANCE CENTRAL
+   2. In the Networking section, use "Managed private virtual network IP only"
+4. In this chapter, you will deploy the ADB database via the Azure Portal.
+5. Finally, check if the existing VNet peering between the AKS and database subscriptions is available and correctly configured.
+
+## π°οΈ Delegated Subnet Design (Prerequisites)
+
+- ODAA Autonomous Database can be deployed within Azure Virtual Networks, in delegated subnets that are delegated to Oracle Database@Azure.
+- The client subnet CIDR generally falls between /27 and /22 (inclusive).
+- Valid ranges must use private IPv4 addresses and must avoid the reserved 100.106.0.0/16 and 100.107.0.0/16 blocks used for the interconnect.
+
+A more detailed description can be found here: [Oracle Documentation: Oracle's delegated subnet guidance](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/network-delegated-subnet-design.htm)
+
+**NOTE**: For this Microhack, we have already created the corresponding VNets and subnets, so no additional action is required in this step.
+
+
+
+## π§ What is an Azure Delegated Subnet?
+
+Azure delegated subnets allow you to delegate exclusive control of a subnet within your VNet to a specific Azure service. When you delegate a subnet, the service can deploy and manage its own network resources (NICs, endpoints, routing) within that subnet without requiring you to provision each resource manually. Traffic still flows privately over your VNet, and you remain in control of higher-level constructs like NSGs and route tables.
+
+The delegated subnet is part of the VNet inside your ODAA subscription.
+1. Click on the subscription sub-mhodaa.
+2. Change to the available resource group odaa-user00.
+3. You will see the deployed resources inside the resource group; use the VNet odaa-user00.
+4. In the VNet overview, under the Settings sub-menu you find the Subnets menu.
+5. In the Subnets menu, you see the subnet, and the table shows the delegation for "Oracle.Database/networkAttachments".
+ 
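+
+You can also inspect the delegation from the command line (a minimal sketch; the resource group and VNet names follow the naming above, and the subnet name is a placeholder you must replace):
+
+~~~powershell
+$subnetName = "<your delegated subnet name>"  # shown in the portal steps above
+# show the delegations configured on the ODAA subnet
+az network vnet subnet show `
+  --resource-group odaa-user00 `
+  --vnet-name odaa-user00 `
+  --name $subnetName `
+  --query "delegations[].serviceName" -o tsv
+# expected output: Oracle.Database/networkAttachments
+~~~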
+
+
+## π οΈ Create an ODAA Autonomous Database Instance
+
+### Log in to the [Azure portal](https://portal.azure.com)
+
+In the Azure portal, search for Oracle Services and select **Oracle Database@Azure**.
+
+
+
+### Select Oracle Autonomous Database
+
+Select **Create Oracle Autonomous Database** and then "Create" to start creating the Autonomous Database.
+
+
+
+### Define Azure Basics
+
+- Subscription: Select "sub-mhodaa"
+- Resource Group: Select "odaa-user"
+- Database name: user
+- Region: France Central
+
+
+
+### Settings of the ADB
+
+> [!IMPORTANT]
+>
+> Set up the ADB with exactly the following settings:
+>
+> **ADB Deployment Settings:**
+> 1. Workload type: **OLTP**
+> 2. Database version: **23ai**
+> 3. ECPU Count: **2**
+> 4. Compute auto scaling: **off**
+> 5. Storage: **20 GB**
+> 6. Storage autoscaling: **off**
+> 7. Backup retention period in days: **1 day**
+> 8. Administrator password: (do not use '!' inside your password)
+> 9. License type: **License included**
+> 10. Oracle database edition: **Enterprise Edition**
+
+
+
+### Network Setting
+
+1. For connectivity, choose the Access type "Managed private virtual network IP only"
+
+
+
+### Final Summary of the ADB Shared Settings
+
+Review the final summary and click "Create".
+
+
+
+### Deployment finished
+
+The deployment will take between 10 and 15 minutes.
+
+
+
+After the deployment is finished, you see the overview page of your newly created Autonomous Database.
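+
+While you wait, the provisioning state can also be watched from the CLI (a minimal sketch using the generic resource commands; the `properties.lifecycleState` field name is an assumption and may differ across API versions):
+
+~~~powershell
+# list the ODAA autonomous databases in the resource group
+az resource list `
+  --resource-group odaa-user00 `
+  --resource-type "Oracle.Database/autonomousDatabases" -o table
+# show the lifecycle state of a single instance (replace the name with yours)
+az resource show `
+  --resource-group odaa-user00 `
+  --name "<your-adb-name>" `
+  --resource-type "Oracle.Database/autonomousDatabases" `
+  --query "properties.lifecycleState" -o tsv
+~~~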
+
+### Further Reading
+
+Complete documentation is available under the following links.
+
+[Oracle Documentation: Create an Autonomous Database](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/azucr-create-autonomous-database.html)
+
+
+## **IMPORTANT: While You Are Waiting for the ADB Creation**
+
+You will need the Microhack GitHub repository in the following challenges. Please clone the repository to your local machine if you have not done so yet.
+
+Follow the instructions in the [Clone Partial Repository](../../docs/clone-partial-repo.md) document to clone only the required folder for this Microhack.
+
+## Check the Created ADB in OCI Console
+
+After the ADB has been deployed successfully, check that the ADB is visible on both the Azure Portal and the OCI side. On the OCI side, make sure the region is set to ***France Central*** and the correct compartment is selected.
+
+
+For Oracle Database@Azure, the OCI console is mainly needed for "Oracle-side" lifecycle and integration tasks that still live in OCI, not in the Azure portal:
+
+- **Tenant / identity / policy management**: managing the OCI tenancy, compartments, IAM policies, and user access that relate to the Oracle-managed parts of the service.
+- **Networking and integration on the OCI side**: viewing and managing OCI VCNs, subnets, NSGs/Security Lists, and routes that participate in the private dark-fiber connectivity with Azure.
+- **Advanced Oracle platform services**: using OCI-native services that integrate with Oracle Database@Azure (e.g., GoldenGate, Data Guard configurations that are exposed via OCI, logging/monitoring integrations).
+- **Support, diagnostics, and observability**: accessing OCI-native logs, metrics, events, and support tools when Oracle asks you to verify or adjust something from the OCI side.
+
+In short: day-to-day database and app operations happen in Azure and the Azure portal; the OCI console is needed when you touch the underlying Oracle/OCI tenancy, networking, or advanced Oracle platform services that sit "behind" Oracle Database@Azure.
+
+To access the OCI console, use the following link after you are logged in to the Azure portal under your newly created ODAA Autonomous Database resource:
+
+
+At the OCI console login page, select the "Entra ID" link:
+
+
+You will land on the Oracle ADB databases overview page:
+
+
+
+
+## Check the Existing VNet Peering
+
+To save time and let you focus on the ODAA service itself, the VNet peering between both subscriptions is already in place and only needs to be verified. Switch to the resource group aks-user[assigned number]. Under the Settings section, you find the Peerings menu item. Open the peering and check that the peering sync status and peering state are active.
+
+
+The check of the VNet peering can be done from the ODAA side as well.
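+
+The same check works from the CLI (a minimal sketch; the VNet name is assumed to match its resource group, as in this Microhack):
+
+~~~powershell
+# list peerings on the AKS VNet and show their state and sync level
+az network vnet peering list `
+  --resource-group aks-user01 `
+  --vnet-name aks-user01 `
+  --query "[].{name:name, state:peeringState, sync:peeringSyncLevel}" -o table
+~~~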
+
+---
+
+## Tips and Tricks
+
+### How to Control What Can Be Deployed with Azure Policies and RBAC
+
+Oracle Database@Azure introduces new built-in RBAC roles to help you manage access to Oracle Database@Azure resources. These roles can be assigned to users, groups, or service principals to control who can perform specific actions on Oracle Database@Azure resources. An overview of the different Azure RBAC roles can be found here: [Oracle documentation on RBAC roles](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/onboard-access-control.htm)
+
+If you consider using Azure Policy to restrict what can be deployed, note that Azure Policy only accepts resource fields that have published aliases.
+
+Oracle Database@Azure ADB doesn't currently expose aliases for dataStorageSizeInGbs, backupRetentionPeriodInDays, isAutoScalingEnabled, isAutoScalingForStorageEnabled, licenseModel, or computeCount, so the service rejects any policy trying to evaluate them (InvalidPolicyAlias).
+
+Currently, you can only restrict locations.
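+
+As an example, a location restriction can be assigned with the built-in "Allowed locations" policy (a minimal sketch; the assignment name and scope are illustrative, and JSON quoting may need adjustment for your shell):
+
+~~~powershell
+# look up the built-in "Allowed locations" policy definition
+$policyName = az policy definition list --query "[?displayName=='Allowed locations'].name | [0]" -o tsv
+# assign it to the ODAA resource group, allowing only France Central
+$params = '{ "listOfAllowedLocations": { "value": ["francecentral"] } }'
+az policy assignment create `
+  --name "odaa-allowed-locations" `
+  --policy $policyName `
+  --resource-group odaa-user00 `
+  --params $params
+~~~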
+
+[Back to workspace README](../../README.md)
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/adb_creation2.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/adb_creation2.png
new file mode 100644
index 000000000..3602dfd92
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/adb_creation2.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/adb_creation3.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/adb_creation3.png
new file mode 100644
index 000000000..2af99356f
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/adb_creation3.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/adb_creation_01.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/adb_creation_01.jpg
new file mode 100644
index 000000000..8b554f978
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/adb_creation_01.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/delegated_subnet.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/delegated_subnet.jpg
new file mode 100644
index 000000000..824546610
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/delegated_subnet.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 10.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 10.png
new file mode 100644
index 000000000..3dcbd7d3e
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 10.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 11.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 11.png
new file mode 100644
index 000000000..167cec022
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 11.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 12.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 12.png
new file mode 100644
index 000000000..8ecd3f551
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 12.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 13.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 13.png
new file mode 100644
index 000000000..118aa2011
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 13.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 2.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 2.png
new file mode 100644
index 000000000..3c66ce74c
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 2.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 3.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 3.png
new file mode 100644
index 000000000..4cf75f877
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 3.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 4.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 4.png
new file mode 100644
index 000000000..338873114
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 4.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 5.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 5.png
new file mode 100644
index 000000000..bffe9bd92
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 5.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 6.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 6.png
new file mode 100644
index 000000000..15fd36114
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 6.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 7.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 7.png
new file mode 100644
index 000000000..874aea388
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 7.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 8.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 8.png
new file mode 100644
index 000000000..bd68ffb32
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 8.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 9.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 9.png
new file mode 100644
index 000000000..3eaecbeca
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy 9.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy.png
new file mode 100644
index 000000000..086cfbe1f
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image copy.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image.png
new file mode 100644
index 000000000..55ada4841
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/image.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/oci_region_check_compartment.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/oci_region_check_compartment.jpg
new file mode 100644
index 000000000..0084b5738
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/oci_region_check_compartment.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/vnet_peering/vnet_peering_1.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/vnet_peering/vnet_peering_1.jpg
new file mode 100644
index 000000000..5caf96c8b
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/vnet_peering/vnet_peering_1.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/vnet_peering/vnet_peering_2.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/vnet_peering/vnet_peering_2.jpg
new file mode 100644
index 000000000..9e8802d5b
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/vnet_peering/vnet_peering_2.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/vnet_peering/vnet_peering_3.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/vnet_peering/vnet_peering_3.jpg
new file mode 100644
index 000000000..a54924c35
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-adb/media/vnet_peering/vnet_peering_3.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/create-odaa-subscription.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/create-odaa-subscription.md
new file mode 100644
index 000000000..3aa32e580
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/create-odaa-subscription.md
@@ -0,0 +1,222 @@
+# π Challenge 1: Create Azure ODAA [Oracle Database@Azure] Service
+
+[Back to workspace README](../../README.md)
+
+> [!NOTE]
+> **This is a theoretical challenge only.** No action is required from participants aside from reading the content. The ODAA subscription has already been created for you to save time.
+
+## π§± ODAA Main Components
+
+1. ODAA Subscription: a container that holds the different Oracle Database services and defines the commercial aspects of the service
+2. ODAA Database services: the database services that are used to run Oracle Database on Azure
+
+Without an ODAA Subscription, you cannot create any ODAA Database services.
+
+### Component Hierarchy Diagram
+
+```mermaid
+flowchart TB
+ subgraph Azure["βοΈ Azure"]
+ subgraph AzureSub["Azure Subscription"]
+            subgraph ODAAS["ODAA Subscription<br/>(Container & Commercial Terms)"]
+                DB1["ποΈ Oracle Database Service 1<br/>(e.g., Autonomous DB)"]
+                DB2["ποΈ Oracle Database Service 2<br/>(e.g., Exadata DB)"]
+                DB3["ποΈ Oracle Database Service N"]
+ end
+ end
+ end
+
+ style Azure fill:#0078D4,color:#fff
+ style AzureSub fill:#50E6FF,color:#000
+ style ODAAS fill:#C74634,color:#fff
+ style DB1 fill:#F5DEB3,color:#000
+ style DB2 fill:#F5DEB3,color:#000
+ style DB3 fill:#F5DEB3,color:#000
+```
+
+> **Key Concept**: The ODAA Subscription acts as a container that must exist before you can create any Oracle Database services within it.
+
+## π οΈ Prerequisites
+
+To create an ODAA Subscription, you will need an Azure subscription with permissions to purchase resources from the Azure Marketplace.
+In addition, the subscription must have the following resource providers registered:
+
+- Oracle.Database
+- Microsoft.BareMetal
+- Microsoft.Network
+- Microsoft.Compute
+
+This can be done by following the instructions in
+* [Oracleβs resource provider registration guide](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/oaaprerequisites.htm#register-resource-providers)
+* [Network planning for Oracle Database@Azure](https://learn.microsoft.com/en-us/azure/oracle/oracle-db/oracle-database-network-plan)
+
+
+Enablement of the enhanced networking capabilities will soon become the default behavior. Until then, you can check whether the Azure resource providers are enabled via the Azure Portal or the Azure CLI. Because these settings are made at the subscription level, the providers are already enabled for this Microhack. Below we briefly describe how to check the status via the Azure Portal.
+
+1. Go to the subscription where ODAA is deployed. In our case, the subscription has the name sub-mhodaa.
+2. In the subscription view, under the Settings menu, you find the sub-menu Resource providers.
+3. Click on Resource providers and search for the providers listed above.
+
+
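+
+The same check can be done via the Azure CLI (a minimal sketch; make sure your CLI context points to the ODAA subscription):
+
+~~~powershell
+# check the registration state of the required resource providers
+$providers = @("Oracle.Database", "Microsoft.BareMetal", "Microsoft.Network", "Microsoft.Compute")
+foreach ($p in $providers) {
+    az provider show --namespace $p --query "{namespace:namespace, state:registrationState}" -o table
+}
+# register a provider if it is not yet registered
+# az provider register --namespace Oracle.Database
+~~~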
+
+### π‘οΈ Permissions for Onboarding and Provisioning ODAA
+
+Many of the tasks you perform during ODAA Subscription onboarding require permissions in either the Azure cloud or the Oracle Cloud Infrastructure (OCI) cloud. Review the following link that details the permissions you need for each task of the onboarding process:
+
+- [Oracle Documentation Permissions for Onboarding and Provisioning ODAA Services](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/oaaprerequisites.htm#permissions)
+
+## π° ODAA Subscription purchase options
+
+You can purchase an ODAA Subscription directly from the [Azure Marketplace](https://marketplace.microsoft.com/en-us/product/saas/oracle.oracle_database_at_azure?tab=Overview).
+
+
+
+The Marketplace offers two purchasing options:
+
+1. Public offer, available to all Azure customers
+2. Private offer, available only to enterprise customers who have negotiated specific terms with Oracle
+
+> β οΈ **IMPORTANT**: In this Microhack the ODAA Subscription has been already purchased via a Public Offer.
+
+## π° Purchase ODAA Subscription via Public Offer
+
+When you purchase an ODAA Subscription through the public offer, you can choose between two licensing options:
+
+1. Bring your own license (BYOL)
+2. Pay-as-you-go (PAYG)
+
+
+
+With BYOL, you can use your existing Oracle Database licenses to run the service, while with PAYG, you pay for the service based on your usage.
+
+### π Onboarding ODAA Subscription via Public Offer
+
+You can find the official Oracle documentation about onboarding ODAA Subscription in [Oracle's PAYG onboarding guide](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/onboard-purchase.htm#purchase-payg-offer).
+
+To use the ODAA Subscription, you configure it within your Azure subscription through a process referred to as [onboarding](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/oaaonboard.htm).
+
+#### Onboarding Process Overview
+
+```mermaid
+flowchart LR
+    A["1οΈβ£ Purchase<br/>Azure Marketplace"] --> B["2οΈβ£ Select<br/>Licensing Option"]
+    B --> C["3οΈβ£ Link to<br/>OCI Tenancy"]
+    C --> D["4οΈβ£ Configure<br/>Account Details"]
+    D --> E["5οΈβ£ Activate<br/>ODAA Subscription"]
+    E --> F["β Ready to<br/>Create Databases"]
+
+ style A fill:#0078D4,color:#fff
+ style B fill:#50E6FF,color:#000
+ style C fill:#C74634,color:#fff
+ style D fill:#F25022,color:#fff
+ style E fill:#7FBA00,color:#fff
+ style F fill:#00A4EF,color:#fff
+```
+
+
+
+> **NOTE**: As shown in the screenshot, you need to select a subscription. The ODAA Subscription itself is not tied to any Azure region, but the database services created under it will be aligned to the Azure region selected during onboarding.
+
+You complete the purchase through Azure Marketplace. After the purchase is complete, you link your Azure subscription with an OCI tenancy. This is called multicloud linking.
+
+Source: [Oracle Docs ODAA Overview](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/overview.htm)
+
+Final step of the ODAA Subscription creation:
+
+
+During the deployment of the ODAA Subscription:
+
+
+After the deployment is complete:
+
+
+Now you can see the ODAA Subscription in your Azure portal where you can create ODAA Database services under this ODAA Subscription.
+
+
+### π ODAA Multicloud Linking
+
+To finish the ODAA Subscription onboarding, you will need an OCI account. This is needed because some components of the ODAA databases must be configured via Oracle Cloud Infrastructure (OCI), even though the physical hardware runs inside Azure data centers.
+
+You perform most onboarding tasks only once, during your ODAA Subscription deployment. After you complete the onboarding tasks, you can begin provisioning and using ODAA database resources in your Azure environment.
+
+#### Multicloud Architecture Diagram
+
+```mermaid
+flowchart TB
+ subgraph Azure["βοΈ Microsoft Azure"]
+ subgraph AzureSub["Azure Subscription"]
+        Portal["π₯οΈ Azure Portal<br/>(Primary Management)"]
+        ODAAS["π¦ ODAA Subscription"]
+        Hardware["π§ Oracle Hardware<br/>(In Azure Data Center)"]
+ end
+ end
+
+ subgraph OCI["βοΈ Oracle Cloud Infrastructure"]
+ subgraph OCITenancy["OCI Tenancy"]
+        Console["π₯οΈ OCI Console<br/>(Oracle-Specific Config)"]
+        OracleServices["βοΈ Oracle Services<br/>(Backup, Patching, etc.)"]
+ end
+ end
+
+ Portal <--> |"Multicloud Link"| Console
+ ODAAS <--> |"Service Integration"| OracleServices
+ Hardware <--> |"Management"| OracleServices
+
+ User(["π€ Administrator"]) --> Portal
+ User -.-> |"Some configs"| Console
+
+ style Azure fill:#0078D4,color:#fff
+ style AzureSub fill:#50E6FF,color:#000
+ style OCI fill:#C74634,color:#fff
+ style OCITenancy fill:#F5DEB3,color:#000
+ style Portal fill:#fff,color:#000
+ style Console fill:#fff,color:#000
+ style ODAAS fill:#FFB900,color:#000
+ style Hardware fill:#7FBA00,color:#fff
+ style OracleServices fill:#F25022,color:#fff
+```
+
+> **Key Concept**: While Oracle hardware runs physically inside Azure Data Centers, some Oracle-specific configurations (like backup policies, patching schedules) are managed through the OCI Console. The multicloud link connects your Azure subscription to an OCI tenancy.
+
+Whether you create a new OCI account or link an existing account depends on your situation. Learn more about the Multicloud Linking here:
+
+- [Linking your Azure subscription with an OCI tenancy](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/onboard-link.htm)
+
+The following shows how we linked our newly created public offer ODAA Subscription with a new OCI account as an example.
+
+1. Select the "View Oracle Subscription" button on the ODAA Subscription Overview page in the Azure portal.
+
+
+2. Select the newly created ODAA Subscription called "default".
+
+
+3. On the Oracle Subscription detail page, select "Oracle Cloud Account" from the left-hand menu.
+
+
+4. Select the link "Create new Oracle Cloud Account on OCI".
+
+
+5. Enter details to create a new OCI account on the Oracle Cloud Infrastructure Portal.
+
+
+6. Message after submitting the new OCI account creation form.
+
+
+7. Go back to the Azure portal and refresh the Oracle Subscription page.
+
+
+8. After a while, the Status will switch from "Pending" to "Activated".
+
+
+## π° Purchase ODAA Subscription via Private Offer (Enterprise)
+
+To purchase an ODAA Subscription via Private Offer, contact the Oracle sales team or your Oracle sales representative for a sales offer. Oracle Sales creates an Azure private offer in Azure Marketplace for your instance of the service. After an offer is created for your organization, you can accept the offer and complete the purchase in the marketplace in the Azure portal. For more information about Azure private offers, see [Overview of the commercial marketplace and enterprise procurement](https://learn.microsoft.com/en-us/marketplace/what-is-commercial-marketplace).
+
+- [Oracle Docs ODAA Subscription private offer](https://docs.oracle.com/en-us/iaas/Content/database-at-azure/onboard-purchase.htm#purchase-private-offer)
+
+## π΅ Billing Information
+
+Billing and payment for the service is processed through Azure for public and private offers. Payment for ODAA Subscription counts toward Microsoft Azure Consumption Commitment (MACC). Existing Oracle Database software customers can use a bring-your-own-license (BYOL) option or an unlimited license agreement (ULA). On your regular invoice for Azure, charges for ODAA Subscription appear with charges for your other Azure Marketplace services.
+([source](https://learn.microsoft.com/en-us/azure/oracle/oracle-db/oracle-database-get-started#purchase-oracle-databaseazure))
+
+[Back to workspace README](../../README.md)
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/oci-account-admin-details.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/oci-account-admin-details.jpeg
new file mode 100644
index 000000000..dd3f3ed42
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/oci-account-admin-details.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/oci-account-validation-message.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/oci-account-validation-message.jpeg
new file mode 100644
index 000000000..c52153e30
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/oci-account-validation-message.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-marketplace-offer.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-marketplace-offer.jpeg
new file mode 100644
index 000000000..6c05c81e8
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-marketplace-offer.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-marketplace-purchase-options.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-marketplace-purchase-options.jpeg
new file mode 100644
index 000000000..203ee9f05
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-marketplace-purchase-options.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-onboarding-final-step.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-onboarding-final-step.jpeg
new file mode 100644
index 000000000..d1a634012
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-onboarding-final-step.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-onboarding-overview.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-onboarding-overview.jpeg
new file mode 100644
index 000000000..9aff99fd4
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-onboarding-overview.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-onboarding-step-detail.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-onboarding-step-detail.jpeg
new file mode 100644
index 000000000..e5d471347
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-onboarding-step-detail.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-service-deployment-complete.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-service-deployment-complete.jpeg
new file mode 100644
index 000000000..d711bdd83
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-service-deployment-complete.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-service-deployment-progress.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-service-deployment-progress.jpeg
new file mode 100644
index 000000000..ff387b3c3
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-service-deployment-progress.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-service-portal-view.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-service-portal-view.jpeg
new file mode 100644
index 000000000..2fa45ac60
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-service-portal-view.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-account-menu.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-account-menu.jpeg
new file mode 100644
index 000000000..01dafc066
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-account-menu.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-activated-status.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-activated-status.jpeg
new file mode 100644
index 000000000..0bef584d3
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-activated-status.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-create-oci-link.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-create-oci-link.jpeg
new file mode 100644
index 000000000..ff7599d18
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-create-oci-link.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-select-default.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-select-default.jpeg
new file mode 100644
index 000000000..31797f70d
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-select-default.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-view-oracle-button.jpeg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-view-oracle-button.jpeg
new file mode 100644
index 000000000..2fa45ac60
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/odaa-subscription-view-oracle-button.jpeg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/resource_provider.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/resource_provider.jpg
new file mode 100644
index 000000000..e9ec5e69f
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/create-odaa-subscription/media/resource_provider.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/entraid/entraid.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/entraid/entraid.md
new file mode 100644
index 000000000..0a3c7f7a5
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/entraid/entraid.md
@@ -0,0 +1,384 @@
+# π Enable Microsoft Entra ID Authentication on Autonomous AI Database
+
+[Back to workspace README](../../README.md)
+
+- [Oracle guide to enabling Entra ID authentication](https://docs.oracle.com/en/cloud/paas/autonomous-database/serverless/adbsb/autonomous-azure-ad-enable.html#GUID-C69B47D7-E5B5-4BC5-BB57-EC5BACFAC1DC)
+- [Oracle database authentication steps](https://docs.oracle.com/en/database/oracle/oracle-database/19/dbseg/authenticating-and-authorizing-microsoft-entra-id-ms-ei-users-oracle-databases-oracle-exadata.html#GUID-CC8FFE52-DC3B-4F2F-B1CA-308E35288C73)
+
+---
+
+## π§Ύ Oracle AI Database Requirements for the Microsoft Entra ID Integration
+
+Before you can configure an Oracle AI Database instance with Microsoft Entra ID, you must ensure that your environment meets special requirements.
+
+For an on-premises, non-cloud Oracle AI Database, follow the steps in this document. If your Oracle AI Database is in one of the following DBaaS platforms, then refer to the platform documentation for additional requirements:
+
+- Using Oracle Autonomous AI Database Serverless
+- Using Oracle Autonomous Database on Dedicated Exadata Infrastructure
+- Use Azure Active Directory Authentication with Oracle Base Database Service
+- Use Azure Active Directory Authentication with Oracle Exadata Database Service on Dedicated Infrastructure
+
+Note the following:
+
+- The Oracle AI Database server must be able to request the Entra ID public key. Depending on the enterprise network connectivity setup, you may need to configure a proxy setting.
+- Users and applications that need to request an Entra ID token must also have network connectivity to Entra ID. You may need to configure a proxy setting for the connection.
+- You must configure Transport Layer Security (TLS) between the Oracle AI Database client and the Oracle AI Database server so that the token can be transported securely. This TLS connection can be either one-way or mutual.
+- The TLS server certificate can be self-signed or signed by a well-known certificate authority. The advantage of using a certificate signed by a well-known Certificate Authority (CA) is that the database client can use the system default certificate store to validate the Oracle AI Database server certificate, instead of having to create and maintain a local wallet with the root certificate. Note that this applies to Linux and Windows clients only.
+
+Set the App ID URI: in the Application ID URI field, enter the app ID URI for the database connection using the following format, and then click Save:
+
+`your_tenancy_url/application_(client)_id`, e.g. `https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e`
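+
+The same setting can be applied via the CLI (a minimal sketch; the client ID and tenancy URL are the example values from above):
+
+~~~powershell
+# set the Application ID URI on the app registration
+az ad app update `
+  --id 7d22ece1-dd60-4279-a911-4b7b95934f2e `
+  --identifier-uris "https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e"
+~~~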
+
+### π 8.2.3 Enabling Microsoft Entra ID v2 Access Tokens
+
+We added the following to the manifest of the app registration:
+
+~~~json
+ "id": "9bda9b0b-fcab-4472-9815-58dc3b908439",
+ "accessTokenAcceptedVersion": 2,
+~~~
+
+~~~powershell
+# Prompt for the password that will be used for all three components
+$password = Read-Host -Prompt "Enter the shared password"
+$rgName="odaa-user00"
+$prefix="odaa-user00"
+$location="francecentral" # e.g. germanywestcentral
+# login to aks if not already done
+az aks get-credentials -g $rgName -n $prefix --overwrite-existing
+# extract the pod name of the instantclient as it contains a random suffix
+$podInstanteClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] }
+# log in to the instantclient pod (e.g. ogghack-goldengate-microhack-sample-instantclient-5985df84vc5xs)
+kubectl exec -it -n microhacks $podInstanteClientName -- /bin/bash
+~~~
+
+Log in to the Oracle Database instance as a user who has been granted the ALTER SYSTEM system privilege:
+
+~~~bash
+sqlplus admin@'(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))'
+~~~
+
+The output should look as follows:
+
+~~~text
+Version 23.4.0.24.05
+
+Copyright (c) 1982, 2024, Oracle. All rights reserved.
+
+Enter password:
+Last Successful login time: Wed Oct 15 2025 09:03:47 +00:00
+
+Connected to:
+Oracle Database 23ai Enterprise Edition Release 23.0.0.0.0 - for Oracle Cloud and Engineered Systems
+Version 23.10.0.25.10
+~~~
+
+Turn on external authentication on the database:
+
+~~~sql
+BEGIN
+ DBMS_CLOUD_ADMIN.ENABLE_EXTERNAL_AUTHENTICATION(
+ type =>'AZURE_AD',
+ params => JSON_OBJECT('tenant_id' VALUE 'f71980b2-590a-4de9-90d5-6fbc867da951',
+ 'application_id' VALUE '7d22ece1-dd60-4279-a911-4b7b95934f2e',
+ 'application_id_uri' VALUE 'https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e'),
+ force => TRUE
+ );
+END;
+/
+-- Ensure that you set the IDENTITY_PROVIDER_TYPE parameter correctly.
+SELECT NAME, VALUE FROM V$PARAMETER WHERE NAME='identity_provider_type';
+~~~
+
+The following output should appear:
+
+~~~text
+NAME
+--------------------------------------------------------------------------------
+VALUE
+--------------------------------------------------------------------------------
+identity_provider_type
+AZURE_AD
+~~~
+
+### πΊοΈ Exclusively Mapping an Oracle Database Schema to a Microsoft Azure User
+
+You can exclusively map an Oracle Database schema to a Microsoft Azure user.
+
+Log in to the Oracle Database instance as a user who has been granted the CREATE USER or ALTER USER system privilege.
+
+Run the CREATE USER or ALTER USER statement with the IDENTIFIED GLOBALLY AS clause specifying the Azure user name.
+For example, to create a new database schema user named ga1 and map this user to an existing Azure user named `ga1@cptazure.org`:
+
+~~~sql
+CREATE USER ga1 IDENTIFIED GLOBALLY AS 'AZURE_USER=ga1@cptazure.org';
+-- Grant the CREATE SESSION privilege to the user.
+GRANT CREATE SESSION TO ga1;
+-- List all users
+SELECT username, user_id, account_status FROM dba_users WHERE username='GA1';
+
+-- Create ACLs for Entra Domains, run as a DBA/ADMIN user
+-- first test with the ADMIN user, then add ACEs for your app user
+BEGIN
+ DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(
+ host => 'login.windows.net', -- you can also use 'login.microsoftonline.com'
+ ace => xs$ace_type(
+ privilege_list => xs$name_list('connect','resolve'),
+ principal_name => 'ADMIN', -- your DBA user
+ principal_type => xs_acl.ptype_db));
+END;
+/
+COMMIT;
+
+BEGIN
+ DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(
+    host => 'login.microsoftonline.com', -- alternative Entra ID endpoint host
+ ace => xs$ace_type(
+ privilege_list => xs$name_list('connect','resolve'),
+ principal_name => 'ADMIN', -- your DBA user
+ principal_type => xs_acl.ptype_db));
+END;
+/
+COMMIT;
+-- then add ACEs for your app user
+BEGIN
+ DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(
+ host => 'login.windows.net', -- you can also use 'login.microsoftonline.com'
+ ace => xs$ace_type(
+ privilege_list => xs$name_list('connect','resolve'),
+        principal_name => 'GA1', -- your app user
+ principal_type => xs_acl.ptype_db));
+END;
+/
+COMMIT;
+
+BEGIN
+ DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(
+    host => 'login.microsoftonline.com', -- alternative Entra ID endpoint host
+ ace => xs$ace_type(
+ privilege_list => xs$name_list('connect','resolve'),
+        principal_name => 'GA1', -- your app user
+ principal_type => xs_acl.ptype_db));
+END;
+/
+COMMIT;
+--- check
+DESCRIBE DBMS_NETWORK_ACL_ADMIN;
+
+-- verify the ACEs configured for the login hosts
+SELECT host, lower_port, upper_port, principal, privilege
+FROM dba_host_aces
+WHERE host LIKE 'login%';
+
+SET SERVEROUTPUT ON SIZE 40000
+DECLARE
+ req UTL_HTTP.REQ;
+ resp UTL_HTTP.RESP;
+BEGIN
+ UTL_HTTP.SET_WALLET(path => 'system:');
+ req := UTL_HTTP.BEGIN_REQUEST('https://login.windows.net/common/discovery/keys');
+ resp := UTL_HTTP.GET_RESPONSE(req);
+ DBMS_OUTPUT.PUT_LINE('HTTP response status code: ' || resp.status_code);
+ UTL_HTTP.END_RESPONSE(resp);
+END;
+/
+
+-- 1. See current user (in the session that runs UTL_HTTP)
+SELECT USER FROM dual;
+
+-- 2. Inspect existing host ACEs for login*
+SELECT host, lower_port, upper_port, principal, privilege
+FROM dba_host_aces
+WHERE host LIKE 'login%' ORDER BY host, principal, privilege;
+
+-- Remove ACEs for your app user
+BEGIN
+DBMS_NETWORK_ACL_ADMIN.REMOVE_HOST_ACE(
+ host => '*',
+ ace => xs$ace_type(privilege_list => xs$name_list('connect'),
+ principal_name => 'GA1',
+ principal_type => xs_acl.ptype_db));
+END;
+/
+
+~~~
+
+### βοΈ 8.4.4 Operational Flow for SQL*Plus Client Connection in PowerShell to Oracle Database
+
+(source: [Oracle documentation](https://docs.oracle.com/en/database/oracle/oracle-database/19/dbseg/authenticating-and-authorizing-microsoft-entra-id-ms-ei-users-oracle-databases-oracle-exadata.html#GUID-455CDC87-C5A1-4A58-801A-29D216CB66B5))
+
+#### πΌ Get Wallet
+
+Download the Wallet from the Azure portal and unzip it to a secure directory.
+
+The Azure user requests an Azure AD access token for the database in PowerShell, and the returned token is written to a token file at a file location.
+
+~~~powershell
+# login to azure with the tenant id of the app registration
+az login --tenant "f71980b2-590a-4de9-90d5-6fbc867da951" --scope "https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e/session:scope:connect"
+# get an access token for the app registration (by resource URI)
+$token=az account get-access-token --resource 'https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e' --query accessToken --output tsv
+# alternatively, request the token by scope
+$token=az account get-access-token --scope "https://cptazure.org/7d22ece1-dd60-4279-a911-4b7b95934f2e/.default" --query accessToken -o tsv
+# view the jwt content
+./resources/scripts/jwt.ps1 -Jwt $token
+# write the token to a file
+$token | Out-File -FilePath .\misc\token.txt -Encoding ascii
+code .\misc\token.txt
+
+# extract the pod name of the instantclient as it contains a random suffix
+$podInstanteClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] }
+# upload all files under folder misc/wallet to the pod
+kubectl cp misc/wallet ${podInstanteClientName}:/tmp -n microhacks
+
+# upload the token file into the wallet folder on the pod
+kubectl cp ./misc/token.txt ${podInstanteClientName}:/tmp/wallet -n microhacks
+
+# log in to the instantclient pod (e.g. ogghack-goldengate-microhack-sample-instantclient-5985df84vc5xs)
+kubectl exec -it -n microhacks $podInstanteClientName -- /bin/bash
+cd /tmp/wallet
+ls -l /tmp/wallet
+cat /tmp/wallet/token.txt
+exit
+~~~
+
+#### π οΈ 8.4.7 Configuring SQL*Plus for Azure AD Access Tokens
+
+(source: [Oracle documentation](https://docs.oracle.com/en/database/oracle/oracle-database/19/dbseg/authenticating-and-authorizing-microsoft-entra-id-ms-ei-users-oracle-databases-oracle-exadata.html#GUID-89CB6E1E-E383-476A-8B46-4343CEF8512E))
+
+##### β Ensure you have an Azure AD user account
+
+##### π Check with an Azure AD administrator or Oracle Database administrator
+
+Confirm the following:
+
+- You have an application client ID that you can use to get Azure AD tokens. If you have Azure AD privileges to do so, then create your own client app registration, similar to registering the Oracle Database instance with an Azure AD tenancy.
+- You are mapped to a global schema in the database.
+- You are using the latest release updates for the Oracle Database client release 19c.
+- This configuration only works with the Oracle Database client release 19c.
+
+##### π₯ Follow the existing process to download the wallet from the Oracle Database instance and then configure SQL*Plus
+
+##### βοΈ Set the sqlnet.ora parameters on the client
+
+Check for the parameter SSL_SERVER_DN_MATCH = ON to ensure that DN matching is enabled.
+
+~~~bash
+# edit the sqlnet.ora file
+vi /tmp/wallet/sqlnet.ora
+# change the line to "SSL_SERVER_DN_MATCH=ON", then press [esc] and type :x to save and exit
+# verify
+cat /tmp/wallet/sqlnet.ora
+~~~
+
+Set the TOKEN_AUTH parameter to enable the client to use the Azure AD token. Include the TOKEN_LOCATION parameter to point to the token location. For example:
+
+~~~bash
+cat <<'EOF' >> /tmp/wallet/sqlnet.ora
+TOKEN_AUTH=OAUTH
+TOKEN_LOCATION="/tmp/wallet/token.txt"
+EOF
+# verify
+cat /tmp/wallet/sqlnet.ora
+~~~
+
+Note that there is no default location. If the token is named token, then you only need to specify the file directory (for example, /test/oracle/aad-token). If the token name is different from token (for example, azure.token), then you must include this name in the path (for example, /test/oracle/aad-token/azure.token).
+
+You can specify the TOKEN_AUTH and TOKEN_LOCATION parameters in tnsnames.ora, as well as in sqlnet.ora. The TOKEN_AUTH and TOKEN_LOCATION values in the tnsnames.ora connect strings take precedence over the sqlnet.ora settings for that connection. For example:
+
+~~~bash
+# verify the current content of tnsnames.ora
+cat /tmp/wallet/tnsnames.ora
+~~~
+
+General example of a connect string with the TOKEN_AUTH and TOKEN_LOCATION parameters:
+
+~~~text
+(description=
+ (retry_count=20)(retry_delay=3)
+ (address=(protocol=tcps)(port=1522)
+ (host=example.us-phoenix-1.oraclecloud.com))
+ (connect_data=(service_name=aaabbbccc_exampledb_high.example.oraclecloud.com))
+ (security=(ssl_server_cert_dn="CN=example.uscom-east-1.oraclecloud.com,
+ OU=Oracle BMCS US, O=Example Corporation,
+ L=Redwood City, ST=California, C=US")
+ (TOKEN_AUTH=OAUTH)(TOKEN_LOCATION="/test/oracle/aad-token"))
+~~~
+
+> NOTE: We modified the sqlnet.ora file directly instead of the tnsnames.ora file. Therefore, the connect strings in the tnsnames.ora file do not contain the TOKEN_AUTH and TOKEN_LOCATION parameters.
+
+~~~text
+adbger_high = (description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1522)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))
+
+adbger_low = (description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1522)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_low.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))
+
+adbger_medium = (description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1522)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_medium.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))
+
+adbger_tp = (description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1522)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_tp.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))
+
+adbger_tpurgent = (description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1522)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_tpurgent.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))
+~~~
+
+After the connect string is updated with the TOKEN_AUTH and TOKEN_LOCATION parameters, the Azure user can log in to the Oracle Database instance by running the following command to start SQL*Plus. You can include the connect descriptor itself or use the name of the descriptor from the tnsnames.ora file.
+
+The Azure user connects to the database using the / (slash) login. Either sqlnet.ora or the tnsnames.ora connect string tells the Instant Client that an Azure AD OAuth2 token is needed and where to retrieve it from; the access token is then sent to the database.
+
+~~~bash
+# set TNS_ADMIN to the wallet directory
+export TNS_ADMIN=/tmp/wallet
+# verify
+echo $TNS_ADMIN
+# delete TNS_ADMIN
+# unset TNS_ADMIN
+
+# connect to the database
+sqlplus /nolog
+connect /@adbger_high  # note: this did not work in our tests
+
+# connect as Azure AD user GA1 using the connection string directly (token authentication)
+sqlplus /@'(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=on))(TOKEN_AUTH=OAUTH)(TOKEN_LOCATION="/tmp/wallet/token.txt"))'
+ga1
+
+# connect as admin using the connection string directly (password authentication)
+sqlplus admin@'(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))'
+~~~
+
+#### Debug
+
+If token authentication fails, first confirm that the connect string or sqlnet.ora actually contains `(TOKEN_AUTH=OAUTH)(TOKEN_LOCATION="/test/oracle/aad-token")`, then test a password-based connection to rule out general connectivity issues:
+
+~~~bash
+sqlplus admin@'(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=eqsmjgp2.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_adbger_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))'
+~~~
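+
+To rule out an expired or mis-scoped token, you can decode the payload of the saved token. A minimal sketch, assuming the token was written to /tmp/wallet/token.txt as above (base64 may complain about missing padding; append one or two = characters to the input if it does):
+
+~~~bash
+# print the JWT payload (check the aud and exp claims) of the saved access token
+cut -d '.' -f2 /tmp/wallet/token.txt | base64 -d 2>/dev/null
+echo
+# compare the "exp" epoch timestamp with the current time
+date +%s
+~~~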
+
+~~~sql
+-- Ensure that you set the IDENTITY_PROVIDER_TYPE parameter correctly.
+SELECT NAME, VALUE FROM V$PARAMETER WHERE NAME='identity_provider_type';
+~~~
+
+~~~text
+NAME
+--------------------------------------------------------------------------------
+VALUE
+--------------------------------------------------------------------------------
+identity_provider_type
+AZURE_AD
+~~~
+
+On the Autonomous Database, check that the mapped database user exists and is global: `select username, authentication_type from dba_users where username = 'GA1';` should show GLOBAL. If it does not, recreate the mapping with the right tenant ID, application (audience) and optional group/role claims using DBMS_CLOUD_ADMIN.CREATE_CLOUD_IDENTITY.
+
+~~~sql
+-- check that the mapped database user exists and is global
+select username, authentication_type from dba_users where username = 'GA1';
+~~~
+
+~~~text
+USERNAME
+--------------------------------------------------------------------------------
+AUTHENTI
+--------
+GA1
+GLOBAL
+~~~
+
+[Back to workspace README](../../README.md)
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/estate-explorer-odaa/estate-explorer-odaa.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/estate-explorer-odaa/estate-explorer-odaa.md
new file mode 100644
index 000000000..2e110a9ac
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/estate-explorer-odaa/estate-explorer-odaa.md
@@ -0,0 +1,32 @@
+# π Challenge 7: (Optional) Use Estate Explorer to visualize the Oracle ADB instance
+
+[Back to workspace README](../../README.md)
+
+## π― Overview
+
+Estate Explorer provides comprehensive visualization and analysis capabilities for Oracle databases, helping you understand your database landscape and performance characteristics.
+
+## π Installation Steps
+
+~~~bash
+# π¦ Install Estate Explorer
+
+# dump the chart's default values (the oggfree helm repo from Challenge 4 must already be added)
+helm show values oggfree/autonomous-free > autonomous.yaml
+# π get the public address of the nginx ingress controller
+export EXTIP=$(kubectl get service -n ingress-nginx -o jsonpath='{range .items[*]} {.status.loadBalancer.ingress[*].ip} {end}')
+# π§ put the external address into the deployment values
+sed -i "s/xxx-xxx-xxx-xxx/${EXTIP// /}/g" autonomous.yaml
+
+# β οΈ BEFORE running the install, set the database password in autonomous.yaml!
+kubectl create namespace estateexplorer
+helm install estate-exp oggfree/autonomous-free --values autonomous.yaml -n estateexplorer
+~~~
+
+## π Post-Installation
+
+1. β³ Wait for all pods to be in running state (see the check below)
+2. π Access the Estate Explorer interface via the configured ingress
+3. π Configure connection to your Oracle ADB instance
+4. π Explore the visualization capabilities
+
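+A quick way to check the deployment, reusing the EXTIP variable from the installation step (the exact UI host name is defined by the chart's ingress, so list it rather than guessing):
+
+~~~bash
+# watch until all Estate Explorer pods are Running
+kubectl get pods -n estateexplorer --watch
+# list the ingress created by the chart to find the exact host name of the UI
+kubectl get ingress -n estateexplorer
+~~~
+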
+[Back to workspace README](../../README.md)
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/ha-oracle-adb/ha-oracle-adb.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/ha-oracle-adb/ha-oracle-adb.md
new file mode 100644
index 000000000..7ee42259c
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/ha-oracle-adb/ha-oracle-adb.md
@@ -0,0 +1,29 @@
+# ποΈ Challenge 6: Setup High Availability for Oracle ADB
+
+[Back to workspace README](../../README.md)
+
+β‘ This challenge focuses on setting up high availability configurations for Oracle Autonomous Database (ADB) to ensure business continuity and disaster recovery capabilities.
+
+## π Overview
+
+π‘οΈ Oracle Autonomous Database provides built-in high availability features, but additional configuration may be required for specific use cases.
+
+## π Steps
+
+1. **π Review Current ADB Configuration**
+ - βοΈ Check the current service level and availability settings
+ - πΎ Verify backup and recovery configurations
+
+2. **π Configure Additional HA Features**
+ - π Set up cross-region data guard if needed
+ - π Configure monitoring and alerting
+ - π§ͺ Test failover scenarios
+
+3. **β Validate HA Setup**
+ - π Perform connectivity tests
+ - π Verify data consistency across replicas
+ - π Test disaster recovery procedures
+
+> βΉοΈ **NOTE**: Specific implementation steps depend on your organization's requirements. Please refer to Oracle documentation for detailed high availability setup procedures for Oracle ADB on Azure.
+
+[Back to workspace README](../../README.md)
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/Deploy-OnPremReplication.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/Deploy-OnPremReplication.ps1
new file mode 100644
index 000000000..cd69fbc7a
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/Deploy-OnPremReplication.ps1
@@ -0,0 +1,531 @@
+#Requires -Version 5.1
+<#
+.SYNOPSIS
+ Deploys Oracle GoldenGate replication from on-premises (AKS) to ODAA ADB
+
+.DESCRIPTION
+ This script automates the deployment of Oracle GoldenGate microhack environment.
+ It handles all the manual steps from the original walkthrough:
+ - Connects to AKS cluster
+ - Configures helm repositories
+ - Uses the existing gghack.yaml template (no modifications)
+ - Overrides user-specific values via Helm --set parameters
+ - Creates Kubernetes secrets
+ - Deploys the GoldenGate helm chart
+ - Waits for deployment completion
+
+.PARAMETER UserName
+ Your assigned username (e.g., user01, user02)
+
+.PARAMETER ADBPassword
+ Password for the ODAA ADB instance (used for all database users)
+
+.PARAMETER ADBConnectionString
+ TNS connection string for your ODAA ADB instance
+ Example: (description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=xxx.adb.eu-paris-1.oraclecloud.com))(connect_data=(service_name=xxx_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))
+
+.PARAMETER AKSResourceGroup
+ Name of the AKS resource group (default: auto-detected from username)
+
+.PARAMETER AKSClusterName
+ Name of the AKS cluster (default: auto-detected from username)
+
+.PARAMETER Subscription
+ Azure subscription name for AKS (default: auto-detected)
+
+.PARAMETER SkipAKSConnection
+ Skip AKS connection (use if already connected)
+
+.PARAMETER SkipHelmSetup
+ Skip helm repository setup (use if already configured)
+
+.PARAMETER Uninstall
+ Uninstall existing deployment before installing
+
+.PARAMETER TemplateFile
+ Path to the gghack.yaml template file (default: ../../resources/template/gghack.yaml)
+ The template is used as base values; user-specific values are overridden via Helm --set
+
+.EXAMPLE
+    .\Deploy-OnPremReplication.ps1 -UserName "user01" -ADBPassword "<password>" -ADBConnectionString "(description= ...)"
+
+.EXAMPLE
+    .\Deploy-OnPremReplication.ps1 -UserName "user01" -ADBPassword "<password>" -ADBConnectionString "(description= ...)" -SkipAKSConnection
+
+.NOTES
+ Author: ODAA MicroHack Team
+ This script simplifies Challenge 4: OnPrem ramp up
+#>
+
+[CmdletBinding()]
+param(
+ [Parameter(Mandatory = $true, HelpMessage = "Your assigned username (e.g., user01)")]
+ [string]$UserName,
+
+ [Parameter(Mandatory = $true, HelpMessage = "Password for ODAA ADB instance")]
+ [string]$ADBPassword,
+
+ [Parameter(Mandatory = $true, HelpMessage = "TNS connection string for ODAA ADB")]
+ [string]$ADBConnectionString,
+
+ [Parameter(Mandatory = $false)]
+ [string]$AKSResourceGroup = "",
+
+ [Parameter(Mandatory = $false)]
+ [string]$AKSClusterName = "",
+
+ [Parameter(Mandatory = $false)]
+ [string]$Subscription = "",
+
+ [Parameter(Mandatory = $false)]
+ [switch]$SkipAKSConnection,
+
+ [Parameter(Mandatory = $false)]
+ [switch]$SkipHelmSetup,
+
+ [Parameter(Mandatory = $false)]
+ [switch]$Uninstall,
+
+ [Parameter(Mandatory = $false, HelpMessage = "Path to the gghack.yaml template file")]
+ [string]$TemplateFile = ""
+)
+
+# ============================================================================
+# Configuration
+# ============================================================================
+$ErrorActionPreference = "Stop"
+$Namespace = "microhacks"
+$HelmReleaseName = "ogghack"
+$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+
+# ============================================================================
+# Helper Functions
+# ============================================================================
+
+function Write-Step {
+ param([string]$Message, [string]$Icon = "π")
+ Write-Host "`n$Icon $Message" -ForegroundColor Cyan
+ Write-Host ("=" * 60) -ForegroundColor DarkGray
+}
+
+function Write-Success {
+ param([string]$Message)
+    Write-Host "β $Message" -ForegroundColor Green
+}
+
+function Write-Warning {
+ param([string]$Message)
+ Write-Host "β οΈ $Message" -ForegroundColor Yellow
+}
+
+function Write-Error {
+ param([string]$Message)
+ Write-Host "β $Message" -ForegroundColor Red
+}
+
+function Write-Info {
+ param([string]$Message)
+ Write-Host "βΉοΈ $Message" -ForegroundColor White
+}
+
+function Test-Command {
+ param([string]$Command)
+ try {
+ Get-Command $Command -ErrorAction Stop | Out-Null
+ return $true
+ }
+ catch {
+ return $false
+ }
+}
+
+function Test-Prerequisites {
+ Write-Step "Checking Prerequisites" "π"
+
+ $required = @("az", "kubectl", "helm")
+ $missing = @()
+
+ foreach ($cmd in $required) {
+ if (Test-Command $cmd) {
+ Write-Success "$cmd is installed"
+ }
+ else {
+ Write-Error "$cmd is NOT installed"
+ $missing += $cmd
+ }
+ }
+
+ if ($missing.Count -gt 0) {
+ throw "Missing prerequisites: $($missing -join ', '). Please install them first."
+ }
+}
+
+function Connect-ToAKS {
+ param(
+ [string]$ResourceGroup,
+ [string]$ClusterName,
+ [string]$SubscriptionName
+ )
+
+ Write-Step "Connecting to AKS Cluster" "β"
+
+ # Set subscription if provided
+ if ($SubscriptionName) {
+ Write-Info "Setting subscription to: $SubscriptionName"
+ az account set --subscription $SubscriptionName
+ }
+
+ # Get AKS credentials
+ Write-Info "Getting AKS credentials for cluster: $ClusterName"
+ az aks get-credentials -g $ResourceGroup -n $ClusterName --overwrite-existing
+
+ # Verify connection
+ $namespaces = kubectl get namespaces --no-headers 2>&1
+ if ($LASTEXITCODE -ne 0) {
+ throw "Failed to connect to AKS cluster"
+ }
+
+ Write-Success "Connected to AKS cluster: $ClusterName"
+}
+
+function Initialize-HelmRepository {
+ Write-Step "Setting up Helm Repository" "π¦"
+
+ Write-Info "Adding oggfree helm repository..."
+ helm repo add oggfree https://ilfur.github.io/VirtualAnalyticRooms 2>&1 | Out-Null
+
+ Write-Info "Updating helm repositories..."
+ helm repo update
+
+ Write-Success "Helm repository configured"
+}
+
+function Get-IngressExternalIP {
+ Write-Step "Getting Ingress Controller External IP" "π"
+
+ $maxAttempts = 12
+ $attempt = 1
+ $externalIP = ""
+
+ while ($attempt -le $maxAttempts) {
+ Write-Info "Attempt $attempt/$maxAttempts - Checking for external IP..."
+
+ $externalIP = (kubectl get service -n ingress-nginx -o jsonpath='{range .items[*]}{.status.loadBalancer.ingress[*].ip}{end}' 2>&1) -replace '\s', ''
+
+ if ($externalIP -and $externalIP -match '^\d+\.\d+\.\d+\.\d+$') {
+ Write-Success "External IP found: $externalIP"
+ return $externalIP
+ }
+
+ Write-Warning "External IP not yet assigned, waiting 10 seconds..."
+ Start-Sleep -Seconds 10
+ $attempt++
+ }
+
+ throw "Failed to get external IP after $maxAttempts attempts. Please check your ingress controller."
+}
+
+function Test-TemplateFile {
+ param(
+ [string]$TemplateFile
+ )
+
+ Write-Step "Validating Template File" "π"
+
+ if (-not (Test-Path $TemplateFile)) {
+ throw "Template file not found: $TemplateFile"
+ }
+
+ Write-Success "Template file found: $TemplateFile"
+ Write-Info "Template will be used as base values (no modifications)"
+}
+
+function New-KubernetesSecrets {
+ param(
+ [string]$Namespace,
+ [string]$Password
+ )
+
+ Write-Step "Creating Kubernetes Secrets" "π"
+
+ # Create namespace if it doesn't exist
+ Write-Info "Creating namespace: $Namespace"
+ kubectl create namespace $Namespace --dry-run=client -o yaml | kubectl apply -f - 2>&1 | Out-Null
+
+ # Delete existing secrets if they exist (to update them)
+ kubectl delete secret ogg-admin-secret -n $Namespace 2>&1 | Out-Null
+ kubectl delete secret db-admin-secret -n $Namespace 2>&1 | Out-Null
+
+ # Create OGG admin secret
+ Write-Info "Creating ogg-admin-secret..."
+ kubectl create secret generic ogg-admin-secret -n $Namespace `
+ --from-literal=oggusername=ggadmin `
+ --from-literal=oggpassword=$Password
+
+ # Create database admin secret
+ Write-Info "Creating db-admin-secret..."
+ kubectl create secret generic db-admin-secret -n $Namespace `
+ --from-literal=srcAdminPwd=$Password `
+ --from-literal=trgAdminPwd=$Password `
+ --from-literal=srcGGUserName=ggadmin `
+ --from-literal=trgGGUserName=ggadmin `
+ --from-literal=srcGGPwd=$Password `
+ --from-literal=trgGGPwd=$Password
+
+ Write-Success "Kubernetes secrets created"
+}
+
+function Uninstall-GoldenGate {
+ param(
+ [string]$ReleaseName,
+ [string]$Namespace
+ )
+
+ Write-Step "Uninstalling Existing Deployment" "ποΈ"
+
+ $existing = helm list -n $Namespace --filter $ReleaseName -q 2>&1
+ if ($existing -eq $ReleaseName) {
+ Write-Info "Uninstalling helm release: $ReleaseName"
+ helm uninstall $ReleaseName -n $Namespace
+
+ Write-Info "Waiting for pods to terminate..."
+ Start-Sleep -Seconds 10
+
+ # Wait for pods to be deleted
+ $maxWait = 60
+ $waited = 0
+ while ($waited -lt $maxWait) {
+ $pods = kubectl get pods -n $Namespace --no-headers 2>&1
+ if (-not $pods -or $pods -match "No resources found") {
+ break
+ }
+ Start-Sleep -Seconds 5
+ $waited += 5
+ }
+
+ Write-Success "Existing deployment uninstalled"
+ }
+ else {
+ Write-Info "No existing deployment found"
+ }
+}
+
+function Install-GoldenGate {
+ param(
+ [string]$ReleaseName,
+ [string]$Namespace,
+ [string]$ValuesFile,
+ [string]$UserName,
+ [string]$ExternalIP,
+ [string]$ConnectionString
+ )
+
+ Write-Step "Installing GoldenGate via Helm" "π"
+
+ Write-Info "Using template file: $ValuesFile"
+ Write-Info "Overriding values via --set parameters:"
+ Write-Info " - microhack.user = $UserName"
+ Write-Info " - services.external.vhostName = gghack.$ExternalIP.nip.io"
+    Write-Info "  - databases.trgConn = <your connection string>"
+
+ # Build the vhostName value
+ $vhostName = "gghack.$ExternalIP.nip.io"
+
+ # Use --set-string to handle special characters in connection string
+ Write-Info "Installing helm chart: oggfree/goldengate-microhack-sample"
+ helm install $ReleaseName oggfree/goldengate-microhack-sample `
+ --values $ValuesFile `
+ --set-string microhack.user=$UserName `
+ --set-string databases.trgConn=$ConnectionString `
+ --set-string services.external.vhostName=$vhostName `
+ -n $Namespace
+
+ Write-Success "Helm installation initiated (template unchanged)"
+}
+
+function Wait-ForDeployment {
+ param(
+ [string]$Namespace
+ )
+
+ Write-Step "Waiting for Deployment to Complete" "β³"
+
+ Write-Info "This may take 5-10 minutes. You can also watch progress with:"
+ Write-Host " kubectl get pods -n $Namespace --watch" -ForegroundColor Yellow
+ Write-Host ""
+
+ $maxWait = 600 # 10 minutes
+ $waited = 0
+ $checkInterval = 15
+
+ while ($waited -lt $maxWait) {
+ $pods = kubectl get pods -n $Namespace --no-headers 2>&1
+
+ if ($pods -and $pods -notmatch "No resources found") {
+ $podLines = $pods -split "`n" | Where-Object { $_ -match '\S' }
+
+ $allReady = $true
+ $completed = 0
+ $running = 0
+ $pending = 0
+
+ foreach ($line in $podLines) {
+ if ($line -match 'Completed') {
+ $completed++
+ }
+ elseif ($line -match 'Running' -and $line -match '1/1') {
+ $running++
+ }
+ else {
+ $allReady = $false
+ $pending++
+ }
+ }
+
+ Write-Host "`r[$(Get-Date -Format 'HH:mm:ss')] Running: $running | Completed: $completed | Pending: $pending " -NoNewline
+
+ # Check if db-prepare-job is completed
+ $prepJob = $podLines | Where-Object { $_ -match 'db-prepare-job' -and $_ -match 'Completed' }
+ if ($prepJob -and $running -ge 3) {
+ Write-Host ""
+ Write-Success "Deployment completed successfully!"
+ return $true
+ }
+ }
+
+ Start-Sleep -Seconds $checkInterval
+ $waited += $checkInterval
+ }
+
+ Write-Host ""
+ Write-Warning "Deployment is still in progress. Please check manually with: kubectl get pods -n $Namespace"
+ return $false
+}
+
+function Show-DeploymentSummary {
+ param(
+ [string]$ExternalIP,
+ [string]$Namespace,
+ [string]$ConnectionString
+ )
+
+ Write-Step "Deployment Summary" "π"
+
+ Write-Host ""
+ Write-Host "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Green
+ Write-Host "β DEPLOYMENT COMPLETE β" -ForegroundColor Green
+ Write-Host "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Green
+ Write-Host ""
+
+ Write-Host "π Access URLs:" -ForegroundColor Cyan
+ Write-Host " GoldenGate UI: https://gghack.$ExternalIP.nip.io" -ForegroundColor White
+ Write-Host " SQLPlus Web: https://gghack.$ExternalIP.nip.io/sqlplus/vnc.html" -ForegroundColor White
+ Write-Host " Jupyter Notebook: https://gghack.$ExternalIP.nip.io/jupyter/" -ForegroundColor White
+ Write-Host " GG Big Data: https://daagghack.$ExternalIP.nip.io" -ForegroundColor White
+ Write-Host ""
+
+ Write-Host "π Credentials:" -ForegroundColor Cyan
+    Write-Host "   GoldenGate Admin:   ggadmin / <your ADB password>" -ForegroundColor White
+ Write-Host " Jupyter Password: Welcome1234" -ForegroundColor White
+ Write-Host ""
+
+ Write-Host "π Useful Commands:" -ForegroundColor Cyan
+ Write-Host " Check pods: kubectl get pods -n $Namespace" -ForegroundColor White
+    Write-Host "   Check logs:          kubectl logs -n $Namespace <pod-name>" -ForegroundColor White
+    Write-Host "   Connect to client:   kubectl exec -it -n $Namespace <instantclient-pod> -- /bin/bash" -ForegroundColor White
+ Write-Host ""
+
+ Write-Host "π Next Steps:" -ForegroundColor Cyan
+ Write-Host " 1. Wait for all pods to be Running/Completed" -ForegroundColor White
+ Write-Host " 2. Access GoldenGate UI to verify replication setup" -ForegroundColor White
+ Write-Host " 3. Connect to instantclient pod to verify data migration" -ForegroundColor White
+ Write-Host " 4. See 'onprem-ramp-up-simplified.md' for verification steps" -ForegroundColor White
+ Write-Host ""
+}
+
+# ============================================================================
+# Main Execution
+# ============================================================================
+
+function Main {
+ $startTime = Get-Date
+
+ Write-Host ""
+ Write-Host "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Cyan
+ Write-Host "β π Challenge 4: OnPrem Ramp Up - Automated Deployment β" -ForegroundColor Cyan
+ Write-Host "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Cyan
+ Write-Host ""
+
+ # Auto-detect AKS settings from username if not provided
+ if (-not $AKSResourceGroup) {
+ $AKSResourceGroup = "aks-$UserName"
+ Write-Info "Auto-detected AKS Resource Group: $AKSResourceGroup"
+ }
+ if (-not $AKSClusterName) {
+ $AKSClusterName = "aks-$UserName"
+ Write-Info "Auto-detected AKS Cluster Name: $AKSClusterName"
+ }
+
+ # Check prerequisites
+ Test-Prerequisites
+
+ # Connect to AKS
+ if (-not $SkipAKSConnection) {
+ Connect-ToAKS -ResourceGroup $AKSResourceGroup -ClusterName $AKSClusterName -SubscriptionName $Subscription
+ }
+ else {
+ Write-Info "Skipping AKS connection (using existing context)"
+ }
+
+ # Setup Helm
+ if (-not $SkipHelmSetup) {
+ Initialize-HelmRepository
+ }
+ else {
+ Write-Info "Skipping Helm setup"
+ }
+
+ # Get external IP
+ $externalIP = Get-IngressExternalIP
+
+ # Uninstall if requested
+ if ($Uninstall) {
+ Uninstall-GoldenGate -ReleaseName $HelmReleaseName -Namespace $Namespace
+ }
+
+ # Resolve template file path
+ if (-not $TemplateFile) {
+ $TemplateFile = Join-Path $ScriptDir "..\..\resources\template\gghack.yaml"
+ }
+ $TemplateFile = [System.IO.Path]::GetFullPath($TemplateFile)
+
+ # Validate template file exists
+ Test-TemplateFile -TemplateFile $TemplateFile
+
+ # Create secrets
+ New-KubernetesSecrets -Namespace $Namespace -Password $ADBPassword
+
+ # Install GoldenGate using template + --set overrides (no file modification)
+ Install-GoldenGate -ReleaseName $HelmReleaseName -Namespace $Namespace -ValuesFile $TemplateFile `
+ -UserName $UserName -ExternalIP $externalIP -ConnectionString $ADBConnectionString
+
+ # Wait for deployment
+ $deploymentComplete = Wait-ForDeployment -Namespace $Namespace
+
+ # Show summary
+ Show-DeploymentSummary -ExternalIP $externalIP -Namespace $Namespace -ConnectionString $ADBConnectionString
+
+ $endTime = Get-Date
+ $duration = $endTime - $startTime
+ Write-Host "Total execution time: $($duration.ToString('mm\:ss'))" -ForegroundColor Gray
+}
+
+# Run main function
+try {
+ Main
+}
+catch {
+ Write-Error "Deployment failed: $_"
+ Write-Host "Stack trace: $($_.ScriptStackTrace)" -ForegroundColor DarkGray
+ exit 1
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/Verify-Replication.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/Verify-Replication.ps1
new file mode 100644
index 000000000..0dcdf7996
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/Verify-Replication.ps1
@@ -0,0 +1,195 @@
+#Requires -Version 5.1
+<#
+.SYNOPSIS
+ Verify Oracle GoldenGate replication is working correctly
+
+.DESCRIPTION
+ This script helps verify that the GoldenGate deployment is working by:
+ - Checking pod status
+ - Testing database connectivity
+ - Creating test data and verifying replication
+
+.PARAMETER ADBConnectionString
+ TNS connection string for your ODAA ADB instance
+
+.PARAMETER ADBPassword
+ Password for the ODAA ADB admin user
+
+.PARAMETER Namespace
+ Kubernetes namespace (default: microhacks)
+
+.EXAMPLE
+    .\Verify-Replication.ps1 -ADBConnectionString "(description= ...)" -ADBPassword "<password>"
+
+.NOTES
+ Run this after Deploy-OnPremReplication.ps1 has completed successfully
+#>
+
+[CmdletBinding()]
+param(
+ [Parameter(Mandatory = $true)]
+ [string]$ADBConnectionString,
+
+ [Parameter(Mandatory = $true)]
+ [string]$ADBPassword,
+
+ [Parameter(Mandatory = $false)]
+ [string]$Namespace = "microhacks"
+)
+
+function Write-Step {
+ param([string]$Message, [string]$Icon = "π")
+ Write-Host "`n$Icon $Message" -ForegroundColor Cyan
+ Write-Host ("=" * 50) -ForegroundColor DarkGray
+}
+
+function Write-Success { param([string]$Message) Write-Host "β $Message" -ForegroundColor Green }
+function Write-Warning { param([string]$Message) Write-Host "β οΈ $Message" -ForegroundColor Yellow }
+function Write-Error { param([string]$Message) Write-Host "β $Message" -ForegroundColor Red }
+function Write-Info { param([string]$Message) Write-Host "βΉοΈ $Message" -ForegroundColor White }
+
+# ============================================================================
+# Check Pod Status
+# ============================================================================
+
+Write-Step "Checking Pod Status" "π¦"
+
+$pods = kubectl get pods -n $Namespace --no-headers 2>&1
+if ($LASTEXITCODE -ne 0) {
+ Write-Error "Failed to get pods. Are you connected to the AKS cluster?"
+ exit 1
+}
+
+$podLines = $pods -split "`n" | Where-Object { $_ -match '\S' }
+
+$runningCount = 0
+$completedCount = 0
+$errorCount = 0
+
+foreach ($line in $podLines) {
+ $parts = $line -split '\s+'
+ $podName = $parts[0]
+ $status = $parts[2]
+
+ if ($status -eq "Running") {
+ Write-Success "$podName - Running"
+ $runningCount++
+ }
+ elseif ($status -eq "Completed") {
+ Write-Success "$podName - Completed"
+ $completedCount++
+ }
+ else {
+ Write-Warning "$podName - $status"
+ $errorCount++
+ }
+}
+
+Write-Host ""
+Write-Info "Summary: $runningCount Running, $completedCount Completed, $errorCount Other"
+
+if ($runningCount -lt 3) {
+ Write-Warning "Not all pods are running yet. Please wait and try again."
+ Write-Host ""
+ Write-Host "Watch pod status with:" -ForegroundColor Yellow
+ Write-Host " kubectl get pods -n $Namespace --watch" -ForegroundColor White
+ exit 0
+}
+
+# ============================================================================
+# Get Instant Client Pod
+# ============================================================================
+
+Write-Step "Preparing Verification Commands" "π§"
+
+$instantClientPod = $podLines | Where-Object { $_ -match 'instantclient' } | ForEach-Object { ($_ -split '\s+')[0] }
+
+if (-not $instantClientPod) {
+ Write-Error "Instant client pod not found"
+ exit 1
+}
+
+Write-Success "Found instant client pod: $instantClientPod"
+
+# ============================================================================
+# Generate Verification Commands
+# ============================================================================
+
+Write-Step "Verification Steps" "π"
+
+Write-Host @"
+
+To verify replication is working, follow these steps:
+
+"@ -ForegroundColor White
+
+Write-Host "1οΈβ£ Connect to the Instant Client Pod:" -ForegroundColor Cyan
+Write-Host @"
+ kubectl exec -it -n $Namespace $instantClientPod -- /bin/bash
+
+"@ -ForegroundColor Yellow
+
+Write-Host "2οΈβ£ Inside the pod, connect to ODAA ADB and check SH2 schema:" -ForegroundColor Cyan
+Write-Host @"
+ sqlplus admin@'$ADBConnectionString'
+ # Enter password: $ADBPassword
+
+ -- Run these SQL commands:
+ SELECT USERNAME FROM ALL_USERS WHERE USERNAME LIKE 'SH%';
+ SELECT COUNT(*) FROM all_tables WHERE owner = 'SH2';
+ exit
+
+"@ -ForegroundColor Yellow
+
+Write-Host "3οΈβ£ Connect to on-prem database and create test data:" -ForegroundColor Cyan
+Write-Host @"
+ sql
+
+ -- Create a test table:
+ CREATE TABLE SH.TEST_REPLICATION AS SELECT * FROM SH.COUNTRIES;
+ SELECT COUNT(*) FROM SH.TEST_REPLICATION;
+ exit
+
+"@ -ForegroundColor Yellow
+
+Write-Host "4οΈβ£ Verify replication to ODAA ADB:" -ForegroundColor Cyan
+Write-Host @"
+ sqlplus admin@'$ADBConnectionString'
+ # Enter password: $ADBPassword
+
+ -- Check if test table replicated:
+ SELECT COUNT(*) FROM SH2.TEST_REPLICATION;
+ exit
+
+"@ -ForegroundColor Yellow
+
+Write-Host "5οΈβ£ Exit the pod:" -ForegroundColor Cyan
+Write-Host @"
+ exit
+
+"@ -ForegroundColor Yellow
+
+# ============================================================================
+# Web Interface URLs
+# ============================================================================
+
+Write-Step "Web Interfaces" "π"
+
+$externalIP = (kubectl get service -n ingress-nginx -o jsonpath='{range .items[*]}{.status.loadBalancer.ingress[*].ip}{end}') -replace '\s', ''
+
+if ($externalIP) {
+ Write-Host @"
+
+Access these URLs in your browser:
+
+ GoldenGate UI: https://gghack.$externalIP.nip.io
+ SQLPlus Web: https://gghack.$externalIP.nip.io/sqlplus/vnc.html
+ Jupyter Notebook: https://gghack.$externalIP.nip.io/jupyter/
+ GG Big Data: https://daagghack.$externalIP.nip.io
+
+"@ -ForegroundColor White
+}
+
+Write-Host "βββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Green
+Write-Host " Verification script completed. Follow steps above." -ForegroundColor Green
+Write-Host "βββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Green
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/onprem-ramp-up-simplified.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/onprem-ramp-up-simplified.md
new file mode 100644
index 000000000..27d7d0496
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/onprem-ramp-up-simplified.md
@@ -0,0 +1,271 @@
+# π Challenge 4: OnPrem Ramp Up (Simplified)
+
+[Back to workspace README](../../README.md) | [Original detailed walkthrough](./onprem-ramp-up.md)
+
+> π We are going to use an automated script that deploys everything with minimal manual steps, mimicking an on-premises environment.
+
+---
+
+## π― What You'll Deploy
+
+This challenge sets up Oracle GoldenGate to replicate data from an on-premises Oracle database (running in AKS) to your ODAA Autonomous Database:
+
+| Component | Description |
+|-----------|-------------|
+| **Oracle Database 23ai Free** | Source database with SH schema (pre-populated) |
+| **Oracle Data Pump** | Initial data migration to ODAA ADB |
+| **Oracle GoldenGate** | Real-time data replication |
+| **Oracle Instant Client** | SQL*Plus access to both databases |
+
+---
+
+## π Prerequisites
+
+Before starting, make sure you have:
+
+- [x] Completed previous challenges (ODAA ADB created)
+- [x] Your ODAA ADB password
+- [x] Access to your AKS cluster
+- [x] Azure CLI, kubectl, and helm installed
+
+---
+
+## π Step 1: Get Your ODAA Connection String
+
+First, retrieve your ODAA ADB connection string from the Azure Portal:
+
+1. Go to your **ODAA ADB resource** in Azure Portal
+2. Navigate to **Connections**
+3. Copy the **High** profile connection string
+
+It should look like this:
+```
+(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=xxx.adb.eu-paris-1.oraclecloud.com))(connect_data=(service_name=xxx_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))
+```
+
+> π‘ **Tip**: For detailed instructions, see [How to retrieve the ODAA connection string](../../docs/odaa-get-token.md)
+
+---
+
+## π Step 2: Run the Deployment Script
+
+Open PowerShell and navigate to this folder, then run:
+
+```powershell
+# Navigate to the walkthrough folder
+cd walkthrough\onprem-ramp-up
+
+# Login to Azure (if not already logged in)
+az login
+
+# Set your AKS subscription (replace with your subscription name)
+az account set --subscription "sub-team0"
+
+# Run the deployment script (use $trgConn if you retrieved it via docs\odaa-get-token.md)
+.\Deploy-OnPremReplication.ps1 `
+ -UserName "user00" `
+    -ADBPassword "<password>" `
+ -ADBConnectionString $trgConn
+```
+
+### π Script Parameters
+
+| Parameter | Required | Description |
+|-----------|----------|-------------|
+| `-UserName` | Yes | Your assigned username (e.g., `user00`, `user01`) |
+| `-ADBPassword` | Yes | Your ODAA ADB password |
+| `-ADBConnectionString` | Yes | Full TNS connection string from Step 1 |
+| `-AKSResourceGroup` | No | AKS resource group (auto-detected from username) |
+| `-AKSClusterName` | No | AKS cluster name (auto-detected from username) |
+| `-SkipAKSConnection` | No | Skip if already connected to AKS |
+| `-Uninstall` | No | Remove existing deployment first |
+| `-TemplateFile` | No | Custom template path (default: `../../resources/template/gghack.yaml`) |
+
+> π‘ **Note**: The script uses the existing `gghack.yaml` template without modifying it. Your user-specific values are applied via Helm `--set` parameters at deployment time.
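+
+Under the hood, the script builds roughly the following Helm command (a sketch with abridged values; the actual call is in `Deploy-OnPremReplication.ps1`):
+
+```powershell
+# effective install command assembled by the script (values abridged)
+helm install ogghack oggfree/goldengate-microhack-sample `
+    --values ..\..\resources\template\gghack.yaml `
+    --set-string microhack.user=user00 `
+    --set-string databases.trgConn=$trgConn `
+    --set-string services.external.vhostName="gghack.<EXTERNAL-IP>.nip.io" `
+    -n microhacks
+```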
+
+### β³ Wait for Completion
+
+The script will:
+
+1. β Connect to your AKS cluster
+2. β Configure Helm repositories
+3. β Auto-detect the Ingress external IP
+4. β Validate the template file exists
+5. β Create Kubernetes secrets
+6. β Deploy using template + `--set` overrides (~5-10 minutes)
+
+---
+
+## π Step 3: Verify the Deployment
+
+Once the script completes, check that all pods are running:
+
+```powershell
+kubectl get pods -n microhacks
+```
+
+Expected output (after ~8 minutes):
+```
+NAME READY STATUS RESTARTS AGE
+ogghack-goldengate-microhack-sample-db-xxxxx 1/1 Running 0 10m
+ogghack-goldengate-microhack-sample-db-prepare-job-xxxxx 0/1 Completed 0 10m
+ogghack-goldengate-microhack-sample-instantclient-xxxxx 1/1 Running 0 10m
+ogghack-goldengate-microhack-sample-jupyter-xxxxx 1/1 Running 0 10m
+ogghack-goldengate-microhack-sample-ogg-xxxxx 1/1 Running 0 10m
+```
+
+> β οΈ **Note**: Some CrashLoopBackOff errors on the prepare-job are expected while it waits for the database.
+
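+You can also confirm that the one-time preparation job itself has finished:
+
+```powershell
+# the db-prepare-job should eventually report COMPLETIONS 1/1
+kubectl get jobs -n microhacks
+```
+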
+---
+
+## π Step 4: Access the Web Interfaces
+
+The deployment creates several web interfaces (replace `<EXTERNAL-IP>` with your actual external IP):
+
+| Interface | URL | Credentials |
+|-----------|-----|-------------|
+| **GoldenGate UI** | `https://gghack.<EXTERNAL-IP>.nip.io` | ggadmin / your-password |
+| **SQLPlus Web** | `https://gghack.<EXTERNAL-IP>.nip.io/sqlplus/vnc.html` | - |
+| **Jupyter Notebook** | `https://gghack.<EXTERNAL-IP>.nip.io/jupyter/` | Welcome1234 |
+| **GG Big Data** | `https://daagghack.<EXTERNAL-IP>.nip.io` | ggadmin / your-password |
+
+To find your external IP:
+```powershell
+kubectl get service -n ingress-nginx -o jsonpath='{.items[*].status.loadBalancer.ingress[*].ip}'
+```
+
+---
+
+## β Step 5: Verify Data Replication
+
+### Connect to the Instant Client Pod
+
+```powershell
+# Get the pod name
+$podName = kubectl get pods -n microhacks -o name | Select-String 'instantclient' | ForEach-Object { $_ -replace 'pod/', '' }
+
+# Connect to the pod
+kubectl exec -it -n microhacks $podName -- /bin/bash
+```
+
+### Verify SH2 Schema in ODAA ADB
+
+From inside the pod:
+
+```bash
+# Connect to ODAA ADB (replace with your connection string)
+sqlplus admin@'<connection-string>'
+# Enter your password when prompted
+```
+
+```sql
+-- Check that SH2 schema exists
+SELECT USERNAME FROM ALL_USERS WHERE USERNAME LIKE 'SH%';
+
+-- Count tables in SH2
+SELECT COUNT(*) FROM all_tables WHERE owner = 'SH2';
+
+-- Exit
+exit
+```
+
+### Test Real-Time Replication
+
+From inside the pod, connect to the on-prem database:
+
+```bash
+# Use the pre-configured alias
+sql
+```
+
+```sql
+-- Create a test table
+CREATE TABLE SH.SALES_COPY AS SELECT * FROM SH.SALES;
+
+-- Check row count
+SELECT COUNT(*) FROM SH.SALES_COPY;
+
+-- Exit
+exit
+```
+
+Now verify it replicated to ODAA ADB:
+
+```bash
+sqlplus admin@'(description= ...your-connection-string...)'
+```
+
+```sql
+-- Check if table was replicated
+SELECT COUNT(*) FROM SH2.SALES_COPY;
+
+-- Exit
+exit
+```
+
+If you see the same row count, **GoldenGate replication is working!** π
+
+Type `exit` to leave the pod.
+
+---
+
+## π§ Troubleshooting
+
+### Redeploy if Something Goes Wrong
+
+```powershell
+# Uninstall and reinstall
+.\Deploy-OnPremReplication.ps1 `
+ -UserName "user00" `
+    -ADBPassword "<password>" `
+ -ADBConnectionString "(description= ...)" `
+ -Uninstall
+```
+
+### Manual Uninstall
+
+```powershell
+helm uninstall ogghack -n microhacks
+kubectl delete namespace microhacks
+```
+
+### Check Pod Logs
+
+```powershell
+# Get pod names
+kubectl get pods -n microhacks
+
+# Check logs for a specific pod
+kubectl logs -n microhacks <pod-name>
+
+# Check the prepare job logs
+$prepPod = kubectl get pods -n microhacks -o name | Select-String 'prepare-job' | ForEach-Object { $_ -replace 'pod/', '' }
+kubectl logs -n microhacks $prepPod
+```
+
+### Common Issues
+
+| Issue | Solution |
+|-------|----------|
+| Pods stuck in `Init:ErrImagePull` | Network/auth issue with Oracle Container Registry - check logs |
+| External IP not assigned | Wait a few minutes, or check ingress-nginx service |
+| Connection refused to ADB | Verify connection string and NSG rules (see the sketch below) |
+| Wrong password | Re-run script with `-Uninstall` flag |
+
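+For the ADB connectivity row, you can list the network security group rules from the Azure side to spot anything blocking outbound traffic (resource group and NSG names are placeholders):
+
+```powershell
+# list NSGs and their rules in your resource group (names are placeholders)
+az network nsg list -g <your-resource-group> -o table
+az network nsg rule list -g <your-resource-group> --nsg-name <your-nsg> -o table
+```
+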
+---
+
+## βοΈ Next Challenge
+
+While waiting for the deployment, you can start on:
+
+**[Challenge 5: Measure Network Performance](../perf-test-odaa/perf-test-odaa.md)**
+
+---
+
+## π Additional Resources
+
+- [Original detailed walkthrough](./onprem-ramp-up.md) - Step-by-step manual process
+- [ODAA Connection String Guide](../../docs/odaa-get-token.md) - How to get your TNS string
+- [Helm Chart Documentation](https://ilfur.github.io/VirtualAnalyticRooms) - GoldenGate Microhack chart
+
+[Back to workspace README](../../README.md)
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/onprem-ramp-up.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/onprem-ramp-up.md
new file mode 100644
index 000000000..a7756dc5b
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/onprem-ramp-up/onprem-ramp-up.md
@@ -0,0 +1,592 @@
+# π Challenge 4: OnPrem ramp up
+
+[Back to workspace README](../../README.md)
+
+After you have successfully created the ODAA Autonomous Database instance, you will now set up Oracle Data Pump and GoldenGate to replicate data from an on-premises Oracle database running in Azure Kubernetes Service (AKS) to the ODAA ADB instance.
+
+Before you continue with the challenge, note the address space (for example, 10.0.0.0/16) shown in the VNet overview of the AKS resource group.
+
+We will install the following components into the AKS cluster under the Namespace "microhacks" via helm:
+
+- An Oracle Database 23ai Free edition (prefilled with the SH schema)
+- Oracle Data Pump to do the initial import of the SH schema into the ODAA ADB instance as SH2 schema
+- Oracle GoldenGate to replicate data changes from SH schema to SH2 schema in near real-time
+- Oracle Instant Client to connect to the ODAA ADB instance via sqlplus
+
+## π¦ What is Kubernetes Helm?
+
+Helm is a package manager for Kubernetes that allows you to define, install, and manage Kubernetes applications. It uses a packaging format called charts, which are collections of pre-configured Kubernetes resources.
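+
+For example, once the oggfree repository has been added (see below), you can inspect a chart and its configurable defaults before installing it:
+
+~~~powershell
+# show chart metadata and the default values you can override
+helm show chart oggfree/goldengate-microhack-sample
+helm show values oggfree/goldengate-microhack-sample
+~~~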
+
+## π Login to Azure and set the right subscription
+
+~~~powershell
+az login # choose your assigned user account, e.g. user01@cptazure.org, via the "Work or school account" menu
+# make sure you select the subscription which starts with "sub-team", do not choose the subscription called "sub-mhodaa".
+# Assign the subscription name to a variable
+$subAKS="sub-mh0" # Replace with your Subscription Name.
+az account set --subscription $subAKS # make sure you are in the AKS subscription
+~~~
+
+## π Define required environment variables
+
+~~~powershell
+# define your AKS resource group and cluster name
+$rgAKS="aks-user00" # replace with your AKS resource group name
+$AKSClusterName="aks-user00" # replace with your AKS cluster name
+~~~
+
+## β Connect to AKS
+
+~~~bash
+# login to aks
+az aks get-credentials -g $rgAKS -n $AKSClusterName --overwrite-existing
+~~~
+
+## π οΈ Install OnPrem on AKS with helm
+
+~~~powershell
+# Add the GoldenGate helm repository
+helm repo add oggfree https://ilfur.github.io/VirtualAnalyticRooms
+# Do an update to get the newest chart templates
+helm repo update
+~~~
+
+Output should look similar to this:
+
+~~~text
+10_Oracle_on_Azure> helm repo add oggfree https://ilfur.github.io/VirtualAnalyticRooms
+"oggfree" already exists with the same configuration, skipping
+10_Oracle_on_Azure> # Do an update to get the newest chart templates
+10_Oracle_on_Azure> helm repo update
+Hang tight while we grab the latest from your chart repositories...
+...Successfully got an update from the "ingress-nginx" chart repository
+...Successfully got an update from the "avisto" chart repository
+...Successfully got an update from the "oggfree" chart repository
+...Successfully got an update from the "bitnami" chart repository
+Update Complete. βHappy Helming!β
+~~~
+
+## π§ Replace the user name in the GoldenGate configuration file gghack.yaml
+
+~~~powershell
+# set your assigned user name
+$UserName = "user00" # replace with your user name
+# create a copy of the template file
+cp resources/template/gghack.yaml .
+# replace the user-name placeholder (check the template for the exact token) with your user name
+(Get-Content gghack.yaml) -replace '<username>', $UserName.Trim() | Set-Content gghack.yaml
+# show lines 1 to 4 of gghack.yaml
+(Get-Content gghack.yaml)[0..3]
+~~~
+
+The value of microhack.user should look like this:
+
+~~~yaml
+microhack:
+ user: user02
+### specify the name of an existing secret that contains the ogg admin username and password
+ogg:
+~~~
+
+## π§ Replace the ingress public IP in the GoldenGate configuration file gghack.yaml
+
+We are already running an nginx ingress controller in the AKS cluster to provide access from outside the cluster to the GoldenGate microhack application.
+
+~~~powershell
+# retrieve the external IP of the nginx ingress controller
+$EXTIP = (kubectl get service -n ingress-nginx -o jsonpath='{range .items[*]}{.status.loadBalancer.ingress[*].ip} {end}') -replace '\s', ''
+echo "External IP of the Ingress Controller: $EXTIP"
+~~~
+
+Output should look similar to this:
+
+~~~text
+External IP of the Ingress Controller: 4.251.148.158
+~~~
+
+After you have the external IP address, replace the placeholder in the gghack.yaml file.
+
+~~~powershell
+# replace the external-IP placeholder (check the template for the exact token) with the actual external IP
+(Get-Content gghack.yaml) -replace '<externalip>', $EXTIP.Trim() | Set-Content gghack.yaml
+# show lines 42 to 56 of gghack.yaml
+(Get-Content gghack.yaml)[41..55]
+~~~
+
+The value of vhostName should look like this:
+
+~~~yaml
+services:
+ ### You can choose to create an ingress in front of the service
+ ### with a virtual host name of ggate.
+ external:
+ ### set type to either ingress or none if You need something customized
+ ### typical ingressClasses are nginx and istio
+ ingressClass: nginx
+ ### uses default SSL certificate of gateway/controller or specify a custom tls-secret here
+ tlsSecretName: ggate-tls-secret
+    vhostName: gghack.4.251.148.158.nip.io # public IP address of the ingress controller
+ internal:
+ type: ClusterIP
+ plainPort: 8080
+ sslPort: 8443
+~~~
+
+## π Replace the ODAA TNS connection string in gghack.yaml
+
+Reference the document [How to retrieve the Oracle Database Autonomous Database connection string from ODAA](../../docs/odaa-get-token.md) to get the TNS connection string for your ODAA ADB instance.
+
+β οΈ **Important**: If you follow the instructions in `docs\odaa-get-token.md`, remember to switch back to your AKS subscription after retrieving the TNS connection string.
+
+After you have retrieved the TNS connection string and assigned it to the `$trgConn` variable (as shown in docs\odaa-get-token.md), replace the placeholder in the gghack.yaml file:
+
+
+~~~powershell
+# Replace the placeholder with the HIGH TNS connection string of your deployed ODAA ADB database.
+# If you followed the instructions in docs\odaa-get-token.md (see above), the $trgConn variable is
+# already set and you can skip the $trgConn assignment below.
+
+# Check if the variable contains the ADB TNS connection string.
+echo "External ADB connection string : $trgConn"
+
+# If the variable is not filled, copy the TNS connection string directly from the ADB overview under the Settings/Connections menu, or follow the docs\odaa-get-token.md instructions.
+$trgConn="<tns-connection-string>"
+
+# replace the connection-string placeholder (check the template for the exact token) in your gghack.yaml
+(Get-Content gghack.yaml) -replace '<tns-connection-string>', $trgConn | Set-Content gghack.yaml
+# show lines 11 to 14 of gghack.yaml
+(Get-Content gghack.yaml)[10..13]
+~~~
+
+Your connection string in gghack.yaml should look similar to the yaml below. If the connection string was not copied successfully into the gghack.yaml file, you can also do it manually by copying the connection string from the ADB Azure portal under the Connections section. Choose the connection for the High profile.
+
+~~~yaml
+databases:
+ # value for source database (23ai free container) is calculated.
+ trgConn: "(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=zuyhervb.adb.eu-paris-1.oraclecloud.com))(connect_data=(service_name=gc2401553d1c7ab_uer00_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))"
+~~~
+
+## π Install GoldenGate Pods
+
+As mentioned at the beginning of this challenge, we will install several components on the AKS cluster under the namespace "microhacks" via helm. Some of these components are accessed via a Web UI, and some of them need to connect to each other. Therefore, we create a few Kubernetes secrets to store the required credentials.
+
+- GoldenGate (ogg-admin-secret)
+ - GoldenGate Admin User: ggadmin
+- Source Database oracle 23ai free edition
+ - Admin User: system (db-admin-secret), ggadmin (srcGGUserName)
+- Target Database ODAA ADB
+ - Admin User: admin (db-admin-secret), ggadmin (trgGGUserName)
+
+> IMPORTANT: The password for all users must be the same for simplicity and must match the password you defined during the creation of the ODAA ADB instance (should be <"Assigned Password"> (without quotes)).
+
+~~~powershell
+# Set the password that will be used for all three components - please ask if you do not know it!
+$password = "<Assigned Password>" # replace with the assigned password
+
+# create the namespace everything goes into
+kubectl create namespace microhacks
+#create secret for Golden Gate OGG admin user and password to-be-created
+kubectl create secret generic ogg-admin-secret -n microhacks --from-literal=oggusername=ggadmin --from-literal=oggpassword=$password
+#create secret for source and target database admin and ogg users to be created (target must be there already! ODAA ADB in Azure)
+kubectl create secret generic db-admin-secret -n microhacks --from-literal=srcAdminPwd=$password --from-literal=trgAdminPwd=$password --from-literal=srcGGUserName=ggadmin --from-literal=trgGGUserName=ggadmin --from-literal=srcGGPwd=$password --from-literal=trgGGPwd=$password
+# Verify secrets and display them (passwords will be hidden)
+kubectl get secrets -n microhacks -o json
+# decode secrets for verification (passwords will be visible here)
+# Decode ogg-admin-secret password
+[System.Text.Encoding]::UTF8.GetString([Convert]::FromBase64String(
+ (kubectl get secret ogg-admin-secret -n microhacks -o jsonpath="{.data.oggpassword}")
+))
+
+# Decode db-admin-secret srcAdminPwd
+[System.Text.Encoding]::UTF8.GetString([Convert]::FromBase64String(
+ (kubectl get secret db-admin-secret -n microhacks -o jsonpath="{.data.srcAdminPwd}")
+))
+
+# Decode db-admin-secret trgAdminPwd
+[System.Text.Encoding]::UTF8.GetString([Convert]::FromBase64String(
+ (kubectl get secret db-admin-secret -n microhacks -o jsonpath="{.data.trgAdminPwd}")
+))
+~~~
+
+Install all components via Helm:
+
+~~~powershell
+helm install ogghack oggfree/goldengate-microhack-sample --values gghack.yaml -n microhacks
+~~~
+
+π You should see a similar output:
+
+~~~text
+NAME: ogghack
+LAST DEPLOYED: Wed Nov 12 15:33:39 2025
+NAMESPACE: microhacks
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+Final NOTES:
+
+Please wait about 8 Minutes for the source database to be completely up and loaded.
+
+You can already try out Your sqlplus command line by using this URL in Your browser:
+https://gghack.4.251.147.64.nip.io/sqlplus/vnc.html
+
+Alternatively, have a look at Your personal jupyter notebook:
+https://gghack.4.251.147.64.nip.io/jupyter/
+Just enter the password "Welcome1234" when asked and then open the CPAT analysis notebook.
+
+Once the DB is ready, GoldenGate Sync should be set up too. Check it out right here:
+https://gghack.4.251.147.64.nip.io
+
+...and for GoldenGate for Distributed Applications, check this host name:
+https://daagghack.4.251.147.64.nip.io
+
+Have fun !
+~~~
+
+## β³ Wait until all pods are in status "Running"
+
+~~~bash
+kubectl get pods -n microhacks --watch
+~~~
+
+β° Wait until the ogghack-goldengate-microhack-sample-db-prepare-job is completed, then exit the watch with Ctrl+C. It takes about 8 minutes until all pods are Running / Completed.
+
+> βΉοΈ **NOTE**: Error and CrashLoopBackOff of the ogghack-goldengate-microhack-sample-db-prepare-job pod is expected.
+
+π The output should look similar to this after the job is completed:
+
+~~~text
+NAME READY STATUS RESTARTS AGE
+ogghack-goldengate-microhack-sample-db-5f9ccf59dc-wtcmf 1/1 Running 0 62m
+ogghack-goldengate-microhack-sample-db-prepare-job-pzncs 0/1 Completed 0 62m
+ogghack-goldengate-microhack-sample-instantclient-5985df84lcjwx 1/1 Running 0 62m
+ogghack-goldengate-microhack-sample-jupyter-69d77b895b-4b7g8 1/1 Running 0 62m
+ogghack-goldengate-microhack-sample-ogg-787f954698-kzjpl 1/1 Running 0 62m
+~~~
+
+β After the job is completed, the local database, which is running inside the AKS cluster, has been migrated to the ODAA ADB instance via Oracle Data Pump.
+
+Press Ctrl+C to exit the watch command.
+
+> IMPORTANT: While you are waiting feel free to already work on the next challenge [Challenge 5: Measure Network Performance to Your Oracle Database@Azure Autonomous Database](../perf-test-odaa/perf-test-odaa.md).
+
+### π Connect to the ADB Oracle Database
+
+~~~powershell
+# extract the pod name of the instantclient pod, as it contains a random suffix
+$podInstantClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] }
+# log in to the instantclient pod
+kubectl exec -it -n microhacks $podInstantClientName -- /bin/bash
+~~~
+
+Output should look similar to this:
+
+~~~text
+Defaulted container "app" out of: app, get-files (init)
+***********************************************************************
+WELCOME to the Oracle sqlcl/sqlplus command line
+Oracle instant client has been installed to /opt/oracle/instantclient_23_4
+TNS_ADMIN is set to: /projects
+Just type 'sql' to connect to Your database with sqlplus
+Now hopping to /projects as work directory for Your convenience
+***********************************************************************
+~~~
+
+Log in with sqlplus to the ADB instance as the admin user:
+
+~~~bash
+# log in to ADB as admin via sqlplus; replace the TNS connection string with your own
+echo "External ADB connection string : $trgConn"
+
+# Replace <tns-connection-string> with your TNS connection string. It should be available in the
+# variable $trgConn; if not, you can find it in the Azure portal overview of the deployed ADB
+# under Settings/Connections.
+sqlplus admin@'<tns-connection-string>'
+
+# Enter your password when prompted
+~~~
+
+Output should look similar to this:
+
+~~~text
+SQL*Plus: Release 23.0.0.0.0 - Production on Fri Nov 14 08:08:02 2025
+Version 23.4.0.24.05
+
+Copyright (c) 1982, 2024, Oracle. All rights reserved.
+
+Enter password:
+Last Successful login time: Fri Nov 14 2025 07:58:14 +00:00
+
+Connected to:
+Oracle Database 23ai Enterprise Edition Release 23.0.0.0.0 - for Oracle Cloud and Engineered Systems
+Version 23.10.0.25.10
+
+SQL>
+~~~
+
+Inside the sqlplus session, run the following commands to verify the SH2 schema and the GoldenGate GGADMIN user have been created successfully in the ADB instance.
+
+Because "nothing is as constant as change", we are constantly working on this microhack. After successfully logging in to the database, you might therefore see either:
+
+- A: a successful migration of the SH schema from the 23ai free edition into the SH2 schema of the ADB database, or
+- B: a currently empty SH2 schema in the ADB database.
+
+~~~sql
+select USERNAME from ALL_USERS where USERNAME like 'SH%';
+~~~
+
+~~~text
+USERNAME
+--------------------------------------------------------------------------------
+SH
+SH2
+~~~
+
+
+~~~sql
+-- the count should be 18
+SELECT COUNT(*) FROM all_tables WHERE owner = 'SH2';
+~~~
+
+~~~text
+ COUNT(*)
+----------
+ 18
+~~~
+
+
+If the previous command returns 0, please continue with the section "Create a table manually in the schema SH2 of your ADB with the user SH2 to trigger GoldenGate". If the SH2 schema already contains tables (count is not 0), you can continue with the next steps as normal.
+
+
+List all tables in SH2 schema on the ODAA ADB database
+
+~~~sql
+-- list all tables in the SH2 schema
+SELECT table_name FROM all_tables WHERE owner = 'SH2';
+~~~
+
+~~~text
+TABLE_NAME
+--------------------------------------------------------------------------------
+COSTS
+DR$SUP_TEXT_IDX$N
+SALES
+CAL_MONTH_SALES_MV
+CHANNELS
+COUNTRIES
+CUSTOMERS
+DR$SUP_TEXT_IDX$B
+DR$SUP_TEXT_IDX$C
+DR$SUP_TEXT_IDX$I
+DR$SUP_TEXT_IDX$K
+
+TABLE_NAME
+--------------------------------------------------------------------------------
+DR$SUP_TEXT_IDX$Q
+DR$SUP_TEXT_IDX$U
+FWEEK_PSCAT_SALES_MV
+PRODUCTS
+PROMOTIONS
+SUPPLEMENTARY_DEMOGRAPHICS
+TIMES
+
+18 rows selected.
+~~~
+
+Exit sqlplus
+
+~~~sql
+exit
+~~~
+
+### π Verify replication
+
+#### Create a table manually in the schema SH2 of your ADB with the user SH2 to trigger GoldenGate
+
+The following steps are required to trigger GoldenGate to mirror data from the migrated ADB database into Microsoft Fabric. GoldenGate 23ai supports open mirroring and can continuously replicate data (CDC) from ODAA into a Microsoft Fabric Lakehouse. The following links give additional information about the setup in GoldenGate 23ai.
+
+
+* [Oracle GG 23ai supports open mirroring](https://blogs.oracle.com/dataintegration/how-to-replicate-to-mirrored-database-in-microsoft-fabric-using-goldengate)
+* [Oracle OCI GoldenGate to Microsoft Fabric Lakehouse Data Replication](https://www.ateam-oracle.com/post/oracle-oci-goldengate-to-microsoft-fabric-lakehouse-data-replication)
+
+
+~~~bash
+# Reconnect to ADB as admin via sqlplus; replace the TNS connection string with your own
+echo "External ADB connection string : $trgConn"
+
+# Replace <tns-connection-string> with your TNS connection string (see above).
+sqlplus admin@'<tns-connection-string>'
+
+# Enter your password when prompted
+~~~
+
+If the objects in the SH2 schema were not displayed in the previous section, you have to grant the CREATE SESSION privilege and the RESOURCE role to the user SH2 in the ADB database.
+
+~~~sql
+grant create session, resource to SH2;
+
+exit
+~~~
+
+
+Connect as user SH2 to the ADB database:
+
+~~~bash
+# Connect to ADB as SH2 via sqlplus; replace the TNS connection string with your own
+echo "External ADB connection string : $trgConn"
+
+# Replace <tns-connection-string> with your TNS connection string (see above).
+sqlplus SH2@'<tns-connection-string>'
+
+# Enter your password when prompted
+~~~
+
+Create a copy of the table SH.SALES in the schema SH2. Do not load the data with a single DDL command like `CREATE TABLE ... AS SELECT ...`; GoldenGate captures DML changes, so create an empty table first and then load the rows with an INSERT.
+
+~~~sql
+-- count the records in the migrated SH.SALES table in the ADB
+SELECT COUNT(*) FROM SH.SALES;
+~~~
+
+~~~text
+ COUNT(*)
+----------
+ 918843
+~~~
+
+~~~sql
+-- create an empty copy of SH.SALES as SALES_COPY (structure only, no rows)
+CREATE TABLE SALES_COPY AS SELECT * FROM SH.SALES where rownum = 0;
+-- count the records in SALES_COPY
+SELECT COUNT(*) FROM SALES_COPY;
+~~~
+
+~~~text
+ COUNT(*)
+----------
+ 0
+~~~
+
+~~~sql
+-- copy all rows from SH.SALES into SALES_COPY (DML, which GoldenGate captures)
+INSERT INTO SALES_COPY SELECT * FROM SH.SALES;
+COMMIT;
+-- count the records in SALES_COPY
+SELECT COUNT(*) FROM SALES_COPY;
+~~~
+
+~~~text
+ COUNT(*)
+----------
+ 918843
+~~~
+
+~~~sql
+exit
+~~~
+
+
+
+## π‘ Tips and Tricks
+
+### Troubleshooting Init:ErrImagePull Issues
+
+If you see pods with `Init:ErrImagePull` status, this is likely due to authentication issues with Oracle Container Image Registry (OCIR) or network connectivity problems.
+
+**Common causes:**
+1. **Missing Oracle Container Registry authentication** (see the pull-secret sketch below)
+2. **Network connectivity issues (TLS handshake timeout)**
+3. **AKS node storage I/O issues**
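+
+If missing registry authentication is the cause, an image pull secret usually helps. A minimal sketch with placeholder credentials; check the chart values for the secret name the pods actually reference:
+
+~~~powershell
+# create a docker-registry pull secret for the Oracle Container Registry (values are placeholders)
+kubectl create secret docker-registry ocir-pull-secret -n microhacks `
+  --docker-server=container-registry.oracle.com `
+  --docker-username="<oracle-account-email>" `
+  --docker-password="<auth-token>"
+~~~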
+
+### π Redeploy if things go wrong
+
+~~~powershell
+# Update the helm repo
+helm repo update
+# login to aks
+az aks get-credentials -g $rgAKS -n $AKSClusterName --overwrite-existing
+# List the available helm charts
+helm list -n microhacks
+# Uninstall the Helm release
+helm uninstall ogghack -n microhacks
+# Verify the pods inside the namespace microhacks are deleted
+kubectl get pods -n microhacks
+# Delete the namespace if required
+kubectl delete namespace microhacks
+~~~
+
+### Check Network Connectivity
+
+~~~powershell
+# Test connectivity to Oracle Container Registry from AKS nodes
+kubectl run test-connectivity --image=nginx --rm -it --restart=Never -- curl -I https://fra.ocir.io
+~~~
+
+### Restart AKS Nodes (if I/O errors persist)
+
+~~~powershell
+# If there are persistent I/O errors, re-image the nodes in the AKS nodepool
+az aks nodepool upgrade --resource-group $rgAKS --cluster-name $AKSClusterName --name agentpool --node-image-only
+~~~
+
+### π Show the logs of the GoldenGate Prepare Job
+
+~~~powershell
+# login to aks if not already done
+az aks get-credentials -g $rgAKS -n $AKSClusterName --overwrite-existing
+# get prep job pod name
+$podPrepName = kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-db-prepare-job' | ForEach-Object { ($_ -split '\s+')[0] }
+kubectl logs -n microhacks $podPrepName
+
+# To check for image pull issues:
+kubectl describe pod $podPrepName -n microhacks | Select-String -Pattern "Failed|Error|Warning" -Context 2,2
+~~~
+
+~~~text
+Defaulted container "dbhelper" out of: dbhelper, dbcheck (init)
+Cloning into 'gg_microhacks_scripts'...
+ADP=(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=gpdmotes.adb.eu-frankfurt-1.oraclecloud.com))(connect_data=(service_name=g6425a1dbd2e95a_odaa2_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))
+
+SQL*Plus: Release 23.0.0.0.0 - Production on Tue Oct 21 14:02:31 2025
+Version 23.4.0.24.05
+
+Copyright (c) 1982, 2024, Oracle. All rights reserved.
+
+Last Successful login time: Tue Oct 21 2025 13:30:17 +00:00
+
+Connected to:
+Oracle Database 23ai Enterprise Edition Release 23.0.0.0.0 - for Oracle Cloud and Engineered Systems
+Version 23.10.0.25.10
+
+SQL>
+User altered.
+
+SQL> 2 3 4 5 6 7
+PL/SQL procedure successfully completed.
+
+SQL> 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
+PL/SQL procedure successfully completed.
+
+SQL> Disconnected from Oracle Database 23ai Enterprise Edition Release 23.0.0.0.0 - for Oracle Cloud and Engineered Systems
+Version 23.10.0.25.10
+Cloning into 'db-sample-schemas'...
+~~~
+
+### π Show the logs of the GoldenGate Big Data Container
+
+~~~powershell
+# login to aks if not already done
+az aks get-credentials -g $rgAKS -n $AKSClusterName --overwrite-existing
+# get the Big Data pod name
+$podBigDataName = kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-daa' | ForEach-Object { ($_ -split '\s+')[0] }
+kubectl logs -n microhacks $podBigDataName
+
+# To check for image pull issues:
+kubectl describe pod $podBigDataName -n microhacks | Select-String -Pattern "Failed|Error|Warning" -Context 2,2
+~~~
+
+[Back to workspace README](../../README.md)
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/perf-test-odaa/Deploy-PerfTest.ps1 b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/perf-test-odaa/Deploy-PerfTest.ps1
new file mode 100644
index 000000000..6391bdb7a
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/perf-test-odaa/Deploy-PerfTest.ps1
@@ -0,0 +1,426 @@
+#Requires -Version 5.1
+<#
+.SYNOPSIS
+ Deploys and runs Oracle ADB performance tests from AKS
+
+.DESCRIPTION
+ This script automates the deployment and execution of performance tests against ODAA ADB.
+ It handles all the manual steps from the original walkthrough:
+ - Connects to AKS cluster
+ - Deploys adbping performance test job
+ - Runs the tests and displays results
+ - Optionally runs connping for additional metrics
+
+.PARAMETER UserName
+ Your assigned username (e.g., user01, user02)
+
+.PARAMETER ADBPassword
+ Password for the ODAA ADB instance
+
+.PARAMETER ADBConnectionString
+ TNS connection string for your ODAA ADB instance
+
+.PARAMETER AKSResourceGroup
+ Name of the AKS resource group (default: auto-detected from username)
+
+.PARAMETER AKSClusterName
+ Name of the AKS cluster (default: auto-detected from username)
+
+.PARAMETER Subscription
+ Azure subscription name for AKS (default: auto-detected)
+
+.PARAMETER SkipAKSConnection
+ Skip AKS connection (use if already connected)
+
+.PARAMETER TestType
+ Type of test to run: 'adbping', 'connping', or 'both' (default: 'adbping')
+
+.PARAMETER TestDuration
+ Duration of the test in seconds (default: 90)
+
+.PARAMETER Threads
+ Number of concurrent threads for adbping (default: 3)
+
+.PARAMETER Cleanup
+ Remove test jobs after completion
+
+.EXAMPLE
+ .\Deploy-PerfTest.ps1 -UserName "user01" -ADBPassword "Welcome1234#" -ADBConnectionString "(description= ...)"
+
+.EXAMPLE
+ .\Deploy-PerfTest.ps1 -UserName "user01" -ADBPassword "Welcome1234#" -ADBConnectionString "(description= ...)" -TestType "both"
+
+.NOTES
+ Author: ODAA MicroHack Team
+ This script simplifies Challenge 5: Performance Testing
+#>
+
+[CmdletBinding()]
+param(
+ [Parameter(Mandatory = $true, HelpMessage = "Your assigned username (e.g., user01)")]
+ [string]$UserName,
+
+ [Parameter(Mandatory = $true, HelpMessage = "Password for ODAA ADB instance")]
+ [string]$ADBPassword,
+
+ [Parameter(Mandatory = $true, HelpMessage = "TNS connection string for ODAA ADB")]
+ [string]$ADBConnectionString,
+
+ [Parameter(Mandatory = $false)]
+ [string]$AKSResourceGroup = "",
+
+ [Parameter(Mandatory = $false)]
+ [string]$AKSClusterName = "",
+
+ [Parameter(Mandatory = $false)]
+ [string]$Subscription = "",
+
+ [Parameter(Mandatory = $false)]
+ [switch]$SkipAKSConnection,
+
+ [Parameter(Mandatory = $false)]
+ [ValidateSet("adbping", "connping", "both")]
+ [string]$TestType = "adbping",
+
+ [Parameter(Mandatory = $false)]
+ [int]$TestDuration = 90,
+
+ [Parameter(Mandatory = $false)]
+ [int]$Threads = 3,
+
+ [Parameter(Mandatory = $false)]
+ [switch]$Cleanup
+)
+
+# ============================================================================
+# Configuration
+# ============================================================================
+$ErrorActionPreference = "Stop"
+$Namespace = "adb-perf-test"
+$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+$RepoRoot = (Resolve-Path "$ScriptDir\..\..").Path
+
+# ============================================================================
+# Helper Functions
+# ============================================================================
+
+function Write-Step {
+ param([string]$Message, [string]$Icon = "π")
+ Write-Host "`n$Icon $Message" -ForegroundColor Cyan
+ Write-Host ("=" * 60) -ForegroundColor DarkGray
+}
+
+function Write-Success {
+ param([string]$Message)
+    Write-Host "✅ $Message" -ForegroundColor Green
+}
+
+function Write-Warning {
+ param([string]$Message)
+ Write-Host "β οΈ $Message" -ForegroundColor Yellow
+}
+
+function Write-ErrorMsg {
+ param([string]$Message)
+ Write-Host "β $Message" -ForegroundColor Red
+}
+
+function Write-Info {
+ param([string]$Message)
+ Write-Host "βΉοΈ $Message" -ForegroundColor White
+}
+
+function Test-Command {
+ param([string]$Command)
+ try {
+ Get-Command $Command -ErrorAction Stop | Out-Null
+ return $true
+ }
+ catch {
+ return $false
+ }
+}
+
+function Test-Prerequisites {
+ Write-Step "Checking Prerequisites" "π"
+
+ $required = @("az", "kubectl")
+ $missing = @()
+
+ foreach ($cmd in $required) {
+ if (Test-Command $cmd) {
+ Write-Success "$cmd is installed"
+ }
+ else {
+ Write-ErrorMsg "$cmd is NOT installed"
+ $missing += $cmd
+ }
+ }
+
+ if ($missing.Count -gt 0) {
+ throw "Missing prerequisites: $($missing -join ', '). Please install them first."
+ }
+}
+
+function Connect-ToAKS {
+ param(
+ [string]$ResourceGroup,
+ [string]$ClusterName,
+ [string]$SubscriptionName
+ )
+
+ Write-Step "Connecting to AKS Cluster" "β"
+
+ if ($SubscriptionName) {
+ Write-Info "Setting subscription to: $SubscriptionName"
+ az account set --subscription $SubscriptionName
+ }
+
+ Write-Info "Getting AKS credentials for cluster: $ClusterName"
+ az aks get-credentials -g $ResourceGroup -n $ClusterName --overwrite-existing
+
+ $namespaces = kubectl get namespaces --no-headers 2>&1
+ if ($LASTEXITCODE -ne 0) {
+ throw "Failed to connect to AKS cluster"
+ }
+
+ Write-Success "Connected to AKS cluster: $ClusterName"
+}
+
+function Initialize-Namespace {
+ Write-Step "Setting up Namespace" "π¦"
+
+ $existingNs = kubectl get namespace $Namespace --ignore-not-found -o name 2>&1
+ if (-not $existingNs) {
+ Write-Info "Creating namespace: $Namespace"
+ kubectl create namespace $Namespace
+ }
+ else {
+ Write-Info "Namespace already exists: $Namespace"
+ }
+
+ Write-Success "Namespace ready: $Namespace"
+}
+
+function Remove-ExistingJobs {
+ Write-Step "Cleaning up existing jobs" "π§Ή"
+
+ kubectl delete job adbping-performance-test -n $Namespace --ignore-not-found 2>&1 | Out-Null
+ kubectl delete job connping-performance-test -n $Namespace --ignore-not-found 2>&1 | Out-Null
+
+ Write-Success "Existing jobs cleaned up"
+}
+
+function Deploy-ADBPingTest {
+ param(
+ [string]$Password,
+ [string]$TNSString,
+ [int]$Duration,
+ [int]$ThreadCount
+ )
+
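+    # Note: the Duration and ThreadCount parameters are accepted but not substituted below;
+    # the actual test settings come from the values inside the job template.
+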
+ Write-Step "Deploying ADBPing Performance Test" "π"
+
+ $templatePath = "$RepoRoot\resources\infra\k8s\adbping-job.yaml"
+
+ if (-not (Test-Path $templatePath)) {
+ throw "Template file not found: $templatePath"
+ }
+
+ # Create a temporary job file
+ $tempJobFile = Join-Path $env:TEMP "adbping-job-temp.yaml"
+
+ # Read and modify the template
+ $content = Get-Content $templatePath -Raw
+ $content = $content -replace 'YOUR_PASSWORD_HERE', $Password
+ $content = $content -replace 'YOUR_TNS_CONNECTION_STRING_HERE', $TNSString
+
+ # Write to temp file
+ $content | Set-Content $tempJobFile -Encoding UTF8
+
+ Write-Info "Deploying adbping job..."
+ kubectl apply -f $tempJobFile -n $Namespace
+
+ # Clean up temp file
+ Remove-Item $tempJobFile -Force
+
+ Write-Success "ADBPing job deployed"
+}
+
+function Deploy-ConnPingTest {
+ param(
+ [string]$Password,
+ [string]$TNSString,
+ [int]$Duration
+ )
+
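+    # Note: the Duration parameter is accepted but not substituted below;
+    # the actual test duration comes from the value inside the job template.
+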
+ Write-Step "Deploying ConnPing Performance Test" "π"
+
+ $templatePath = "$RepoRoot\resources\infra\k8s\connping-job.yaml"
+
+ if (-not (Test-Path $templatePath)) {
+ throw "Template file not found: $templatePath"
+ }
+
+ # Create a temporary job file
+ $tempJobFile = Join-Path $env:TEMP "connping-job-temp.yaml"
+
+ # Read and modify the template
+ $content = Get-Content $templatePath -Raw
+ $content = $content -replace 'YOUR_PASSWORD_HERE', $Password
+ $content = $content -replace 'YOUR_TNS_CONNECTION_STRING_HERE', $TNSString
+
+ # Write to temp file
+ $content | Set-Content $tempJobFile -Encoding UTF8
+
+ Write-Info "Deploying connping job..."
+ kubectl apply -f $tempJobFile -n $Namespace
+
+ # Clean up temp file
+ Remove-Item $tempJobFile -Force
+
+ Write-Success "ConnPing job deployed"
+}
+
+function Wait-ForJobCompletion {
+ param(
+ [string]$JobName,
+ [int]$TimeoutSeconds = 300
+ )
+
+ Write-Step "Waiting for $JobName to complete" "β³"
+
+ $startTime = Get-Date
+ $completed = $false
+
+ while (-not $completed) {
+ $elapsed = ((Get-Date) - $startTime).TotalSeconds
+ if ($elapsed -gt $TimeoutSeconds) {
+ throw "Job $JobName timed out after $TimeoutSeconds seconds"
+ }
+
+ $status = kubectl get job $JobName -n $Namespace -o jsonpath='{.status.conditions[?(@.type=="Complete")].status}' 2>&1
+ $failed = kubectl get job $JobName -n $Namespace -o jsonpath='{.status.conditions[?(@.type=="Failed")].status}' 2>&1
+
+ if ($status -eq "True") {
+ $completed = $true
+ Write-Success "$JobName completed successfully"
+ }
+ elseif ($failed -eq "True") {
+ throw "Job $JobName failed"
+ }
+ else {
+ Write-Info "Job running... (elapsed: $([math]::Round($elapsed))s)"
+ Start-Sleep -Seconds 10
+ }
+ }
+}
+
+function Get-JobResults {
+ param([string]$JobName)
+
+ Write-Step "Retrieving $JobName Results" "π"
+
+ $logs = kubectl logs job/$JobName -n $Namespace 2>&1
+
+ Write-Host "`n" -NoNewline
+    Write-Host ("=" * 60) -ForegroundColor Yellow
+    Write-Host "  PERFORMANCE TEST RESULTS" -ForegroundColor Yellow
+    Write-Host ("=" * 60) -ForegroundColor Yellow
+    Write-Host $logs
+    Write-Host ("=" * 60) -ForegroundColor Yellow
+}
+
+function Remove-TestJobs {
+ Write-Step "Cleaning up test jobs" "π§Ή"
+
+ kubectl delete job adbping-performance-test -n $Namespace --ignore-not-found 2>&1 | Out-Null
+ kubectl delete job connping-performance-test -n $Namespace --ignore-not-found 2>&1 | Out-Null
+
+ Write-Success "Test jobs removed"
+}
+
+# ============================================================================
+# Main Script
+# ============================================================================
+
+try {
+ Write-Host "`n" -NoNewline
+ Write-Host "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Magenta
+ Write-Host "β ODAA Performance Test Deployment Script β" -ForegroundColor Magenta
+ Write-Host "β Challenge 5: Measure Network Performance β" -ForegroundColor Magenta
+ Write-Host "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Magenta
+
+ # Check prerequisites
+ Test-Prerequisites
+
+ # Auto-detect resource group and cluster name if not provided
+ if (-not $AKSResourceGroup) {
+ $AKSResourceGroup = "aks-$UserName"
+ Write-Info "Auto-detected AKS Resource Group: $AKSResourceGroup"
+ }
+
+ if (-not $AKSClusterName) {
+ $AKSClusterName = "aks-$UserName"
+ Write-Info "Auto-detected AKS Cluster Name: $AKSClusterName"
+ }
+
+ # Connect to AKS if not skipped
+ if (-not $SkipAKSConnection) {
+ Connect-ToAKS -ResourceGroup $AKSResourceGroup -ClusterName $AKSClusterName -SubscriptionName $Subscription
+ }
+ else {
+ Write-Info "Skipping AKS connection (assuming already connected)"
+ }
+
+ # Initialize namespace
+ Initialize-Namespace
+
+ # Clean up existing jobs
+ Remove-ExistingJobs
+
+ # Run tests based on TestType
+ if ($TestType -eq "adbping" -or $TestType -eq "both") {
+ Deploy-ADBPingTest -Password $ADBPassword -TNSString $ADBConnectionString -Duration $TestDuration -ThreadCount $Threads
+ Wait-ForJobCompletion -JobName "adbping-performance-test" -TimeoutSeconds 300
+ Get-JobResults -JobName "adbping-performance-test"
+ }
+
+ if ($TestType -eq "connping" -or $TestType -eq "both") {
+ Deploy-ConnPingTest -Password $ADBPassword -TNSString $ADBConnectionString -Duration $TestDuration
+ Wait-ForJobCompletion -JobName "connping-performance-test" -TimeoutSeconds 300
+ Get-JobResults -JobName "connping-performance-test"
+ }
+
+ # Cleanup if requested
+ if ($Cleanup) {
+ Remove-TestJobs
+ }
+
+ Write-Host "`n" -NoNewline
+ Write-Host "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Green
+    Write-Host "β ✅ Performance Tests Completed Successfully!              β" -ForegroundColor Green
+ Write-Host "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Green
+
+ Write-Host "`nπ Results Summary:" -ForegroundColor Cyan
+ Write-Host " - Test Type: $TestType" -ForegroundColor White
+ Write-Host " - Duration: $TestDuration seconds" -ForegroundColor White
+ if ($TestType -eq "adbping" -or $TestType -eq "both") {
+ Write-Host " - Threads: $Threads" -ForegroundColor White
+ }
+ Write-Host "`nπ‘ Tip: Look for 'ociping mean' or 'SQL Execution Time' in the results above" -ForegroundColor Yellow
+ Write-Host " Values under 2ms indicate excellent network performance!" -ForegroundColor Yellow
+}
+catch {
+ Write-Host "`n" -NoNewline
+ Write-Host "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Red
+ Write-Host "β β Deployment Failed β" -ForegroundColor Red
+ Write-Host "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ" -ForegroundColor Red
+ Write-Host "`nError: $_" -ForegroundColor Red
+ Write-Host "`nTroubleshooting:" -ForegroundColor Yellow
+ Write-Host " 1. Verify you're logged into Azure: az login" -ForegroundColor White
+ Write-Host " 2. Verify AKS connection: kubectl get nodes" -ForegroundColor White
+ Write-Host " 3. Check if adb-perf-test namespace exists: kubectl get ns" -ForegroundColor White
+ Write-Host " 4. Check job status: kubectl get jobs -n adb-perf-test" -ForegroundColor White
+ exit 1
+}
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/perf-test-odaa/perf-test-odaa-simplified.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/perf-test-odaa/perf-test-odaa-simplified.md
new file mode 100644
index 000000000..2c8ccc2be
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/perf-test-odaa/perf-test-odaa-simplified.md
@@ -0,0 +1,231 @@
+# π Challenge 5: Performance Testing (Simplified)
+
+[Back to workspace README](../../README.md) | [Original detailed walkthrough](./perf-test-odaa.md)
+
+> π We are going to use an automated script to run performance tests against your ODAA Autonomous Database with minimal manual steps.
+
+---
+
+## π― What You'll Test
+
+This challenge measures the network performance between your AKS cluster and ODAA Autonomous Database:
+
+| Metric | Description | Good Value |
+|--------|-------------|------------|
+| **ociping** | Database round-trip latency | < 2ms |
+| **SQL Execution Time** | Time to execute `SELECT 1 FROM DUAL` | < 1ms |
+| **Connect Time** | Session establishment time | < 150ms |
+| **P95/P99 Latency** | 95th/99th percentile latencies | < 2ms |
+
+---
+
+## π Prerequisites
+
+Before starting, make sure you have:
+
+- [x] Completed previous challenges (ODAA ADB created)
+- [x] Your ODAA ADB password
+- [x] Access to your AKS cluster
+- [x] Azure CLI and kubectl installed
+- [x] The `adb-perf-test` namespace deployed (from Challenge 2)
+
+---
+
+## π Step 1: Get Your ODAA Connection String
+
+First, retrieve your ODAA ADB connection string:
+
+```powershell
+# Set your variables
+$adbName = "user00" # Replace with your ADB name
+$rgODAA = "odaa-user00" # Replace with your ODAA resource group
+$subODAA = "sub-mhodaa" # Replace with your ODAA subscription
+
+# Switch to ODAA subscription
+az account set --subscription $subODAA
+
+# Get the TNS connection string
+$trgConn = az oracle-database autonomous-database show `
+ -g $rgODAA -n $adbName `
+ --query "connectionStrings.profiles[?consumerGroup=='High' && protocol=='TCPS' && tlsAuthentication=='Server'].value | [0]" `
+ -o tsv
+
+echo $trgConn
+```
+
+> π‘ **Tip**: For detailed instructions, see [How to retrieve the ODAA connection string](../../docs/odaa-get-token.md)
+
+---
+
+## π Step 2: Run the Performance Test Script
+
+Open PowerShell and navigate to this folder, then run:
+
+```powershell
+# Navigate to the walkthrough folder
+cd walkthrough\perf-test-odaa
+
+# Set your AKS subscription
+az account set --subscription "sub-team0" # Replace with your AKS subscription
+
+# Run the performance test script
+.\Deploy-PerfTest.ps1 `
+ -UserName "user00" `
+ -ADBPassword "Welcome1234#" `
+ -ADBConnectionString $trgConn
+```
+
+### π Script Parameters
+
+| Parameter | Required | Description |
+|-----------|----------|-------------|
+| `-UserName` | Yes | Your assigned username (e.g., `user00`, `user01`) |
+| `-ADBPassword` | Yes | Your ODAA ADB password |
+| `-ADBConnectionString` | Yes | Full TNS connection string from Step 1 |
+| `-TestType` | No | `adbping`, `connping`, or `both` (default: `adbping`) |
+| `-TestDuration` | No | Test duration in seconds (default: 90) |
+| `-Threads` | No | Number of concurrent threads for adbping (default: 3) |
+| `-AKSResourceGroup` | No | AKS resource group (auto-detected from username) |
+| `-AKSClusterName` | No | AKS cluster name (auto-detected from username) |
+| `-SkipAKSConnection` | No | Skip if already connected to AKS |
+| `-Cleanup` | No | Remove test jobs after completion |
+
+### β³ Wait for Results
+
+The script will:
+
+1. ✅ Connect to your AKS cluster
+2. ✅ Set up the test namespace
+3. ✅ Deploy the performance test job
+4. ✅ Wait for completion (~2-3 minutes)
+5. ✅ Display the results
+
+---
+
+## π Step 3: Understanding the Results
+
+### ADBPing Results
+
+```text
++++Test Summary+++
+ Test Client: java
+ Number of concurrent threads: 3
+ Duration (secs): 90
+ SQL executed: select 1 from dual;
+ Pass: 341760 Fail: 0
+ SQL Execution Time(ms) : Min:0.543 Max:89.571 Avg:0.747 Median:0.653 Perc90:0.762 Perc95:0.778 Perc99:0.891
+ Connect + SQL Execution Time(ms) : Min:0.549 Max:89.797 Avg:0.76 Median:0.661 Perc90:0.77 Perc95:0.791 Perc99:0.968
+```
+
+### ConnPing Results
+
+```text
+connect mean=111.78, stddev=6.27, min=104.73, max=139.55
+ociping mean=0.95, stddev=0.08, min=0.80, max=1.25
+dualping mean=1.00, stddev=0.09, min=0.85, max=1.23
+```
+
+### Performance Benchmarks
+
+| Metric | Excellent | Good | Needs Investigation |
+|--------|-----------|------|---------------------|
+| **ociping/SQL Execution** | < 1ms | 1-2ms | > 5ms |
+| **Connect Time** | < 120ms | 120-200ms | > 300ms |
+| **P99 Latency** | < 2ms | 2-5ms | > 10ms |
+| **Pass Rate** | 100% | > 99% | < 99% |
+
+---
+
+## π§ Running Both Test Types
+
+To run both adbping and connping tests:
+
+```powershell
+.\Deploy-PerfTest.ps1 `
+ -UserName "user00" `
+ -ADBPassword "Welcome1234#" `
+ -ADBConnectionString $trgConn `
+ -TestType "both"
+```
+
+---
+
+## π§ Troubleshooting
+
+### Check Test Job Status
+
+```powershell
+# View running jobs
+kubectl get jobs -n adb-perf-test
+
+# Check pod status
+kubectl get pods -n adb-perf-test
+
+# View logs for a specific job
+kubectl logs job/adbping-performance-test -n adb-perf-test
+```
+
+### Rerun Tests
+
+```powershell
+# Clean up and rerun
+.\Deploy-PerfTest.ps1 `
+ -UserName "user00" `
+ -ADBPassword "Welcome1234#" `
+ -ADBConnectionString $trgConn `
+ -Cleanup
+
+# Then run again
+.\Deploy-PerfTest.ps1 `
+ -UserName "user00" `
+ -ADBPassword "Welcome1234#" `
+ -ADBConnectionString $trgConn
+```
+
+### Common Issues
+
+| Issue | Solution |
+|-------|----------|
+| Job stuck in `Pending` | Check if `adb-perf-test` namespace has the required pods |
+| Connection timeout | Verify TNS connection string and NSG rules |
+| High latency (> 10ms) | Check network path, VNet peering, or DNS resolution |
+| Jobs not found | Ensure the perf-test pods are deployed from Challenge 2 |
+
+---
+
+## π Advanced: Interactive Testing
+
+For more control, you can run tests interactively:
+
+```powershell
+# Get the adbping pod name
+$podName = kubectl get pods -n adb-perf-test -l app=adbping -o jsonpath="{.items[0].metadata.name}"
+
+# Run custom adbping test
+kubectl exec -it $podName -n adb-perf-test -- adbping `
+ -u "admin" `
+ -p "Welcome1234#" `
+ -o `
+ -l "(description= ...your-connection-string...)" `
+ -c java -t 5 -d 60
+```
+
+---
+
+## βοΈ Next Steps
+
+You've completed Challenge 5! Here's what to explore next:
+
+- **Compare results** across different times of day
+- **Test with different thread counts** to measure scalability
+- **Review NSG rules** if latency is higher than expected
+
+---
+
+## π Additional Resources
+
+- [Original detailed walkthrough](./perf-test-odaa.md) - Manual testing process
+- [ODAA Connection String Guide](../../docs/odaa-get-token.md) - How to get your TNS string
+- [ADBPing Documentation](https://github.com/oracle/adbping) - Oracle's performance testing tool
+
+[Back to workspace README](../../README.md)
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/perf-test-odaa/perf-test-odaa.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/perf-test-odaa/perf-test-odaa.md
new file mode 100644
index 000000000..176a5db80
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/perf-test-odaa/perf-test-odaa.md
@@ -0,0 +1,704 @@
+# π Challenge 5: Perform Connectivity Tests on Oracle Database@Azure [ODAA] Autonomous Database
+
+[Back to workspace README](../../README.md)
+
+The ODAA Autonomous Database is a so-called PaaS (Platform as a Service) offering, where the underlying infrastructure is fully managed by Microsoft and Oracle.
+
+Installing tools like iperf, sockperf, etc. directly on the ODAA ADB instance is therefore not possible, as it would be on a VM or bare-metal server.
+
+The following exercise uses the Oracle Instant Client running inside the AKS cluster to connect via sqlplus to the ODAA Autonomous Database instance and perform simple latency measurements via SQL queries.
+
+The SQL queries measure the network round trips, elapsed time, and DB time, and calculate the latency per round trip.
+
+This approach is inspired by a blog post from Clemens Bleile.
+
+## π Login to Azure and set the right subscription
+
+~~~powershell
+
+az login # choose your assigned user account, e.g. user01@cptazure.org, or the menu item "Work or school account"
+# switch to the subscription where AKS is deployed
+
+$subAKS="sub-mh2" # replace with your AKS subscription name
+
+# Make sure your cli points to the AKS subscription
+az account set --subscription $subAKS
+
+# log into your AKS cluster if not already done
+$rgAKS="rg-aks-user02" # replace with your AKS resource group name
+$AKSClusterName="aks-user02" # replace with your AKS cluster name
+# login to aks
+az aks get-credentials -g $rgAKS -n $AKSClusterName --overwrite-existing
+
+~~~
+
+## π Performance Testing with ADBPing
+
+### π― What is ADBPing
+
+The `adbping` tool is a performance testing utility specifically designed for Oracle Autonomous Databases (ADB). It allows you to measure various performance metrics such as connection latency, SQL execution time, and overall throughput when interacting with an Oracle ADB instance.
+
+### Deploy ADBPing Container
+
+~~~powershell
+# Get pod name for interactive access
+$podNameADBPing = kubectl get pods -n adb-perf-test -l app=adbping -o jsonpath="{.items[0].metadata.name}"
+Write-Host "Pod Name: $podNameADBPing"
+~~~
+
+Output should be similar to:
+
+~~~text
+Pod Name: adbping-deployment-5cb699cd9c-mbgsn
+~~~
+
+### Configure and Run Automated ADBPing Performance Tests
+
+**Prerequisites**: Ensure you have obtained the TNS connection string by following [docs\odaa-get-token.md](../../docs/odaa-get-token.md) and assigned it to the `$trgConn` variable. Similar to this:
+
+~~~powershell
+$trgConn="(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=zuyhervb.adb.eu-paris-1.oraclecloud.com))(connect_data=(service_name=gc2401553d1c7ab_uer00_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))"
+~~~
+
+~~~powershell
+# Create a copy of the job template to avoid overwriting the original
+Copy-Item "resources\infra\k8s\adbping-job.yaml" "adbping-job.yaml"
+
+# Configure your ADB connection details
+
+$ADB_PASSWORD = "" # Replace with your actual ADB password
+$ADB_TNS = $trgConn # Use the TNS connection string obtained from docs\odaa-get-token.md
+
+# Update the job configuration with your credentials
+(Get-Content adbping-job.yaml) -replace 'YOUR_PASSWORD_HERE', $ADB_PASSWORD | Set-Content adbping-job.yaml
+(Get-Content adbping-job.yaml) -replace 'YOUR_TNS_CONNECTION_STRING_HERE', $ADB_TNS | Set-Content adbping-job.yaml
+
+
+# Verify the changes - show the updated configuration lines
+(Get-Content adbping-job.yaml)[23..26] | ForEach-Object { Write-Host " $_" -ForegroundColor Yellow }
+~~~
+
+The updated configuration should look like this:
+
+~~~yaml
+ USER="admin"
+ PASSWORD="****************" # Your actual password
+ TNS="(description= (retry_count=20)(retry_delay=3)...)" # Your actual TNS string
+~~~
+
+Deploy and monitor the performance testing job:
+
+~~~powershell
+# Deploy the customized performance testing job
+kubectl apply -f adbping-job.yaml
+
+# Monitor the job progress
+kubectl get jobs -n adb-perf-test -w
+~~~
+
+Initial output should look similar to:
+
+~~~text
+NAME STATUS COMPLETIONS DURATION AGE
+adbping-performance-test Running 0/1 74s 74s
+~~~
+
+After a while you should see:
+
+~~~text
+NAME STATUS COMPLETIONS DURATION AGE
+adbping-performance-test Complete 1/1 2m2s 2m8s
+~~~
+
+Press CTRL+C to exit the watch command.
+
+~~~powershell
+# View test results
+kubectl logs job/adbping-performance-test -n adb-perf-test
+~~~
+
+Results should look similar to:
+
+~~~text
+π Oracle ADB Performance Test
+==============================
+π Starting performance test...
+Threads: 3, Duration: 30 seconds
+
++++Test Summary+++
+ Test Client: java
+ Number of concurrent threads: 3
+ Duration (secs): 90
+ SQL executed: select 1 from dual;
+ Pass: 341760 Fail: 0
+ Test start date: 2025-11-14 07:34:22.005100+00:00
+ Test end date: 2025-11-14 07:36:08.007073+00:00
+ Java connection pool Stats: Initsize:3, Maxsize:3, Pool setup time(ms):6090.818
+ SQL Execution Time(ms) : Min:0.543 Max:89.571 Avg:0.747 Median:0.653 Perc90:0.762 Perc95:0.778 Perc99:0.891
+ Connect + SQL Execution Time(ms) : Min:0.549 Max:89.797 Avg:0.76 Median:0.661 Perc90:0.77 Perc95:0.791 Perc99:0.968
+
+Interpretation of the results
+-----------------------------
+
+ 1. Pass/Fail count: Indicates the total number of connections passed/failed in defined duration by the defined number of threads.
+
+ 2. SQL execution time: Time taken to just execute the SQL. Connection time not included.
+ For sqlplus, this would be the elapsed time reported by sqlplus.
+
+ 3. Connect + SQL Execution Time: Time taken to connect and execute SQL.
+ For sqlplus, this would be the time to connect and run the sql.
+ For java, it would be time taken to getConnection() and execute the query.
+
+ 4. Java connection pool stats: Reports the time taken to setup the java connection pool and the initial and max size.
+ All query executions do a getConnection() and execute the SQL.
+
+ 5. Perc90, Perc95, Perc99: This is the percentile value indicating 90%, 95% or 99% of the latencies are below the respective value.
+
+✅ Performance test completed!
+~~~
+
+### Performance Results Analysis
+
+The adbping tool provides comprehensive metrics including:
+
+- **Pass/Fail Counts**: Total successful/failed connections
+- **SQL Execution Time**: Time to execute SQL only (excludes connection time)
+- **Connect + SQL Time**: Total time including connection establishment
+- **Percentile Analysis**: P90, P95, P99 latency metrics
+- **Connection Pool Statistics**: Pool setup time and configuration
+
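+If you only need the latency summary of a finished run, you can filter the job log for the two timing lines. A small convenience sketch:
+
+~~~powershell
+# print only the two latency summary lines from the completed job
+kubectl logs job/adbping-performance-test -n adb-perf-test | Select-String 'Execution Time\(ms\)'
+~~~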
+
+### Interactive Testing of ADBPing (Advanced)
+
+For interactive testing and custom test scenarios:
+
+~~~powershell
+# Enter the adbping container for interactive testing
+kubectl exec -it $podNameADBPing -n adb-perf-test -- /bin/bash
+~~~
+
+Inside the container, run custom adbping tests:
+
+~~~bash
+# The adbping tool is pre-extracted and ready to use
+which adbping
+adbping --help
+
+# Set your Oracle ADB connection details
+export ADB_USER="admin"
+export ADB_PASSWORD="" # Replace with your actual ADB password
+export ADB_TNS="(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=zuyhervb.adb.eu-paris-1.oraclecloud.com))(connect_data=(service_name=gc2401553d1c7ab_uer00_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))" # Use the TNS connection string from docs\odaa-get-token.md
+
+# Run custom performance tests
+adbping -u "$ADB_USER" -p "$ADB_PASSWORD" -o -l "$ADB_TNS" -c java -t 5 -d 90
+~~~
+
+Results will be similar to the automated job output.
+
+~~~text
++++Test Summary+++
+ Test Client: java
+ Number of concurrent threads: 5
+ Duration (secs): 90
+ SQL executed: select 1 from dual;
+ Pass: 379893 Fail: 0
+ Test start date: 2025-11-14 07:41:47.368456+00:00
+ Test end date: 2025-11-14 07:43:36.007844+00:00
+ Java connection pool Stats: Initsize:5, Maxsize:5, Pool setup time(ms):5790.047
+ SQL Execution Time(ms) : Min:0.848 Max:102.297 Avg:1.158 Median:0.999 Perc90:1.089 Perc95:1.119 Perc99:1.455
+ Connect + SQL Execution Time(ms) : Min:0.855 Max:108.171 Avg:1.175 Median:1.008 Perc90:1.1 Perc95:1.136 Perc99:1.626
+
+Interpretation of the results
+-----------------------------
+
+ 1. Pass/Fail count: Indicates the total number of connections passed/failed in defined duration by the defined number of threads.
+
+ 2. SQL execution time: Time taken to just execute the SQL. Connection time not included.
+ For sqlplus, this would be the elapsed time reported by sqlplus.
+
+ 3. Connect + SQL Execution Time: Time taken to connect and execute SQL.
+ For sqlplus, this would be the time to connect and run the sql.
+ For java, it would be time taken to getConnection() and execute the query.
+
+ 4. Java connection pool stats: Reports the time taken to setup the java connection pool and the initial and max size.
+ All query executions do a getConnection() and execute the SQL.
+
+ 5. Perc90, Perc95, Perc99: This is the percentile value indicating 90%, 95% or 99% of the latencies are below the respective value.
+~~~
+
+## π Performance Testing with ConnPing
+
+### π― What is Connping?
+
+Connping is a performance testing tool based on Oracle's rwloadsim that measures:
+
+- **ociping**: Database round-trip latency (primary metric)
+- **dualping**: SQL execution time for `SELECT 1 FROM DUAL`
+- **connect**: Session establishment time
+
+
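+Since connping prints one measurement line per second, it is convenient to capture the output to a file for later analysis. A sketch, reusing the pod name and connection variables that are set up in the steps below:
+
+~~~powershell
+# capture a connping run to a local file (variables are defined in the following steps)
+kubectl exec $podNameConnPing -n adb-perf-test -- connping -l "admin/$ADB_PASSWORD@$ADB_TNS" --period=90 | Tee-Object -FilePath connping.log
+~~~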
+
+### π Interactive Testing with ConnPing
+
+~~~powershell
+# Get pod name for interactive access
+$podNameConnPing = kubectl get pods -n adb-perf-test -l app=connping -o jsonpath="{.items[0].metadata.name}"
+Write-Host "Pod Name: $podNameConnPing"
+~~~
+
+Output should be similar to:
+
+~~~text
+Pod Name: connping-deployment-598bb45987-fqr9r
+~~~
+
+**Prerequisites**: Ensure you have obtained the TNS connection string by following [docs\odaa-get-token.md](../../docs/odaa-get-token.md) and assigned it to the `$trgConn` variable. Similar to this:
+
+~~~powershell
+$trgConn="(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=y1jilkjp.adb.eu-paris-1.oraclecloud.com))(connect_data=(service_name=gc2401553d1c7ab_user02_high.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))"
+~~~
+
+~~~powershell
+# Configure your ADB connection details
+$ADB_PASSWORD = "" # Replace with your actual ADB password
+$ADB_TNS = $trgConn # Use the TNS connection string obtained from docs\odaa-get-token.md
+# Run an ad-hoc test
+kubectl exec -it $podNameConnPing -n adb-perf-test -- connping -l "admin/$ADB_PASSWORD@$ADB_TNS" --period=90
+~~~
+
+Results should look similar to:
+
+~~~text
+RWP*Connect/OCIPing Release 3.2.1.0 Production on Fri, 14 Nov 2025 07:56:43 UTC
+RWL-002: warning: public directory '/opt/rwloadsim/../public' is not accessible or does not appear correct
+Connected default database with reconnect to:
+Oracle Database 23ai Enterprise Edition Release 23.0.0.0.0 - for Oracle Cloud and Engineered Systems
+connect:108.21 ms, ociping:1.125 ms, dualping:1.191 ms, sid=49476, inst#=1, time=1.1
+connect:114.77 ms, ociping:0.999 ms, dualping:1.054 ms, sid=49476, inst#=1, time=2.1
+connect:110.57 ms, ociping:0.891 ms, dualping:0.922 ms, sid=49476, inst#=1, time=3.1
+connect:108.97 ms, ociping:0.874 ms, dualping:0.949 ms, sid=49476, inst#=1, time=4.1
+connect:112.82 ms, ociping:1.033 ms, dualping:1.012 ms, sid=49476, inst#=1, time=5.1
+connect:107.23 ms, ociping:0.802 ms, dualping:0.866 ms, sid=49476, inst#=1, time=6.1
+connect:106.64 ms, ociping:0.883 ms, dualping:0.932 ms, sid=49476, inst#=1, time=7.1
+connect:112.56 ms, ociping:0.877 ms, dualping:0.939 ms, sid=49476, inst#=1, time=8.1
+connect:124.42 ms, ociping:0.843 ms, dualping:0.923 ms, sid=52345, inst#=1, time=9.1
+connect:109.17 ms, ociping:0.878 ms, dualping:0.940 ms, sid=7174, inst#=1, time=10.1
+connect:126.04 ms, ociping:0.954 ms, dualping:1.005 ms, sid=7174, inst#=1, time=11.1
+connect:105.92 ms, ociping:0.931 ms, dualping:0.943 ms, sid=7174, inst#=1, time=12.1
+connect:108.21 ms, ociping:1.081 ms, dualping:1.101 ms, sid=7174, inst#=1, time=13.1
+connect:115.14 ms, ociping:0.835 ms, dualping:0.937 ms, sid=7174, inst#=1, time=14.1
+connect:119.90 ms, ociping:0.956 ms, dualping:0.986 ms, sid=7174, inst#=1, time=15.1
+connect:114.64 ms, ociping:0.953 ms, dualping:0.988 ms, sid=7174, inst#=1, time=16.1
+connect:108.50 ms, ociping:0.953 ms, dualping:0.939 ms, sid=7174, inst#=1, time=17.1
+connect:117.25 ms, ociping:1.127 ms, dualping:1.170 ms, sid=7174, inst#=1, time=18.1
+connect:139.55 ms, ociping:0.899 ms, dualping:0.902 ms, sid=7174, inst#=1, time=19.2
+connect:112.88 ms, ociping:1.079 ms, dualping:1.152 ms, sid=7174, inst#=1, time=20.1
+connect:106.89 ms, ociping:1.074 ms, dualping:0.972 ms, sid=7174, inst#=1, time=21.1
+connect:125.84 ms, ociping:0.915 ms, dualping:0.943 ms, sid=48520, inst#=1, time=22.1
+connect:107.00 ms, ociping:0.960 ms, dualping:1.003 ms, sid=7174, inst#=1, time=23.1
+connect:107.39 ms, ociping:0.997 ms, dualping:1.052 ms, sid=52345, inst#=1, time=24.1
+connect:109.52 ms, ociping:1.039 ms, dualping:1.090 ms, sid=52345, inst#=1, time=25.1
+connect:111.20 ms, ociping:0.918 ms, dualping:0.983 ms, sid=52345, inst#=1, time=26.1
+connect:109.44 ms, ociping:0.956 ms, dualping:1.153 ms, sid=7174, inst#=1, time=27.1
+connect:120.70 ms, ociping:0.979 ms, dualping:1.036 ms, sid=7174, inst#=1, time=28.1
+connect:125.74 ms, ociping:1.023 ms, dualping:1.106 ms, sid=7174, inst#=1, time=29.1
+connect:116.22 ms, ociping:0.921 ms, dualping:0.942 ms, sid=7174, inst#=1, time=30.1
+connect:106.67 ms, ociping:0.966 ms, dualping:0.946 ms, sid=52345, inst#=1, time=31.1
+connect:109.95 ms, ociping:0.919 ms, dualping:0.943 ms, sid=24382, inst#=1, time=32.1
+connect:115.90 ms, ociping:0.971 ms, dualping:0.951 ms, sid=24382, inst#=1, time=33.1
+connect:106.37 ms, ociping:0.878 ms, dualping:0.924 ms, sid=24382, inst#=1, time=34.1
+connect:112.01 ms, ociping:1.073 ms, dualping:1.222 ms, sid=52345, inst#=1, time=35.1
+connect:106.83 ms, ociping:0.952 ms, dualping:1.022 ms, sid=52345, inst#=1, time=36.1
+connect:105.75 ms, ociping:0.880 ms, dualping:0.907 ms, sid=52345, inst#=1, time=37.1
+connect:107.74 ms, ociping:0.894 ms, dualping:0.940 ms, sid=52345, inst#=1, time=38.1
+connect:106.66 ms, ociping:0.982 ms, dualping:1.031 ms, sid=52345, inst#=1, time=39.1
+connect:120.95 ms, ociping:0.924 ms, dualping:0.961 ms, sid=52345, inst#=1, time=40.1
+connect:106.97 ms, ociping:0.918 ms, dualping:0.889 ms, sid=52345, inst#=1, time=41.1
+connect:105.50 ms, ociping:0.920 ms, dualping:0.970 ms, sid=23666, inst#=1, time=42.1
+connect:113.08 ms, ociping:0.917 ms, dualping:0.992 ms, sid=23666, inst#=1, time=43.1
+connect:107.52 ms, ociping:0.894 ms, dualping:0.901 ms, sid=52345, inst#=1, time=44.1
+connect:110.68 ms, ociping:0.940 ms, dualping:0.965 ms, sid=52345, inst#=1, time=45.1
+connect:110.66 ms, ociping:0.892 ms, dualping:0.901 ms, sid=52345, inst#=1, time=46.1
+connect:108.95 ms, ociping:0.837 ms, dualping:0.884 ms, sid=52345, inst#=1, time=47.1
+connect:110.03 ms, ociping:1.051 ms, dualping:1.182 ms, sid=52345, inst#=1, time=48.1
+connect:106.31 ms, ociping:0.928 ms, dualping:0.948 ms, sid=52345, inst#=1, time=49.1
+connect:110.32 ms, ociping:1.037 ms, dualping:1.225 ms, sid=52345, inst#=1, time=50.1
+connect:112.63 ms, ociping:0.990 ms, dualping:1.060 ms, sid=52345, inst#=1, time=51.1
+connect:120.74 ms, ociping:0.878 ms, dualping:0.916 ms, sid=52345, inst#=1, time=52.1
+connect:109.31 ms, ociping:1.007 ms, dualping:1.097 ms, sid=52345, inst#=1, time=53.1
+connect:111.16 ms, ociping:1.016 ms, dualping:1.024 ms, sid=52345, inst#=1, time=54.1
+connect:104.73 ms, ociping:0.802 ms, dualping:0.857 ms, sid=52345, inst#=1, time=55.1
+connect:106.77 ms, ociping:0.965 ms, dualping:0.977 ms, sid=52345, inst#=1, time=56.1
+connect:114.93 ms, ociping:0.999 ms, dualping:1.097 ms, sid=48520, inst#=1, time=57.1
+connect:105.79 ms, ociping:0.851 ms, dualping:0.906 ms, sid=48520, inst#=1, time=58.1
+connect:113.72 ms, ociping:0.893 ms, dualping:0.933 ms, sid=48520, inst#=1, time=59.1
+connect:105.47 ms, ociping:0.843 ms, dualping:0.986 ms, sid=48520, inst#=1, time=60.1
+connect:106.59 ms, ociping:0.873 ms, dualping:0.931 ms, sid=48520, inst#=1, time=61.1
+connect:115.23 ms, ociping:1.000 ms, dualping:1.029 ms, sid=48520, inst#=1, time=62.1
+connect:111.70 ms, ociping:0.930 ms, dualping:0.939 ms, sid=48520, inst#=1, time=63.1
+connect:114.30 ms, ociping:0.999 ms, dualping:1.083 ms, sid=48520, inst#=1, time=64.1
+connect:107.36 ms, ociping:0.922 ms, dualping:0.974 ms, sid=48520, inst#=1, time=65.1
+connect:108.19 ms, ociping:1.080 ms, dualping:1.151 ms, sid=48520, inst#=1, time=66.1
+connect:107.93 ms, ociping:0.808 ms, dualping:0.852 ms, sid=48520, inst#=1, time=67.1
+connect:117.00 ms, ociping:0.915 ms, dualping:0.945 ms, sid=48520, inst#=1, time=68.1
+connect:109.28 ms, ociping:0.875 ms, dualping:0.932 ms, sid=52345, inst#=1, time=69.1
+connect:118.01 ms, ociping:1.041 ms, dualping:1.104 ms, sid=52345, inst#=1, time=70.1
+connect:109.54 ms, ociping:0.959 ms, dualping:1.016 ms, sid=52345, inst#=1, time=71.1
+connect:105.26 ms, ociping:0.832 ms, dualping:0.875 ms, sid=52345, inst#=1, time=72.1
+connect:110.08 ms, ociping:0.873 ms, dualping:0.878 ms, sid=52345, inst#=1, time=73.1
+connect:108.91 ms, ociping:0.956 ms, dualping:0.990 ms, sid=52345, inst#=1, time=74.1
+connect:111.61 ms, ociping:0.978 ms, dualping:1.054 ms, sid=52345, inst#=1, time=75.1
+connect:106.75 ms, ociping:0.840 ms, dualping:0.862 ms, sid=52345, inst#=1, time=76.1
+connect:105.03 ms, ociping:0.963 ms, dualping:1.019 ms, sid=52345, inst#=1, time=77.1
+connect:108.72 ms, ociping:1.032 ms, dualping:1.104 ms, sid=52345, inst#=1, time=78.1
+connect:113.82 ms, ociping:0.957 ms, dualping:1.022 ms, sid=52345, inst#=1, time=79.1
+connect:105.20 ms, ociping:0.863 ms, dualping:0.891 ms, sid=52345, inst#=1, time=80.1
+connect:114.58 ms, ociping:1.078 ms, dualping:1.105 ms, sid=23666, inst#=1, time=81.1
+connect:106.52 ms, ociping:1.116 ms, dualping:1.151 ms, sid=23666, inst#=1, time=82.1
+connect:117.42 ms, ociping:1.013 ms, dualping:1.060 ms, sid=24382, inst#=1, time=83.1
+connect:124.60 ms, ociping:0.877 ms, dualping:0.900 ms, sid=24382, inst#=1, time=84.1
+connect:126.40 ms, ociping:1.253 ms, dualping:1.219 ms, sid=23666, inst#=1, time=85.1
+connect:109.28 ms, ociping:0.918 ms, dualping:0.996 ms, sid=23666, inst#=1, time=86.1
+connect:118.35 ms, ociping:0.917 ms, dualping:0.984 ms, sid=23666, inst#=1, time=87.1
+connect:105.95 ms, ociping:0.925 ms, dualping:0.937 ms, sid=23666, inst#=1, time=88.1
+connect:107.67 ms, ociping:0.974 ms, dualping:0.978 ms, sid=23666, inst#=1, time=89.1
+connect mean=111.78, stddev=6.27, min=104.73, max=139.55
+ociping mean=0.95, stddev=0.08, min=0.80, max=1.25
+dualping mean=1.00, stddev=0.09, min=0.85, max=1.23
+~~~
+
+#### π Understanding the Results
+
+The output will show:
+
+- **Real-time metrics**: Per-second latency measurements during the test
+- **Summary statistics**:
+ - `ociping mean`: Average round-trip latency
+ - `connect mean`: Average connection establishment time
+ - Standard deviation, min, and max values
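+
+If you redirected the connping output to a file, a few lines of PowerShell turn the summary into objects that are easy to compare across runs. A minimal sketch, assuming the output was saved to a hypothetical file connping.log:
+
+~~~powershell
+# parse the three summary lines (connect/ociping/dualping) into objects
+Get-Content connping.log | Select-String '^(connect|ociping|dualping) mean=' | ForEach-Object {
+    $parts = $_.Line -split '[ ,=]+'
+    [pscustomobject]@{ Metric = $parts[0]; MeanMs = [double]$parts[2]; StdDevMs = [double]$parts[4] }
+}
+~~~
+
+With the parsed objects you can, for example, quickly check whether the ociping mean stays below the 2 ms target mentioned in this challenge.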
+
+## π Test TCP Connection Time to ADB (!!DON'T!!)
+
+The following method should **not be used** to measure connection latency from within the AKS cluster to the ADB instance.
+It establishes a new TCP connection 10 times in a row and measures the time taken for each connection attempt.
+Every measurement therefore includes the TCP handshake time, so the results are less meaningful than those of the methods described above, which measure round trips on an already established database session.
+
+~~~powershell
+# Extract the pod name of the instant client, as it contains a random suffix
+$podInstantClientName=kubectl get pods -n microhacks | Select-String 'ogghack-goldengate-microhack-sample-instantclient' | ForEach-Object { ($_ -split '\s+')[0] }
+$podInstantClientName
+~~~
+
+Output should be similar to:
+
+~~~text
+ogghack-goldengate-microhack-sample-instantclient-5985df84wp5c4
+~~~
+
+~~~powershell
+# log in to the instant client pod
+kubectl exec -it -n microhacks $podInstantClientName -- /bin/bash
+~~~
+
+Inside the instant client pod, run the following commands to test the TCP connection time to ADB:
+
+~~~bash
+# You should still be logged into the pod
+# Test the TCP connection time to ADB (replace H with your ADB host)
+H=y1jilkjp.adb.eu-paris-1.oraclecloud.com
+P=1521
+for i in {1..10}; do
+  t0=$(date +%s%3N)
+  (echo > /dev/tcp/$H/$P) &>/dev/null && dt=$(( $(date +%s%3N) - t0 )) || dt=-1
+  echo "$i: ${dt} ms"
+  sleep 1
+done
+~~~
+
+Results should look similar to:
+
+~~~text
+1: 13 ms
+2: 19 ms
+3: 18 ms
+4: 27 ms
+5: 13 ms
+6: 10 ms
+7: 11 ms
+8: 13 ms
+9: 17 ms
+10: 16 ms
+~~~
+
+Exit the pod
+
+~~~bash
+exit
+~~~
+
+
+
+[Back to workspace README](../../README.md)
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/image.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/image.png
new file mode 100644
index 000000000..064db298b
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/image.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 2.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 2.png
new file mode 100644
index 000000000..2aaaaef3c
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 2.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 3.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 3.png
new file mode 100644
index 000000000..d1d00d014
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 3.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 4.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 4.png
new file mode 100644
index 000000000..1da4e5375
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 4.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 5.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 5.png
new file mode 100644
index 000000000..198518e54
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 5.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 6.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 6.png
new file mode 100644
index 000000000..198518e54
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 6.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 7.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 7.png
new file mode 100644
index 000000000..9df418e58
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy 7.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy.png
new file mode 100644
index 000000000..6c27556c3
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image copy.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image.png
new file mode 100644
index 000000000..110b4341b
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/media/image.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/setup-user-account.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/setup-user-account.md
new file mode 100644
index 000000000..d0f2c819e
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/setup-user-account/setup-user-account.md
@@ -0,0 +1,100 @@
+# Setup your User Account on Azure
+
+[Back to workspace README](../../README.md)
+
+In the following section you will cover two tasks:
+
+- Set up access via multi-factor authentication (MFA)
+- Verify that the required resource groups and roles are available
+
+## Open a Browser with a New Incognito/Private Window or Profile
+
+### Method 1 - Use your preferred browser
+
+- Edge: open a new InPrivate window
+- Chrome: open a new incognito window
+
+### Method 2 - Create a new user profile in your browser
+
+Example with Edge:
+
+Open your browser and click on the profile icon in the upper right corner.
+
+
+
+
+### Log in to the Azure portal
+
+Log in to the Azure portal by opening the URL https://portal.azure.com in the browser window of the newly created profile and use the credentials provided at the beginning of the Microhack. The credentials you received should look similar to this:
+
+~~~json
+ "user01": [
+ {
+ "user_principal_name": "user01@cptazure.org",
+ "display_name": "Bruce Wayne",
+ "initial_password": <"Assigned Password">
+ }
+ ],
+~~~
+
+### Set up MFA authentication when prompted
+
+Important: For multi-factor authentication you first have to install the Microsoft Authenticator app on your mobile phone if you don't have it yet.
+
+The following steps walk you through the MFA setup. If you have any additional questions, check the online resources under [MFA](https://learn.microsoft.com/en-us/entra/identity/authentication/tutorial-enable-azure-mfa).
+
+1. When you open the URL [Azure Portal](https://portal.azure.com/) for the first time, you are prompted to enable MFA for access to your Azure subscription. The following pictures guide you through the process visually.
+
+    1. Press Next to start the authentication process.
+
+ 
+
+    2. In the opened Authenticator app, press the + symbol in the upper right corner and choose a new "Work or school account". In the following menu, choose "Scan QR code".
+
+ 
+
+    3. After you have registered the new account, you will be asked to verify the registration by typing a randomly generated number into the Authenticator app.
+
+ 
+
+    4. The MFA registration process should now be completed successfully.
+
+ 
+
+    5. Congratulations, you have established MFA authentication.
+
+ 
+
+    6. Finally, you have to update the pre-assigned password.
+
+ 
+
+    7. Now you are logged in to the Azure Portal.
+
+ 
+
+You have successfully finished the first important step!
+
+### Verify that the required resource groups and roles are available
+
+After you have successfully logged into the Azure portal, a first check is to verify that the required resource groups for the Microhack exist.
+
+#### Navigate to the resource groups in the Azure portal or search for the name in the search bar at the top
+
+
+
+#### Two resource groups are of interest for the Microhack and should already be created: "aks-user[your user number]" and "odaa-user[your user number]"
+
+
+
+#### Your Resources
+
+The resources should look similar to this for resource group aks-user[your user number]:
+
+
+
+The resources should look similar to this for resource group odaa-user[your user number]:
+
+
+
+[Back to workspace README](../../README.md)
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/OCI_nsg1.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/OCI_nsg1.jpg
new file mode 100644
index 000000000..2f4e123f0
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/OCI_nsg1.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 2.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 2.png
new file mode 100644
index 000000000..3c66ce74c
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 2.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 3.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 3.png
new file mode 100644
index 000000000..4cf75f877
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 3.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 4.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 4.png
new file mode 100644
index 000000000..0e5782d8d
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 4.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 5.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 5.png
new file mode 100644
index 000000000..7628499b3
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 5.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 6.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 6.png
new file mode 100644
index 000000000..14b10b9d0
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 6.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 7.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 7.png
new file mode 100644
index 000000000..cb8652040
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 7.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 8.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 8.png
new file mode 100644
index 000000000..f29dd2b81
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy 8.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy.png
new file mode 100644
index 000000000..086cfbe1f
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image copy.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image.png
new file mode 100644
index 000000000..60a63db6b
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/image.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg2.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg2.jpg
new file mode 100644
index 000000000..33b25f9c6
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg2.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg3.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg3.jpg
new file mode 100644
index 000000000..be66cf293
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg3.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg4.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg4.jpg
new file mode 100644
index 000000000..5226cd8a1
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg4.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg5.jpg b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg5.jpg
new file mode 100644
index 000000000..b00164c83
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/oci_nsg5.jpg differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_1.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_1.png
new file mode 100644
index 000000000..7899a415c
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_1.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_2.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_2.png
new file mode 100644
index 000000000..90a5e973d
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_2.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_3.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_3.png
new file mode 100644
index 000000000..9e18b4e12
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_3.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_4.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_4.png
new file mode 100644
index 000000000..9c92f825f
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_4.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_5.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_5.png
new file mode 100644
index 000000000..c0ad07675
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/private_dns_5.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/prviate_dns_0.png b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/prviate_dns_0.png
new file mode 100644
index 000000000..0f39d350c
Binary files /dev/null and b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/media/prviate_dns_0.png differ
diff --git a/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/update-odaa-nsg-dns.md b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/update-odaa-nsg-dns.md
new file mode 100644
index 000000000..60b1a1016
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/10_Oracle_on_Azure/walkthrough/update-odaa-nsg-dns/update-odaa-nsg-dns.md
@@ -0,0 +1,196 @@
+# Challenge 3: Update Oracle ADB NSG and DNS Configuration
+
+[Back to workspace README](../../README.md)
+
+## Network Security Group Configuration
+
+You need to update the Oracle ADB Network Security Group (NSG) with the CIDR range of the VNet where your AKS cluster is deployed. This can be done via the Azure Portal.
+
+See the [official Oracle documentation about Network Security Groups](https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/nsg-manage.htm) for more details on Oracle NSGs.
+
+In the Azure portal, switch to your AKS subscription and open the resource group where your AKS cluster is deployed. Inside the resource group you will find the AKS-related VNet. On the VNet overview page, copy the CIDR shown under "Address space".
+
+In our case, the address space should be 10.0.0.0/16.
+
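+If you prefer to look this up from the command line, here is a minimal sketch using the Azure CLI; the resource group and VNet names below are placeholders you must replace with your own:
+
+~~~powershell
+# Minimal sketch (assumed names): read the VNet address space via the Azure CLI
+$rgAKS = "aks-user02"         # replace with your AKS resource group name
+$vnetAKSName = "aks-user02"   # replace with your AKS VNet name
+az network vnet show -g $rgAKS -n $vnetAKSName --query "addressSpace.addressPrefixes" -o tsv
+~~~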
+
+
+1. To access the OCI console, open your newly created ODAA Autonomous Database resource in the Azure portal and use the OCI console link provided there:
+
+
+2. At the OCI console login page, select the "Entra ID" link:
+
+
+   You will land on the Oracle ADB databases overview page:
+
+
+3. Scroll down to the networking section on the ADB homepage.
+
+
+
+4. Click the "Network Security Groups" link to reach the NSG page. On the "Security Rules" tab, click the "Add Rules" button to add an ingress rule.
+
+
+5. In the rule, choose CIDR as the "Source Type" and paste the AKS VNet address space you copied earlier into the field. Finally, click the "Add" button to create the rule. If you prefer to script this step, see the sketch after this list.
+
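+The same rule can also be added with the OCI CLI. This is a minimal sketch under assumptions: the NSG OCID is a placeholder you must replace, and the CIDR is the example address space from above:
+
+~~~powershell
+# Minimal sketch (assumed values): allow ingress from the AKS VNet CIDR on the ADB NSG
+$nsgOcid = "ocid1.networksecuritygroup.oc1..example"   # placeholder - replace with your NSG OCID
+$aksCidr = "10.0.0.0/16"                               # AKS VNet address space copied earlier
+$rules = '[{"direction":"INGRESS","protocol":"all","source":"' + $aksCidr + '","sourceType":"CIDR_BLOCK","isStateless":false}]'
+oci network nsg rules add --nsg-id $nsgOcid --security-rules $rules
+~~~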
+
+## Set the private DNS zones for the AKS VNet via the Azure Portal
+
+1. From the overview page of the deployed ADB database, copy the FQDN of the "Database URL name" and the database private IP address, both found in the Network section.
+
+ 
+
+2. Navigate to the AKS resource group (aks-user[assigned group number]).
+
+ 
+
+3. The DNS record needs to be created in the following two private DNS zones:
+ * adb.eu-paris-1.oraclecloud.com
+ * adb.eu-paris-1.oraclecloudapps.com
+
+   The following steps are identical for both private DNS zones. In each private DNS zone, open DNS management and click the **Recordsets** link.
+
+ 
+
+   **Important!** The other two private DNS zones can be skipped for now; they are only needed once we set up ADB with high availability.
+
+4. In the Recordsets menu, click the Add button to add the FQDN and private IP address of the deployed ADB Shared database.
+
+ 
+
+5. After you click the Add button, a new pane opens where the ADB name and the private IP address need to be entered. In addition, change the TTL to 10 seconds.
+
+ 
+
+6. Finally, save the configuration and repeat the same steps for the second private DNS zone.
+
+ 
+
+
+---
+
+## Setup via Command Line (Optional)
+
+If you prefer to configure the DNS settings via the command line (for example, to enable later automation), you can follow this chapter. It repeats the same steps you just performed in the Azure portal UI.
+
+### Retrieve ODAA "Database private URL" (FQDN)
+
+> [!CAUTION]
+> **Important:** We need to query the private DNS zones created by the **ODAA deployment**.
+
+~~~powershell
+# Switch to the subscription where ODAA is deployed
+$subODAA = "sub-mhodaa"   # name of the ODAA subscription
+az account set -s $subODAA
+$rgODAA = "odaa-user02"   # replace with your ODAA resource group name
+
+# List the private DNS zones created by the ODAA deployment
+$zones = az network private-dns zone list -g $rgODAA --query "[].name" -o tsv
+$zones
+~~~
+
+The listed ODAA private DNS zone FQDNs should look similar to this:
+
+~~~text
+t6bchxz9.adb.eu-paris-1.oraclecloud.com
+t6bchxz9.adb.eu-paris-1.oraclecloudapps.com
+~~~
+
+Extract the first label of the first ODAA FQDN entry of `$zones`:
+
+~~~powershell
+$yourADBDNSLabel = ($zones[0] -split '\.')[0]
+~~~
+
+The extracted ODAA FQDN label should look similar to this:
+
+~~~text
+t6bchxz9
+~~~
+
+Get the private IP address of the ODAA ADB from the private DNS zone:
+
+~~~powershell
+$fqdnODAAIpv4 = az network private-dns record-set a show -g $rgODAA --zone-name $zones[0] --name "@" --query "aRecords[0].ipv4Address" -o tsv
+~~~
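+
+Before switching subscriptions, it is worth a quick sanity check that both values were retrieved (a minimal sketch using the variables defined above):
+
+~~~powershell
+# Fail early if either value is missing; otherwise show the record we are about to create
+if (-not $yourADBDNSLabel -or -not $fqdnODAAIpv4) {
+    Write-Error "Could not determine the ADB DNS label or private IP - check the ODAA resource group name."
+} else {
+    Write-Host "A record to create: $yourADBDNSLabel -> $fqdnODAAIpv4"
+}
+~~~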
+
+Find your Azure Kubernetes Service in the Azure portal.
+
+
+There you will find the subscription name, which is also used by the private DNS zones linked to your AKS VNet.
+
+
+~~~powershell
+# Switch back to the subscription where AKS is deployed
+$subAKS = "sub-mh2"          # replace with your AKS subscription name
+az account set -s $subAKS
+$rgAKS = "aks-user02"        # replace with your AKS resource group name
+$vnetAKSName = "aks-user02"  # replace with your AKS VNet name
+
+# List all private DNS zones in the AKS resource group
+$zonesAKS = az network private-dns zone list --resource-group $rgAKS --query "[].name" -o tsv
+$zonesAKS
+~~~
+
+Resulting zones should look similar to this:
+
+~~~text
+adb.eu-frankfurt-1.oraclecloud.com
+adb.eu-frankfurt-1.oraclecloudapps.com
+adb.eu-paris-1.oraclecloud.com
+adb.eu-paris-1.oraclecloudapps.com
+~~~
+
+~~~powershell
+# Create A records in each private DNS zone with a TTL of 10 seconds
+foreach ($zoneAKS in $zonesAKS) {
+    Write-Host "Creating A record '$yourADBDNSLabel' in zone: $zoneAKS"
+
+    # Create the record set with a TTL of 10 seconds
+    az network private-dns record-set a create `
+        --resource-group $rgAKS `
+        --zone-name $zoneAKS `
+        --name $yourADBDNSLabel `
+        --ttl 10
+
+    # Add the ODAA private IP address to the record set
+    az network private-dns record-set a add-record `
+        --resource-group $rgAKS `
+        --zone-name $zoneAKS `
+        --record-set-name $yourADBDNSLabel `
+        --ipv4-address $fqdnODAAIpv4
+}
+~~~
+
+Verify the created A records:
+
+~~~powershell
+foreach ($zoneAKS in $zonesAKS) {
+ Write-Host "Listing A records for zone: $zoneAKS"
+ az network private-dns record-set a list --zone-name $zoneAKS --resource-group $rgAKS --query "[].{Name:name, Records:aRecords[0].ipv4Address}" -o table
+}
+~~~
+
+~~~text
+Listing A records for zone: adb.eu-frankfurt-1.oraclecloud.com
+Name Records
+-------- -------------
+t6bchxz9 192.168.0.185
+Listing A records for zone: adb.eu-frankfurt-1.oraclecloudapps.com
+Name Records
+-------- -------------
+t6bchxz9 192.168.0.185
+Listing A records for zone: adb.eu-paris-1.oraclecloud.com
+Name Records
+-------- -------------
+t6bchxz9 192.168.0.185
+Listing A records for zone: adb.eu-paris-1.oraclecloudapps.com
+Name Records
+-------- -------------
+t6bchxz9 192.168.0.185
+~~~
+
+> [!NOTE]
+> The script creates A records in all four private DNS zones linked to the AKS VNet, but for the moment we only use the zones that contain "paris" in their name.
+
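+To confirm that name resolution works from inside the cluster, you can run a throwaway pod and query the ADB FQDN. This is a minimal sketch; the FQDN is the example value from above, and `kubectl` is assumed to be configured against your AKS cluster:
+
+~~~powershell
+# Resolve the ADB FQDN from a temporary pod inside the AKS cluster
+kubectl run dns-check --rm -it --restart=Never --image=busybox:1.36 -- `
+    nslookup t6bchxz9.adb.eu-paris-1.oraclecloud.com
+~~~
+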
+[Back to workspace README](../../README.md)
\ No newline at end of file
diff --git a/03-Azure/01-05-SAP/01_MicroHack-SAP-Cashflow-Prediction/SAP-Data-MicroHack.pptx b/03-Azure/01-05-SAP/01_MicroHack-SAP-Cashflow-Prediction/SAP-Data-MicroHack.pptx
index f61ffd3a2..97a482d10 100644
Binary files a/03-Azure/01-05-SAP/01_MicroHack-SAP-Cashflow-Prediction/SAP-Data-MicroHack.pptx and b/03-Azure/01-05-SAP/01_MicroHack-SAP-Cashflow-Prediction/SAP-Data-MicroHack.pptx differ