diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/buildContainerImage.sh b/OracleDatabase/RAC/OracleDNSServer/containerfiles/buildContainerImage.sh similarity index 96% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/buildContainerImage.sh rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/buildContainerImage.sh index 1a091363b7..488eb05028 100755 --- a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/buildContainerImage.sh +++ b/OracleDatabase/RAC/OracleDNSServer/containerfiles/buildContainerImage.sh @@ -6,7 +6,7 @@ # # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. # -# Copyright (c) 2018-2024 Oracle and/or its affiliates. +# Copyright (c) 2018-2025 Oracle and/or its affiliates. # # shellcheck disable=SC2154 usage() { @@ -121,7 +121,7 @@ echo "Building image '$IMAGE_NAME' ..." # BUILD THE IMAGE (replace all environment variables) BUILD_START=$(date '+%s') -if docker build --force-rm=true --no-cache=true "${DOCKEROPS[@]}" "${PROXY_SETTINGS[@]}" -t "$IMAGE_NAME" -f Dockerfile .; then +if docker build --force-rm=true --no-cache=true "${DOCKEROPS[@]}" "${PROXY_SETTINGS[@]}" -t "$IMAGE_NAME" -f Containerfile .; then BUILD_END=$(date '+%s') BUILD_ELAPSED=$((BUILD_END - BUILD_START)) diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/Dockerfile b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/Containerfile similarity index 97% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/Dockerfile rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/Containerfile index 3562a9a902..407c8c52b3 100644 --- a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/Dockerfile +++ b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/Containerfile @@ -1,6 +1,6 @@ # LICENSE UPL 1.0 # -# Copyright (c) 2018-2024 Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2018-2025 Oracle and/or its affiliates. All rights reserved. # # ORACLE DOCKERFILES PROJECT # -------------------------- diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/checkSpace.sh b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/checkSpace.sh old mode 100644 new mode 100755 similarity index 93% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/checkSpace.sh rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/checkSpace.sh index 3156880589..e3b9792f3d --- a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/checkSpace.sh +++ b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/checkSpace.sh @@ -1,7 +1,7 @@ #!/bin/bash # LICENSE UPL 1.0 # -# Copyright (c) 2018-2024 Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2018-2025 Oracle and/or its affiliates. All rights reserved. 
# # Since: January, 2018 # Author: paramdeep.saini@oracle.com, sanjay.singh@oracle.com diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/crDNS b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/crDNS old mode 100644 new mode 100755 similarity index 100% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/crDNS rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/crDNS diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/functions.sh b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/functions.sh old mode 100644 new mode 100755 similarity index 96% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/functions.sh rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/functions.sh index 88ca6814ee..692c3c3bae --- a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/functions.sh +++ b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/functions.sh @@ -1,7 +1,7 @@ #!/bin/bash # LICENSE UPL 1.0 # -# Copyright (c) 2018-2024 Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2018-2025 Oracle and/or its affiliates. All rights reserved. # # Since: January, 2018 # Author: paramdeep.saini@oracle.com diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/named.conf b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/named.conf similarity index 100% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/named.conf rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/named.conf diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/named.empty b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/named.empty similarity index 100% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/named.empty rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/named.empty diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/named.localhost b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/named.localhost similarity index 100% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/named.localhost rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/named.localhost diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/named.loopback b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/named.loopback similarity index 100% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/named.loopback rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/named.loopback diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/named.rfc1912.zones b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/named.rfc1912.zones similarity index 100% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/named.rfc1912.zones rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/named.rfc1912.zones diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/priv_reversezonefile b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/priv_reversezonefile similarity index 100% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/priv_reversezonefile rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/priv_reversezonefile diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/priv_zonefile b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/priv_zonefile similarity index 100% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/priv_zonefile 
rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/priv_zonefile diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/reversezonefile b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/reversezonefile similarity index 100% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/reversezonefile rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/reversezonefile diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/runOracle.sh b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/runOracle.sh old mode 100644 new mode 100755 similarity index 95% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/runOracle.sh rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/runOracle.sh index 02b114debd..ae120bbe43 --- a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/runOracle.sh +++ b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/runOracle.sh @@ -1,7 +1,7 @@ #!/bin/bash # LICENSE UPL 1.0 # -# Copyright (c) 2018-2024 Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2018-2025 Oracle and/or its affiliates. All rights reserved. # # Since: January, 2018 # Author: paramdeep.saini@oracle.com diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/setupDNSServer.sh b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/setupDNSServer.sh similarity index 99% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/setupDNSServer.sh rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/setupDNSServer.sh index 33eed1700c..3e0878ddb7 100755 --- a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/setupDNSServer.sh +++ b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/setupDNSServer.sh @@ -1,7 +1,7 @@ #!/bin/bash # LICENSE UPL 1.0 # -# Copyright (c) 2018-2024 Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2018-2025 Oracle and/or its affiliates. All rights reserved. # # Since: January, 2018 # Author: paramdeep.saini@oracle.com, sanjay.singh@oracle.com diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/setupLinuxEnv.sh b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/setupLinuxEnv.sh old mode 100644 new mode 100755 similarity index 91% rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/setupLinuxEnv.sh rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/setupLinuxEnv.sh index 4e9a80d65f..d7ea102bd2 --- a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/setupLinuxEnv.sh +++ b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/setupLinuxEnv.sh @@ -1,7 +1,7 @@ #!/bin/bash # LICENSE UPL 1.0 # -# Copyright (c) 2018-2024 Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2018-2025 Oracle and/or its affiliates. All rights reserved. 
#
# Since: January, 2018
# Author: paramdeep.saini@oracle.com, sanjay.singh@oracle.com
diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/setupSudo.sh b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/setupSudo.sh
old mode 100644
new mode 100755
similarity index 84%
rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/setupSudo.sh
rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/setupSudo.sh
index b5b3fa71dc..61398fa08c
--- a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/setupSudo.sh
+++ b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/setupSudo.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# LICENSE UPL 1.0
#
-# Copyright (c) 2018-2024 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2018-2025 Oracle and/or its affiliates. All rights reserved.
#
# Since: January, 2018
# Author: paramdeep.saini@oracle.com
diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/zonefile b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/zonefile
similarity index 100%
rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/zonefile
rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/zonefile
diff --git a/OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/zonefile.sample b/OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/zonefile.sample
similarity index 100%
rename from OracleDatabase/RAC/OracleDNSServer/dockerfiles/latest/zonefile.sample
rename to OracleDatabase/RAC/OracleDNSServer/containerfiles/latest/zonefile.sample
diff --git a/OracleDatabase/RAC/OracleRACStorageServer/README.md b/OracleDatabase/RAC/OracleRACStorageServer/README.md
index 77c2cde84f..995f8c664f 100644
--- a/OracleDatabase/RAC/OracleRACStorageServer/README.md
+++ b/OracleDatabase/RAC/OracleRACStorageServer/README.md
@@ -1,189 +1,214 @@
# Oracle ASM on NFS Server for RAC testing
-Sample Docker and Podman build files to facilitate installation, configuration, and environment setup for DevOps users.
+Learn how to use example Podman build files to facilitate installation, configuration, and environment setup of an NFS server for Oracle Real Application Clusters (Oracle RAC) testing for DevOps users.
-**IMPORTANT:** This image can be used to setup ASM on NFS for RAC. You can skip if you have physical block devices or NAS server for Oracle RAC and Grid. You need to make sure that NFS server container must be up and running for RAC functioning. This image is for only testing purpose.
+**IMPORTANT:** This image can be used to set up ASM on NFS for Oracle RAC. You can skip this procedure if you have physical block devices or a NAS server for Oracle RAC and Oracle Grid Infrastructure. You must ensure that the NFS server container is up and running for Oracle RAC functioning.
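As a quick sanity check once everything is deployed, you can confirm that the storage container is still up before starting or restarting Oracle RAC containers. This is a minimal sketch; it assumes the container is named `racnode-storage`, as in the deployment examples later in this README.

```bash
# Confirm the NFS storage container is up; an empty result means it is not running
podman ps --filter "name=racnode-storage" --format "{{.Names}} {{.Status}}"
```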
-Refer below instructions for setup of NFS Container for RAC - +Refer to the following instructions for setup of NFS Container for Oracle RAC: -- [Oracle ASM on NFS Server for RAC testing](#oracle-asm-on-nfs-server-for-rac-testing) -- [How to build NFS Storage Container Image](#how-to-build-nfs-storage-container-image) - - [How to build NFS Storage Container Image on Docker Host](#how-to-build-nfs-storage-container-image-on-docker-host) - - [How to build NFS Storage Container Image on Podman Host](#how-to-build-nfs-storage-container-image-on-podman-host) +- [Oracle ASM on NFS Server for Oracle RAC testing](#oracle-asm-on-nfs-server-for-rac-testing) +- [How to build NFS Storage Container Image on Container host](#how-to-build-nfs-storage-container-image-on-container-host) - [Create Bridge Network](#create-bridge-network) -- [NFS Server installation on Host](#nfs-server-installation-on-host) -- [Running RACStorageServer container](#running-racstorageserver-container) - - [RAC Storage container for Docker Host Machine](#rac-storage-container-for-docker-host-machine) - - [RAC Storage Container for Podman Host Machine](#rac-storage-container-for-podman-host-machine) +- [NFS Server installation on Podman Host](#nfs-server-installation-on-podman-host) +- [SELinux Configuration on Podman Host](#selinux-configuration-on-podman-host) +- [Oracle RAC Storage Container for Podman Host](#oracle-rac-storage-container-for-podman-host) +- [Oracle RAC Storage container for Docker Host](#oracle-rac-storage-container-for-docker-host) - [Create NFS Volume](#create-nfs-volume) - [Copyright](#copyright) -## How to build NFS Storage Container Image +## How to build NFS Storage Container Image on Container host +To create the files for Oracle RAC storage, ensure that you have at least 60 GB space available for the container. -### How to build NFS Storage Container Image on Docker Host -You need to make sure that you have atleast 60GB space available for container to create the files for RAC storage. +**IMPORTANT:** If you are behind a proxy, then you must set the `http_proxy` and `https_proxy` environment variables (env variables) to values based on your environment before building the image. -**IMPORTANT:** If you are behind the proxy, you need to set http_proxy env variable based on your enviornment before building the image. Please ensure that you have the `podman-docker` package installed on your OL8 Podman host to run the command using the docker utility. -```bash -dnf install podman-docker -y -``` +To assist in building the images, you can use the [`buildContainerImage.sh`](containerfiles/buildContainerImage.sh) script. See below for instructions and usage. -To assist in building the images, you can use the [buildDockerImage.sh](dockerfiles/buildDockerImage.sh) script. See below for instructions and usage. +In this guide, we refer to Oracle Linux 8 onwards as the Podman Host, and Oracle Linux 7 as the Docker Host machines. -The `buildDockerImage.sh` script is just a utility shell script that performs MD5 checks and is an easy way for beginners to get started. Expert users are welcome to directly call `docker build` with their prefered set of parameters. Go into the **dockerfiles** folder and run the **buildDockerImage.sh** script: +The `buildContainerImage.sh` script is just a utility shell script that performs MD5 checks. It provides an easy way for beginners to get started. Expert users are welcome to directly call `podman build` with their preferred set of parameters. 
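For example, a direct build roughly equivalent to what the script runs for the `latest` version might look like the following sketch; the repository path is illustrative, and the proxy build argument is only needed if you are behind a proxy.

```bash
# Build the storage server image directly, without the helper script
cd docker-images/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest
podman build --force-rm=true --no-cache=true \
  --build-arg http_proxy=${http_proxy} \
  -t oracle/rac-storage-server:latest -f Containerfile .
```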
Go into the **containerfiles** folder and run the **buildContainerImage.sh** script on your Podman host: ```bash -cd /docker-images/OracleDatabase/RAC/OracleRACStorageServer/dockerfiles -./buildDockerImage.sh -v 19.3.0 +./buildContainerImage.sh -v (Software Version) +./buildContainerImage.sh -v latest ``` -For detailed usage of command, please execute folowing command: +In a successful build, you should see build messages similar to the following: ```bash -cd /docker-images/OracleDatabase/RAC/OracleRACStorageServer/dockerfiles -./buildDockerImage.sh -h -``` -### How to build NFS Storage Container Image on Podman Host - -You need to make sure that you have atleast 60GB space available for container to create the files for RAC storage. - -**IMPORTANT:** If you are behind the proxy, you need to set `http_proxy` and `https_proxy` env variable based on your enviornment before building the image. - -To assist in building the images, you can use the [buildDockerImage.sh](dockerfiles/buildDockerImage.sh) script. See below for instructions and usage. - -The `buildDockerImage.sh` script is just a utility shell script that performs MD5 checks and is an easy way for beginners to get started. Expert users are welcome to directly call `docker build` with their prefered set of parameters. Go into the **dockerfiles** folder and run the **buildDockerImage.sh** script: - -```bash -cd /docker-images/OracleDatabase/RAC/OracleRACStorageServer/dockerfiles -./buildDockerImage.sh -v latest -``` -You would see successful build message similar like below- -```bash - Oracle RAC Storage Server Podman Image version latest is ready to be extended: + Oracle RAC Storage Server Container Image version latest is ready to be extended: --> oracle/rac-storage-server:latest ``` -## Create Bridge Network -Before creating container, create the bridge private network for NFS storage container. +**NOTE**: To build an Oracle RAC storage Image for the Docker host, pass the version `ol7` to buildContainerImage.sh -On the host- +For detailed usage notes for this script, run the following command: ```bash -docker network create --driver=bridge --subnet=192.168.17.0/24 rac_priv1_nw +./buildContainerImage.sh -h +Usage: buildContainerImage.sh -v [version] [-o] [Docker build option] +Builds a Docker Image for Oracle Database. + +Parameters: + -v: version to build + Choose one of: latest ol7 + Choose "latest" version for podman host machines + Choose "ol7" for docker host machines + -o: passes on Docker build option ``` -**Note:** You can change subnet according to your environment. - +### Create Bridge Network +Before creating the container, create the bridge public network for the NFS storage container. -## NFS Server installation on Host -Ensure to install NFS server rpms on host to utilize NFS volumes in containers- +The following are examples of creating `bridge`, `macvlan` or `ipvlan` [networks](https://docs.podman.io/en/latest/markdown/podman-network-create.1.html). 
+Example of creating bridge networks:
```bash
-yum -y install nfs-utils
+podman network create --driver=bridge --subnet=10.0.20.0/24 rac_pub1_nw
```
-## Running RACStorageServer container
-
-### RAC Storage container for Docker Host Machine
-
-#### Prerequisites for RAC Storage Container for Docker Host
-
-Create placeholder for NFS storage and make sure it is empty -
+Example of creating macvlan networks:
```bash
-export ORACLE_DBNAME=ORCLCDB
-mkdir -p /docker_volumes/asm_vol/$ORACLE_DBNAME
-rm -rf /docker_volumes/asm_vol/$ORACLE_DBNAME/asm_disk0*
+podman network create -d macvlan --subnet=10.0.20.0/24 -o parent=ens5 rac_pub1_nw
```
-Execute following command to create the container:
-
+Example of creating ipvlan networks:
```bash
-export ORACLE_DBNAME=ORCLCDB
-docker run -d -t --hostname racnode-storage \
---dns-search=example.com --cap-add SYS_ADMIN --cap-add AUDIT_WRITE \
---volume /docker_volumes/asm_vol/$ORACLE_DBNAME:/oradata --init \
---network=rac_priv1_nw --ip=192.168.17.80 --tmpfs=/run \
---volume /sys/fs/cgroup:/sys/fs/cgroup:ro \
---name racnode-storage oracle/rac-storage-server:19.3.0
+podman network create -d ipvlan --subnet=10.0.20.0/24 -o parent=ens5 rac_pub1_nw
```
-**IMPORTANT:** During the container startup 5 files named as `asm_disk0[1-5].img` will be created under /oradata.If the files are already present, they will not be recreated.These files can be used for ASM storage in RAC containers.
-
-**NOTE**: Expose directory to container which has atleast 60GB. In the above example, we are using `/docker_volumes/asm_vol/$ORACLE_DBNAME` and you need to change values according to your env. Inside container, it will be /oradata and do not change this.
-
-In the above example, we used **192.168.17.0/24** subnet for NFS server. You can change the subnet values according to your environment. Also, SELINUX must be disabled or in permissive mode in Docker Host Machine.
-
-To check the racstorage container/services creation logs , please tail docker logs. It will take 10 minutes to create the racnode-storage container service.
+**Note:** You can change the subnet and parent network interfaces according to your environment.
+### NFS Server installation on Podman Host
+To use NFS volumes in containers, you must install NFS server RPMs on the Podman host. For example:
```bash
-docker logs -f racnode-storage
+dnf install -y nfs-utils
```
-you should see following in docker logs output:
+### SELinux Configuration on Podman Host
+If SELinux is enabled on the Podman host, then you must install an additional SELinux module that allows the container processes to write to the exported storage location on the Podman host. To check whether SELinux is enabled, run the `getenforce` command.
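A minimal check might look like the following; if the command prints `Enforcing` or `Permissive`, SELinux is enabled and the steps below apply, while `Disabled` means they can be skipped.

```bash
getenforce
# Expected output is one of: Enforcing, Permissive, Disabled
```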
+
+Copy [`rac-storage.te`](./rac-storage.te) to the `/var/opt` folder in your host and run the following commands:
```bash
+cd /var/opt
+make -f /usr/share/selinux/devel/Makefile rac-storage.pp
+semodule -i rac-storage.pp
+semodule -l | grep rac-storage
```
-
-### RAC Storage Container for Podman Host Machine
+### Oracle RAC Storage Container for Podman Host
+To create the container, run the following set of commands in the order presented below:
#### Prerequisites for RAC Storage Container for Podman Host
-Create placeholder for NFS storage and make sure it is empty -
+Create a placeholder for NFS storage and ensure that it is empty:
```bash
export ORACLE_DBNAME=ORCLCDB
mkdir -p /scratch/stage/rac-storage/$ORACLE_DBNAME
rm -rf /scratch/stage/rac-storage/$ORACLE_DBNAME/asm_disk0*
```
-
-If SELinux is enabled on Podman Host (you can check by running `sestatus` command), then execute below to make SELinux policy as `permissive` and reboot the host machine. This will allow permissions to write to `asm-disks*` in the `/oradata` folder inside the podman containers-
+If SELinux is enabled on the host machine, then run the following commands:
```bash
-sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
-reboot
+semanage fcontext -a -t container_file_t /scratch/stage/rac-storage/$ORACLE_DBNAME
+restorecon -v /scratch/stage/rac-storage/$ORACLE_DBNAME
```
-
-Execute following command to create the container:
+#### Deploying Oracle RAC Storage Container for Podman Host
+If you are building an Oracle RAC storage container for the Podman Host, then you can run the following commands:
```bash
export ORACLE_DBNAME=ORCLCDB
podman run -d -t \
--hostname racnode-storage \
- --dns-search=example.com \
+ --dns-search=example.info \
+ --dns 10.0.20.25 \
--cap-add SYS_ADMIN \
--cap-add AUDIT_WRITE \
--cap-add NET_ADMIN \
+ -e DNS_SERVER=10.0.20.25 \
+ -e DOMAIN=example.info \
--volume /scratch/stage/rac-storage/$ORACLE_DBNAME:/oradata \
- --network=rac_priv1_nw \
- --ip=192.168.17.80 \
+ --network=rac_pub1_nw --ip=10.0.20.80 \
--systemd=always \
--restart=always \
--name racnode-storage \
localhost/oracle/rac-storage-server:latest
```
-To check the racstorage container/services creation logs , please tail docker logs. It will take 10 minutes to create the racnode-storage container service.
+To check the Oracle RAC storage container and service creation logs, you can run a `tail` command on the setup log inside the container. It should take approximately 10 minutes to create the racnode-storage container service.
```bash
podman exec racnode-storage tail -f /tmp/storage_setup.log
```
-You would see successful message like below -
+In a successful deployment, you should see messages similar to the following:
```bash
+Export list for racnode-storage:
+/oradata *
#################################################
 Setup Completed
#################################################
```
-**NOTE**: Expose directory to container which has atleast 60GB. In the above example, we are using `/scratch/stage/rac-storage/$ORACLE_DBNAME` and you need to change values according to your env. Inside container, it will be /oradata and do not change this.
-In the above example, we used **192.168.17.0/24** subnet for NFS server. You can change the subnet values according to your environment.
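Before moving on to the Docker host flow, you can optionally confirm from the Podman host that the container is exporting `/oradata` over NFS. This is a sketch that assumes the container IP `10.0.20.80` used in the example above; `showmount` is provided by the `nfs-utils` package installed earlier.

```bash
# List the NFS exports offered by the storage container
showmount -e 10.0.20.80
# Expected output includes: /oradata *
```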
+### Oracle RAC Storage container for Docker Host + +To use NFS volumes in containers, you must install NFS server RPMs on the Podman host: + +```bash +yum install -y nfs-utils +``` +#### Prerequisites for an Oracle RAC Storage Container for Docker Host + +Create a placeholder for NFS storage, and ensure that it is empty: +```bash +export ORACLE_DBNAME=ORCLCDB +mkdir -p /scratch/docker_volumes/asm_vol/$ORACLE_DBNAME +rm -rf /scratch/docker_volumes/asm_vol/$ORACLE_DBNAME/asm_disk0* +``` + +#### Deploying Oracle RAC Storage Container for Docker Host + +If you are building an Oracle RAC storage container on Docker host machines, then run the following commands: + +```bash +export ORACLE_DBNAME=ORCLCDB +docker run -d -t \ +--hostname racnode-storage \ +--dns-search=example.info \ +--cap-add SYS_ADMIN \ +--cap-add AUDIT_WRITE \ +--volume /scratch/docker_volumes/asm_vol/$ORACLE_DBNAME:/oradata --init \ +--network=rac_pub1_nw --ip=10.0.20.80 \ +--tmpfs=/run \ +--volume /sys/fs/cgroup:/sys/fs/cgroup:ro \ +--name racnode-storage \ +oracle/rac-storage-server:ol7 +``` + +To check the Oracle RAC storage container and services creation logs, you can run a `tail` command on the Docker logs. It should take 10 minutes to create the racnode-storage container service. + +```bash +docker logs -f racnode-storage +``` + +**IMPORTANT:** During the container startup, five files with the name `asm_disk0[1-5].img` will be created under `/oradata`. If the files are already present, then they will not be recreated. These files can be used for ASM storage in Oracle RAC containers. + +**NOTE**: Place the directory in a container that has at least 60 GB. In the preceding example, we are using `/scratch/stage/rac-storage/$ORACLE_DBNAME`. Change these values according to your environment. Inside the container, the directory will be `/oradata`. Do not change this value. -**Note** : If SELINUX is enabled on the Podman host, then you must create an SELinux policy for Oracle RAC on Podman. For details about this procedure, see "How to Configure Podman for SELinux Mode" in the publication [Oracle Real Application Clusters Installation Guide for Podman Oracle Linux x86-64](https://docs.oracle.com/en/database/oracle/oracle-database/21/racpd/target-configuration-oracle-rac-podman.html#GUID-59138DF8-3781-4033-A38F-E0466884D008). +In the preceding example, we use **192.168.17.0/24** as the subnet for the NFS server. You can change the subnet values according to your environment. +You should see following in the Docker logs output: -**IMPORTANT:** During the container startup 5 files named as `asm_disk0[1-5].img` will be created under /oradata.If the files are already present, they will not be recreated.These files can be used for ASM storage in RAC containers. + +**IMPORTANT:** The NFS volume must be `/oradata`, which you will export to Oracle RAC containers for ASM storage. It will take approximately 10 minutes to set up the NFS server. 
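Once the setup finishes, one way to confirm that the five ASM disk files were created is to list `/oradata` inside the container; this sketch assumes the Docker host deployment above, with the container named `racnode-storage`.

```bash
# The asm_disk0[1-5].img files should be listed once setup is complete
docker exec racnode-storage ls -lh /oradata
```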
### Create NFS Volume -Create NFS volume using following command on Podman Host: +#### Create NFS volume using the following command on the Podman Host + +```bash +podman volume create --driver local \ +--opt type=nfs \ +--opt o=addr=10.0.20.80,rw,bg,hard,tcp,vers=3,timeo=600,rsize=32768,wsize=32768,actimeo=0 \ +--opt device=10.0.20.80:/oradata \ +racstorage +``` + +#### Create NFS volume using following command on Docker Host ```bash docker volume create --driver local \ @@ -192,11 +217,17 @@ docker volume create --driver local \ --opt device=192.168.17.80:/oradata \ racstorage ``` +**IMPORTANT:** If you are not using the `192.168.17.0/24` subnet then you must change **addr=192.168.17.80** based on your environment. -**IMPORTANT:** If you are not using 192.168.17.0/24 subnet then you need to change **addr=192.168.17.25** based on your environment. +## Environment variables explained -**IMPORTANT:** The NFS volume must be `/oradata` which you will export to RAC containers for ASM storage. It will take 10 minutes for setting up NFS server. +| Environment Variable | Description | +|----------------------|-----------------| +| DNS_SERVER | Default set to `10.0.20.25`. Specify the comma-separated list of DNS server IP addresses where both Oracle RAC nodes are resolved. | +| DOMAIN | Default set to `example.info`. Specify the domain details for the Oracle RAC Container Environment. | -## Copyright +## License +Unless otherwise noted, all scripts and files hosted in this repository that are required to build the container images are under UPL 1.0 license. -Copyright (c) 2014-2024 Oracle and/or its affiliates. All rights reserved. \ No newline at end of file +## Copyright +Copyright (c) 2014-2025 Oracle and/or its affiliates. \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRACStorageServer/README1.md b/OracleDatabase/RAC/OracleRACStorageServer/README1.md new file mode 100644 index 0000000000..77c2cde84f --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/README1.md @@ -0,0 +1,202 @@ +# Oracle ASM on NFS Server for RAC testing +Sample Docker and Podman build files to facilitate installation, configuration, and environment setup for DevOps users. + +**IMPORTANT:** This image can be used to setup ASM on NFS for RAC. You can skip if you have physical block devices or NAS server for Oracle RAC and Grid. You need to make sure that NFS server container must be up and running for RAC functioning. This image is for only testing purpose. 
+ +Refer below instructions for setup of NFS Container for RAC - + +- [Oracle ASM on NFS Server for RAC testing](#oracle-asm-on-nfs-server-for-rac-testing) +- [How to build NFS Storage Container Image](#how-to-build-nfs-storage-container-image) + - [How to build NFS Storage Container Image on Docker Host](#how-to-build-nfs-storage-container-image-on-docker-host) + - [How to build NFS Storage Container Image on Podman Host](#how-to-build-nfs-storage-container-image-on-podman-host) +- [Create Bridge Network](#create-bridge-network) +- [NFS Server installation on Host](#nfs-server-installation-on-host) +- [Running RACStorageServer container](#running-racstorageserver-container) + - [RAC Storage container for Docker Host Machine](#rac-storage-container-for-docker-host-machine) + - [RAC Storage Container for Podman Host Machine](#rac-storage-container-for-podman-host-machine) +- [Create NFS Volume](#create-nfs-volume) +- [Copyright](#copyright) + +## How to build NFS Storage Container Image + +### How to build NFS Storage Container Image on Docker Host +You need to make sure that you have atleast 60GB space available for container to create the files for RAC storage. + +**IMPORTANT:** If you are behind the proxy, you need to set http_proxy env variable based on your enviornment before building the image. Please ensure that you have the `podman-docker` package installed on your OL8 Podman host to run the command using the docker utility. +```bash +dnf install podman-docker -y +``` + +To assist in building the images, you can use the [buildDockerImage.sh](dockerfiles/buildDockerImage.sh) script. See below for instructions and usage. + +The `buildDockerImage.sh` script is just a utility shell script that performs MD5 checks and is an easy way for beginners to get started. Expert users are welcome to directly call `docker build` with their prefered set of parameters. Go into the **dockerfiles** folder and run the **buildDockerImage.sh** script: + +```bash +cd /docker-images/OracleDatabase/RAC/OracleRACStorageServer/dockerfiles +./buildDockerImage.sh -v 19.3.0 +``` + +For detailed usage of command, please execute folowing command: +```bash +cd /docker-images/OracleDatabase/RAC/OracleRACStorageServer/dockerfiles +./buildDockerImage.sh -h +``` +### How to build NFS Storage Container Image on Podman Host + +You need to make sure that you have atleast 60GB space available for container to create the files for RAC storage. + +**IMPORTANT:** If you are behind the proxy, you need to set `http_proxy` and `https_proxy` env variable based on your enviornment before building the image. + +To assist in building the images, you can use the [buildDockerImage.sh](dockerfiles/buildDockerImage.sh) script. See below for instructions and usage. + +The `buildDockerImage.sh` script is just a utility shell script that performs MD5 checks and is an easy way for beginners to get started. Expert users are welcome to directly call `docker build` with their prefered set of parameters. Go into the **dockerfiles** folder and run the **buildDockerImage.sh** script: + +```bash +cd /docker-images/OracleDatabase/RAC/OracleRACStorageServer/dockerfiles +./buildDockerImage.sh -v latest +``` +You would see successful build message similar like below- +```bash + Oracle RAC Storage Server Podman Image version latest is ready to be extended: + + --> oracle/rac-storage-server:latest +``` + +## Create Bridge Network +Before creating container, create the bridge private network for NFS storage container. 
+ +On the host- +```bash +docker network create --driver=bridge --subnet=192.168.17.0/24 rac_priv1_nw +``` + +**Note:** You can change subnet according to your environment. + + +## NFS Server installation on Host +Ensure to install NFS server rpms on host to utilize NFS volumes in containers- + +```bash +yum -y install nfs-utils +``` +## Running RACStorageServer container + +### RAC Storage container for Docker Host Machine + +#### Prerequisites for RAC Storage Container for Docker Host + +Create placeholder for NFS storage and make sure it is empty - +```bash +export ORACLE_DBNAME=ORCLCDB +mkdir -p /docker_volumes/asm_vol/$ORACLE_DBNAME +rm -rf /docker_volumes/asm_vol/$ORACLE_DBNAME/asm_disk0* +``` + +Execute following command to create the container: + +```bash +export ORACLE_DBNAME=ORCLCDB +docker run -d -t --hostname racnode-storage \ +--dns-search=example.com --cap-add SYS_ADMIN --cap-add AUDIT_WRITE \ +--volume /docker_volumes/asm_vol/$ORACLE_DBNAME:/oradata --init \ +--network=rac_priv1_nw --ip=192.168.17.80 --tmpfs=/run \ +--volume /sys/fs/cgroup:/sys/fs/cgroup:ro \ +--name racnode-storage oracle/rac-storage-server:19.3.0 +``` + +**IMPORTANT:** During the container startup 5 files named as `asm_disk0[1-5].img` will be created under /oradata.If the files are already present, they will not be recreated.These files can be used for ASM storage in RAC containers. + +**NOTE**: Expose directory to container which has atleast 60GB. In the above example, we are using `/docker_volumes/asm_vol/$ORACLE_DBNAME` and you need to change values according to your env. Inside container, it will be /oradata and do not change this. + +In the above example, we used **192.168.17.0/24** subnet for NFS server. You can change the subnet values according to your environment. Also, SELINUX must be disabled or in permissive mode in Docker Host Machine. + +To check the racstorage container/services creation logs , please tail docker logs. It will take 10 minutes to create the racnode-storage container service. + +```bash +docker logs -f racnode-storage +``` + +you should see following in docker logs output: + +```bash +################################################# +runOracle.sh: NFS Server is up and running +Create NFS volume for /oradata +################################################# +``` + +### RAC Storage Container for Podman Host Machine + +#### Prerequisites for RAC Storage Container for Podman Host + +Create placeholder for NFS storage and make sure it is empty - +```bash +export ORACLE_DBNAME=ORCLCDB +mkdir -p /scratch/stage/rac-storage/$ORACLE_DBNAME +rm -rf /scratch/stage/rac-storage/$ORACLE_DBNAME/asm_disk0* +``` + +If SELinux is enabled on Podman Host (you can check by running `sestatus` command), then execute below to make SELinux policy as `permissive` and reboot the host machine. 
This will allow permissions to write to `asm-disks*` in the `/oradata` folder inside the podman containers- +```bash +sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config +reboot +``` + +Execute following command to create the container: + +```bash +export ORACLE_DBNAME=ORCLCDB +podman run -d -t \ + --hostname racnode-storage \ + --dns-search=example.com \ + --cap-add SYS_ADMIN \ + --cap-add AUDIT_WRITE \ + --cap-add NET_ADMIN \ + --volume /scratch/stage/rac-storage/$ORACLE_DBNAME:/oradata \ + --network=rac_priv1_nw \ + --ip=192.168.17.80 \ + --systemd=always \ + --restart=always \ + --name racnode-storage \ + localhost/oracle/rac-storage-server:latest +``` + +To check the racstorage container/services creation logs , please tail docker logs. It will take 10 minutes to create the racnode-storage container service. + +```bash +podman exec racnode-storage tail -f /tmp/storage_setup.log +``` +You would see successful message like below - +```bash +################################################# + Setup Completed +################################################# +``` + +**NOTE**: Expose directory to container which has atleast 60GB. In the above example, we are using `/scratch/stage/rac-storage/$ORACLE_DBNAME` and you need to change values according to your env. Inside container, it will be /oradata and do not change this. + +In the above example, we used **192.168.17.0/24** subnet for NFS server. You can change the subnet values according to your environment. + +**Note** : If SELINUX is enabled on the Podman host, then you must create an SELinux policy for Oracle RAC on Podman. For details about this procedure, see "How to Configure Podman for SELinux Mode" in the publication [Oracle Real Application Clusters Installation Guide for Podman Oracle Linux x86-64](https://docs.oracle.com/en/database/oracle/oracle-database/21/racpd/target-configuration-oracle-rac-podman.html#GUID-59138DF8-3781-4033-A38F-E0466884D008). + + +**IMPORTANT:** During the container startup 5 files named as `asm_disk0[1-5].img` will be created under /oradata.If the files are already present, they will not be recreated.These files can be used for ASM storage in RAC containers. + +### Create NFS Volume +Create NFS volume using following command on Podman Host: + +```bash +docker volume create --driver local \ +--opt type=nfs \ +--opt o=addr=192.168.17.80,rw,bg,hard,tcp,vers=3,timeo=600,rsize=32768,wsize=32768,actimeo=0 \ +--opt device=192.168.17.80:/oradata \ +racstorage +``` + +**IMPORTANT:** If you are not using 192.168.17.0/24 subnet then you need to change **addr=192.168.17.25** based on your environment. + +**IMPORTANT:** The NFS volume must be `/oradata` which you will export to RAC containers for ASM storage. It will take 10 minutes for setting up NFS server. + +## Copyright + +Copyright (c) 2014-2024 Oracle and/or its affiliates. All rights reserved. \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/buildContainerImage.sh b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/buildContainerImage.sh new file mode 100755 index 0000000000..5600c030cd --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/buildContainerImage.sh @@ -0,0 +1,132 @@ +#!/bin/bash +############################# +# Copyright (c) 2025, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com, sanjay.singh@oracle.com +############################ + +usage() { + cat << EOF + +Usage: buildContainerImage.sh -v [version] [-o] [Docker build option] +Builds a Docker Image for Oracle Database. + +Parameters: + -v: version to build + Choose "latest" version for podman host machines + Choose "ol7" version for docker host machines + -o: passes on Docker build option + +############################# +# Copyright (c) 2025, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ + +EOF + exit 0 +} + +############## +#### MAIN #### +############## + +# Parameters +VERSION="latest" +export SKIPMD5=0 +DOCKEROPS="" + +while getopts "hiv:o:" optname; do + case "$optname" in + "h") + usage + ;; + "v") + VERSION="$OPTARG" + ;; + "o") + DOCKEROPS="$OPTARG" + ;; + "?") + usage; + exit 1; + ;; + *) + # Should not occur + echo "Unknown error while processing options inside buildContainerImage.sh" + ;; + esac +done + +# Oracle Database Image Name +IMAGE_NAME="oracle/rac-storage-server:$VERSION" +if command -v docker &>/dev/null; then + CONTAINER_BUILD_TOOL="docker" +elif command -v podman &>/dev/null; then + CONTAINER_BUILD_TOOL="podman" +else + echo "Neither Docker nor Podman is installed. Please install either Docker or Podman to proceed." + exit 1 +fi +# Go into version folder +cd "$VERSION" || exit + +echo "==========================" +echo "DOCKER info:" +docker info +echo "==========================" + +# Proxy settings +PROXY_SETTINGS="" +# shellcheck disable=SC2154 +if [ "${http_proxy}" != "" ]; then + PROXY_SETTINGS="$PROXY_SETTINGS --build-arg http_proxy=${http_proxy}" +fi +# shellcheck disable=SC2154 +if [ "${https_proxy}" != "" ]; then + PROXY_SETTINGS="$PROXY_SETTINGS --build-arg https_proxy=${https_proxy}" +fi +# shellcheck disable=SC2154 +if [ "${ftp_proxy}" != "" ]; then + PROXY_SETTINGS="$PROXY_SETTINGS --build-arg ftp_proxy=${ftp_proxy}" +fi +# shellcheck disable=SC2154 +if [ "${no_proxy}" != "" ]; then + PROXY_SETTINGS="$PROXY_SETTINGS --build-arg no_proxy=${no_proxy}" +fi +# shellcheck disable=SC2154 +if [ "$PROXY_SETTINGS" != "" ]; then + echo "Proxy settings were found and will be used during the build." +fi + +# ################## # +# BUILDING THE IMAGE # +# ################## # +echo "Building image '$IMAGE_NAME' ..." + +# BUILD THE IMAGE (replace all environment variables) +BUILD_START=$(date '+%s') +# shellcheck disable=SC2086 +$CONTAINER_BUILD_TOOL build --force-rm=true --no-cache=true $DOCKEROPS $PROXY_SETTINGS -t $IMAGE_NAME -f Containerfile . || { + echo "There was an error building the image." + exit 1 +} +BUILD_END=$(date '+%s') +# shellcheck disable=SC2154,SC2003 +BUILD_ELAPSED=$((BUILD_END - BUILD_START)) + +echo "" +# shellcheck disable=SC2181,SC2320 +if [ $? -eq 0 ]; then +cat << EOF + Oracle RAC Storage Server Container Image version $VERSION is ready to be extended: + + --> $IMAGE_NAME + + Build completed in $BUILD_ELAPSED seconds. + +EOF + +else + echo "Oracle RAC Storage Server Docker Image was NOT successfully created. Check the output and correct any reported problems with the docker build operation." 
+fi \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/Containerfile b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/Containerfile new file mode 100644 index 0000000000..ab0ad5338d --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/Containerfile @@ -0,0 +1,64 @@ +############################# +# Copyright (c) 2025, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ +# +# ORACLE CONTAINERFILES PROJECT +# -------------------------- +# This is the Containerfile for Oracle Database RAC Storage Server. This file create NFS server for ASM storage. +# +# HOW TO BUILD THIS IMAGE +# ----------------------- +# Put all downloaded files in the same directory as this Containerfile +# Run: +# $ podman build -t oracle/rac-storage-server:latest. +# +# Pull base image +# --------------- +FROM oraclelinux:8 + +# Environment variables required for this build (do NOT change) +# ------------------------------------------------------------- +# Linux Env Variable +ENV SETUP_LINUX_FILE="setupLinuxEnv.sh" \ + INSTALL_DIR=/opt/scripts \ + EXPORTFILE=exportfile \ + RUN_FILE="runOracle.sh" \ + SUDO_SETUP_FILE="setupSudo.sh" \ + INITSH="initsh" \ + BIN="/usr/sbin" \ + ORADATA="/oradata" \ + container="true" +# Use second ENV so that variable get substituted +ENV INSTALL_SCRIPTS=$INSTALL_DIR/install \ + SCRIPT_DIR=$INSTALL_DIR/startup + +# Copy binaries +# ------------- +# Copy Linux setup Files +COPY $SETUP_LINUX_FILE $SUDO_SETUP_FILE $INSTALL_SCRIPTS/ + +# Setup Scripts +COPY $RUN_FILE $EXPORTFILE $INITSH $SCRIPT_DIR/ + +RUN chmod 755 $INSTALL_DIR/install/*.sh && \ + $INSTALL_DIR/install/$SETUP_LINUX_FILE && \ + $INSTALL_DIR/install/$SUDO_SETUP_FILE && \ + sync + +RUN rm -rf $INSTALL_DIR/install && \ + chmod 755 $SCRIPT_DIR/*.sh && \ + echo "nohup $SCRIPT_DIR/runOracle.sh &" >> /etc/rc.local && \ + chmod +x /etc/rc.d/rc.local && \ + cp $SCRIPT_DIR/$INITSH /usr/bin/$INITSH && \ + chmod 755 /usr/bin/$INITSH && \ + chmod 666 $SCRIPT_DIR/$EXPORTFILE + +USER root +VOLUME ["/oradata"] +WORKDIR /workdir + +# Define default command to start Oracle Database. +# hadolint ignore=DL3025 +ENTRYPOINT /usr/bin/$INITSH diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/checkSpace.sh b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/checkSpace.sh new file mode 100755 index 0000000000..040550962b --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/checkSpace.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# +############################# +# Copyright (c) 2025, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ +# +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +REQUIRED_SPACE_GB=5 +AVAILABLE_SPACE_GB=`df -PB 1G / | tail -n 1 | awk '{print $4}'` + +if [ $AVAILABLE_SPACE_GB -lt $REQUIRED_SPACE_GB ]; then + script_name=`basename "$0"` + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + echo "$script_name: ERROR - There is not enough space available in the docker container." + echo "$script_name: The container needs at least $REQUIRED_SPACE_GB GB , but only $AVAILABLE_SPACE_GB available." 
+ echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + exit 1; +fi; diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/exportfile b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/exportfile new file mode 100644 index 0000000000..3fb4d631e0 --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/exportfile @@ -0,0 +1 @@ +/oradata *(rw,sync,no_wdelay,no_root_squash,insecure) diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/initsh b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/initsh new file mode 100755 index 0000000000..70b02bc084 --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/initsh @@ -0,0 +1,10 @@ +#!/bin/bash +# Copyright (c) 2023, Oracle and/or its affiliates +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +echo "Creating env variables file /etc/storage_env_vars" +/bin/bash -c "cat /proc/1/environ | tr '\0' '\n' > /etc/storage_env_vars" +/bin/bash -c "sed -i -e 's/^/export /' /etc/storage_env_vars" + +echo "Starting Systemd" +exec /lib/systemd/systemd diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/runOracle.sh b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/runOracle.sh new file mode 100755 index 0000000000..e4170ae874 --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/runOracle.sh @@ -0,0 +1,172 @@ +#!/bin/bash +# +############################# +# Copyright (c) 2025, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com, sanjay.singh@oracle.com +############################ +# Description: Runs NFS server inside the container +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +if [ -f /etc/storage_env_vars ]; then +# shellcheck disable=SC1091 + source /etc/storage_env_vars +fi + +logfile="/tmp/storage_setup.log" + +touch $logfile +chmod 666 $logfile +# shellcheck disable=SC2034,SC2086 +progname="$(basename $0)" + +####################### Constants ################# +# shellcheck disable=SC2034 +declare -r FALSE=1 +# shellcheck disable=SC2034 +declare -r TRUE=0 +export REQUIRED_SPACE_GB=55 +export ORADATA=/oradata +export INSTALL_COMPLETED_FILE="/workdir/installcomplete" +export FILE_COUNT=0 +################################################## + +check_space () +{ + local REQUIRED_SPACE_GB=$1 + # shellcheck disable=SC2006 + AVAILABLE_SPACE_GB=`df -B 1G $ORADATA | tail -n 1 | awk '{print $4}'` + if [ ! -f ${INSTALL_COMPLETED_FILE} ] ;then + # shellcheck disable=SC2086 + if [ $AVAILABLE_SPACE_GB -lt $REQUIRED_SPACE_GB ]; then + # shellcheck disable=SC2006 + script_name=`basename "$0"` + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" | tee -a $logfile + echo "$script_name: ERROR - There is not enough space available in the docker container under $ORADATA." | tee -a $logfile + echo "$script_name: The container needs at least $REQUIRED_SPACE_GB GB , but only $AVAILABLE_SPACE_GB available." | tee -a $logfile + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
| tee -a $logfile + exit 1; + else + echo " Space check passed : $ORADATA has available space $AVAILABLE_SPACE_GB and ASM storage set to $REQUIRED_SPACE_GB" | tee -a $logfile + fi; + fi; +} +####################################### ETC Host Function ############################################################# + +setupEtcResolvConf() +{ +local stat=3 +# shellcheck disable=SC2154 +if [ "$action" == "" ]; then +# shellcheck disable=SC2236 + if [ ! -z "${DNS_SERVER}" ] ; then + sudo sh -c "echo \"search ${DOMAIN}\" > /etc/resolv.conf" + sudo sh -c "echo \"nameserver ${DNS_SERVER}\" >> /etc/resolv.conf" + fi +fi + +} + +SetupEtcHosts() +{ +# shellcheck disable=SC2034 +local stat=3 +# shellcheck disable=SC2034 +local HOST_LINE +if [ "$action" == "" ]; then +# shellcheck disable=SC2236 + if [ ! -z "${HOSTFILE}" ]; then + if [ -f "${HOSTFILE}" ]; then + sudo sh -c "cat \"${HOSTFILE}\" > /etc/hosts" + fi + else + sudo sh -c "echo -e \"127.0.0.1\tlocalhost.localdomain\tlocalhost\" > /etc/hosts" + sudo sh -c "echo -e \"$PUBLIC_IP\t$PUBLIC_HOSTNAME.$DOMAIN\t$PUBLIC_HOSTNAME\" >> /etc/hosts" + fi +fi + +} + + + + ################################### + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # + ############# MAIN ################ + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # + ################################### + + if [ ! -d "$ORADATA" ] ;then + echo "$ORADATA dir doesn't exist! exiting" | tee -a $logfile + exit 1 + fi + # shellcheck disable=SC2086 + if [ -z $ASM_STORAGE_SIZE_GB ] ;then + echo "ASM_STORAGE_SIZE env variable is not defined! Assigning 50GB default" | tee -a $logfile + ASM_STORAGE_SIZE_GB=50 + else + echo "ASM STORAGE SIZE set to : $ASM_STORAGE_SIZE_GB" | tee -a $logfile + fi + ####### Populating resolv.conf and /etc/hosts ### + setupEtcResolvConf + SetupEtcHosts + #################### + echo "Oracle user will be the owner for /oradata" | tee -a $logfile + sudo chown -R oracle:oinstall /oradata + + echo "Checking Space" | tee -a $logfile + check_space $ASM_STORAGE_SIZE_GB + # shellcheck disable=SC2004 + ASM_DISKS_SIZE=$(($ASM_STORAGE_SIZE_GB/5)) + count=1; + while [ $count -le 5 ]; + do + echo "Creating ASM Disks $ORADATA/asm_disk0$count.img of size $ASM_DISKS_SIZE" | tee -a $logfile + + if [ ! -f $ORADATA/asm_disk0$count.img ];then + dd if=/dev/zero of=$ORADATA/asm_disk0$count.img bs=1G count=$ASM_DISKS_SIZE + chown oracle:oinstall $ORADATA/asm_disk0$count.img + else + echo "$ORADATA/asm_disk0$count.img file already exist! 
Skipping file creation" | tee -a $logfile + fi + # shellcheck disable=SC2004 + count=$(($count+1)) + done + # shellcheck disable=SC2012 + FILE_COUNT=$(ls $ORADATA/asm_disk0* | wc -l) + # shellcheck disable=SC2086 + if [ ${FILE_COUNT} -ge 5 ];then + echo "Touching ${INSTALL_COMPLETED_FILE}" | tee -a $logfile + touch ${INSTALL_COMPLETED_FILE} + fi + + echo "#################################################" | tee -a $logfile + echo " Starting NFS Server Setup " | tee -a $logfile + echo "#################################################" | tee -a $logfile + + + echo "Starting Nfs Server" | tee -a $logfile + systemctl start nfs-utils.service | tee -a $logfile + systemctl restart rpcbind.service | tee -a $logfile + systemctl start nfs-server.service | tee -a $logfile + + echo "Checking Nfs Service" | tee -a $logfile + systemctl status nfs-utils.service | tee -a $logfile + + echo "Checking rpc bind service" + systemctl status rpcbind.service | tee -a $logfile + + echo "Setting up /etc/exports" + # shellcheck disable=SC2086,SC2002 + cat $SCRIPT_DIR/$EXPORTFILE | tee -a /etc/exports + + echo "Exporting File System" + sudo /usr/sbin/exportfs -r | tee -a $logfile + + echo "Checking exported mountpoints" | tee -a $logfile + showmount -e | tee -a $logfile + + echo "#################################################" | tee -a $logfile + echo " Setup Completed " | tee -a $logfile + echo "#################################################" | tee -a $logfile diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/setupLinuxEnv.sh b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/setupLinuxEnv.sh new file mode 100755 index 0000000000..8c0978d9bc --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/setupLinuxEnv.sh @@ -0,0 +1,33 @@ +#!/bin/bash +############################# +# Copyright (c) 2025, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com, sanjay.singh@oracle.com +############################ +# Description: Sets up the unix environment for DB installation. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
+# + +# Setup filesystem and oracle user +# Adjust file permissions, go to /opt/oracle as user 'oracle' to proceed with Oracle installation +# ------------------------------------------------------------ +mkdir /oradata && \ +chmod ug+x /opt/scripts/startup/*.sh && \ +if grep -q "Oracle Linux Server release 9" /etc/oracle-release; then \ + dnf install -y oracle-database-preinstall-23ai && \ + cp /etc/security/limits.d/oracle-database-preinstall-23ai.conf /etc/security/limits.d/grid-database-preinstall-23ai.conf && \ + sed -i 's/oracle/grid/g' /etc/security/limits.d/grid-database-preinstall-23ai.conf && \ + rm -f /etc/systemd/system/oracle-database-preinstall-23ai-firstboot.service && \ + sed -i 's/^TasksMax\S*/TasksMax=80%/g' /usr/lib/systemd/system/user-.slice.d/10-defaults.conf && \ + dnf clean all; \ +else \ + dnf -y install oraclelinux-developer-release-el8 && \ + dnf -y install oracle-database-preinstall-23ai && \ + cp /etc/security/limits.d/oracle-database-preinstall-23ai.conf /etc/security/limits.d/grid-database-preinstall-23ai.conf && \ + sed -i 's/oracle/grid/g' /etc/security/limits.d/grid-database-preinstall-23ai.conf && \ + rm -f /etc/rc.d/init.d/oracle-database-preinstall-23ai-firstboot && \ + dnf clean all; \ +fi && \ +dnf -y install net-tools which zip unzip tar openssh-server vim-minimal which vim-minimal passwd sudo nfs-utils && \ +dnf clean all diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/setupSudo.sh b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/setupSudo.sh new file mode 100755 index 0000000000..8a067a90d7 --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/latest/setupSudo.sh @@ -0,0 +1,13 @@ +#!/bin/bash +############################# +# Copyright (c) 2025, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com, sanjay.singh@oracle.com +############################ +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +chmod 666 /etc/sudoers +echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +chmod 440 /etc/sudoers diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/Containerfile b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/Containerfile new file mode 100644 index 0000000000..1a047aff0d --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/Containerfile @@ -0,0 +1,57 @@ +# LICENSE UPL 1.0 +# +# Copyright (c) 1982-2025 Oracle and/or its affiliates. All rights reserved. +# +# ORACLE DOCKERFILES PROJECT +# -------------------------- +# This is the Dockerfile for Oracle Database 18c RAC Storage Server. This file create NFS server for ASM storage. +# +# HOW TO BUILD THIS IMAGE +# ----------------------- +# Put all downloaded files in the same directory as this Dockerfile +# Run: +# $ docker build -t oracle/rac-storage-server:19.3.0. 
+# +# Pull base image +# --------------- +FROM oraclelinux:7-slim + +# Environment variables required for this build (do NOT change) +# ------------------------------------------------------------- +# Linux Env Variable +ENV SETUP_LINUX_FILE="setupLinuxEnv.sh" \ + INSTALL_DIR=/opt/scripts \ + EXPORTFILE=exportfile \ + RUN_FILE="runOracle.sh" \ + SUDO_SETUP_FILE="setupSudo.sh" \ + BIN="/usr/sbin" \ + ORADATA="/oradata" \ + container="true" +# Use a second ENV so that the variables get substituted +ENV INSTALL_SCRIPTS=$INSTALL_DIR/install \ + SCRIPT_DIR=$INSTALL_DIR/startup + +# Copy binaries +# ------------- +# Copy Linux setup Files +COPY $SETUP_LINUX_FILE $SUDO_SETUP_FILE $INSTALL_SCRIPTS/ + +# Setup Scripts +COPY $RUN_FILE $EXPORTFILE $SCRIPT_DIR/ + +RUN chmod 755 $INSTALL_DIR/install/*.sh && \ + $INSTALL_DIR/install/$SETUP_LINUX_FILE && \ + $INSTALL_DIR/install/$SUDO_SETUP_FILE && \ + sync + +RUN rm -rf $INSTALL_DIR/install && \ + chmod 755 $SCRIPT_DIR/*.sh && \ + chmod 666 $SCRIPT_DIR/$EXPORTFILE + +USER oracle +VOLUME ["/oradata"] +WORKDIR /home/oracle + +# Define default command to start the NFS storage server (shell form so the ENV variables expand at run time). + +CMD exec $SCRIPT_DIR/$RUN_FILE diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/checkSpace.sh b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/checkSpace.sh new file mode 100755 index 0000000000..eedb57330f --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/checkSpace.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 1982-2025 Oracle and/or its affiliates. All rights reserved. +# +# Since: January, 2018 +# Author: paramdeep.saini@oracle.com, sanjay.singh@oracle.com +# Description: Checks the available space of the system. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
+# + +if [ -f /etc/rac_env_vars ]; then +# shellcheck disable=SC1091 +source /etc/rac_env_vars +fi +logfile="/tmp/orod.log" + +touch $logfile +chmod 666 /tmp/orod.log +# shellcheck disable=SC2086,SC2034 +progname="$(basename $0)" + +####################### Constants ################# +# shellcheck disable=SC2034 +declare -r FALSE=1 +declare -r TRUE=0 +export REQUIRED_SPACE_GB=55 +export ORADATA=/oradata +export INSTALL_COMPLETED_FILE="/home/oracle/installcomplete" +export FILE_COUNT=0 +################################################## + +check_space () +{ +local REQUIRED_SPACE_GB=$1 +# shellcheck disable=SC2006,SC2086 +AVAILABLE_SPACE_GB=`df -B 1G $ORADATA | tail -n 1 | awk '{print $4}'` +# shellcheck disable=SC1009 +if [ ! -f ${INSTALL_COMPLETED_FILE} ] ;then +# shellcheck disable=SC2086 +if [ $AVAILABLE_SPACE_GB -lt $REQUIRED_SPACE_GB ]; then + # shellcheck disable=SC2006 + script_name=`basename "$0"` + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + echo "$script_name: ERROR - There is not enough space available in the docker container under $ORADATA." + echo "$script_name: The container needs at least $REQUIRED_SPACE_GB GB , but only $AVAILABLE_SPACE_GB available." + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + exit 1; +else + echo " Space check passed : $ORADATA has avilable space $AVAILABLE_SPACE_GB and ASM storage set to $REQUIRED_SPACE_GB" +fi; +fi; +} + +########### SIGINT handler ############ +function _int() { + echo "Stopping container." +local cmd +echo "Stopping nfs server" +sudo /usr/sbin/rpc.nfsd 0 +echo "Executing exportfs au" +sudo /usr/sbin/exportfs -au +echo "Executing exportfs f" +sudo /usr/sbin/exportfs -f +touch /tmp/stop +} + +########### SIGTERM handler ############ +function _term() { + echo "Stopping container." + echo "SIGTERM received, shutting down!" +local cmd +echo "Stopping nfs server" +sudo /usr/sbin/rpc.nfsd 0 +echo "Executing exportfs au" +sudo /usr/sbin/exportfs -au +echo "Executing exportfs f" +sudo /usr/sbin/exportfs -f +touch /tmp/sigterm +} + +########### SIGKILL handler ############ +function _kill() { + echo "SIGKILL received, shutting down database!" +# shellcheck disable=SC2034 +local cmd +echo "Stopping nfs server" +sudo /usr/sbin/rpc.nfsd 0 +echo "Executing exportfs au" +sudo /usr/sbin/exportfs -au +echo "Executing exportfs f" +sudo /usr/sbin/exportfs -f +touch /tmp/sigkill +} + +################################### +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # +############# MAIN ################ +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # +################################### + +# Set SIGINT handler +trap _int SIGINT + +# Set SIGTERM handler +trap _term SIGTERM + +# Set SIGKILL handler +# shellcheck disable=SC2173 +trap _kill SIGKILL + +if [ ! -d "$ORADATA" ] ;then +echo "$ORADATA dir doesn't exist! exiting" +exit 1 +fi +# shellcheck disable=SC2086 +if [ -z $ASM_STORAGE_SIZE_GB ] ;then +echo "ASM_STORAGE_SIZE env variable is not defined! Assigning 50GB default" +ASM_STORAGE_SIZE_GB=50 +else +echo "ASM STORAGE SIZE set to : $ASM_STORAGE_SIZE_GB" +fi + +echo "Oracle user will be the owner for /oradata" +sudo chown -R oracle:oinstall /oradata + +echo "Checking Space" +check_space $ASM_STORAGE_SIZE_GB +# shellcheck disable=SC2004 +ASM_DISKS_SIZE=$(($ASM_STORAGE_SIZE_GB/5)) +count=1; +while [ $count -le 5 ]; +do +echo "Creating ASM Disks $ORADATA/asm_disk0$count.img of size $ASM_DISKS_SIZE" + +if [ ! 
-f $ORADATA/asm_disk0$count.img ];then +dd if=/dev/zero of=$ORADATA/asm_disk0$count.img bs=1G count=$ASM_DISKS_SIZE +else +echo "$ORADATA/asm_disk0$count.img file already exist! Skipping file creation" +fi +# shellcheck disable=SC2004 +count=$(($count+1)) +done +# shellcheck disable=SC2012 +FILE_COUNT=$(ls $ORADATA/asm_disk0* | wc -l) +# shellcheck disable=SC2086 +if [ ${FILE_COUNT} -ge 5 ];then +echo "Touching ${INSTALL_COMPLETED_FILE}" +touch ${INSTALL_COMPLETED_FILE} +fi + +echo "#################################################" +echo " Starting NFS Server Setup " +echo "#################################################" + + +echo "Setting up /etc/exports" +# shellcheck disable=SC2086,SC2002 +cat $SCRIPT_DIR/$EXPORTFILE | sudo tee -a /etc/exports + +echo "Starting RPC Bind " +sudo /sbin/rpcbind -w + +echo "Exporting File System" +sudo /usr/sbin/exportfs -r + +echo "Starting RPC NFSD" +sudo /usr/sbin/rpc.nfsd + +echo "Starting RPC Mountd" +sudo /usr/sbin/rpc.mountd --manage-gids + +#echo "Starting Rpc Quotad" +sudo /usr/sbin/rpc.rquotad + +echo "Checking NFS server" +# shellcheck disable=SC2006,SC2196,SC2126 +PROC_COUNT=`ps aux | egrep 'rpcbind|mountd|nfsd' | grep -v "grep -E rpcbind|mountd|nfsd" | wc -l` +# shellcheck disable=SC2086 +if [ $PROC_COUNT -gt 1 ]; then +echo "####################################################" +echo " NFS Server is up and running " +echo " Create NFS volume for $ORADATA/$ORACLE_SID " +echo "####################################################" +echo $TRUE +else +echo "NFS Server Setup Failed" +fi + +tail -f /tmp/orod.log & +childPID=$! +wait $childPID diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/setupLinuxEnv.sh b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/setupLinuxEnv.sh new file mode 100755 index 0000000000..389aaf7a5b --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/setupLinuxEnv.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 1982-2025 Oracle and/or its affiliates. All rights reserved. +# +# Since: January, 2018 +# Author: paramdeep.saini@oracle.com, sanjay.singh@oracle.com +# Description: Sets up the unix environment for DB installation. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +# Setup filesystem and oracle user +# Adjust file permissions, go to /opt/oracle as user 'oracle' to proceed with Oracle installation +# ------------------------------------------------------------ +mkdir /oradata && \ +chmod ug+x /opt/scripts/startup/*.sh && \ +yum -y install oracle-database-preinstall-18c net-tools which zip unzip tar openssh-server openssh-client vim-minimal which vim-minimal passwd sudo nfs-utils && \ +yum clean all diff --git a/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/setupSudo.sh b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/setupSudo.sh new file mode 100755 index 0000000000..d65b6e29ae --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/containerfiles/ol7/setupSudo.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 1982-2025 Oracle and/or its affiliates. All rights reserved. +# +# Since: November, 2018 +# Author: paramdeep.saini@oracle.com, sanjay.singh@oracle.com +# Description: setup the sudo for Oracle user +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
+# + +chmod 666 /etc/sudoers +echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +chmod 440 /etc/sudoers diff --git a/OracleDatabase/RAC/OracleRACStorageServer/rac-storage.te b/OracleDatabase/RAC/OracleRACStorageServer/rac-storage.te new file mode 100644 index 0000000000..b57aaaa277 --- /dev/null +++ b/OracleDatabase/RAC/OracleRACStorageServer/rac-storage.te @@ -0,0 +1,31 @@ +module rac-storage 1.0; + +require { + type container_init_t; + type hugetlbfs_t; + type nfsd_fs_t; + type rpc_pipefs_t; + type default_t; + type kernel_t; + class filesystem mount; + class filesystem unmount; + class file { read write open }; + class dir { read watch }; + class bpf { map_create map_read map_write }; + class system module_request; + class fifo_file { open read write }; +} + +#============= container_init_t ============== +allow container_init_t hugetlbfs_t:filesystem mount; +allow container_init_t nfsd_fs_t:filesystem mount; +allow container_init_t rpc_pipefs_t:filesystem mount; +allow container_init_t nfsd_fs_t:file { read write open }; +allow container_init_t nfsd_fs_t:dir { read watch }; +allow container_init_t rpc_pipefs_t:dir { read watch }; +allow container_init_t rpc_pipefs_t:fifo_file { open read write }; +allow container_init_t rpc_pipefs_t:filesystem unmount; +allow container_init_t self:bpf map_create; +allow container_init_t self:bpf { map_read map_write }; +allow container_init_t default_t:dir read; +allow container_init_t kernel_t:system module_request; \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/README.md b/OracleDatabase/RAC/OracleRealApplicationClusters/README.md index 8f3bd66ed0..6103f0c74d 100644 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/README.md +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/README.md @@ -1,1198 +1,300 @@ -# Oracle Real Application Clusters in Linux Containers - -Learn about container deployment options for Oracle Real Application Clusters (Oracle RAC) Release 21c (21.3) - -## Overview of Running Oracle RAC in Containers - -Oracle Real Application Clusters (Oracle RAC) is an option to the award-winning Oracle Database Enterprise Edition. Oracle RAC is a cluster database with a shared cache architecture that overcomes the limitations of traditional shared-nothing and shared-disk approaches to provide highly scalable and available database solutions for all business applications. -Oracle RAC uses Oracle Clusterware as a portable cluster software that allows clustering of independent servers so that they cooperate as a single system and Oracle Automatic Storage Management (Oracle ASM) to provide simplified storage management that is consistent across all servers and storage platforms. -Oracle Clusterware and Oracle ASM are part of the Oracle Grid Infrastructure, which bundles both solutions in an easy to deploy software package. - -For more information on Oracle RAC Database 21c refer to the [Oracle Database documentation](http://docs.oracle.com/en/database/). 
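The `rac-storage.te` module added above is only the type-enforcement source; on an SELinux-enforcing host it still has to be compiled and loaded before the storage container will run cleanly. A minimal sketch using the standard SELinux userspace tools (the `checkpolicy` and `policycoreutils` packages are assumed to be installed; they are not part of this change):

```bash
# Compile the type-enforcement source into a policy module
checkmodule -M -m -o rac-storage.mod rac-storage.te
# Package the module and install it into the running policy
semodule_package -o rac-storage.pp -m rac-storage.mod
semodule -i rac-storage.pp
# Confirm the module is loaded
semodule -l | grep rac-storage
```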
- -## Using this Image - -To create an Oracle RAC environment, complete these steps in order: - -- [Oracle Real Application Clusters in Linux Containers](#oracle-real-application-clusters-in-linux-containers) - - [Overview of Running Oracle RAC in Containers](#overview-of-running-oracle-rac-in-containers) - - [Using this Image](#using-this-image) - - [Section 1 : Prerequisites for running Oracle RAC in containers](#section-1--prerequisites-for-running-oracle-rac-in-containers) - - [Section 2: Building Oracle RAC Database Container Images](#section-2-building-oracle-rac-database-container-images) - - [Oracle RAC Container Image for Docker](#oracle-rac-container-image-for-docker) - - [Oracle RAC Container Image for Podman](#oracle-rac-container-image-for-podman) - - [Section 3: Network and Password Management](#section-3--network-and-password-management) - - [Section 4: Oracle RAC on Docker](#section-4-oracle-rac-on-docker) - - [Section 4.1 : Prerequisites for Running Oracle RAC on Docker](#section-41--prerequisites-for-running-oracle-rac-on-docker) - - [Section 4.2: Setup Oracle RAC Container on Docker](#section-42-setup-oracle-rac-container-on-docker) - - [Deploying Oracle RAC on Container with Block Devices on Docker](#deploying-oracle-rac-on-container-with-block-devices-on-docker) - - [Deploying Oracle RAC on Container With Oracle RAC Storage Container](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container) - - [Assign networks to Oracle RAC containers](#assign-networks-to-oracle-rac-containers) - - [Start the first container](#start-the-first-container) - - [Connect to the Oracle RAC container](#connect-to-the-oracle-rac-container) - - [Section 4.3: Adding an Oracle RAC Node using a Docker Container](#section-43-adding-an-oracle-rac-node-using-a-docker-container) - - [Deploying Oracle RAC Additional Node on Container with Block Devices on Docker](#deploying-oracle-rac-additional-node-on-container-with-block-devices-on-docker) - - [Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Docker](#deploying-oracle-rac-additional-node-on-container-with-oracle-rac-storage-container-on-docker) - - [Assign Network to additional Oracle RAC container](#assign-network-to-additional-oracle-rac-container) - - [Start Oracle RAC racnode2 container](#start-oracle-rac-racnode2-container) - - [Connect to the Oracle RAC racnode2 container](#connect-to-the-oracle-rac-racnode2-container) - - [Section 4.4: Setup Oracle RAC Container on Docker with Docker Compose](#section-44-setup-oracle-rac-container-on-docker-with-docker-compose) - - [Section 5: Oracle RAC on Podman](#section-5-oracle-rac-on-podman) - - [Section 5.1 : Prerequisites for Running Oracle RAC on Podman](#section-51--prerequisites-for-running-oracle-rac-on-podman) - - [Section 5.2: Setup RAC Containers on Podman](#section-52-setup-rac-containers-on-podman) - - [Deploying Oracle RAC Containers with Block Devices on Podman](#deploying-oracle-rac-containers-with-block-devices-on-podman) - - [Deploying Oracle RAC on Container With Oracle RAC Storage Container on Podman](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container-on-podman) - - [Assign networks to Oracle RAC containers Created Using Podman](#assign-networks-to-oracle-rac-containers-created-using-podman) - - [Start the first container Created Using Podman](#start-the-first-container-created-using-podman) - - [Connect to the Oracle RAC container Created Using Podman](#connect-to-the-oracle-rac-container-created-using-podman) - - 
[Section 5.3: Adding a Oracle RAC Node using a container on Podman](#section-53-adding-a-oracle-rac-node-using-a-container-on-podman) - - [Deploying Oracle RAC Additional Node on Container with Block Devices on Podman](#deploying-oracle-rac-additional-node-on-container-with-block-devices-on-podman) - - [Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Podman](#deploying-oracle-rac-additional-node-on-container-with-oracle-rac-storage-container-on-podman) - - [Assign Network to additional Oracle RAC container Created Using Podman](#assign-network-to-additional-oracle-rac-container-created-using-podman) - - [Start Oracle RAC container](#start-oracle-rac-container) - - [Section 5.4: Setup Oracle RAC Container on Podman with Podman Compose](#section-54-setup-oracle-rac-container-on-podman-with-podman-compose) - - [Section 6: Connecting to an Oracle RAC Database](#section-6-connecting-to-an-oracle-rac-database) - - [Section 7: Environment Variables for the First Node](#section-7-environment-variables-for-the-first-node) - - [Section 8: Environment Variables for the Second and Subsequent Nodes](#section-8-environment-variables-for-the-second-and-subsequent-nodes) - - [Section 9: Building a Patched Oracle RAC Container Image](#section-9-building-a-patched-oracle-rac-container-image) - - [Section 10 : Sample Container Files for Older Releases](#section-10--sample-container-files-for-older-releases) - - [Docker](#docker) - - [Podman](#podman) - - [Section 11 : Support](#section-11--support) - - [Docker Support](#docker-support) - - [Podman Support](#podman-support) - - [Section 12 : License](#section-12--license) - - [Section 13 : Copyright](#section-13--copyright) - -## Section 1 : Prerequisites for running Oracle RAC in containers - -Before you proceed to section two, you must complete each of the steps listed in this section. - -To review the resource requirements for Oracle RAC, see Oracle Database 21c Release documentation [Oracle Grid Infrastructure Installation and Upgrade Guide](https://docs.oracle.com/en/database/oracle/oracle-database/21/cwlin/index.html) - -Complete each of the following prerequisites: - -1. Ensure that each container that you will deploy as part of your cluster meets the minimum hardware requirements for Oracle RAC and Oracle Grid Infrastructure software. -2. Ensure all data files, control files, redo log files, and the server parameter file (`SPFILE`) used by the Oracle RAC database reside on shared storage that is accessible by all the Oracle RAC database instances. An Oracle RAC database is a shared-everything database, so each Oracle RAC Node must have the same access. -3. Configure the following addresses manually in your DNS. - - - Public IP address for each container - - Private IP address for each container - - Virtual IP address for each container - - Three single client access name (SCAN) addresses for the cluster. -4. If you are planning to set up RAC on Docker, refer Docker Host machine details in [Section 4.1](#section-41--prerequisites-for-running-oracle-rac-on-docker) -5. If you are planning to set up RAC on Podman, refer Podman Host machine details in [Section 5.1](#section-51--prerequisites-for-running-oracle-rac-on-podman) -6. Block storage: If you are planning to use block devices for shared storage, then allocate block devices for OCR, voting and database files. -7. 
NFS storage: If you are planning to use NFS storage for OCR, Voting Disk and Database files, then configure NFS storage and export at least one NFS mount. You can also use `/docker-images/OracleDatabase/RAC/OracleRACStorageServer` container for shared file system on NFS. -8. Set`/etc/sysctl.conf`parameters: For Oracle RAC, you must set following parameters at host level in `/etc/sysctl.conf`: - - ```INI - fs.aio-max-nr = 1048576 - fs.file-max = 6815744 - net.core.rmem_max = 4194304 - net.core.rmem_default = 262144 - net.core.wmem_max = 1048576 - net.core.wmem_default = 262144 - net.core.rmem_default = 262144 - ``` - -9. List and reload parameters: After the `/etc/sysctl.conf` file is modified, run the following commands: - - ```bash - sysctl -a - sysctl -p - ``` - -10. To resolve VIPs and SCAN IPs, we are using a DNS container in this guide. Before proceeding to the next step, create a [DNS server container](../OracleDNSServer/README.md). -**Note** If you have a pre-configured DNS server in your environment, then you can replace `-e DNS_SERVERS=172.16.1.25`, `--dns=172.16.1.25`, `-e DOMAIN=example.com` and `--dns-search=example.com` parameters in **Section 2: Building Oracle RAC Database Podman Install Images** with the `DOMAIN_NAME` and `DNS_SERVER` based on your environment. -11. If you are running RAC on Podman, make sure that you have installed the `podman-docker` rpm package so that podman commands can be run using `docker` utility. -12. The Oracle RAC `Dockerfile` does not contain any Oracle software binaries. Download the following software from the [Oracle Technology Network](https://www.oracle.com/technetwork/database/enterprise-edition/downloads/index.html) and stage them under `/docker-images/OracleDatabase/RAC/OracleRealApplicationCluster/dockerfiles/` folder. - - - Oracle Database 21c Grid Infrastructure (21.3) for Linux x86-64 - - Oracle Database 21c (21.3) for Linux x86-64 - - - If you are deploying Oracle RAC on Podman then execute following, otherwise skip to next section. - - Because Oracle RAC on Podman is supported on Release 21c (21.7) or later, you must download the grid release update (RU) from [support.oracle.com](https://support.oracle.com/portal/). - - - In this Example we download the following latest one-off patches for release 21.13 from [support.oracle.com](https://support.oracle.com/portal/) - - `36031790` - - `36041222` -13. Ensure you have git configured in your host machine, [refer this page](https://docs.oracle.com/en/learn/ol-git-start/index.html) for instructions. Clone this git repo by running below command - -```bash -git clone git@github.com:oracle/docker-images.git -``` - -**Notes** - -- If you are planning to use a `DNSServer` container for SCAN IPs, VIPs resolution, then configure the DNSServer. For development and testing purposes only, use the Oracle `DNSServer` image to deploy a container providing DNS resolutions. Please check [OracleDNSServer](../OracleDNSServer/README.md) for details. -- `OracleRACStorageServer` docker image can be used only for development and testing purpose. Please check [OracleRACStorageServer](../OracleRACStorageServer/README.md) for details. -- When you want to deploy RAC on Docker or Podman on Single host, create bridge networks for containers. -- When you want to deploy RAC on Docker or Podman on Multiple host, create macvlan networks for containers. -- To run Oracle RAC using Podman on multiple hosts, refer [Podman macvlan network](https://docs.podman.io/en/latest/markdown/podman-network-create.1.html). 
- To run Oracle RAC using Oracle Container Runtime for Docker on multiple hosts, refer [Docker macvlan network](https://docs.docker.com/network/macvlan/). -- If the Docker or Podman bridge network is not available outside your host, you can use the Oracle Connection Manager [CMAN image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleConnectionManager) to access the Oracle RAC Database from outside the host. - -## Section 2: Building Oracle RAC Database Container Images - -**IMPORTANT :** This section assumes that you have gone through all the prerequisites in Section 1 and completed all the steps, based on your environment. Do not uncompress the binaries and patches. - -To assist in building the images, you can use the [`buildContainerImage.sh`](https://github.com/oracle/docker-images/blob/master/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/buildContainerImage.sh) script. See the following for instructions and usage. - -### Oracle RAC Container Image for Docker - -If you are planing to deploy Oracle RAC container image on Podman, skip to the section [Oracle RAC Container Image for Podman](#oracle-rac-container-image-for-podman). - - ```bash - cd /docker-images/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles - ./buildContainerImage.sh -v -o '--build-arg BASE_OL_IMAGE=oraclelinux:7 --build-arg SLIMMING=true|false' - - # for example ./buildContainerImage.sh -v 21.3.0 -o '--build-arg BASE_OL_IMAGE=oraclelinux:7 --build-arg SLIMMING=false' - ``` - -### Oracle RAC Container Image for Podman - -If you are planing to deploy Oracle RAC container image on Docker, skip to the section [Oracle RAC Container Image for Docker](#oracle-rac-container-image-for-docker). - - ```bash - cd /docker-images/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles - ./buildContainerImage.sh -v -o '--build-arg BASE_OL_IMAGE=oraclelinux:8 --build-arg SLIMMING=true|false' - - # for example ./buildContainerImage.sh -v 21.3.0 -o '--build-arg BASE_OL_IMAGE=oraclelinux:8 --build-arg SLIMMING=false' - ``` - -- After the `21.3.0` Oracle RAC container image is built, start building a patched image with the download 21.7 RU and one-offs. To build the patch image, refer [Example of how to create a patched database image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch). - - -**Notes** - -- The resulting images will contain the Oracle Grid Infrastructure binaries and Oracle RAC Database binaries. -- If you are behind a proxy wall, then you must set the `https_proxy` environment variable based on your environment before building the image. - -## Section 3: Network and Password Management - -1. Before you start the installation, you must plan your private and public network. You can create a network bridge on every container host so containers running within that host can communicate with each other. - - For example, create `rac_pub1_nw` for the public network (`172.16.1.0/24`) and `rac_priv1_nw` (`192.168.17.0/24`) for a private network. You can use any network subnet for testing. - - In this document we reference the public network on `172.16.1.0/24` and the private network on `192.168.17.0/24`. 
- - ```bash - docker network create --driver=bridge --subnet=172.16.1.0/24 rac_pub1_nw - docker network create --driver=bridge --subnet=192.168.17.0/24 rac_priv1_nw - ``` - - - To run Oracle RAC using Oracle Container Runtime for Docker on multiple hosts, you will need to create a [Docker macvlan network](https://docs.docker.com/network/macvlan/) using the following commands: - - ```bash - docker network create -d macvlan --subnet=172.16.1.0/24 --gateway=172.16.1.1 -o parent=eth0 rac_pub1_nw - docker network create -d macvlan --subnet=192.168.17.0/24 --gateway=192.168.17.1 -o parent=eth1 rac_priv1_nw - ``` - -2. Specify the secret volume for resetting the grid, oracle, and database user password during node creation or node addition. The volume can be a shared volume among all the containers. For example: - - ```bash - mkdir /opt/.secrets/ - openssl rand -out /opt/.secrets/pwd.key -hex 64 - ``` - - - Edit the `/opt/.secrets/common_os_pwdfile` and seed the password for the grid, oracle and database users. For this deployment scenario, it will be a common password for the grid, oracle, and database users. Run the command: - - ```bash - openssl enc -aes-256-cbc -salt -in /opt/.secrets/common_os_pwdfile -out /opt/.secrets/common_os_pwdfile.enc -pass file:/opt/.secrets/pwd.key - rm -f /opt/.secrets/common_os_pwdfile - ``` - -3. Create `rac_host_file` on both Podman and Docker hosts: - - ```bash - mkdir /opt/containers/ - touch /opt/containers/rac_host_file - ``` - -**Notes** - -- To run Oracle RAC using Podman on multiple hosts, refer [Podman macvlan network](https://docs.podman.io/en/latest/markdown/podman-network-create.1.html). -To run Oracle RAC using Oracle Container Runtime for Docker on multiple hosts, refer [Docker macvlan network](https://docs.docker.com/network/macvlan/). -- If the Docker or Podman bridge network is not available outside your host, you can use the Oracle Connection Manager [CMAN image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleConnectionManager) to access the Oracle RAC Database from outside the host. -- If you want to specify a different password for each of the user accounts, then create three different files, encrypt them under `/opt/.secrets`, and pass the file name to the container using the environment variable. Environment variables can be ORACLE_PWD_FILE for the oracle user, GRID_PWD_FILE for the grid user, and DB_PWD_FILE for the database password. -- If you want to use a common password for the oracle, grid, and database users, then you can assign a password file name to COMMON_OS_PWD_FILE environment variable. - -## Section 4: Oracle RAC on Docker - -If you are deploying Oracle RAC On Podman, skip to the [Section 5: Oracle RAC on Podman](#section-5-oracle-rac-on-podman). - -**Note** Oracle RAC is supported for production use on Docker starting with Oracle Database 21c (21.3). On earlier releases, Oracle RAC on Docker is supported for development and and test environments. To deploy Oracle RAC on Docker, use the pre-built images available on the Oracle Container Registry. Execute the following steps in a given order to deploy RAC on Docker: - -To create an Oracle RAC environment on Docker, complete each of these steps in order. - -### Section 4.1 : Prerequisites for Running Oracle RAC on Docker - -To run Oracle RAC on Docker, you must install and configure [Oracle Container Runtime for Docker](https://docs.oracle.com/cd/E52668_01/E87205/html/index.html) on Oracle Linux 7. 
You must have sufficient space on docker file system (`/var/lib/docker`), configured with the Docker OverlayFS storage driver option `overlay2`. - -**IMPORTANT:** Completing prerequisite steps is a requirement for successful configuration. - -Complete each prerequisite step in order, customized for your environment. - -1. Verify that you have enough memory and CPU resources available for all containers. For this `README.md`, we used the following configuration: - - - 2 Docker hosts - - CPU Cores: 1 Socket with 4 cores, with 2 threads for each core Intel® Xeon® Platinum 8167M CPU at 2.00 GHz - - RAM: 60GB - - Swap memory: 32 GB - - Oracle Linux 7.9 or later with the Unbreakable Enterprise Kernel 6: 5.4.17-2102.200.13.el7uek.x86_64. - -2. Oracle RAC must run certain processes in real-time mode. To run processes inside a container in real-time mode, you must make changes to the Docker configuration files. For details, see the [`dockerd` documentation](https://docs.docker.com/engine/reference/commandline/dockerd/#examples). Edit the Docker Daemon based on Docker version: - - - Check the Docker version. In the following output, the Oracle `docker-engine` version is 19.03. - - ```bash - rpm -qa | grep docker - docker-cli-19.03.11.ol-9.el7.x86_64 - docker-engine-19.03.11.ol-9.el7.x86_64 - ``` - - - If Oracle `docker-engine` version is greater than or equal to 19.03: Edit `/usr/lib/systemd/system/docker.service` and add additional parameters in the `[Service]` section for the `dockerd` daemon: - - ```bash - ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --cpu-rt-runtime=950000 - ``` - - - If Oracle docker-engine version is less than 19.03: Edit `/etc/sysconfig/docker` and add following - - ```bash - OPTIONS='--selinux-enabled --cpu-rt-runtime=950000' - ``` - -3. After you have modified the `dockerd` daemon, reload the daemon with the changes you have made: - - ```bash - systemctl daemon-reload - systemctl stop docker - systemctl start docker - ``` - -### Section 4.2: Setup Oracle RAC Container on Docker - -This section provides step by step procedure to deploy Oracle RAC on container with block devices and storage container. To understand the details of environment variable, refer For the details of environment variables [Section 7: Environment Variables for the First Node](#section-7-environment-variables-for-the-first-node) - -Refer the [Section 3: Network and Password Management](#section-3--network-and-password-management) and setup the network on a container host based on your Oracle RAC environment. If you have already done the setup, ignore and proceed further. - -#### Deploying Oracle RAC on Container with Block Devices on Docker - -If you are using an NFS volume, skip to the section [Deploying Oracle RAC on Container With Oracle RAC Storage Container](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container). - -Make sure the ASM devices do not have any existing file system. To clear any other file system from the devices, use the following command: - - ```bash - dd if=/dev/zero of=/dev/xvde bs=8k count=10000 - ``` - -Repeat for each shared block device. In the preceding example, `/dev/xvde` is a shared Xen virtual block device. - -Now create the Oracle RAC container using the image. 
You can use the following example to create a container: - - ```bash -docker create -t -i \ - --hostname racnoded1 \ - --volume /boot:/boot:ro \ - --volume /dev/shm \ - --tmpfs /dev/shm:rw,exec,size=4G \ - --volume /opt/containers/rac_host_file:/etc/hosts \ - --volume /opt/.secrets:/run/secrets:ro \ - --dns=172.16.1.25 \ - --dns-search=example.com \ - --device=/dev/oracleoci/oraclevdd:/dev/asm_disk1 \ - --device=/dev/oracleoci/oraclevde:/dev/asm_disk2 \ - --privileged=false \ - --cap-add=SYS_NICE \ - --cap-add=SYS_RESOURCE \ - --cap-add=NET_ADMIN \ - -e DNS_SERVERS="172.16.1.25" \ - -e NODE_VIP=172.16.1.130 \ - -e VIP_HOSTNAME=racnoded1-vip \ - -e PRIV_IP=192.168.17.100 \ - -e PRIV_HOSTNAME=racnoded1-priv \ - -e PUBLIC_IP=172.16.1.100 \ - -e PUBLIC_HOSTNAME=racnoded1 \ - -e SCAN_NAME=racnodedc1-scan \ - -e OP_TYPE=INSTALL \ - -e DOMAIN=example.com \ - -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ - -e ASM_DISCOVERY_DIR=/dev \ - -e CMAN_HOSTNAME=racnodedc1-cman \ - -e CMAN_IP=172.16.1.164 \ - -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ - -e PWD_KEY=pwd.key \ - -e RESET_FAILED_SYSTEMD="true" \ - --restart=always --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ - --cpu-rt-runtime=95000 --ulimit rtprio=99 \ - --name racnoded1 \ - oracle/database-rac:21.3.0 -``` - -**Note:** Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. - -#### Deploying Oracle RAC on Container With Oracle RAC Storage Container - -If you are using block devices, skip to the section [Deploying Oracle RAC on Container with Block Devices on Docker](#deploying-oracle-rac-on-container-with-block-devices-on-docker) - -Now create the Oracle RAC container using the image. You can use the following example to create a container: - - ```bash - docker create -t -i \ - --hostname racnoded1 \ - --volume /boot:/boot:ro \ - --volume /dev/shm \ - --tmpfs /dev/shm:rw,exec,size=4G \ - --volume /opt/containers/rac_host_file:/etc/hosts \ - --volume /opt/.secrets:/run/secrets:ro \ - --dns=172.16.1.25 \ - --dns-search=example.com \ - --privileged=false \ - --volume racstorage:/oradata \ - --cap-add=SYS_NICE \ - --cap-add=SYS_RESOURCE \ - --cap-add=NET_ADMIN \ - -e DNS_SERVERS="172.16.1.25" \ - -e NODE_VIP=172.16.1.130 \ - -e VIP_HOSTNAME=racnoded1-vip \ - -e PRIV_IP=192.168.17.100 \ - -e PRIV_HOSTNAME=racnoded1-priv \ - -e PUBLIC_IP=172.16.1.100 \ - -e PUBLIC_HOSTNAME=racnoded1 \ - -e SCAN_NAME=racnodedc1-scan \ - -e OP_TYPE=INSTALL \ - -e DOMAIN=example.com \ - -e ASM_DISCOVERY_DIR=/oradata \ - -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ - -e CMAN_HOSTNAME=racnodedc1-cman \ - -e CMAN_IP=172.16.1.164 \ - -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ - -e PWD_KEY=pwd.key \ - -e RESET_FAILED_SYSTEMD="true" \ - --restart=always \ - --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ - --cpu-rt-runtime=95000 \ - --ulimit rtprio=99 \ - --name racnoded1 \ - oracle/database-rac:21.3.0 - ``` - -**Notes:** - -- Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. -- You must have created the `racstorage` volume before the creation of the Oracle RAC Container. For details, please refer [OracleRACStorageServer](../OracleRACStorageServer/README.md). 
-- For details about the available environment variables, refer the [Section 7](#section-7-environment-variables-for-the-first-node). - -#### Assign networks to Oracle RAC containers - -You need to assign the Docker networks created in section 1 to containers. Execute the following commands: - - ```bash - -docker network disconnect bridge racnoded1 -docker network connect rac_pub1_nw --ip 172.16.1.100 racnoded1 -docker network connect rac_priv1_nw --ip 192.168.17.100 racnoded1 - ``` - -#### Start the first container - -To start the first container, run the following command: - - ```bash - docker start racnoded1 - ``` - -It can take at least 40 minutes or longer to create the first node of the cluster. To check the logs, use the following command from another terminal session: - - ```bash - docker logs -f racnoded1 - ``` - -You should see the database creation success message at the end: - - ```bash - #################################### - ORACLE RAC DATABASE IS READY TO USE! - #################################### - ``` - -#### Connect to the Oracle RAC container - -To connect to the container execute the following command: - -```bash -docker exec -i -t racnoded1 /bin/bash -``` - -If the install fails for any reason, log in to the container using the preceding command and check `/tmp/orod.log`. - -- You can also review the Grid Infrastructure logs located at `$GRID_BASE/diag/crs` and check for failure logs. -- If the failure occurred during the database creation then check the database logs. - -### Section 4.3: Adding an Oracle RAC Node using a Docker Container - -Before proceeding to the next step, ensure Oracle Grid Infrastructure is running and the Oracle RAC Database is open as per instructions in [Section 4.2: Setup Oracle RAC on Docker](#section-42-setup-oracle-rac-container-on-docker). Otherwise, the node addition process will fail. - -Refer the [Section 3: Network and Password Management](#section-3--network-and-password-management) and setup the network on a container host based on your Oracle RAC environment. If you have already done the setup, ignore and proceed further. - -To understand the details of environment variable, refer For the details of environment variables [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes) - -Reset the password on the existing Oracle RAC node for SSH setup between an existing node in the cluster and the new node. Password must be the same on all the nodes for the `grid` and `oracle` users. Execute the following command on an existing node of the cluster. - -```bash -docker exec -i -t -u root racnode1 /bin/bash -sh /opt/scripts/startup/resetOSPassword.sh --help -sh /opt/scripts/startup/resetOSPassword.sh --op_type reset_grid_oracle --pwd_file common_os_pwdfile.enc --secret_volume /run/secrets --pwd_key_file pwd.key -``` - -**Note:** If you do not have a common secret volume among Oracle RAC containers, populate the password file with the same password that you have used on the new node, encrypt the file, and execute `resetOSPassword.sh` on the existing node of the cluster. - -#### Deploying Oracle RAC Additional Node on Container with Block Devices on Docker - -If you are using an NFS volume, skip to the section [Deploying Oracle RAC on Container with Oracle RAC Storage Container on Docker](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container). 
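Because node addition fails if the existing cluster is unhealthy, it can be worth confirming from the first node that Clusterware and the database are up before creating the additional container shown below. A minimal sketch, assuming the container name, OS users (`grid`, `oracle`) and `ORACLE_SID` used in the earlier examples, and that `crsctl`/`srvctl` are on those users' PATH:

```bash
# Clusterware stack and node status across the current cluster
docker exec -it racnoded1 /bin/bash -c "su - grid -c 'crsctl check cluster -all && olsnodes -n -s'"
# Database and instance status
docker exec -it racnoded1 /bin/bash -c "su - oracle -c 'srvctl status database -d ORCLCDB'"
```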
- -To create additional nodes, use the following command: - -```bash -docker create -t -i \ - --hostname racnoded2 \ - --volume /boot:/boot:ro \ - --volume /dev/shm \ - --tmpfs /dev/shm:rw,exec,size=4G \ - --volume /opt/containers/rac_host_file:/etc/hosts \ - --volume /opt/.secrets:/run/secrets:ro \ - --dns=172.16.1.25 \ - --dns-search=example.com \ - --device=/dev/oracleoci/oraclevdd:/dev/asm_disk1 \ - --device=/dev/oracleoci/oraclevde:/dev/asm_disk2 \ - --privileged=false \ - --cap-add=SYS_NICE \ - --cap-add=SYS_RESOURCE \ - --cap-add=NET_ADMIN \ - -e DNS_SERVERS="172.16.1.25" \ - -e EXISTING_CLS_NODES=racnoded1 \ - -e NODE_VIP=172.16.1.131 \ - -e VIP_HOSTNAME=racnoded2-vip \ - -e PRIV_IP=192.168.17.101 \ - -e PRIV_HOSTNAME=racnoded2-priv \ - -e PUBLIC_IP=172.16.1.101 \ - -e PUBLIC_HOSTNAME=racnoded2 \ - -e DOMAIN=example.com \ - -e SCAN_NAME=racnodedc1-scan \ - -e ASM_DISCOVERY_DIR=/dev \ - -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ - -e ORACLE_SID=ORCLCDB \ - -e OP_TYPE=ADDNODE \ - -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ - -e PWD_KEY=pwd.key \ - -e RESET_FAILED_SYSTEMD="true" \ - --restart=always --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ - --cpu-rt-runtime=95000 --ulimit rtprio=99 \ - --name racnoded2 \ - oracle/database-rac:21.3.0 -``` - -For details of all environment variables and parameters, refer to [Section 7](#section-7-environment-variables-for-the-first-node). - -#### Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Docker - -If you are using physical block devices for shared storage, skip to [Deploying Oracle RAC on Container with Block Devices on Docker](#deploying-oracle-rac-on-container-with-block-devices-on-docker). - -Use the existing `racstorage:/oradata` volume when creating the additional container using the image. - -For example: - -```bash -docker create -t -i \ - --hostname racnoded2 \ - --volume /boot:/boot:ro \ - --volume /dev/shm \ - --tmpfs /dev/shm:rw,exec,size=4G \ - --volume /opt/containers/rac_host_file:/etc/hosts \ - --volume /opt/.secrets:/run/secrets:ro \ - --dns=172.16.1.25 \ - --dns-search=example.com \ - --volume racstorage:/oradata \ - --privileged=false \ - --cap-add=SYS_NICE \ - --cap-add=SYS_RESOURCE \ - --cap-add=NET_ADMIN \ - -e DNS_SERVERS="172.16.1.25" \ - -e EXISTING_CLS_NODES=racnoded1 \ - -e NODE_VIP=172.16.1.131 \ - -e VIP_HOSTNAME=racnoded2-vip \ - -e PRIV_IP=192.168.17.101 \ - -e PRIV_HOSTNAME=racnoded2-priv \ - -e PUBLIC_IP=172.16.1.101 \ - -e PUBLIC_HOSTNAME=racnoded2 \ - -e DOMAIN=example.com \ - -e SCAN_NAME=racnodedc1-scan \ - -e ASM_DISCOVERY_DIR=/oradata \ - -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ - -e ORACLE_SID=ORCLCDB \ - -e OP_TYPE=ADDNODE \ - -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ - -e PWD_KEY=pwd.key \ - -e RESET_FAILED_SYSTEMD="true" \ - --restart=always --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ - --cpu-rt-runtime=95000 --ulimit rtprio=99 \ - --name racnoded2 \ - oracle/database-rac:21.3.0 -``` - -**Notes:** - -- You must have created **racstorage** volume before the creation of the Oracle RAC container. -- You can change env variables such as IPs and ORACLE_PWD based on your env. For details about the env variables, refer the section 8. 
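The `racstorage` volume referenced in the notes above is an NFS-backed named volume served by the OracleRACStorageServer container; the authoritative steps are in that image's README. As a rough sketch only, such a volume is typically defined with the local volume driver and NFS options similar to the following (the storage container address and the mount options here are placeholders, not values taken from this change):

```bash
docker volume create --driver local \
  --opt type=nfs \
  --opt o=addr=<storage-container-ip>,rw,bg,hard,tcp,vers=3,timeo=600,rsize=32768,wsize=32768,actimeo=0 \
  --opt device=:/oradata \
  racstorage
```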
- -#### Assign Network to additional Oracle RAC container - -Connect the private and public networks you created earlier to the container: - -```bash -docker network disconnect bridge racnoded2 -docker network connect rac_pub1_nw --ip 172.16.1.101 racnoded2 -docker network connect rac_priv1_nw --ip 192.168.17.101 racnoded2 -``` - -#### Start Oracle RAC racnode2 container - -Start the container - -```bash -docker start racnoded2 -``` - -To check the database logs, tail the logs using the following command: - -```bash -docker logs -f racnoded2 -``` - -You should see the database creation success message at the end. - -```bash -################################################################# -Oracle Database ORCLCDB is up and running on racnoded2 -################################################################# -Running User Script for oracle user -Setting Remote Listener -#################################### -ORACLE RAC DATABASE IS READY TO USE! -#################################### -``` - -#### Connect to the Oracle RAC racnode2 container - -To connect to the container execute the following command: - -```bash -docker exec -i -t racnoded2 /bin/bash -``` - -If the node addition fails, log in to the container using the preceding command and review `/tmp/orod.log`. You can also review the Grid Infrastructure logs i.e. `$GRID_BASE/diag/crs` and check for failure logs. If the node creation has failed during the database creation process, then check DB logs. - -## Section 4.4: Setup Oracle RAC Container on Docker with Docker Compose - -Oracle RAC database can also be deployed with Docker Compose. An example of how to install Oracle RAC Database on Single Host via Bridge Network is explained in this [README.md](./samples/racdockercompose/README.md) - -Same section covers various below scenarios as well with docker compose- -1. Deploying Oracle RAC on Container with Block Devices on Docker with Docker Compose -2. Deploying Oracle RAC on Container With Oracle RAC Storage Container with Docker Compose -3. Deploying Oracle RAC Additional Node on Container with Block Devices on Docker with Docker Compose -4. Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Docker with Docker Compose - -***Note:*** Docker and Docker Compose is not supported with OL8. You need OL7.9 with UEK R5 or R6. - -## Section 5: Oracle RAC on Podman - -If you are deploying Oracle RAC On Docker, skip to [Section 4: Oracle RAC on Docker](#section-4-oracle-rac-on-docker) - -**Note** Oracle RAC is supported for production use on Podman starting with Oracle Database 19c (19.16), and Oracle Database 21c (21.7). You can deploy Oracle RAC on Podman using the pre-built images available on Oracle Container Registry. Execute the following steps in a given order to deploy RAC on Podman: - -To create an Oracle RAC environment on Podman, complete each of these steps in order. - -### Section 5.1 : Prerequisites for Running Oracle RAC on Podman - -You must install and configure [Podman release 4.0.2](https://docs.oracle.com/en/operating-systems/oracle-linux/podman/podman-InstallingPodmanandRelatedUtilities.html#podman-install) or later on Oracle Linux 8.5 or later to run Oracle RAC on Podman. - -**Notes**: - -- You need to remove `"--cpu-rt-runtime=95000 \"` from container creation commands mentioned below in this document in following sections to create the containers if you are running Oracle 8 with UEKR7: - - [Section 5.2: Setup RAC Containers on Podman](#section-52-setup-rac-containers-on-podman). 
- - [Section 5.3: Adding a Oracle RAC Node using a container on Podman](#section-53-adding-a-oracle-rac-node-using-a-container-on-podman). - -- You can check the details on [Oracle Linux and Unbreakable Enterprise Kernel (UEK) Releases](https://blogs.oracle.com/scoter/post/oracle-linux-and-unbreakable-enterprise-kernel-uek-releases) - -- You do not need to execute step 2 in this section to create and enable `podman-rac-cgroup.service` when we are running Oracle Linux 8 with Unbreakable Enterprise Kernel R7. - -**IMPORTANT:** Completing prerequisite steps is a requirement for successful configuration. - -Complete each prerequisite step in order, customized for your environment. - -1. Verify that you have enough memory and CPU resources available for all containers. In this `README.md` for Podman, we used the following configuration: - - - 2 Podman hosts - - CPU Cores: 1 Socket with 4 cores, with 2 threads for each core Intel® Xeon® Platinum 8167M CPU at 2.00 GHz - - RAM: 60 GB - - Swap memory: 32 GB - - Oracle Linux 8.5 (Linux-x86-64) with the Unbreakable Enterprise Kernel 6: `5.4.17-2136.300.7.el8uek.x86_64`. - -2. Oracle RAC must run certain processes in real-time mode. To run processes inside a container in real-time mode, populate the real-time CPU budgeting on machine restarts. Create a oneshot systemd service as follows: - - - Create a file `/etc/systemd/system/podman-rac-cgroup.service` - - Append the following lines: - - ```INI - [Unit] - Description=Populate Cgroups with real time chunk on machine restart - After=multi-user.target - [Service] - Type=oneshot - ExecStart=/bin/bash -c “/bin/echo 950000 > /sys/fs/cgroup/cpu,cpuacct/machine.slice/cpu.rt_runtime_us && /bin/systemctl restart podman-restart.service” - StandardOutput=journal - CPUAccounting=yes - Slice=machine.slice - [Install] - WantedBy=multi-user.target - ``` - - - After creating the file `/etc/systemd/system/podman-rac-cgroup.service` with the lines appended in the preceding step, reload and restart the Podman daemon using the following steps: - - ```bash - systemctl daemon-reload - systemctl enable podman-rac-cgroup.service - systemctl enable podman-restart.service - systemctl start podman-rac-cgroup.service - ``` - -3. If SELINUX is enabled on the Podman host, then you must create an SELinux policy for Oracle RAC on Podman. - -You can check SELinux Status in your host machine by running the `sestatus` command. - -For details about how to create SELinux policy for Oracle RAC on Podman, see "How to Configure Podman for SELinux Mode" in the publication [Oracle Real Application Clusters Installation Guide for Podman Oracle Linux x86-64](https://docs.oracle.com/en/database/oracle/oracle-database/21/racpd/target-configuration-oracle-rac-podman.html#GUID-59138DF8-3781-4033-A38F-E0466884D008). - -### Section 5.2: Setup RAC Containers on Podman - -This section provides step by step procedure to deploy Oracle RAC on container with block devices and storage container. To understand the details of environment variable, refer For the details of environment variables [Section 7: Environment Variables for the First Node](#section-7-environment-variables-for-the-first-node) - -Refer the [Section 3: Network and Password Management](#section-3--network-and-password-management) and setup the network on a container host based on your Oracle RAC environment. If you have already done the setup, ignore and proceed further. 
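Section 3 shows the `rac_pub1_nw` and `rac_priv1_nw` bridge networks being created with the docker CLI. On a Podman host they can be created the same way through the `podman-docker` shim, or natively with `podman network create`; a minimal sketch reusing the subnets from Section 3:

```bash
podman network create --driver=bridge --subnet=172.16.1.0/24 rac_pub1_nw
podman network create --driver=bridge --subnet=192.168.17.0/24 rac_priv1_nw
```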
- -#### Deploying Oracle RAC Containers with Block Devices on Podman - -If you are using an NFS volume, skip to the section [Deploying Oracle RAC on Container With Oracle RAC Storage Container on Podman](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container-on-podman). - -Make sure the ASM devices do not have any existing file system. To clear any other file system from the devices, use the following command: - - ```bash - dd if=/dev/zero of=/dev/xvde bs=8k count=10000 - ``` - -Repeat for each shared block device. In the preceding example, `/dev/xvde` is a shared Xen virtual block device. - -Now create the Oracle RAC container using the image. For the details of environment variables, refer to section 7. You can use the following example to create a container: - - ```bash - podman create -t -i \ - --hostname racnodep1 \ - --volume /boot:/boot:ro \ - --tmpfs /dev/shm:rw,exec,size=4G \ - --volume /opt/containers/rac_host_file:/etc/hosts \ - --volume /opt/.secrets:/run/secrets:ro \ - --dns=172.16.1.25 \ - --dns-search=example.com \ - --device=/dev/oracleoci/oraclevdd:/dev/asm_disk1 \ - --device=/dev/oracleoci/oraclevde:/dev/asm_disk2 \ - --privileged=false \ - --cap-add=SYS_NICE \ - --cap-add=SYS_RESOURCE \ - --cap-add=NET_ADMIN \ - --cap-add=AUDIT_WRITE \ - --cap-add=AUDIT_CONTROL \ - --memory 16G \ - --memory-swap 32G \ - --sysctl kernel.shmall=2097152 \ - --sysctl "kernel.sem=250 32000 100 128" \ - --sysctl kernel.shmmax=8589934592 \ - --sysctl kernel.shmmni=4096 \ - -e DNS_SERVERS="172.16.1.25" \ - -e NODE_VIP=172.16.1.200 \ - -e VIP_HOSTNAME=racnodep1-vip \ - -e PRIV_IP=192.168.17.170 \ - -e PRIV_HOSTNAME=racnodep1-priv \ - -e PUBLIC_IP=172.16.1.170 \ - -e PUBLIC_HOSTNAME=racnodep1 \ - -e SCAN_NAME=racnodepc1-scan \ - -e OP_TYPE=INSTALL \ - -e DOMAIN=example.com \ - -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ - -e ASM_DISCOVERY_DIR=/dev \ - -e CMAN_HOSTNAME=racnodepc1-cman \ - -e CMAN_IP=172.16.1.166 \ - -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ - -e PWD_KEY=pwd.key \ - -e ORACLE_SID=ORCLCDB \ - -e RESET_FAILED_SYSTEMD="true" \ - -e DEFAULT_GATEWAY="172.16.1.1" \ - -e TMPDIR=/var/tmp \ - --restart=always \ - --systemd=always \ - --cpu-rt-runtime=95000 \ - --ulimit rtprio=99 \ - --name racnodep1 \ - localhost/oracle/database-rac:21.3.0-21.13.0 - ``` - -**Note:** Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. - -#### Deploying Oracle RAC on Container With Oracle RAC Storage Container on Podman - -If you are using block devices, skip to the section [Deploying RAC Containers with Block Devices on Podman](#deploying-oracle-rac-containers-with-block-devices-on-podman). -Now create the Oracle RAC container using the image. 
You can use the following example to create a container: - - ```bash - podman create -t -i \ - --hostname racnodep1 \ - --volume /boot:/boot:ro \ - --tmpfs /dev/shm:rw,exec,size=4G \ - --volume /opt/containers/rac_host_file:/etc/hosts \ - --volume /opt/.secrets:/run/secrets:ro \ - --dns=172.16.1.25 \ - --dns-search=example.com \ - --privileged=false \ - --volume racstorage:/oradata \ - --cap-add=SYS_NICE \ - --cap-add=SYS_RESOURCE \ - --cap-add=NET_ADMIN \ - --cap-add=AUDIT_WRITE \ - --cap-add=AUDIT_CONTROL \ - --memory 16G \ - --memory-swap 32G \ - --sysctl kernel.shmall=2097152 \ - --sysctl "kernel.sem=250 32000 100 128" \ - --sysctl kernel.shmmax=8589934592 \ - --sysctl kernel.shmmni=4096 \ - -e DNS_SERVERS="172.16.1.25" \ - -e NODE_VIP=172.16.1.200 \ - -e VIP_HOSTNAME=racnodep1-vip \ - -e PRIV_IP=192.168.17.170 \ - -e PRIV_HOSTNAME=racnodep1-priv \ - -e PUBLIC_IP=172.16.1.170 \ - -e PUBLIC_HOSTNAME=racnodep1 \ - -e SCAN_NAME=racnodepc1-scan \ - -e OP_TYPE=INSTALL \ - -e DOMAIN=example.com \ - -e ASM_DISCOVERY_DIR=/oradata \ - -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ - -e CMAN_HOSTNAME=racnodepc1-cman \ - -e CMAN_IP=172.16.1.166 \ - -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ - -e PWD_KEY=pwd.key \ - -e ORACLE_SID=ORCLCDB \ - -e RESET_FAILED_SYSTEMD="true" \ - -e DEFAULT_GATEWAY="172.16.1.1" \ - -e TMPDIR=/var/tmp \ - --restart=always \ - --systemd=always \ - --cpu-rt-runtime=95000 \ - --ulimit rtprio=99 \ - --name racnodep1 \ - localhost/oracle/database-rac:21.3.0-21.13.0 - ``` - -**Notes:** - -- Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. -- You must have created the `racstorage` volume before the creation of the Oracle RAC Container. For details about the available environment variables, refer the [Section 7](#section-7-environment-variables-for-the-first-node). - -#### Assign networks to Oracle RAC containers Created Using Podman - -You need to assign the Podman networks created in section 1 to containers. Execute the following commands: - - ```bash - podman network disconnect podman racnodep1 - podman network connect rac_pub1_nw --ip 172.16.1.170 racnodep1 - podman network connect rac_priv1_nw --ip 192.168.17.170 racnodep1 - ``` - -#### Start the first container Created Using Podman - -To start the first container, run the following command: - - ```bash - podman start racnodep1 - ``` - -It can take at least 40 minutes or longer to create the first node of the cluster. To check the database logs, tail the logs using the following command: - -```bash -podman exec racnodep1 /bin/bash -c "tail -f /tmp/orod.log" -``` - -You should see the database creation success message at the end. - -```bash -01-31-2024 12:31:20 UTC : : ################################################################# -01-31-2024 12:31:20 UTC : : Oracle Database ORCLCDB is up and running on racnodep1 -01-31-2024 12:31:20 UTC : : ################################################################# -01-31-2024 12:31:20 UTC : : Running User Script -01-31-2024 12:31:20 UTC : : Setting Remote Listener -01-31-2024 12:31:27 UTC : : 172.16.1.166 -01-31-2024 12:31:27 UTC : : Executing script to set the remote listener -01-31-2024 12:31:28 UTC : : #################################### -01-31-2024 12:31:28 UTC : : ORACLE RAC DATABASE IS READY TO USE! 
-01-31-2024 12:31:28 UTC : : #################################### -``` - -#### Connect to the Oracle RAC container Created Using Podman - -To connect to the container execute the following command: - -```bash -podman exec -i -t racnodep1 /bin/bash -``` - -If the install fails for any reason, log in to the container using the preceding command and check `/tmp/orod.log`. You can also review the Grid Infrastructure logs located at `$GRID_BASE/diag/crs` and check for failure logs. If the failure occurred during the database creation then check the database logs. - -### Section 5.3: Adding a Oracle RAC Node using a container on Podman - -Before proceeding to the next step, ensure Oracle Grid Infrastructure is running and the Oracle RAC Database is open as per instructions in [Section 5.2: Setup RAC Containers on Podman](#section-52-setup-rac-containers-on-podman). Otherwise, the node addition process will fail. - -Refer the [Section 3: Network and Password Management](#section-3--network-and-password-management) and setup the network on a container host based on your Oracle RAC environment. If you have already done the setup, ignore and proceed further. - -To understand the details of environment variable, refer For the details of environment variables [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes). - -Reset the password on the existing Oracle RAC node for SSH setup between an existing node in the cluster and the new node. Password must be the same on all the nodes for the `grid` and `oracle` users. Execute the following command on an existing node of the cluster. - -```bash -podman exec -i -t -u root racnode1 /bin/bash -sh /opt/scripts/startup/resetOSPassword.sh --help -sh /opt/scripts/startup/resetOSPassword.sh --op_type reset_grid_oracle --pwd_file common_os_pwdfile.enc --secret_volume /run/secrets --pwd_key_file pwd.key -``` - -**Note:** If you do not have a common secret volume among Oracle RAC containers, populate the password file with the same password that you have used on the new node, encrypt the file, and execute `resetOSPassword.sh` on the existing node of the cluster. - -#### Deploying Oracle RAC Additional Node on Container with Block Devices on Podman - -If you are using an NFS volume, skip to the section [Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Podman](#deploying-oracle-rac-additional-node-on-container-with-oracle-rac-storage-container-on-podman). 
- -To create additional nodes, use the following command: - -```bash -podman create -t -i \ - --hostname racnodep2 \ - --tmpfs /dev/shm:rw,exec,size=4G \ - --volume /boot:/boot:ro \ - --dns-search=example.com \ - --volume /opt/containers/rac_host_file:/etc/hosts \ - --volume /opt/.secrets:/run/secrets:ro \ - --dns=172.16.1.25 \ - --dns-search=example.com \ - --device=/dev/oracleoci/oraclevdd:/dev/asm_disk1 \ - --device=/dev/oracleoci/oraclevde:/dev/asm_disk2 \ - --privileged=false \ - --cap-add=SYS_NICE \ - --cap-add=SYS_RESOURCE \ - --cap-add=NET_ADMIN \ - --cap-add=AUDIT_CONTROL \ - --cap-add=AUDIT_WRITE \ - --memory 16G \ - --memory-swap 32G \ - --sysctl kernel.shmall=2097152 \ - --sysctl "kernel.sem=250 32000 100 128" \ - --sysctl kernel.shmmax=8589934592 \ - --sysctl kernel.shmmni=4096 \ - -e DNS_SERVERS="172.16.1.25" \ - -e EXISTING_CLS_NODES=racnodep1 \ - -e NODE_VIP=172.16.1.201 \ - -e VIP_HOSTNAME=racnodep2-vip \ - -e PRIV_IP=192.168.17.171 \ - -e PRIV_HOSTNAME=racnodep2-priv \ - -e PUBLIC_IP=172.16.1.171 \ - -e PUBLIC_HOSTNAME=racnodep2 \ - -e DOMAIN=example.com \ - -e SCAN_NAME=racnodepc1-scan \ - -e ASM_DISCOVERY_DIR=/dev \ - -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ - -e ORACLE_SID=ORCLCDB \ - -e OP_TYPE=ADDNODE \ - -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ - -e PWD_KEY=pwd.key \ - -e RESET_FAILED_SYSTEMD="true" \ - -e DEFAULT_GATEWAY="172.16.1.1" \ - -e TMPDIR=/var/tmp \ - --systemd=always \ - --cpu-rt-runtime=95000 \ - --ulimit rtprio=99 \ - --restart=always \ - --name racnodep2 \ - localhost/oracle/database-rac:21.3.0-21.13.0 -``` - -For details of all environment variables and parameters, refer to [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes). - -#### Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Podman - -If you are using physical block devices for shared storage, skip to [Deploying Oracle RAC Additional Node on Container with Block Devices on Podman](#deploying-oracle-rac-additional-node-on-container-with-block-devices-on-podman). - -Use the existing `racstorage:/oradata` volume when creating the additional container using the image. 
- -For example: - -```bash -podman create -t -i \ - --hostname racnodep2 \ - --tmpfs /dev/shm:rw,exec,size=4G \ - --volume /boot:/boot:ro \ - --dns-search=example.com \ - --volume /opt/containers/rac_host_file:/etc/hosts \ - --volume /opt/.secrets:/run/secrets:ro \ - --dns=172.16.1.25 \ - --dns-search=example.com \ - --privileged=false \ - --volume racstorage:/oradata \ - --cap-add=SYS_NICE \ - --cap-add=SYS_RESOURCE \ - --cap-add=NET_ADMIN \ - --cap-add=AUDIT_WRITE \ - --cap-add=AUDIT_CONTROL \ - --memory 16G \ - --memory-swap 32G \ - --sysctl kernel.shmall=2097152 \ - --sysctl "kernel.sem=250 32000 100 128" \ - --sysctl kernel.shmmax=8589934592 \ - --sysctl kernel.shmmni=4096 \ - -e DNS_SERVERS="172.16.1.25" \ - -e EXISTING_CLS_NODES=racnodep1 \ - -e NODE_VIP=172.16.1.201 \ - -e VIP_HOSTNAME=racnodep2-vip \ - -e PRIV_IP=192.168.17.171 \ - -e PRIV_HOSTNAME=racnodep2-priv \ - -e PUBLIC_IP=172.16.1.171 \ - -e PUBLIC_HOSTNAME=racnodep2 \ - -e DOMAIN=example.com \ - -e SCAN_NAME=racnodepc1-scan \ - -e ASM_DISCOVERY_DIR=/oradata \ - -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ - -e ORACLE_SID=ORCLCDB \ - -e OP_TYPE=ADDNODE \ - -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ - -e PWD_KEY=pwd.key \ - -e RESET_FAILED_SYSTEMD="true" \ - -e DEFAULT_GATEWAY="172.16.1.1" \ - -e TMPDIR=/var/tmp \ - --systemd=always \ - --cpu-rt-runtime=95000 \ - --ulimit rtprio=99 \ - --restart=always \ - --name racnodep2 \ - localhost/oracle/database-rac:21.3.0-21.13.0 -``` - -**Notes:** - -- You must have created **racstorage** volume before the creation of the Oracle RAC container. -- You can change env variables such as IPs and ORACLE_PWD based on your env. For details about the env variables, refer the [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes). - -#### Assign Network to additional Oracle RAC container Created Using Podman - -Connect the private and public networks you created earlier to the container: - -```bash -podman network disconnect podman racnodep2 -podman network connect rac_pub1_nw --ip 172.16.1.171 racnodep2 -podman network connect rac_priv1_nw --ip 192.168.17.171 racnodep2 -``` - -#### Start Oracle RAC container - -Start the container - -```bash -podman start racnodep2 -``` - -To check the database logs, tail the logs using the following command: - -```bash -podman exec racnodep2 /bin/bash -c "tail -f /tmp/orod.log" -``` - -You should see the database creation success message at the end. - -```bash -02-01-2024 09:36:14 UTC : : ################################################################# -02-01-2024 09:36:14 UTC : : Oracle Database ORCLCDB is up and running on racnodep2 -02-01-2024 09:36:14 UTC : : ################################################################# -02-01-2024 09:36:14 UTC : : Running User Script -02-01-2024 09:36:14 UTC : : Setting Remote Listener -02-01-2024 09:36:14 UTC : : #################################### -02-01-2024 09:36:14 UTC : : ORACLE RAC DATABASE IS READY TO USE! -02-01-2024 09:36:14 UTC : : #################################### -``` -## Section 5.4: Setup Oracle RAC Container on Podman with Podman Compose - -Oracle RAC database can also be deployed with podman Compose. An example of how to install Oracle RAC Database on Single Host via Bridge Network is explained in this [README.md](./samples/racpodmancompose/README.md) - -Same section covers various below scenarios as well with podman compose- -1. 
Deploying Oracle RAC on Container with Block Devices on Podman with Podman Compose -2. Deploying Oracle RAC on Container with NFS Devices on Podman with Podman Compose -3. Deploying Oracle RAC Additional Node on Container with Block Devices on Podman with Podman Compose -4. Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Podman with Podman Compose - -***Note:*** Podman and Podman Compose is not supported with OL7. You need minimum OL8.8 with UEK R7. - -## Section 6: Connecting to an Oracle RAC Database - -**IMPORTANT:** This section assumes that you have successfully created an Oracle RAC cluster using the preceding sections. - -If you are using a connection manager and exposed the port 1521 on the host, then connect from an external client using the following connection string, where `` is the host container, and `` is the database system identifier: - -```bash -system/@//:1521/ -``` - -If you are using the bridge created using MACVLAN driver, and you have configured DNS appropriately, then you can connect using the public Single Client Access (SCAN) listener directly from any external client. To connect with the SCAN, use the following connection string, where `` is the SCAN name for the database, and `` is the database system identifier: - -```bash -system/@//:1521/ -``` - -## Section 7: Environment Variables for the First Node - -This section provides information about the environment variables that can be used when creating the first node of a cluster. - -```bash -OP_TYPE=###Specify the Operation TYPE. It can accept 2 values INSTALL OR ADDNODE#### -NODE_VIP=####Specify the Node VIP### -VIP_HOSTNAME=###Specify the VIP hostname### -PRIV_IP=###Specify the Private IP### -PRIV_HOSTNAME=###Specify the Private Hostname### -PUBLIC_IP=###Specify the public IP### -PUBLIC_HOSTNAME=###Specify the public hostname### -SCAN_NAME=###Specify the scan name### -ASM_DEVICE_LIST=###Specify the ASM Disk lists. -SCAN_IP=###Specify this if you do not have DNS server### -DOMAIN=###Default value set to example.com### -PASSWORD=###OS password will be generated by openssl### -CLUSTER_NAME=###Default value set to racnode-c#### -ORACLE_SID=###Default value set to ORCLCDB### -ORACLE_PDB=###Default value set to ORCLPDB### -ORACLE_PWD=###Default value set to generated by openssl random password### -ORACLE_CHARACTERSET=###Default value set AL32UTF8### -DEFAULT_GATEWAY=###Default gateway. You need this env variable if containers will be running on multiple hosts.#### -CMAN_HOSTNAME=###Connection Manager Host Name### -CMAN_IP=###Connection manager Host IP### -ASM_DISCOVERY_DIR=####ASM disk location insdie the container. By default it is /dev###### -COMMON_OS_PWD_FILE=###Pass the file name to setup grid and oracle user password. If you specify ORACLE_PWD_FILE, GRID_PWD_FILE, and DB_PWD_FILE then you do not need to specify this env variable### -ORACLE_PWD_FILE=###Pass the file name to set the password for oracle user.### -GRID_PWD_FILE=###Pass the file name to set the password for grid user.### -DB_PWD_FILE=###Pass the file name to set the password for DB user i.e. sys.### -REMOVE_OS_PWD_FILES=###Set this env variable to true to remove pwd key file and password file after resetting password.### -CONTAINER_DB_FLAG=###Default value is set to true to create container database. 
Set this to false if you do not want to create container database.### -``` - -## Section 8: Environment Variables for the Second and Subsequent Nodes - -This section provides the details about the environment variables that can be used for all additional nodes added to an existing cluster. - -```bash -OP_TYPE=###Specify the Operation TYPE. It can accept 2 values INSTALL OR ADDNODE### -EXISTING_CLS_NODES=###Specify the Existing Node of the cluster which you want to join. If you have 2 nodes in the cluster and you are trying to add the third node then specify existing 2 nodes of the clusters and separate them by comma.#### -NODE_VIP=###Specify the Node VIP### -VIP_HOSTNAME=###Specify the VIP hostname### -PRIV_IP=###Specify the Private IP### -PRIV_HOSTNAME=###Specify the Private Hostname### -PUBLIC_IP=###Specify the public IP### -PUBLIC_HOSTNAME=###Specify the public hostname### -SCAN_NAME=###Specify the scan name### -SCAN_IP=###Specify this if you do not have DNS server### -ASM_DEVICE_LIST=###Specify the ASM Disk lists. -DOMAIN=###Default value set to example.com### -ORACLE_SID=###Default value set to ORCLCDB### -DEFAULT_GATEWAY=###Default gateway. You need this env variable if containers will be running on multiple hosts.#### -CMAN_HOSTNAME=###Connection Manager Host Name### -CMAN_IP=###Connection manager Host IP### -ASM_DISCOVERY_DIR=####ASM disk location inside the container. By default it is /dev###### -COMMON_OS_PWD_FILE=###You need to pass the file name to setup grid and oracle user password. If you specify ORACLE_PWD_FILE, GRID_PWD_FILE, and DB_PWD_FILE then you do not need to specify this env variable### -ORACLE_PWD_FILE=###You need to pass the file name to set the password for oracle user.### -GRID_PWD_FILE=###You need to pass the file name to set the password for grid user.### -DB_PWD_FILE=###You need to pass the file name to set the password for DB user i.e. sys.### -REMOVE_OS_PWD_FILES=###You need to set this to true to remove pwd key file and password file after resetting password.### -``` - -## Section 9: Building a Patched Oracle RAC Container Image - -If you want to build a patched image based on a base 21.3.0 container image, then refer to the GitHub page [Example of how to create a patched database image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch). - -## Section 10 : Sample Container Files for Older Releases - -### Docker - -This project offers sample container files for Oracle Grid Infrastructure and Oracle Real Application Clusters for dev and test: - -- Oracle Database 19c Oracle Grid Infrastructure (19.3) for Linux x86-64 -- Oracle Database 19c (19.3) for Linux x86-64 - -- Oracle Database 18c Oracle Grid Infrastructure (18.3) for Linux x86-64 - -- Oracle Database 18c (18.3) for Linux x86-64 - -- Oracle Database 12c Release 2 Oracle Grid Infrastructure (12.2.0.1.0) for Linux x86-64 - -- Oracle Database 12c Release 2 (12.2.0.1.0) Enterprise Edition for Linux x86-64 - - **Notes:** - -- Note that the Oracle RAC on Docker Container releases are supported only for test and development environments, but not for production environments. - -- If you are planning to build and deploy Oracle RAC 18.3.0, you need to download Oracle 18.3.0 Grid Infrastructure and Oracle Database 18.3.0 Database. 
- - - You also need to download Patch# p28322130_183000OCWRU_Linux-x86-64.zip from [Oracle Technology Network](https://www.oracle.com/technetwork/database/database-technologies/clusterware/downloads/docker-4418413.html). - - - Stage it under dockerfiles/18.3.0 folder. - -- If you are planning to build and deploy Oracle RAC 12.2.0.1, you need to download Oracle 12.2.0.1 Grid Infrastructure and Oracle Database 12.2.0.1 Database. - - - You also need to download Patch# p27383741_122010_Linux-x86-64.zip from [Oracle Technology Network](https://www.oracle.com/technetwork/database/database-technologies/clusterware/downloads/docker-4418413.html). - - - Stage it under dockerfiles/12.2.0.1 folder. - -### Podman - -This project offers sample container files for Oracle Grid Infrastructure and Oracle Real Application Clusters for dev and test: - -- Oracle Database 19c Oracle Grid Infrastructure (19.3) for Linux x86-64 -- Oracle Database 19c (19.3) for Linux x86-64 - -**Notes:** -- Because Oracle RAC on Podman is supported on 19c from 19.16 or later, you must download the grid release update (RU) from [support.oracle.com](https://support.oracle.com/portal/). - -- For RAC on Podman for v19.22, download following one-offs from [support.oracle.com](https://support.oracle.com/portal/) - - `35943157` - - `35940989` - -- Before starting the next step, you must edit `docker-images/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/19.3.0/Dockerfile`, change `oraclelinux:7-slim` to `oraclelinux:8`, and save the file. - -- You must add `CV_ASSUME_DISTID=OEL8` inside the `Dockerfile` as an env variable. - -- Once the `19.3.0` Oracle RAC on Podman image is built, start building patched image with the download 19.16 RU and one-offs. To build the patch the image, refer [Example of how to create a patched database image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch). - -## Section 11 : Support - -### Docker Support - -At the time of this release, Oracle RAC on Docker is supported only on Oracle Linux 7. To see current details, refer the [Real Application Clusters Installation Guide for Docker Containers Oracle Linux x86-64](https://docs.oracle.com/en/database/oracle/oracle-database/21/racdk/oracle-rac-on-docker.html). - -### Podman Support - -At the time of this release, Oracle RAC on Podman is supported for Oracle Linux 8.5 later. To see current Linux support certifications, refer [Oracle RAC on Podman Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/install-and-upgrade.html) - -## Section 12 : License - -To download and run Oracle Grid and Database, regardless of whether inside or outside a container, you must download the binaries from the Oracle website and accept the license indicated on that page. - -All scripts and files hosted in this repository which are required to build the container images are, unless otherwise noted, released under UPL 1.0 license. - -## Section 13 : Copyright - -Copyright (c) 2014-2024 Oracle and/or its affiliates. +# Oracle Real Application Clusters in Linux Containers + +Learn about container deployment options for Oracle Real Application Clusters (Oracle RAC) Release 21c + +## Overview of Running Oracle RAC in Containers + +Oracle Real Application Clusters (Oracle RAC) is an option for the award-winning Oracle Database Enterprise Edition. 
Oracle RAC is a cluster database with a shared cache architecture that overcomes the limitations of traditional shared-nothing and shared-disk approaches to provide highly scalable and available database solutions for all business applications. + +Oracle RAC uses Oracle Clusterware as a portable cluster software that allows clustering of independent servers so that they cooperate as a single system and Oracle Automatic Storage Management (Oracle ASM) to provide simplified storage management that is consistent across all servers and storage platforms. +Oracle Clusterware and Oracle ASM are part of the Oracle Grid Infrastructure, which bundles both solutions in an easy-to-deploy software package. For more information on Oracle RAC Database 21c refer to the [Oracle Database documentation](http://docs.oracle.com/en/database/). + +This guide helps you install Oracle RAC on Containers on Host Machines as explained in detail below. With the current release, you prepare the host machine, build or use pre-built Oracle RAC Container Images v21c, and setup Oracle RAC on Single or Multiple Host machines with Oracle ASM. +In this installation guide, we use [Podman](https://docs.podman.io/en/v3.0/) to create Oracle RAC Containers and manage them. + +## Using this Documentation +To create an Oracle RAC environment, follow these steps: + +- [Oracle Real Application Clusters in Linux Containers](#oracle-real-application-clusters-in-linux-containers) + - [Overview of Running Oracle RAC in Containers](#overview-of-running-oracle-rac-in-containers) + - [Using this Documentation](#using-this-documentation) + - [Preparation Steps for running Oracle RAC in containers](#preparation-steps-for-running-oracle-rac-database-in-containers) + - [Getting Oracle RAC Database Container Images](#getting-oracle-rac-database-container-images) + - [Building Oracle RAC Database Container Image](#building-oracle-rac-database-container-image) + - [Building Oracle RAC Database Container Slim Image](#building-oracle-rac-database-container-slim-image) + - [Network Management](#network-management) + - [Password Management](#password-management) + - [Oracle RAC on Containers Deployment Scenarios](#oracle-rac-on-containers-deployment-scenarios) + - [Oracle RAC Containers on Podman](#oracle-rac-containers-on-podman) + - [Setup Using Oracle RAC Image](#1-setup-using-oracle-rac-container-image) + - [Setup Using Oracle RAC Slim Image](#2-setup-using-oracle-rac-container-slim-image) + - [Connecting to an Oracle RAC Database](#connecting-to-an-oracle-rac-database) + - [Deletion of Node from Oracle RAC Cluster](#deletion-of-node-from-oracle-rac-cluster) + - [Building a Patched Oracle RAC Container Image](#building-a-patched-oracle-rac-container-image) + - [Cleanup](#cleanup) + - [Sample Container Files for Older Releases](#sample-container-files-for-older-releases) + - [Support](#support) + - [License](#license) + - [Copyright](#copyright) + +## Preparation Steps for running Oracle RAC Database in containers + +Before you proceed to the next section, you must complete each of the steps listed in this section and complete the following prerequisites. 
+ +* Refer to the following sections in the publication [Oracle Real Application Clusters Installation Guide for Podman](https://docs.oracle.com/cd/F39414_01/racpd/oracle-real-application-clusters-installation-guide-podman-oracle-linux-x86-64.pdf) for Podman Oracle Linux x86-64 to complete the preparation steps for Oracle RAC on Container deployment: + * Overview of Oracle RAC on Podman + * Host Preparation for Oracle RAC on Podman + * Podman Host Server Configuration + * **Note**: As we are following command line installation for Oracle RAC on containers, we don't need X Window System to be configured + * Podman Containers and Oracle RAC Nodes + * Provisioning the Podman Host Server + * Podman Host Preparation + * Preparing for Podman Container Installation + * Installing Podman Engine + * Allocate Linux Resources for Oracle Grid Infrastructure Deployment + * How to Configure Podman for SELinux Mode +* Install `git` from dnf or yum repository and clone the git repo. We clone this repo to a path called `` and refer to it. +* Create a NFS Volume if you are planning to use NFS Storage for ASM Devices. See the section `Configuring NFS for Storage for Oracle RAC on Podman` in [Oracle Real Application Clusters Installation Guide for Podman](https://docs.oracle.com/cd/F39414_01/racpd/oracle-real-application-clusters-installation-guide-podman-oracle-linux-x86-64.pdf) for more details. + + **Note:** You can skip this step if you are planning to use block devices for storage. +* If SELinux is enabled on the Podman host, then ensure to create an SELinux policy for Oracle RAC on Podman. +For details about this procedure, see `How to Configure Podman for SELinux Mode` in the publication [Oracle Real Application Clusters Installation Guide for Podman Oracle Linux x86-64](https://docs.oracle.com/en/database/oracle/oracle-database/21/racpd/target-configuration-oracle-rac-podman.html#GUID-59138DF8-3781-4033-A38F-E0466884D008). + + Also, When you are performing the installation using any files from podman host machine where SELinux is enabled, you need to make sure they are labeled correctly with `container_file_t` context. You can use `ls -lZ ` to see the security context set for those files. + +* To resolve VIPs and SCAN IPs in this guide, we use a preconfigured DNS server in our environment. +Replace environment variables `-e DNS_SERVERS=10.0.20.25`,`--dns=10.0.20.25`,`-e DOMAIN=example.info` and `--dns-search=example.info` parameters in the examples in this guide based on your environment. + +* The Oracle RAC `Containerfile` does not contain any Oracle software binaries. Download the following software from the [Oracle Technology Network](https://www.oracle.com/technetwork/database/enterprise-edition/downloads/index.html), if you are planning to build Oracle RAC Container Images in the next section. +However, if you are using pre-built RAC Images from the Oracle Container Registry, then you can skip this step. + - Oracle Grid Infrastructure 21c (21) for Linux x86-64 + - Oracle Database 21c (21) for Linux x86-64 + +**Notes** +* If the Podman bridge network is not available outside your host, you can use the Oracle Connection Manager [CMAN Container](../OracleConnectionManager/README.md) to access the Oracle RAC Database from outside the host. + +## Getting Oracle RAC Database Container Images + +Oracle RAC is supported for production use on Podman starting with Oracle Database 19c (19.16) and Oracle Database 21c (21.7). 
You can also deploy Oracle RAC on Podman using the pre-built images available on the Oracle Container Registry.
+Refer to this [documentation](https://docs.oracle.com/en/operating-systems/oracle-linux/docker/docker-UsingDockerRegistries.html#docker-registry) for details on using the Oracle Container Registry.
+
+Example of pulling an Oracle RAC Database Image from the Oracle Container Registry:
+```bash
+podman pull container-registry.oracle.com/database/rac_ru:21.16
+podman tag container-registry.oracle.com/database/rac_ru:21.16 localhost/oracle/database-rac:21c
+```
+
+If you are using pre-built Oracle RAC images from the [Oracle Container Registry](https://container-registry.oracle.com), then you can skip the section [Building Oracle RAC Database Container Image](#building-oracle-rac-database-container-image).
+
+**Notes:**
+* The Oracle Container Registry does not contain an Oracle RAC Slim Image. If you are planning to use the Oracle RAC Slim Image, then refer to [Building Oracle RAC Database Container Slim Image](#building-oracle-rac-database-container-slim-image).
+
+* If you want to build the latest Oracle RAC Image from this GitHub repository instead of using a pre-built image, then follow the instructions below to build the `Oracle RAC Container Image` and the `Oracle RAC Container Slim Image`.
+
+* The following sections assume that you have completed all of the prerequisites and steps in [Preparation Steps for running Oracle RAC Database in containers](#preparation-steps-for-running-oracle-rac-database-in-containers), based on your environment.
+
+  **Note:** Ensure that you do not uncompress the binaries and patches manually before building the Oracle RAC Image.
+
+* To assist in building the images, you can use the [`buildContainerImage.sh`](./containerfiles/buildContainerImage.sh) script. See the following sections for instructions and usage.
+
+* Ensure that you have enough space in `/var/lib/containers` while building the Oracle RAC Image. Also, if required, use `export TMPDIR=` so that Podman uses another folder as its temporary cache location instead of the default `/tmp` location.
+
+### Building Oracle RAC Database Container Image
+In this document, an `Oracle RAC Database Container Image` refers to an Oracle RAC Database Container Image with the Oracle Grid Infrastructure and Oracle Database Software Binaries installed during Oracle RAC Podman Image creation. The resulting images will contain the Oracle Grid Infrastructure and Oracle RAC Database Software Binaries.
+
+Before you begin, you must download the Oracle Grid Infrastructure and Oracle RDBMS Binaries and stage them under `/docker-images/OracleDatabase/RAC/OracleRealApplicationCluster/containerfiles/`.
+
+Use the following command to build the Oracle RAC Database Container Image:
+```bash
+./buildContainerImage.sh -v 
+```
+Example: To build the Oracle RAC Database Container Image for version 21.3.0, use the following command:
+```bash
+./buildContainerImage.sh -v 21.3.0
+```
+
+Retag it as follows, because this guide refers to this image as `localhost/oracle/database-rac:21c` throughout:
+```bash
+podman tag localhost/oracle/database-rac:21.3.0 localhost/oracle/database-rac:21c
+```
+
+### Building Oracle RAC Database Container Slim Image
+In this document, an `Oracle RAC Database Container Slim Image` refers to a container image that does not include installation of the Oracle Grid Infrastructure and Oracle Database Software Binaries during the Oracle RAC Database Container Image creation.
+To build an Oracle RAC Database Container Slim Image that doesn't contain the Oracle Grid Infrastructure and Oracle RAC Database software, run the following command:
+```bash
+./buildContainerImage.sh -v -i -o '--build-arg SLIMMING=true'
+```
+Example: To build the Oracle RAC Database Container Slim Image for version 21.3.0, use the following command:
+```bash
+./buildContainerImage.sh -v 21.3.0 -i -o '--build-arg SLIMMING=true'
+```
+To build an Oracle RAC Database Container Slim Image, you need to use `--build-arg SLIMMING=true`.
+
+To change the base image when building Oracle RAC Database Container Images, use `--build-arg BASE_OL_IMAGE=oraclelinux:8`.
+
+Retag it as follows, because this guide refers to this image as `localhost/oracle/database-rac:21c-slim` throughout:
+```bash
+podman tag localhost/oracle/database-rac:21.3.0-slim localhost/oracle/database-rac:21c-slim
+```
+
+**Notes**
+- Usage of `./buildContainerImage.sh`:
+  ```text
+  -v: version to build
+  -i: ignore the MD5 checksums
+  -t: user-defined image name and tag (e.g., image_name:tag). Default is set to `oracle/database-rac:` for the RAC Image and `oracle/database-rac:-slim` for the RAC slim image.
+  -o: passes on a container build option (e.g., --build-arg SLIMMING=true for the slim image, --build-arg BASE_OL_IMAGE=oraclelinux:8 to change the base image). The default is "--build-arg SLIMMING=false"
+  ```
+- After building the `21.3.0` Oracle RAC Database Container Image, to apply the 21c RU and build the 21c patched image, refer to [Example of how to create a patched database image](./samples/applypatch/README.md).
+- If you are behind a proxy wall, then you must set the `https_proxy` or `http_proxy` environment variable based on your environment before building the image.
+- In the case of the Oracle RAC Database Container Slim Image, the resulting image will not contain the Oracle Grid Infrastructure and Oracle RAC Database Software Binaries.
+
+## Network Management
+
+Before you start the installation, you must plan your private and public Podman networks. Refer to the section `Podman Host Preparation` in the publication [Oracle Real Application Clusters Installation Guide for Podman](https://docs.oracle.com/cd/F39414_01/racpd/oracle-real-application-clusters-installation-guide-podman-oracle-linux-x86-64.pdf).
+
+You can create a [Podman Network](https://docs.podman.io/en/latest/markdown/podman-network-create.1.html) on every container host so that the containers running within that host can communicate with each other.
+For example, create a Podman network named `rac_pub1_nw` for the public network (`10.0.20.0/24`), and `rac_priv1_nw` (`192.168.17.0/24`) and `rac_priv2_nw` (`192.168.18.0/24`) for the private networks. You can use any network subnet based on your environment.
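+
+After you create the networks using the commands in the following subsections, you can verify that they exist and carry the expected configuration. This is a minimal sanity-check sketch; the network name used here assumes the `rac_pub1_nw` example from this section:
+
+```bash
+# List all Podman networks on this host
+podman network ls
+# Show the subnet, driver, and options (such as MTU) configured for the public network
+podman network inspect rac_pub1_nw
+```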
+ +### Standard Frames MTU Networks Configuration +```bash +ip link show|grep ens +3: ens5: mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000 +4: ens6: mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000 +5: ens7: mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000 +``` + +To run Oracle RAC using Oracle Container Runtime for Podman on a single host, create Podman Bridge networks using the following commands: +```bash +podman network create --driver=bridge --subnet=10.0.20.0/24 rac_pub1_nw +podman network create --driver=bridge --subnet=192.168.17.0/24 rac_priv1_nw --disable-dns --internal +podman network create --driver=bridge --subnet=192.168.18.0/24 rac_priv2_nw --disable-dns --internal +``` + + +To run Oracle RAC using Oracle Container Runtime for Podman on multiple hosts, you must create one of the following: + +a. Create Podman macvlan networks using the following commands: +```bash +podman network create -d macvlan --subnet=10.0.20.0/24 -o parent=ens5 rac_pub1_nw +podman network create -d macvlan --subnet=192.168.17.0/24 -o parent=ens6 rac_priv1_nw --disable-dns --internal +podman network create -d macvlan --subnet=192.168.18.0/24 -o parent=ens7 rac_priv2_nw --disable-dns --internal +``` + + +b. Create Podman ipvlan networks using the following commands: +```bash +podman network create -d ipvlan --subnet=10.0.20.0/24 -o parent=ens5 rac_pub1_nw +podman network create -d ipvlan --subnet=192.168.17.0/24 -o parent=ens6 rac_priv1_nw --disable-dns --internal +podman network create -d ipvlan --subnet=192.168.18.0/24 -o parent=ens7 rac_priv2_nw --disable-dns --internal +``` + +### Jumbo Frames MTU Network Configuration +```bash +ip link show | egrep "ens" +3: ens5: mtu 9000 qdisc mq state UP mode DEFAULT group default qlen 1000 +4: ens6: mtu 9000 qdisc mq state UP mode DEFAULT group default qlen 1000 +5: ens7: mtu 9000 qdisc mq state UP mode DEFAULT group default qlen 1000 +``` +If the MTU on each interface is set to 9000, then you can then run the following commands on each Podman host to extend the maximum payload length for each network to use the entire MTU: +```bash +#Podman bridge networks +podman network create --driver=bridge --subnet=10.0.20.0/24 --opt mtu=9000 rac_pub1_nw +podman network create --driver=bridge --subnet=192.168.17.0/24 --opt mtu=9000 rac_priv1_nw --disable-dns --internal +podman network create --driver=bridge --subnet=192.168.18.0/24 --opt mtu=9000 rac_priv2_nw --disable-dns --internal + +# Podman macvlan networks +podman network create -d macvlan --subnet=10.0.20.0/24 --opt mtu=9000 -o parent=ens5 rac_pub1_nw +podman network create -d macvlan --subnet=192.168.17.0/24 --opt mtu=9000 -o parent=ens6 rac_priv1_nw --disable-dns --internal +podman network create -d macvlan --subnet=192.168.18.0/24 --opt mtu=9000 -o parent=ens7 rac_priv2_nw --disable-dns --internal + +#Podman ipvlan networks +podman network create -d ipvlan --subnet=10.0.20.0/24 --opt mtu=9000 -o parent=ens5 rac_pub1_nw +podman network create -d ipvlan --subnet=192.168.17.0/24 --opt mtu=9000 -o parent=ens6 rac_priv1_nw --disable-dns --internal +podman network create -d ipvlan --subnet=192.168.18.0/24 --opt mtu=9000 -o parent=ens7 rac_priv2_nw --disable-dns --internal +``` +## Password Management +- Specify the secret volume for resetting the grid, oracle, and database user password during node creation or node addition. The volume can be a shared volume among all the containers. 
For example:
+
+  ```bash
+  mkdir /opt/.secrets/
+  ```
+- Generate a password file
+
+  Edit `/opt/.secrets/pwdfile.txt` and seed the password for the grid, oracle, and database users.
+
+  For this deployment scenario, it will be a common password for the grid, oracle, and database users.
+
+  Run the following commands:
+  ```bash
+  cd /opt/.secrets
+  openssl genrsa -out key.pem
+  openssl rsa -in key.pem -out key.pub -pubout
+  openssl pkeyutl -in pwdfile.txt -out pwdfile.enc -pubin -inkey key.pub -encrypt
+  rm -rf /opt/.secrets/pwdfile.txt
+  ```
+- Oracle recommends using Podman secrets inside the containers. To create Podman secrets, run the following commands:
+  ```bash
+  podman secret create pwdsecret /opt/.secrets/pwdfile.enc
+  podman secret create keysecret /opt/.secrets/key.pem
+  ```
+
+- To check the details of the created Podman secrets, run the following commands:
+  ```bash
+  podman secret ls
+  ID                         NAME       DRIVER    CREATED        UPDATED
+  7eb7f573905283c808bdabaff  keysecret  file      13 hours ago   13 hours ago
+  e3ac963fd736d8bc01dcd44dd  pwdsecret  file      13 hours ago   13 hours ago
+
+  podman secret inspect 
+  ```
+Notes:
+- In this example, `pwdsecret` holds the common password used for SSH setup between containers for the oracle, grid, and Oracle RAC database users, and `keysecret` is used to extract secrets inside the Oracle RAC Containers.
+
+## Oracle RAC on Containers Deployment Scenarios
+Oracle RAC can be deployed in various scenarios, such as using NFS or block devices for shared storage, the Oracle RAC Container Image or the Slim Image, user-defined response files, and so on. All are covered in detail in the instructions below.
+
+### Oracle RAC Containers on Podman
+#### [1. Setup Using Oracle RAC Container Image](docs/rac-container/racimage/README.md)
+#### [2. Setup Using Oracle RAC Container Slim Image](docs/rac-container/racslimimage/README.md)
+
+## Connecting to an Oracle RAC Database
+
+**IMPORTANT:** This section assumes that you have successfully created an Oracle RAC Database using the preceding sections.
+
+Refer to [Connecting to an Oracle RAC Database](./docs/CONNECTING.md) for instructions on how to connect to the Oracle RAC Database.
+
+## Deletion of Node from Oracle RAC Cluster
+Refer to [Deleting a Node](./docs/DELETION.md) for instructions on how to delete a node from an existing Oracle RAC Container Cluster.
+
+## Building a Patched Oracle RAC Container Image
+
+If you want to build a patched image based on a base 21.3.0 container image, then refer to the GitHub page [Example of how to create an Oracle RAC Database Container Patched Image](./samples/applypatch/README.md).
+
+## Cleanup
+Refer to [Cleanup Oracle RAC Database Container Environment](./docs/CLEANUP.md) for instructions on how to clean up an Oracle RAC Database Container Environment.
+
+## Sample Container Files for Older Releases
+
+This project offers example container files for Oracle Grid Infrastructure and Oracle Real Application Clusters for dev and test:
+
+* Oracle Database 18c Oracle Grid Infrastructure (18.3) for Linux x86-64
+* Oracle Database 18c (18.3) for Linux x86-64
+* Oracle Database 12c Release 2 Oracle Grid Infrastructure (12.2.0.1.0) for Linux x86-64
+* Oracle Database 12c Release 2 (12.2.0.1.0) Enterprise Edition for Linux x86-64
+
+To install older releases of Oracle RAC on Podman or Oracle RAC on Docker, refer to the [README.md](./docs/README_1.md).
+
+## Support
+
+At the time of this release, Oracle RAC on Podman is supported for Oracle Linux 8.10 or later.
To see the current Linux support certifications, refer to [Oracle RAC on Podman Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/install-and-upgrade.html) + +## License + +To download and run Oracle Grid Infrastructure and Oracle Database, regardless of whether inside or outside a container, you must download the binaries from the Oracle website and accept the license indicated on that page. + +All scripts and files hosted in this repository that are required to build the container images are, unless otherwise noted, released under a UPL 1.0 license. + +## Copyright + +Copyright (c) 2014-2025 Oracle and/or its affiliates. \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/README1.md b/OracleDatabase/RAC/OracleRealApplicationClusters/README1.md new file mode 100644 index 0000000000..ad36e25c55 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/README1.md @@ -0,0 +1,1194 @@ +# Oracle Real Application Clusters in Linux Containers + +Learn about container deployment options for Oracle Real Application Clusters (Oracle RAC) Release 21c (21.3) + +## Overview of Running Oracle RAC in Containers + +Oracle Real Application Clusters (Oracle RAC) is an option to the award-winning Oracle Database Enterprise Edition. Oracle RAC is a cluster database with a shared cache architecture that overcomes the limitations of traditional shared-nothing and shared-disk approaches to provide highly scalable and available database solutions for all business applications. +Oracle RAC uses Oracle Clusterware as a portable cluster software that allows clustering of independent servers so that they cooperate as a single system and Oracle Automatic Storage Management (Oracle ASM) to provide simplified storage management that is consistent across all servers and storage platforms. +Oracle Clusterware and Oracle ASM are part of the Oracle Grid Infrastructure, which bundles both solutions in an easy to deploy software package. + +For more information on Oracle RAC Database 21c refer to the [Oracle Database documentation](http://docs.oracle.com/en/database/). 
+ +## Using this Image + +To create an Oracle RAC environment, complete these steps in order: + +- [Oracle Real Application Clusters in Linux Containers](#oracle-real-application-clusters-in-linux-containers) + - [Overview of Running Oracle RAC in Containers](#overview-of-running-oracle-rac-in-containers) + - [Using this Image](#using-this-image) + - [Section 1 : Prerequisites for running Oracle RAC in containers](#section-1--prerequisites-for-running-oracle-rac-in-containers) + - [Section 2: Building Oracle RAC Database Container Images](#section-2-building-oracle-rac-database-container-images) + - [Oracle RAC Container Image for Docker](#oracle-rac-container-image-for-docker) + - [Oracle RAC Container Image for Podman](#oracle-rac-container-image-for-podman) + - [Section 3: Network and Password Management](#section-3--network-and-password-management) + - [Section 4: Oracle RAC on Docker](#section-4-oracle-rac-on-docker) + - [Section 4.1 : Prerequisites for Running Oracle RAC on Docker](#section-41--prerequisites-for-running-oracle-rac-on-docker) + - [Section 4.2: Setup Oracle RAC Container on Docker](#section-42-setup-oracle-rac-container-on-docker) + - [Deploying Oracle RAC on Container with Block Devices on Docker](#deploying-oracle-rac-on-container-with-block-devices-on-docker) + - [Deploying Oracle RAC on Container With Oracle RAC Storage Container](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container) + - [Assign networks to Oracle RAC containers](#assign-networks-to-oracle-rac-containers) + - [Start the first container](#start-the-first-container) + - [Connect to the Oracle RAC container](#connect-to-the-oracle-rac-container) + - [Section 4.3: Adding an Oracle RAC Node using a Docker Container](#section-43-adding-an-oracle-rac-node-using-a-docker-container) + - [Deploying Oracle RAC Additional Node on Container with Block Devices on Docker](#deploying-oracle-rac-additional-node-on-container-with-block-devices-on-docker) + - [Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Docker](#deploying-oracle-rac-additional-node-on-container-with-oracle-rac-storage-container-on-docker) + - [Assign Network to additional Oracle RAC container](#assign-network-to-additional-oracle-rac-container) + - [Start Oracle RAC racnode2 container](#start-oracle-rac-racnode2-container) + - [Connect to the Oracle RAC racnode2 container](#connect-to-the-oracle-rac-racnode2-container) + - [Section 4.4: Setup Oracle RAC Container on Docker with Docker Compose](#section-44-setup-oracle-rac-container-on-docker-with-docker-compose) + - [Section 5: Oracle RAC on Podman](#section-5-oracle-rac-on-podman) + - [Section 5.1 : Prerequisites for Running Oracle RAC on Podman](#section-51--prerequisites-for-running-oracle-rac-on-podman) + - [Section 5.2: Setup RAC Containers on Podman](#section-52-setup-rac-containers-on-podman) + - [Deploying Oracle RAC Containers with Block Devices on Podman](#deploying-oracle-rac-containers-with-block-devices-on-podman) + - [Deploying Oracle RAC on Container With Oracle RAC Storage Container on Podman](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container-on-podman) + - [Assign networks to Oracle RAC containers Created Using Podman](#assign-networks-to-oracle-rac-containers-created-using-podman) + - [Start the first container Created Using Podman](#start-the-first-container-created-using-podman) + - [Connect to the Oracle RAC container Created Using Podman](#connect-to-the-oracle-rac-container-created-using-podman) + - 
[Section 5.3: Adding a Oracle RAC Node using a container on Podman](#section-53-adding-a-oracle-rac-node-using-a-container-on-podman) + - [Deploying Oracle RAC Additional Node on Container with Block Devices on Podman](#deploying-oracle-rac-additional-node-on-container-with-block-devices-on-podman) + - [Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Podman](#deploying-oracle-rac-additional-node-on-container-with-oracle-rac-storage-container-on-podman) + - [Assign Network to additional Oracle RAC container Created Using Podman](#assign-network-to-additional-oracle-rac-container-created-using-podman) + - [Start Oracle RAC container](#start-oracle-rac-container) + - [Section 5.4: Setup Oracle RAC Container on Podman with Podman Compose](#section-54-setup-oracle-rac-container-on-podman-with-podman-compose) + - [Section 6: Connecting to an Oracle RAC Database](#section-6-connecting-to-an-oracle-rac-database) + - [Section 7: Environment Variables for the First Node](#section-7-environment-variables-for-the-first-node) + - [Section 8: Environment Variables for the Second and Subsequent Nodes](#section-8-environment-variables-for-the-second-and-subsequent-nodes) + - [Section 9: Building a Patched Oracle RAC Container Image](#section-9-building-a-patched-oracle-rac-container-image) + - [Section 10 : Sample Container Files for Older Releases](#section-10--sample-container-files-for-older-releases) + - [Docker](#docker) + - [Podman](#podman) + - [Section 11 : Support](#section-11--support) + - [Docker Support](#docker-support) + - [Podman Support](#podman-support) + - [Section 12 : License](#section-12--license) + - [Section 13 : Copyright](#section-13--copyright) + +## Section 1 : Prerequisites for running Oracle RAC in containers + +Before you proceed to section two, you must complete each of the steps listed in this section. + +To review the resource requirements for Oracle RAC, see Oracle Database 21c Release documentation [Oracle Grid Infrastructure Installation and Upgrade Guide](https://docs.oracle.com/en/database/oracle/oracle-database/21/cwlin/index.html) + +Complete each of the following prerequisites: + +1. Ensure that each container that you will deploy as part of your cluster meets the minimum hardware requirements for Oracle RAC and Oracle Grid Infrastructure software. +2. Ensure all data files, control files, redo log files, and the server parameter file (`SPFILE`) used by the Oracle RAC database reside on shared storage that is accessible by all the Oracle RAC database instances. An Oracle RAC database is a shared-everything database, so each Oracle RAC Node must have the same access. +3. Configure the following addresses manually in your DNS. + + - Public IP address for each container + - Private IP address for each container + - Virtual IP address for each container + - Three single client access name (SCAN) addresses for the cluster. +4. If you are planning to set up RAC on Docker, refer Docker Host machine details in [Section 4.1](#section-41--prerequisites-for-running-oracle-rac-on-docker) +5. If you are planning to set up RAC on Podman, refer Podman Host machine details in [Section 5.1](#section-51--prerequisites-for-running-oracle-rac-on-podman) +6. Block storage: If you are planning to use block devices for shared storage, then allocate block devices for OCR, voting and database files. +7. 
NFS storage: If you are planning to use NFS storage for OCR, Voting Disk and Database files, then configure NFS storage and export at least one NFS mount. You can also use the `/docker-images/OracleDatabase/RAC/OracleRACStorageServer` container for a shared file system on NFS.
+8. Set `/etc/sysctl.conf` parameters: For Oracle RAC, you must set the following parameters at the host level in `/etc/sysctl.conf`:
+    ```INI
+    fs.aio-max-nr = 1048576
+    fs.file-max = 6815744
+    net.core.rmem_max = 4194304
+    net.core.rmem_default = 262144
+    net.core.wmem_max = 1048576
+    net.core.wmem_default = 262144
+    ```
+9. List and reload parameters: After the `/etc/sysctl.conf` file is modified, run the following commands:
+    ```bash
+    sysctl -a
+    sysctl -p
+    ```
+10. To resolve VIPs and SCAN IPs, we are using a DNS container in this guide. Before proceeding to the next step, create a [DNS server container](../OracleDNSServer/README.md).
+**Note:** If you have a pre-configured DNS server in your environment, then you can replace the `-e DNS_SERVERS=172.16.1.25`, `--dns=172.16.1.25`, `-e DOMAIN=example.com` and `--dns-search=example.com` parameters in **Section 2: Building Oracle RAC Database Podman Install Images** with the `DOMAIN_NAME` and `DNS_SERVER` based on your environment.
+11. If you are running RAC on Podman, make sure that you have installed the `podman-docker` rpm package so that podman commands can be run using the `docker` utility.
+12. The Oracle RAC `Dockerfile` does not contain any Oracle software binaries. Download the following software from the [Oracle Technology Network](https://www.oracle.com/technetwork/database/enterprise-edition/downloads/index.html) and stage them under the `/docker-images/OracleDatabase/RAC/OracleRealApplicationCluster/dockerfiles/` folder.
+
+    - Oracle Database 21c Grid Infrastructure (21.3) for Linux x86-64
+    - Oracle Database 21c (21.3) for Linux x86-64
+
+    - If you are deploying Oracle RAC on Podman, then complete the following steps; otherwise, skip to the next section.
+      - Because Oracle RAC on Podman is supported on Release 21c (21.7) or later, you must download the grid release update (RU) from [support.oracle.com](https://support.oracle.com/portal/).
+
+      - In this example, we download the following latest one-off patches for release 21.13 from [support.oracle.com](https://support.oracle.com/portal/):
+        - `36031790`
+        - `36041222`
+13. Ensure that you have git configured on your host machine ([refer to this page](https://docs.oracle.com/en/learn/ol-git-start/index.html) for instructions). Clone this git repo by running the following command:
+```bash
+git clone git@github.com:oracle/docker-images.git
+```
+
+**Notes**
+
+- If you are planning to use a `DNSServer` container for SCAN IP and VIP resolution, then configure the DNSServer. For development and testing purposes only, use the Oracle `DNSServer` image to deploy a container providing DNS resolutions. Please check [OracleDNSServer](../OracleDNSServer/README.md) for details.
+- The `OracleRACStorageServer` docker image can be used only for development and testing purposes. Please check [OracleRACStorageServer](../OracleRACStorageServer/README.md) for details.
+- When you want to deploy RAC on Docker or Podman on a single host, create bridge networks for the containers.
+- When you want to deploy RAC on Docker or Podman on multiple hosts, create macvlan networks for the containers.
+- To run Oracle RAC using Podman on multiple hosts, refer to [Podman macvlan network](https://docs.podman.io/en/latest/markdown/podman-network-create.1.html).
+ To run Oracle RAC using Oracle Container Runtime for Docker on multiple hosts, refer to [Docker macvlan network](https://docs.docker.com/network/macvlan/).
+- If the Docker or Podman bridge network is not available outside your host, you can use the Oracle Connection Manager [CMAN image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleConnectionManager) to access the Oracle RAC Database from outside the host.
+
+## Section 2: Building Oracle RAC Database Container Images
+
+**IMPORTANT:** This section assumes that you have gone through all the prerequisites in Section 1 and completed all the steps, based on your environment. Do not uncompress the binaries and patches.
+
+To assist in building the images, you can use the [`buildContainerImage.sh`](https://github.com/oracle/docker-images/blob/master/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/buildContainerImage.sh) script. See the following for instructions and usage.
+
+### Oracle RAC Container Image for Docker
+
+If you are planning to deploy the Oracle RAC container image on Podman, skip to the section [Oracle RAC Container Image for Podman](#oracle-rac-container-image-for-podman).
+
+```bash
+cd /docker-images/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles
+./buildContainerImage.sh -v -o '--build-arg BASE_OL_IMAGE=oraclelinux:7 --build-arg SLIMMING=true|false'
+
+# for example ./buildContainerImage.sh -v 21.3.0 -o '--build-arg BASE_OL_IMAGE=oraclelinux:7 --build-arg SLIMMING=false'
+```
+
+### Oracle RAC Container Image for Podman
+
+If you are planning to deploy the Oracle RAC container image on Docker, skip to the section [Oracle RAC Container Image for Docker](#oracle-rac-container-image-for-docker).
+
+```bash
+cd /docker-images/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles
+./buildContainerImage.sh -v -o '--build-arg BASE_OL_IMAGE=oraclelinux:8 --build-arg SLIMMING=true|false'
+
+# for example ./buildContainerImage.sh -v 21.3.0 -o '--build-arg BASE_OL_IMAGE=oraclelinux:8 --build-arg SLIMMING=false'
+```
+
+- After the `21.3.0` Oracle RAC container image is built, start building a patched image with the downloaded 21.7 RU and one-offs. To build the patched image, refer to [Example of how to create a patched database image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch).
+
+
+**Notes**
+
+- The resulting images will contain the Oracle Grid Infrastructure binaries and Oracle RAC Database binaries.
+- If you are behind a proxy wall, then you must set the `https_proxy` environment variable based on your environment before building the image.
+
+## Section 3: Network and Password Management
+
+1. Before you start the installation, you must plan your private and public networks. You can create a network bridge on every container host so containers running within that host can communicate with each other.
+   - For example, create `rac_pub1_nw` for the public network (`172.16.1.0/24`) and `rac_priv1_nw` (`192.168.17.0/24`) for a private network. You can use any network subnet for testing.
+   - In this document, we reference the public network on `172.16.1.0/24` and the private network on `192.168.17.0/24`.
+ + ```bash + docker network create --driver=bridge --subnet=172.16.1.0/24 rac_pub1_nw + docker network create --driver=bridge --subnet=192.168.17.0/24 rac_priv1_nw + ``` + + - To run Oracle RAC using Oracle Container Runtime for Docker on multiple hosts, you will need to create a [Docker macvlan network](https://docs.docker.com/network/macvlan/) using the following commands: + + ```bash + docker network create -d macvlan --subnet=172.16.1.0/24 --gateway=172.16.1.1 -o parent=eth0 rac_pub1_nw + docker network create -d macvlan --subnet=192.168.17.0/24 --gateway=192.168.17.1 -o parent=eth1 rac_priv1_nw + ``` + +2. Specify the secret volume for resetting the grid, oracle, and database user password during node creation or node addition. The volume can be a shared volume among all the containers. For example: + + ```bash + mkdir /opt/.secrets/ + openssl rand -out /opt/.secrets/pwd.key -hex 64 + ``` + + - Edit the `/opt/.secrets/common_os_pwdfile` and seed the password for the grid, oracle and database users. For this deployment scenario, it will be a common password for the grid, oracle, and database users. Run the command: + + ```bash + openssl enc -aes-256-cbc -salt -in /opt/.secrets/common_os_pwdfile -out /opt/.secrets/common_os_pwdfile.enc -pass file:/opt/.secrets/pwd.key + rm -f /opt/.secrets/common_os_pwdfile + ``` + +3. Create `rac_host_file` on both Podman and Docker hosts: + + ```bash + mkdir /opt/containers/ + touch /opt/containers/rac_host_file + ``` + +**Notes** + +- To run Oracle RAC using Podman on multiple hosts, refer [Podman macvlan network](https://docs.podman.io/en/latest/markdown/podman-network-create.1.html). +To run Oracle RAC using Oracle Container Runtime for Docker on multiple hosts, refer [Docker macvlan network](https://docs.docker.com/network/macvlan/). +- If the Docker or Podman bridge network is not available outside your host, you can use the Oracle Connection Manager [CMAN image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleConnectionManager) to access the Oracle RAC Database from outside the host. +- If you want to specify a different password for each of the user accounts, then create three different files, encrypt them under `/opt/.secrets`, and pass the file name to the container using the environment variable. Environment variables can be ORACLE_PWD_FILE for the oracle user, GRID_PWD_FILE for the grid user, and DB_PWD_FILE for the database password. +- If you want to use a common password for the oracle, grid, and database users, then you can assign a password file name to COMMON_OS_PWD_FILE environment variable. + +## Section 4: Oracle RAC on Docker + +If you are deploying Oracle RAC On Podman, skip to the [Section 5: Oracle RAC on Podman](#section-5-oracle-rac-on-podman). + +**Note** Oracle RAC is supported for production use on Docker starting with Oracle Database 21c (21.3). On earlier releases, Oracle RAC on Docker is supported for development and and test environments. To deploy Oracle RAC on Docker, use the pre-built images available on the Oracle Container Registry. Execute the following steps in a given order to deploy RAC on Docker: + +To create an Oracle RAC environment on Docker, complete each of these steps in order. + +### Section 4.1 : Prerequisites for Running Oracle RAC on Docker + +To run Oracle RAC on Docker, you must install and configure [Oracle Container Runtime for Docker](https://docs.oracle.com/cd/E52668_01/E87205/html/index.html) on Oracle Linux 7. 
+
+**IMPORTANT:** Completing the prerequisite steps is a requirement for successful configuration.
+
+Complete each prerequisite step in order, customized for your environment.
+
+1. Verify that you have enough memory and CPU resources available for all containers. For this `README.md`, we used the following configuration:
+
+  - 2 Docker hosts
+  - CPU Cores: 1 socket with 4 cores, with 2 threads for each core, Intel® Xeon® Platinum 8167M CPU at 2.00 GHz
+  - RAM: 60 GB
+  - Swap memory: 32 GB
+  - Oracle Linux 7.9 or later with the Unbreakable Enterprise Kernel 6: 5.4.17-2102.200.13.el7uek.x86_64
+
+2. Oracle RAC must run certain processes in real-time mode. To run processes inside a container in real-time mode, you must make changes to the Docker configuration files. For details, see the [`dockerd` documentation](https://docs.docker.com/engine/reference/commandline/dockerd/#examples). Edit the Docker daemon settings based on your Docker version:
+
+  - Check the Docker version. In the following output, the Oracle `docker-engine` version is 19.03.
+
+    ```bash
+    rpm -qa | grep docker
+    docker-cli-19.03.11.ol-9.el7.x86_64
+    docker-engine-19.03.11.ol-9.el7.x86_64
+    ```
+
+  - If the Oracle `docker-engine` version is greater than or equal to 19.03, edit `/usr/lib/systemd/system/docker.service` and add the additional parameter to the `ExecStart` line in the `[Service]` section for the `dockerd` daemon:
+
+    ```bash
+    ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --cpu-rt-runtime=950000
+    ```
+
+  - If the Oracle `docker-engine` version is less than 19.03, edit `/etc/sysconfig/docker` and add the following:
+
+    ```bash
+    OPTIONS='--selinux-enabled --cpu-rt-runtime=950000'
+    ```
+
+3. After you have modified the `dockerd` configuration, reload the daemon and restart Docker to apply the changes:
+
+   ```bash
+   systemctl daemon-reload
+   systemctl stop docker
+   systemctl start docker
+   ```
+
+### Section 4.2: Setup Oracle RAC Container on Docker
+
+This section provides a step-by-step procedure to deploy Oracle RAC in containers, using either block devices or a storage container. For details about the environment variables, refer to [Section 7: Environment Variables for the First Node](#section-7-environment-variables-for-the-first-node).
+
+Refer to [Section 3: Network and Password Management](#section-3-network-and-password-management) and set up the network on the container host based on your Oracle RAC environment. If you have already completed the network setup, proceed to the next step.
+
+#### Deploying Oracle RAC on Container with Block Devices on Docker
+
+If you are using an NFS volume, skip to the section [Deploying Oracle RAC on Container With Oracle RAC Storage Container](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container).
+
+Make sure the ASM devices do not have any existing file system. To clear any other file system from the devices, use the following command:
+
+  ```bash
+  dd if=/dev/zero of=/dev/xvde bs=8k count=10000
+  ```
+
+Repeat for each shared block device. In the preceding example, `/dev/xvde` is a shared Xen virtual block device.
+
+Now create the Oracle RAC container using the image.
You can use the following example to create a container: + + ```bash +docker create -t -i \ + --hostname racnoded1 \ + --volume /boot:/boot:ro \ + --volume /dev/shm \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --device=/dev/oracleoci/oraclevdd:/dev/asm_disk1 \ + --device=/dev/oracleoci/oraclevde:/dev/asm_disk2 \ + --privileged=false \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + -e DNS_SERVERS="172.16.1.25" \ + -e NODE_VIP=172.16.1.130 \ + -e VIP_HOSTNAME=racnoded1-vip \ + -e PRIV_IP=192.168.17.100 \ + -e PRIV_HOSTNAME=racnoded1-priv \ + -e PUBLIC_IP=172.16.1.100 \ + -e PUBLIC_HOSTNAME=racnoded1 \ + -e SCAN_NAME=racnodedc1-scan \ + -e OP_TYPE=INSTALL \ + -e DOMAIN=example.com \ + -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ + -e ASM_DISCOVERY_DIR=/dev \ + -e CMAN_HOSTNAME=racnodedc1-cman \ + -e CMAN_IP=172.16.1.164 \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + -e RESET_FAILED_SYSTEMD="true" \ + --restart=always --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ + --cpu-rt-runtime=95000 --ulimit rtprio=99 \ + --name racnoded1 \ + oracle/database-rac:21.3.0 +``` + +**Note:** Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. + +#### Deploying Oracle RAC on Container With Oracle RAC Storage Container + +If you are using block devices, skip to the section [Deploying Oracle RAC on Container with Block Devices on Docker](#deploying-oracle-rac-on-container-with-block-devices-on-docker) + +Now create the Oracle RAC container using the image. You can use the following example to create a container: + + ```bash + docker create -t -i \ + --hostname racnoded1 \ + --volume /boot:/boot:ro \ + --volume /dev/shm \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --privileged=false \ + --volume racstorage:/oradata \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + -e DNS_SERVERS="172.16.1.25" \ + -e NODE_VIP=172.16.1.130 \ + -e VIP_HOSTNAME=racnoded1-vip \ + -e PRIV_IP=192.168.17.100 \ + -e PRIV_HOSTNAME=racnoded1-priv \ + -e PUBLIC_IP=172.16.1.100 \ + -e PUBLIC_HOSTNAME=racnoded1 \ + -e SCAN_NAME=racnodedc1-scan \ + -e OP_TYPE=INSTALL \ + -e DOMAIN=example.com \ + -e ASM_DISCOVERY_DIR=/oradata \ + -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ + -e CMAN_HOSTNAME=racnodedc1-cman \ + -e CMAN_IP=172.16.1.164 \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + -e RESET_FAILED_SYSTEMD="true" \ + --restart=always \ + --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --name racnoded1 \ + oracle/database-rac:21.3.0 + ``` + +**Notes:** + +- Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. +- You must have created the `racstorage` volume before the creation of the Oracle RAC Container. For details, please refer [OracleRACStorageServer](../OracleRACStorageServer/README.md). 
+- For details about the available environment variables, refer to [Section 7](#section-7-environment-variables-for-the-first-node).
+
+#### Assign networks to Oracle RAC containers
+
+You need to assign the Docker networks created in [Section 3](#section-3-network-and-password-management) to the containers. Execute the following commands:
+
+  ```bash
+  docker network disconnect bridge racnoded1
+  docker network connect rac_pub1_nw --ip 172.16.1.100 racnoded1
+  docker network connect rac_priv1_nw --ip 192.168.17.100 racnoded1
+  ```
+
+#### Start the first container
+
+To start the first container, run the following command:
+
+  ```bash
+  docker start racnoded1
+  ```
+
+It can take 40 minutes or longer to create the first node of the cluster. To check the logs, use the following command from another terminal session:
+
+  ```bash
+  docker logs -f racnoded1
+  ```
+
+You should see the database creation success message at the end:
+
+  ```bash
+  ####################################
+  ORACLE RAC DATABASE IS READY TO USE!
+  ####################################
+  ```
+
+#### Connect to the Oracle RAC container
+
+To connect to the container, execute the following command:
+
+```bash
+docker exec -i -t racnoded1 /bin/bash
+```
+
+If the install fails for any reason, log in to the container using the preceding command and check `/tmp/orod.log`.
+
+- You can also review the Grid Infrastructure logs located at `$GRID_BASE/diag/crs` and check for failure logs.
+- If the failure occurred during the database creation, then check the database logs.
+
+### Section 4.3: Adding an Oracle RAC Node using a Docker Container
+
+Before proceeding to the next step, ensure Oracle Grid Infrastructure is running and the Oracle RAC Database is open as per the instructions in [Section 4.2: Setup Oracle RAC Container on Docker](#section-42-setup-oracle-rac-container-on-docker). Otherwise, the node addition process will fail.
+
+Refer to [Section 3: Network and Password Management](#section-3-network-and-password-management) and set up the network on the container host based on your Oracle RAC environment. If you have already completed the network setup, proceed to the next step.
+
+For details about the environment variables, refer to [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes).
+
+Reset the password on the existing Oracle RAC node for SSH setup between an existing node in the cluster and the new node. The password must be the same on all the nodes for the `grid` and `oracle` users. Execute the following commands on an existing node of the cluster:
+
+```bash
+docker exec -i -t -u root racnoded1 /bin/bash
+sh /opt/scripts/startup/resetOSPassword.sh --help
+sh /opt/scripts/startup/resetOSPassword.sh --op_type reset_grid_oracle --pwd_file common_os_pwdfile.enc --secret_volume /run/secrets --pwd_key_file pwd.key
+```
+
+**Note:** If you do not have a common secret volume among Oracle RAC containers, populate the password file with the same password that you have used on the new node, encrypt the file, and execute `resetOSPassword.sh` on the existing node of the cluster.
+
+#### Deploying Oracle RAC Additional Node on Container with Block Devices on Docker
+
+If you are using an NFS volume, skip to the section [Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Docker](#deploying-oracle-rac-additional-node-on-container-with-oracle-rac-storage-container-on-docker).
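+
+Before you create the additional node container, you can optionally confirm that the clusterware stack on the existing node is healthy. This is a minimal sketch only; it assumes the first node container is named `racnoded1` and that the `grid` user's login environment inside the container places the Grid home `bin` directory on the `PATH`:
+
+```bash
+# Run on the Docker host that owns the existing node container
+docker exec racnoded1 su - grid -c "crsctl check cluster -all"
+```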
+ +To create additional nodes, use the following command: + +```bash +docker create -t -i \ + --hostname racnoded2 \ + --volume /boot:/boot:ro \ + --volume /dev/shm \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --device=/dev/oracleoci/oraclevdd:/dev/asm_disk1 \ + --device=/dev/oracleoci/oraclevde:/dev/asm_disk2 \ + --privileged=false \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + -e DNS_SERVERS="172.16.1.25" \ + -e EXISTING_CLS_NODES=racnoded1 \ + -e NODE_VIP=172.16.1.131 \ + -e VIP_HOSTNAME=racnoded2-vip \ + -e PRIV_IP=192.168.17.101 \ + -e PRIV_HOSTNAME=racnoded2-priv \ + -e PUBLIC_IP=172.16.1.101 \ + -e PUBLIC_HOSTNAME=racnoded2 \ + -e DOMAIN=example.com \ + -e SCAN_NAME=racnodedc1-scan \ + -e ASM_DISCOVERY_DIR=/dev \ + -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ + -e ORACLE_SID=ORCLCDB \ + -e OP_TYPE=ADDNODE \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + -e RESET_FAILED_SYSTEMD="true" \ + --restart=always --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ + --cpu-rt-runtime=95000 --ulimit rtprio=99 \ + --name racnoded2 \ + oracle/database-rac:21.3.0 +``` + +For details of all environment variables and parameters, refer to [Section 7](#section-7-environment-variables-for-the-first-node). + +#### Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Docker + +If you are using physical block devices for shared storage, skip to [Deploying Oracle RAC on Container with Block Devices on Docker](#deploying-oracle-rac-on-container-with-block-devices-on-docker). + +Use the existing `racstorage:/oradata` volume when creating the additional container using the image. + +For example: + +```bash +docker create -t -i \ + --hostname racnoded2 \ + --volume /boot:/boot:ro \ + --volume /dev/shm \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --volume racstorage:/oradata \ + --privileged=false \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + -e DNS_SERVERS="172.16.1.25" \ + -e EXISTING_CLS_NODES=racnoded1 \ + -e NODE_VIP=172.16.1.131 \ + -e VIP_HOSTNAME=racnoded2-vip \ + -e PRIV_IP=192.168.17.101 \ + -e PRIV_HOSTNAME=racnoded2-priv \ + -e PUBLIC_IP=172.16.1.101 \ + -e PUBLIC_HOSTNAME=racnoded2 \ + -e DOMAIN=example.com \ + -e SCAN_NAME=racnodedc1-scan \ + -e ASM_DISCOVERY_DIR=/oradata \ + -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ + -e ORACLE_SID=ORCLCDB \ + -e OP_TYPE=ADDNODE \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + -e RESET_FAILED_SYSTEMD="true" \ + --restart=always --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ + --cpu-rt-runtime=95000 --ulimit rtprio=99 \ + --name racnoded2 \ + oracle/database-rac:21.3.0 +``` + +**Notes:** + +- You must have created **racstorage** volume before the creation of the Oracle RAC container. +- You can change env variables such as IPs and ORACLE_PWD based on your env. For details about the env variables, refer the section 8. 
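+
+To confirm that the `racstorage` volume referenced above exists on the host (the volume itself is created and populated as described in the [OracleRACStorageServer](../OracleRACStorageServer/README.md) README), you can inspect it:
+
+```bash
+docker volume inspect racstorage
+```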
+
+#### Assign Network to additional Oracle RAC container
+
+Connect the private and public networks you created earlier to the container:
+
+```bash
+docker network disconnect bridge racnoded2
+docker network connect rac_pub1_nw --ip 172.16.1.101 racnoded2
+docker network connect rac_priv1_nw --ip 192.168.17.101 racnoded2
+```
+
+#### Start the Oracle RAC racnoded2 container
+
+Start the container:
+
+```bash
+docker start racnoded2
+```
+
+To check the database logs, tail the logs using the following command:
+
+```bash
+docker logs -f racnoded2
+```
+
+You should see the database creation success message at the end.
+
+```bash
+#################################################################
+Oracle Database ORCLCDB is up and running on racnoded2
+#################################################################
+Running User Script for oracle user
+Setting Remote Listener
+####################################
+ORACLE RAC DATABASE IS READY TO USE!
+####################################
+```
+
+#### Connect to the Oracle RAC racnoded2 container
+
+To connect to the container, execute the following command:
+
+```bash
+docker exec -i -t racnoded2 /bin/bash
+```
+
+If the node addition fails, log in to the container using the preceding command and review `/tmp/orod.log`. You can also review the Grid Infrastructure logs located at `$GRID_BASE/diag/crs` and check for failure logs. If the failure occurred during the database creation process, then check the database logs.
+
+## Section 4.4: Setup Oracle RAC Container on Docker with Docker Compose
+
+The Oracle RAC database can also be deployed with Docker Compose. An example of how to install the Oracle RAC database on a single host using a bridge network is explained in this [README.md](./samples/racdockercompose/README.md).
+
+The same README also covers the following scenarios with Docker Compose:
+
+1. Deploying Oracle RAC on Container with Block Devices on Docker with Docker Compose
+2. Deploying Oracle RAC on Container With Oracle RAC Storage Container with Docker Compose
+3. Deploying Oracle RAC Additional Node on Container with Block Devices on Docker with Docker Compose
+4. Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Docker with Docker Compose
+
+***Note:*** Docker and Docker Compose are not supported on Oracle Linux 8. You need Oracle Linux 7.9 with UEK R5 or R6.
+
+## Section 5: Oracle RAC on Podman
+
+If you are deploying Oracle RAC on Docker, skip to [Section 4: Oracle RAC on Docker](#section-4-oracle-rac-on-docker).
+
+**Note:** Oracle RAC is supported for production use on Podman starting with Oracle Database 19c (19.16) and Oracle Database 21c (21.7). You can deploy Oracle RAC on Podman using the pre-built images available on the Oracle Container Registry.
+
+To create an Oracle RAC environment on Podman, complete each of the following steps in order.
+
+### Section 5.1 : Prerequisites for Running Oracle RAC on Podman
+
+You must install and configure [Podman release 4.0.2](https://docs.oracle.com/en/operating-systems/oracle-linux/podman/podman-InstallingPodmanandRelatedUtilities.html#podman-install) or later on Oracle Linux 8.5 or later to run Oracle RAC on Podman.
+
+**Notes**:
+
+- If you are running Oracle Linux 8 with UEK R7, remove `--cpu-rt-runtime=95000 \` from the container creation commands in the following sections before creating the containers:
+  - [Section 5.2: Setup RAC Containers on Podman](#section-52-setup-rac-containers-on-podman).
+  - [Section 5.3: Adding an Oracle RAC Node using a container on Podman](#section-53-adding-an-oracle-rac-node-using-a-container-on-podman).
+
+- You can find more details in [Oracle Linux and Unbreakable Enterprise Kernel (UEK) Releases](https://blogs.oracle.com/scoter/post/oracle-linux-and-unbreakable-enterprise-kernel-uek-releases).
+
+- You do not need to execute step 2 in this section to create and enable `podman-rac-cgroup.service` when you are running Oracle Linux 8 with Unbreakable Enterprise Kernel R7.
+
+**IMPORTANT:** Completing the prerequisite steps is a requirement for successful configuration.
+
+Complete each prerequisite step in order, customized for your environment.
+
+1. Verify that you have enough memory and CPU resources available for all containers. In this `README.md` for Podman, we used the following configuration:
+
+  - 2 Podman hosts
+  - CPU Cores: 1 socket with 4 cores, with 2 threads for each core, Intel® Xeon® Platinum 8167M CPU at 2.00 GHz
+  - RAM: 60 GB
+  - Swap memory: 32 GB
+  - Oracle Linux 8.5 (Linux-x86-64) with the Unbreakable Enterprise Kernel 6: `5.4.17-2136.300.7.el8uek.x86_64`
+
+2. Oracle RAC must run certain processes in real-time mode. To run processes inside a container in real-time mode, populate the real-time CPU budget on machine restart. Create a oneshot systemd service as follows:
+
+   - Create a file `/etc/systemd/system/podman-rac-cgroup.service`
+   - Append the following lines:
+
+   ```INI
+   [Unit]
+   Description=Populate Cgroups with real time chunk on machine restart
+   After=multi-user.target
+   [Service]
+   Type=oneshot
+   ExecStart=/bin/bash -c "/bin/echo 950000 > /sys/fs/cgroup/cpu,cpuacct/machine.slice/cpu.rt_runtime_us && /bin/systemctl restart podman-restart.service"
+   StandardOutput=journal
+   CPUAccounting=yes
+   Slice=machine.slice
+   [Install]
+   WantedBy=multi-user.target
+   ```
+
+   - After creating the file `/etc/systemd/system/podman-rac-cgroup.service` with the lines appended in the preceding step, reload systemd and enable the service using the following commands:
+
+   ```bash
+   systemctl daemon-reload
+   systemctl enable podman-rac-cgroup.service
+   systemctl enable podman-restart.service
+   systemctl start podman-rac-cgroup.service
+   ```
+
+3. If SELinux is enabled on the Podman host, then you must create an SELinux policy for Oracle RAC on Podman.
+
+You can check the SELinux status on your host machine by running the `sestatus` command.
+
+For details about how to create an SELinux policy for Oracle RAC on Podman, see "How to Configure Podman for SELinux Mode" in the publication [Oracle Real Application Clusters Installation Guide for Podman Oracle Linux x86-64](https://docs.oracle.com/en/database/oracle/oracle-database/21/racpd/target-configuration-oracle-rac-podman.html#GUID-59138DF8-3781-4033-A38F-E0466884D008).
+
+### Section 5.2: Setup RAC Containers on Podman
+
+This section provides a step-by-step procedure to deploy Oracle RAC in containers, using either block devices or a storage container. For details about the environment variables, refer to [Section 7: Environment Variables for the First Node](#section-7-environment-variables-for-the-first-node).
+
+Refer to [Section 3: Network and Password Management](#section-3-network-and-password-management) and set up the network on the container host based on your Oracle RAC environment. If you have already completed the network setup, proceed to the next step.
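+
+If your Podman host does not yet have the bridge networks from Section 3 (which shows the `docker network create` commands), you can create equivalent networks with Podman. This is a minimal sketch that assumes the same subnets used throughout this document:
+
+```bash
+podman network create --driver=bridge --subnet=172.16.1.0/24 rac_pub1_nw
+podman network create --driver=bridge --subnet=192.168.17.0/24 rac_priv1_nw
+```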
+ +#### Deploying Oracle RAC Containers with Block Devices on Podman + +If you are using an NFS volume, skip to the section [Deploying Oracle RAC on Container With Oracle RAC Storage Container on Podman](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container-on-podman). + +Make sure the ASM devices do not have any existing file system. To clear any other file system from the devices, use the following command: + + ```bash + dd if=/dev/zero of=/dev/xvde bs=8k count=10000 + ``` + +Repeat for each shared block device. In the preceding example, `/dev/xvde` is a shared Xen virtual block device. + +Now create the Oracle RAC container using the image. For the details of environment variables, refer to section 7. You can use the following example to create a container: + + ```bash + podman create -t -i \ + --hostname racnodep1 \ + --volume /boot:/boot:ro \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --device=/dev/oracleoci/oraclevdd:/dev/asm_disk1 \ + --device=/dev/oracleoci/oraclevde:/dev/asm_disk2 \ + --privileged=false \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + --cap-add=AUDIT_WRITE \ + --cap-add=AUDIT_CONTROL \ + --memory 16G \ + --memory-swap 32G \ + --sysctl kernel.shmall=2097152 \ + --sysctl "kernel.sem=250 32000 100 128" \ + --sysctl kernel.shmmax=8589934592 \ + --sysctl kernel.shmmni=4096 \ + -e DNS_SERVERS="172.16.1.25" \ + -e NODE_VIP=172.16.1.200 \ + -e VIP_HOSTNAME=racnodep1-vip \ + -e PRIV_IP=192.168.17.170 \ + -e PRIV_HOSTNAME=racnodep1-priv \ + -e PUBLIC_IP=172.16.1.170 \ + -e PUBLIC_HOSTNAME=racnodep1 \ + -e SCAN_NAME=racnodepc1-scan \ + -e OP_TYPE=INSTALL \ + -e DOMAIN=example.com \ + -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ + -e ASM_DISCOVERY_DIR=/dev \ + -e CMAN_HOSTNAME=racnodepc1-cman \ + -e CMAN_IP=172.16.1.166 \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + -e ORACLE_SID=ORCLCDB \ + -e RESET_FAILED_SYSTEMD="true" \ + -e DEFAULT_GATEWAY="172.16.1.1" \ + -e TMPDIR=/var/tmp \ + --restart=always \ + --systemd=always \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --name racnodep1 \ + localhost/oracle/database-rac:21.3.0-21.13.0 + ``` + +**Note:** Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. + +#### Deploying Oracle RAC on Container With Oracle RAC Storage Container on Podman + +If you are using block devices, skip to the section [Deploying RAC Containers with Block Devices on Podman](#deploying-oracle-rac-containers-with-block-devices-on-podman). +Now create the Oracle RAC container using the image. 
You can use the following example to create a container: + + ```bash + podman create -t -i \ + --hostname racnodep1 \ + --volume /boot:/boot:ro \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --privileged=false \ + --volume racstorage:/oradata \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + --cap-add=AUDIT_WRITE \ + --cap-add=AUDIT_CONTROL \ + --memory 16G \ + --memory-swap 32G \ + --sysctl kernel.shmall=2097152 \ + --sysctl "kernel.sem=250 32000 100 128" \ + --sysctl kernel.shmmax=8589934592 \ + --sysctl kernel.shmmni=4096 \ + -e DNS_SERVERS="172.16.1.25" \ + -e NODE_VIP=172.16.1.200 \ + -e VIP_HOSTNAME=racnodep1-vip \ + -e PRIV_IP=192.168.17.170 \ + -e PRIV_HOSTNAME=racnodep1-priv \ + -e PUBLIC_IP=172.16.1.170 \ + -e PUBLIC_HOSTNAME=racnodep1 \ + -e SCAN_NAME=racnodepc1-scan \ + -e OP_TYPE=INSTALL \ + -e DOMAIN=example.com \ + -e ASM_DISCOVERY_DIR=/oradata \ + -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ + -e CMAN_HOSTNAME=racnodepc1-cman \ + -e CMAN_IP=172.16.1.166 \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + -e ORACLE_SID=ORCLCDB \ + -e RESET_FAILED_SYSTEMD="true" \ + -e DEFAULT_GATEWAY="172.16.1.1" \ + -e TMPDIR=/var/tmp \ + --restart=always \ + --systemd=always \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --name racnodep1 \ + localhost/oracle/database-rac:21.3.0-21.13.0 + ``` + +**Notes:** + +- Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. +- You must have created the `racstorage` volume before the creation of the Oracle RAC Container. For details about the available environment variables, refer the [Section 7](#section-7-environment-variables-for-the-first-node). + +#### Assign networks to Oracle RAC containers Created Using Podman + +You need to assign the Podman networks created in section 1 to containers. Execute the following commands: + + ```bash + podman network disconnect podman racnodep1 + podman network connect rac_pub1_nw --ip 172.16.1.170 racnodep1 + podman network connect rac_priv1_nw --ip 192.168.17.170 racnodep1 + ``` + +#### Start the first container Created Using Podman + +To start the first container, run the following command: + + ```bash + podman start racnodep1 + ``` + +It can take at least 40 minutes or longer to create the first node of the cluster. To check the database logs, tail the logs using the following command: + +```bash +podman exec racnodep1 /bin/bash -c "tail -f /tmp/orod.log" +``` + +You should see the database creation success message at the end. + +```bash +01-31-2024 12:31:20 UTC : : ################################################################# +01-31-2024 12:31:20 UTC : : Oracle Database ORCLCDB is up and running on racnodep1 +01-31-2024 12:31:20 UTC : : ################################################################# +01-31-2024 12:31:20 UTC : : Running User Script +01-31-2024 12:31:20 UTC : : Setting Remote Listener +01-31-2024 12:31:27 UTC : : 172.16.1.166 +01-31-2024 12:31:27 UTC : : Executing script to set the remote listener +01-31-2024 12:31:28 UTC : : #################################### +01-31-2024 12:31:28 UTC : : ORACLE RAC DATABASE IS READY TO USE! 
+01-31-2024 12:31:28 UTC : : ####################################
+```
+
+#### Connect to the Oracle RAC container Created Using Podman
+
+To connect to the container, execute the following command:
+
+```bash
+podman exec -i -t racnodep1 /bin/bash
+```
+
+If the install fails for any reason, log in to the container using the preceding command and check `/tmp/orod.log`. You can also review the Grid Infrastructure logs located at `$GRID_BASE/diag/crs` and check for failure logs. If the failure occurred during the database creation, then check the database logs.
+
+### Section 5.3: Adding an Oracle RAC Node using a container on Podman
+
+Before proceeding to the next step, ensure Oracle Grid Infrastructure is running and the Oracle RAC Database is open as per the instructions in [Section 5.2: Setup RAC Containers on Podman](#section-52-setup-rac-containers-on-podman). Otherwise, the node addition process will fail.
+
+Refer to [Section 3: Network and Password Management](#section-3-network-and-password-management) and set up the network on the container host based on your Oracle RAC environment. If you have already completed the network setup, proceed to the next step.
+
+For details about the environment variables, refer to [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes).
+
+Reset the password on the existing Oracle RAC node for SSH setup between an existing node in the cluster and the new node. The password must be the same on all the nodes for the `grid` and `oracle` users. Execute the following commands on an existing node of the cluster:
+
+```bash
+podman exec -i -t -u root racnodep1 /bin/bash
+sh /opt/scripts/startup/resetOSPassword.sh --help
+sh /opt/scripts/startup/resetOSPassword.sh --op_type reset_grid_oracle --pwd_file common_os_pwdfile.enc --secret_volume /run/secrets --pwd_key_file pwd.key
+```
+
+**Note:** If you do not have a common secret volume among Oracle RAC containers, populate the password file with the same password that you have used on the new node, encrypt the file, and execute `resetOSPassword.sh` on the existing node of the cluster.
+
+#### Deploying Oracle RAC Additional Node on Container with Block Devices on Podman
+
+If you are using an NFS volume, skip to the section [Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Podman](#deploying-oracle-rac-additional-node-on-container-with-oracle-rac-storage-container-on-podman).
+ +To create additional nodes, use the following command: + +```bash +podman create -t -i \ + --hostname racnodep2 \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /boot:/boot:ro \ + --dns-search=example.com \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --device=/dev/oracleoci/oraclevdd:/dev/asm_disk1 \ + --device=/dev/oracleoci/oraclevde:/dev/asm_disk2 \ + --privileged=false \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + --cap-add=AUDIT_CONTROL \ + --cap-add=AUDIT_WRITE \ + --memory 16G \ + --memory-swap 32G \ + --sysctl kernel.shmall=2097152 \ + --sysctl "kernel.sem=250 32000 100 128" \ + --sysctl kernel.shmmax=8589934592 \ + --sysctl kernel.shmmni=4096 \ + -e DNS_SERVERS="172.16.1.25" \ + -e EXISTING_CLS_NODES=racnodep1 \ + -e NODE_VIP=172.16.1.201 \ + -e VIP_HOSTNAME=racnodep2-vip \ + -e PRIV_IP=192.168.17.171 \ + -e PRIV_HOSTNAME=racnodep2-priv \ + -e PUBLIC_IP=172.16.1.171 \ + -e PUBLIC_HOSTNAME=racnodep2 \ + -e DOMAIN=example.com \ + -e SCAN_NAME=racnodepc1-scan \ + -e ASM_DISCOVERY_DIR=/dev \ + -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ + -e ORACLE_SID=ORCLCDB \ + -e OP_TYPE=ADDNODE \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + -e RESET_FAILED_SYSTEMD="true" \ + -e DEFAULT_GATEWAY="172.16.1.1" \ + -e TMPDIR=/var/tmp \ + --systemd=always \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --restart=always \ + --name racnodep2 \ + localhost/oracle/database-rac:21.3.0-21.13.0 +``` + +For details of all environment variables and parameters, refer to [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes). + +#### Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Podman + +If you are using physical block devices for shared storage, skip to [Deploying Oracle RAC Additional Node on Container with Block Devices on Podman](#deploying-oracle-rac-additional-node-on-container-with-block-devices-on-podman). + +Use the existing `racstorage:/oradata` volume when creating the additional container using the image. 
+ +For example: + +```bash +podman create -t -i \ + --hostname racnodep2 \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /boot:/boot:ro \ + --dns-search=example.com \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --privileged=false \ + --volume racstorage:/oradata \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + --cap-add=AUDIT_WRITE \ + --cap-add=AUDIT_CONTROL \ + --memory 16G \ + --memory-swap 32G \ + --sysctl kernel.shmall=2097152 \ + --sysctl "kernel.sem=250 32000 100 128" \ + --sysctl kernel.shmmax=8589934592 \ + --sysctl kernel.shmmni=4096 \ + -e DNS_SERVERS="172.16.1.25" \ + -e EXISTING_CLS_NODES=racnodep1 \ + -e NODE_VIP=172.16.1.201 \ + -e VIP_HOSTNAME=racnodep2-vip \ + -e PRIV_IP=192.168.17.171 \ + -e PRIV_HOSTNAME=racnodep2-priv \ + -e PUBLIC_IP=172.16.1.171 \ + -e PUBLIC_HOSTNAME=racnodep2 \ + -e DOMAIN=example.com \ + -e SCAN_NAME=racnodepc1-scan \ + -e ASM_DISCOVERY_DIR=/oradata \ + -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ + -e ORACLE_SID=ORCLCDB \ + -e OP_TYPE=ADDNODE \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + -e RESET_FAILED_SYSTEMD="true" \ + -e DEFAULT_GATEWAY="172.16.1.1" \ + -e TMPDIR=/var/tmp \ + --systemd=always \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --restart=always \ + --name racnodep2 \ + localhost/oracle/database-rac:21.3.0-21.13.0 +``` + +**Notes:** + +- You must have created **racstorage** volume before the creation of the Oracle RAC container. +- You can change env variables such as IPs and ORACLE_PWD based on your env. For details about the env variables, refer the [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes). + +#### Assign Network to additional Oracle RAC container Created Using Podman + +Connect the private and public networks you created earlier to the container: + +```bash +podman network disconnect podman racnodep2 +podman network connect rac_pub1_nw --ip 172.16.1.171 racnodep2 +podman network connect rac_priv1_nw --ip 192.168.17.171 racnodep2 +``` + +#### Start Oracle RAC container + +Start the container + +```bash +podman start racnodep2 +``` + +To check the database logs, tail the logs using the following command: + +```bash +podman exec racnodep2 /bin/bash -c "tail -f /tmp/orod.log" +``` + +You should see the database creation success message at the end. + +```bash +02-01-2024 09:36:14 UTC : : ################################################################# +02-01-2024 09:36:14 UTC : : Oracle Database ORCLCDB is up and running on racnodep2 +02-01-2024 09:36:14 UTC : : ################################################################# +02-01-2024 09:36:14 UTC : : Running User Script +02-01-2024 09:36:14 UTC : : Setting Remote Listener +02-01-2024 09:36:14 UTC : : #################################### +02-01-2024 09:36:14 UTC : : ORACLE RAC DATABASE IS READY TO USE! +02-01-2024 09:36:14 UTC : : #################################### +``` +## Section 5.4: Setup Oracle RAC Container on Podman with Podman Compose + +Oracle RAC database can also be deployed with podman Compose. An example of how to install Oracle RAC Database on Single Host via Bridge Network is explained in this [README.md](./samples/racpodmancompose/README.md) + +Same section covers various below scenarios as well with podman compose- +1. 
Deploying Oracle RAC on Container with Block Devices on Podman with Podman Compose
+2. Deploying Oracle RAC on Container with NFS Devices on Podman with Podman Compose
+3. Deploying Oracle RAC Additional Node on Container with Block Devices on Podman with Podman Compose
+4. Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Podman with Podman Compose
+
+***Note:*** Podman and Podman Compose are not supported on Oracle Linux 7. You need a minimum of Oracle Linux 8.8 with UEK R7.
+
+## Section 6: Connecting to an Oracle RAC Database
+
+**IMPORTANT:** This section assumes that you have successfully created an Oracle RAC cluster using the preceding sections.
+
+If you are using a connection manager and exposed port 1521 on the host, then connect from an external client using the following connection string, where `<password>` is the password of the `system` user, `<host>` is the container host, and `<oracle_sid>` is the database system identifier:
+
+```bash
+system/<password>@//<host>:1521/<oracle_sid>
+```
+
+If you are using the bridge created using the MACVLAN driver, and you have configured DNS appropriately, then you can connect using the public Single Client Access Name (SCAN) listener directly from any external client. To connect with the SCAN, use the following connection string, where `<scan_name>` is the SCAN name for the database, and `<oracle_sid>` is the database system identifier:
+
+```bash
+system/<password>@//<scan_name>:1521/<oracle_sid>
+```
+
+## Section 7: Environment Variables for the First Node
+
+This section provides information about the environment variables that can be used when creating the first node of a cluster.
+
+```bash
+OP_TYPE=###Specify the Operation TYPE. It can accept 2 values INSTALL OR ADDNODE###
+NODE_VIP=###Specify the Node VIP###
+VIP_HOSTNAME=###Specify the VIP hostname###
+PRIV_IP=###Specify the Private IP###
+PRIV_HOSTNAME=###Specify the Private Hostname###
+PUBLIC_IP=###Specify the public IP###
+PUBLIC_HOSTNAME=###Specify the public hostname###
+SCAN_NAME=###Specify the scan name###
+ASM_DEVICE_LIST=###Specify the ASM disk list.###
+SCAN_IP=###Specify this if you do not have DNS server###
+DOMAIN=###Default value set to example.com###
+PASSWORD=###OS password will be generated by openssl###
+CLUSTER_NAME=###Default value set to racnode-c###
+ORACLE_SID=###Default value set to ORCLCDB###
+ORACLE_PDB=###Default value set to ORCLPDB###
+ORACLE_PWD=###Default value set to generated by openssl random password###
+ORACLE_CHARACTERSET=###Default value set to AL32UTF8###
+DEFAULT_GATEWAY=###Default gateway. You need this env variable if containers will be running on multiple hosts.###
+CMAN_HOSTNAME=###Connection Manager Host Name###
+CMAN_IP=###Connection manager Host IP###
+ASM_DISCOVERY_DIR=###ASM disk location inside the container. By default it is /dev###
+COMMON_OS_PWD_FILE=###Pass the file name to setup grid and oracle user password. If you specify ORACLE_PWD_FILE, GRID_PWD_FILE, and DB_PWD_FILE then you do not need to specify this env variable###
+ORACLE_PWD_FILE=###Pass the file name to set the password for oracle user.###
+GRID_PWD_FILE=###Pass the file name to set the password for grid user.###
+DB_PWD_FILE=###Pass the file name to set the password for DB user i.e. sys.###
+REMOVE_OS_PWD_FILES=###Set this env variable to true to remove pwd key file and password file after resetting password.###
+CONTAINER_DB_FLAG=###Default value is set to true to create a container database. Set this to false if you do not want to create a container database.###
+```
+
+## Section 8: Environment Variables for the Second and Subsequent Nodes
+
+This section provides details about the environment variables that can be used for all additional nodes added to an existing cluster.
+
+```bash
+OP_TYPE=###Specify the Operation TYPE. It can accept 2 values INSTALL OR ADDNODE###
+EXISTING_CLS_NODES=###Specify the existing nodes of the cluster that you want to join. If you have 2 nodes in the cluster and you are trying to add the third node, then specify the 2 existing nodes of the cluster, separated by commas.###
+NODE_VIP=###Specify the Node VIP###
+VIP_HOSTNAME=###Specify the VIP hostname###
+PRIV_IP=###Specify the Private IP###
+PRIV_HOSTNAME=###Specify the Private Hostname###
+PUBLIC_IP=###Specify the public IP###
+PUBLIC_HOSTNAME=###Specify the public hostname###
+SCAN_NAME=###Specify the scan name###
+SCAN_IP=###Specify this if you do not have DNS server###
+ASM_DEVICE_LIST=###Specify the ASM disk list.###
+DOMAIN=###Default value set to example.com###
+ORACLE_SID=###Default value set to ORCLCDB###
+DEFAULT_GATEWAY=###Default gateway. You need this env variable if containers will be running on multiple hosts.###
+CMAN_HOSTNAME=###Connection Manager Host Name###
+CMAN_IP=###Connection manager Host IP###
+ASM_DISCOVERY_DIR=###ASM disk location inside the container. By default it is /dev###
+COMMON_OS_PWD_FILE=###You need to pass the file name to setup grid and oracle user password. If you specify ORACLE_PWD_FILE, GRID_PWD_FILE, and DB_PWD_FILE then you do not need to specify this env variable###
+ORACLE_PWD_FILE=###You need to pass the file name to set the password for oracle user.###
+GRID_PWD_FILE=###You need to pass the file name to set the password for grid user.###
+DB_PWD_FILE=###You need to pass the file name to set the password for DB user i.e. sys.###
+REMOVE_OS_PWD_FILES=###You need to set this to true to remove pwd key file and password file after resetting password.###
+```
+
+## Section 9: Building a Patched Oracle RAC Container Image
+
+If you want to build a patched image based on a base 21.3.0 container image, then refer to the GitHub page [Example of how to create a patched database image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch).
+
+## Section 10 : Sample Container Files for Older Releases
+
+### Docker
+
+This project offers sample container files for Oracle Grid Infrastructure and Oracle Real Application Clusters for development and test:
+
+- Oracle Database 19c Oracle Grid Infrastructure (19.3) for Linux x86-64
+- Oracle Database 19c (19.3) for Linux x86-64
+- Oracle Database 18c Oracle Grid Infrastructure (18.3) for Linux x86-64
+- Oracle Database 18c (18.3) for Linux x86-64
+- Oracle Database 12c Release 2 Oracle Grid Infrastructure (12.2.0.1.0) for Linux x86-64
+- Oracle Database 12c Release 2 (12.2.0.1.0) Enterprise Edition for Linux x86-64
+
+**Notes:**
+
+- The Oracle RAC on Docker container releases are supported only for test and development environments, not for production environments.
+
+- If you are planning to build and deploy Oracle RAC 18.3.0, you need to download Oracle 18.3.0 Grid Infrastructure and Oracle Database 18.3.0.
+
+  - You also need to download Patch# p28322130_183000OCWRU_Linux-x86-64.zip from [Oracle Technology Network](https://www.oracle.com/technetwork/database/database-technologies/clusterware/downloads/docker-4418413.html).
+
+  - Stage it under the `dockerfiles/18.3.0` folder.
+
+- If you are planning to build and deploy Oracle RAC 12.2.0.1, you need to download Oracle 12.2.0.1 Grid Infrastructure and Oracle Database 12.2.0.1.
+
+  - You also need to download Patch# p27383741_122010_Linux-x86-64.zip from [Oracle Technology Network](https://www.oracle.com/technetwork/database/database-technologies/clusterware/downloads/docker-4418413.html).
+
+  - Stage it under the `dockerfiles/12.2.0.1` folder.
+
+### Podman
+
+This project offers sample container files for Oracle Grid Infrastructure and Oracle Real Application Clusters for development and test:
+
+- Oracle Database 19c Oracle Grid Infrastructure (19.3) for Linux x86-64
+- Oracle Database 19c (19.3) for Linux x86-64
+
+**Notes:**
+
+- Because Oracle RAC on Podman is supported on 19c from 19.16 or later, you must download the grid release update (RU) from [support.oracle.com](https://support.oracle.com/portal/).
+
+- For RAC on Podman for v19.22, download the following one-off patches from [support.oracle.com](https://support.oracle.com/portal/):
+  - `35943157`
+  - `35940989`
+
+- Before starting the next step, you must edit `docker-images/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/19.3.0/Dockerfile`, change `oraclelinux:7-slim` to `oraclelinux:8`, and save the file.
+
+- You must add `CV_ASSUME_DISTID=OEL8` inside the `Dockerfile` as an environment variable.
+
+- Once the `19.3.0` Oracle RAC on Podman image is built, you can build a patched image with the downloaded 19.16 RU and one-off patches. To build the patched image, refer to [Example of how to create a patched database image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch).
+
+## Section 11 : Support
+
+### Docker Support
+
+At the time of this release, Oracle RAC on Docker is supported only on Oracle Linux 7. To see current details, refer to the [Real Application Clusters Installation Guide for Docker Containers Oracle Linux x86-64](https://docs.oracle.com/en/database/oracle/oracle-database/21/racdk/oracle-rac-on-docker.html).
+
+### Podman Support
+
+At the time of this release, Oracle RAC on Podman is supported on Oracle Linux 8.5 or later. To see current Linux support certifications, refer to the [Oracle RAC on Podman Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/install-and-upgrade.html).
+
+## Section 12 : License
+
+To download and run Oracle Grid and Database, regardless of whether inside or outside a container, you must download the binaries from the Oracle website and accept the license indicated on that page.
+
+All scripts and files hosted in this repository which are required to build the container images are, unless otherwise noted, released under the UPL 1.0 license.
+
+## Section 13 : Copyright
+
+Copyright (c) 2014-2024 Oracle and/or its affiliates.
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/cmdExec b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/cmdExec
deleted file mode 100755
index d8a5f39ba0..0000000000
--- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/cmdExec
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-#############################
-# Copyright (c) 2024, Oracle and/or its affiliates.
-# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl -# Author: paramdeep.saini@oracle.com -############################ - -TIMESTAMP=$(date "+%Y-%m-%d") -LOGFILE="/tmp/oracle_rac_cmd_${TIMESTAMP}.log" -# shellcheck disable=SC2145 -echo "$(date -u) : $@" >> "$LOGFILE" -# shellcheck disable=SC2124 -cmd=("$@") -# shellcheck disable=SC2128 -$cmd -# shellcheck disable=SC2181 -if [ $? -eq 0 ]; then - exit 0 -else - exit 127 -fi \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracprov.py.org1 b/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracprov.py.org1 deleted file mode 100644 index b9e8855ffb..0000000000 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracprov.py.org1 +++ /dev/null @@ -1,440 +0,0 @@ -#!/usr/bin/python - -############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl -# Author: sanjay.singh@oracle.com,paramdeep.saini@oracle.com -############################ - -""" - This file contains to the code call different classes objects based on setup type -""" - -from distutils.log import debug -import os -import sys -import traceback - -from oralogger import * -from oraenv import * -from oracommon import * -from oramachine import * -from orasetupenv import * -from orasshsetup import * -from oracvu import * -from oragiprov import * -from oraasmca import * - -class OraRacProv: - """ - This class provision the RAC database - """ - def __init__(self,oralogger,orahandler,oraenv,oracommon,oracvu,orasetupssh): - try: - self.ologger = oralogger - self.ohandler = orahandler - self.oenv = oraenv.get_instance() - self.ocommon = oracommon - self.ora_env_dict = oraenv.get_env_vars() - self.file_name = os.path.basename(__file__) - self.osetupssh = orasetupssh - self.ocvu = oracvu - self.mythread = {} - self.ogiprov = OraGIProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh) - self.oasmca = OraAsmca(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh) - except BaseException as ex: - traceback.print_exc(file = sys.stdout) - - def setup(self): - """ - This function setup the RAC home on this machine - """ - sshFlag=False - self.ogiprov.setup() - self.env_param_checks() - pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") - crs_nodes=pub_nodes.replace(" ",",") - for node in crs_nodes.split(","): - self.clu_checks(node) - dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() - retcode1=self.ocvu.check_home(None,dbhome,dbuser) - status=self.ocommon.check_rac_installed(retcode1) - if not status: - self.perform_ssh_setup() - sshFlag=True - status=self.ocommon.check_home_inv(None,dbhome,dbuser) - if not status: - self.db_sw_install() - self.run_rootsh() - # else: - # self.ocommon.log_info_message("DB Home " + dbhome + " is already registered with the inventory",self.file_name) - self.ocommon.rac_setup_complete() - if not self.ocommon.check_key("SKIP_DBCA",self.ora_env_dict): - self.create_asmdg() - status,osid,host,mode=self.ocommon.check_dbinst() - hostname=self.ocommon.get_public_hostname() - if status: - msg='''Database instance {0} already exist on this machine {1}.'''.format(osid,hostname) - self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) - else: - if not sshFlag: - self.perform_ssh_setup() - 
self.create_db() - status,osid,host,mode=self.ocommon.check_dbinst() - if status: - # self.ocommon.set_primary_for_standby() - self.ocommon.rac_setup_complete() - msg='''Oracle Database {0} is up and running on {1}.'''.format(osid,host) - self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) - self.ocommon.run_custom_scripts("CUSTOM_DB_SCRIPT_DIR","CUSTOM_DB_SCRIPT_FILE",dbuser) - self.ocommon.set_remote_listener() - os.system("echo ORACLE RAC DATABASE IS READY TO USE > /dev/pts/0") - msg='''ORACLE RAC DATABASE IS READY TO USE''' - self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) - else: - msg='''Oracle Database {0} is not up and running on {1}.'''.format(osid,host) - self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) - self.ocommon.prog_exit("127") - - def env_param_checks(self): - """ - Perform the env setup checks - """ - self.ocommon.check_env_variable("DB_HOME",True) - self.ocommon.check_env_variable("DB_BASE",True) - self.ocommon.check_env_variable("INVENTORY",True) - - def clu_checks(self,hostname): - """ - Performing clu checks - """ - self.ocommon.log_info_message("Performing CVU checks before DB home installation to make sure clusterware is up and running on " + hostname,self.file_name) - # hostname=self.ocommon.get_public_hostname() - retcode1=self.ocvu.check_ohasd(hostname) - retcode2=self.ocvu.check_asm(hostname) - retcode3=self.ocvu.check_clu(hostname,None) - - if retcode1 == 0: - msg="Cluvfy ohasd check passed!" - self.ocommon.log_info_message(msg,self.file_name) - else: - msg="Cluvfy ohasd check faild. Exiting.." - self.ocommon.log_error_message(msg,self.file_name) - self.ocommon.prog_exit("127") - - if retcode2 == 0: - msg="Cluvfy asm check passed!" - self.ocommon.log_info_message(msg,self.file_name) - else: - msg="Cluvfy asm check faild. Exiting.." - self.ocommon.log_error_message(msg,self.file_name) - #self.ocommon.prog_exit("127") - - if retcode3 == 0: - msg="Cluvfy clumgr check passed!" - self.ocommon.log_info_message(msg,self.file_name) - else: - msg="Cluvfy clumgr check faild. Exiting.." 
- self.ocommon.log_error_message(msg,self.file_name) - self.ocommon.prog_exit("127") - - def perform_ssh_setup(self): - """ - Perform ssh setup - """ - if not self.ocommon.detect_k8s_env(): - dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() - self.osetupssh.setupssh(dbuser,dbhome,"INSTALL") - self.osetupssh.verifyssh(dbuser,"INSTALL") - - def db_sw_install(self): - """ - Perform the db_install - """ - dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() - pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") - crs_nodes=pub_nodes.replace(" ",",") - osdba=self.ora_env_dict["OSDBA_GROUP"] if self.ocommon.check_key("OSDBA",self.ora_env_dict) else "dba" - osbkp=self.ora_env_dict["OSBACKUPDBA_GROUP"] if self.ocommon.check_key("OSBACKUPDBA_GROUP",self.ora_env_dict) else "backupdba" - osoper=self.ora_env_dict["OSPER_GROUP"] if self.ocommon.check_key("OSPER_GROUP",self.ora_env_dict) else "oper" - osdgdba=self.ora_env_dict["OSDGDBA_GROUP"] if self.ocommon.check_key("OSDGDBA_GROUP",self.ora_env_dict) else "dgdba" - oskmdba=self.ora_env_dict["OSKMDBA_GROUP"] if self.ocommon.check_key("OSKMDBA_GROUP",self.ora_env_dict) else "kmdba" - osracdba=self.ora_env_dict["OSRACDBA_GROUP"] if self.ocommon.check_key("OSRACDBA_GROUP",self.ora_env_dict) else "racdba" - osasm=self.ora_env_dict["OSASM_GROUP"] if self.ocommon.check_key("OSASM_GROUP",self.ora_env_dict) else "asmadmin" - unixgrp="oinstall" - hostname=self.ocommon.get_public_hostname() - lang=self.ora_env_dict["LANGUAGE"] if self.ocommon.check_key("LANGUAGE",self.ora_env_dict) else "en" - edition= self.ora_env_dict["DB_EDITION"] if self.ocommon.check_key("DB_EDITION",self.ora_env_dict) else "EE" - - copyflag=" -noCopy " - if not self.ocommon.check_key("COPY_DB_SOFTWARE",self.ora_env_dict): - copyflag=" -noCopy " - - mythread_list=[] - - oraversion=self.ocommon.get_rsp_version("INSTALL",None) - version=oraversion.split(".",1)[0].strip() - - self.mythread.clear() - for node in pub_nodes.split(" "): - val1=Thread(target=self.db_sw_install_on_node,args=(dbuser,hostname,unixgrp,crs_nodes,oinv,lang,dbhome,dbase,edition,osdba,osbkp,osdgdba,oskmdba,osracdba,copyflag,node)) - mythread_list=[val1,'TRUE'] - self.mythread[node]=mythread_list - self.ocommon.log_info_message("Running DB Sw install on node " + node,self.file_name) - val1.start() - - self.manage_thread() - - def db_sw_install_on_node(self,dbuser,hostname,unixgrp,crs_nodes,oinv,lang,dbhome,dbase,edition,osdba,osbkp,osdgdba,oskmdba,osracdba,copyflag,node): - """ - Perform the db_install - """ - runCmd="" - if self.ocommon.check_key("APPLY_RU_LOCATION",self.ora_env_dict): - ruLoc=self.ora_env_dict["APPLY_RU_LOCATION"] - runCmd='''runInstaller -applyRU "{0}"'''.format(self.ora_env_dict["APPLY_RU_LOCATION"]) - else: - runCmd='''runInstaller ''' - - - if self.ocommon.check_key("DEBUG_MODE",self.ora_env_dict): - dbgCmd='''{0} -debug '''.format(runCmd) - runCmd=dbgCmd - - rspdata='''su - {0} -c "ssh {17} {1}/{16} -ignorePrereq -waitforcompletion {15} -silent - oracle.install.option=INSTALL_DB_SWONLY - ORACLE_HOSTNAME={2} - UNIX_GROUP_NAME={3} - oracle.install.db.CLUSTER_NODES={4} - INVENTORY_LOCATION={5} - SELECTED_LANGUAGES={6} - ORACLE_HOME={7} - ORACLE_BASE={8} - oracle.install.db.InstallEdition={9} - oracle.install.db.OSDBA_GROUP={10} - oracle.install.db.OSBACKUPDBA_GROUP={11} - oracle.install.db.OSDGDBA_GROUP={12} - oracle.install.db.OSKMDBA_GROUP={13} - oracle.install.db.OSRACDBA_GROUP={14} - SECURITY_UPDATES_VIA_MYORACLESUPPORT=false - 
DECLINE_SECURITY_UPDATES=true"'''.format(dbuser,dbhome,hostname,unixgrp,crs_nodes,oinv,lang,dbhome,dbase,edition,osdba,osbkp,osdgdba,oskmdba,osracdba,copyflag,runCmd,node) - cmd=rspdata.replace('\n'," ") - #dbswrsp="/tmp/dbswrsp.rsp" - #self.ocommon.write_file(dbswrsp,rspdata) - #if os.path.isfile(dbswrsp): - #cmd='''su - {0} -c "{1}/runInstaller -ignorePrereq -waitforcompletion -silent -responseFile {2}"'''.format(dbuser,dbhome,dbswrsp) - output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) - self.ocommon.check_os_err(output,error,retcode,None) - #else: - # self.ocommon.log_error_message("DB response file does not exist at its location: " + dbswrsp + ".Exiting..",self.file_name) - # self.ocommon.prog_exit("127") - if len(self.mythread) > 0: - if node in self.mythread.keys(): - swthread_list=self.mythread[node] - value=swthread_list[0] - new_list=[value,'FALSE'] - new_val={node,tuple(new_list)} - self.mythread.update(new_val) - - def run_rootsh(self): - """ - This function run the root.sh after DB home install - """ - dbuser,dbhome,dbbase,oinv=self.ocommon.get_db_params() - pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") - for node in pub_nodes.split(" "): - cmd='''su - {0} -c "ssh {1} sudo {2}/root.sh"'''.format(dbuser,node,dbhome) - output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) - self.ocommon.check_os_err(output,error,retcode,True) - - def create_asmdg(self): - """ - Perform the asm disk group creation - """ - dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() - if (self.ocommon.check_key("REDO_ASM_DEVICE_LIST",self.ora_env_dict)) and (self.ocommon.check_key("LOG_FILE_DEST",self.ora_env_dict)): - lgdest=self.ocommon.rmdgprefix(self.ora_env_dict["LOG_FILE_DEST"]) - device_prop=self.ora_env_dict["REDO_ASMDG_PROPERTIES"] if self.ocommon.check_key("REDO_ASMDG_PROPERTIES",self.ora_env_dict) else None - self.ocommon.log_info_message("dg validation for :" + lgdest + " is in progress", self.file_name) - status=self.oasmca.validate_dg(self.ora_env_dict["REDO_ASM_DEVICE_LIST"],device_prop,lgdest) - if not status: - self.oasmca.create_dg(self.ora_env_dict["REDO_ASM_DEVICE_LIST"],device_prop,lgdest) - else: - self.ocommon.log_info_message("ASM diskgroup exist!",self.file_name) - - if (self.ocommon.check_key("RECO_ASM_DEVICE_LIST",self.ora_env_dict)) and (self.ocommon.check_key("DB_RECOVERY_FILE_DEST",self.ora_env_dict)): - dbrdest=self.ocommon.rmdgprefix(self.ora_env_dict["DB_RECOVERY_FILE_DEST"]) - device_prop=self.ora_env_dict["RECO_ASMDG_PROPERTIES"] if self.ocommon.check_key("RECO_ASMDG_PROPERTIES",self.ora_env_dict) else None - self.ocommon.log_info_message("dg validation for :" + dbrdest + " is in progress", self.file_name) - status=self.oasmca.validate_dg(self.ora_env_dict["RECO_ASM_DEVICE_LIST"],device_prop,dbrdest) - if not status: - self.oasmca.create_dg(self.ora_env_dict["RECO_ASM_DEVICE_LIST"],device_prop,dbrdest) - else: - self.ocommon.log_info_message("ASM diskgroup exist!",self.file_name) - - if (self.ocommon.check_key("DB_ASM_DEVICE_LIST",self.ora_env_dict)) and (self.ocommon.check_key("DB_DATA_FILE_DEST",self.ora_env_dict)): - dbfiledest=self.ocommon.rmdgprefix(self.ora_env_dict["DB_DATA_FILE_DEST"]) - device_prop=self.ora_env_dict["DB_ASMDG_PROPERTIES"] if self.ocommon.check_key("DB_ASMDG_PROPERTIES",self.ora_env_dict) else None - self.ocommon.log_info_message("dg validation for :" + dbfiledest + " is in progress", self.file_name) - status=self.oasmca.validate_dg(self.ora_env_dict["DB_ASM_DEVICE_LIST"],device_prop,dbfiledest) 
- if not status: - self.oasmca.create_dg(self.ora_env_dict["DB_ASM_DEVICE_LIST"],device_prop,dbfiledest) - else: - self.ocommon.log_info_message("ASM diskgroup exist!",self.file_name) - - def check_responsefile(self): - """ - This function returns the valid response file - """ - dbrsp=None - if self.ocommon.check_key("DBCA_RESPONSE_FILE",self.ora_env_dict): - dbrsp=self.ora_env_dict["DBCA_RESPONSE_FILE"] - self.ocommon.log_info_message("DBCA_RESPONSE_FILE parameter is set and file location is:" + dbrsp ,self.file_name) - else: - self.ocommon.log_error_message("DBCA response file does not exist at its location: " + dbrsp + ".Exiting..",self.file_name) - self.ocommon.prog_exit("127") - - if os.path.isfile(dbrsp): - return dbrsp - - def create_db(self): - """ - Perform the DB Creation - """ - cmd="" - dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() - if self.ocommon.check_key("DBCA_RESPONSE_FILE",self.ora_env_dict): - dbrsp=self.check_responsefile() - cmd='''su - {0} -c "{1}/bin/dbca -silent -ignorePreReqs -createDatabase -responseFile {2}"'''.format(dbuser,dbhome,dbrsp) - else: - cmd=self.prepare_db_cmd() - - dbpasswd=self.ocommon.get_db_passwd() - self.ocommon.set_mask_str(dbpasswd) - output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) - self.ocommon.check_os_err(output,error,retcode,True) - ### Unsetting the encrypt value to None - self.ocommon.unset_mask_str() - - def prepare_db_cmd(self): - """ - Perform the asm disk group creation - """ - dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() - pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") - crs_nodes=pub_nodes.replace(" ",",") - dbname,osid,dbuname=self.ocommon.getdbnameinfo() - dgname=self.ocommon.setdgprefix(self.ocommon.getcrsdgname()) - dbfiledest=self.ocommon.setdgprefix(self.ocommon.getdbdestdgname(dgname)) - cdbflag=self.ora_env_dict["CONTAINERDB_FLAG"] if self.ocommon.check_key("CONTAINERDB_FLAG",self.ora_env_dict) else "true" - stype=self.ora_env_dict["DB_STORAGE_TYPE"] if self.ocommon.check_key("DB_STORAGE_TYPE",self.ora_env_dict) else "ASM" - charset=self.ora_env_dict["DB_CHARACTERSET"] if self.ocommon.check_key("DB_CHARACTERSET",self.ora_env_dict) else "AL32UTF8" - redosize=self.ora_env_dict["DB_REDOFILE_SIZE"] if self.ocommon.check_key("DB_REDOFILE_SIZE",self.ora_env_dict) else "1024" - dbtype=self.ora_env_dict["DB_TYPE"] if self.ocommon.check_key("DB_TYPE",self.ora_env_dict) else "OLTP" - dbctype=self.ora_env_dict["DB_CONFIG_TYPE"] if self.ocommon.check_key("DB_CONFIG_TYPE",self.ora_env_dict) else "RAC" - arcmode=self.ora_env_dict["ENABLE_ARCHIVELOG"] if self.ocommon.check_key("ENABLE_ARCHIVELOG",self.ora_env_dict) else "true" - pdbsettings=self.get_pdb_params() - initparams=self.get_init_params() - #memorypct=self.get_memorypct() - - rspdata='''su - {0} -c "{1}/bin/dbca -silent -ignorePrereqFailure -createDatabase \ - -templateName General_Purpose.dbc \ - -gdbname {2} \ - -createAsContainerDatabase {3} \ - -sysPassword HIDDEN_STRING \ - -systemPassword HIDDEN_STRING \ - -datafileDestination {4} \ - -storageType {5} \ - -characterSet {6} \ - -redoLogFileSize {7} \ - -databaseType {8} \ - -databaseConfigType {9} \ - -nodelist {10} \ - -useOMF true \ - {12} \ - {13} \ - -enableArchive {14}"'''.format(dbuser,dbhome,dbname,cdbflag,dbfiledest,stype,charset,redosize,dbtype,dbctype,crs_nodes,dbname,pdbsettings,initparams,arcmode) - cmd='\n'.join(line.lstrip() for line in rspdata.splitlines()) - - return cmd - - def get_pdb_params(self): - """ - Perform the asm disk group creation - 
""" - pdbnum=self.ora_env_dict["PDB_COUNT"] if self.ocommon.check_key("PDB_COUNT",self.ora_env_dict) else "1" - pdbname=self.ora_env_dict["ORACLE_PDB_NAME"] if self.ocommon.check_key("ORACLE_PDB_NAME",self.ora_env_dict) else "ORCLPDB" - rspdata='''-numberOfPDBs {0} \ - -pdbAdminPassword HIDDEN_STRING \ - -pdbName {1}'''.format(pdbnum,pdbname) - cmd='\n'.join(line.lstrip() for line in rspdata.splitlines()) - return cmd - - def get_init_params(self): - """ - Perform the asm disk group creation - """ - sgasize=self.ora_env_dict["INIT_SGA_SIZE"] if self.ocommon.check_key("INIT_SGA_SIZE",self.ora_env_dict) else None - pgasize=self.ora_env_dict["INIT_PGA_SIZE"] if self.ocommon.check_key("INIT_PGA_SIZE",self.ora_env_dict) else None - processes=self.ora_env_dict["INIT_PROCESSES"] if self.ocommon.check_key("INIT_PROCESSES",self.ora_env_dict) else None - dbname,osid,dbuname=self.ocommon.getdbnameinfo() - dgname=self.ocommon.setdgprefix(self.ocommon.getcrsdgname()) - dbdest=self.ocommon.setdgprefix(self.ocommon.getdbdestdgname(dgname)) - dbrdest=self.ocommon.setdgprefix(self.ocommon.getdbrdestdgname(dbdest)) - dbrdestsize=self.ora_env_dict["DB_RECOVERY_FILE_DEST_SIZE"] if self.ocommon.check_key("DB_RECOVERY_FILE_DEST_SIZE",self.ora_env_dict) else None - cpucount=self.ora_env_dict["CPU_COUNT"] if self.ocommon.check_key("CPU_COUNT",self.ora_env_dict) else None - dbfiles=self.ora_env_dict["DB_FILES"] if self.ocommon.check_key("DB_FILES",self.ora_env_dict) else "1024" - lgbuffer=self.ora_env_dict["LOG_BUFFER"] if self.ocommon.check_key("LOG_BUFFER",self.ora_env_dict) else "256M" - dbrettime=self.ora_env_dict["DB_FLASHBACK_RETENTION_TARGET"] if self.ocommon.check_key("DB_FLASHBACK_RETENTION_TARGET",self.ora_env_dict) else "120" - dbblkck=self.ora_env_dict["DB_BLOCK_CHECKSUM"] if self.ocommon.check_key("DB_BLOCK_CHECKSUM",self.ora_env_dict) else "TYPICAL" - dblwp=self.ora_env_dict["DB_LOST_WRITE_PROTECT"] if self.ocommon.check_key("DB_LOST_WRITE_PROTECT",self.ora_env_dict) else "TYPICAL" - ptpc=self.ora_env_dict["PARALLEL_THREADS_PER_CPU"] if self.ocommon.check_key("PARALLEL_THREADS_PER_CPU",self.ora_env_dict) else "1" - dgbr1=self.ora_env_dict["DG_BROKER_CONFIG_FILE1"] if self.ocommon.check_key("DG_BROKER_CONFIG_FILE1",self.ora_env_dict) else dbdest - dgbr2=self.ora_env_dict["DG_BROKER_CONFIG_FILE2"] if self.ocommon.check_key("DG_BROKER_CONFIG_FILE2",self.ora_env_dict) else dbrdest - remotepasswdfile="REMOTE_LOGIN_PASSWORDFILE=EXCLUSIVE" - lgformat="LOG_ARCHIVE_FORMAT=%t_%s_%r.arc" - - initprm='''db_recovery_file_dest={0},db_create_file_dest={2},{3},{4},db_unique_name={5},db_files={6},LOG_BUFFER={7},DB_FLASHBACK_RETENTION_TARGET={8},DB_BLOCK_CHECKSUM={9},DB_LOST_WRITE_PROTECT={10},PARALLEL_THREADS_PER_CPU={11},DG_BROKER_CONFIG_FILE1={12},DG_BROKER_CONFIG_FILE2={13}'''.format(dbrdest,dbrdest,dbdest,remotepasswdfile,lgformat,dbuname,dbfiles,lgbuffer,dbrettime,dbblkck,dblwp,ptpc,dgbr1,dgbr2) - - if sgasize: - initprm= initprm + ''',sga_target={0},sga_max_size={0}'''.format(sgasize) - - if pgasize: - initprm= initprm + ''',pga_aggregate_size={0}'''.format(pgasize) - - if processes: - initprm= initprm + ''',processes={0}'''.format(processes) - - if cpucount: - initprm= initprm + ''',cpu_count={0}'''.format(cpucount) - - if dbrdestsize: - initprm = initprm + ''',db_recovery_file_dest_size={0}'''.format(dbrdestsize) - - initparams=""" -initparams '{0}'""".format(initprm) - - return initparams - - def manage_thread(self): - """ - This function manage the thread and exit the thread if the job is done - """ - 
self.ocommon.log_info_message("going to run the loop",self.file_name) - checkthread=True - self.ocommon.log_info_message("mythread list has values" + str(len(self.mythread)),self.file_name) - while checkthread: - for key,value in self.mythread.items(): - print(key, value) - swthread=value[0] - if swthread.is_alive(): - self.ocommon.log_info_message("I am in status block1. Key is set to " + key + "Flag is set to : " + value[1] ,self.file_name) - checkthread=True - sleep(10) - else: - checkthread=False - self.ocommon.log_info_message("I am not Alive",self.file_name) - sleep(10) - - if value[1] == 'FALSE': - self.ocommon.log_info_message("I am in status block. Key is set to " + key + "Flag is set to : " + value[1] ,self.file_name) - break # exit the main loop diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/Checksum b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/Checksum new file mode 100644 index 0000000000..47029ca3b5 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/Checksum @@ -0,0 +1,2 @@ +b7c4c66f801f92d14faa0d791ccda721 19.3.0/LINUX.X64_193000_db_home.zip +1858bd0d281c60f4ddabd87b1c214a4f 19.3.0/LINUX.X64_193000_grid_home.zip \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/Containerfile b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/Containerfile new file mode 100644 index 0000000000..aa5eb32344 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/Containerfile @@ -0,0 +1,258 @@ +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# ORACLE CONTAINERFILES PROJECT +# -------------------------- +# This is the Containerfile for Oracle Database 19c Release 3 Real Application Clusters +# +# REQUIRED FILES TO BUILD THIS IMAGE +# ---------------------------------- +# (1) LINUX.X64_193000_grid_home.zip +# (2 LINUX.X64_193000_db_home.zip +# Download Oracle Grid 19c Release 3 Enterprise Edition for Linux x64 +# Download Oracle Database 19c Release 3 Enterprise Edition for Linux x64 +# from http://www.oracle.com/technetwork/database/enterprise-edition/downloads/index.html +# +# HOW TO BUILD THIS IMAGE +# ----------------------- +# Run: +# $ docker build -t oracle/database:19c-rac . 
+ + +ARG BASE_OL_IMAGE=oraclelinux:8 +ARG SLIMMING=false +# Pull base image +# --------------- +# hadolint ignore=DL3006,DL3025 +FROM $BASE_OL_IMAGE AS base +ARG SLIMMING=false +ARG VERSION +# Labels +# ------ +LABEL "provider"="Oracle" \ + "issues"="https://github.com/oracle/docker-images/issues" \ + "volume.setup.location1"="/opt/scripts" \ + "volume.startup.location1"="/opt/scripts/startup" \ + "port.listener"="1521" \ + "port.oemexpress"="5500" + +# Argument to control removal of components not needed after db software installation +ARG INSTALL_FILE_1="LINUX.X64_193000_grid_home.zip" +ARG INSTALL_FILE_2="LINUX.X64_193000_db_home.zip" +ARG DB_EDITION="EE" +ARG USER="root" +ARG WORKDIR="/rac-work-dir" +ARG IGNORE_PREREQ=false + +# Environment variables required for this build (do NOT change) +# ------------------------------------------------------------- +# Linux Env Variable +# hadolint ignore=DL3044 +ENV SETUP_LINUX_FILE="setupLinuxEnv.sh" \ + INSTALL_DIR=/opt/scripts \ +# Grid Env variables + GRID_INSTALL_RSP="gridsetup_19c.rsp" \ + GRID_SW_INSTALL_RSP="grid_sw_install_19c.rsp" \ + GRID_SETUP_FILE="setupGrid.sh" \ + INITSH="initsh" \ + WORKDIR=$WORKDIR \ + FIXUP_PREQ_FILE="fixupPreq.sh" \ + INSTALL_GRID_BINARIES_FILE="installGridBinaries.sh" \ + INSTALL_GRID_PATCH="applyGridPatch.sh" \ + INVENTORY=/u01/app/oraInventory \ + INSTALL_FILE_1=$INSTALL_FILE_1 \ + INSTALL_FILE_2=$INSTALL_FILE_2 \ + DB_EDITION=$DB_EDITION \ + ADDNODE_RSP="grid_addnode.rsp" \ + SETUPSSH="setupSSH.expect" \ + DOCKERORACLEINIT="dockeroracleinit" \ + GRID_USER_HOME="/home/grid" \ + ASM_DISCOVERY_DIR="/dev" \ +# RAC DB Env Variables + DB_INSTALL_RSP="db_sw_install_19c.rsp" \ + DBCA_RSP="dbca_19c.rsp" \ + DB_SETUP_FILE="setupDB.sh" \ + RUN_FILE="runOracle.sh" \ + ENABLE_RAC_FILE="enableRAC.sh" \ + INSTALL_DB_BINARIES_FILE="installDBBinaries.sh" \ + GRID_HOME_CLEANUP="GridHomeCleanup.sh" \ + ORACLE_HOME_CLEANUP="OracleHomeCleanup.sh" \ + DB_USER="oracle" \ + GRID_USER="grid" \ + SLIMMING=$SLIMMING \ + container="true" \ + COMMON_SCRIPTS="/common_scripts" \ + CHECK_SPACE_FILE="checkSpace.sh" \ + RESET_FAILED_UNITS="resetFailedUnits.sh" \ + SET_CRONTAB="setCrontab.sh" \ + CRONTAB_ENTRY="crontabEntry" \ + EXPECT="/usr/bin/expect" \ + BIN="/usr/sbin" \ + IGNORE_PREREQ=$IGNORE_PREREQ + +############################################# +# ------------------------------------------- +# Start new stage for Non-Slim Image +# ------------------------------------------- +############################################# + +FROM base AS rac-image-slim-false +ARG SLIMMING +ARG VERSION +# Environment variables required for this build (do NOT change) +# ------------------------------------------------------------- +# Linux Env Variable +ENV GRID_BASE=/u01/app/grid \ + GRID_HOME=/u01/app/19c/grid \ + DB_BASE=/u01/app/oracle \ + DB_HOME=/u01/app/oracle/product/19c/dbhome_1 +# Use second ENV so that variable get substituted +# hadolint ignore=DL3044 +ENV INSTALL_SCRIPTS=$INSTALL_DIR/install \ + PATH=/bin:/usr/bin:/sbin:/usr/sbin \ + SCRIPT_DIR=$INSTALL_DIR/startup \ + RAC_SCRIPTS_DIR="scripts" \ + GRID_PATH=$GRID_HOME/bin:$GRID_HOME/OPatch/:$GRID_HOME/perl/bin:/usr/sbin:/bin:/sbin \ + DB_PATH=$DB_HOME/bin:$DB_HOME/OPatch/:$DB_HOME/perl/bin:/usr/sbin:/bin:/sbin \ + GRID_LD_LIBRARY_PATH=$GRID_HOME/lib:/usr/lib:/lib \ + DB_LD_LIBRARY_PATH=$DB_HOME/lib:/usr/lib:/lib +ENV CV_ASSUME_DISTID=OEL7.8 +# Copy binaries +# ------------- +# COPY Binaries +COPY $VERSION/$SETUP_LINUX_FILE $VERSION/$GRID_SETUP_FILE $VERSION/$DB_SETUP_FILE 
$VERSION/$CHECK_SPACE_FILE $VERSION/$FIXUP_PREQ_FILE $INSTALL_SCRIPTS/ + +# Setup Scripts +COPY $VERSION/$RUN_FILE $VERSION/$ADDNODE_RSP $VERSION/$SETUPSSH $VERSION/$GRID_INSTALL_RSP $VERSION/$DBCA_RSP $VERSION/$INITSH $SCRIPT_DIR/ + +COPY $RAC_SCRIPTS_DIR $SCRIPT_DIR/scripts +# hadolint ignore=SC2086 +RUN chmod 755 $INSTALL_SCRIPTS/*.sh && \ + sync && \ + $INSTALL_DIR/install/$CHECK_SPACE_FILE && \ + $INSTALL_DIR/install/$SETUP_LINUX_FILE && \ + $INSTALL_DIR/install/$GRID_SETUP_FILE && \ + $INSTALL_DIR/install/$DB_SETUP_FILE && \ + sync + +############################################# +# ------------------------------------------- +# Start new stage for slim image +# ------------------------------------------- +############################################# +FROM base AS rac-image-slim-true +ARG SLIMMING +ARG VERSION +ENV CV_ASSUME_DISTID=OEL7.8 + +# Environment variables required for this build (do NOT change) +# ------------------------------------------------------------- +# Linux Env Variable +ENV INSTALL_SCRIPTS=$INSTALL_DIR/install \ + PATH=/bin:/usr/bin:/sbin:/usr/sbin \ + SCRIPT_DIR=$INSTALL_DIR/startup \ + RAC_SCRIPTS_DIR="scripts" + +# Copy binaries +# ------------- +# COPY Binaries +COPY $VERSION/$SETUP_LINUX_FILE $VERSION/$GRID_SETUP_FILE $VERSION/$DB_SETUP_FILE $VERSION/$CHECK_SPACE_FILE $VERSION/$FIXUP_PREQ_FILE $INSTALL_SCRIPTS/ + +# Setup Scripts +COPY $VERSION/$RUN_FILE $VERSION/$SETUPSSH $VERSION/$INITSH $SCRIPT_DIR/ + +COPY $RAC_SCRIPTS_DIR $SCRIPT_DIR/scripts +# hadolint ignore=SC2086 +RUN chmod 755 $INSTALL_SCRIPTS/*.sh && \ + sync && \ + $INSTALL_DIR/install/$CHECK_SPACE_FILE && \ + $INSTALL_DIR/install/$SETUP_LINUX_FILE && \ + $INSTALL_DIR/install/$GRID_SETUP_FILE && \ + $INSTALL_DIR/install/$DB_SETUP_FILE && \ + sync + + +############################################# +# ------------------------------------------- +# Start new stage for installing the grid and DB +# ------------------------------------------- +############################################# +# hadolint ignore=DL3006 +FROM rac-image-slim-${SLIMMING} AS builder +ARG SLIMMING +# hadolint ignore=DL3006 +ARG VERSION +COPY $VERSION/$INSTALL_GRID_BINARIES_FILE $VERSION/$GRID_SW_INSTALL_RSP $VERSION/$DB_SETUP_FILE $VERSION/$DB_INSTALL_RSP $VERSION/$INSTALL_DB_BINARIES_FILE $VERSION/$ENABLE_RAC_FILE $VERSION/$GRID_HOME_CLEANUP $VERSION/$ORACLE_HOME_CLEANUP $VERSION/$INSTALL_FILE_1* $VERSION/$INSTALL_FILE_2* $INSTALL_SCRIPTS/ +# hadolint ignore=SC2086 +RUN chmod 755 $INSTALL_SCRIPTS/*.sh + +## Install software if SLIMMING is false +# hadolint ignore=SC2086 +RUN if [ "${SLIMMING}x" != 'truex' ]; then \ + sed -e '/hard *memlock/s/^/#/g' -i /etc/security/limits.d/oracle-database-preinstall-19c.conf && \ + sed -e '/ *nofile /s/^/#/g' -i /etc/security/limits.d/oracle-database-preinstall-19c.conf && \ + su $GRID_USER -c "$INSTALL_DIR/install/$INSTALL_GRID_BINARIES_FILE EE $PATCH_NUMBER" && \ + $INVENTORY/orainstRoot.sh && \ + $GRID_HOME/root.sh && \ + su $DB_USER -c "$INSTALL_DIR/install/$INSTALL_DB_BINARIES_FILE EE" && \ + su $DB_USER -c "$INSTALL_DIR/install/$ENABLE_RAC_FILE" && \ + $INVENTORY/orainstRoot.sh && \ + $DB_HOME/root.sh && \ + su $GRID_USER -c "$INSTALL_SCRIPTS/$GRID_HOME_CLEANUP" && \ + su $DB_USER -c "$INSTALL_SCRIPTS/$ORACLE_HOME_CLEANUP" && \ + :; \ + fi +# hadolint ignore=SC3014 +RUN if [ "${SLIMMING}x" == 'truex' ]; then \ + mkdir /u01 && \ + :; \ + fi +# hadolint ignore=SC2086 +RUN rm -f $INSTALL_DIR/install/* && \ + sync + +############################################# +# 
------------------------------------------- +# Start new layer for grid & database runtime +# ------------------------------------------- +############################################# +# hadolint ignore=DL3006 +FROM rac-image-slim-${SLIMMING} AS final +# hadolint ignore=DL3006 +COPY --from=builder /u01 /u01 +# hadolint ignore=SC2086 +RUN if [ "${SLIMMING}x" != 'truex' ]; then \ + $INVENTORY/orainstRoot.sh && \ + $GRID_HOME/root.sh && \ + $DB_HOME/root.sh && \ + chmod 666 $SCRIPT_DIR/*.rsp && \ + :; \ + fi && \ + $INSTALL_DIR/install/$FIXUP_PREQ_FILE && \ + sync && \ + chmod 755 $SCRIPT_DIR/*.sh && \ + chmod 755 $SCRIPT_DIR/scripts/*.py && \ + chmod 755 $SCRIPT_DIR/scripts/cmdExec && \ + chmod 755 $SCRIPT_DIR/scripts/*.expect && \ + echo "nohup $SCRIPT_DIR/runOracle.sh &" >> /etc/rc.local && \ + rm -f /etc/rc.d/init.d/oracle-database-preinstall-19c-firstboot && \ + chmod +x /etc/rc.d/rc.local && \ + cp $SCRIPT_DIR/$INITSH /usr/bin/$INITSH && \ + setcap 'cap_net_admin,cap_net_raw+ep' /usr/bin/ping && \ + chmod 755 /usr/bin/$INITSH && \ + rm -f /etc/sysctl.d/99-oracle-database-preinstall-19c-sysctl.conf && \ + rm -f /etc/sysctl.d/99-sysctl.conf && \ + rm -f $INSTALL_DIR/install/* && \ + sync + +USER ${USER} +VOLUME ["/common_scripts"] +WORKDIR $WORKDIR + +HEALTHCHECK --interval=2m --start-period=30m \ + CMD "$SCRIPT_DIR/scripts/main.py --checkracinst=true" >/dev/null || exit 1 + +# Define default command to start Oracle Grid and RAC Database setup. +# hadolint ignore=DL3025 +ENTRYPOINT /usr/bin/$INITSH \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/GridHomeCleanup.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/GridHomeCleanup.sh new file mode 100755 index 0000000000..42f33d4f70 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/GridHomeCleanup.sh @@ -0,0 +1,59 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2019,2025 Oracle and/or its affiliates. +# +# Since: January, 2019 +# Author: paramdeep.saini@oracle.com +# Description: Cleanup the $GRID_HOME and ORACLE_BASE after Grid confguration in the image +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
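+# The removal commands below trim logs, trace and TFA data, cached
+# configuration files, and inventory backups from the Grid home and Grid base
+# so that the resulting image stays as small as possible.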
+# + +# Image Cleanup Script +# shellcheck disable=SC1090 +source /home/"${GRID_USER}"/.bashrc +# shellcheck disable=SC2034 +ORACLE_HOME=${GRID_HOME} + +rm -rf /u01/app/grid/* +rm -rf "$GRID_HOME"/log +rm -rf "$GRID_HOME"/logs +rm -rf "$GRID_HOME"/crs/init +rm -rf "$GRID_HOME"/crs/install/rhpdata +rm -rf "$GRID_HOME"/crs/log +rm -rf "$GRID_HOME"/racg/dump +rm -rf "$GRID_HOME"/srvm/log +rm -rf "$GRID_HOME"/cv/log +rm -rf "$GRID_HOME"/cdata +rm -rf "$GRID_HOME"/bin/core* +rm -rf "$GRID_HOME"/bin/diagsnap.pl +rm -rf "$GRID_HOME"/cfgtoollogs/* +rm -rf "$GRID_HOME"/network/admin/listener.ora +rm -rf "$GRID_HOME"/crf +rm -rf "$GRID_HOME"/ologgerd/init +rm -rf "$GRID_HOME"/osysmond/init +rm -rf "$GRID_HOME"/ohasd/init +rm -rf "$GRID_HOME"/ctss/init +rm -rf "$GRID_HOME"/dbs/.*.dat +rm -rf "$GRID_HOME"/oc4j/j2ee/home/log +rm -rf "$GRID_HOME"/inventory/Scripts/ext/bin/log +rm -rf "$GRID_HOME"/inventory/backup/* +rm -rf "$GRID_HOME"/mdns/init +rm -rf "$GRID_HOME"/gnsd/init +rm -rf "$GRID_HOME"/evm/init +rm -rf "$GRID_HOME"/gipc/init +rm -rf "$GRID_HOME"/gpnp/gpnp_bcp.* +rm -rf "$GRID_HOME"/gpnp/init +rm -rf "$GRID_HOME"/auth +rm -rf "$GRID_HOME"/tfa +rm -rf "$GRID_HOME"/suptools/tfa/release/diag +rm -rf "$GRID_HOME"/rdbms/audit/* +rm -rf "$GRID_HOME"/rdbms/log/* +rm -rf "$GRID_HOME"/network/log/* +rm -rf "$GRID_HOME"/inventory/Scripts/comps.xml.* +rm -rf "$GRID_HOME"/inventory/Scripts/oraclehomeproperties.xml.* +rm -rf "$GRID_HOME"/inventory/Scripts/oraInst.loc.* +rm -rf "$GRID_HOME"/inventory/Scripts/inventory.xml.* +rm -rf "$GRID_HOME"/log_file_client.log +rm -rf "$INVENTORY"/logs/* diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/OracleHomeCleanup.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/OracleHomeCleanup.sh new file mode 100755 index 0000000000..bd46f5012e --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/OracleHomeCleanup.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2019,2025 Oracle and/or its affiliates. +# +# Since: January, 2019 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Cleanup the $ORACLE_HOME and ORACLE_BASE after Grid confguration in the image +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
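+# The removal commands below delete audit logs, build leftovers, default
+# network configuration files (listener.ora, tnsnames.ora), and inventory
+# backups from the Oracle home to keep the resulting image small.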
+# + +# Image Cleanup Script +# shellcheck disable=SC1090 +source /home/"${DB_USER}"/.bashrc +ORACLE_HOME=${DB_HOME} + +rm -rf "$ORACLE_HOME"/bin/extjob +rm -rf "$ORACLE_HOME"/PAF +rm -rf "$ORACLE_HOME"/install/oratab +rm -rf "$ORACLE_HOME"/install/make.log +rm -rf "$ORACLE_HOME"/network/admin/listener.ora +rm -rf "$ORACLE_HOME"/network/admin/tnsnames.ora +rm -rf "$ORACLE_HOME"/bin/nmo +rm -rf "$ORACLE_HOME"/bin/nmb +rm -rf "$ORACLE_HOME"/bin/nmhs +rm -rf "$ORACLE_HOME"/log/.* +rm -rf "$ORACLE_HOME"/oc4j/j2ee/oc4j_applications/applications/em/em/images/chartCache/* +rm -rf "$ORACLE_HOME"/rdbms/audit/* +rm -rf "$ORACLE_HOME"/cfgtoollogs/* +rm -rf "$ORACLE_HOME"/inventory/Scripts/comps.xml.* +rm -rf "$ORACLE_HOME"/inventory/Scripts/oraclehomeproperties.xml.* +rm -rf "$ORACLE_HOME"/inventory/Scripts/oraInst.loc.* +rm -rf "$ORACLE_HOME"/inventory/Scripts/inventory.xml.* +rm -rf "$INVENTORY"/logs/* diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/applyGridPatch.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/applyGridPatch.sh new file mode 100755 index 0000000000..247edd87f6 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/applyGridPatch.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Apply Patch for Oracle Grid and Databas. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +PATCH=$1 + +# Check whether edition has been passed on +if [ "$PATCH" == "" ]; then + echo "ERROR: No Patch has been passed on!" + echo "Please specify the correct PATCH!" + exit 1; +fi; + +# Check whether GRID_BASE is set +if [ "$GRID_BASE" == "" ]; then + echo "ERROR: GRID_BASE has not been set!" + echo "You have to have the GRID_BASE environment variable set to a valid value!" + exit 1; +fi; + +# Check whether GRID_HOME is set +if [ "$GRID_HOME" == "" ]; then + echo "ERROR: GRID_HOME has not been set!" + echo "You have to have the GRID_HOME environment variable set to a valid value!" + exit 1; +fi; + +# Install Oracle binaries +# shellcheck disable=SC2115 +unzip -q "$INSTALL_SCRIPTS"/"$PATCH" -d "$GRID_USER_HOME" && \ +rm -f "$INSTALL_SCRIPTS"/"$GRID_PATCH" && \ +cd "$GRID_USER_HOME"/"$PATCH_NUMBER"/"$PATCH_NUMBER" && \ +"$GRID_HOME"/OPatch/opatch napply -silent -local -oh "$GRID_HOME" -id "$PATCH_NUMBER" && \ +cd "$GRID_USER_HOME" && \ +rm -rf "$GRID_USER_HOME"/"$PATCH_NUMBER" diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/checkSpace.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/checkSpace.sh new file mode 100755 index 0000000000..de8568f350 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/checkSpace.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Checks the available space of the system. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +REQUIRED_SPACE_GB=35 +AVAILABLE_SPACE_GB=`df -PB 1G / | tail -n 1 | awk '{print $4}'` + +if [ $AVAILABLE_SPACE_GB -lt $REQUIRED_SPACE_GB ]; then + script_name=`basename "$0"` + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
+ echo "$script_name: ERROR - There is not enough space available in the docker container." + echo "$script_name: The container needs at least $REQUIRED_SPACE_GB GB , but only $AVAILABLE_SPACE_GB available." + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + exit 1; +fi; diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/db_inst.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/db_inst.rsp new file mode 100644 index 0000000000..68e58b1ecb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/db_inst.rsp @@ -0,0 +1,125 @@ +#################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved.## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +#################################################################### + + +#------------------------------------------------------------------------------- +# Do not change the following system generated value. +#------------------------------------------------------------------------------- +oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v18.0.0 + +#------------------------------------------------------------------------------- +# Specify the installation option. +# It can be one of the following: +# - INSTALL_DB_SWONLY +# - INSTALL_DB_AND_CONFIG +#------------------------------------------------------------------------------- +oracle.install.option=INSTALL_DB_SWONLY + +#------------------------------------------------------------------------------- +# Specify the Unix group to be set for the inventory directory. +#------------------------------------------------------------------------------- +UNIX_GROUP_NAME=oinstall + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION=/u01/app/oraInventory +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Home. +#------------------------------------------------------------------------------- +ORACLE_HOME=/u01/app/oracle/product/18.3.0/dbhome_1 + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE=/u01/app/oracle + +#------------------------------------------------------------------------------- +# Specify the installation edition of the component. +# +# The value should contain only one of these choices. 
+# - EE : Enterprise Edition +# - SE2 : Standard Edition 2 + + +#------------------------------------------------------------------------------- + +oracle.install.db.InstallEdition=EE +############################################################################### +# # +# PRIVILEGED OPERATING SYSTEM GROUPS # +# ------------------------------------------ # +# Provide values for the OS groups to which SYSDBA and SYSOPER privileges # +# needs to be granted. If the install is being performed as a member of the # +# group "dba", then that will be used unless specified otherwise below. # +# # +# The value to be specified for OSDBA and OSOPER group is only for UNIX based # +# Operating System. # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.db.OSDBA_GROUP=dba + +#------------------------------------------------------------------------------ +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +#------------------------------------------------------------------------------ +oracle.install.db.OSOPER_GROUP=oper + +#------------------------------------------------------------------------------ +# The OSBACKUPDBA_GROUP is the OS group which is to be granted SYSBACKUP privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSBACKUPDBA_GROUP=backupdba + +#------------------------------------------------------------------------------ +# The OSDGDBA_GROUP is the OS group which is to be granted SYSDG privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSDGDBA_GROUP=dgdba + +#------------------------------------------------------------------------------ +# The OSKMDBA_GROUP is the OS group which is to be granted SYSKM privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSKMDBA_GROUP=kmdba + +#------------------------------------------------------------------------------ +# The OSRACDBA_GROUP is the OS group which is to be granted SYSRAC privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSRACDBA_GROUP=racdba +#------------------------------------------------------------------------------ +# Specify whether to enable the user to set the password for +# My Oracle Support credentials. The value can be either true or false. +# If left blank it will be assumed to be false. +# +# Example : SECURITY_UPDATES_VIA_MYORACLESUPPORT=true +#------------------------------------------------------------------------------ +SECURITY_UPDATES_VIA_MYORACLESUPPORT=false + +#------------------------------------------------------------------------------ +# Specify whether user doesn't want to configure Security Updates. +# The value for this variable should be true if you don't want to configure +# Security Updates, false otherwise. +# +# The value can be either true or false. If left blank it will be assumed +# to be true. 
+# +# Example : DECLINE_SECURITY_UPDATES=false +#------------------------------------------------------------------------------ +DECLINE_SECURITY_UPDATES=true diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/db_install_19cv1.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/db_install_19cv1.rsp new file mode 100644 index 0000000000..c6a6ee9740 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/db_install_19cv1.rsp @@ -0,0 +1,356 @@ +#################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved.## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +#################################################################### + + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v19.0.0 + +#------------------------------------------------------------------------------- +# Specify the installation option. +# It can be one of the following: +# - INSTALL_DB_SWONLY +# - INSTALL_DB_AND_CONFIG +#------------------------------------------------------------------------------- +oracle.install.option= + +#------------------------------------------------------------------------------- +# Specify the Unix group to be set for the inventory directory. +#------------------------------------------------------------------------------- +UNIX_GROUP_NAME= + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION= +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Home. +#------------------------------------------------------------------------------- +ORACLE_HOME= + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE= + +#------------------------------------------------------------------------------- +# Specify the installation edition of the component. +# +# The value should contain only one of these choices. +# - EE : Enterprise Edition +# - SE2 : Standard Edition 2 + + +#------------------------------------------------------------------------------- + +oracle.install.db.InstallEdition= +############################################################################### +# # +# PRIVILEGED OPERATING SYSTEM GROUPS # +# ------------------------------------------ # +# Provide values for the OS groups to which SYSDBA and SYSOPER privileges # +# needs to be granted. 
If the install is being performed as a member of the # +# group "dba", then that will be used unless specified otherwise below. # +# # +# The value to be specified for OSDBA and OSOPER group is only for UNIX based # +# Operating System. # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.db.OSDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +#------------------------------------------------------------------------------ +oracle.install.db.OSOPER_GROUP= + +#------------------------------------------------------------------------------ +# The OSBACKUPDBA_GROUP is the OS group which is to be granted SYSBACKUP privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSBACKUPDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSDGDBA_GROUP is the OS group which is to be granted SYSDG privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSDGDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSKMDBA_GROUP is the OS group which is to be granted SYSKM privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSKMDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSRACDBA_GROUP is the OS group which is to be granted SYSRAC privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSRACDBA_GROUP= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.executeRootScript= + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. +# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.configMethod= +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. +# +# Applicable only when SUDO configuration method was chosen. 
+#-------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# Applicable only when SUDO configuration method was chosen. +# Note:For Single Instance database installations,the sudo user name must be the username of the user installing the database. +#-------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.sudoUserName= + +############################################################################### +# # +# Grid Options # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# Value is required only if the specified install option is INSTALL_DB_SWONLY +# +# Specify the cluster node names selected during the installation. +# +# Example : oracle.install.db.CLUSTER_NODES=node1,node2 +#------------------------------------------------------------------------------ +oracle.install.db.CLUSTER_NODES= + +############################################################################### +# # +# Database Configuration Options # +# # +############################################################################### + +#------------------------------------------------------------------------------- +# Specify the type of database to create. +# It can be one of the following: +# - GENERAL_PURPOSE +# - DATA_WAREHOUSE +# GENERAL_PURPOSE: A starter database designed for general purpose use or transaction-heavy applications. +# DATA_WAREHOUSE : A starter database optimized for data warehousing applications. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.type= + +#------------------------------------------------------------------------------- +# Specify the Starter Database Global Database Name. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.globalDBName= + +#------------------------------------------------------------------------------- +# Specify the Starter Database SID. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.SID= + +#------------------------------------------------------------------------------- +# Specify whether the database should be configured as a Container database. +# The value can be either "true" or "false". If left blank it will be assumed +# to be "false". +#------------------------------------------------------------------------------- +oracle.install.db.ConfigureAsContainerDB= + +#------------------------------------------------------------------------------- +# Specify the Pluggable Database name for the pluggable database in Container Database. +#------------------------------------------------------------------------------- +oracle.install.db.config.PDBName= + +#------------------------------------------------------------------------------- +# Specify the Starter Database character set. 
+# +# One of the following +# AL32UTF8, WE8ISO8859P15, WE8MSWIN1252, EE8ISO8859P2, +# EE8MSWIN1250, NE8ISO8859P10, NEE8ISO8859P4, BLT8MSWIN1257, +# BLT8ISO8859P13, CL8ISO8859P5, CL8MSWIN1251, AR8ISO8859P6, +# AR8MSWIN1256, EL8ISO8859P7, EL8MSWIN1253, IW8ISO8859P8, +# IW8MSWIN1255, JA16EUC, JA16EUCTILDE, JA16SJIS, JA16SJISTILDE, +# KO16MSWIN949, ZHS16GBK, TH8TISASCII, ZHT32EUC, ZHT16MSWIN950, +# ZHT16HKSCS, WE8ISO8859P9, TR8MSWIN1254, VN8MSWIN1258 +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.characterSet= + +#------------------------------------------------------------------------------ +# This variable should be set to true if Automatic Memory Management +# in Database is desired. +# If Automatic Memory Management is not desired, and memory allocation +# is to be done manually, then set it to false. +#------------------------------------------------------------------------------ +oracle.install.db.config.starterdb.memoryOption= + +#------------------------------------------------------------------------------- +# Specify the total memory allocation for the database. Value(in MB) should be +# at least 256 MB, and should not exceed the total physical memory available +# on the system. +# Example: oracle.install.db.config.starterdb.memoryLimit=512 +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.memoryLimit= + +#------------------------------------------------------------------------------- +# This variable controls whether to load Example Schemas onto +# the starter database or not. +# The value can be either "true" or "false". If left blank it will be assumed +# to be "false". +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.installExampleSchemas= + +############################################################################### +# # +# Passwords can be supplied for the following four schemas in the # +# starter database: # +# SYS # +# SYSTEM # +# DBSNMP (used by Enterprise Manager) # +# # +# Same password can be used for all accounts (not recommended) # +# or different passwords for each account can be provided (recommended) # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# This variable holds the password that is to be used for all schemas in the +# starter database. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.ALL= + +#------------------------------------------------------------------------------- +# Specify the SYS password for the starter database. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.SYS= + +#------------------------------------------------------------------------------- +# Specify the SYSTEM password for the starter database. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.SYSTEM= + +#------------------------------------------------------------------------------- +# Specify the DBSNMP password for the starter database. 
+# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.DBSNMP= + +#------------------------------------------------------------------------------- +# Specify the PDBADMIN password required for creation of Pluggable Database in the Container Database. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.PDBADMIN= + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing the database. +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your database with Enterprise Manager Cloud Control along with Database Express. +# 2. DEFAULT -If you want to manage your database using the default Database Express option. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.managementOption= + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.omsPort= + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.emAdminPassword= + +############################################################################### +# # +# SPECIFY RECOVERY OPTIONS # +# ------------------------------------ # +# Recovery options for the database can be mentioned using the entries below # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# This variable is to be set to false if database recovery is not required. Else +# this can be set to true. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.enableRecovery= + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for the database. 
+# It can be one of the following: +# - FILE_SYSTEM_STORAGE +# - ASM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.storageType= + +#------------------------------------------------------------------------------- +# Specify the database file location which is a directory for datafiles, control +# files, redo logs. +# +# Applicable only when oracle.install.db.config.starterdb.storage=FILE_SYSTEM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.fileSystemStorage.dataLocation= + +#------------------------------------------------------------------------------- +# Specify the recovery location. +# +# Applicable only when oracle.install.db.config.starterdb.storage=FILE_SYSTEM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.fileSystemStorage.recoveryLocation= + +#------------------------------------------------------------------------------- +# Specify the existing ASM disk groups to be used for storage. +# +# Applicable only when oracle.install.db.config.starterdb.storageType=ASM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.asm.diskGroup= + +#------------------------------------------------------------------------------- +# Specify the password for ASMSNMP user of the ASM instance. +# +# Applicable only when oracle.install.db.config.starterdb.storage=ASM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.asm.ASMSNMPPassword= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/db_sw_install_19c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/db_sw_install_19c.rsp new file mode 100644 index 0000000000..25dc006b8e --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/db_sw_install_19c.rsp @@ -0,0 +1,45 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v19.0.0 +oracle.install.option=INSTALL_DB_SWONLY +UNIX_GROUP_NAME=oinstall +INVENTORY_LOCATION=/u01/app/oraInventory +ORACLE_HOME=/u01/app/oracle/product/19c/dbhome_1 +ORACLE_BASE=/u01/app/oracle +oracle.install.db.InstallEdition=EE +oracle.install.db.OSDBA_GROUP=dba +oracle.install.db.OSOPER_GROUP=oper +oracle.install.db.OSBACKUPDBA_GROUP=backupdba +oracle.install.db.OSDGDBA_GROUP=dgdba +oracle.install.db.OSKMDBA_GROUP=kmdba +oracle.install.db.OSRACDBA_GROUP=racdba +oracle.install.db.rootconfig.executeRootScript= +oracle.install.db.rootconfig.configMethod= +oracle.install.db.rootconfig.sudoPath= +oracle.install.db.rootconfig.sudoUserName= +oracle.install.db.CLUSTER_NODES= +oracle.install.db.config.starterdb.type= +oracle.install.db.config.starterdb.globalDBName= +oracle.install.db.config.starterdb.SID= +oracle.install.db.ConfigureAsContainerDB= +oracle.install.db.config.PDBName= +oracle.install.db.config.starterdb.characterSet= +oracle.install.db.config.starterdb.memoryOption= +oracle.install.db.config.starterdb.memoryLimit= +oracle.install.db.config.starterdb.installExampleSchemas= +oracle.install.db.config.starterdb.password.ALL= +oracle.install.db.config.starterdb.password.SYS= +oracle.install.db.config.starterdb.password.SYSTEM= +oracle.install.db.config.starterdb.password.DBSNMP= +oracle.install.db.config.starterdb.password.PDBADMIN= 
+oracle.install.db.config.starterdb.managementOption= +oracle.install.db.config.starterdb.omsHost= +oracle.install.db.config.starterdb.omsPort= +oracle.install.db.config.starterdb.emAdminUser= +oracle.install.db.config.starterdb.emAdminPassword= +oracle.install.db.config.starterdb.enableRecovery= +oracle.install.db.config.starterdb.storageType= +oracle.install.db.config.starterdb.fileSystemStorage.dataLocation= +oracle.install.db.config.starterdb.fileSystemStorage.recoveryLocation= +oracle.install.db.config.asm.diskGroup= +oracle.install.db.config.asm.ASMSNMPPassword= +SECURITY_UPDATES_VIA_MYORACLESUPPORT=false +DECLINE_SECURITY_UPDATES=true diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca.rsp new file mode 100644 index 0000000000..745fdc7d70 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca.rsp @@ -0,0 +1,605 @@ +############################################################################## +## ## +## DBCA response file ## +## ------------------ ## +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +############################################################################## +#------------------------------------------------------------------------------- +# Do not change the following system generated value. +#------------------------------------------------------------------------------- +responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v18.0.0 + +#----------------------------------------------------------------------------- +# Name : gdbName +# Datatype : String +# Description : Global database name of the database +# Valid values : . 
- when database domain isn't NULL +# - when database domain is NULL +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +gdbName=###ORACLE_SID### + +#----------------------------------------------------------------------------- +# Name : sid +# Datatype : String +# Description : System identifier (SID) of the database +# Valid values : Check Oracle12c Administrator's Guide +# Default value : specified in GDBNAME +# Mandatory : No +#----------------------------------------------------------------------------- +sid=###ORACLE_SID### + +#----------------------------------------------------------------------------- +# Name : databaseConfigType +# Datatype : String +# Description : database conf type as Single Instance, Real Application Cluster or Real Application Cluster One Nodes database +# Valid values : SI\RAC\RACONENODE +# Default value : SI +# Mandatory : No +#----------------------------------------------------------------------------- +databaseConfigType=RAC + +#----------------------------------------------------------------------------- +# Name : RACOneNodeServiceName +# Datatype : String +# Description : Service is required by application to connect to RAC One +# Node Database +# Valid values : Service Name +# Default value : None +# Mandatory : No [required in case DATABASECONFTYPE is set to RACONENODE ] +#----------------------------------------------------------------------------- +RACOneNodeServiceName= + +#----------------------------------------------------------------------------- +# Name : policyManaged +# Datatype : Boolean +# Description : Set to true if Database is policy managed and +# set to false if Database is admin managed +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +policyManaged=false + + +#----------------------------------------------------------------------------- +# Name : createServerPool +# Datatype : Boolean +# Description : Set to true if new server pool need to be created for database +# if this option is specified then the newly created database +# will use this newly created serverpool. +# Multiple serverpoolname can not be specified for database +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +createServerPool=false + +#----------------------------------------------------------------------------- +# Name : serverPoolName +# Datatype : String +# Description : Only one serverpool name need to be specified +# if Create Server Pool option is specified. 
+# Comma-separated list of Serverpool names if db need to use +# multiple Server pool +# Valid values : ServerPool name + +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +serverPoolName= + +#----------------------------------------------------------------------------- +# Name : cardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation + +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +cardinality= + +#----------------------------------------------------------------------------- +# Name : force +# Datatype : Boolean +# Description : Set to true if new server pool need to be created by force +# if this option is specified then the newly created serverpool +# will be assigned server even if no free servers are available. +# This may affect already running database. +# This flag can be specified for Admin managed as well as policy managed db. +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +force=false + +#----------------------------------------------------------------------------- +# Name : pqPoolName +# Datatype : String +# Description : Only one serverpool name needs to be specified +# if create server pool option is specified. +# Comma-separated list of serverpool names if use +# server pool. This is required to +# create Parallel Query (PQ) database. Applicable to Big Cluster +# Valid values : Parallel Query (PQ) pool name +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +pqPoolName= + +#----------------------------------------------------------------------------- +# Name : pqCardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation. 
+# Applicable to Big Cluster +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +pqCardinality= + +#----------------------------------------------------------------------------- +# Name : createAsContainerDatabase +# Datatype : boolean +# Description : flag to create database as container database +# Valid values : Check Oracle12c Administrator's Guide +# Default value : false +# Mandatory : No +#----------------------------------------------------------------------------- +createAsContainerDatabase=###CONTAINER_DB_FLAG### + +#----------------------------------------------------------------------------- +# Name : numberOfPDBs +# Datatype : Number +# Description : Specify the number of pdb to be created +# Valid values : 0 to 252 +# Default value : 0 +# Mandatory : No +#----------------------------------------------------------------------------- +numberOfPDBs=1 + +#----------------------------------------------------------------------------- +# Name : pdbName +# Datatype : String +# Description : Specify the pdbname/pdbanme prefix if one or more pdb need to be created +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +pdbName=###ORACLE_PDB### + +#----------------------------------------------------------------------------- +# Name : useLocalUndoForPDBs +# Datatype : boolean +# Description : Flag to create local undo tablespace for all PDB's. +# Valid values : TRUE\FALSE +# Default value : TRUE +# Mandatory : No +#----------------------------------------------------------------------------- +useLocalUndoForPDBs=true + +#----------------------------------------------------------------------------- +# Name : pdbAdminPassword +# Datatype : String +# Description : PDB Administrator user password +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- + +pdbAdminPassword=###ORACLE_PWD### + +#----------------------------------------------------------------------------- +# Name : nodelist +# Datatype : String +# Description : Comma-separated list of cluster nodes +# Valid values : Cluster node names +# Default value : None +# Mandatory : No (Yes for RAC database-centric database ) +#----------------------------------------------------------------------------- +nodelist=###PUBLIC_HOSTNAME### + +#----------------------------------------------------------------------------- +# Name : templateName +# Datatype : String +# Description : Name of the template +# Valid values : Template file name +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +templateName=/u01/app/oracle/product/18.3.0/dbhome_1/assistants/dbca/templates/General_Purpose.dbc + +#----------------------------------------------------------------------------- +# Name : sysPassword +# Datatype : String +# Description : Password for SYS user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +sysPassword=###ORACLE_PWD### + +#----------------------------------------------------------------------------- +# 
Name : systemPassword +# Datatype : String +# Description : Password for SYSTEM user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +systemPassword=###ORACLE_PWD### + +#----------------------------------------------------------------------------- +# Name : serviceUserPassword +# Datatype : String +# Description : Password for Windows Service user +# Default value : None +# Mandatory : If Oracle home is installed with windows service user +#----------------------------------------------------------------------------- +serviceUserPassword= + +#----------------------------------------------------------------------------- +# Name : emConfiguration +# Datatype : String +# Description : Enterprise Manager Configuration Type +# Valid values : CENTRAL|DBEXPRESS|BOTH|NONE +# Default value : NONE +# Mandatory : No +#----------------------------------------------------------------------------- +emConfiguration=DBEXPRESS + +#----------------------------------------------------------------------------- +# Name : emExpressPort +# Datatype : Number +# Description : Enterprise Manager Configuration Type +# Valid values : Check Oracle12c Administrator's Guide +# Default value : NONE +# Mandatory : No, will be picked up from DBEXPRESS_HTTPS_PORT env variable +# or auto generates a free port between 5500 and 5599 +#----------------------------------------------------------------------------- +emExpressPort=5500 + +#----------------------------------------------------------------------------- +# Name : runCVUChecks +# Datatype : Boolean +# Description : Specify whether to run Cluster Verification Utility checks +# periodically in Cluster environment +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +runCVUChecks=true + +#----------------------------------------------------------------------------- +# Name : dbsnmpPassword +# Datatype : String +# Description : Password for DBSNMP user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if emConfiguration is specified or +# the value of runCVUChecks is TRUE +#----------------------------------------------------------------------------- +dbsnmpPassword=###ORACLE_PWD### + +#----------------------------------------------------------------------------- +# Name : omsHost +# Datatype : String +# Description : EM management server host name +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsHost= + +#----------------------------------------------------------------------------- +# Name : omsPort +# Datatype : Number +# Description : EM management server port number +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsPort=0 + +#----------------------------------------------------------------------------- +# Name : emUser +# Datatype : String +# Description : EM Admin username to add or modify targets +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +emUser= + +#----------------------------------------------------------------------------- +# 
Name : emPassword +# Datatype : String +# Description : EM Admin user password +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +emPassword= + +#----------------------------------------------------------------------------- +# Name : dvConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Database vault +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +dvConfiguration=false + +#----------------------------------------------------------------------------- +# Name : dvUserName +# Datatype : String +# Description : DataVault Owner +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserName= + +#----------------------------------------------------------------------------- +# Name : dvUserPassword +# Datatype : String +# Description : Password for DataVault Owner +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserPassword= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerName +# Datatype : String +# Description : DataVault Account Manager +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerName= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerPassword +# Datatype : String +# Description : Password for DataVault Account Manager +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerPassword= + +#----------------------------------------------------------------------------- +# Name : olsConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Label Security +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +olsConfiguration=false + +#----------------------------------------------------------------------------- +# Name : datafileJarLocation +# Datatype : String +# Description : Location of the data file jar +# Valid values : Directory containing compressed datafile jar +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +datafileJarLocation={ORACLE_HOME}/assistants/dbca/templates/ + +#----------------------------------------------------------------------------- +# Name : datafileDestination +# Datatype : String +# Description : Location of the data file's +# Valid values : Directory for all the database files +# Default value : $ORACLE_BASE/oradata +# Mandatory : No +#----------------------------------------------------------------------------- +datafileDestination=+DATA/{DB_UNIQUE_NAME}/ + +#----------------------------------------------------------------------------- +# Name : recoveryAreaDestination +# 
Datatype : String +# Description : Location of the data file's +# Valid values : Recovery Area location +# Default value : $ORACLE_BASE/flash_recovery_area +# Mandatory : No +#----------------------------------------------------------------------------- +recoveryAreaDestination= + +#----------------------------------------------------------------------------- +# Name : storageType +# Datatype : String +# Description : Specifies the storage on which the database is to be created +# Valid values : FS (CFS for RAC), ASM +# Default value : FS +# Mandatory : No +#----------------------------------------------------------------------------- +storageType=ASM + +#----------------------------------------------------------------------------- +# Name : diskGroupName +# Datatype : String +# Description : Specifies the disk group name for the storage +# Default value : DATA +# Mandatory : No +#----------------------------------------------------------------------------- +diskGroupName=+DATA/{DB_UNIQUE_NAME}/ + +#----------------------------------------------------------------------------- +# Name : asmsnmpPassword +# Datatype : String +# Description : Password for ASM Monitoring +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +asmsnmpPassword= + +#----------------------------------------------------------------------------- +# Name : recoveryGroupName +# Datatype : String +# Description : Specifies the disk group name for the recovery area +# Default value : RECOVERY +# Mandatory : No +#----------------------------------------------------------------------------- +recoveryGroupName= + +#----------------------------------------------------------------------------- +# Name : characterSet +# Datatype : String +# Description : Character set of the database +# Valid values : Check Oracle12c National Language Support Guide +# Default value : "US7ASCII" +# Mandatory : NO +#----------------------------------------------------------------------------- +characterSet=AL32UTF8 + +#----------------------------------------------------------------------------- +# Name : nationalCharacterSet +# Datatype : String +# Description : National Character set of the database +# Valid values : "UTF8" or "AL16UTF16". For details, check Oracle12c National Language Support Guide +# Default value : "AL16UTF16" +# Mandatory : No +#----------------------------------------------------------------------------- +nationalCharacterSet=AL16UTF16 + +#----------------------------------------------------------------------------- +# Name : registerWithDirService +# Datatype : Boolean +# Description : Specifies whether to register with Directory Service. +# Valid values : TRUE \ FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +registerWithDirService=false + + +#----------------------------------------------------------------------------- +# Name : dirServiceUserName +# Datatype : String +# Description : Specifies the name of the directory service user +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServiceUserName= + +#----------------------------------------------------------------------------- +# Name : dirServicePassword +# Datatype : String +# Description : The password of the directory service user. +# You can also specify the password at the command prompt instead of here. 
+# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServicePassword= + +#----------------------------------------------------------------------------- +# Name : walletPassword +# Datatype : String +# Description : The password for wallet to created or modified. +# You can also specify the password at the command prompt instead of here. +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +walletPassword= + +#----------------------------------------------------------------------------- +# Name : listeners +# Datatype : String +# Description : Specifies list of listeners to register the database with. +# By default the database is configured for all the listeners specified in the +# $ORACLE_HOME/network/admin/listener.ora +# Valid values : The list should be comma separated like "listener1,listener2". +# Mandatory : NO +#----------------------------------------------------------------------------- +listeners=LISTENER + +#----------------------------------------------------------------------------- +# Name : variablesFile +# Datatype : String +# Description : Location of the file containing variable value pair +# Valid values : A valid file-system file. The variable value pair format in this file +# is =. Each pair should be in a new line. +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variablesFile= + +#----------------------------------------------------------------------------- +# Name : variables +# Datatype : String +# Description : comma separated list of name=value pairs. Overrides variables defined in variablefile and templates +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variables=DB_UNIQUE_NAME=###ORACLE_SID###,ORACLE_BASE=###DB_BASE###,PDB_NAME=###ORACLE_PDB###,DB_NAME=###ORACLE_SID###,ORACLE_HOME=###DB_HOME###,SID=###ORACLE_SID### + +#----------------------------------------------------------------------------- +# Name : initParams +# Datatype : String +# Description : comma separated list of name=value pairs. 
Overrides initialization parameters defined in templates
+# Default value : None
+# Mandatory : NO
+#-----------------------------------------------------------------------------
+#initParams=family:dw_helper.instance_mode=read-only,processes=640,nls_language=AMERICAN,pga_aggregate_target=2008MB,sga_target=6022MB,dispatchers=(PROTOCOL=TCP) (SERVICE=orclXDB),db_block_size=8192BYTES,orcl1.undo_tablespace=UNDOTBS1,diagnostic_dest={ORACLE_BASE},cluster_database=true,orcl1.thread=1,audit_file_dest={ORACLE_BASE}/admin/{DB_UNIQUE_NAME}/adump,db_create_file_dest=+DATA/{DB_UNIQUE_NAME}/,nls_territory=AMERICA,local_listener=-oraagent-dummy-,compatible=12.2.0,db_name=orcl,audit_trail=db,orcl1.instance_number=1,remote_login_passwordfile=exclusive,open_cursors=300
+initParams=audit_trail=none,audit_sys_operations=false,remote_login_passwordfile=exclusive
+
+#-----------------------------------------------------------------------------
+# Name : sampleSchema
+# Datatype : Boolean
+# Description : Specifies whether or not to add the Sample Schemas to your database
+# Valid values : TRUE \ FALSE
+# Default value : FALSE
+# Mandatory : No
+#-----------------------------------------------------------------------------
+sampleSchema=false
+
+#-----------------------------------------------------------------------------
+# Name : memoryPercentage
+# Datatype : String
+# Description : percentage of physical memory for Oracle
+# Default value : None
+# Mandatory : NO
+#-----------------------------------------------------------------------------
+memoryPercentage=40
+
+#-----------------------------------------------------------------------------
+# Name : databaseType
+# Datatype : String
+# Description : used for memory distribution when memoryPercentage specified
+# Valid values : MULTIPURPOSE|DATA_WAREHOUSING|OLTP
+# Default value : MULTIPURPOSE
+# Mandatory : NO
+#-----------------------------------------------------------------------------
+databaseType=MULTIPURPOSE
+
+#-----------------------------------------------------------------------------
+# Name : automaticMemoryManagement
+# Datatype : Boolean
+# Description : flag to indicate Automatic Memory Management is used
+# Valid values : TRUE/FALSE
+# Default value : TRUE
+# Mandatory : NO
+#-----------------------------------------------------------------------------
+automaticMemoryManagement=false
+
+#-----------------------------------------------------------------------------
+# Name : totalMemory
+# Datatype : String
+# Description : total memory in MB to allocate to Oracle
+# Valid values :
+# Default value :
+# Mandatory : NO
+#-----------------------------------------------------------------------------
+totalMemory=5000
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca1.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca1.rsp
new file mode 100644
index 0000000000..2810cc645d
--- /dev/null
+++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca1.rsp
@@ -0,0 +1,605 @@
+##############################################################################
+## ##
+## DBCA response file ##
+## ------------------ ##
+## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ##
+## ##
+## Specify values for the variables listed below to customize ##
+## your installation. ##
+## ##
+## Each variable is associated with a comment. The comment ##
+## can help to populate the variables with the appropriate ##
+## values.
## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +############################################################################## +#------------------------------------------------------------------------------- +# Do not change the following system generated value. +#------------------------------------------------------------------------------- +responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v18.0.0 + +#----------------------------------------------------------------------------- +# Name : gdbName +# Datatype : String +# Description : Global database name of the database +# Valid values : . - when database domain isn't NULL +# - when database domain is NULL +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +gdbName=ORCLCDB + +#----------------------------------------------------------------------------- +# Name : sid +# Datatype : String +# Description : System identifier (SID) of the database +# Valid values : Check Oracle12c Administrator's Guide +# Default value : specified in GDBNAME +# Mandatory : No +#----------------------------------------------------------------------------- +sid=ORCLCDB + +#----------------------------------------------------------------------------- +# Name : databaseConfigType +# Datatype : String +# Description : database conf type as Single Instance, Real Application Cluster or Real Application Cluster One Nodes database +# Valid values : SI\RAC\RACONENODE +# Default value : SI +# Mandatory : No +#----------------------------------------------------------------------------- +databaseConfigType=RAC + +#----------------------------------------------------------------------------- +# Name : RACOneNodeServiceName +# Datatype : String +# Description : Service is required by application to connect to RAC One +# Node Database +# Valid values : Service Name +# Default value : None +# Mandatory : No [required in case DATABASECONFTYPE is set to RACONENODE ] +#----------------------------------------------------------------------------- +RACOneNodeServiceName= + +#----------------------------------------------------------------------------- +# Name : policyManaged +# Datatype : Boolean +# Description : Set to true if Database is policy managed and +# set to false if Database is admin managed +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +policyManaged=false + + +#----------------------------------------------------------------------------- +# Name : createServerPool +# Datatype : Boolean +# Description : Set to true if new server pool need to be created for database +# if this option is specified then the newly created database +# will use this newly created serverpool. +# Multiple serverpoolname can not be specified for database +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +createServerPool=false + +#----------------------------------------------------------------------------- +# Name : serverPoolName +# Datatype : String +# Description : Only one serverpool name need to be specified +# if Create Server Pool option is specified. 
+# Comma-separated list of Serverpool names if db need to use +# multiple Server pool +# Valid values : ServerPool name + +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +serverPoolName= + +#----------------------------------------------------------------------------- +# Name : cardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation + +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +cardinality= + +#----------------------------------------------------------------------------- +# Name : force +# Datatype : Boolean +# Description : Set to true if new server pool need to be created by force +# if this option is specified then the newly created serverpool +# will be assigned server even if no free servers are available. +# This may affect already running database. +# This flag can be specified for Admin managed as well as policy managed db. +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +force=false + +#----------------------------------------------------------------------------- +# Name : pqPoolName +# Datatype : String +# Description : Only one serverpool name needs to be specified +# if create server pool option is specified. +# Comma-separated list of serverpool names if use +# server pool. This is required to +# create Parallel Query (PQ) database. Applicable to Big Cluster +# Valid values : Parallel Query (PQ) pool name +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +pqPoolName= + +#----------------------------------------------------------------------------- +# Name : pqCardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation. 
+# Applicable to Big Cluster +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +pqCardinality= + +#----------------------------------------------------------------------------- +# Name : createAsContainerDatabase +# Datatype : boolean +# Description : flag to create database as container database +# Valid values : Check Oracle12c Administrator's Guide +# Default value : false +# Mandatory : No +#----------------------------------------------------------------------------- +createAsContainerDatabase=true + +#----------------------------------------------------------------------------- +# Name : numberOfPDBs +# Datatype : Number +# Description : Specify the number of pdb to be created +# Valid values : 0 to 252 +# Default value : 0 +# Mandatory : No +#----------------------------------------------------------------------------- +numberOfPDBs=1 + +#----------------------------------------------------------------------------- +# Name : pdbName +# Datatype : String +# Description : Specify the pdbname/pdbanme prefix if one or more pdb need to be created +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +pdbName=ORCLPDB + +#----------------------------------------------------------------------------- +# Name : useLocalUndoForPDBs +# Datatype : boolean +# Description : Flag to create local undo tablespace for all PDB's. +# Valid values : TRUE\FALSE +# Default value : TRUE +# Mandatory : No +#----------------------------------------------------------------------------- +useLocalUndoForPDBs=true + +#----------------------------------------------------------------------------- +# Name : pdbAdminPassword +# Datatype : String +# Description : PDB Administrator user password +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- + +pdbAdminPassword=Oracle_12c + +#----------------------------------------------------------------------------- +# Name : nodelist +# Datatype : String +# Description : Comma-separated list of cluster nodes +# Valid values : Cluster node names +# Default value : None +# Mandatory : No (Yes for RAC database-centric database ) +#----------------------------------------------------------------------------- +nodelist=racnode1 + +#----------------------------------------------------------------------------- +# Name : templateName +# Datatype : String +# Description : Name of the template +# Valid values : Template file name +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +templateName=/u01/app/oracle/product/18.3.0/dbhome_1/assistants/dbca/templates/General_Purpose.dbc + +#----------------------------------------------------------------------------- +# Name : sysPassword +# Datatype : String +# Description : Password for SYS user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +sysPassword=Oracle_12c + +#----------------------------------------------------------------------------- +# Name : systemPassword +# Datatype : String +# 
Description : Password for SYSTEM user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +systemPassword=Oracle_12c + +#----------------------------------------------------------------------------- +# Name : serviceUserPassword +# Datatype : String +# Description : Password for Windows Service user +# Default value : None +# Mandatory : If Oracle home is installed with windows service user +#----------------------------------------------------------------------------- +serviceUserPassword= + +#----------------------------------------------------------------------------- +# Name : emConfiguration +# Datatype : String +# Description : Enterprise Manager Configuration Type +# Valid values : CENTRAL|DBEXPRESS|BOTH|NONE +# Default value : NONE +# Mandatory : No +#----------------------------------------------------------------------------- +emConfiguration=DBEXPRESS + +#----------------------------------------------------------------------------- +# Name : emExpressPort +# Datatype : Number +# Description : Enterprise Manager Configuration Type +# Valid values : Check Oracle12c Administrator's Guide +# Default value : NONE +# Mandatory : No, will be picked up from DBEXPRESS_HTTPS_PORT env variable +# or auto generates a free port between 5500 and 5599 +#----------------------------------------------------------------------------- +emExpressPort=5500 + +#----------------------------------------------------------------------------- +# Name : runCVUChecks +# Datatype : Boolean +# Description : Specify whether to run Cluster Verification Utility checks +# periodically in Cluster environment +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +runCVUChecks=true + +#----------------------------------------------------------------------------- +# Name : dbsnmpPassword +# Datatype : String +# Description : Password for DBSNMP user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if emConfiguration is specified or +# the value of runCVUChecks is TRUE +#----------------------------------------------------------------------------- +dbsnmpPassword=Oracle_12c + +#----------------------------------------------------------------------------- +# Name : omsHost +# Datatype : String +# Description : EM management server host name +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsHost= + +#----------------------------------------------------------------------------- +# Name : omsPort +# Datatype : Number +# Description : EM management server port number +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsPort=0 + +#----------------------------------------------------------------------------- +# Name : emUser +# Datatype : String +# Description : EM Admin username to add or modify targets +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +emUser= + +#----------------------------------------------------------------------------- +# Name : emPassword +# Datatype : String +# Description : 
EM Admin user password +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +emPassword= + +#----------------------------------------------------------------------------- +# Name : dvConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Database vault +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +dvConfiguration=false + +#----------------------------------------------------------------------------- +# Name : dvUserName +# Datatype : String +# Description : DataVault Owner +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserName= + +#----------------------------------------------------------------------------- +# Name : dvUserPassword +# Datatype : String +# Description : Password for DataVault Owner +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserPassword= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerName +# Datatype : String +# Description : DataVault Account Manager +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerName= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerPassword +# Datatype : String +# Description : Password for DataVault Account Manager +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerPassword= + +#----------------------------------------------------------------------------- +# Name : olsConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Label Security +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +olsConfiguration=false + +#----------------------------------------------------------------------------- +# Name : datafileJarLocation +# Datatype : String +# Description : Location of the data file jar +# Valid values : Directory containing compressed datafile jar +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +datafileJarLocation={ORACLE_HOME}/assistants/dbca/templates/ + +#----------------------------------------------------------------------------- +# Name : datafileDestination +# Datatype : String +# Description : Location of the data file's +# Valid values : Directory for all the database files +# Default value : $ORACLE_BASE/oradata +# Mandatory : No +#----------------------------------------------------------------------------- +datafileDestination=+DATA/{DB_UNIQUE_NAME}/ + +#----------------------------------------------------------------------------- +# Name : recoveryAreaDestination +# Datatype : String +# Description : Location of the data file's 
+# Valid values : Recovery Area location +# Default value : $ORACLE_BASE/flash_recovery_area +# Mandatory : No +#----------------------------------------------------------------------------- +recoveryAreaDestination= + +#----------------------------------------------------------------------------- +# Name : storageType +# Datatype : String +# Description : Specifies the storage on which the database is to be created +# Valid values : FS (CFS for RAC), ASM +# Default value : FS +# Mandatory : No +#----------------------------------------------------------------------------- +storageType=ASM + +#----------------------------------------------------------------------------- +# Name : diskGroupName +# Datatype : String +# Description : Specifies the disk group name for the storage +# Default value : DATA +# Mandatory : No +#----------------------------------------------------------------------------- +diskGroupName=+DATA/{DB_UNIQUE_NAME}/ + +#----------------------------------------------------------------------------- +# Name : asmsnmpPassword +# Datatype : String +# Description : Password for ASM Monitoring +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +asmsnmpPassword= + +#----------------------------------------------------------------------------- +# Name : recoveryGroupName +# Datatype : String +# Description : Specifies the disk group name for the recovery area +# Default value : RECOVERY +# Mandatory : No +#----------------------------------------------------------------------------- +recoveryGroupName= + +#----------------------------------------------------------------------------- +# Name : characterSet +# Datatype : String +# Description : Character set of the database +# Valid values : Check Oracle12c National Language Support Guide +# Default value : "US7ASCII" +# Mandatory : NO +#----------------------------------------------------------------------------- +characterSet=AL32UTF8 + +#----------------------------------------------------------------------------- +# Name : nationalCharacterSet +# Datatype : String +# Description : National Character set of the database +# Valid values : "UTF8" or "AL16UTF16". For details, check Oracle12c National Language Support Guide +# Default value : "AL16UTF16" +# Mandatory : No +#----------------------------------------------------------------------------- +nationalCharacterSet=AL16UTF16 + +#----------------------------------------------------------------------------- +# Name : registerWithDirService +# Datatype : Boolean +# Description : Specifies whether to register with Directory Service. +# Valid values : TRUE \ FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +registerWithDirService=false + + +#----------------------------------------------------------------------------- +# Name : dirServiceUserName +# Datatype : String +# Description : Specifies the name of the directory service user +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServiceUserName= + +#----------------------------------------------------------------------------- +# Name : dirServicePassword +# Datatype : String +# Description : The password of the directory service user. +# You can also specify the password at the command prompt instead of here. 
+# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServicePassword= + +#----------------------------------------------------------------------------- +# Name : walletPassword +# Datatype : String +# Description : The password for wallet to created or modified. +# You can also specify the password at the command prompt instead of here. +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +walletPassword= + +#----------------------------------------------------------------------------- +# Name : listeners +# Datatype : String +# Description : Specifies list of listeners to register the database with. +# By default the database is configured for all the listeners specified in the +# $ORACLE_HOME/network/admin/listener.ora +# Valid values : The list should be comma separated like "listener1,listener2". +# Mandatory : NO +#----------------------------------------------------------------------------- +listeners=LISTENER + +#----------------------------------------------------------------------------- +# Name : variablesFile +# Datatype : String +# Description : Location of the file containing variable value pair +# Valid values : A valid file-system file. The variable value pair format in this file +# is =. Each pair should be in a new line. +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variablesFile= + +#----------------------------------------------------------------------------- +# Name : variables +# Datatype : String +# Description : comma separated list of name=value pairs. Overrides variables defined in variablefile and templates +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variables=DB_UNIQUE_NAME=ORCLCDB,ORACLE_BASE=/u01/app/oracle,PDB_NAME=ORCLPDB,DB_NAME=ORCLCDB,ORACLE_HOME=/u01/app/oracle/product/18.3.0/dbhome_1,SID=ORCLCDB + +#----------------------------------------------------------------------------- +# Name : initParams +# Datatype : String +# Description : comma separated list of name=value pairs. 
Overrides initialization parameters defined in templates
+# Default value : None
+# Mandatory : NO
+#-----------------------------------------------------------------------------
+#initParams=family:dw_helper.instance_mode=read-only,processes=640,nls_language=AMERICAN,pga_aggregate_target=2008MB,sga_target=6022MB,dispatchers=(PROTOCOL=TCP) (SERVICE=orclXDB),db_block_size=8192BYTES,orcl1.undo_tablespace=UNDOTBS1,diagnostic_dest={ORACLE_BASE},cluster_database=true,orcl1.thread=1,audit_file_dest={ORACLE_BASE}/admin/{DB_UNIQUE_NAME}/adump,db_create_file_dest=+DATA/{DB_UNIQUE_NAME}/,nls_territory=AMERICA,local_listener=-oraagent-dummy-,compatible=12.2.0,db_name=orcl,audit_trail=db,orcl1.instance_number=1,remote_login_passwordfile=exclusive,open_cursors=300
+initParams=audit_trail=none,audit_sys_operations=false,remote_login_passwordfile=exclusive
+
+#-----------------------------------------------------------------------------
+# Name : sampleSchema
+# Datatype : Boolean
+# Description : Specifies whether or not to add the Sample Schemas to your database
+# Valid values : TRUE \ FALSE
+# Default value : FALSE
+# Mandatory : No
+#-----------------------------------------------------------------------------
+sampleSchema=false
+
+#-----------------------------------------------------------------------------
+# Name : memoryPercentage
+# Datatype : String
+# Description : percentage of physical memory for Oracle
+# Default value : None
+# Mandatory : NO
+#-----------------------------------------------------------------------------
+memoryPercentage=40
+
+#-----------------------------------------------------------------------------
+# Name : databaseType
+# Datatype : String
+# Description : used for memory distribution when memoryPercentage specified
+# Valid values : MULTIPURPOSE|DATA_WAREHOUSING|OLTP
+# Default value : MULTIPURPOSE
+# Mandatory : NO
+#-----------------------------------------------------------------------------
+databaseType=MULTIPURPOSE
+
+#-----------------------------------------------------------------------------
+# Name : automaticMemoryManagement
+# Datatype : Boolean
+# Description : flag to indicate Automatic Memory Management is used
+# Valid values : TRUE/FALSE
+# Default value : TRUE
+# Mandatory : NO
+#-----------------------------------------------------------------------------
+automaticMemoryManagement=false
+
+#-----------------------------------------------------------------------------
+# Name : totalMemory
+# Datatype : String
+# Description : total memory in MB to allocate to Oracle
+# Valid values :
+# Default value :
+# Mandatory : NO
+#-----------------------------------------------------------------------------
+totalMemory=5000
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca_19c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca_19c.rsp
new file mode 100644
index 0000000000..157111d993
--- /dev/null
+++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca_19c.rsp
@@ -0,0 +1,58 @@
+responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v19.0.0
+gdbName=###ORACLE_SID###
+sid=###ORACLE_SID###
+databaseConfigType=###DATABASE_CONFIG_TYPE###
+RACOneNodeServiceName=
+policyManaged=false
+createServerPool=false
+serverPoolName=
+cardinality=
+force=false
+pqPoolName=
+pqCardinality=
+createAsContainerDatabase=###CONTAINER_DB_FLAG###
+numberOfPDBs=###PDB_COUNT###
+pdbName=###ORACLE_PDB###
+useLocalUndoForPDBs=true
+pdbAdminPassword=###ORACLE_PWD###
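+# ---------------------------------------------------------------------------
+# Illustrative note added in editing (not part of the generated response
+# file): the ###...### tokens in this template are placeholders. A container
+# setup script would typically substitute them before invoking DBCA in silent
+# mode; the environment variable names and output path below are assumptions
+# for illustration only, for example:
+#   sed -e "s|###ORACLE_SID###|${ORACLE_SID}|g" \
+#       -e "s|###ORACLE_PDB###|${ORACLE_PDB}|g" \
+#       -e "s|###ORACLE_PWD###|${ORACLE_PWD}|g" dbca_19c.rsp > /tmp/dbca_19c.rsp
+#   dbca -silent -createDatabase -responseFile /tmp/dbca_19c.rsp
+# ---------------------------------------------------------------------------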
+nodelist=###DB_NODES###
+templateName={ORACLE_HOME}/assistants/dbca/templates/General_Purpose.dbc
+sysPassword=###ORACLE_PWD###
+systemPassword=###ORACLE_PWD###
+oracleHomeUserPassword=
+emConfiguration=DBEXPRESS
+emExpressPort=5500
+runCVUChecks=true
+dbsnmpPassword=###ORACLE_PWD###
+omsHost=
+omsPort=
+emUser=
+emPassword=
+dvConfiguration=false
+dvUserName=
+dvUserPassword=
+dvAccountManagerName=
+dvAccountManagerPassword=
+olsConfiguration=false
+datafileJarLocation={ORACLE_HOME}/assistants/dbca/templates/
+datafileDestination=+DATA/{DB_UNIQUE_NAME}/
+recoveryAreaDestination=
+storageType=ASM
+diskGroupName=+DATA/{DB_UNIQUE_NAME}/
+asmsnmpPassword=
+recoveryGroupName=
+characterSet=AL32UTF8
+nationalCharacterSet=AL16UTF16
+registerWithDirService=false
+dirServiceUserName=
+dirServicePassword=
+walletPassword=
+listeners=LISTENER
+variablesFile=
+variables=DB_UNIQUE_NAME=###ORACLE_SID###,ORACLE_BASE=###DB_BASE###,PDB_NAME=###ORACLE_PDB###,DB_NAME=###ORACLE_SID###,ORACLE_HOME=###DB_HOME###,SID=###ORACLE_SID###
+initParams=audit_trail=none,audit_sys_operations=false,remote_login_passwordfile=exclusive
+sampleSchema=false
+memoryPercentage=40
+databaseType=MULTIPURPOSE
+automaticMemoryManagement=false
+totalMemory=###TOTAL_MEMORY###
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca_19cv1.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca_19cv1.rsp
new file mode 100644
index 0000000000..2bad03a67c
--- /dev/null
+++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/dbca_19cv1.rsp
@@ -0,0 +1,604 @@
+##############################################################################
+## ##
+## DBCA response file ##
+## ------------------ ##
+## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ##
+## ##
+## Specify values for the variables listed below to customize ##
+## your installation. ##
+## ##
+## Each variable is associated with a comment. The comment ##
+## can help to populate the variables with the appropriate ##
+## values. ##
+## ##
+## IMPORTANT NOTE: This file contains plain text passwords and ##
+## should be secured to have read permission only by oracle user ##
+## or db administrator who owns this installation. ##
+##############################################################################
+#-------------------------------------------------------------------------------
+# Do not change the following system generated value.
+#-------------------------------------------------------------------------------
+responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v19.0.0
+
+#-----------------------------------------------------------------------------
+# Name : gdbName
+# Datatype : String
+# Description : Global database name of the database
+# Valid values : .
- when database domain isn't NULL +# - when database domain is NULL +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +gdbName= + +#----------------------------------------------------------------------------- +# Name : sid +# Datatype : String +# Description : System identifier (SID) of the database +# Valid values : Check Oracle19c Administrator's Guide +# Default value : specified in GDBNAME +# Mandatory : No +#----------------------------------------------------------------------------- +sid= + +#----------------------------------------------------------------------------- +# Name : databaseConfigType +# Datatype : String +# Description : database conf type as Single Instance, Real Application Cluster or Real Application Cluster One Nodes database +# Valid values : SI\RAC\RACONENODE +# Default value : SI +# Mandatory : No +#----------------------------------------------------------------------------- +databaseConfigType= + +#----------------------------------------------------------------------------- +# Name : RACOneNodeServiceName +# Datatype : String +# Description : Service is required by application to connect to RAC One +# Node Database +# Valid values : Service Name +# Default value : None +# Mandatory : No [required in case DATABASECONFTYPE is set to RACONENODE ] +#----------------------------------------------------------------------------- +RACOneNodeServiceName= + +#----------------------------------------------------------------------------- +# Name : policyManaged +# Datatype : Boolean +# Description : Set to true if Database is policy managed and +# set to false if Database is admin managed +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +policyManaged= + + +#----------------------------------------------------------------------------- +# Name : createServerPool +# Datatype : Boolean +# Description : Set to true if new server pool need to be created for database +# if this option is specified then the newly created database +# will use this newly created serverpool. +# Multiple serverpoolname can not be specified for database +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +createServerPool= + +#----------------------------------------------------------------------------- +# Name : serverPoolName +# Datatype : String +# Description : Only one serverpool name need to be specified +# if Create Server Pool option is specified. 
+# Comma-separated list of Serverpool names if db need to use +# multiple Server pool +# Valid values : ServerPool name + +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +serverPoolName= + +#----------------------------------------------------------------------------- +# Name : cardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation + +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +cardinality= + +#----------------------------------------------------------------------------- +# Name : force +# Datatype : Boolean +# Description : Set to true if new server pool need to be created by force +# if this option is specified then the newly created serverpool +# will be assigned server even if no free servers are available. +# This may affect already running database. +# This flag can be specified for Admin managed as well as policy managed db. +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +force= + +#----------------------------------------------------------------------------- +# Name : pqPoolName +# Datatype : String +# Description : Only one serverpool name needs to be specified +# if create server pool option is specified. +# Comma-separated list of serverpool names if use +# server pool. This is required to +# create Parallel Query (PQ) database. Applicable to Big Cluster +# Valid values : Parallel Query (PQ) pool name +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +pqPoolName= + +#----------------------------------------------------------------------------- +# Name : pqCardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation. 
+# Applicable to Big Cluster +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +pqCardinality= + +#----------------------------------------------------------------------------- +# Name : createAsContainerDatabase +# Datatype : boolean +# Description : flag to create database as container database +# Valid values : Check Oracle19c Administrator's Guide +# Default value : false +# Mandatory : No +#----------------------------------------------------------------------------- +createAsContainerDatabase= + +#----------------------------------------------------------------------------- +# Name : numberOfPDBs +# Datatype : Number +# Description : Specify the number of pdb to be created +# Valid values : 0 to 4094 +# Default value : 0 +# Mandatory : No +#----------------------------------------------------------------------------- +numberOfPDBs= + +#----------------------------------------------------------------------------- +# Name : pdbName +# Datatype : String +# Description : Specify the pdbname/pdbanme prefix if one or more pdb need to be created +# Valid values : Check Oracle19c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +pdbName= + +#----------------------------------------------------------------------------- +# Name : useLocalUndoForPDBs +# Datatype : boolean +# Description : Flag to create local undo tablespace for all PDB's. +# Valid values : TRUE\FALSE +# Default value : TRUE +# Mandatory : No +#----------------------------------------------------------------------------- +useLocalUndoForPDBs= + +#----------------------------------------------------------------------------- +# Name : pdbAdminPassword +# Datatype : String +# Description : PDB Administrator user password +# Valid values : Check Oracle19c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- + +pdbAdminPassword= + +#----------------------------------------------------------------------------- +# Name : nodelist +# Datatype : String +# Description : Comma-separated list of cluster nodes +# Valid values : Cluster node names +# Default value : None +# Mandatory : No (Yes for RAC database-centric database ) +#----------------------------------------------------------------------------- +nodelist= + +#----------------------------------------------------------------------------- +# Name : templateName +# Datatype : String +# Description : Name of the template +# Valid values : Template file name +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +templateName= + +#----------------------------------------------------------------------------- +# Name : sysPassword +# Datatype : String +# Description : Password for SYS user +# Valid values : Check Oracle19c Administrator's Guide +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +sysPassword= + +#----------------------------------------------------------------------------- +# Name : systemPassword +# Datatype : String +# Description : Password for SYSTEM user +# Valid values : Check Oracle19c Administrator's Guide +# Default value : None +# Mandatory : 
Yes +#----------------------------------------------------------------------------- +systemPassword= + +#----------------------------------------------------------------------------- +# Name : oracleHomeUserPassword +# Datatype : String +# Description : Password for Windows Service user +# Default value : None +# Mandatory : If Oracle home is installed with windows service user +#----------------------------------------------------------------------------- +oracleHomeUserPassword= + +#----------------------------------------------------------------------------- +# Name : emConfiguration +# Datatype : String +# Description : Enterprise Manager Configuration Type +# Valid values : CENTRAL|DBEXPRESS|BOTH|NONE +# Default value : NONE +# Mandatory : No +#----------------------------------------------------------------------------- +emConfiguration= + +#----------------------------------------------------------------------------- +# Name : emExpressPort +# Datatype : Number +# Description : Enterprise Manager Configuration Type +# Valid values : Check Oracle19c Administrator's Guide +# Default value : NONE +# Mandatory : No, will be picked up from DBEXPRESS_HTTPS_PORT env variable +# or auto generates a free port between 5500 and 5599 +#----------------------------------------------------------------------------- +emExpressPort=5500 + +#----------------------------------------------------------------------------- +# Name : runCVUChecks +# Datatype : Boolean +# Description : Specify whether to run Cluster Verification Utility checks +# periodically in Cluster environment +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +runCVUChecks= + +#----------------------------------------------------------------------------- +# Name : dbsnmpPassword +# Datatype : String +# Description : Password for DBSNMP user +# Valid values : Check Oracle19c Administrator's Guide +# Default value : None +# Mandatory : Yes, if emConfiguration is specified or +# the value of runCVUChecks is TRUE +#----------------------------------------------------------------------------- +dbsnmpPassword= + +#----------------------------------------------------------------------------- +# Name : omsHost +# Datatype : String +# Description : EM management server host name +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsHost= + +#----------------------------------------------------------------------------- +# Name : omsPort +# Datatype : Number +# Description : EM management server port number +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsPort= + +#----------------------------------------------------------------------------- +# Name : emUser +# Datatype : String +# Description : EM Admin username to add or modify targets +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +emUser= + +#----------------------------------------------------------------------------- +# Name : emPassword +# Datatype : String +# Description : EM Admin user password +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration 
+#----------------------------------------------------------------------------- +emPassword= + +#----------------------------------------------------------------------------- +# Name : dvConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Database vault +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +dvConfiguration= + +#----------------------------------------------------------------------------- +# Name : dvUserName +# Datatype : String +# Description : DataVault Owner +# Valid values : Check Oracle19c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserName= + +#----------------------------------------------------------------------------- +# Name : dvUserPassword +# Datatype : String +# Description : Password for DataVault Owner +# Valid values : Check Oracle19c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserPassword= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerName +# Datatype : String +# Description : DataVault Account Manager +# Valid values : Check Oracle19c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerName= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerPassword +# Datatype : String +# Description : Password for DataVault Account Manager +# Valid values : Check Oracle19c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerPassword= + +#----------------------------------------------------------------------------- +# Name : olsConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Label Security +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +olsConfiguration= + +#----------------------------------------------------------------------------- +# Name : datafileJarLocation +# Datatype : String +# Description : Location of the data file jar +# Valid values : Directory containing compressed datafile jar +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +datafileJarLocation= + +#----------------------------------------------------------------------------- +# Name : datafileDestination +# Datatype : String +# Description : Location of the data file's +# Valid values : Directory for all the database files +# Default value : $ORACLE_BASE/oradata +# Mandatory : No +#----------------------------------------------------------------------------- +datafileDestination= + +#----------------------------------------------------------------------------- +# Name : recoveryAreaDestination +# Datatype : String +# Description : Location of the data file's +# Valid values : Recovery Area location +# Default value : $ORACLE_BASE/flash_recovery_area +# Mandatory : No 
+#----------------------------------------------------------------------------- +recoveryAreaDestination= + +#----------------------------------------------------------------------------- +# Name : storageType +# Datatype : String +# Description : Specifies the storage on which the database is to be created +# Valid values : FS (CFS for RAC), ASM +# Default value : FS +# Mandatory : No +#----------------------------------------------------------------------------- +storageType= + +#----------------------------------------------------------------------------- +# Name : diskGroupName +# Datatype : String +# Description : Specifies the disk group name for the storage +# Default value : DATA +# Mandatory : No +#----------------------------------------------------------------------------- +diskGroupName= + +#----------------------------------------------------------------------------- +# Name : asmsnmpPassword +# Datatype : String +# Description : Password for ASM Monitoring +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +asmsnmpPassword= + +#----------------------------------------------------------------------------- +# Name : recoveryGroupName +# Datatype : String +# Description : Specifies the disk group name for the recovery area +# Default value : RECOVERY +# Mandatory : No +#----------------------------------------------------------------------------- +recoveryGroupName= + +#----------------------------------------------------------------------------- +# Name : characterSet +# Datatype : String +# Description : Character set of the database +# Valid values : Check Oracle19c National Language Support Guide +# Default value : "US7ASCII" +# Mandatory : NO +#----------------------------------------------------------------------------- +characterSet= + +#----------------------------------------------------------------------------- +# Name : nationalCharacterSet +# Datatype : String +# Description : National Character set of the database +# Valid values : "UTF8" or "AL16UTF16". For details, check Oracle19c National Language Support Guide +# Default value : "AL16UTF16" +# Mandatory : No +#----------------------------------------------------------------------------- +nationalCharacterSet= + +#----------------------------------------------------------------------------- +# Name : registerWithDirService +# Datatype : Boolean +# Description : Specifies whether to register with Directory Service. +# Valid values : TRUE \ FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +registerWithDirService= + + +#----------------------------------------------------------------------------- +# Name : dirServiceUserName +# Datatype : String +# Description : Specifies the name of the directory service user +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServiceUserName= + +#----------------------------------------------------------------------------- +# Name : dirServicePassword +# Datatype : String +# Description : The password of the directory service user. +# You can also specify the password at the command prompt instead of here. 
+# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServicePassword= + +#----------------------------------------------------------------------------- +# Name : walletPassword +# Datatype : String +# Description : The password for wallet to created or modified. +# You can also specify the password at the command prompt instead of here. +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +walletPassword= + +#----------------------------------------------------------------------------- +# Name : listeners +# Datatype : String +# Description : Specifies list of listeners to register the database with. +# By default the database is configured for all the listeners specified in the +# $ORACLE_HOME/network/admin/listener.ora +# Valid values : The list should be comma separated like "listener1,listener2". +# Mandatory : NO +#----------------------------------------------------------------------------- +listeners= + +#----------------------------------------------------------------------------- +# Name : variablesFile +# Datatype : String +# Description : Location of the file containing variable value pair +# Valid values : A valid file-system file. The variable value pair format in this file +# is =. Each pair should be in a new line. +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variablesFile= + +#----------------------------------------------------------------------------- +# Name : variables +# Datatype : String +# Description : comma separated list of name=value pairs. Overrides variables defined in variablefile and templates +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variables= + +#----------------------------------------------------------------------------- +# Name : initParams +# Datatype : String +# Description : comma separated list of name=value pairs. 
Overrides initialization parameters defined in templates +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +initParams= + +#----------------------------------------------------------------------------- +# Name : sampleSchema +# Datatype : Boolean +# Description : Specifies whether or not to add the Sample Schemas to your database +# Valid values : TRUE \ FALSE +# Default value : FASLE +# Mandatory : No +#----------------------------------------------------------------------------- +sampleSchema= + +#----------------------------------------------------------------------------- +# Name : memoryPercentage +# Datatype : String +# Description : percentage of physical memory for Oracle +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +memoryPercentage= + +#----------------------------------------------------------------------------- +# Name : databaseType +# Datatype : String +# Description : used for memory distribution when memoryPercentage specified +# Valid values : MULTIPURPOSE|DATA_WAREHOUSING|OLTP +# Default value : MULTIPURPOSE +# Mandatory : NO +#----------------------------------------------------------------------------- +databaseType= + +#----------------------------------------------------------------------------- +# Name : automaticMemoryManagement +# Datatype : Boolean +# Description : flag to indicate Automatic Memory Management is used +# Valid values : TRUE/FALSE +# Default value : TRUE +# Mandatory : NO +#----------------------------------------------------------------------------- +automaticMemoryManagement= + +#----------------------------------------------------------------------------- +# Name : totalMemory +# Datatype : String +# Description : total memory in MB to allocate to Oracle +# Valid values : +# Default value : +# Mandatory : NO +#----------------------------------------------------------------------------- +totalMemory= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/enableRAC.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/enableRAC.sh new file mode 100755 index 0000000000..15e36e22fb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/enableRAC.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Enable RAC feature in Oracle Software +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# shellcheck disable=SC1090 +source /home/"${DB_USER}"/.bashrc + +export ORACLE_HOME=${DB_HOME} +export PATH=${ORACLE_HOME}/bin:/bin:/sbin:/usr/bin +export LD_LIBRARY_PATH=${ORACLE_HOME}/lib:/lib:/usr/lib + +make -f "$DB_HOME"/rdbms/lib/ins_rdbms.mk rac_on +make -f "$DB_HOME"/rdbms/lib/ins_rdbms.mk ioracle diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/fixupPreq.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/fixupPreq.sh new file mode 100755 index 0000000000..1e051dfaa8 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/fixupPreq.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. 
+# +# Since: January, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Set up the Linux kernel parameters inside the container. Note that some parameters need to be set on the container host. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. + +rpm -Uvh "$GRID_HOME"/cv/rpm/cvuqdisk* +echo "oracle soft nofile 1024" > /etc/security/limits.conf +echo "oracle hard nofile 65536" >> /etc/security/limits.conf +echo "oracle soft nproc 16384" >> /etc/security/limits.conf +echo "oracle hard nproc 16384" >> /etc/security/limits.conf +echo "oracle soft stack 10240" >> /etc/security/limits.conf +echo "oracle hard stack 32768" >> /etc/security/limits.conf +echo "oracle hard memlock 134217728" >> /etc/security/limits.conf +echo "oracle soft memlock 134217728" >> /etc/security/limits.conf +echo "grid soft nofile 1024" >> /etc/security/limits.conf +echo "grid hard nofile 65536" >> /etc/security/limits.conf +echo "grid soft nproc 16384" >> /etc/security/limits.conf +echo "grid hard nproc 16384" >> /etc/security/limits.conf +echo "grid soft stack 10240" >> /etc/security/limits.conf +echo "grid hard stack 32768" >> /etc/security/limits.conf +echo "grid hard memlock 134217728" >> /etc/security/limits.conf +echo "grid soft memlock 134217728" >> /etc/security/limits.conf +echo "ulimit -S -s 10240" >> /home/grid/.bashrc +echo "ulimit -S -s 10240" >> /home/oracle/.bashrc diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid.rsp new file mode 100644 index 0000000000..c05b65c395 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid.rsp @@ -0,0 +1,672 @@ +############################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation.
## +## ## +############################################################################### + +############################################################################### +## ## +## Instructions to fill this response file ## +## To register and configure 'Grid Infrastructure for Cluster' ## +## - Fill out sections A,B,C,D,E,F and G ## +## - Fill out section G if OCR and voting disk should be placed on ASM ## +## ## +## To register and configure 'Grid Infrastructure for Standalone server' ## +## - Fill out sections A,B and G ## +## ## +## To register software for 'Grid Infrastructure' ## +## - Fill out sections A,B and D ## +## - Provide the cluster nodes in section D when choosing CRS_SWONLY as ## +## installation option in section A ## +## ## +## To upgrade clusterware and/or Automatic storage management of earlier ## +## releases ## +## - Fill out sections A,B,C,D and H ## +## ## +## To add more nodes to the cluster ## +## - Fill out sections A and D ## +## - Provide the cluster nodes in section D when choosing CRS_ADDNODE as ## +## installation option in section A ## +## ## +############################################################################### + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v18.0.0 + +############################################################################### +# # +# SECTION A - BASIC # +# # +############################################################################### + + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION=###INVENTORY### + +#------------------------------------------------------------------------------- +# Specify the installation option. +# Allowed values: CRS_CONFIG or HA_CONFIG or UPGRADE or CRS_SWONLY or HA_SWONLY +# - CRS_CONFIG : To register home and configure Grid Infrastructure for cluster +# - HA_CONFIG : To register home and configure Grid Infrastructure for stand alone server +# - UPGRADE : To register home and upgrade clusterware software of earlier release +# - CRS_SWONLY : To register Grid Infrastructure Software home (can be configured for cluster +# or stand alone server later) +# - HA_SWONLY : To register Grid Infrastructure Software home (can be configured for stand +# alone server later. This is only supported on Windows.) +# - CRS_ADDNODE : To add more nodes to the cluster +# - CRS_DELETE_NODE : To delete nodes to the cluster +#------------------------------------------------------------------------------- +oracle.install.option=CRS_CONFIG + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE=###GRID_BASE### + +################################################################################ +# # +# SECTION B - GROUPS # +# # +# The following three groups need to be assigned for all GI installations. # +# OSDBA and OSOPER can be the same or different. OSASM must be different # +# than the other two. 
# +# The value to be specified for OSDBA, OSOPER and OSASM group is only for # +# Unix based Operating System. # +# These groups are not required for upgrades, as they will be determined # +# from the Oracle home to upgrade. # +# # +################################################################################ +#------------------------------------------------------------------------------- +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.asm.OSDBA=dba + +#------------------------------------------------------------------------------- +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +# Value should not be provided if configuring Client Cluster - i.e. storageOption=CLIENT_ASM_STORAGE. +#------------------------------------------------------------------------------- +oracle.install.asm.OSOPER= + +#------------------------------------------------------------------------------- +# The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This +# must be different than the previous two. +#------------------------------------------------------------------------------- +oracle.install.asm.OSASM=asmadmin + +################################################################################ +# # +# SECTION C - SCAN # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the type of SCAN configuration for the cluster +# Allowed values : LOCAL_SCAN and SHARED_SCAN +#------------------------------------------------------------------------------- +oracle.install.crs.config.scanType=###SCAN_TYPE### + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_SCAN is being configured for cluster +# Specify the path to the SCAN client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.SCANClientDataFile=###SHARED_SCAN_FILE### + +#------------------------------------------------------------------------------- +# Specify a name for SCAN +# Applicable if LOCAL_SCAN is being configured for the cluster +# If you choose to configure the cluster with GNS with Auto assigned Node VIPs(DHCP),then the scanName should be specified in the format of 'SCAN name.Cluster name.GNS sub-domain' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.scanName=###SCAN_NAME### + +#------------------------------------------------------------------------------- +# Specify a unused port number for SCAN service +#------------------------------------------------------------------------------- + +oracle.install.crs.config.gpnp.scanPort=###SCAN_PORT### + +################################################################################ +# # +# SECTION D - CLUSTER & GNS # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the required cluster configuration +# Allowed values: STANDALONE, DOMAIN, MEMBERDB, MEMBERAPP +#------------------------------------------------------------------------------- +oracle.install.crs.config.ClusterConfiguration=###CLUSTER_TYPE### + 
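The ###...### tokens in this grid.rsp template (###INVENTORY###, ###GRID_BASE###, ###SCAN_TYPE###, ###SCAN_NAME###, ###SCAN_PORT###, ###CLUSTER_TYPE### and so on) are placeholders that must be resolved before the response file is handed to the Grid Infrastructure installer. As a minimal sketch only (the sed invocation and the /tmp/grid.rsp path are assumptions for illustration, not the project's actual setup logic), the substitution could look like this, using the concrete values that the hard-coded grid1.rsp later in this patch settles on:

# Illustrative placeholder substitution for grid.rsp; the file path and values are assumptions.
sed -i \
  -e "s|###INVENTORY###|/u01/app/oraInventory|g" \
  -e "s|###GRID_BASE###|/u01/app/grid|g" \
  -e "s|###SCAN_TYPE###|LOCAL_SCAN|g" \
  -e "s|###SCAN_NAME###|racnode-scan|g" \
  -e "s|###SCAN_PORT###|1521|g" \
  -e "s|###CLUSTER_TYPE###|STANDALONE|g" \
  -e "s|###CLUSTER_NAME###|rac01cluster|g" \
  /tmp/grid.rsp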
+#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure the cluster as Extended, else +# specify 'false' +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.configureAsExtendedCluster=false + + +#------------------------------------------------------------------------------- +# Specify the Member Cluster Manifest file +# +# Applicable only for MEMBERDB and MEMBERAPP cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.memberClusterManifestFile=###MEMBERDB_FILE### + +#------------------------------------------------------------------------------- +# Specify a name for the Cluster you are creating. +# +# The maximum length allowed for clustername is 15 characters. The name can be +# any combination of lower and uppercase alphabets (A - Z), (0 - 9), hyphen(-) +# and underscore(_). +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterName=###CLUSTER_NAME### + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE, DOMAIN, MEMBERDB cluster configuration. +# Specify 'true' if you would like to configure Grid Naming Service(GNS), else +# specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.configureGNS=###CONFIGURE_GNS### + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to configure GNS. +# Specify 'true' if you would like to assign SCAN name VIP and Node VIPs by DHCP +# , else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.autoConfigureClusterNodeVIP=###DHCP_CONF### + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure GNS. +# Specify the type of GNS configuration for cluster +# Allowed values are: CREATE_NEW_GNS and USE_SHARED_GNS +# Only USE_SHARED_GNS value is allowed for MEMBERDB cluster configuration. 
+#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsOption=###GNS_OPTIONS### + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_GNS is being configured for cluster +# Specify the path to the GNS client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsClientDataFile= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to +# configure GNS for this cluster oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS +# Specify the GNS subdomain and an unused virtual hostname for GNS service +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsSubDomain=###GNS_SUBDOMAIN### +oracle.install.crs.config.gpnp.gnsVIPAddress=###GNSVIP_HOSTNAME### + +#------------------------------------------------------------------------------- +# Specify the list of sites - only if configuring an Extended Cluster +#------------------------------------------------------------------------------- +oracle.install.crs.config.sites= + +#------------------------------------------------------------------------------- +# Specify the list of nodes that have to be configured to be part of the cluster. +# +# The list should a comma-separated list of tuples. Each tuple should be a +# colon-separated string that contains +# - 1 field if you have chosen CRS_SWONLY as installation option, or +# - 1 field if configuring an Application Cluster, or +# - 3 fields if configuring a Flex Cluster +# - 3 fields if adding more nodes to the configured cluster, or +# - 4 fields if configuring an Extended Cluster +# +# The fields should be ordered as follows: +# 1. The first field should be the public node name. +# 2. The second field should be the virtual host name +# (Should be specified as AUTO if you have chosen 'auto configure for VIP' +# i.e. autoConfigureClusterNodeVIP=true) +# 3. The third field indicates the role of node (HUB,LEAF). This has to +# be provided only if Flex Cluster is being configured. +# For Extended Cluster only HUB should be specified for all nodes +# 4. The fourth field indicates the site designation for the node. To be specified only if configuring an Extended Cluster. 
+# The 2nd and 3rd fields are not applicable if you have chosen CRS_SWONLY as installation option +# The 2nd and 3rd fields are not applicable if configuring an Application Cluster +# +# Examples +# For registering GI for a cluster software: oracle.install.crs.config.clusterNodes=node1,node2 +# For adding more nodes to the configured cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF +# For configuring Application Cluster: oracle.install.crs.config.clusterNodes=node1,node2 +# For configuring Flex Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF +# For configuring Extended Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB:site1,node2:node2-vip:HUB:site2 +# You can specify a range of nodes in the tuple using colon separated fields of format +# hostnameprefix:lowerbound-upperbound:hostnamesuffix:vipsuffix:role of node +# +#------------------------------------------------------------------------------- +#oracle.install.crs.config.clusterNodes=###HOSTNAME###:###HOSTNAME_VIP###:HUB +oracle.install.crs.config.clusterNodes=###CRS_CONFIG_NODES### + +#------------------------------------------------------------------------------- +# The value should be a comma separated strings where each string is as shown below +# InterfaceName:SubnetAddress:InterfaceType +# where InterfaceType can be either "1", "2", "3", "4", or "5" +# InterfaceType stand for the following values +# - 1 : PUBLIC +# - 2 : PRIVATE +# - 3 : DO NOT USE +# - 4 : ASM +# - 5 : ASM & PRIVATE +# +# For example: eth0:140.87.24.0:1,eth1:10.2.1.0:2,eth2:140.87.52.0:3 +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.networkInterfaceList=###NETWORK_STRING### + +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup to store GIMR data. +# Specify 'true' if you would like to separate GIMR data with clusterware data, +# else specify 'false' +# Value should be 'true' for DOMAIN cluster configurations +# Value can be true/false for STANDALONE cluster configurations. 
+#------------------------------------------------------------------------------ +oracle.install.asm.configureGIMRDataDG=###GIMR_DG_FLAG### + +################################################################################ +# # +# SECTION E - STORAGE # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for Oracle Cluster Registry(OCR) and Voting +# Disks files +# - FLEX_ASM_STORAGE +# - CLIENT_ASM_STORAGE +# +# Applicable only for MEMBERDB cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.storageOption=###STORAGE_OPTIONS_FOR_MEMBERDB### +################################################################################ +# # +# SECTION F - IPMI # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure Intelligent Power Management interface +# (IPMI), else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.useIPMI=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure IPMI +# i.e. oracle.install.crs.config.useIPMI=true +# Specify the username and password for using IPMI service +#------------------------------------------------------------------------------- +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +################################################################################ +# # +# SECTION G - ASM # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# ASM Storage Type +# Allowed values are : ASM and ASM_ON_NAS +# ASM_ON_NAS applicable only if +# oracle.install.crs.config.ClusterConfiguration=STANDALONE +#------------------------------------------------------------------------------- +oracle.install.asm.storageOption=ASM + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing OCR/VDSK +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store OCR/VDSK files +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.ocrLocation= +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup on NAS to store GIMR data +# Specify 'true' if you would like to separate GIMR data with clusterware data, else +# specify 'false' +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------ +oracle.install.asmOnNAS.configureGIMRDataDG=false + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing GIMR data +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store the GIMR database +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +# and oracle.install.asmOnNAS.configureGIMRDataDG=true 
+#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.gimrLocation= + +#------------------------------------------------------------------------------- +# Password for SYS user of Oracle ASM +#------------------------------------------------------------------------------- +oracle.install.asm.SYSASMPassword=###PASSWORD### + +#------------------------------------------------------------------------------- +# The ASM DiskGroup +# +# Example: oracle.install.asm.diskGroup.name=data +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.name=###DB_ASM_DISKGROUP### + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (required if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.diskGroup.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.redundancy=EXTERNAL + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.diskGroup.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.AUSize=4 + +#------------------------------------------------------------------------------- +# Failure Groups for the disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. +# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create a ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.diskGroup.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-disk1,,/dev/asm-disk2,,/dev/asm-disk3, +oracle.install.asm.diskGroup.disksWithFailureGroupNames=###ASM_DISKGROUP_FG_DISKS### + +#------------------------------------------------------------------------------- +# List of disks to create a ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.disks=/dev/asm-disk1,/dev/asm-disk2,/dev/asm-disk3 +oracle.install.asm.diskGroup.disks=###ASM_DISKGROUP_DISKS### + 
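The ASM disk-group placeholders follow the same pattern; the values have to match the block devices actually presented to the grid containers. A minimal sketch, reusing the device names from the commented samples in this file (again, the sed call, the /tmp/grid.rsp path, and the DATA diskgroup name are illustrative assumptions, not the actual setup code):

# Illustrative ASM placeholder substitution; device paths and diskgroup name are assumptions.
sed -i \
  -e "s|###DB_ASM_DISKGROUP###|DATA|g" \
  -e "s|###ASM_DISKGROUP_FG_DISKS###|/dev/asm-disk1,,/dev/asm-disk2,,/dev/asm-disk3,|g" \
  -e "s|###ASM_DISKGROUP_DISKS###|/dev/asm-disk1,/dev/asm-disk2,/dev/asm-disk3|g" \
  -e "s|###ASM_DISCOVERY_STRING###|/dev/asm*|g" \
  /tmp/grid.rsp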
+#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.diskGroup.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# The disk discovery string to be used to discover the disks used create a ASM DiskGroup +# +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=/oracle/asm/* +# For Windows based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK* +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm* +oracle.install.asm.diskGroup.diskDiscoveryString=###ASM_DISCOVERY_STRING### + +#------------------------------------------------------------------------------- +# Password for ASMSNMP account +# ASMSNMP account is used by Oracle Enterprise Manager to monitor Oracle ASM instances +#------------------------------------------------------------------------------- +oracle.install.asm.monitorPassword=###PASSWORD### + +#------------------------------------------------------------------------------- +# GIMR Storage data ASM DiskGroup +# Applicable only when +# oracle.install.asm.configureGIMRDataDG=true +# Example: oracle.install.asm.GIMRDG.name=MGMT +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.name=###GIMR_DG_NAME### + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (only if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.gimrDG.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.redundancy=###GIMR_DG_REDUNDANCY### + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.gimrDG.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the GIMR storage data ASM disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. 
+# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.FailureGroups=###GIMR_DG_FAILURE_GROUP### + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create GIMR data ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.gimrDG.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disksWithFailureGroupNames=###GIMR_DISKGROUP_FG_DISKS### + +#------------------------------------------------------------------------------- +# List of disks to create GIMR data ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disks=###GIMR_DISKGROUP_DISKS### + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.gimrDG.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# Configure AFD - ASM Filter Driver +# Applicable only for FLEX_ASM_STORAGE option +# Specify 'true' if you want to configure AFD, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.asm.configureAFD=false +#------------------------------------------------------------------------------- +# Configure RHPS - Rapid Home Provisioning Service +# Applicable only for DOMAIN cluster configuration +# Specify 'true' if you want to configure RHP service, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.configureRHPS=false + +################################################################################ +# # +# SECTION H - UPGRADE # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify whether to ignore down nodes during upgrade operation. 
+# Value should be 'true' to ignore down nodes otherwise specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.ignoreDownNodes=false +################################################################################ +# # +# MANAGEMENT OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing Oracle Grid Infrastructure +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +# 2. NONE -If you do not want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +#------------------------------------------------------------------------------- +oracle.install.config.managementOption=NONE + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsPort=0 + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminPassword= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.executeRootScript=false + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. 
+# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.configMethod=ROOT +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoUserName= +#-------------------------------------------------------------------------------------- +# Specify the nodes batch map. +# +# This should be a comma separated list of node:batch pairs. +# During upgrade, you can sequence the automatic execution of root scripts +# by pooling the nodes into batches. +# A maximum of three batches can be specified. +# Installer will execute the root scripts on all the nodes in one batch before +# proceeding to next batch. +# Root script execution on the local node must be in Batch 1. +# Only one type of node role can be used for each batch. +# Root script execution should be done first in all HUB nodes and then, when +# existent, in all the LEAF nodes. +# +# Examples: +# 1. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:2,HUBNode3:2,LEAFNode4:3 +# 2. oracle.install.crs.config.batchinfo=HUBNode1:1,LEAFNode2:2,LEAFNode3:2,LEAFNode4:2 +# 3. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:1,LEAFNode3:2,LEAFNode4:3 +# +# Applicable only for UPGRADE install option. +#-------------------------------------------------------------------------------------- +oracle.install.crs.config.batchinfo= +################################################################################ +# # +# APPLICATION CLUSTER OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the Virtual hostname to configure virtual access for your Application +# The value to be specified for Virtual hostname is optional. +#------------------------------------------------------------------------------- +oracle.install.crs.app.applicationAddress= +################################################################################# +# # +# DELETE NODE OPTIONS # +# # +################################################################################# + +#-------------------------------------------------------------------------------- +# Specify the node names to delete nodes from cluster. +# Delete node will be performed only for the remote nodes from the cluster. 
+#-------------------------------------------------------------------------------- +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid1.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid1.rsp new file mode 100644 index 0000000000..4e2737c73b --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid1.rsp @@ -0,0 +1,671 @@ +############################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +############################################################################### + +############################################################################### +## ## +## Instructions to fill this response file ## +## To register and configure 'Grid Infrastructure for Cluster' ## +## - Fill out sections A,B,C,D,E,F and G ## +## - Fill out section G if OCR and voting disk should be placed on ASM ## +## ## +## To register and configure 'Grid Infrastructure for Standalone server' ## +## - Fill out sections A,B and G ## +## ## +## To register software for 'Grid Infrastructure' ## +## - Fill out sections A,B and D ## +## - Provide the cluster nodes in section D when choosing CRS_SWONLY as ## +## installation option in section A ## +## ## +## To upgrade clusterware and/or Automatic storage management of earlier ## +## releases ## +## - Fill out sections A,B,C,D and H ## +## ## +## To add more nodes to the cluster ## +## - Fill out sections A and D ## +## - Provide the cluster nodes in section D when choosing CRS_ADDNODE as ## +## installation option in section A ## +## ## +############################################################################### + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v18.0.0 + +############################################################################### +# # +# SECTION A - BASIC # +# # +############################################################################### + + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION=/u01/app/oraInventory + +#------------------------------------------------------------------------------- +# Specify the installation option. 
+# Allowed values: CRS_CONFIG or HA_CONFIG or UPGRADE or CRS_SWONLY or HA_SWONLY +# - CRS_CONFIG : To register home and configure Grid Infrastructure for cluster +# - HA_CONFIG : To register home and configure Grid Infrastructure for stand alone server +# - UPGRADE : To register home and upgrade clusterware software of earlier release +# - CRS_SWONLY : To register Grid Infrastructure Software home (can be configured for cluster +# or stand alone server later) +# - HA_SWONLY : To register Grid Infrastructure Software home (can be configured for stand +# alone server later. This is only supported on Windows.) +# - CRS_ADDNODE : To add more nodes to the cluster +# - CRS_DELETE_NODE : To delete nodes to the cluster +#------------------------------------------------------------------------------- +oracle.install.option=CRS_CONFIG + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE=/u01/app/grid + +################################################################################ +# # +# SECTION B - GROUPS # +# # +# The following three groups need to be assigned for all GI installations. # +# OSDBA and OSOPER can be the same or different. OSASM must be different # +# than the other two. # +# The value to be specified for OSDBA, OSOPER and OSASM group is only for # +# Unix based Operating System. # +# These groups are not required for upgrades, as they will be determined # +# from the Oracle home to upgrade. # +# # +################################################################################ +#------------------------------------------------------------------------------- +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.asm.OSDBA=dba + +#------------------------------------------------------------------------------- +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +# Value should not be provided if configuring Client Cluster - i.e. storageOption=CLIENT_ASM_STORAGE. +#------------------------------------------------------------------------------- +oracle.install.asm.OSOPER= + +#------------------------------------------------------------------------------- +# The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This +# must be different than the previous two. 
+#------------------------------------------------------------------------------- +oracle.install.asm.OSASM=asmadmin + +################################################################################ +# # +# SECTION C - SCAN # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the type of SCAN configuration for the cluster +# Allowed values : LOCAL_SCAN and SHARED_SCAN +#------------------------------------------------------------------------------- +oracle.install.crs.config.scanType=LOCAL_SCAN + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_SCAN is being configured for cluster +# Specify the path to the SCAN client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.SCANClientDataFile= + +#------------------------------------------------------------------------------- +# Specify a name for SCAN +# Applicable if LOCAL_SCAN is being configured for the cluster +# If you choose to configure the cluster with GNS with Auto assigned Node VIPs(DHCP),then the scanName should be specified in the format of 'SCAN name.Cluster name.GNS sub-domain' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.scanName=racnode-scan + +#------------------------------------------------------------------------------- +# Specify a unused port number for SCAN service +#------------------------------------------------------------------------------- + +oracle.install.crs.config.gpnp.scanPort=1521 + +################################################################################ +# # +# SECTION D - CLUSTER & GNS # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the required cluster configuration +# Allowed values: STANDALONE, DOMAIN, MEMBERDB, MEMBERAPP +#------------------------------------------------------------------------------- +oracle.install.crs.config.ClusterConfiguration=STANDALONE + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure the cluster as Extended, else +# specify 'false' +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.configureAsExtendedCluster=false + + +#------------------------------------------------------------------------------- +# Specify the Member Cluster Manifest file +# +# Applicable only for MEMBERDB and MEMBERAPP cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.memberClusterManifestFile= + +#------------------------------------------------------------------------------- +# Specify a name for the Cluster you are creating. +# +# The maximum length allowed for clustername is 15 characters. The name can be +# any combination of lower and uppercase alphabets (A - Z), (0 - 9), hyphen(-) +# and underscore(_). 
+# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterName=rac01cluster + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE, DOMAIN, MEMBERDB cluster configuration. +# Specify 'true' if you would like to configure Grid Naming Service(GNS), else +# specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.configureGNS= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to configure GNS. +# Specify 'true' if you would like to assign SCAN name VIP and Node VIPs by DHCP +# , else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.autoConfigureClusterNodeVIP=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure GNS. +# Specify the type of GNS configuration for cluster +# Allowed values are: CREATE_NEW_GNS and USE_SHARED_GNS +# Only USE_SHARED_GNS value is allowed for MEMBERDB cluster configuration. +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsOption= + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_GNS is being configured for cluster +# Specify the path to the GNS client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsClientDataFile= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to +# configure GNS for this cluster oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS +# Specify the GNS subdomain and an unused virtual hostname for GNS service +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= + +#------------------------------------------------------------------------------- +# Specify the list of sites - only if configuring an Extended Cluster +#------------------------------------------------------------------------------- +oracle.install.crs.config.sites= + +#------------------------------------------------------------------------------- +# Specify the list of nodes that have to be configured to be part of the cluster. +# +# The list should a comma-separated list of tuples. Each tuple should be a +# colon-separated string that contains +# - 1 field if you have chosen CRS_SWONLY as installation option, or +# - 1 field if configuring an Application Cluster, or +# - 3 fields if configuring a Flex Cluster +# - 3 fields if adding more nodes to the configured cluster, or +# - 4 fields if configuring an Extended Cluster +# +# The fields should be ordered as follows: +# 1. The first field should be the public node name. +# 2. The second field should be the virtual host name +# (Should be specified as AUTO if you have chosen 'auto configure for VIP' +# i.e. autoConfigureClusterNodeVIP=true) +# 3. The third field indicates the role of node (HUB,LEAF). This has to +# be provided only if Flex Cluster is being configured. 
+# For Extended Cluster only HUB should be specified for all nodes +# 4. The fourth field indicates the site designation for the node. To be specified only if configuring an Extended Cluster. +# The 2nd and 3rd fields are not applicable if you have chosen CRS_SWONLY as installation option +# The 2nd and 3rd fields are not applicable if configuring an Application Cluster +# +# Examples +# For registering GI for a cluster software: oracle.install.crs.config.clusterNodes=node1,node2 +# For adding more nodes to the configured cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF +# For configuring Application Cluster: oracle.install.crs.config.clusterNodes=node1,node2 +# For configuring Flex Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF +# For configuring Extended Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB:site1,node2:node2-vip:HUB:site2 +# You can specify a range of nodes in the tuple using colon separated fields of format +# hostnameprefix:lowerbound-upperbound:hostnamesuffix:vipsuffix:role of node +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterNodes=racnode1:racnode1-vip:HUB,racnode2:racnode2-vip:HUB + +#------------------------------------------------------------------------------- +# The value should be a comma separated strings where each string is as shown below +# InterfaceName:SubnetAddress:InterfaceType +# where InterfaceType can be either "1", "2", "3", "4", or "5" +# InterfaceType stand for the following values +# - 1 : PUBLIC +# - 2 : PRIVATE +# - 3 : DO NOT USE +# - 4 : ASM +# - 5 : ASM & PRIVATE +# +# For example: eth0:140.87.24.0:1,eth1:10.2.1.0:2,eth2:140.87.52.0:3 +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.networkInterfaceList=eth0:192.168.17.0:5,eth1:172.16.1.0:1 + +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup to store GIMR data. +# Specify 'true' if you would like to separate GIMR data with clusterware data, +# else specify 'false' +# Value should be 'true' for DOMAIN cluster configurations +# Value can be true/false for STANDALONE cluster configurations. 
+#------------------------------------------------------------------------------ +oracle.install.asm.configureGIMRDataDG=false + +################################################################################ +# # +# SECTION E - STORAGE # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for Oracle Cluster Registry(OCR) and Voting +# Disks files +# - FLEX_ASM_STORAGE +# - CLIENT_ASM_STORAGE +# +# Applicable only for MEMBERDB cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.storageOption= +################################################################################ +# # +# SECTION F - IPMI # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure Intelligent Power Management interface +# (IPMI), else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.useIPMI=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure IPMI +# i.e. oracle.install.crs.config.useIPMI=true +# Specify the username and password for using IPMI service +#------------------------------------------------------------------------------- +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +################################################################################ +# # +# SECTION G - ASM # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# ASM Storage Type +# Allowed values are : ASM and ASM_ON_NAS +# ASM_ON_NAS applicable only if +# oracle.install.crs.config.ClusterConfiguration=STANDALONE +#------------------------------------------------------------------------------- +oracle.install.asm.storageOption=ASM + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing OCR/VDSK +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store OCR/VDSK files +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.ocrLocation= +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup on NAS to store GIMR data +# Specify 'true' if you would like to separate GIMR data with clusterware data, else +# specify 'false' +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------ +oracle.install.asmOnNAS.configureGIMRDataDG=false + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing GIMR data +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store the GIMR database +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +# and oracle.install.asmOnNAS.configureGIMRDataDG=true 
+#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.gimrLocation= + +#------------------------------------------------------------------------------- +# Password for SYS user of Oracle ASM +#------------------------------------------------------------------------------- +oracle.install.asm.SYSASMPassword=Oracle_12c + +#------------------------------------------------------------------------------- +# The ASM DiskGroup +# +# Example: oracle.install.asm.diskGroup.name=data +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.name=DATA + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (required if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.diskGroup.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.redundancy=EXTERNAL + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.diskGroup.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.AUSize=4 + +#------------------------------------------------------------------------------- +# Failure Groups for the disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. +# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create a ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.diskGroup.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-disk1,,/dev/asm-disk2,,/dev/asm-disk3, +oracle.install.asm.diskGroup.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create a ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.disks=/dev/asm-disk1,/dev/asm-disk2,/dev/asm-disk3 +oracle.install.asm.diskGroup.disks=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img + 
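Editor's note on the values just above: grid1.rsp builds the DATA disk group from five file-backed ASM disks under /oradata rather than raw block devices, with external redundancy, a 4 MB AU size, and a matching discovery string (/oradata/asm_*). A minimal sketch of how those backing files could be prepared before gridSetup.sh consumes this response file is shown below. Only the paths mirror the parameter values in this patch; the 10 GB size, the grid:asmadmin ownership, and the idea of creating the files up front on the shared /oradata volume are illustrative assumptions, not something this diff specifies.

    #!/bin/bash
    # Hedged sketch: pre-create the file-backed ASM disks named in
    # oracle.install.asm.diskGroup.disks above. Run on whatever host or
    # storage container exports the shared /oradata volume.
    # Size (10 GB) and ownership (grid:asmadmin) are assumptions.
    set -e
    for i in 01 02 03 04 05; do
      disk="/oradata/asm_disk${i}.img"
      if [ ! -f "$disk" ]; then
        # Fully allocated, zero-filled backing file for one ASM disk
        dd if=/dev/zero of="$disk" bs=1M count=10240
        chown grid:asmadmin "$disk"
        chmod 660 "$disk"
      fi
    done

ASM would then pick these files up through the discovery string set a little further down (oracle.install.asm.diskGroup.diskDiscoveryString=/oradata/asm_*); because the disk group uses EXTERNAL redundancy, the failure-group variants of the disk list can stay empty, which is consistent with the commented-out disksWithFailureGroupNames entry above.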
+#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.diskGroup.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# The disk discovery string to be used to discover the disks used create a ASM DiskGroup +# +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=/oracle/asm/* +# For Windows based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK* +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm* +oracle.install.asm.diskGroup.diskDiscoveryString=/oradata/asm_* + +#------------------------------------------------------------------------------- +# Password for ASMSNMP account +# ASMSNMP account is used by Oracle Enterprise Manager to monitor Oracle ASM instances +#------------------------------------------------------------------------------- +oracle.install.asm.monitorPassword=Oracle_12c + +#------------------------------------------------------------------------------- +# GIMR Storage data ASM DiskGroup +# Applicable only when +# oracle.install.asm.configureGIMRDataDG=true +# Example: oracle.install.asm.GIMRDG.name=MGMT +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.name= + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (only if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.gimrDG.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.redundancy= + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.gimrDG.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the GIMR storage data ASM disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. 
+# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create GIMR data ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.gimrDG.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create GIMR data ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.gimrDG.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# Configure AFD - ASM Filter Driver +# Applicable only for FLEX_ASM_STORAGE option +# Specify 'true' if you want to configure AFD, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.asm.configureAFD=false +#------------------------------------------------------------------------------- +# Configure RHPS - Rapid Home Provisioning Service +# Applicable only for DOMAIN cluster configuration +# Specify 'true' if you want to configure RHP service, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.configureRHPS=false + +################################################################################ +# # +# SECTION H - UPGRADE # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify whether to ignore down nodes during upgrade operation. 
+# Value should be 'true' to ignore down nodes otherwise specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.ignoreDownNodes=false +################################################################################ +# # +# MANAGEMENT OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing Oracle Grid Infrastructure +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +# 2. NONE -If you do not want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +#------------------------------------------------------------------------------- +oracle.install.config.managementOption=NONE + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsPort=0 + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminPassword= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.executeRootScript=false + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. 
+# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.configMethod=ROOT +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoUserName= +#-------------------------------------------------------------------------------------- +# Specify the nodes batch map. +# +# This should be a comma separated list of node:batch pairs. +# During upgrade, you can sequence the automatic execution of root scripts +# by pooling the nodes into batches. +# A maximum of three batches can be specified. +# Installer will execute the root scripts on all the nodes in one batch before +# proceeding to next batch. +# Root script execution on the local node must be in Batch 1. +# Only one type of node role can be used for each batch. +# Root script execution should be done first in all HUB nodes and then, when +# existent, in all the LEAF nodes. +# +# Examples: +# 1. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:2,HUBNode3:2,LEAFNode4:3 +# 2. oracle.install.crs.config.batchinfo=HUBNode1:1,LEAFNode2:2,LEAFNode3:2,LEAFNode4:2 +# 3. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:1,LEAFNode3:2,LEAFNode4:3 +# +# Applicable only for UPGRADE install option. +#-------------------------------------------------------------------------------------- +oracle.install.crs.config.batchinfo= +################################################################################ +# # +# APPLICATION CLUSTER OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the Virtual hostname to configure virtual access for your Application +# The value to be specified for Virtual hostname is optional. +#------------------------------------------------------------------------------- +oracle.install.crs.app.applicationAddress= +################################################################################# +# # +# DELETE NODE OPTIONS # +# # +################################################################################# + +#-------------------------------------------------------------------------------- +# Specify the node names to delete nodes from cluster. +# Delete node will be performed only for the remote nodes from the cluster. 
+#-------------------------------------------------------------------------------- +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid_addnode.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid_addnode.rsp new file mode 100644 index 0000000000..5c51e4fee3 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid_addnode.rsp @@ -0,0 +1,672 @@ +############################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +############################################################################### + +############################################################################### +## ## +## Instructions to fill this response file ## +## To register and configure 'Grid Infrastructure for Cluster' ## +## - Fill out sections A,B,C,D,E,F and G ## +## - Fill out section G if OCR and voting disk should be placed on ASM ## +## ## +## To register and configure 'Grid Infrastructure for Standalone server' ## +## - Fill out sections A,B and G ## +## ## +## To register software for 'Grid Infrastructure' ## +## - Fill out sections A,B and D ## +## - Provide the cluster nodes in section D when choosing CRS_SWONLY as ## +## installation option in section A ## +## ## +## To upgrade clusterware and/or Automatic storage management of earlier ## +## releases ## +## - Fill out sections A,B,C,D and H ## +## ## +## To add more nodes to the cluster ## +## - Fill out sections A and D ## +## - Provide the cluster nodes in section D when choosing CRS_ADDNODE as ## +## installation option in section A ## +## ## +############################################################################### + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v19.0.0 + +############################################################################### +# # +# SECTION A - BASIC # +# # +############################################################################### + + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION=###INVENTORY### + +#------------------------------------------------------------------------------- +# Specify the installation option. 
+# Allowed values: CRS_CONFIG or HA_CONFIG or UPGRADE or CRS_SWONLY or HA_SWONLY +# - CRS_CONFIG : To register home and configure Grid Infrastructure for cluster +# - HA_CONFIG : To register home and configure Grid Infrastructure for stand alone server +# - UPGRADE : To register home and upgrade clusterware software of earlier release +# - CRS_SWONLY : To register Grid Infrastructure Software home (can be configured for cluster +# or stand alone server later) +# - HA_SWONLY : To register Grid Infrastructure Software home (can be configured for stand +# alone server later. This is only supported on Windows.) +# - CRS_ADDNODE : To add more nodes to the cluster +# - CRS_DELETE_NODE : To delete nodes to the cluster +#------------------------------------------------------------------------------- +oracle.install.option=CRS_ADDNODE + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE=###GRID_BASE### + +################################################################################ +# # +# SECTION B - GROUPS # +# # +# The following three groups need to be assigned for all GI installations. # +# OSDBA and OSOPER can be the same or different. OSASM must be different # +# than the other two. # +# The value to be specified for OSDBA, OSOPER and OSASM group is only for # +# Unix based Operating System. # +# These groups are not required for upgrades, as they will be determined # +# from the Oracle home to upgrade. # +# # +################################################################################ +#------------------------------------------------------------------------------- +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.asm.OSDBA=asmdba + +#------------------------------------------------------------------------------- +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +# Value should not be provided if configuring Client Cluster - i.e. storageOption=CLIENT_ASM_STORAGE. +#------------------------------------------------------------------------------- +oracle.install.asm.OSOPER=asmoper + +#------------------------------------------------------------------------------- +# The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This +# must be different than the previous two. 
+#------------------------------------------------------------------------------- +oracle.install.asm.OSASM=asmadmin + +################################################################################ +# # +# SECTION C - SCAN # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the type of SCAN configuration for the cluster +# Allowed values : LOCAL_SCAN and SHARED_SCAN +#------------------------------------------------------------------------------- +oracle.install.crs.config.scanType= + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_SCAN is being configured for cluster +# Specify the path to the SCAN client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.SCANClientDataFile= + +#------------------------------------------------------------------------------- +# Specify a name for SCAN +# Applicable if LOCAL_SCAN is being configured for the cluster +# If you choose to configure the cluster with GNS with Auto assigned Node VIPs(DHCP),then the scanName should be specified in the format of 'SCAN name.Cluster name.GNS sub-domain' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.scanName= + +#------------------------------------------------------------------------------- +# Specify a unused port number for SCAN service +#------------------------------------------------------------------------------- + +oracle.install.crs.config.gpnp.scanPort= + +################################################################################ +# # +# SECTION D - CLUSTER & GNS # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the required cluster configuration +# Allowed values: STANDALONE, DOMAIN, MEMBERDB, MEMBERAPP +#------------------------------------------------------------------------------- +oracle.install.crs.config.ClusterConfiguration= + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure the cluster as Extended, else +# specify 'false' +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.configureAsExtendedCluster=false + + +#------------------------------------------------------------------------------- +# Specify the Member Cluster Manifest file +# +# Applicable only for MEMBERDB and MEMBERAPP cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.memberClusterManifestFile= + +#------------------------------------------------------------------------------- +# Specify a name for the Cluster you are creating. +# +# The maximum length allowed for clustername is 15 characters. The name can be +# any combination of lower and uppercase alphabets (A - Z), (0 - 9), hyphen(-) +# and underscore(_). 
+# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterName= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE, DOMAIN, MEMBERDB cluster configuration. +# Specify 'true' if you would like to configure Grid Naming Service(GNS), else +# specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.configureGNS=false + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to configure GNS. +# Specify 'true' if you would like to assign SCAN name VIP and Node VIPs by DHCP +# , else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.autoConfigureClusterNodeVIP=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure GNS. +# Specify the type of GNS configuration for cluster +# Allowed values are: CREATE_NEW_GNS and USE_SHARED_GNS +# Only USE_SHARED_GNS value is allowed for MEMBERDB cluster configuration. +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_GNS is being configured for cluster +# Specify the path to the GNS client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsClientDataFile= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to +# configure GNS for this cluster oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS +# Specify the GNS subdomain and an unused virtual hostname for GNS service +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= + +#------------------------------------------------------------------------------- +# Specify the list of sites - only if configuring an Extended Cluster +#------------------------------------------------------------------------------- +oracle.install.crs.config.sites= + +#------------------------------------------------------------------------------- +# Specify the list of nodes that have to be configured to be part of the cluster. +# +# The list should a comma-separated list of tuples. Each tuple should be a +# colon-separated string that contains +# - 1 field if you have chosen CRS_SWONLY as installation option, or +# - 1 field if configuring an Application Cluster, or +# - 3 fields if configuring a Flex Cluster +# - 3 fields if adding more nodes to the configured cluster, or +# - 4 fields if configuring an Extended Cluster +# +# The fields should be ordered as follows: +# 1. The first field should be the public node name. +# 2. The second field should be the virtual host name +# (Should be specified as AUTO if you have chosen 'auto configure for VIP' +# i.e. autoConfigureClusterNodeVIP=true) +# 3. The third field indicates the role of node (HUB,LEAF). 
This has to +# be provided only if Flex Cluster is being configured. +# For Extended Cluster only HUB should be specified for all nodes +# 4. The fourth field indicates the site designation for the node. To be specified only if configuring an Extended Cluster. +# The 2nd and 3rd fields are not applicable if you have chosen CRS_SWONLY as installation option +# The 2nd and 3rd fields are not applicable if configuring an Application Cluster +# +# Examples +# For registering GI for a cluster software: oracle.install.crs.config.clusterNodes=node1,node2 +# For adding more nodes to the configured cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF +# For configuring Application Cluster: oracle.install.crs.config.clusterNodes=node1,node2 +# For configuring Flex Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF +# For configuring Extended Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB:site1,node2:node2-vip:HUB:site2 +# You can specify a range of nodes in the tuple using colon separated fields of format +# hostnameprefix:lowerbound-upperbound:hostnamesuffix:vipsuffix:role of node +# +#------------------------------------------------------------------------------- +#oracle.install.crs.config.clusterNodes=###PUBLIC_HOSTNAME###:###HOSTNAME_VIP###:HUB +oracle.install.crs.config.clusterNodes=###CRS_CONFIG_NODES### + +#------------------------------------------------------------------------------- +# The value should be a comma separated strings where each string is as shown below +# InterfaceName:SubnetAddress:InterfaceType +# where InterfaceType can be either "1", "2", "3", "4", or "5" +# InterfaceType stand for the following values +# - 1 : PUBLIC +# - 2 : PRIVATE +# - 3 : DO NOT USE +# - 4 : ASM +# - 5 : ASM & PRIVATE +# +# For example: eth0:140.87.24.0:1,eth1:10.2.1.0:2,eth2:140.87.52.0:3 +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.networkInterfaceList= + +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup to store GIMR data. +# Specify 'true' if you would like to separate GIMR data with clusterware data, +# else specify 'false' +# Value should be 'true' for DOMAIN cluster configurations +# Value can be true/false for STANDALONE cluster configurations. 
+#------------------------------------------------------------------------------ +oracle.install.asm.configureGIMRDataDG=false + +################################################################################ +# # +# SECTION E - STORAGE # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for Oracle Cluster Registry(OCR) and Voting +# Disks files +# - FLEX_ASM_STORAGE +# - CLIENT_ASM_STORAGE +# +# Applicable only for MEMBERDB cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.storageOption= +################################################################################ +# # +# SECTION F - IPMI # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure Intelligent Power Management interface +# (IPMI), else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.useIPMI=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure IPMI +# i.e. oracle.install.crs.config.useIPMI=true +# Specify the username and password for using IPMI service +#------------------------------------------------------------------------------- +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +################################################################################ +# # +# SECTION G - ASM # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# ASM Storage Type +# Allowed values are : ASM and ASM_ON_NAS +# ASM_ON_NAS applicable only if +# oracle.install.crs.config.ClusterConfiguration=STANDALONE +#------------------------------------------------------------------------------- +oracle.install.asm.storageOption=ASM + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing OCR/VDSK +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store OCR/VDSK files +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.ocrLocation= +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup on NAS to store GIMR data +# Specify 'true' if you would like to separate GIMR data with clusterware data, else +# specify 'false' +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------ +oracle.install.asmOnNAS.configureGIMRDataDG=false + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing GIMR data +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store the GIMR database +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +# and oracle.install.asmOnNAS.configureGIMRDataDG=true 
+#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.gimrLocation= + +#------------------------------------------------------------------------------- +# Password for SYS user of Oracle ASM +#------------------------------------------------------------------------------- +oracle.install.asm.SYSASMPassword= + +#------------------------------------------------------------------------------- +# The ASM DiskGroup +# +# Example: oracle.install.asm.diskGroup.name=data +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.name=DATA + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (required if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.diskGroup.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.redundancy= + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.diskGroup.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. +# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create a ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.diskGroup.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-disk1,,/dev/asm-disk2,,/dev/asm-disk3, +oracle.install.asm.diskGroup.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create a ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.disks=/dev/asm-disk1,/dev/asm-disk2,/dev/asm-disk3 +oracle.install.asm.diskGroup.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. 
+# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.diskGroup.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# The disk discovery string to be used to discover the disks used create a ASM DiskGroup +# +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=/oracle/asm/* +# For Windows based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK* +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm* +oracle.install.asm.diskGroup.diskDiscoveryString= + +#------------------------------------------------------------------------------- +# Password for ASMSNMP account +# ASMSNMP account is used by Oracle Enterprise Manager to monitor Oracle ASM instances +#------------------------------------------------------------------------------- +oracle.install.asm.monitorPassword= + +#------------------------------------------------------------------------------- +# GIMR Storage data ASM DiskGroup +# Applicable only when +# oracle.install.asm.configureGIMRDataDG=true +# Example: oracle.install.asm.GIMRDG.name=MGMT +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.name= + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (only if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.gimrDG.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.redundancy= + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.gimrDG.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the GIMR storage data ASM disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. 
+# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create GIMR data ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.gimrDG.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create GIMR data ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.gimrDG.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# Configure AFD - ASM Filter Driver +# Applicable only for FLEX_ASM_STORAGE option +# Specify 'true' if you want to configure AFD, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.asm.configureAFD=false +#------------------------------------------------------------------------------- +# Configure RHPS - Rapid Home Provisioning Service +# Applicable only for DOMAIN cluster configuration +# Specify 'true' if you want to configure RHP service, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.configureRHPS=false + +################################################################################ +# # +# SECTION H - UPGRADE # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify whether to ignore down nodes during upgrade operation. 
+# Value should be 'true' to ignore down nodes otherwise specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.ignoreDownNodes=false +################################################################################ +# # +# MANAGEMENT OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing Oracle Grid Infrastructure +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +# 2. NONE -If you do not want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +#------------------------------------------------------------------------------- +oracle.install.config.managementOption=NONE + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsPort=0 + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminPassword= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.executeRootScript=false + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. 
+# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.configMethod=ROOT +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoUserName= +#-------------------------------------------------------------------------------------- +# Specify the nodes batch map. +# +# This should be a comma separated list of node:batch pairs. +# During upgrade, you can sequence the automatic execution of root scripts +# by pooling the nodes into batches. +# A maximum of three batches can be specified. +# Installer will execute the root scripts on all the nodes in one batch before +# proceeding to next batch. +# Root script execution on the local node must be in Batch 1. +# Only one type of node role can be used for each batch. +# Root script execution should be done first in all HUB nodes and then, when +# existent, in all the LEAF nodes. +# +# Examples: +# 1. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:2,HUBNode3:2,LEAFNode4:3 +# 2. oracle.install.crs.config.batchinfo=HUBNode1:1,LEAFNode2:2,LEAFNode3:2,LEAFNode4:2 +# 3. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:1,LEAFNode3:2,LEAFNode4:3 +# +# Applicable only for UPGRADE install option. +#-------------------------------------------------------------------------------------- +oracle.install.crs.config.batchinfo= +################################################################################ +# # +# APPLICATION CLUSTER OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the Virtual hostname to configure virtual access for your Application +# The value to be specified for Virtual hostname is optional. +#------------------------------------------------------------------------------- +oracle.install.crs.app.applicationAddress= +################################################################################# +# # +# DELETE NODE OPTIONS # +# # +################################################################################# + +#-------------------------------------------------------------------------------- +# Specify the node names to delete nodes from cluster. +# Delete node will be performed only for the remote nodes from the cluster. 
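+# Illustrative value only (the node name below is hypothetical and not part of the
+# original template); removal of a single remote node would look like:
+#   oracle.install.crs.deleteNode.nodes=racnode2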
+#-------------------------------------------------------------------------------- +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid_sw_install_19c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid_sw_install_19c.rsp new file mode 100644 index 0000000000..d95b0779f6 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/grid_sw_install_19c.rsp @@ -0,0 +1,668 @@ +############################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +############################################################################### + +############################################################################### +## ## +## Instructions to fill this response file ## +## To register and configure 'Grid Infrastructure for Cluster' ## +## - Fill out sections A,B,C,D,E,F and G ## +## - Fill out section G if OCR and voting disk should be placed on ASM ## +## ## +## To register and configure 'Grid Infrastructure for Standalone server' ## +## - Fill out sections A,B and G ## +## ## +## To register software for 'Grid Infrastructure' ## +## - Fill out sections A,B and D ## +## - Provide the cluster nodes in section D when choosing CRS_SWONLY as ## +## installation option in section A ## +## ## +## To upgrade clusterware and/or Automatic storage management of earlier ## +## releases ## +## - Fill out sections A,B,C,D and H ## +## ## +## To add more nodes to the cluster ## +## - Fill out sections A and D ## +## - Provide the cluster nodes in section D when choosing CRS_ADDNODE as ## +## installation option in section A ## +## ## +############################################################################### + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v19.0.0 + +############################################################################### +# # +# SECTION A - BASIC # +# # +############################################################################### + + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION=###INVENTORY### + +#------------------------------------------------------------------------------- +# Specify the installation option. 
+# Allowed values: CRS_CONFIG or HA_CONFIG or UPGRADE or CRS_SWONLY or HA_SWONLY +# - CRS_CONFIG : To register home and configure Grid Infrastructure for cluster +# - HA_CONFIG : To register home and configure Grid Infrastructure for stand alone server +# - UPGRADE : To register home and upgrade clusterware software of earlier release +# - CRS_SWONLY : To register Grid Infrastructure Software home (can be configured for cluster +# or stand alone server later) +# - HA_SWONLY : To register Grid Infrastructure Software home (can be configured for stand +# alone server later. This is only supported on Windows.) +# - CRS_ADDNODE : To add more nodes to the cluster +# - CRS_DELETE_NODE : To delete nodes to the cluster +#------------------------------------------------------------------------------- +oracle.install.option=###INSTALL_TYPE### + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE=###GRID_BASE### + +################################################################################ +# # +# SECTION B - GROUPS # +# # +# The following three groups need to be assigned for all GI installations. # +# OSDBA and OSOPER can be the same or different. OSASM must be different # +# than the other two. # +# The value to be specified for OSDBA, OSOPER and OSASM group is only for # +# Unix based Operating System. # +# These groups are not required for upgrades, as they will be determined # +# from the Oracle home to upgrade. # +# # +################################################################################ +#------------------------------------------------------------------------------- +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.asm.OSDBA=asmdba + +#------------------------------------------------------------------------------- +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +# Value should not be provided if configuring Client Cluster - i.e. storageOption=CLIENT_ASM_STORAGE. +#------------------------------------------------------------------------------- +oracle.install.asm.OSOPER=asmoper + +#------------------------------------------------------------------------------- +# The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This +# must be different than the previous two. 
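+# A minimal host-side sketch of creating these groups and granting them to the Grid
+# software owner (group names follow this template's defaults; the 'grid' user name is
+# an assumption, adjust to the environment):
+#   for g in asmadmin asmdba asmoper; do groupadd "$g"; done
+#   usermod -aG asmadmin,asmdba,asmoper grid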
+#------------------------------------------------------------------------------- +oracle.install.asm.OSASM=asmadmin + +################################################################################ +# # +# SECTION C - SCAN # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the type of SCAN configuration for the cluster +# Allowed values : LOCAL_SCAN and SHARED_SCAN +#------------------------------------------------------------------------------- +oracle.install.crs.config.scanType=LOCAL_SCAN + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_SCAN is being configured for cluster +# Specify the path to the SCAN client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.SCANClientDataFile= + +#------------------------------------------------------------------------------- +# Specify a name for SCAN +# Applicable if LOCAL_SCAN is being configured for the cluster +# If you choose to configure the cluster with GNS with Auto assigned Node VIPs(DHCP),then the scanName should be specified in the format of 'SCAN name.Cluster name.GNS sub-domain' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.scanName= + +#------------------------------------------------------------------------------- +# Specify a unused port number for SCAN service +#------------------------------------------------------------------------------- + +oracle.install.crs.config.gpnp.scanPort= + + +################################################################################ +# # +# SECTION D - CLUSTER & GNS # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the required cluster configuration +# Allowed values: STANDALONE, DOMAIN, MEMBERDB, MEMBERAPP +#------------------------------------------------------------------------------- +oracle.install.crs.config.ClusterConfiguration= + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure the cluster as Extended, else +# specify 'false' +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.configureAsExtendedCluster= + + +#------------------------------------------------------------------------------- +# Specify the Member Cluster Manifest file +# +# Applicable only for MEMBERDB and MEMBERAPP cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.memberClusterManifestFile= + +#------------------------------------------------------------------------------- +# Specify a name for the Cluster you are creating. +# +# The maximum length allowed for clustername is 15 characters. The name can be +# any combination of lower and uppercase alphabets (A - Z), (0 - 9), hyphen(-) +# and underscore(_). 
+# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterName= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE, DOMAIN, MEMBERDB cluster configuration. +# Specify 'true' if you would like to configure Grid Naming Service(GNS), else +# specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.configureGNS=false + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to configure GNS. +# Specify 'true' if you would like to assign SCAN name VIP and Node VIPs by DHCP +# , else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.autoConfigureClusterNodeVIP=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure GNS. +# Specify the type of GNS configuration for cluster +# Allowed values are: CREATE_NEW_GNS and USE_SHARED_GNS +# Only USE_SHARED_GNS value is allowed for MEMBERDB cluster configuration. +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_GNS is being configured for cluster +# Specify the path to the GNS client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsClientDataFile= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to +# configure GNS for this cluster oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS +# Specify the GNS subdomain and an unused virtual hostname for GNS service +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= + +#------------------------------------------------------------------------------- +# Specify the list of sites - only if configuring an Extended Cluster +#------------------------------------------------------------------------------- +oracle.install.crs.config.sites= + +#------------------------------------------------------------------------------- +# Specify the list of nodes that have to be configured to be part of the cluster. +# +# The list should a comma-separated list of tuples. Each tuple should be a +# colon-separated string that contains +# - 1 field if you have chosen CRS_SWONLY as installation option, or +# - 1 field if configuring an Application Cluster, or +# - 3 fields if configuring a Flex Cluster +# - 3 fields if adding more nodes to the configured cluster, or +# - 4 fields if configuring an Extended Cluster +# +# The fields should be ordered as follows: +# 1. The first field should be the public node name. +# 2. The second field should be the virtual host name +# (Should be specified as AUTO if you have chosen 'auto configure for VIP' +# i.e. autoConfigureClusterNodeVIP=true) +# 3. The third field indicates the role of node (HUB,LEAF). 
This has to +# be provided only if Flex Cluster is being configured. +# For Extended Cluster only HUB should be specified for all nodes +# 4. The fourth field indicates the site designation for the node. To be specified only if configuring an Extended Cluster. +# The 2nd and 3rd fields are not applicable if you have chosen CRS_SWONLY as installation option +# The 2nd and 3rd fields are not applicable if configuring an Application Cluster +# +# Examples +# For registering GI for a cluster software: oracle.install.crs.config.clusterNodes=node1,node2 +# For adding more nodes to the configured cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF +# For configuring Application Cluster: oracle.install.crs.config.clusterNodes=node1,node2 +# For configuring Flex Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF +# For configuring Extended Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB:site1,node2:node2-vip:HUB:site2 +# You can specify a range of nodes in the tuple using colon separated fields of format +# hostnameprefix:lowerbound-upperbound:hostnamesuffix:vipsuffix:role of node +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterNodes=###HOSTNAME### + +#------------------------------------------------------------------------------- +# The value should be a comma separated strings where each string is as shown below +# InterfaceName:SubnetAddress:InterfaceType +# where InterfaceType can be either "1", "2", "3", "4", or "5" +# InterfaceType stand for the following values +# - 1 : PUBLIC +# - 2 : PRIVATE +# - 3 : DO NOT USE +# - 4 : ASM +# - 5 : ASM & PRIVATE +# +# For example: eth0:140.87.24.0:1,eth1:10.2.1.0:2,eth2:140.87.52.0:3 +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.networkInterfaceList= + +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup to store GIMR data. +# Specify 'true' if you would like to separate GIMR data with clusterware data, +# else specify 'false' +# Value should be 'true' for DOMAIN cluster configurations +# Value can be true/false for STANDALONE cluster configurations. 
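+# Illustrative pairing only (hypothetical values): a DOMAIN services cluster that keeps
+# GIMR data in a dedicated disk group would set
+#   oracle.install.asm.configureGIMRDataDG=true
+# together with the oracle.install.asm.gimrDG.* parameters in SECTION G below.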
+#------------------------------------------------------------------------------ +oracle.install.asm.configureGIMRDataDG=false + +################################################################################ +# # +# SECTION E - STORAGE # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for Oracle Cluster Registry(OCR) and Voting +# Disks files +# - FLEX_ASM_STORAGE +# - CLIENT_ASM_STORAGE +# +# Applicable only for MEMBERDB cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.storageOption= +################################################################################ +# # +# SECTION F - IPMI # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure Intelligent Power Management interface +# (IPMI), else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.useIPMI=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure IPMI +# i.e. oracle.install.crs.config.useIPMI=true +# Specify the username and password for using IPMI service +#------------------------------------------------------------------------------- +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +################################################################################ +# # +# SECTION G - ASM # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# ASM Storage Type +# Allowed values are : ASM and ASM_ON_NAS +# ASM_ON_NAS applicable only if +# oracle.install.crs.config.ClusterConfiguration=STANDALONE +#------------------------------------------------------------------------------- +oracle.install.asm.storageOption=ASM + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing OCR/VDSK +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store OCR/VDSK files +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.ocrLocation= +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup on NAS to store GIMR data +# Specify 'true' if you would like to separate GIMR data with clusterware data, else +# specify 'false' +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------ +oracle.install.asmOnNAS.configureGIMRDataDG=false + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing GIMR data +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store the GIMR database +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +# and oracle.install.asmOnNAS.configureGIMRDataDG=true 
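+# Illustrative value only (the NFS path below is hypothetical):
+#   oracle.install.asmOnNAS.gimrLocation=/oradata/nfs/gimr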
+#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.gimrLocation= + +#------------------------------------------------------------------------------- +# Password for SYS user of Oracle ASM +#------------------------------------------------------------------------------- +oracle.install.asm.SYSASMPassword= + +#------------------------------------------------------------------------------- +# The ASM DiskGroup +# +# Example: oracle.install.asm.diskGroup.name=data +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.name= + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (required if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.diskGroup.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.redundancy= + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.diskGroup.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. +# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create a ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.diskGroup.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create a ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. 
+# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.diskGroup.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.quorumFailureGroupNames= +#------------------------------------------------------------------------------- +# The disk discovery string to be used to discover the disks used create a ASM DiskGroup +# +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=/oracle/asm/* +# For Windows based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK* +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.diskDiscoveryString= + +#------------------------------------------------------------------------------- +# Password for ASMSNMP account +# ASMSNMP account is used by Oracle Enterprise Manager to monitor Oracle ASM instances +#------------------------------------------------------------------------------- +oracle.install.asm.monitorPassword= + +#------------------------------------------------------------------------------- +# GIMR Storage data ASM DiskGroup +# Applicable only when +# oracle.install.asm.configureGIMRDataDG=true +# Example: oracle.install.asm.GIMRDG.name=MGMT +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.name= + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (only if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.gimrDG.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.redundancy= + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.gimrDG.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the GIMR storage data ASM disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. 
+# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create GIMR data ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.gimrDG.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create GIMR data ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.gimrDG.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# Configure AFD - ASM Filter Driver +# Applicable only for FLEX_ASM_STORAGE option +# Specify 'true' if you want to configure AFD, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.asm.configureAFD=false +#------------------------------------------------------------------------------- +# Configure RHPS - Rapid Home Provisioning Service +# Applicable only for DOMAIN cluster configuration +# Specify 'true' if you want to configure RHP service, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.configureRHPS=false + +################################################################################ +# # +# SECTION H - UPGRADE # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify whether to ignore down nodes during upgrade operation. 
+# Value should be 'true' to ignore down nodes otherwise specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.ignoreDownNodes=false +################################################################################ +# # +# MANAGEMENT OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing Oracle Grid Infrastructure +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +# 2. NONE -If you do not want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +#------------------------------------------------------------------------------- +oracle.install.config.managementOption=NONE + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsPort=0 + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminPassword= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.executeRootScript=false + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. 
+# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.configMethod= +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoUserName= +#-------------------------------------------------------------------------------------- +# Specify the nodes batch map. +# +# This should be a comma separated list of node:batch pairs. +# During upgrade, you can sequence the automatic execution of root scripts +# by pooling the nodes into batches. +# A maximum of three batches can be specified. +# Installer will execute the root scripts on all the nodes in one batch before +# proceeding to next batch. +# Root script execution on the local node must be in Batch 1. +# Only one type of node role can be used for each batch. +# Root script execution should be done first in all HUB nodes and then, when +# existent, in all the LEAF nodes. +# +# Examples: +# 1. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:2,HUBNode3:2,LEAFNode4:3 +# 2. oracle.install.crs.config.batchinfo=HUBNode1:1,LEAFNode2:2,LEAFNode3:2,LEAFNode4:2 +# 3. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:1,LEAFNode3:2,LEAFNode4:3 +# +# Applicable only for UPGRADE install option. +#-------------------------------------------------------------------------------------- +oracle.install.crs.config.batchinfo= +################################################################################ +# # +# APPLICATION CLUSTER OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the Virtual hostname to configure virtual access for your Application +# The value to be specified for Virtual hostname is optional. +#------------------------------------------------------------------------------- +oracle.install.crs.app.applicationAddress= +################################################################################# +# # +# DELETE NODE OPTIONS # +# # +################################################################################# + +#-------------------------------------------------------------------------------- +# Specify the node names to delete nodes from cluster. +# Delete node will be performed only for the remote nodes from the cluster. 
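+# Deleting nodes is typically paired with the CRS_DELETE_NODE install option from
+# SECTION A, for example (hypothetical host name, not part of the original template):
+#   oracle.install.option=CRS_DELETE_NODE
+#   oracle.install.crs.deleteNode.nodes=racnode3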
+#-------------------------------------------------------------------------------- +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/gridsetup_19c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/gridsetup_19c.rsp new file mode 100644 index 0000000000..7f27a11e65 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/gridsetup_19c.rsp @@ -0,0 +1,63 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v19.0.0 +INVENTORY_LOCATION=###INVENTORY### +oracle.install.option=CRS_CONFIG +ORACLE_BASE=###GRID_BASE### +oracle.install.asm.OSOPER= +oracle.install.asm.OSASM=asmadmin +oracle.install.crs.config.scanType=###SCAN_TYPE### +oracle.install.crs.config.SCANClientDataFile=###SHARED_SCAN_FILE### +oracle.install.crs.config.gpnp.scanName=###SCAN_NAME### +oracle.install.crs.config.gpnp.scanPort=###SCAN_PORT### +oracle.install.crs.config.ClusterConfiguration=###CLUSTER_TYPE### +oracle.install.crs.config.configureAsExtendedCluster=false +oracle.install.crs.config.memberClusterManifestFile=###MEMBERDB_FILE### +oracle.install.crs.config.clusterName=###CLUSTER_NAME### +oracle.install.crs.config.gpnp.configureGNS=###CONFIGURE_GNS### +oracle.install.crs.config.autoConfigureClusterNodeVIP=###DHCP_CONF### +oracle.install.crs.config.gpnp.gnsOption=###GNS_OPTIONS### +oracle.install.crs.config.gpnp.gnsClientDataFile= +oracle.install.crs.config.gpnp.gnsSubDomain=###GNS_SUBDOMAIN### +oracle.install.crs.config.gpnp.gnsVIPAddress=###GNSVIP_HOSTNAME### +oracle.install.crs.config.sites= +oracle.install.crs.config.clusterNodes=###CRS_CONFIG_NODES### +oracle.install.crs.config.networkInterfaceList=###NETWORK_STRING### +oracle.install.crs.configureGIMR=###GIMR_FLAG### +oracle.install.asm.configureGIMRDataDG=###GIMR_DG_FLAG### +oracle.install.crs.config.storageOption=###STORAGE_OPTIONS_FOR_MEMBERDB### +oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations= +oracle.install.crs.config.sharedFileSystemStorage.ocrLocations= +oracle.install.crs.config.useIPMI= +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +oracle.install.asm.SYSASMPassword=###PASSWORD### +oracle.install.asm.diskGroup.name=###DB_ASM_DISKGROUP### +oracle.install.asm.diskGroup.redundancy=###ASM_REDUNDANCY### +oracle.install.asm.diskGroup.AUSize=4 +oracle.install.asm.diskGroup.FailureGroups=###ASM_DG_FAILURE_GROUP### +oracle.install.asm.diskGroup.disksWithFailureGroupNames=###ASM_DISKGROUP_FG_DISKS### +oracle.install.asm.diskGroup.disks=###ASM_DISKGROUP_DISKS### +oracle.install.asm.diskGroup.quorumFailureGroupNames= +oracle.install.asm.diskGroup.diskDiscoveryString=###ASM_DISCOVERY_STRING### +oracle.install.asm.monitorPassword=###PASSWORD### +oracle.install.asm.gimrDG.name=###GIMR_DG_NAME### +oracle.install.asm.gimrDG.redundancy=###GIMR_DG_REDUNDANCY### +oracle.install.asm.gimrDG.AUSize=1 +oracle.install.asm.gimrDG.FailureGroups=###GIMR_DG_FAILURE_GROUP### +oracle.install.asm.gimrDG.disksWithFailureGroupNames=###GIMR_DISKGROUP_FG_DISKS### +oracle.install.asm.gimrDG.disks=###GIMR_DISKGROUP_DISKS### +oracle.install.asm.gimrDG.quorumFailureGroupNames= +oracle.install.asm.configureAFD=false +oracle.install.crs.configureRHPS=false +oracle.install.crs.config.ignoreDownNodes=false +oracle.install.config.managementOption=NONE +oracle.install.config.omsHost= +oracle.install.config.omsPort=0 +oracle.install.config.emAdminUser= 
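+# NOTE (illustrative, not part of the original template): the ###...### tokens in this
+# condensed response file are placeholders that the RAC image setup scripts are expected
+# to replace at container configuration time before the silent install runs; a sed-based
+# substitution is one plausible mechanism, for example:
+#   sed -i -e "s|###INVENTORY###|/u01/app/oraInventory|g" \
+#          -e "s|###SCAN_NAME###|racnode-scan|g" gridsetup_19c.rsp
+# and the populated file would then typically be passed to the installer, e.g.:
+#   $GRID_HOME/gridSetup.sh -silent -responseFile /tmp/gridsetup_19c.rsp
+# Paths and values above are assumptions; the authoritative flow is in the image scripts.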
+oracle.install.config.emAdminPassword= +oracle.install.crs.rootconfig.executeRootScript=false +oracle.install.crs.rootconfig.configMethod=ROOT +oracle.install.crs.rootconfig.sudoPath= +oracle.install.crs.rootconfig.sudoUserName= +oracle.install.crs.config.batchinfo= +oracle.install.crs.app.applicationAddress= +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/gridsetup_19cv1.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/gridsetup_19cv1.rsp new file mode 100644 index 0000000000..216be2b8a1 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/gridsetup_19cv1.rsp @@ -0,0 +1,653 @@ +############################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +############################################################################### + +############################################################################### +## ## +## Instructions to fill this response file ## +## To register and configure 'Grid Infrastructure for Cluster' ## +## - Fill out sections A,B,C,D,E,F and G ## +## - Fill out section G if OCR and voting disk should be placed on ASM ## +## ## +## To register and configure 'Grid Infrastructure for Standalone server' ## +## - Fill out sections A,B and G ## +## ## +## To register software for 'Grid Infrastructure' ## +## - Fill out sections A,B and D ## +## - Provide the cluster nodes in section D when choosing CRS_SWONLY as ## +## installation option in section A ## +## ## +## To upgrade clusterware and/or Automatic storage management of earlier ## +## releases ## +## - Fill out sections A,B,C,D and H ## +## ## +## To add more nodes to the cluster ## +## - Fill out sections A and D ## +## - Provide the cluster nodes in section D when choosing CRS_ADDNODE as ## +## installation option in section A ## +## ## +############################################################################### + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v19.0.0 + +############################################################################### +# # +# SECTION A - BASIC # +# # +############################################################################### + + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION= + +#------------------------------------------------------------------------------- +# Specify the installation option. 
+# Allowed values: CRS_CONFIG or HA_CONFIG or UPGRADE or CRS_SWONLY or HA_SWONLY +# - CRS_CONFIG : To register home and configure Grid Infrastructure for cluster +# - HA_CONFIG : To register home and configure Grid Infrastructure for stand alone server +# - UPGRADE : To register home and upgrade clusterware software of earlier release +# - CRS_SWONLY : To register Grid Infrastructure Software home (can be configured for cluster +# or stand alone server later) +# - HA_SWONLY : To register Grid Infrastructure Software home (can be configured for stand +# alone server later. This is only supported on Windows.) +# - CRS_ADDNODE : To add more nodes to the cluster +# - CRS_DELETE_NODE : To delete nodes to the cluster +#------------------------------------------------------------------------------- +oracle.install.option= + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE= + +################################################################################ +# # +# SECTION B - GROUPS # +# # +# The following three groups need to be assigned for all GI installations. # +# OSDBA and OSOPER can be the same or different. OSASM must be different # +# than the other two. # +# The value to be specified for OSDBA, OSOPER and OSASM group is only for # +# Unix based Operating System. # +# These groups are not required for upgrades, as they will be determined # +# from the Oracle home to upgrade. # +# # +################################################################################ +#------------------------------------------------------------------------------- +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.asm.OSDBA= + +#------------------------------------------------------------------------------- +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +# Value should not be provided if configuring Client Cluster - i.e. storageOption=CLIENT_ASM_STORAGE. +#------------------------------------------------------------------------------- +oracle.install.asm.OSOPER= + +#------------------------------------------------------------------------------- +# The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This +# must be different than the previous two. 
+#------------------------------------------------------------------------------- +oracle.install.asm.OSASM= + +################################################################################ +# # +# SECTION C - SCAN # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the type of SCAN configuration for the cluster +# Allowed values : LOCAL_SCAN and SHARED_SCAN +#------------------------------------------------------------------------------- +oracle.install.crs.config.scanType= + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_SCAN is being configured for cluster +# Specify the path to the SCAN client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.SCANClientDataFile= + +#------------------------------------------------------------------------------- +# Specify a name for SCAN +# Applicable if LOCAL_SCAN is being configured for the cluster +# If you choose to configure the cluster with GNS with Auto assigned Node VIPs(DHCP),then the scanName should be specified in the format of 'SCAN name.Cluster name.GNS sub-domain' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.scanName= + +#------------------------------------------------------------------------------- +# Specify a unused port number for SCAN service +#------------------------------------------------------------------------------- + +oracle.install.crs.config.gpnp.scanPort= + +################################################################################ +# # +# SECTION D - CLUSTER & GNS # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the required cluster configuration +# Allowed values: STANDALONE, DOMAIN, MEMBERDB, MEMBERAPP +#------------------------------------------------------------------------------- +oracle.install.crs.config.ClusterConfiguration= + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure the cluster as Extended, else +# specify 'false' +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.configureAsExtendedCluster= + + +#------------------------------------------------------------------------------- +# Specify the Member Cluster Manifest file +# +# Applicable only for MEMBERDB and MEMBERAPP cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.memberClusterManifestFile= + +#------------------------------------------------------------------------------- +# Specify a name for the Cluster you are creating. +# +# The maximum length allowed for clustername is 63 characters. The name can be +# any combination of lower and uppercase alphabets (A - Z), (0 - 9) and hyphens (-). 
+# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterName= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE, DOMAIN, MEMBERDB cluster configuration. +# Specify 'true' if you would like to configure Grid Naming Service(GNS), else +# specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.configureGNS= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to configure GNS. +# Specify 'true' if you would like to assign SCAN name VIP and Node VIPs by DHCP +# , else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.autoConfigureClusterNodeVIP= + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure GNS. +# Specify the type of GNS configuration for cluster +# Allowed values are: CREATE_NEW_GNS and USE_SHARED_GNS +# Only USE_SHARED_GNS value is allowed for MEMBERDB cluster configuration. +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsOption= + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_GNS is being configured for cluster +# Specify the path to the GNS client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsClientDataFile= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to +# configure GNS for this cluster oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS +# Specify the GNS subdomain and an unused virtual hostname for GNS service +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= + +#------------------------------------------------------------------------------- +# Specify the list of sites - only if configuring an Extended Cluster +#------------------------------------------------------------------------------- +oracle.install.crs.config.sites= + +#------------------------------------------------------------------------------- +# Specify the list of nodes that have to be configured to be part of the cluster. +# +# The list should a comma-separated list of tuples. Each tuple should be a +# colon-separated string that contains +# - 1 field if you have chosen CRS_SWONLY as installation option, or +# - 1 field if configuring an Application Cluster, or +# - 3 fields if configuring a Flex Cluster +# - 3 fields if adding more nodes to the configured cluster, or +# - 4 fields if configuring an Extended Cluster +# +# The fields should be ordered as follows: +# 1. The first field should be the public node name. +# 2. The second field should be the virtual host name +# (Should be specified as AUTO if you have chosen 'auto configure for VIP' +# i.e. autoConfigureClusterNodeVIP=true) +# 3. The third field indicates the site designation for the node. To be specified only if configuring an Extended Cluster. 
+# Only the 1st field is applicable if you have chosen CRS_SWONLY as installation option +# Only the 1st field is applicable if configuring an Application Cluster +# +# Examples +# For registering GI for a cluster software: oracle.install.crs.config.clusterNodes=node1,node2 +# For adding more nodes to the configured cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip,node2:node2-vip +# For configuring Application Cluster: oracle.install.crs.config.clusterNodes=node1,node2 +# For configuring Flex Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip,node2:node2-vip +# For configuring Extended Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:site1,node2:node2-vip:site2 +# You can specify a range of nodes in the tuple using colon separated fields of format +# hostnameprefix:lowerbound-upperbound:hostnamesuffix:vipsuffix:role of node +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterNodes= + +#------------------------------------------------------------------------------- +# The value should be a comma separated strings where each string is as shown below +# InterfaceName:SubnetAddress:InterfaceType +# where InterfaceType can be either "1", "2", "3", "4", or "5" +# InterfaceType stand for the following values +# - 1 : PUBLIC +# - 2 : PRIVATE +# - 3 : DO NOT USE +# - 4 : ASM +# - 5 : ASM & PRIVATE +# +# For example: eth0:140.87.24.0:1,eth1:10.2.1.0:2,eth2:140.87.52.0:3 +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.networkInterfaceList= + +#------------------------------------------------------------------------------ +# Specify 'true' if you would like to configure Grid Infrastructure Management +# Repository (GIMR), else specify 'false'. +# This option is only applicable when CRS_CONFIG is chosen as install option, +# and STANDALONE is chosen as cluster configuration. +#------------------------------------------------------------------------------ +oracle.install.crs.configureGIMR= + +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup to store GIMR data. +# Specify 'true' if you would like to separate GIMR data with clusterware data, +# else specify 'false' +# Value should be 'true' for DOMAIN cluster configurations +# Value can be true/false for STANDALONE cluster configurations. +#------------------------------------------------------------------------------ +oracle.install.asm.configureGIMRDataDG= + +################################################################################ +# # +# SECTION E - STORAGE # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for Oracle Cluster Registry(OCR) and Voting +# Disks files. Only applicable for Standalone and MemberDB cluster. +# - FLEX_ASM_STORAGE +# - CLIENT_ASM_STORAGE +# - FILE_SYSTEM_STORAGE +# +# Option FILE_SYSTEM_STORAGE is only for STANDALONE cluster configuration. 
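+# Illustrative choice only: a STANDALONE cluster that keeps OCR and voting files in ASM
+# would typically use
+#   oracle.install.crs.config.storageOption=FLEX_ASM_STORAGE
+# whereas FILE_SYSTEM_STORAGE requires the sharedFileSystemStorage.* locations below.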
+#------------------------------------------------------------------------------- +oracle.install.crs.config.storageOption= + +#------------------------------------------------------------------------------- +# These properties are applicable only if FILE_SYSTEM_STORAGE is chosen for +# storing OCR and voting disk +# Specify the location(s) for OCR and voting disks +# Three(3) or one(1) location(s) should be specified for OCR and voting disk, +# separated by commas. +# Example: +# For Unix based Operating System: +# oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations=/oradbocfs/storage/vdsk1,/oradbocfs/storage/vdsk2,/oradbocfs/storage/vdsk3 +# oracle.install.crs.config.sharedFileSystemStorage.ocrLocations=/oradbocfs/storage/ocr1,/oradbocfs/storage/ocr2,/oradbocfs/storage/ocr3 +# For Windows based Operating System OCR/VDSK on shared storage is not supported. +#------------------------------------------------------------------------------- +oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations= +oracle.install.crs.config.sharedFileSystemStorage.ocrLocations= +################################################################################ +# # +# SECTION F - IPMI # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure Intelligent Power Management interface +# (IPMI), else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.useIPMI= + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure IPMI +# i.e. oracle.install.crs.config.useIPMI=true +# Specify the username and password for using IPMI service +#------------------------------------------------------------------------------- +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +################################################################################ +# # +# SECTION G - ASM # +# # +################################################################################ + + +#------------------------------------------------------------------------------- +# Password for SYS user of Oracle ASM +#------------------------------------------------------------------------------- +oracle.install.asm.SYSASMPassword= + +#------------------------------------------------------------------------------- +# The ASM DiskGroup +# +# Example: oracle.install.asm.diskGroup.name=data +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.name= + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (required if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.diskGroup.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.redundancy= + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. 
+# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.diskGroup.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.AUSize= + +#------------------------------------------------------------------------------- +# Failure Groups for the disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. +# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create a ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.diskGroup.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create a ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. 
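[Editor's illustration] A hedged example of the ASM disk group properties covered above, again as a bash heredoc. The group name DATA, the /dev/asm-disk* paths, and EXTERNAL redundancy (which makes failure groups unnecessary) are assumptions chosen only for illustration.

```bash
#!/bin/bash
# Hypothetical ASM disk group fragment; device paths and group name are examples.
cat > /tmp/gridsetup_asm_fragment.rsp <<'EOF'
oracle.install.asm.diskGroup.name=DATA
oracle.install.asm.diskGroup.redundancy=EXTERNAL
oracle.install.asm.diskGroup.AUSize=4
oracle.install.asm.diskGroup.disks=/dev/asm-disk1,/dev/asm-disk2
oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm*
EOF
```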
+# Quorum failure groups contain only voting disk data, no user data is stored
+# Example:
+# oracle.install.asm.diskGroup.quorumFailureGroupNames=FGName1,FGName2
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.quorumFailureGroupNames=
+#-------------------------------------------------------------------------------
+# The disk discovery string to be used to discover the disks used create a ASM DiskGroup
+#
+# Example:
+# For Unix based Operating System:
+# oracle.install.asm.diskGroup.diskDiscoveryString=/oracle/asm/*
+# For Windows based Operating System:
+# oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK*
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.diskDiscoveryString=
+
+#-------------------------------------------------------------------------------
+# Password for ASMSNMP account
+# ASMSNMP account is used by Oracle Enterprise Manager to monitor Oracle ASM instances
+#-------------------------------------------------------------------------------
+oracle.install.asm.monitorPassword=
+
+#-------------------------------------------------------------------------------
+# GIMR Storage data ASM DiskGroup
+# Applicable only when
+# oracle.install.asm.configureGIMRDataDG=true
+# Example: oracle.install.asm.GIMRDG.name=MGMT
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.gimrDG.name=
+
+#-------------------------------------------------------------------------------
+# Redundancy level to be used by ASM.
+# It can be one of the following
+# - NORMAL
+# - HIGH
+# - EXTERNAL
+# - FLEX
+# - EXTENDED (only if oracle.install.crs.config.ClusterConfiguration=EXTENDED)
+# Example: oracle.install.asm.gimrDG.redundancy=NORMAL
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.gimrDG.redundancy=
+
+#-------------------------------------------------------------------------------
+# Allocation unit size to be used by ASM.
+# It can be one of the following values
+# - 1
+# - 2
+# - 4
+# - 8
+# - 16
+# Example: oracle.install.asm.gimrDG.AUSize=4
+# size unit is MB
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.gimrDG.AUSize=
+
+#-------------------------------------------------------------------------------
+# Failure Groups for the GIMR storage data ASM disk group
+# If configuring for Extended cluster specify as list of "failure group name:site"
+# tuples.
+# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create GIMR data ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.gimrDG.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create GIMR data ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.gimrDG.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# Configure AFD - ASM Filter Driver +# Applicable only for FLEX_ASM_STORAGE option +# Specify 'true' if you want to configure AFD, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.asm.configureAFD= +#------------------------------------------------------------------------------- +# Configure RHPS - Rapid Home Provisioning Service +# Applicable only for DOMAIN cluster configuration +# Specify 'true' if you want to configure RHP service, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.configureRHPS= + +################################################################################ +# # +# SECTION H - UPGRADE # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify whether to ignore down nodes during upgrade operation. 
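[Editor's illustration] Because the GIMR disk group block above is only meaningful when `oracle.install.asm.configureGIMRDataDG=true`, a small consistency check can catch a half-filled response file before the silent installer is run. This is a hypothetical helper, not part of this change; the default path and the property names it greps for are taken from the template above.

```bash
#!/bin/bash
# Hypothetical sanity check: warn when a separate GIMR disk group is requested
# but its name is left empty in the response file passed as the first argument.
RSP="${1:-/tmp/gridsetup.rsp}"
[ -f "$RSP" ] || { echo "No response file at $RSP" >&2; exit 0; }
wants_gimr_dg=$(grep -E '^oracle\.install\.asm\.configureGIMRDataDG=' "$RSP" | cut -d= -f2)
gimr_dg_name=$(grep -E '^oracle\.install\.asm\.gimrDG\.name=' "$RSP" | cut -d= -f2)
if [ "$wants_gimr_dg" = "true" ] && [ -z "$gimr_dg_name" ]; then
  echo "WARNING: configureGIMRDataDG=true but oracle.install.asm.gimrDG.name is empty" >&2
fi
```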
+# Value should be 'true' to ignore down nodes otherwise specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.ignoreDownNodes= +################################################################################ +# # +# MANAGEMENT OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing Oracle Grid Infrastructure +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +# 2. NONE -If you do not want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +#------------------------------------------------------------------------------- +oracle.install.config.managementOption= + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsPort= + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminPassword= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.executeRootScript= + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. +# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.configMethod= +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. 
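[Editor's illustration] A hedged example of the management and root script execution properties described in this part of the template, written as another bash heredoc. The choice of NONE, SUDO, the sudo path, and the `grid` user name are assumptions for illustration only.

```bash
#!/bin/bash
# Hypothetical root-script execution fragment; values are examples, not defaults.
cat > /tmp/gridsetup_root_fragment.rsp <<'EOF'
oracle.install.config.managementOption=NONE
oracle.install.crs.rootconfig.executeRootScript=true
oracle.install.crs.rootconfig.configMethod=SUDO
oracle.install.crs.rootconfig.sudoPath=/usr/bin/sudo
oracle.install.crs.rootconfig.sudoUserName=grid
EOF
```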
+# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# Applicable only when SUDO configuration method was chosen. +# Note:For Grid Infrastructure for Standalone server installations,the sudo user name must be the username of the user performing the installation. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoUserName= +#-------------------------------------------------------------------------------------- +# Specify the nodes batch map. +# +# This should be a comma separated list of node:batch pairs. +# During upgrade, you can sequence the automatic execution of root scripts +# by pooling the nodes into batches. +# A maximum of three batches can be specified. +# Installer will execute the root scripts on all the nodes in one batch before +# proceeding to next batch. +# Root script execution on the local node must be in Batch 1. +# +# Examples: +# 1. oracle.install.crs.config.batchinfo=Node1:1,Node2:2,Node3:2,Node4:3 +# 2. oracle.install.crs.config.batchinfo=Node1:1,Node2:2,Node3:2,Node4:2 +# 3. oracle.install.crs.config.batchinfo=Node1:1,Node2:1,Node3:2,Node4:3 +# +# Applicable only for UPGRADE install option. +#-------------------------------------------------------------------------------------- +oracle.install.crs.config.batchinfo= +################################################################################ +# # +# APPLICATION CLUSTER OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the Virtual hostname to configure virtual access for your Application +# The value to be specified for Virtual hostname is optional. +#------------------------------------------------------------------------------- +oracle.install.crs.app.applicationAddress= +################################################################################# +# # +# DELETE NODE OPTIONS # +# # +################################################################################# + +#-------------------------------------------------------------------------------- +# Specify the node names to delete nodes from cluster. +# Delete node will be performed only for the remote nodes from the cluster. +#-------------------------------------------------------------------------------- +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/initsh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/initsh new file mode 100755 index 0000000000..288be9b92c --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/initsh @@ -0,0 +1,15 @@ +#!/bin/bash +############################# + +# Copyright 2025, Oracle Corporation and/or affiliates. All rights reserved. 
+ +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl + +# Author: paramdeep.saini@oracle.com + +echo "Creating env variables file /etc/rac_env_vars" +/bin/bash -c "cat /proc/1/environ | tr '\0' '\n' > /etc/rac_env_vars" +/bin/bash -c "sed -i -e 's/^/export /' /etc/rac_env_vars" + +echo "Starting Systemd" +exec /lib/systemd/systemd diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/installDBBinaries.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/installDBBinaries.sh new file mode 100755 index 0000000000..c898a24df1 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/installDBBinaries.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: December, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description:Installing Oracle DB software +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +EDITION=$1 + +# Check whether edition has been passed on +if [ "$EDITION" == "" ]; then + echo "ERROR: No edition has been passed on!" + echo "Please specify the correct edition!" + exit 1; +fi; + +# Check whether correct edition has been passed on +# shellcheck disable=SC2166 +if [ "$EDITION" != "EE" -a "$EDITION" != "SE2" ]; then + echo "ERROR: Wrong edition has been passed on!" + echo "Edition $EDITION is no a valid edition!" + exit 1; +fi; + +# Check whether DB_BASE is set +if [ "$DB_BASE" == "" ]; then + echo "ERROR: DB_BASE has not been set!" + echo "You have to have the DB_BASE environment variable set to a valid value!" + exit 1; +fi; + +# Check whether DB_HOME is set +if [ "$DB_HOME" == "" ]; then + echo "ERROR: DB_HOME has not been set!" + echo "You have to have the DB_HOME environment variable set to a valid value!" + exit 1; +fi; + +# Replace place holders +# --------------------- +sed -i -e "s|###ORACLE_EDITION###|$EDITION|g" "$INSTALL_SCRIPTS"/"$DB_INSTALL_RSP" && \ +sed -i -e "s|###DB_BASE###|$DB_BASE|g" "$INSTALL_SCRIPTS"/"$DB_INSTALL_RSP" && \ +sed -i -e "s|###DB_HOME###|$DB_HOME|g" "$INSTALL_SCRIPTS"/"$DB_INSTALL_RSP" && \ +sed -i -e "s|###INVENTORY###|$INVENTORY|g" "$INSTALL_SCRIPTS"/"$DB_INSTALL_RSP" + +export ORACLE_HOME=${DB_HOME} +export PATH=${ORACLE_HOME}/bin:/bin:/sbin:/usr/bin +export LD_LIBRARY_PATH=${ORACLE_HOME}/lib:/lib:/usr/lib + +# Install Oracle binaries +if [ "${DB_USER}" != "${GRID_USER}" ]; then +mkdir -p /home/"${DB_USER}"/.ssh && \ +chmod 700 /home/"${DB_USER}"/.ssh +fi + + +# Install Oracle binaries +# shellcheck disable=SC2015 +unzip -q "$INSTALL_SCRIPTS"/"$INSTALL_FILE_2" -d "$DB_HOME" && \ +"$DB_HOME"/runInstaller -silent -force -waitforcompletion -responsefile "$INSTALL_SCRIPTS"/"$DB_INSTALL_RSP" -ignorePrereqFailure || true diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/installGridBinaries.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/installGridBinaries.sh new file mode 100755 index 0000000000..ebab8e91d8 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/installGridBinaries.sh @@ -0,0 +1,59 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: December, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Install grid software inside the container. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
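[Editor's illustration] The initsh entrypoint above works by copying PID 1's environment into a file that later shells can source. A minimal standalone sketch of that same pattern is shown below; it reads the current shell's own /proc entry instead of PID 1 and writes to a made-up path, so it can be run on any Linux host without the container.

```bash
#!/bin/bash
# Sketch of the env-capture pattern used by initsh: /proc/<pid>/environ holds
# NUL-separated KEY=VALUE pairs, so translating NUL to newline and prefixing
# 'export ' produces a file that a later shell can source. Path is hypothetical.
tr '\0' '\n' < /proc/$$/environ | sed -e 's/^/export /' > /tmp/env_vars_demo
echo "Captured $(wc -l < /tmp/env_vars_demo) environment variables into /tmp/env_vars_demo"
```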
+# + +EDITION=$1 +# shellcheck disable=SC2034 +PATCH_NUMBER=$2 + +# Check whether edition has been passed on +if [ "$EDITION" == "" ]; then + echo "ERROR: No edition has been passed on!" + echo "Please specify the correct edition!" + exit 1; +fi; + +# Check whether correct edition has been passed on +if [ "$EDITION" != "EE" ]; then + echo "ERROR: Wrong edition has been passed on!" + echo "Edition $EDITION is no a valid edition!" + exit 1; +fi; + +# Check whether GRID_BASE is set +if [ "$GRID_BASE" == "" ]; then + echo "ERROR: GRID_BASE has not been set!" + echo "You have to have the GRID_BASE environment variable set to a valid value!" + exit 1; +fi; + +# Check whether GRID_HOME is set +if [ "$GRID_HOME" == "" ]; then + echo "ERROR: GRID_HOME has not been set!" + echo "You have to have the GRID_HOME environment variable set to a valid value!" + exit 1; +fi; + + +temp_var1=`hostname` + +# Replace place holders +# --------------------- +sed -i -e "s|###HOSTNAME###|$temp_var1|g" "$INSTALL_SCRIPTS"/"$GRID_SW_INSTALL_RSP" && \ +sed -i -e "s|###INSTALL_TYPE###|CRS_SWONLY|g" "$INSTALL_SCRIPTS"/"$GRID_SW_INSTALL_RSP" && \ +sed -i -e "s|###GRID_BASE###|$GRID_BASE|g" "$INSTALL_SCRIPTS"/"$GRID_SW_INSTALL_RSP" && \ +sed -i -e "s|###INVENTORY###|$INVENTORY|g" "$INSTALL_SCRIPTS"/"$GRID_SW_INSTALL_RSP" + +# Install Oracle binaries +mkdir -p /home/grid/.ssh && \ +chmod 700 /home/grid/.ssh && \ +unzip -q "$INSTALL_SCRIPTS"/"$INSTALL_FILE_1" -d "$GRID_HOME" && \ +"$GRID_HOME"/gridSetup.sh -silent -responseFile "$INSTALL_SCRIPTS"/"$GRID_SW_INSTALL_RSP" -ignorePrereqFailure || true diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/runOracle.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/runOracle.sh new file mode 100755 index 0000000000..34b6808475 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/runOracle.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Runs the Oracle RAC Database inside the container +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +if [ -f /etc/rac_env_vars ]; then +source /etc/rac_env_vars +fi + +################################### +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # +############# MAIN ################ +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # +################################### + +if [ -z ${BASE_DIR} ]; then + BASE_DIR=/opt/scripts/startup/scripts +else + BASE_DIR=$SCRIPT_DIR/scripts +fi + +if [ -z ${MAIN_SCRIPT} ]; then + SCRIPT_NAME="main.py" +fi + +if [ -z ${EXECUTOR} ]; then + EXECUTOR="python3" +fi +# shellcheck disable=SC2164 +cd $BASE_DIR +$EXECUTOR $SCRIPT_NAME + +# Tail on alert log and wait (otherwise container will exit) diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupDB.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupDB.sh new file mode 100755 index 0000000000..2ee6797d2e --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupDB.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: paramdeep.saini@oracle.com +# Description: Sets up the unix environment for DB installation. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
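[Editor's illustration] Both installGridBinaries.sh and installDBBinaries.sh prepare their silent-install response files by substituting `###PLACEHOLDER###` tokens with sed before calling the installer. The following self-contained sketch reproduces that substitution on a throwaway template; the file name, placeholder values, and paths are assumptions, not the repository's actual response files.

```bash
#!/bin/bash
# Standalone demo of the placeholder substitution used by the install scripts.
cat > /tmp/demo_install.rsp <<'EOF'
ORACLE_HOSTNAME=###HOSTNAME###
ORACLE_BASE=###DB_BASE###
oracle.install.option=###INSTALL_TYPE###
EOF
sed -i -e "s|###HOSTNAME###|$(hostname)|g" \
       -e "s|###DB_BASE###|/u01/app/oracle|g" \
       -e "s|###INSTALL_TYPE###|INSTALL_DB_SWONLY|g" /tmp/demo_install.rsp
cat /tmp/demo_install.rsp
```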
+# + +# Create Directories +if [ "${SLIMMING}x" != 'truex' ]; then + mkdir -p "$DB_BASE" + mkdir -p "$DB_HOME" +fi + +usermod -g oinstall -G oinstall,dba,oper,backupdba,dgdba,kmdba,asmdba,asmoper,racdba,asmadmin "${DB_USER}" + +chmod 775 "$INSTALL_SCRIPTS" + + +if [ "${SLIMMING}x" != 'truex' ]; then + chown -R "${DB_USER}":oinstall "$DB_BASE" + chown -R "${DB_USER}":oinstall "$DB_HOME" + chown -R "${DB_USER}":oinstall "$INSTALL_SCRIPTS" + echo "export PATH=$DB_PATH" >> /home/"${DB_USER}"/.bashrc + echo "export LD_LIBRARY_PATH=$DB_LD_LIBRARY_PATH" >> /home/"${DB_USER}"/.bashrc + echo "export SCRIPT_DIR=$SCRIPT_DIR" >> /home/"${DB_USER}"/.bashrc + echo "export GRID_HOME=$GRID_HOME" >> /home/"${DB_USER}"/.bashrc + echo "export DB_BASE=$DB_BASE" >> /home/"${DB_USER}"/.bashrc + echo "export DB_HOME=$DB_HOME" >> /home/"${DB_USER}"/.bashrc +fi + +if [ "${SLIMMING}x" != 'truex' ]; then + if [ "${DB_USER}" == "${GRID_USER}" ]; then + sed -i '/PATH=/d' /home/"${DB_USER}"/.bashrc + echo "export PATH=$GRID_HOME/bin:$DB_PATH" >> /home/"${DB_USER}"/.bashrc + echo "export LD_LIBRARY_PATH=$GRID_HOME/lib:$DB_LD_LIBRARY_PATH" >> /home/"${DB_USER}"/.bashrc + fi +fi diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupGrid.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupGrid.sh new file mode 100755 index 0000000000..1788a0d8ec --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupGrid.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: paramdeep.saini@oracle.com +# Description: Sets up the unix environment for Grid installation. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# +# shellcheck disable=SC2034 +EDITION=$1 + +# Create Directories +if [ "${SLIMMING}x" != 'truex' ] ; then + mkdir -p "$GRID_BASE" + mkdir -p "$GRID_HOME" +fi + +groupadd -g 54334 asmadmin +groupadd -g 54335 asmdba +groupadd -g 54336 asmoper +useradd -u 54332 -g oinstall -G oinstall,asmadmin,asmdba,asmoper,racdba,dba "${GRID_USER}" + +chmod 666 /etc/sudoers +echo "${DB_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +echo "${GRID_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +chmod 440 /etc/sudoers + +if [ "${SLIMMING}x" != 'truex' ] ; then + chown -R "${GRID_USER}":oinstall "$GRID_BASE" + chown -R "${GRID_USER}":oinstall "$GRID_HOME" + mkdir -p "$INVENTORY" + chown -R "${GRID_USER}":oinstall "$INVENTORY" + # shellcheck disable=SC2129 + echo "export PATH=$GRID_PATH" >> /home/"${GRID_USER}"/.bashrc + echo "export LD_LIBRARY_PATH=$GRID_LD_LIBRARY_PATH" >> /home/"${GRID_USER}"/.bashrc + echo "export SCRIPT_DIR=$SCRIPT_DIR" >> /home/"${GRID_USER}"/.bashrc + echo "export GRID_HOME=$GRID_HOME" >> /home/"${GRID_USER}"/.bashrc + echo "export GRID_BASE=$GRID_BASE" >> /home/"${GRID_USER}"/.bashrc + echo "export DB_HOME=$DB_HOME" >> /home/"${GRID_USER}"/.bashrc +fi diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupLinuxEnv.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupLinuxEnv.sh new file mode 100755 index 0000000000..2c60f2ce41 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupLinuxEnv.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. 
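[Editor's illustration] setupDB.sh and setupGrid.sh above gate most of their work behind the `[ "${SLIMMING}x" != 'truex' ]` test. The appended literal `x` keeps the comparison well-formed even when SLIMMING is unset or empty; the short sketch below shows the idiom in isolation.

```bash
#!/bin/bash
# Demonstration of the guard idiom used by setupDB.sh/setupGrid.sh.
SLIMMING=""   # set to "true" to exercise the other branch
if [ "${SLIMMING}x" != 'truex' ]; then
  echo "full image: Oracle directories and user environment would be created here"
else
  echo "slim image: skipping Oracle home setup"
fi
```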
+# +# Since: January, 2018 +# Author: paramdeep.saini@oracle.com +# Description: Sets up the unix environment for DB installation. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +# Setup filesystem and oracle user +# Adjust file permissions, go to /opt/oracle as user 'oracle' to proceed with Oracle installation +# ------------------------------------------------------------ +## Use OCI yum repos on OCI instead of public yum +region=$(curl --noproxy '*' -sfm 3 -H "Authorization: Bearer Oracle" http://169.254.169.254/opc/v2/instance/ | sed -nE 's/^ *"regionIdentifier": "([^"]+)".*/\1/p') +if [ -n "$region" ]; then + echo "Detected OCI Region: $region" + for proxy in $(printenv | grep -i _proxy | cut -d= -f1); do unset $proxy; done + echo "-$region" > /etc/yum/vars/ociregion +fi + +mkdir /asmdisks && \ +mkdir /responsefiles && \ +chmod ug+x /opt/scripts/startup/*.sh && \ +yum -y install systemd oracle-database-preinstall-19c net-tools which zip unzip tar openssl expect e2fsprogs openssh-server vim-minimal passwd which sudo hostname policycoreutils-python-utils python3 lsof rsync && \ +yum clean all diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupSSH.expect b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupSSH.expect new file mode 100644 index 0000000000..627dedff34 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/setupSSH.expect @@ -0,0 +1,45 @@ +#!/usr/bin/expect -f +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Setup SSH between nodes +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +set username [lindex $argv 0]; +set script_loc [lindex $argv 1]; +set cluster_nodes [lindex $argv 2]; +set ssh_pass [lindex $argv 3]; + +set timeout 120 + +# Procedure to setup ssh from server +proc sshproc { ssh_pass } { + expect { + # Send password at 'Password' prompt and tell expect to continue(i.e. exp_continue) + -re "\[P|p]assword:" { exp_send "$ssh_pass\r" + exp_continue } + # Tell expect stay in this 'expect' block and for each character that SCP prints while doing the copy + # reset the timeout counter back to 0. + -re . 
{ exp_continue } + timeout { return 1 } + eof { return 0 } + } +} + +# Execute sshUserSetup.sh Script +set ssh_cmd "$script_loc/sshUserSetup.sh -user $username -hosts \"${cluster_nodes}\" -logfile /tmp/${username}_SetupSSH.log -advanced -exverify -noPromptPassphrase -confirm" + +eval spawn $ssh_cmd +set ssh_results [sshproc $ssh_pass] + +if { $ssh_results == 0 } { + exit 0 +} + +# Error attempting SSH, so exit with non-zero status +exit 1 diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/tempfile b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/tempfile new file mode 100644 index 0000000000..e69de29bb2 diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/Checksum b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/Checksum new file mode 100644 index 0000000000..039f9e9edc --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/Checksum @@ -0,0 +1,2 @@ +8ac915a800800ddf16a382506d3953db 21.3.0/LINUX.X64_213000_db_home.zip +b3fbdb7621ad82cbd4f40943effdd1be 21.3.0/LINUX.X64_213000_grid_home.zip \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/Containerfile b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/Containerfile new file mode 100644 index 0000000000..55a596eb67 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/Containerfile @@ -0,0 +1,256 @@ +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# ORACLE CONTAINERFILES PROJECT +# -------------------------- +# This is the Containerfile for Oracle Database 21c Release 3 Real Application Clusters +# +# REQUIRED FILES TO BUILD THIS IMAGE +# ---------------------------------- +# (1) LINUX.X64_213000_db_home.zip +# (2) LINUX.X64_213000_grid_home.zip +# Download Oracle Grid 21c Release 3 Enterprise Edition for Linux x64 +# Download Oracle Database 21c Release 3 Enterprise Edition for Linux x64 +# from http://www.oracle.com/technetwork/database/enterprise-edition/downloads/index.html +# +# HOW TO BUILD THIS IMAGE +# ----------------------- +# Run: +# $ docker build -t oracle/database:21c-rac . 
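[Editor's illustration] The Checksum file added above pairs md5 sums with the staged installer zips under 21.3.0/. Assuming the two zips have been downloaded into that directory and the command is run from the containerfiles directory of this repository, they could be verified like this before building; the working directory is an assumption on my part.

```bash
#!/bin/bash
# Hypothetical pre-build verification of the staged installer zips.
cd OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles || exit 1
md5sum -c 21.3.0/Checksum
```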
+ + +ARG BASE_OL_IMAGE=oraclelinux:8 +ARG SLIMMING=false +# Pull base image +# --------------- +# hadolint ignore=DL3006,DL3025 +FROM $BASE_OL_IMAGE AS base +ARG SLIMMING=false +ARG VERSION +# Labels +# ------ +LABEL "provider"="Oracle" \ + "issues"="https://github.com/oracle/docker-images/issues" \ + "volume.setup.location1"="/opt/scripts" \ + "volume.startup.location1"="/opt/scripts/startup" \ + "port.listener"="1521" \ + "port.oemexpress"="5500" + +# Argument to control removal of components not needed after db software installation +ARG INSTALL_FILE_1="LINUX.X64_213000_grid_home.zip" +ARG INSTALL_FILE_2="LINUX.X64_213000_db_home.zip" +ARG DB_EDITION="EE" +ARG USER="root" +ARG WORKDIR="/rac-work-dir" +ARG IGNORE_PREREQ=false + +# Environment variables required for this build (do NOT change) +# ------------------------------------------------------------- +# Linux Env Variable +# hadolint ignore=DL3044 +ENV SETUP_LINUX_FILE="setupLinuxEnv.sh" \ + INSTALL_DIR=/opt/scripts \ +# Grid Env variables + GRID_INSTALL_RSP="gridsetup_21c.rsp" \ + GRID_SW_INSTALL_RSP="grid_sw_install_21c.rsp" \ + GRID_SETUP_FILE="setupGrid.sh" \ + INITSH="initsh" \ + WORKDIR=$WORKDIR \ + FIXUP_PREQ_FILE="fixupPreq.sh" \ + INSTALL_GRID_BINARIES_FILE="installGridBinaries.sh" \ + INSTALL_GRID_PATCH="applyGridPatch.sh" \ + INVENTORY=/u01/app/oraInventory \ + INSTALL_FILE_1=$INSTALL_FILE_1 \ + INSTALL_FILE_2=$INSTALL_FILE_2 \ + DB_EDITION=$DB_EDITION \ + ADDNODE_RSP="grid_addnode_21c.rsp" \ + SETUPSSH="setupSSH.expect" \ + DOCKERORACLEINIT="dockeroracleinit" \ + GRID_USER_HOME="/home/grid" \ + ASM_DISCOVERY_DIR="/dev" \ +# RAC DB Env Variables + DB_INSTALL_RSP="db_sw_install_21c.rsp" \ + DBCA_RSP="dbca_21c.rsp" \ + DB_SETUP_FILE="setupDB.sh" \ + RUN_FILE="runOracle.sh" \ + ENABLE_RAC_FILE="enableRAC.sh" \ + INSTALL_DB_BINARIES_FILE="installDBBinaries.sh" \ + GRID_HOME_CLEANUP="GridHomeCleanup.sh" \ + ORACLE_HOME_CLEANUP="OracleHomeCleanup.sh" \ + DB_USER="oracle" \ + GRID_USER="grid" \ + SLIMMING=$SLIMMING \ + container="true" \ + COMMON_SCRIPTS="/common_scripts" \ + CHECK_SPACE_FILE="checkSpace.sh" \ + RESET_FAILED_UNITS="resetFailedUnits.sh" \ + SET_CRONTAB="setCrontab.sh" \ + CRONTAB_ENTRY="crontabEntry" \ + EXPECT="/usr/bin/expect" \ + BIN="/usr/sbin" \ + IGNORE_PREREQ=$IGNORE_PREREQ + +############################################# +# ------------------------------------------- +# Start new stage for Non-Slim Image +# ------------------------------------------- +############################################# + +FROM base AS rac-image-slim-false +ARG SLIMMING +ARG VERSION +# Environment variables required for this build (do NOT change) +# ------------------------------------------------------------- +# Linux Env Variable +ENV GRID_BASE=/u01/app/grid \ + GRID_HOME=/u01/app/21c/grid \ + DB_BASE=/u01/app/oracle \ + DB_HOME=/u01/app/oracle/product/21c/dbhome_1 +# Use second ENV so that variable get substituted +# hadolint ignore=DL3044 +ENV INSTALL_SCRIPTS=$INSTALL_DIR/install \ + PATH=/bin:/usr/bin:/sbin:/usr/sbin \ + SCRIPT_DIR=$INSTALL_DIR/startup \ + RAC_SCRIPTS_DIR="scripts" \ + GRID_PATH=$GRID_HOME/bin:$GRID_HOME/OPatch/:$GRID_HOME/perl/bin:/usr/sbin:/bin:/sbin \ + DB_PATH=$DB_HOME/bin:$DB_HOME/OPatch/:$DB_HOME/perl/bin:/usr/sbin:/bin:/sbin \ + GRID_LD_LIBRARY_PATH=$GRID_HOME/lib:/usr/lib:/lib \ + DB_LD_LIBRARY_PATH=$DB_HOME/lib:/usr/lib:/lib + +# Copy binaries +# ------------- +# COPY Binaries +COPY $VERSION/$SETUP_LINUX_FILE $VERSION/$GRID_SETUP_FILE $VERSION/$DB_SETUP_FILE $VERSION/$CHECK_SPACE_FILE 
$VERSION/$FIXUP_PREQ_FILE $INSTALL_SCRIPTS/ + +# Setup Scripts +COPY $VERSION/$RUN_FILE $VERSION/$ADDNODE_RSP $VERSION/$SETUPSSH $VERSION/$GRID_INSTALL_RSP $VERSION/$DBCA_RSP $VERSION/$INITSH $SCRIPT_DIR/ + +COPY $RAC_SCRIPTS_DIR $SCRIPT_DIR/scripts +# hadolint ignore=SC2086 +RUN chmod 755 $INSTALL_SCRIPTS/*.sh && \ + sync && \ + $INSTALL_DIR/install/$CHECK_SPACE_FILE && \ + $INSTALL_DIR/install/$SETUP_LINUX_FILE && \ + $INSTALL_DIR/install/$GRID_SETUP_FILE && \ + $INSTALL_DIR/install/$DB_SETUP_FILE && \ + sync + +############################################# +# ------------------------------------------- +# Start new stage for slim image +# ------------------------------------------- +############################################# +FROM base AS rac-image-slim-true +ARG SLIMMING +ARG VERSION +# Environment variables required for this build (do NOT change) +# ------------------------------------------------------------- +# Linux Env Variable +ENV INSTALL_SCRIPTS=$INSTALL_DIR/install \ + PATH=/bin:/usr/bin:/sbin:/usr/sbin \ + SCRIPT_DIR=$INSTALL_DIR/startup \ + RAC_SCRIPTS_DIR="scripts" + +# Copy binaries +# ------------- +# COPY Binaries +COPY $VERSION/$SETUP_LINUX_FILE $VERSION/$GRID_SETUP_FILE $VERSION/$DB_SETUP_FILE $VERSION/$CHECK_SPACE_FILE $VERSION/$FIXUP_PREQ_FILE $INSTALL_SCRIPTS/ + +# Setup Scripts +COPY $VERSION/$RUN_FILE $VERSION/$SETUPSSH $VERSION/$INITSH $SCRIPT_DIR/ + +COPY $RAC_SCRIPTS_DIR $SCRIPT_DIR/scripts +# hadolint ignore=SC2086 +RUN chmod 755 $INSTALL_SCRIPTS/*.sh && \ + sync && \ + $INSTALL_DIR/install/$CHECK_SPACE_FILE && \ + $INSTALL_DIR/install/$SETUP_LINUX_FILE && \ + $INSTALL_DIR/install/$GRID_SETUP_FILE && \ + $INSTALL_DIR/install/$DB_SETUP_FILE && \ + sync + + +############################################# +# ------------------------------------------- +# Start new stage for installing the grid and DB +# ------------------------------------------- +############################################# +# hadolint ignore=DL3006 +FROM rac-image-slim-${SLIMMING} AS builder +ARG SLIMMING +# hadolint ignore=DL3006 +ARG VERSION +COPY $VERSION/$INSTALL_GRID_BINARIES_FILE $VERSION/$GRID_SW_INSTALL_RSP $VERSION/$DB_SETUP_FILE $VERSION/$DB_INSTALL_RSP $VERSION/$INSTALL_DB_BINARIES_FILE $VERSION/$ENABLE_RAC_FILE $VERSION/$GRID_HOME_CLEANUP $VERSION/$ORACLE_HOME_CLEANUP $VERSION/$INSTALL_FILE_1* $VERSION/$INSTALL_FILE_2* $INSTALL_SCRIPTS/ +# hadolint ignore=SC2086 +RUN chmod 755 $INSTALL_SCRIPTS/*.sh + +## Install software if SLIMMING is false +# hadolint ignore=SC2086 +RUN if [ "${SLIMMING}x" != 'truex' ]; then \ + sed -e '/hard *memlock/s/^/#/g' -i /etc/security/limits.d/oracle-database-preinstall-21c.conf && \ + sed -e '/ *nofile /s/^/#/g' -i /etc/security/limits.d/oracle-database-preinstall-21c.conf && \ + su $GRID_USER -c "$INSTALL_DIR/install/$INSTALL_GRID_BINARIES_FILE EE $PATCH_NUMBER" && \ + $INVENTORY/orainstRoot.sh && \ + $GRID_HOME/root.sh && \ + su $DB_USER -c "$INSTALL_DIR/install/$INSTALL_DB_BINARIES_FILE EE" && \ + su $DB_USER -c "$INSTALL_DIR/install/$ENABLE_RAC_FILE" && \ + $INVENTORY/orainstRoot.sh && \ + $DB_HOME/root.sh && \ + su $GRID_USER -c "$INSTALL_SCRIPTS/$GRID_HOME_CLEANUP" && \ + su $DB_USER -c "$INSTALL_SCRIPTS/$ORACLE_HOME_CLEANUP" && \ + :; \ + fi +# hadolint ignore=SC3014 +RUN if [ "${SLIMMING}x" == 'truex' ]; then \ + mkdir /u01 && \ + :; \ + fi +# hadolint ignore=SC2086 +RUN rm -f $INSTALL_DIR/install/* && \ + sync + +############################################# +# ------------------------------------------- +# Start new layer for grid & database 
runtime +# ------------------------------------------- +############################################# +# hadolint ignore=DL3006 +FROM rac-image-slim-${SLIMMING} AS final +# hadolint ignore=DL3006 +COPY --from=builder /u01 /u01 +# hadolint ignore=SC2086 +RUN if [ "${SLIMMING}x" != 'truex' ]; then \ + $INVENTORY/orainstRoot.sh && \ + $GRID_HOME/root.sh && \ + $DB_HOME/root.sh && \ + chmod 666 $SCRIPT_DIR/*.rsp && \ + :; \ + fi && \ + $INSTALL_DIR/install/$FIXUP_PREQ_FILE && \ + sync && \ + chmod 755 $SCRIPT_DIR/*.sh && \ + chmod 755 $SCRIPT_DIR/scripts/*.py && \ + chmod 755 $SCRIPT_DIR/scripts/cmdExec && \ + chmod 755 $SCRIPT_DIR/scripts/*.expect && \ + echo "nohup $SCRIPT_DIR/runOracle.sh &" >> /etc/rc.local && \ + rm -f /etc/rc.d/init.d/oracle-database-preinstall-21c-firstboot && \ + chmod +x /etc/rc.d/rc.local && \ + cp $SCRIPT_DIR/$INITSH /usr/bin/$INITSH && \ + setcap 'cap_net_admin,cap_net_raw+ep' /usr/bin/ping && \ + chmod 755 /usr/bin/$INITSH && \ + rm -f /etc/sysctl.d/99-oracle-database-preinstall-21c-sysctl.conf && \ + rm -f /etc/sysctl.d/99-sysctl.conf && \ + rm -f $INSTALL_DIR/install/* && \ + sync + +USER ${USER} +VOLUME ["/common_scripts"] +WORKDIR $WORKDIR + +HEALTHCHECK --interval=2m --start-period=30m \ + CMD "$SCRIPT_DIR/scripts/main.py --checkracinst=true" >/dev/null || exit 1 + +# Define default command to start Oracle Grid and RAC Database setup. +# hadolint ignore=DL3025 +ENTRYPOINT /usr/bin/$INITSH \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/GridHomeCleanup.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/GridHomeCleanup.sh new file mode 100755 index 0000000000..42f33d4f70 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/GridHomeCleanup.sh @@ -0,0 +1,59 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2019,2025 Oracle and/or its affiliates. +# +# Since: January, 2019 +# Author: paramdeep.saini@oracle.com +# Description: Cleanup the $GRID_HOME and ORACLE_BASE after Grid confguration in the image +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
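[Editor's illustration] In the multi-stage Containerfile above, the builder and final stages inherit from `rac-image-slim-${SLIMMING}`, so the slim or full variant is selected at build time through build args. A hedged example of driving that manually is shown below; the image tag is arbitrary, VERSION must point at the 21.3.0 script directory, and the repository's buildContainerImage.sh helper is the usual way to run this.

```bash
#!/bin/bash
# Hypothetical manual build from the containerfiles directory; SLIMMING selects
# which rac-image-slim-* stage is reused, VERSION selects the script directory.
docker build --force-rm=true --no-cache=true \
  --build-arg VERSION=21.3.0 --build-arg SLIMMING=false \
  -t oracle/database-rac:21.3.0 -f 21.3.0/Containerfile .
```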
+# + +# Image Cleanup Script +# shellcheck disable=SC1090 +source /home/"${GRID_USER}"/.bashrc +# shellcheck disable=SC2034 +ORACLE_HOME=${GRID_HOME} + +rm -rf /u01/app/grid/* +rm -rf "$GRID_HOME"/log +rm -rf "$GRID_HOME"/logs +rm -rf "$GRID_HOME"/crs/init +rm -rf "$GRID_HOME"/crs/install/rhpdata +rm -rf "$GRID_HOME"/crs/log +rm -rf "$GRID_HOME"/racg/dump +rm -rf "$GRID_HOME"/srvm/log +rm -rf "$GRID_HOME"/cv/log +rm -rf "$GRID_HOME"/cdata +rm -rf "$GRID_HOME"/bin/core* +rm -rf "$GRID_HOME"/bin/diagsnap.pl +rm -rf "$GRID_HOME"/cfgtoollogs/* +rm -rf "$GRID_HOME"/network/admin/listener.ora +rm -rf "$GRID_HOME"/crf +rm -rf "$GRID_HOME"/ologgerd/init +rm -rf "$GRID_HOME"/osysmond/init +rm -rf "$GRID_HOME"/ohasd/init +rm -rf "$GRID_HOME"/ctss/init +rm -rf "$GRID_HOME"/dbs/.*.dat +rm -rf "$GRID_HOME"/oc4j/j2ee/home/log +rm -rf "$GRID_HOME"/inventory/Scripts/ext/bin/log +rm -rf "$GRID_HOME"/inventory/backup/* +rm -rf "$GRID_HOME"/mdns/init +rm -rf "$GRID_HOME"/gnsd/init +rm -rf "$GRID_HOME"/evm/init +rm -rf "$GRID_HOME"/gipc/init +rm -rf "$GRID_HOME"/gpnp/gpnp_bcp.* +rm -rf "$GRID_HOME"/gpnp/init +rm -rf "$GRID_HOME"/auth +rm -rf "$GRID_HOME"/tfa +rm -rf "$GRID_HOME"/suptools/tfa/release/diag +rm -rf "$GRID_HOME"/rdbms/audit/* +rm -rf "$GRID_HOME"/rdbms/log/* +rm -rf "$GRID_HOME"/network/log/* +rm -rf "$GRID_HOME"/inventory/Scripts/comps.xml.* +rm -rf "$GRID_HOME"/inventory/Scripts/oraclehomeproperties.xml.* +rm -rf "$GRID_HOME"/inventory/Scripts/oraInst.loc.* +rm -rf "$GRID_HOME"/inventory/Scripts/inventory.xml.* +rm -rf "$GRID_HOME"/log_file_client.log +rm -rf "$INVENTORY"/logs/* diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/OracleHomeCleanup.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/OracleHomeCleanup.sh new file mode 100755 index 0000000000..8087835575 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/OracleHomeCleanup.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2019,2025 Oracle and/or its affiliates. +# +# Since: January, 2019 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Cleanup the $ORACLE_HOME and ORACLE_BASE after Grid confguration in the image +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
+# + +# Image Cleanup Script +# shellcheck disable=SC1090 +source /home/"${DB_USER}"/.bashrc +ORACLE_HOME=${DB_HOME} + +rm -rf "$ORACLE_HOME"/bin/extjob +rm -rf "$ORACLE_HOME"/PAF +rm -rf "$ORACLE_HOME"/install/oratab +rm -rf "$ORACLE_HOME"/install/make.log +rm -rf "$ORACLE_HOME"/network/admin/listener.ora +rm -rf "$ORACLE_HOME"/network/admin/tnsnames.ora +rm -rf "$ORACLE_HOME"/bin/nmo +rm -rf "$ORACLE_HOME"/bin/nmb +rm -rf "$ORACLE_HOME"/bin/nmhs +rm -rf "$ORACLE_HOME"/log/.* +rm -rf "$ORACLE_HOME"/oc4j/j2ee/oc4j_applications/applications/em/em/images/chartCache/* +rm -rf "$ORACLE_HOME"/rdbms/audit/* +rm -rf "$ORACLE_HOME"/cfgtoollogs/* +rm -rf "$ORACLE_HOME"/inventory/Scripts/comps.xml.* +rm -rf "$ORACLE_HOME"/inventory/Scripts/oraclehomeproperties.xml.* +rm -rf "$ORACLE_HOME"/inventory/Scripts/oraInst.loc.* +rm -rf "$ORACLE_HOME"/inventory/Scripts/inventory.xml.* +rm -rf "$INVENTORY"/logs/* \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/applyGridPatch.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/applyGridPatch.sh new file mode 100755 index 0000000000..247edd87f6 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/applyGridPatch.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Apply Patch for Oracle Grid and Databas. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +PATCH=$1 + +# Check whether edition has been passed on +if [ "$PATCH" == "" ]; then + echo "ERROR: No Patch has been passed on!" + echo "Please specify the correct PATCH!" + exit 1; +fi; + +# Check whether GRID_BASE is set +if [ "$GRID_BASE" == "" ]; then + echo "ERROR: GRID_BASE has not been set!" + echo "You have to have the GRID_BASE environment variable set to a valid value!" + exit 1; +fi; + +# Check whether GRID_HOME is set +if [ "$GRID_HOME" == "" ]; then + echo "ERROR: GRID_HOME has not been set!" + echo "You have to have the GRID_HOME environment variable set to a valid value!" + exit 1; +fi; + +# Install Oracle binaries +# shellcheck disable=SC2115 +unzip -q "$INSTALL_SCRIPTS"/"$PATCH" -d "$GRID_USER_HOME" && \ +rm -f "$INSTALL_SCRIPTS"/"$GRID_PATCH" && \ +cd "$GRID_USER_HOME"/"$PATCH_NUMBER"/"$PATCH_NUMBER" && \ +"$GRID_HOME"/OPatch/opatch napply -silent -local -oh "$GRID_HOME" -id "$PATCH_NUMBER" && \ +cd "$GRID_USER_HOME" && \ +rm -rf "$GRID_USER_HOME"/"$PATCH_NUMBER" diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/checkSpace.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/checkSpace.sh new file mode 100755 index 0000000000..de8568f350 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/checkSpace.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Checks the available space of the system. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +REQUIRED_SPACE_GB=35 +AVAILABLE_SPACE_GB=`df -PB 1G / | tail -n 1 | awk '{print $4}'` + +if [ $AVAILABLE_SPACE_GB -lt $REQUIRED_SPACE_GB ]; then + script_name=`basename "$0"` + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
+ echo "$script_name: ERROR - There is not enough space available in the docker container." + echo "$script_name: The container needs at least $REQUIRED_SPACE_GB GB , but only $AVAILABLE_SPACE_GB available." + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + exit 1; +fi; diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_inst.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_inst.rsp new file mode 100644 index 0000000000..68e58b1ecb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_inst.rsp @@ -0,0 +1,125 @@ +#################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved.## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +#################################################################### + + +#------------------------------------------------------------------------------- +# Do not change the following system generated value. +#------------------------------------------------------------------------------- +oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v18.0.0 + +#------------------------------------------------------------------------------- +# Specify the installation option. +# It can be one of the following: +# - INSTALL_DB_SWONLY +# - INSTALL_DB_AND_CONFIG +#------------------------------------------------------------------------------- +oracle.install.option=INSTALL_DB_SWONLY + +#------------------------------------------------------------------------------- +# Specify the Unix group to be set for the inventory directory. +#------------------------------------------------------------------------------- +UNIX_GROUP_NAME=oinstall + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION=/u01/app/oraInventory +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Home. +#------------------------------------------------------------------------------- +ORACLE_HOME=/u01/app/oracle/product/18.3.0/dbhome_1 + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE=/u01/app/oracle + +#------------------------------------------------------------------------------- +# Specify the installation edition of the component. +# +# The value should contain only one of these choices. 
+# - EE : Enterprise Edition +# - SE2 : Standard Edition 2 + + +#------------------------------------------------------------------------------- + +oracle.install.db.InstallEdition=EE +############################################################################### +# # +# PRIVILEGED OPERATING SYSTEM GROUPS # +# ------------------------------------------ # +# Provide values for the OS groups to which SYSDBA and SYSOPER privileges # +# needs to be granted. If the install is being performed as a member of the # +# group "dba", then that will be used unless specified otherwise below. # +# # +# The value to be specified for OSDBA and OSOPER group is only for UNIX based # +# Operating System. # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.db.OSDBA_GROUP=dba + +#------------------------------------------------------------------------------ +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +#------------------------------------------------------------------------------ +oracle.install.db.OSOPER_GROUP=oper + +#------------------------------------------------------------------------------ +# The OSBACKUPDBA_GROUP is the OS group which is to be granted SYSBACKUP privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSBACKUPDBA_GROUP=backupdba + +#------------------------------------------------------------------------------ +# The OSDGDBA_GROUP is the OS group which is to be granted SYSDG privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSDGDBA_GROUP=dgdba + +#------------------------------------------------------------------------------ +# The OSKMDBA_GROUP is the OS group which is to be granted SYSKM privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSKMDBA_GROUP=kmdba + +#------------------------------------------------------------------------------ +# The OSRACDBA_GROUP is the OS group which is to be granted SYSRAC privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSRACDBA_GROUP=racdba +#------------------------------------------------------------------------------ +# Specify whether to enable the user to set the password for +# My Oracle Support credentials. The value can be either true or false. +# If left blank it will be assumed to be false. +# +# Example : SECURITY_UPDATES_VIA_MYORACLESUPPORT=true +#------------------------------------------------------------------------------ +SECURITY_UPDATES_VIA_MYORACLESUPPORT=false + +#------------------------------------------------------------------------------ +# Specify whether user doesn't want to configure Security Updates. +# The value for this variable should be true if you don't want to configure +# Security Updates, false otherwise. +# +# The value can be either true or false. If left blank it will be assumed +# to be true. 
+# +# Example : DECLINE_SECURITY_UPDATES=false +#------------------------------------------------------------------------------ +DECLINE_SECURITY_UPDATES=true diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_install_21cv1.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_install_21cv1.rsp new file mode 100644 index 0000000000..be696f52bc --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_install_21cv1.rsp @@ -0,0 +1,356 @@ +#################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved.## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +#################################################################### + + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v21.0.0 + +#------------------------------------------------------------------------------- +# Specify the installation option. +# It can be one of the following: +# - INSTALL_DB_SWONLY +# - INSTALL_DB_AND_CONFIG +#------------------------------------------------------------------------------- +oracle.install.option= + +#------------------------------------------------------------------------------- +# Specify the Unix group to be set for the inventory directory. +#------------------------------------------------------------------------------- +UNIX_GROUP_NAME= + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION= +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Home. +#------------------------------------------------------------------------------- +ORACLE_HOME= + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE= + +#------------------------------------------------------------------------------- +# Specify the installation edition of the component. +# +# The value should contain only one of these choices. +# - EE : Enterprise Edition +# - SE2 : Standard Edition 2 + + +#------------------------------------------------------------------------------- + +oracle.install.db.InstallEdition= +############################################################################### +# # +# PRIVILEGED OPERATING SYSTEM GROUPS # +# ------------------------------------------ # +# Provide values for the OS groups to which SYSDBA and SYSOPER privileges # +# needs to be granted. 
If the install is being performed as a member of the # +# group "dba", then that will be used unless specified otherwise below. # +# # +# The value to be specified for OSDBA and OSOPER group is only for UNIX based # +# Operating System. # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.db.OSDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +#------------------------------------------------------------------------------ +oracle.install.db.OSOPER_GROUP= + +#------------------------------------------------------------------------------ +# The OSBACKUPDBA_GROUP is the OS group which is to be granted SYSBACKUP privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSBACKUPDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSDGDBA_GROUP is the OS group which is to be granted SYSDG privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSDGDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSKMDBA_GROUP is the OS group which is to be granted SYSKM privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSKMDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSRACDBA_GROUP is the OS group which is to be granted SYSRAC privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSRACDBA_GROUP= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.executeRootScript= + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. +# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.configMethod= +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. +# +# Applicable only when SUDO configuration method was chosen. 
+#-------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# Applicable only when SUDO configuration method was chosen. +# Note:For Single Instance database installations,the sudo user name must be the username of the user installing the database. +#-------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.sudoUserName= + +############################################################################### +# # +# Grid Options # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# Value is required only if the specified install option is INSTALL_DB_SWONLY +# +# Specify the cluster node names selected during the installation. +# +# Example : oracle.install.db.CLUSTER_NODES=node1,node2 +#------------------------------------------------------------------------------ +oracle.install.db.CLUSTER_NODES= + +############################################################################### +# # +# Database Configuration Options # +# # +############################################################################### + +#------------------------------------------------------------------------------- +# Specify the type of database to create. +# It can be one of the following: +# - GENERAL_PURPOSE +# - DATA_WAREHOUSE +# GENERAL_PURPOSE: A starter database designed for general purpose use or transaction-heavy applications. +# DATA_WAREHOUSE : A starter database optimized for data warehousing applications. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.type= + +#------------------------------------------------------------------------------- +# Specify the Starter Database Global Database Name. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.globalDBName= + +#------------------------------------------------------------------------------- +# Specify the Starter Database SID. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.SID= + +#------------------------------------------------------------------------------- +# Specify whether the database should be configured as a Container database. +# The value can be either "true" or "false". If left blank it will be assumed +# to be "false". +#------------------------------------------------------------------------------- +oracle.install.db.ConfigureAsContainerDB= + +#------------------------------------------------------------------------------- +# Specify the Pluggable Database name for the pluggable database in Container Database. +#------------------------------------------------------------------------------- +oracle.install.db.config.PDBName= + +#------------------------------------------------------------------------------- +# Specify the Starter Database character set. 
+# +# One of the following +# AL32UTF8, WE8ISO8859P15, WE8MSWIN1252, EE8ISO8859P2, +# EE8MSWIN1250, NE8ISO8859P10, NEE8ISO8859P4, BLT8MSWIN1257, +# BLT8ISO8859P13, CL8ISO8859P5, CL8MSWIN1251, AR8ISO8859P6, +# AR8MSWIN1256, EL8ISO8859P7, EL8MSWIN1253, IW8ISO8859P8, +# IW8MSWIN1255, JA16EUC, JA16EUCTILDE, JA16SJIS, JA16SJISTILDE, +# KO16MSWIN949, ZHS16GBK, TH8TISASCII, ZHT32EUC, ZHT16MSWIN950, +# ZHT16HKSCS, WE8ISO8859P9, TR8MSWIN1254, VN8MSWIN1258 +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.characterSet= + +#------------------------------------------------------------------------------ +# This variable should be set to true if Automatic Memory Management +# in Database is desired. +# If Automatic Memory Management is not desired, and memory allocation +# is to be done manually, then set it to false. +#------------------------------------------------------------------------------ +oracle.install.db.config.starterdb.memoryOption= + +#------------------------------------------------------------------------------- +# Specify the total memory allocation for the database. Value(in MB) should be +# at least 256 MB, and should not exceed the total physical memory available +# on the system. +# Example: oracle.install.db.config.starterdb.memoryLimit=512 +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.memoryLimit= + +#------------------------------------------------------------------------------- +# This variable controls whether to load Example Schemas onto +# the starter database or not. +# The value can be either "true" or "false". If left blank it will be assumed +# to be "false". +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.installExampleSchemas= + +############################################################################### +# # +# Passwords can be supplied for the following four schemas in the # +# starter database: # +# SYS # +# SYSTEM # +# DBSNMP (used by Enterprise Manager) # +# # +# Same password can be used for all accounts (not recommended) # +# or different passwords for each account can be provided (recommended) # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# This variable holds the password that is to be used for all schemas in the +# starter database. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.ALL= + +#------------------------------------------------------------------------------- +# Specify the SYS password for the starter database. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.SYS= + +#------------------------------------------------------------------------------- +# Specify the SYSTEM password for the starter database. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.SYSTEM= + +#------------------------------------------------------------------------------- +# Specify the DBSNMP password for the starter database. 
+# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.DBSNMP= + +#------------------------------------------------------------------------------- +# Specify the PDBADMIN password required for creation of Pluggable Database in the Container Database. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.PDBADMIN= + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing the database. +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your database with Enterprise Manager Cloud Control along with Database Express. +# 2. DEFAULT -If you want to manage your database using the default Database Express option. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.managementOption= + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.omsPort= + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.emAdminPassword= + +############################################################################### +# # +# SPECIFY RECOVERY OPTIONS # +# ------------------------------------ # +# Recovery options for the database can be mentioned using the entries below # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# This variable is to be set to false if database recovery is not required. Else +# this can be set to true. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.enableRecovery= + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for the database. 
+# It can be one of the following: +# - FILE_SYSTEM_STORAGE +# - ASM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.storageType= + +#------------------------------------------------------------------------------- +# Specify the database file location which is a directory for datafiles, control +# files, redo logs. +# +# Applicable only when oracle.install.db.config.starterdb.storage=FILE_SYSTEM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.fileSystemStorage.dataLocation= + +#------------------------------------------------------------------------------- +# Specify the recovery location. +# +# Applicable only when oracle.install.db.config.starterdb.storage=FILE_SYSTEM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.fileSystemStorage.recoveryLocation= + +#------------------------------------------------------------------------------- +# Specify the existing ASM disk groups to be used for storage. +# +# Applicable only when oracle.install.db.config.starterdb.storageType=ASM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.asm.diskGroup= + +#------------------------------------------------------------------------------- +# Specify the password for ASMSNMP user of the ASM instance. +# +# Applicable only when oracle.install.db.config.starterdb.storage=ASM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.asm.ASMSNMPPassword= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_sw_install_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_sw_install_21c.rsp new file mode 100644 index 0000000000..7d5123c853 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_sw_install_21c.rsp @@ -0,0 +1,41 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v21.0.0 +oracle.install.option=INSTALL_DB_SWONLY +UNIX_GROUP_NAME=oinstall +INVENTORY_LOCATION=/u01/app/oraInventory +ORACLE_HOME=/u01/app/oracle/product/21c/dbhome_1 +ORACLE_BASE=/u01/app/oracle +oracle.install.db.InstallEdition=EE +oracle.install.db.OSDBA_GROUP=dba +oracle.install.db.OSOPER_GROUP=oper +oracle.install.db.OSBACKUPDBA_GROUP=backupdba +oracle.install.db.OSDGDBA_GROUP=dgdba +oracle.install.db.OSKMDBA_GROUP=kmdba +oracle.install.db.OSRACDBA_GROUP=racdba +oracle.install.db.rootconfig.executeRootScript= +oracle.install.db.rootconfig.configMethod= +oracle.install.db.rootconfig.sudoPath= +oracle.install.db.rootconfig.sudoUserName= +oracle.install.db.CLUSTER_NODES= +oracle.install.db.config.starterdb.type= +oracle.install.db.config.starterdb.globalDBName= +oracle.install.db.config.starterdb.SID= +oracle.install.db.config.PDBName= +oracle.install.db.config.starterdb.characterSet= +oracle.install.db.config.starterdb.memoryOption= +oracle.install.db.config.starterdb.memoryLimit= +oracle.install.db.config.starterdb.password.ALL= +oracle.install.db.config.starterdb.password.SYS= +oracle.install.db.config.starterdb.password.SYSTEM= +oracle.install.db.config.starterdb.password.DBSNMP= +oracle.install.db.config.starterdb.password.PDBADMIN= +oracle.install.db.config.starterdb.managementOption= +oracle.install.db.config.starterdb.omsHost= 
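+#
+# Illustrative usage note (editorial addition, not part of the upstream file;
+# the response-file path is an assumption, the Oracle home is taken from the
+# values above): a software-only response file such as this is normally
+# consumed by the silent installer, for example:
+#   /u01/app/oracle/product/21c/dbhome_1/runInstaller -silent -responseFile /tmp/db_sw_install_21c.rsp
+#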
+oracle.install.db.config.starterdb.omsPort= +oracle.install.db.config.starterdb.emAdminUser= +oracle.install.db.config.starterdb.emAdminPassword= +oracle.install.db.config.starterdb.enableRecovery= +oracle.install.db.config.starterdb.storageType= +oracle.install.db.config.starterdb.fileSystemStorage.dataLocation= +oracle.install.db.config.starterdb.fileSystemStorage.recoveryLocation= +oracle.install.db.config.asm.diskGroup= +oracle.install.db.config.asm.ASMSNMPPassword= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_sw_install_21cv1.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_sw_install_21cv1.rsp new file mode 100644 index 0000000000..b9c73cc1a3 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/db_sw_install_21cv1.rsp @@ -0,0 +1,341 @@ +#################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved.## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +#################################################################### + + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v21.0.0 + +#------------------------------------------------------------------------------- +# Specify the installation option. +# It can be one of the following: +# - INSTALL_DB_SWONLY +# - INSTALL_DB_AND_CONFIG +#------------------------------------------------------------------------------- +oracle.install.option= + +#------------------------------------------------------------------------------- +# Specify the Unix group to be set for the inventory directory. +#------------------------------------------------------------------------------- +UNIX_GROUP_NAME= + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION= +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Home. +#------------------------------------------------------------------------------- +ORACLE_HOME= + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE= + +#------------------------------------------------------------------------------- +# Specify the installation edition of the component. +# +# The value should contain only one of these choices. 
+# - EE : Enterprise Edition +# - SE2 : Standard Edition 2 + + +#------------------------------------------------------------------------------- + +oracle.install.db.InstallEdition= +############################################################################### +# # +# PRIVILEGED OPERATING SYSTEM GROUPS # +# ------------------------------------------ # +# Provide values for the OS groups to which SYSDBA and SYSOPER privileges # +# needs to be granted. If the install is being performed as a member of the # +# group "dba", then that will be used unless specified otherwise below. # +# # +# The value to be specified for OSDBA and OSOPER group is only for UNIX based # +# Operating System. # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.db.OSDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +#------------------------------------------------------------------------------ +oracle.install.db.OSOPER_GROUP= + +#------------------------------------------------------------------------------ +# The OSBACKUPDBA_GROUP is the OS group which is to be granted SYSBACKUP privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSBACKUPDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSDGDBA_GROUP is the OS group which is to be granted SYSDG privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSDGDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSKMDBA_GROUP is the OS group which is to be granted SYSKM privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSKMDBA_GROUP= + +#------------------------------------------------------------------------------ +# The OSRACDBA_GROUP is the OS group which is to be granted SYSRAC privileges. +#------------------------------------------------------------------------------ +oracle.install.db.OSRACDBA_GROUP= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.executeRootScript= + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. 
+# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.configMethod= +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# Applicable only when SUDO configuration method was chosen. +# Note:For Single Instance database installations,the sudo user name must be the username of the user installing the database. +#-------------------------------------------------------------------------------------- +oracle.install.db.rootconfig.sudoUserName= + +############################################################################### +# # +# Grid Options # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# Value is required only if the specified install option is INSTALL_DB_SWONLY +# +# Specify the cluster node names selected during the installation. +# +# Example : oracle.install.db.CLUSTER_NODES=node1,node2 +#------------------------------------------------------------------------------ +oracle.install.db.CLUSTER_NODES= + +############################################################################### +# # +# Database Configuration Options # +# # +############################################################################### + +#------------------------------------------------------------------------------- +# Specify the type of database to create. +# It can be one of the following: +# - GENERAL_PURPOSE +# - DATA_WAREHOUSE +# GENERAL_PURPOSE: A starter database designed for general purpose use or transaction-heavy applications. +# DATA_WAREHOUSE : A starter database optimized for data warehousing applications. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.type= + +#------------------------------------------------------------------------------- +# Specify the Starter Database Global Database Name. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.globalDBName= + +#------------------------------------------------------------------------------- +# Specify the Starter Database SID. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.SID= + +#------------------------------------------------------------------------------- +# Specify the Pluggable Database name for the pluggable database in Container Database. +#------------------------------------------------------------------------------- +oracle.install.db.config.PDBName= + +#------------------------------------------------------------------------------- +# Specify the Starter Database character set. 
+# +# One of the following +# AL32UTF8, WE8ISO8859P15, WE8MSWIN1252, EE8ISO8859P2, +# EE8MSWIN1250, NE8ISO8859P10, NEE8ISO8859P4, BLT8MSWIN1257, +# BLT8ISO8859P13, CL8ISO8859P5, CL8MSWIN1251, AR8ISO8859P6, +# AR8MSWIN1256, EL8ISO8859P7, EL8MSWIN1253, IW8ISO8859P8, +# IW8MSWIN1255, JA16EUC, JA16EUCTILDE, JA16SJIS, JA16SJISTILDE, +# KO16MSWIN949, ZHS16GBK, TH8TISASCII, ZHT32EUC, ZHT16MSWIN950, +# ZHT16HKSCS, WE8ISO8859P9, TR8MSWIN1254, VN8MSWIN1258 +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.characterSet= + +#------------------------------------------------------------------------------ +# This variable should be set to true if Automatic Memory Management +# in Database is desired. +# If Automatic Memory Management is not desired, and memory allocation +# is to be done manually, then set it to false. +#------------------------------------------------------------------------------ +oracle.install.db.config.starterdb.memoryOption= + +#------------------------------------------------------------------------------- +# Specify the total memory allocation for the database. Value(in MB) should be +# at least 256 MB, and should not exceed the total physical memory available +# on the system. +# Example: oracle.install.db.config.starterdb.memoryLimit=512 +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.memoryLimit= + +############################################################################### +# # +# Passwords can be supplied for the following four schemas in the # +# starter database: # +# SYS # +# SYSTEM # +# DBSNMP (used by Enterprise Manager) # +# # +# Same password can be used for all accounts (not recommended) # +# or different passwords for each account can be provided (recommended) # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# This variable holds the password that is to be used for all schemas in the +# starter database. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.ALL= + +#------------------------------------------------------------------------------- +# Specify the SYS password for the starter database. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.SYS= + +#------------------------------------------------------------------------------- +# Specify the SYSTEM password for the starter database. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.SYSTEM= + +#------------------------------------------------------------------------------- +# Specify the DBSNMP password for the starter database. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.DBSNMP= + +#------------------------------------------------------------------------------- +# Specify the PDBADMIN password required for creation of Pluggable Database in the Container Database. 
+#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.password.PDBADMIN= + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing the database. +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your database with Enterprise Manager Cloud Control along with Database Express. +# 2. DEFAULT -If you want to manage your database using the default Database Express option. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.managementOption= + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.omsPort= + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.db.config.starterdb.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.emAdminPassword= + +############################################################################### +# # +# SPECIFY RECOVERY OPTIONS # +# ------------------------------------ # +# Recovery options for the database can be mentioned using the entries below # +# # +############################################################################### + +#------------------------------------------------------------------------------ +# This variable is to be set to false if database recovery is not required. Else +# this can be set to true. +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.enableRecovery= + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for the database. +# It can be one of the following: +# - FILE_SYSTEM_STORAGE +# - ASM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.storageType= + +#------------------------------------------------------------------------------- +# Specify the database file location which is a directory for datafiles, control +# files, redo logs. 
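+#
+# Example (illustrative editorial addition, not in the upstream response file;
+# the directory is an assumption following the $ORACLE_BASE/oradata convention):
+# oracle.install.db.config.starterdb.fileSystemStorage.dataLocation=/u01/app/oracle/oradata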
+# +# Applicable only when oracle.install.db.config.starterdb.storage=FILE_SYSTEM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.fileSystemStorage.dataLocation= + +#------------------------------------------------------------------------------- +# Specify the recovery location. +# +# Applicable only when oracle.install.db.config.starterdb.storage=FILE_SYSTEM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.starterdb.fileSystemStorage.recoveryLocation= + +#------------------------------------------------------------------------------- +# Specify the existing ASM disk groups to be used for storage. +# +# Applicable only when oracle.install.db.config.starterdb.storageType=ASM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.asm.diskGroup= + +#------------------------------------------------------------------------------- +# Specify the password for ASMSNMP user of the ASM instance. +# +# Applicable only when oracle.install.db.config.starterdb.storage=ASM_STORAGE +#------------------------------------------------------------------------------- +oracle.install.db.config.asm.ASMSNMPPassword= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca.rsp new file mode 100644 index 0000000000..745fdc7d70 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca.rsp @@ -0,0 +1,605 @@ +############################################################################## +## ## +## DBCA response file ## +## ------------------ ## +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +############################################################################## +#------------------------------------------------------------------------------- +# Do not change the following system generated value. +#------------------------------------------------------------------------------- +responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v18.0.0 + +#----------------------------------------------------------------------------- +# Name : gdbName +# Datatype : String +# Description : Global database name of the database +# Valid values : . 
- when database domain isn't NULL +# - when database domain is NULL +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +gdbName=###ORACLE_SID### + +#----------------------------------------------------------------------------- +# Name : sid +# Datatype : String +# Description : System identifier (SID) of the database +# Valid values : Check Oracle12c Administrator's Guide +# Default value : specified in GDBNAME +# Mandatory : No +#----------------------------------------------------------------------------- +sid=###ORACLE_SID### + +#----------------------------------------------------------------------------- +# Name : databaseConfigType +# Datatype : String +# Description : database conf type as Single Instance, Real Application Cluster or Real Application Cluster One Nodes database +# Valid values : SI\RAC\RACONENODE +# Default value : SI +# Mandatory : No +#----------------------------------------------------------------------------- +databaseConfigType=RAC + +#----------------------------------------------------------------------------- +# Name : RACOneNodeServiceName +# Datatype : String +# Description : Service is required by application to connect to RAC One +# Node Database +# Valid values : Service Name +# Default value : None +# Mandatory : No [required in case DATABASECONFTYPE is set to RACONENODE ] +#----------------------------------------------------------------------------- +RACOneNodeServiceName= + +#----------------------------------------------------------------------------- +# Name : policyManaged +# Datatype : Boolean +# Description : Set to true if Database is policy managed and +# set to false if Database is admin managed +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +policyManaged=false + + +#----------------------------------------------------------------------------- +# Name : createServerPool +# Datatype : Boolean +# Description : Set to true if new server pool need to be created for database +# if this option is specified then the newly created database +# will use this newly created serverpool. +# Multiple serverpoolname can not be specified for database +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +createServerPool=false + +#----------------------------------------------------------------------------- +# Name : serverPoolName +# Datatype : String +# Description : Only one serverpool name need to be specified +# if Create Server Pool option is specified. 
+# Comma-separated list of Serverpool names if db need to use +# multiple Server pool +# Valid values : ServerPool name + +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +serverPoolName= + +#----------------------------------------------------------------------------- +# Name : cardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation + +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +cardinality= + +#----------------------------------------------------------------------------- +# Name : force +# Datatype : Boolean +# Description : Set to true if new server pool need to be created by force +# if this option is specified then the newly created serverpool +# will be assigned server even if no free servers are available. +# This may affect already running database. +# This flag can be specified for Admin managed as well as policy managed db. +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +force=false + +#----------------------------------------------------------------------------- +# Name : pqPoolName +# Datatype : String +# Description : Only one serverpool name needs to be specified +# if create server pool option is specified. +# Comma-separated list of serverpool names if use +# server pool. This is required to +# create Parallel Query (PQ) database. Applicable to Big Cluster +# Valid values : Parallel Query (PQ) pool name +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +pqPoolName= + +#----------------------------------------------------------------------------- +# Name : pqCardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation. 
+# Applicable to Big Cluster +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +pqCardinality= + +#----------------------------------------------------------------------------- +# Name : createAsContainerDatabase +# Datatype : boolean +# Description : flag to create database as container database +# Valid values : Check Oracle12c Administrator's Guide +# Default value : false +# Mandatory : No +#----------------------------------------------------------------------------- +createAsContainerDatabase=###CONTAINER_DB_FLAG### + +#----------------------------------------------------------------------------- +# Name : numberOfPDBs +# Datatype : Number +# Description : Specify the number of pdb to be created +# Valid values : 0 to 252 +# Default value : 0 +# Mandatory : No +#----------------------------------------------------------------------------- +numberOfPDBs=1 + +#----------------------------------------------------------------------------- +# Name : pdbName +# Datatype : String +# Description : Specify the pdbname/pdbanme prefix if one or more pdb need to be created +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +pdbName=###ORACLE_PDB### + +#----------------------------------------------------------------------------- +# Name : useLocalUndoForPDBs +# Datatype : boolean +# Description : Flag to create local undo tablespace for all PDB's. +# Valid values : TRUE\FALSE +# Default value : TRUE +# Mandatory : No +#----------------------------------------------------------------------------- +useLocalUndoForPDBs=true + +#----------------------------------------------------------------------------- +# Name : pdbAdminPassword +# Datatype : String +# Description : PDB Administrator user password +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- + +pdbAdminPassword=###ORACLE_PWD### + +#----------------------------------------------------------------------------- +# Name : nodelist +# Datatype : String +# Description : Comma-separated list of cluster nodes +# Valid values : Cluster node names +# Default value : None +# Mandatory : No (Yes for RAC database-centric database ) +#----------------------------------------------------------------------------- +nodelist=###PUBLIC_HOSTNAME### + +#----------------------------------------------------------------------------- +# Name : templateName +# Datatype : String +# Description : Name of the template +# Valid values : Template file name +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +templateName=/u01/app/oracle/product/18.3.0/dbhome_1/assistants/dbca/templates/General_Purpose.dbc + +#----------------------------------------------------------------------------- +# Name : sysPassword +# Datatype : String +# Description : Password for SYS user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +sysPassword=###ORACLE_PWD### + +#----------------------------------------------------------------------------- +# 
Name : systemPassword +# Datatype : String +# Description : Password for SYSTEM user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +systemPassword=###ORACLE_PWD### + +#----------------------------------------------------------------------------- +# Name : serviceUserPassword +# Datatype : String +# Description : Password for Windows Service user +# Default value : None +# Mandatory : If Oracle home is installed with windows service user +#----------------------------------------------------------------------------- +serviceUserPassword= + +#----------------------------------------------------------------------------- +# Name : emConfiguration +# Datatype : String +# Description : Enterprise Manager Configuration Type +# Valid values : CENTRAL|DBEXPRESS|BOTH|NONE +# Default value : NONE +# Mandatory : No +#----------------------------------------------------------------------------- +emConfiguration=DBEXPRESS + +#----------------------------------------------------------------------------- +# Name : emExpressPort +# Datatype : Number +# Description : Enterprise Manager Configuration Type +# Valid values : Check Oracle12c Administrator's Guide +# Default value : NONE +# Mandatory : No, will be picked up from DBEXPRESS_HTTPS_PORT env variable +# or auto generates a free port between 5500 and 5599 +#----------------------------------------------------------------------------- +emExpressPort=5500 + +#----------------------------------------------------------------------------- +# Name : runCVUChecks +# Datatype : Boolean +# Description : Specify whether to run Cluster Verification Utility checks +# periodically in Cluster environment +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +runCVUChecks=true + +#----------------------------------------------------------------------------- +# Name : dbsnmpPassword +# Datatype : String +# Description : Password for DBSNMP user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if emConfiguration is specified or +# the value of runCVUChecks is TRUE +#----------------------------------------------------------------------------- +dbsnmpPassword=###ORACLE_PWD### + +#----------------------------------------------------------------------------- +# Name : omsHost +# Datatype : String +# Description : EM management server host name +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsHost= + +#----------------------------------------------------------------------------- +# Name : omsPort +# Datatype : Number +# Description : EM management server port number +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsPort=0 + +#----------------------------------------------------------------------------- +# Name : emUser +# Datatype : String +# Description : EM Admin username to add or modify targets +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +emUser= + +#----------------------------------------------------------------------------- +# 
Name : emPassword +# Datatype : String +# Description : EM Admin user password +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +emPassword= + +#----------------------------------------------------------------------------- +# Name : dvConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Database vault +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +dvConfiguration=false + +#----------------------------------------------------------------------------- +# Name : dvUserName +# Datatype : String +# Description : DataVault Owner +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserName= + +#----------------------------------------------------------------------------- +# Name : dvUserPassword +# Datatype : String +# Description : Password for DataVault Owner +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserPassword= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerName +# Datatype : String +# Description : DataVault Account Manager +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerName= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerPassword +# Datatype : String +# Description : Password for DataVault Account Manager +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerPassword= + +#----------------------------------------------------------------------------- +# Name : olsConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Label Security +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +olsConfiguration=false + +#----------------------------------------------------------------------------- +# Name : datafileJarLocation +# Datatype : String +# Description : Location of the data file jar +# Valid values : Directory containing compressed datafile jar +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +datafileJarLocation={ORACLE_HOME}/assistants/dbca/templates/ + +#----------------------------------------------------------------------------- +# Name : datafileDestination +# Datatype : String +# Description : Location of the data file's +# Valid values : Directory for all the database files +# Default value : $ORACLE_BASE/oradata +# Mandatory : No +#----------------------------------------------------------------------------- +datafileDestination=+DATA/{DB_UNIQUE_NAME}/ + +#----------------------------------------------------------------------------- +# Name : recoveryAreaDestination +# 
Datatype : String +# Description : Location of the data file's +# Valid values : Recovery Area location +# Default value : $ORACLE_BASE/flash_recovery_area +# Mandatory : No +#----------------------------------------------------------------------------- +recoveryAreaDestination= + +#----------------------------------------------------------------------------- +# Name : storageType +# Datatype : String +# Description : Specifies the storage on which the database is to be created +# Valid values : FS (CFS for RAC), ASM +# Default value : FS +# Mandatory : No +#----------------------------------------------------------------------------- +storageType=ASM + +#----------------------------------------------------------------------------- +# Name : diskGroupName +# Datatype : String +# Description : Specifies the disk group name for the storage +# Default value : DATA +# Mandatory : No +#----------------------------------------------------------------------------- +diskGroupName=+DATA/{DB_UNIQUE_NAME}/ + +#----------------------------------------------------------------------------- +# Name : asmsnmpPassword +# Datatype : String +# Description : Password for ASM Monitoring +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +asmsnmpPassword= + +#----------------------------------------------------------------------------- +# Name : recoveryGroupName +# Datatype : String +# Description : Specifies the disk group name for the recovery area +# Default value : RECOVERY +# Mandatory : No +#----------------------------------------------------------------------------- +recoveryGroupName= + +#----------------------------------------------------------------------------- +# Name : characterSet +# Datatype : String +# Description : Character set of the database +# Valid values : Check Oracle12c National Language Support Guide +# Default value : "US7ASCII" +# Mandatory : NO +#----------------------------------------------------------------------------- +characterSet=AL32UTF8 + +#----------------------------------------------------------------------------- +# Name : nationalCharacterSet +# Datatype : String +# Description : National Character set of the database +# Valid values : "UTF8" or "AL16UTF16". For details, check Oracle12c National Language Support Guide +# Default value : "AL16UTF16" +# Mandatory : No +#----------------------------------------------------------------------------- +nationalCharacterSet=AL16UTF16 + +#----------------------------------------------------------------------------- +# Name : registerWithDirService +# Datatype : Boolean +# Description : Specifies whether to register with Directory Service. +# Valid values : TRUE \ FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +registerWithDirService=false + + +#----------------------------------------------------------------------------- +# Name : dirServiceUserName +# Datatype : String +# Description : Specifies the name of the directory service user +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServiceUserName= + +#----------------------------------------------------------------------------- +# Name : dirServicePassword +# Datatype : String +# Description : The password of the directory service user. +# You can also specify the password at the command prompt instead of here. 
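+#
+# Illustrative usage note (editorial addition, not part of the upstream file;
+# the response-file path is an assumption): a DBCA response file such as this
+# is normally consumed in silent mode, for example:
+#   dbca -silent -createDatabase -responseFile /tmp/dbca.rsp
+#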
+# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServicePassword= + +#----------------------------------------------------------------------------- +# Name : walletPassword +# Datatype : String +# Description : The password for the wallet to be created or modified. +# You can also specify the password at the command prompt instead of here. +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +walletPassword= + +#----------------------------------------------------------------------------- +# Name : listeners +# Datatype : String +# Description : Specifies list of listeners to register the database with. +# By default the database is configured for all the listeners specified in the +# $ORACLE_HOME/network/admin/listener.ora +# Valid values : The list should be comma separated like "listener1,listener2". +# Mandatory : NO +#----------------------------------------------------------------------------- +listeners=LISTENER + +#----------------------------------------------------------------------------- +# Name : variablesFile +# Datatype : String +# Description : Location of the file containing variable value pair +# Valid values : A valid file-system file. The variable value pair format in this file +# is <variable>=<value>. Each pair should be in a new line. +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variablesFile= + +#----------------------------------------------------------------------------- +# Name : variables +# Datatype : String +# Description : comma separated list of name=value pairs. Overrides variables defined in variablefile and templates +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variables=DB_UNIQUE_NAME=###ORACLE_SID###,ORACLE_BASE=###DB_BASE###,PDB_NAME=###ORACLE_PDB###,DB_NAME=###ORACLE_SID###,ORACLE_HOME=###DB_HOME###,SID=###ORACLE_SID### + +#----------------------------------------------------------------------------- +# Name : initParams +# Datatype : String +# Description : comma separated list of name=value pairs.
Overrides initialization parameters defined in templates +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +#initParams=family:dw_helper.instance_mode=read-only,processes=640,nls_language=AMERICAN,pga_aggregate_target=2008MB,sga_target=6022MB,dispatchers=(PROTOCOL=TCP) (SERVICE=orclXDB),db_block_size=8192BYTES,orcl1.undo_tablespace=UNDOTBS1,diagnostic_dest={ORACLE_BASE},cluster_database=true,orcl1.thread=1,audit_file_dest={ORACLE_BASE}/admin/{DB_UNIQUE_NAME}/adump,db_create_file_dest=+DATA/{DB_UNIQUE_NAME}/,nls_territory=AMERICA,local_listener=-oraagent-dummy-,compatible=12.2.0,db_name=orcl,audit_trail=db,orcl1.instance_number=1,remote_login_passwordfile=exclusive,open_cursors=300 +initParams=audit_trail=none,audit_sys_operations=false,remote_login_passwordfile=exclusive + +#----------------------------------------------------------------------------- +# Name : sampleSchema +# Datatype : Boolean +# Description : Specifies whether or not to add the Sample Schemas to your database +# Valid values : TRUE \ FALSE +# Default value : FASLE +# Mandatory : No +#----------------------------------------------------------------------------- +sampleSchema=false + +#----------------------------------------------------------------------------- +# Name : memoryPercentage +# Datatype : String +# Description : percentage of physical memory for Oracle +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +memoryPercentage=40 + +#----------------------------------------------------------------------------- +# Name : databaseType +# Datatype : String +# Description : used for memory distribution when memoryPercentage specified +# Valid values : MULTIPURPOSE|DATA_WAREHOUSING|OLTP +# Default value : MULTIPURPOSE +# Mandatory : NO +#----------------------------------------------------------------------------- +databaseType=MULTIPURPOSE + +#----------------------------------------------------------------------------- +# Name : automaticMemoryManagement +# Datatype : Boolean +# Description : flag to indicate Automatic Memory Management is used +# Valid values : TRUE/FALSE +# Default value : TRUE +# Mandatory : NO +#----------------------------------------------------------------------------- +automaticMemoryManagement=false + +#----------------------------------------------------------------------------- +# Name : totalMemory +# Datatype : String +# Description : total memory in MB to allocate to Oracle +# Valid values : +# Default value : +# Mandatory : NO +#----------------------------------------------------------------------------- +totalMemory=5000 diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca1.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca1.rsp new file mode 100644 index 0000000000..2810cc645d --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca1.rsp @@ -0,0 +1,605 @@ +############################################################################## +## ## +## DBCA response file ## +## ------------------ ## +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. 
## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +############################################################################## +#------------------------------------------------------------------------------- +# Do not change the following system generated value. +#------------------------------------------------------------------------------- +responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v18.0.0 + +#----------------------------------------------------------------------------- +# Name : gdbName +# Datatype : String +# Description : Global database name of the database +# Valid values : . - when database domain isn't NULL +# - when database domain is NULL +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +gdbName=ORCLCDB + +#----------------------------------------------------------------------------- +# Name : sid +# Datatype : String +# Description : System identifier (SID) of the database +# Valid values : Check Oracle12c Administrator's Guide +# Default value : specified in GDBNAME +# Mandatory : No +#----------------------------------------------------------------------------- +sid=ORCLCDB + +#----------------------------------------------------------------------------- +# Name : databaseConfigType +# Datatype : String +# Description : database conf type as Single Instance, Real Application Cluster or Real Application Cluster One Nodes database +# Valid values : SI\RAC\RACONENODE +# Default value : SI +# Mandatory : No +#----------------------------------------------------------------------------- +databaseConfigType=RAC + +#----------------------------------------------------------------------------- +# Name : RACOneNodeServiceName +# Datatype : String +# Description : Service is required by application to connect to RAC One +# Node Database +# Valid values : Service Name +# Default value : None +# Mandatory : No [required in case DATABASECONFTYPE is set to RACONENODE ] +#----------------------------------------------------------------------------- +RACOneNodeServiceName= + +#----------------------------------------------------------------------------- +# Name : policyManaged +# Datatype : Boolean +# Description : Set to true if Database is policy managed and +# set to false if Database is admin managed +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +policyManaged=false + + +#----------------------------------------------------------------------------- +# Name : createServerPool +# Datatype : Boolean +# Description : Set to true if new server pool need to be created for database +# if this option is specified then the newly created database +# will use this newly created serverpool. +# Multiple serverpoolname can not be specified for database +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +createServerPool=false + +#----------------------------------------------------------------------------- +# Name : serverPoolName +# Datatype : String +# Description : Only one serverpool name need to be specified +# if Create Server Pool option is specified. 
+# Comma-separated list of Serverpool names if db need to use +# multiple Server pool +# Valid values : ServerPool name + +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +serverPoolName= + +#----------------------------------------------------------------------------- +# Name : cardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation + +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +cardinality= + +#----------------------------------------------------------------------------- +# Name : force +# Datatype : Boolean +# Description : Set to true if new server pool need to be created by force +# if this option is specified then the newly created serverpool +# will be assigned server even if no free servers are available. +# This may affect already running database. +# This flag can be specified for Admin managed as well as policy managed db. +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +force=false + +#----------------------------------------------------------------------------- +# Name : pqPoolName +# Datatype : String +# Description : Only one serverpool name needs to be specified +# if create server pool option is specified. +# Comma-separated list of serverpool names if use +# server pool. This is required to +# create Parallel Query (PQ) database. Applicable to Big Cluster +# Valid values : Parallel Query (PQ) pool name +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +pqPoolName= + +#----------------------------------------------------------------------------- +# Name : pqCardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation. 
+# Applicable to Big Cluster +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +pqCardinality= + +#----------------------------------------------------------------------------- +# Name : createAsContainerDatabase +# Datatype : boolean +# Description : flag to create database as container database +# Valid values : Check Oracle12c Administrator's Guide +# Default value : false +# Mandatory : No +#----------------------------------------------------------------------------- +createAsContainerDatabase=true + +#----------------------------------------------------------------------------- +# Name : numberOfPDBs +# Datatype : Number +# Description : Specify the number of pdb to be created +# Valid values : 0 to 252 +# Default value : 0 +# Mandatory : No +#----------------------------------------------------------------------------- +numberOfPDBs=1 + +#----------------------------------------------------------------------------- +# Name : pdbName +# Datatype : String +# Description : Specify the pdbname/pdbanme prefix if one or more pdb need to be created +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +pdbName=ORCLPDB + +#----------------------------------------------------------------------------- +# Name : useLocalUndoForPDBs +# Datatype : boolean +# Description : Flag to create local undo tablespace for all PDB's. +# Valid values : TRUE\FALSE +# Default value : TRUE +# Mandatory : No +#----------------------------------------------------------------------------- +useLocalUndoForPDBs=true + +#----------------------------------------------------------------------------- +# Name : pdbAdminPassword +# Datatype : String +# Description : PDB Administrator user password +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- + +pdbAdminPassword=Oracle_12c + +#----------------------------------------------------------------------------- +# Name : nodelist +# Datatype : String +# Description : Comma-separated list of cluster nodes +# Valid values : Cluster node names +# Default value : None +# Mandatory : No (Yes for RAC database-centric database ) +#----------------------------------------------------------------------------- +nodelist=racnode1 + +#----------------------------------------------------------------------------- +# Name : templateName +# Datatype : String +# Description : Name of the template +# Valid values : Template file name +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +templateName=/u01/app/oracle/product/18.3.0/dbhome_1/assistants/dbca/templates/General_Purpose.dbc + +#----------------------------------------------------------------------------- +# Name : sysPassword +# Datatype : String +# Description : Password for SYS user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +sysPassword=Oracle_12c + +#----------------------------------------------------------------------------- +# Name : systemPassword +# Datatype : String +# 
Description : Password for SYSTEM user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +systemPassword=Oracle_12c + +#----------------------------------------------------------------------------- +# Name : serviceUserPassword +# Datatype : String +# Description : Password for Windows Service user +# Default value : None +# Mandatory : If Oracle home is installed with windows service user +#----------------------------------------------------------------------------- +serviceUserPassword= + +#----------------------------------------------------------------------------- +# Name : emConfiguration +# Datatype : String +# Description : Enterprise Manager Configuration Type +# Valid values : CENTRAL|DBEXPRESS|BOTH|NONE +# Default value : NONE +# Mandatory : No +#----------------------------------------------------------------------------- +emConfiguration=DBEXPRESS + +#----------------------------------------------------------------------------- +# Name : emExpressPort +# Datatype : Number +# Description : Enterprise Manager Configuration Type +# Valid values : Check Oracle12c Administrator's Guide +# Default value : NONE +# Mandatory : No, will be picked up from DBEXPRESS_HTTPS_PORT env variable +# or auto generates a free port between 5500 and 5599 +#----------------------------------------------------------------------------- +emExpressPort=5500 + +#----------------------------------------------------------------------------- +# Name : runCVUChecks +# Datatype : Boolean +# Description : Specify whether to run Cluster Verification Utility checks +# periodically in Cluster environment +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +runCVUChecks=true + +#----------------------------------------------------------------------------- +# Name : dbsnmpPassword +# Datatype : String +# Description : Password for DBSNMP user +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if emConfiguration is specified or +# the value of runCVUChecks is TRUE +#----------------------------------------------------------------------------- +dbsnmpPassword=Oracle_12c + +#----------------------------------------------------------------------------- +# Name : omsHost +# Datatype : String +# Description : EM management server host name +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsHost= + +#----------------------------------------------------------------------------- +# Name : omsPort +# Datatype : Number +# Description : EM management server port number +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsPort=0 + +#----------------------------------------------------------------------------- +# Name : emUser +# Datatype : String +# Description : EM Admin username to add or modify targets +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +emUser= + +#----------------------------------------------------------------------------- +# Name : emPassword +# Datatype : String +# Description : 
EM Admin user password +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +emPassword= + +#----------------------------------------------------------------------------- +# Name : dvConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Database vault +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +dvConfiguration=false + +#----------------------------------------------------------------------------- +# Name : dvUserName +# Datatype : String +# Description : DataVault Owner +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserName= + +#----------------------------------------------------------------------------- +# Name : dvUserPassword +# Datatype : String +# Description : Password for DataVault Owner +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserPassword= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerName +# Datatype : String +# Description : DataVault Account Manager +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerName= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerPassword +# Datatype : String +# Description : Password for DataVault Account Manager +# Valid values : Check Oracle12c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerPassword= + +#----------------------------------------------------------------------------- +# Name : olsConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Label Security +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +olsConfiguration=false + +#----------------------------------------------------------------------------- +# Name : datafileJarLocation +# Datatype : String +# Description : Location of the data file jar +# Valid values : Directory containing compressed datafile jar +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +datafileJarLocation={ORACLE_HOME}/assistants/dbca/templates/ + +#----------------------------------------------------------------------------- +# Name : datafileDestination +# Datatype : String +# Description : Location of the data file's +# Valid values : Directory for all the database files +# Default value : $ORACLE_BASE/oradata +# Mandatory : No +#----------------------------------------------------------------------------- +datafileDestination=+DATA/{DB_UNIQUE_NAME}/ + +#----------------------------------------------------------------------------- +# Name : recoveryAreaDestination +# Datatype : String +# Description : Location of the data file's 
+# Valid values : Recovery Area location +# Default value : $ORACLE_BASE/flash_recovery_area +# Mandatory : No +#----------------------------------------------------------------------------- +recoveryAreaDestination= + +#----------------------------------------------------------------------------- +# Name : storageType +# Datatype : String +# Description : Specifies the storage on which the database is to be created +# Valid values : FS (CFS for RAC), ASM +# Default value : FS +# Mandatory : No +#----------------------------------------------------------------------------- +storageType=ASM + +#----------------------------------------------------------------------------- +# Name : diskGroupName +# Datatype : String +# Description : Specifies the disk group name for the storage +# Default value : DATA +# Mandatory : No +#----------------------------------------------------------------------------- +diskGroupName=+DATA/{DB_UNIQUE_NAME}/ + +#----------------------------------------------------------------------------- +# Name : asmsnmpPassword +# Datatype : String +# Description : Password for ASM Monitoring +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +asmsnmpPassword= + +#----------------------------------------------------------------------------- +# Name : recoveryGroupName +# Datatype : String +# Description : Specifies the disk group name for the recovery area +# Default value : RECOVERY +# Mandatory : No +#----------------------------------------------------------------------------- +recoveryGroupName= + +#----------------------------------------------------------------------------- +# Name : characterSet +# Datatype : String +# Description : Character set of the database +# Valid values : Check Oracle12c National Language Support Guide +# Default value : "US7ASCII" +# Mandatory : NO +#----------------------------------------------------------------------------- +characterSet=AL32UTF8 + +#----------------------------------------------------------------------------- +# Name : nationalCharacterSet +# Datatype : String +# Description : National Character set of the database +# Valid values : "UTF8" or "AL16UTF16". For details, check Oracle12c National Language Support Guide +# Default value : "AL16UTF16" +# Mandatory : No +#----------------------------------------------------------------------------- +nationalCharacterSet=AL16UTF16 + +#----------------------------------------------------------------------------- +# Name : registerWithDirService +# Datatype : Boolean +# Description : Specifies whether to register with Directory Service. +# Valid values : TRUE \ FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +registerWithDirService=false + + +#----------------------------------------------------------------------------- +# Name : dirServiceUserName +# Datatype : String +# Description : Specifies the name of the directory service user +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServiceUserName= + +#----------------------------------------------------------------------------- +# Name : dirServicePassword +# Datatype : String +# Description : The password of the directory service user. +# You can also specify the password at the command prompt instead of here. 
+# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServicePassword= + +#----------------------------------------------------------------------------- +# Name : walletPassword +# Datatype : String +# Description : The password for wallet to created or modified. +# You can also specify the password at the command prompt instead of here. +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +walletPassword= + +#----------------------------------------------------------------------------- +# Name : listeners +# Datatype : String +# Description : Specifies list of listeners to register the database with. +# By default the database is configured for all the listeners specified in the +# $ORACLE_HOME/network/admin/listener.ora +# Valid values : The list should be comma separated like "listener1,listener2". +# Mandatory : NO +#----------------------------------------------------------------------------- +listeners=LISTENER + +#----------------------------------------------------------------------------- +# Name : variablesFile +# Datatype : String +# Description : Location of the file containing variable value pair +# Valid values : A valid file-system file. The variable value pair format in this file +# is =. Each pair should be in a new line. +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variablesFile= + +#----------------------------------------------------------------------------- +# Name : variables +# Datatype : String +# Description : comma separated list of name=value pairs. Overrides variables defined in variablefile and templates +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variables=DB_UNIQUE_NAME=ORCLCDB,ORACLE_BASE=/u01/app/oracle,PDB_NAME=ORCLPDB,DB_NAME=ORCLCDB,ORACLE_HOME=/u01/app/oracle/product/18.3.0/dbhome_1,SID=ORCLCDB + +#----------------------------------------------------------------------------- +# Name : initParams +# Datatype : String +# Description : comma separated list of name=value pairs. 
Overrides initialization parameters defined in templates +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +#initParams=family:dw_helper.instance_mode=read-only,processes=640,nls_language=AMERICAN,pga_aggregate_target=2008MB,sga_target=6022MB,dispatchers=(PROTOCOL=TCP) (SERVICE=orclXDB),db_block_size=8192BYTES,orcl1.undo_tablespace=UNDOTBS1,diagnostic_dest={ORACLE_BASE},cluster_database=true,orcl1.thread=1,audit_file_dest={ORACLE_BASE}/admin/{DB_UNIQUE_NAME}/adump,db_create_file_dest=+DATA/{DB_UNIQUE_NAME}/,nls_territory=AMERICA,local_listener=-oraagent-dummy-,compatible=12.2.0,db_name=orcl,audit_trail=db,orcl1.instance_number=1,remote_login_passwordfile=exclusive,open_cursors=300 +initParams=audit_trail=none,audit_sys_operations=false,remote_login_passwordfile=exclusive + +#----------------------------------------------------------------------------- +# Name : sampleSchema +# Datatype : Boolean +# Description : Specifies whether or not to add the Sample Schemas to your database +# Valid values : TRUE \ FALSE +# Default value : FASLE +# Mandatory : No +#----------------------------------------------------------------------------- +sampleSchema=false + +#----------------------------------------------------------------------------- +# Name : memoryPercentage +# Datatype : String +# Description : percentage of physical memory for Oracle +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +memoryPercentage=40 + +#----------------------------------------------------------------------------- +# Name : databaseType +# Datatype : String +# Description : used for memory distribution when memoryPercentage specified +# Valid values : MULTIPURPOSE|DATA_WAREHOUSING|OLTP +# Default value : MULTIPURPOSE +# Mandatory : NO +#----------------------------------------------------------------------------- +databaseType=MULTIPURPOSE + +#----------------------------------------------------------------------------- +# Name : automaticMemoryManagement +# Datatype : Boolean +# Description : flag to indicate Automatic Memory Management is used +# Valid values : TRUE/FALSE +# Default value : TRUE +# Mandatory : NO +#----------------------------------------------------------------------------- +automaticMemoryManagement=false + +#----------------------------------------------------------------------------- +# Name : totalMemory +# Datatype : String +# Description : total memory in MB to allocate to Oracle +# Valid values : +# Default value : +# Mandatory : NO +#----------------------------------------------------------------------------- +totalMemory=5000 diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca_21c.rsp new file mode 100644 index 0000000000..4b81467bcb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca_21c.rsp @@ -0,0 +1,59 @@ +responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v21.0.0 +gdbName=###ORACLE_SID### +sid=###ORACLE_SID### +databaseConfigType=###DATABASE_CONFIG_TYPE### +RACOneNodeServiceName= +policyManaged=false +managementPolicy= +createServerPool=false +serverPoolName= +cardinality= +force=false +pqPoolName= +pqCardinality= +createAsContainerDatabase=###CONTAINER_DB_FLAG### +numberOfPDBs=###PDB_COUNT### +pdbName=###ORACLE_PDB### +useLocalUndoForPDBs=true 
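The ###...### tokens in this template (here and in the parameters that follow) are placeholders that the image's setup scripts resolve before database creation. As a rough sketch of how such a template could be filled in and handed to DBCA — the sed substitution, the /tmp path, and the sample values below are illustrative assumptions, not the scripts' actual mechanism:

    # Fill the placeholders in a working copy of the template (values are examples only)
    cp dbca_21c.rsp /tmp/dbca_run.rsp
    sed -i -e 's|###ORACLE_SID###|ORCLCDB|g' \
           -e 's|###ORACLE_PDB###|ORCLPDB|g' \
           -e 's|###PDB_COUNT###|1|g' \
           -e 's|###DB_NODES###|racnode1,racnode2|g' \
           -e 's|###TOTAL_MEMORY###|5000|g' /tmp/dbca_run.rsp
    # Create the RAC database from the filled-in response file (run as the oracle user
    # with the database environment set)
    dbca -silent -createDatabase -responseFile /tmp/dbca_run.rsp

The remaining tokens (###ORACLE_PWD###, ###DB_BASE###, ###DB_HOME###, ###DATABASE_CONFIG_TYPE###, ###CONTAINER_DB_FLAG###) are resolved the same way.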
+pdbAdminPassword=###ORACLE_PWD### +nodelist=###DB_NODES### +templateName={ORACLE_HOME}/assistants/dbca/templates/General_Purpose.dbc +sysPassword=###ORACLE_PWD### +systemPassword=###ORACLE_PWD### +oracleHomeUserPassword= +emConfiguration=DBEXPRESS +emExpressPort=5500 +runCVUChecks=true +dbsnmpPassword=###ORACLE_PWD### +omsHost= +omsPort= +emUser= +emPassword= +dvConfiguration=false +dvUserName= +dvUserPassword= +dvAccountManagerName= +dvAccountManagerPassword= +olsConfiguration=false +datafileJarLocation={ORACLE_HOME}/assistants/dbca/templates/ +datafileDestination=+DATA/{DB_UNIQUE_NAME}/ +recoveryAreaDestination= +storageType=ASM +diskGroupName=+DATA/{DB_UNIQUE_NAME}/ +asmsnmpPassword= +recoveryGroupName= +characterSet=AL32UTF8 +nationalCharacterSet=AL16UTF16 +registerWithDirService=false +dirServiceUserName= +dirServicePassword= +walletPassword= +listeners=LISTENER +variablesFile= +variables=DB_UNIQUE_NAME=###ORACLE_SID###,ORACLE_BASE=###DB_BASE###,PDB_NAME=###ORACLE_PDB###,DB_NAME=###ORACLE_SID###,ORACLE_HOME=###DB_HOME###,SID=###ORACLE_SID### +initParams=audit_trail=none,audit_sys_operations=false,remote_login_passwordfile=exclusive +sampleSchema=false +memoryPercentage=40 +databaseType=MULTIPURPOSE +automaticMemoryManagement=false +totalMemory=###TOTAL_MEMORY### diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca_21cv1.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca_21cv1.rsp new file mode 100644 index 0000000000..e644c66048 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/dbca_21cv1.rsp @@ -0,0 +1,613 @@ +############################################################################## +## ## +## DBCA response file ## +## ------------------ ## +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +############################################################################## +#------------------------------------------------------------------------------- +# Do not change the following system generated value. +#------------------------------------------------------------------------------- +responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v21.0.0 + +#----------------------------------------------------------------------------- +# Name : gdbName +# Datatype : String +# Description : Global database name of the database +# Valid values : . 
- when database domain isn't NULL +# - when database domain is NULL +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +gdbName= + +#----------------------------------------------------------------------------- +# Name : sid +# Datatype : String +# Description : System identifier (SID) of the database +# Valid values : Check Oracle21c Administrator's Guide +# Default value : specified in GDBNAME +# Mandatory : No +#----------------------------------------------------------------------------- +sid= + +#----------------------------------------------------------------------------- +# Name : databaseConfigType +# Datatype : String +# Description : database conf type as Single Instance, Real Application Cluster or Real Application Cluster One Nodes database +# Valid values : SI\RAC\RACONENODE +# Default value : SI +# Mandatory : No +#----------------------------------------------------------------------------- +databaseConfigType= + +#----------------------------------------------------------------------------- +# Name : RACOneNodeServiceName +# Datatype : String +# Description : Service is required by application to connect to RAC One +# Node Database +# Valid values : Service Name +# Default value : None +# Mandatory : No [required in case DATABASECONFTYPE is set to RACONENODE ] +#----------------------------------------------------------------------------- +RACOneNodeServiceName= + +#----------------------------------------------------------------------------- +# Name : policyManaged +# Datatype : Boolean +# Description : Set to true if Database is policy managed and +# set to false if Database is admin managed +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +policyManaged= + +#----------------------------------------------------------------------------- +## Name : managementPolicy +## Datatype : String +## Description : Set to AUTOMATIC or RANK based on management policy value +## Valid values : AUTOMATIC\RANK +## Default value : AUTOMATIC +## Mandatory : No +##----------------------------------------------------------------------------- +managementPolicy= + +#----------------------------------------------------------------------------- +# Name : createServerPool +# Datatype : Boolean +# Description : Set to true if new server pool need to be created for database +# if this option is specified then the newly created database +# will use this newly created serverpool. +# Multiple serverpoolname can not be specified for database +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +createServerPool= + +#----------------------------------------------------------------------------- +# Name : serverPoolName +# Datatype : String +# Description : Only one serverpool name need to be specified +# if Create Server Pool option is specified. 
+# Comma-separated list of Serverpool names if db need to use +# multiple Server pool +# Valid values : ServerPool name + +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +serverPoolName= + +#----------------------------------------------------------------------------- +# Name : cardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation + +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +cardinality= + +#----------------------------------------------------------------------------- +# Name : force +# Datatype : Boolean +# Description : Set to true if new server pool need to be created by force +# if this option is specified then the newly created serverpool +# will be assigned server even if no free servers are available. +# This may affect already running database. +# This flag can be specified for Admin managed as well as policy managed db. +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +force= + +#----------------------------------------------------------------------------- +# Name : pqPoolName +# Datatype : String +# Description : Only one serverpool name needs to be specified +# if create server pool option is specified. +# Comma-separated list of serverpool names if use +# server pool. This is required to +# create Parallel Query (PQ) database. Applicable to Big Cluster +# Valid values : Parallel Query (PQ) pool name +# Default value : None +# Mandatory : No [required in case of RAC service centric database] +#----------------------------------------------------------------------------- +pqPoolName= + +#----------------------------------------------------------------------------- +# Name : pqCardinality +# Datatype : Number +# Description : Specify Cardinality for create server pool operation. 
+# Applicable to Big Cluster +# Valid values : any positive Integer value +# Default value : Number of qualified nodes on cluster +# Mandatory : No [Required when a new serverpool need to be created] +#----------------------------------------------------------------------------- +pqCardinality= + +#----------------------------------------------------------------------------- +# Name : createAsContainerDatabase +# Datatype : boolean +# Description : flag to create database as container database +# Valid values : Check Oracle21c Administrator's Guide +# Default value : false +# Mandatory : No +#----------------------------------------------------------------------------- +createAsContainerDatabase= + +#----------------------------------------------------------------------------- +# Name : numberOfPDBs +# Datatype : Number +# Description : Specify the number of pdb to be created +# Valid values : 0 to 4094 +# Default value : 0 +# Mandatory : No +#----------------------------------------------------------------------------- +numberOfPDBs= + +#----------------------------------------------------------------------------- +# Name : pdbName +# Datatype : String +# Description : Specify the pdbname/pdbanme prefix if one or more pdb need to be created +# Valid values : Check Oracle21c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +pdbName= + +#----------------------------------------------------------------------------- +# Name : useLocalUndoForPDBs +# Datatype : boolean +# Description : Flag to create local undo tablespace for all PDB's. +# Valid values : TRUE\FALSE +# Default value : TRUE +# Mandatory : No +#----------------------------------------------------------------------------- +useLocalUndoForPDBs= + +#----------------------------------------------------------------------------- +# Name : pdbAdminPassword +# Datatype : String +# Description : PDB Administrator user password +# Valid values : Check Oracle21c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- + +pdbAdminPassword= + +#----------------------------------------------------------------------------- +# Name : nodelist +# Datatype : String +# Description : Comma-separated list of cluster nodes +# Valid values : Cluster node names +# Default value : None +# Mandatory : No (Yes for RAC database-centric database ) +#----------------------------------------------------------------------------- +nodelist= + +#----------------------------------------------------------------------------- +# Name : templateName +# Datatype : String +# Description : Name of the template +# Valid values : Template file name +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +templateName= + +#----------------------------------------------------------------------------- +# Name : sysPassword +# Datatype : String +# Description : Password for SYS user +# Valid values : Check Oracle21c Administrator's Guide +# Default value : None +# Mandatory : Yes +#----------------------------------------------------------------------------- +sysPassword= + +#----------------------------------------------------------------------------- +# Name : systemPassword +# Datatype : String +# Description : Password for SYSTEM user +# Valid values : Check Oracle21c Administrator's Guide +# Default value : None +# Mandatory : 
Yes +#----------------------------------------------------------------------------- +systemPassword= + +#----------------------------------------------------------------------------- +# Name : oracleHomeUserPassword +# Datatype : String +# Description : Password for Windows Service user +# Default value : None +# Mandatory : If Oracle home is installed with windows service user +#----------------------------------------------------------------------------- +oracleHomeUserPassword= + +#----------------------------------------------------------------------------- +# Name : emConfiguration +# Datatype : String +# Description : Enterprise Manager Configuration Type +# Valid values : CENTRAL|DBEXPRESS|BOTH|NONE +# Default value : NONE +# Mandatory : No +#----------------------------------------------------------------------------- +emConfiguration= + +#----------------------------------------------------------------------------- +# Name : emExpressPort +# Datatype : Number +# Description : Enterprise Manager Configuration Type +# Valid values : Check Oracle21c Administrator's Guide +# Default value : NONE +# Mandatory : No, will be picked up from DBEXPRESS_HTTPS_PORT env variable +# or auto generates a free port between 5500 and 5599 +#----------------------------------------------------------------------------- +emExpressPort=5500 + +#----------------------------------------------------------------------------- +# Name : runCVUChecks +# Datatype : Boolean +# Description : Specify whether to run Cluster Verification Utility checks +# periodically in Cluster environment +# Valid values : TRUE\FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +runCVUChecks= + +#----------------------------------------------------------------------------- +# Name : dbsnmpPassword +# Datatype : String +# Description : Password for DBSNMP user +# Valid values : Check Oracle21c Administrator's Guide +# Default value : None +# Mandatory : Yes, if emConfiguration is specified or +# the value of runCVUChecks is TRUE +#----------------------------------------------------------------------------- +dbsnmpPassword= + +#----------------------------------------------------------------------------- +# Name : omsHost +# Datatype : String +# Description : EM management server host name +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsHost= + +#----------------------------------------------------------------------------- +# Name : omsPort +# Datatype : Number +# Description : EM management server port number +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +omsPort= + +#----------------------------------------------------------------------------- +# Name : emUser +# Datatype : String +# Description : EM Admin username to add or modify targets +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration +#----------------------------------------------------------------------------- +emUser= + +#----------------------------------------------------------------------------- +# Name : emPassword +# Datatype : String +# Description : EM Admin user password +# Default value : None +# Mandatory : Yes, if CENTRAL is specified for emConfiguration 
+#----------------------------------------------------------------------------- +emPassword= + +#----------------------------------------------------------------------------- +# Name : dvConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Database vault +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +dvConfiguration= + +#----------------------------------------------------------------------------- +# Name : dvUserName +# Datatype : String +# Description : DataVault Owner +# Valid values : Check Oracle21c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserName= + +#----------------------------------------------------------------------------- +# Name : dvUserPassword +# Datatype : String +# Description : Password for DataVault Owner +# Valid values : Check Oracle21c Administrator's Guide +# Default value : None +# Mandatory : Yes, if DataVault option is chosen +#----------------------------------------------------------------------------- +dvUserPassword= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerName +# Datatype : String +# Description : DataVault Account Manager +# Valid values : Check Oracle21c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerName= + +#----------------------------------------------------------------------------- +# Name : dvAccountManagerPassword +# Datatype : String +# Description : Password for DataVault Account Manager +# Valid values : Check Oracle21c Administrator's Guide +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +dvAccountManagerPassword= + +#----------------------------------------------------------------------------- +# Name : olsConfiguration +# Datatype : Boolean +# Description : Specify "True" to configure and enable Oracle Label Security +# Valid values : True/False +# Default value : False +# Mandatory : No +#----------------------------------------------------------------------------- +olsConfiguration= + +#----------------------------------------------------------------------------- +# Name : datafileJarLocation +# Datatype : String +# Description : Location of the data file jar +# Valid values : Directory containing compressed datafile jar +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +datafileJarLocation= + +#----------------------------------------------------------------------------- +# Name : datafileDestination +# Datatype : String +# Description : Location of the data file's +# Valid values : Directory for all the database files +# Default value : $ORACLE_BASE/oradata +# Mandatory : No +#----------------------------------------------------------------------------- +datafileDestination= + +#----------------------------------------------------------------------------- +# Name : recoveryAreaDestination +# Datatype : String +# Description : Location of the data file's +# Valid values : Recovery Area location +# Default value : $ORACLE_BASE/flash_recovery_area +# Mandatory : No 
+#----------------------------------------------------------------------------- +recoveryAreaDestination= + +#----------------------------------------------------------------------------- +# Name : storageType +# Datatype : String +# Description : Specifies the storage on which the database is to be created +# Valid values : FS (CFS for RAC), ASM +# Default value : FS +# Mandatory : No +#----------------------------------------------------------------------------- +storageType= + +#----------------------------------------------------------------------------- +# Name : diskGroupName +# Datatype : String +# Description : Specifies the disk group name for the storage +# Default value : DATA +# Mandatory : No +#----------------------------------------------------------------------------- +diskGroupName= + +#----------------------------------------------------------------------------- +# Name : asmsnmpPassword +# Datatype : String +# Description : Password for ASM Monitoring +# Default value : None +# Mandatory : No +#----------------------------------------------------------------------------- +asmsnmpPassword= + +#----------------------------------------------------------------------------- +# Name : recoveryGroupName +# Datatype : String +# Description : Specifies the disk group name for the recovery area +# Default value : RECOVERY +# Mandatory : No +#----------------------------------------------------------------------------- +recoveryGroupName= + +#----------------------------------------------------------------------------- +# Name : characterSet +# Datatype : String +# Description : Character set of the database +# Valid values : Check Oracle21c National Language Support Guide +# Default value : "US7ASCII" +# Mandatory : NO +#----------------------------------------------------------------------------- +characterSet= + +#----------------------------------------------------------------------------- +# Name : nationalCharacterSet +# Datatype : String +# Description : National Character set of the database +# Valid values : "UTF8" or "AL16UTF16". For details, check Oracle21c National Language Support Guide +# Default value : "AL16UTF16" +# Mandatory : No +#----------------------------------------------------------------------------- +nationalCharacterSet= + +#----------------------------------------------------------------------------- +# Name : registerWithDirService +# Datatype : Boolean +# Description : Specifies whether to register with Directory Service. +# Valid values : TRUE \ FALSE +# Default value : FALSE +# Mandatory : No +#----------------------------------------------------------------------------- +registerWithDirService= + + +#----------------------------------------------------------------------------- +# Name : dirServiceUserName +# Datatype : String +# Description : Specifies the name of the directory service user +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServiceUserName= + +#----------------------------------------------------------------------------- +# Name : dirServicePassword +# Datatype : String +# Description : The password of the directory service user. +# You can also specify the password at the command prompt instead of here. 
+# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +dirServicePassword= + +#----------------------------------------------------------------------------- +# Name : walletPassword +# Datatype : String +# Description : The password for wallet to created or modified. +# You can also specify the password at the command prompt instead of here. +# Mandatory : YES, if the value of registerWithDirService is TRUE +#----------------------------------------------------------------------------- +walletPassword= + +#----------------------------------------------------------------------------- +# Name : listeners +# Datatype : String +# Description : Specifies list of listeners to register the database with. +# By default the database is configured for all the listeners specified in the +# $ORACLE_HOME/network/admin/listener.ora +# Valid values : The list should be comma separated like "listener1,listener2". +# Mandatory : NO +#----------------------------------------------------------------------------- +listeners= + +#----------------------------------------------------------------------------- +# Name : variablesFile +# Datatype : String +# Description : Location of the file containing variable value pair +# Valid values : A valid file-system file. The variable value pair format in this file +# is =. Each pair should be in a new line. +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variablesFile= + +#----------------------------------------------------------------------------- +# Name : variables +# Datatype : String +# Description : comma separated list of name=value pairs. Overrides variables defined in variablefile and templates +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +variables= + +#----------------------------------------------------------------------------- +# Name : initParams +# Datatype : String +# Description : comma separated list of name=value pairs. 
Overrides initialization parameters defined in templates +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +initParams= + +#----------------------------------------------------------------------------- +# Name : sampleSchema +# Datatype : Boolean +# Description : Specifies whether or not to add the Sample Schemas to your database +# Valid values : TRUE \ FALSE +# Default value : FASLE +# Mandatory : No +#----------------------------------------------------------------------------- +sampleSchema= + +#----------------------------------------------------------------------------- +# Name : memoryPercentage +# Datatype : String +# Description : percentage of physical memory for Oracle +# Default value : None +# Mandatory : NO +#----------------------------------------------------------------------------- +memoryPercentage= + +#----------------------------------------------------------------------------- +# Name : databaseType +# Datatype : String +# Description : used for memory distribution when memoryPercentage specified +# Valid values : MULTIPURPOSE|DATA_WAREHOUSING|OLTP +# Default value : MULTIPURPOSE +# Mandatory : NO +#----------------------------------------------------------------------------- +databaseType= + +#----------------------------------------------------------------------------- +# Name : automaticMemoryManagement +# Datatype : Boolean +# Description : flag to indicate Automatic Memory Management is used +# Valid values : TRUE/FALSE +# Default value : TRUE +# Mandatory : NO +#----------------------------------------------------------------------------- +automaticMemoryManagement= + +#----------------------------------------------------------------------------- +# Name : totalMemory +# Datatype : String +# Description : total memory in MB to allocate to Oracle +# Valid values : +# Default value : +# Mandatory : NO +#----------------------------------------------------------------------------- +totalMemory= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/enableRAC.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/enableRAC.sh new file mode 100755 index 0000000000..15e36e22fb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/enableRAC.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Enable RAC feature in Oracle Software +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# shellcheck disable=SC1090 +source /home/"${DB_USER}"/.bashrc + +export ORACLE_HOME=${DB_HOME} +export PATH=${ORACLE_HOME}/bin:/bin:/sbin:/usr/bin +export LD_LIBRARY_PATH=${ORACLE_HOME}/lib:/lib:/usr/lib + +make -f "$DB_HOME"/rdbms/lib/ins_rdbms.mk rac_on +make -f "$DB_HOME"/rdbms/lib/ins_rdbms.mk ioracle diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/fixupPreq.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/fixupPreq.sh new file mode 100755 index 0000000000..1e051dfaa8 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/fixupPreq.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. 
+#
+# Since: January, 2018
+# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com
+# Description: Set up the Linux kernel parameters inside the container. Note that some parameters need to be set on the container host.
+#
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
+
+rpm -Uvh "$GRID_HOME"/cv/rpm/cvuqdisk*
+echo "oracle soft nofile 1024" > /etc/security/limits.conf
+echo "oracle hard nofile 65536" >> /etc/security/limits.conf
+echo "oracle soft nproc 16384" >> /etc/security/limits.conf
+echo "oracle hard nproc 16384" >> /etc/security/limits.conf
+echo "oracle soft stack 10240" >> /etc/security/limits.conf
+echo "oracle hard stack 32768" >> /etc/security/limits.conf
+echo "oracle hard memlock 134217728" >> /etc/security/limits.conf
+echo "oracle soft memlock 134217728" >> /etc/security/limits.conf
+echo "grid soft nofile 1024" >> /etc/security/limits.conf
+echo "grid hard nofile 65536" >> /etc/security/limits.conf
+echo "grid soft nproc 16384" >> /etc/security/limits.conf
+echo "grid hard nproc 16384" >> /etc/security/limits.conf
+echo "grid soft stack 10240" >> /etc/security/limits.conf
+echo "grid hard stack 32768" >> /etc/security/limits.conf
+echo "grid hard memlock 134217728" >> /etc/security/limits.conf
+echo "grid soft memlock 134217728" >> /etc/security/limits.conf
+echo "ulimit -S -s 10240" >> /home/grid/.bashrc
+echo "ulimit -S -s 10240" >> /home/oracle/.bashrc
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid.rsp
new file mode 100644
index 0000000000..c05b65c395
--- /dev/null
+++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid.rsp
@@ -0,0 +1,672 @@
+###############################################################################
+## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ##
+## ##
+## Specify values for the variables listed below to customize ##
+## your installation. ##
+## ##
+## Each variable is associated with a comment. The comment ##
+## can help to populate the variables with the appropriate ##
+## values. ##
+## ##
+## IMPORTANT NOTE: This file contains plain text passwords and ##
+## should be secured to have read permission only by oracle user ##
+## or db administrator who owns this installation.
## +## ## +############################################################################### + +############################################################################### +## ## +## Instructions to fill this response file ## +## To register and configure 'Grid Infrastructure for Cluster' ## +## - Fill out sections A,B,C,D,E,F and G ## +## - Fill out section G if OCR and voting disk should be placed on ASM ## +## ## +## To register and configure 'Grid Infrastructure for Standalone server' ## +## - Fill out sections A,B and G ## +## ## +## To register software for 'Grid Infrastructure' ## +## - Fill out sections A,B and D ## +## - Provide the cluster nodes in section D when choosing CRS_SWONLY as ## +## installation option in section A ## +## ## +## To upgrade clusterware and/or Automatic storage management of earlier ## +## releases ## +## - Fill out sections A,B,C,D and H ## +## ## +## To add more nodes to the cluster ## +## - Fill out sections A and D ## +## - Provide the cluster nodes in section D when choosing CRS_ADDNODE as ## +## installation option in section A ## +## ## +############################################################################### + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v18.0.0 + +############################################################################### +# # +# SECTION A - BASIC # +# # +############################################################################### + + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION=###INVENTORY### + +#------------------------------------------------------------------------------- +# Specify the installation option. +# Allowed values: CRS_CONFIG or HA_CONFIG or UPGRADE or CRS_SWONLY or HA_SWONLY +# - CRS_CONFIG : To register home and configure Grid Infrastructure for cluster +# - HA_CONFIG : To register home and configure Grid Infrastructure for stand alone server +# - UPGRADE : To register home and upgrade clusterware software of earlier release +# - CRS_SWONLY : To register Grid Infrastructure Software home (can be configured for cluster +# or stand alone server later) +# - HA_SWONLY : To register Grid Infrastructure Software home (can be configured for stand +# alone server later. This is only supported on Windows.) +# - CRS_ADDNODE : To add more nodes to the cluster +# - CRS_DELETE_NODE : To delete nodes to the cluster +#------------------------------------------------------------------------------- +oracle.install.option=CRS_CONFIG + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE=###GRID_BASE### + +################################################################################ +# # +# SECTION B - GROUPS # +# # +# The following three groups need to be assigned for all GI installations. # +# OSDBA and OSOPER can be the same or different. OSASM must be different # +# than the other two. 
# +# The value to be specified for OSDBA, OSOPER and OSASM group is only for # +# Unix based Operating System. # +# These groups are not required for upgrades, as they will be determined # +# from the Oracle home to upgrade. # +# # +################################################################################ +#------------------------------------------------------------------------------- +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.asm.OSDBA=dba + +#------------------------------------------------------------------------------- +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +# Value should not be provided if configuring Client Cluster - i.e. storageOption=CLIENT_ASM_STORAGE. +#------------------------------------------------------------------------------- +oracle.install.asm.OSOPER= + +#------------------------------------------------------------------------------- +# The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This +# must be different than the previous two. +#------------------------------------------------------------------------------- +oracle.install.asm.OSASM=asmadmin + +################################################################################ +# # +# SECTION C - SCAN # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the type of SCAN configuration for the cluster +# Allowed values : LOCAL_SCAN and SHARED_SCAN +#------------------------------------------------------------------------------- +oracle.install.crs.config.scanType=###SCAN_TYPE### + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_SCAN is being configured for cluster +# Specify the path to the SCAN client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.SCANClientDataFile=###SHARED_SCAN_FILE### + +#------------------------------------------------------------------------------- +# Specify a name for SCAN +# Applicable if LOCAL_SCAN is being configured for the cluster +# If you choose to configure the cluster with GNS with Auto assigned Node VIPs(DHCP),then the scanName should be specified in the format of 'SCAN name.Cluster name.GNS sub-domain' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.scanName=###SCAN_NAME### + +#------------------------------------------------------------------------------- +# Specify a unused port number for SCAN service +#------------------------------------------------------------------------------- + +oracle.install.crs.config.gpnp.scanPort=###SCAN_PORT### + +################################################################################ +# # +# SECTION D - CLUSTER & GNS # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the required cluster configuration +# Allowed values: STANDALONE, DOMAIN, MEMBERDB, MEMBERAPP +#------------------------------------------------------------------------------- +oracle.install.crs.config.ClusterConfiguration=###CLUSTER_TYPE### + 
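The grid.rsp added above is a template: values such as ###INVENTORY###, ###GRID_BASE###, ###SCAN_TYPE###, ###SCAN_NAME###, ###SCAN_PORT### and ###CLUSTER_TYPE### are placeholders that must be filled in before the file is handed to the grid installer. As a rough, hedged sketch (the sed command and the sample values, which simply mirror the grid1.rsp file later in this patch, are illustrative only and are not the substitution mechanism used by the RAC container scripts), the tokens could be replaced like this:

    #!/bin/bash
    # Illustrative only: fill a few of the ###...### tokens in a working copy of grid.rsp.
    set -euo pipefail
    cp grid.rsp /tmp/grid.rsp
    sed -i \
      -e 's|###INVENTORY###|/u01/app/oraInventory|g' \
      -e 's|###GRID_BASE###|/u01/app/grid|g' \
      -e 's|###SCAN_TYPE###|LOCAL_SCAN|g' \
      -e 's|###SCAN_NAME###|racnode-scan|g' \
      -e 's|###SCAN_PORT###|1521|g' \
      -e 's|###CLUSTER_TYPE###|STANDALONE|g' \
      /tmp/grid.rsp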
+#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure the cluster as Extended, else +# specify 'false' +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.configureAsExtendedCluster=false + + +#------------------------------------------------------------------------------- +# Specify the Member Cluster Manifest file +# +# Applicable only for MEMBERDB and MEMBERAPP cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.memberClusterManifestFile=###MEMBERDB_FILE### + +#------------------------------------------------------------------------------- +# Specify a name for the Cluster you are creating. +# +# The maximum length allowed for clustername is 15 characters. The name can be +# any combination of lower and uppercase alphabets (A - Z), (0 - 9), hyphen(-) +# and underscore(_). +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterName=###CLUSTER_NAME### + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE, DOMAIN, MEMBERDB cluster configuration. +# Specify 'true' if you would like to configure Grid Naming Service(GNS), else +# specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.configureGNS=###CONFIGURE_GNS### + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to configure GNS. +# Specify 'true' if you would like to assign SCAN name VIP and Node VIPs by DHCP +# , else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.autoConfigureClusterNodeVIP=###DHCP_CONF### + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure GNS. +# Specify the type of GNS configuration for cluster +# Allowed values are: CREATE_NEW_GNS and USE_SHARED_GNS +# Only USE_SHARED_GNS value is allowed for MEMBERDB cluster configuration. 
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.gpnp.gnsOption=###GNS_OPTIONS###
+
+#-------------------------------------------------------------------------------
+# Applicable only if SHARED_GNS is being configured for cluster
+# Specify the path to the GNS client data file
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.gpnp.gnsClientDataFile=
+
+#-------------------------------------------------------------------------------
+# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to
+# configure GNS for this cluster oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS
+# Specify the GNS subdomain and an unused virtual hostname for GNS service
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.gpnp.gnsSubDomain=###GNS_SUBDOMAIN###
+oracle.install.crs.config.gpnp.gnsVIPAddress=###GNSVIP_HOSTNAME###
+
+#-------------------------------------------------------------------------------
+# Specify the list of sites - only if configuring an Extended Cluster
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.sites=
+
+#-------------------------------------------------------------------------------
+# Specify the list of nodes that have to be configured to be part of the cluster.
+#
+# The list should be a comma-separated list of tuples. Each tuple should be a
+# colon-separated string that contains
+# - 1 field if you have chosen CRS_SWONLY as installation option, or
+# - 1 field if configuring an Application Cluster, or
+# - 3 fields if configuring a Flex Cluster
+# - 3 fields if adding more nodes to the configured cluster, or
+# - 4 fields if configuring an Extended Cluster
+#
+# The fields should be ordered as follows:
+# 1. The first field should be the public node name.
+# 2. The second field should be the virtual host name
+# (Should be specified as AUTO if you have chosen 'auto configure for VIP'
+# i.e. autoConfigureClusterNodeVIP=true)
+# 3. The third field indicates the role of node (HUB,LEAF). This has to
+# be provided only if Flex Cluster is being configured.
+# For Extended Cluster only HUB should be specified for all nodes
+# 4. The fourth field indicates the site designation for the node. To be specified only if configuring an Extended Cluster.
+# The 2nd and 3rd fields are not applicable if you have chosen CRS_SWONLY as installation option
+# The 2nd and 3rd fields are not applicable if configuring an Application Cluster
+#
+# Examples
+# For registering GI for a cluster software: oracle.install.crs.config.clusterNodes=node1,node2
+# For adding more nodes to the configured cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF
+# For configuring Application Cluster: oracle.install.crs.config.clusterNodes=node1,node2
+# For configuring Flex Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF
+# For configuring Extended Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB:site1,node2:node2-vip:HUB:site2
+# You can specify a range of nodes in the tuple using colon-separated fields of the format
+# hostnameprefix:lowerbound-upperbound:hostnamesuffix:vipsuffix:role of node
+#
+#-------------------------------------------------------------------------------
+#oracle.install.crs.config.clusterNodes=###HOSTNAME###:###HOSTNAME_VIP###:HUB
+oracle.install.crs.config.clusterNodes=###CRS_CONFIG_NODES###
+
+#-------------------------------------------------------------------------------
+# The value should be a comma-separated list of strings, where each string is as shown below
+# InterfaceName:SubnetAddress:InterfaceType
+# where InterfaceType can be either "1", "2", "3", "4", or "5"
+# InterfaceType stands for the following values
+# - 1 : PUBLIC
+# - 2 : PRIVATE
+# - 3 : DO NOT USE
+# - 4 : ASM
+# - 5 : ASM & PRIVATE
+#
+# For example: eth0:140.87.24.0:1,eth1:10.2.1.0:2,eth2:140.87.52.0:3
+#
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.networkInterfaceList=###NETWORK_STRING###
+
+#-------------------------------------------------------------------------------
+# Create a separate ASM DiskGroup to store GIMR data.
+# Specify 'true' if you would like to separate GIMR data from clusterware data,
+# else specify 'false'
+# Value should be 'true' for DOMAIN cluster configurations
+# Value can be true/false for STANDALONE cluster configurations.
+#------------------------------------------------------------------------------ +oracle.install.asm.configureGIMRDataDG=###GIMR_DG_FLAG### + +################################################################################ +# # +# SECTION E - STORAGE # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for Oracle Cluster Registry(OCR) and Voting +# Disks files +# - FLEX_ASM_STORAGE +# - CLIENT_ASM_STORAGE +# +# Applicable only for MEMBERDB cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.storageOption=###STORAGE_OPTIONS_FOR_MEMBERDB### +################################################################################ +# # +# SECTION F - IPMI # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure Intelligent Power Management interface +# (IPMI), else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.useIPMI=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure IPMI +# i.e. oracle.install.crs.config.useIPMI=true +# Specify the username and password for using IPMI service +#------------------------------------------------------------------------------- +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +################################################################################ +# # +# SECTION G - ASM # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# ASM Storage Type +# Allowed values are : ASM and ASM_ON_NAS +# ASM_ON_NAS applicable only if +# oracle.install.crs.config.ClusterConfiguration=STANDALONE +#------------------------------------------------------------------------------- +oracle.install.asm.storageOption=ASM + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing OCR/VDSK +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store OCR/VDSK files +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.ocrLocation= +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup on NAS to store GIMR data +# Specify 'true' if you would like to separate GIMR data with clusterware data, else +# specify 'false' +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------ +oracle.install.asmOnNAS.configureGIMRDataDG=false + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing GIMR data +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store the GIMR database +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +# and oracle.install.asmOnNAS.configureGIMRDataDG=true 
+#-------------------------------------------------------------------------------
+oracle.install.asmOnNAS.gimrLocation=
+
+#-------------------------------------------------------------------------------
+# Password for SYS user of Oracle ASM
+#-------------------------------------------------------------------------------
+oracle.install.asm.SYSASMPassword=###PASSWORD###
+
+#-------------------------------------------------------------------------------
+# The ASM DiskGroup
+#
+# Example: oracle.install.asm.diskGroup.name=data
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.name=###DB_ASM_DISKGROUP###
+
+#-------------------------------------------------------------------------------
+# Redundancy level to be used by ASM.
+# It can be one of the following
+# - NORMAL
+# - HIGH
+# - EXTERNAL
+# - FLEX# - EXTENDED (required if oracle.install.crs.config.ClusterConfiguration=EXTENDED)
+# Example: oracle.install.asm.diskGroup.redundancy=NORMAL
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.redundancy=EXTERNAL
+
+#-------------------------------------------------------------------------------
+# Allocation unit size to be used by ASM.
+# It can be one of the following values
+# - 1
+# - 2
+# - 4
+# - 8
+# - 16
+# Example: oracle.install.asm.diskGroup.AUSize=4
+# size unit is MB
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.AUSize=4
+
+#-------------------------------------------------------------------------------
+# Failure Groups for the disk group
+# If configuring for Extended cluster specify as list of "failure group name:site"
+# tuples.
+# Else just specify as list of failure group names
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.FailureGroups=
+
+#-------------------------------------------------------------------------------
+# List of disks and their failure groups to create an ASM DiskGroup
+# (Use this if each of the disks has an associated failure group)
+# Failure Groups are not required if oracle.install.asm.diskGroup.redundancy=EXTERNAL
+# Example:
+# For Unix based Operating System:
+# oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName
+# For Windows based Operating System:
+# oracle.install.asm.diskGroup.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName
+#
+#-------------------------------------------------------------------------------
+#oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-disk1,,/dev/asm-disk2,,/dev/asm-disk3,
+oracle.install.asm.diskGroup.disksWithFailureGroupNames=###ASM_DISKGROUP_FG_DISKS###
+
+#-------------------------------------------------------------------------------
+# List of disks to create an ASM DiskGroup
+# (Use this variable only if failure groups configuration is not required)
+# Example:
+# For Unix based Operating System:
+# oracle.install.asm.diskGroup.disks=/oracle/asm/disk1,/oracle/asm/disk2
+# For Windows based Operating System:
+# oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1
+#
+#-------------------------------------------------------------------------------
+#oracle.install.asm.diskGroup.disks=/dev/asm-disk1,/dev/asm-disk2,/dev/asm-disk3
+oracle.install.asm.diskGroup.disks=###ASM_DISKGROUP_DISKS###
+
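Whichever device list ends up substituted for ###ASM_DISKGROUP_DISKS###, each entry must already exist and be readable by the grid owner when the installer runs. A small, hedged pre-flight check is sketched below; the device paths simply reuse the commented /dev/asm-disk* example above and are assumptions, not values from a real deployment:

    #!/bin/bash
    # Illustrative only: verify every entry of a comma-separated ASM disk list.
    ASM_DISKGROUP_DISKS="/dev/asm-disk1,/dev/asm-disk2,/dev/asm-disk3"
    IFS=',' read -r -a disks <<< "$ASM_DISKGROUP_DISKS"
    for disk in "${disks[@]}"; do
      if [ -b "$disk" ] || [ -f "$disk" ]; then
        echo "found ASM disk: $disk"
      else
        echo "missing ASM disk: $disk" >&2
        exit 1
      fi
    done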
+#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.diskGroup.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# The disk discovery string to be used to discover the disks used create a ASM DiskGroup +# +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=/oracle/asm/* +# For Windows based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK* +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm* +oracle.install.asm.diskGroup.diskDiscoveryString=###ASM_DISCOVERY_STRING### + +#------------------------------------------------------------------------------- +# Password for ASMSNMP account +# ASMSNMP account is used by Oracle Enterprise Manager to monitor Oracle ASM instances +#------------------------------------------------------------------------------- +oracle.install.asm.monitorPassword=###PASSWORD### + +#------------------------------------------------------------------------------- +# GIMR Storage data ASM DiskGroup +# Applicable only when +# oracle.install.asm.configureGIMRDataDG=true +# Example: oracle.install.asm.GIMRDG.name=MGMT +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.name=###GIMR_DG_NAME### + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (only if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.gimrDG.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.redundancy=###GIMR_DG_REDUNDANCY### + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.gimrDG.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the GIMR storage data ASM disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. 
+# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.FailureGroups=###GIMR_DG_FAILURE_GROUP### + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create GIMR data ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.gimrDG.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disksWithFailureGroupNames=###GIMR_DISKGROUP_FG_DISKS### + +#------------------------------------------------------------------------------- +# List of disks to create GIMR data ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disks=###GIMR_DISKGROUP_DISKS### + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.gimrDG.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# Configure AFD - ASM Filter Driver +# Applicable only for FLEX_ASM_STORAGE option +# Specify 'true' if you want to configure AFD, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.asm.configureAFD=false +#------------------------------------------------------------------------------- +# Configure RHPS - Rapid Home Provisioning Service +# Applicable only for DOMAIN cluster configuration +# Specify 'true' if you want to configure RHP service, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.configureRHPS=false + +################################################################################ +# # +# SECTION H - UPGRADE # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify whether to ignore down nodes during upgrade operation. 
+# Value should be 'true' to ignore down nodes otherwise specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.ignoreDownNodes=false +################################################################################ +# # +# MANAGEMENT OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing Oracle Grid Infrastructure +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +# 2. NONE -If you do not want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +#------------------------------------------------------------------------------- +oracle.install.config.managementOption=NONE + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsPort=0 + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminPassword= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.executeRootScript=false + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. 
+# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.configMethod=ROOT +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoUserName= +#-------------------------------------------------------------------------------------- +# Specify the nodes batch map. +# +# This should be a comma separated list of node:batch pairs. +# During upgrade, you can sequence the automatic execution of root scripts +# by pooling the nodes into batches. +# A maximum of three batches can be specified. +# Installer will execute the root scripts on all the nodes in one batch before +# proceeding to next batch. +# Root script execution on the local node must be in Batch 1. +# Only one type of node role can be used for each batch. +# Root script execution should be done first in all HUB nodes and then, when +# existent, in all the LEAF nodes. +# +# Examples: +# 1. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:2,HUBNode3:2,LEAFNode4:3 +# 2. oracle.install.crs.config.batchinfo=HUBNode1:1,LEAFNode2:2,LEAFNode3:2,LEAFNode4:2 +# 3. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:1,LEAFNode3:2,LEAFNode4:3 +# +# Applicable only for UPGRADE install option. +#-------------------------------------------------------------------------------------- +oracle.install.crs.config.batchinfo= +################################################################################ +# # +# APPLICATION CLUSTER OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the Virtual hostname to configure virtual access for your Application +# The value to be specified for Virtual hostname is optional. +#------------------------------------------------------------------------------- +oracle.install.crs.app.applicationAddress= +################################################################################# +# # +# DELETE NODE OPTIONS # +# # +################################################################################# + +#-------------------------------------------------------------------------------- +# Specify the node names to delete nodes from cluster. +# Delete node will be performed only for the remote nodes from the cluster. 
+#-------------------------------------------------------------------------------- +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid1.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid1.rsp new file mode 100644 index 0000000000..4e2737c73b --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid1.rsp @@ -0,0 +1,671 @@ +############################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +############################################################################### + +############################################################################### +## ## +## Instructions to fill this response file ## +## To register and configure 'Grid Infrastructure for Cluster' ## +## - Fill out sections A,B,C,D,E,F and G ## +## - Fill out section G if OCR and voting disk should be placed on ASM ## +## ## +## To register and configure 'Grid Infrastructure for Standalone server' ## +## - Fill out sections A,B and G ## +## ## +## To register software for 'Grid Infrastructure' ## +## - Fill out sections A,B and D ## +## - Provide the cluster nodes in section D when choosing CRS_SWONLY as ## +## installation option in section A ## +## ## +## To upgrade clusterware and/or Automatic storage management of earlier ## +## releases ## +## - Fill out sections A,B,C,D and H ## +## ## +## To add more nodes to the cluster ## +## - Fill out sections A and D ## +## - Provide the cluster nodes in section D when choosing CRS_ADDNODE as ## +## installation option in section A ## +## ## +############################################################################### + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v18.0.0 + +############################################################################### +# # +# SECTION A - BASIC # +# # +############################################################################### + + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION=/u01/app/oraInventory + +#------------------------------------------------------------------------------- +# Specify the installation option. 
+# Allowed values: CRS_CONFIG or HA_CONFIG or UPGRADE or CRS_SWONLY or HA_SWONLY +# - CRS_CONFIG : To register home and configure Grid Infrastructure for cluster +# - HA_CONFIG : To register home and configure Grid Infrastructure for stand alone server +# - UPGRADE : To register home and upgrade clusterware software of earlier release +# - CRS_SWONLY : To register Grid Infrastructure Software home (can be configured for cluster +# or stand alone server later) +# - HA_SWONLY : To register Grid Infrastructure Software home (can be configured for stand +# alone server later. This is only supported on Windows.) +# - CRS_ADDNODE : To add more nodes to the cluster +# - CRS_DELETE_NODE : To delete nodes to the cluster +#------------------------------------------------------------------------------- +oracle.install.option=CRS_CONFIG + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE=/u01/app/grid + +################################################################################ +# # +# SECTION B - GROUPS # +# # +# The following three groups need to be assigned for all GI installations. # +# OSDBA and OSOPER can be the same or different. OSASM must be different # +# than the other two. # +# The value to be specified for OSDBA, OSOPER and OSASM group is only for # +# Unix based Operating System. # +# These groups are not required for upgrades, as they will be determined # +# from the Oracle home to upgrade. # +# # +################################################################################ +#------------------------------------------------------------------------------- +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.asm.OSDBA=dba + +#------------------------------------------------------------------------------- +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +# Value should not be provided if configuring Client Cluster - i.e. storageOption=CLIENT_ASM_STORAGE. +#------------------------------------------------------------------------------- +oracle.install.asm.OSOPER= + +#------------------------------------------------------------------------------- +# The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This +# must be different than the previous two. 
+#------------------------------------------------------------------------------- +oracle.install.asm.OSASM=asmadmin + +################################################################################ +# # +# SECTION C - SCAN # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the type of SCAN configuration for the cluster +# Allowed values : LOCAL_SCAN and SHARED_SCAN +#------------------------------------------------------------------------------- +oracle.install.crs.config.scanType=LOCAL_SCAN + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_SCAN is being configured for cluster +# Specify the path to the SCAN client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.SCANClientDataFile= + +#------------------------------------------------------------------------------- +# Specify a name for SCAN +# Applicable if LOCAL_SCAN is being configured for the cluster +# If you choose to configure the cluster with GNS with Auto assigned Node VIPs(DHCP),then the scanName should be specified in the format of 'SCAN name.Cluster name.GNS sub-domain' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.scanName=racnode-scan + +#------------------------------------------------------------------------------- +# Specify a unused port number for SCAN service +#------------------------------------------------------------------------------- + +oracle.install.crs.config.gpnp.scanPort=1521 + +################################################################################ +# # +# SECTION D - CLUSTER & GNS # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the required cluster configuration +# Allowed values: STANDALONE, DOMAIN, MEMBERDB, MEMBERAPP +#------------------------------------------------------------------------------- +oracle.install.crs.config.ClusterConfiguration=STANDALONE + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure the cluster as Extended, else +# specify 'false' +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.configureAsExtendedCluster=false + + +#------------------------------------------------------------------------------- +# Specify the Member Cluster Manifest file +# +# Applicable only for MEMBERDB and MEMBERAPP cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.memberClusterManifestFile= + +#------------------------------------------------------------------------------- +# Specify a name for the Cluster you are creating. +# +# The maximum length allowed for clustername is 15 characters. The name can be +# any combination of lower and uppercase alphabets (A - Z), (0 - 9), hyphen(-) +# and underscore(_). 
+# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterName=rac01cluster + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE, DOMAIN, MEMBERDB cluster configuration. +# Specify 'true' if you would like to configure Grid Naming Service(GNS), else +# specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.configureGNS= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to configure GNS. +# Specify 'true' if you would like to assign SCAN name VIP and Node VIPs by DHCP +# , else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.autoConfigureClusterNodeVIP=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure GNS. +# Specify the type of GNS configuration for cluster +# Allowed values are: CREATE_NEW_GNS and USE_SHARED_GNS +# Only USE_SHARED_GNS value is allowed for MEMBERDB cluster configuration. +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsOption= + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_GNS is being configured for cluster +# Specify the path to the GNS client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsClientDataFile= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to +# configure GNS for this cluster oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS +# Specify the GNS subdomain and an unused virtual hostname for GNS service +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= + +#------------------------------------------------------------------------------- +# Specify the list of sites - only if configuring an Extended Cluster +#------------------------------------------------------------------------------- +oracle.install.crs.config.sites= + +#------------------------------------------------------------------------------- +# Specify the list of nodes that have to be configured to be part of the cluster. +# +# The list should a comma-separated list of tuples. Each tuple should be a +# colon-separated string that contains +# - 1 field if you have chosen CRS_SWONLY as installation option, or +# - 1 field if configuring an Application Cluster, or +# - 3 fields if configuring a Flex Cluster +# - 3 fields if adding more nodes to the configured cluster, or +# - 4 fields if configuring an Extended Cluster +# +# The fields should be ordered as follows: +# 1. The first field should be the public node name. +# 2. The second field should be the virtual host name +# (Should be specified as AUTO if you have chosen 'auto configure for VIP' +# i.e. autoConfigureClusterNodeVIP=true) +# 3. The third field indicates the role of node (HUB,LEAF). This has to +# be provided only if Flex Cluster is being configured. 
+# For Extended Cluster only HUB should be specified for all nodes
+# 4. The fourth field indicates the site designation for the node. To be specified only if configuring an Extended Cluster.
+# The 2nd and 3rd fields are not applicable if you have chosen CRS_SWONLY as installation option
+# The 2nd and 3rd fields are not applicable if configuring an Application Cluster
+#
+# Examples
+# For registering GI for a cluster software: oracle.install.crs.config.clusterNodes=node1,node2
+# For adding more nodes to the configured cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF
+# For configuring Application Cluster: oracle.install.crs.config.clusterNodes=node1,node2
+# For configuring Flex Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF
+# For configuring Extended Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB:site1,node2:node2-vip:HUB:site2
+# You can specify a range of nodes in the tuple using colon-separated fields of the format
+# hostnameprefix:lowerbound-upperbound:hostnamesuffix:vipsuffix:role of node
+#
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.clusterNodes=racnode1:racnode1-vip:HUB,racnode2:racnode2-vip:HUB
+
+#-------------------------------------------------------------------------------
+# The value should be a comma-separated list of strings, where each string is as shown below
+# InterfaceName:SubnetAddress:InterfaceType
+# where InterfaceType can be either "1", "2", "3", "4", or "5"
+# InterfaceType stands for the following values
+# - 1 : PUBLIC
+# - 2 : PRIVATE
+# - 3 : DO NOT USE
+# - 4 : ASM
+# - 5 : ASM & PRIVATE
+#
+# For example: eth0:140.87.24.0:1,eth1:10.2.1.0:2,eth2:140.87.52.0:3
+#
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.networkInterfaceList=eth0:192.168.17.0:5,eth1:172.16.1.0:1
+
+#-------------------------------------------------------------------------------
+# Create a separate ASM DiskGroup to store GIMR data.
+# Specify 'true' if you would like to separate GIMR data from clusterware data,
+# else specify 'false'
+# Value should be 'true' for DOMAIN cluster configurations
+# Value can be true/false for STANDALONE cluster configurations.
+#------------------------------------------------------------------------------ +oracle.install.asm.configureGIMRDataDG=false + +################################################################################ +# # +# SECTION E - STORAGE # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for Oracle Cluster Registry(OCR) and Voting +# Disks files +# - FLEX_ASM_STORAGE +# - CLIENT_ASM_STORAGE +# +# Applicable only for MEMBERDB cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.storageOption= +################################################################################ +# # +# SECTION F - IPMI # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure Intelligent Power Management interface +# (IPMI), else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.useIPMI=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure IPMI +# i.e. oracle.install.crs.config.useIPMI=true +# Specify the username and password for using IPMI service +#------------------------------------------------------------------------------- +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +################################################################################ +# # +# SECTION G - ASM # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# ASM Storage Type +# Allowed values are : ASM and ASM_ON_NAS +# ASM_ON_NAS applicable only if +# oracle.install.crs.config.ClusterConfiguration=STANDALONE +#------------------------------------------------------------------------------- +oracle.install.asm.storageOption=ASM + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing OCR/VDSK +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store OCR/VDSK files +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.ocrLocation= +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup on NAS to store GIMR data +# Specify 'true' if you would like to separate GIMR data with clusterware data, else +# specify 'false' +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------ +oracle.install.asmOnNAS.configureGIMRDataDG=false + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing GIMR data +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store the GIMR database +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +# and oracle.install.asmOnNAS.configureGIMRDataDG=true 
+#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.gimrLocation= + +#------------------------------------------------------------------------------- +# Password for SYS user of Oracle ASM +#------------------------------------------------------------------------------- +oracle.install.asm.SYSASMPassword=Oracle_12c + +#------------------------------------------------------------------------------- +# The ASM DiskGroup +# +# Example: oracle.install.asm.diskGroup.name=data +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.name=DATA + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (required if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.diskGroup.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.redundancy=EXTERNAL + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.diskGroup.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.AUSize=4 + +#------------------------------------------------------------------------------- +# Failure Groups for the disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. +# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create a ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.diskGroup.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-disk1,,/dev/asm-disk2,,/dev/asm-disk3, +oracle.install.asm.diskGroup.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create a ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.disks=/dev/asm-disk1,/dev/asm-disk2,/dev/asm-disk3 +oracle.install.asm.diskGroup.disks=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img + 
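Unlike the grid.rsp template, this grid1.rsp sample points the DATA disk group at five file-backed ASM disks under /oradata. Those image files have to exist on the shared volume before installation; a minimal sketch of creating them is shown below, where the 5 GB size and the grid:asmadmin ownership are assumptions for illustration rather than values taken from this patch:

    #!/bin/bash
    # Illustrative only: pre-create the file-backed ASM disks named in grid1.rsp.
    set -euo pipefail
    for i in 01 02 03 04 05; do
      disk="/oradata/asm_disk${i}.img"
      [ -f "$disk" ] || dd if=/dev/zero of="$disk" bs=1M count=5120
    done
    chown grid:asmadmin /oradata/asm_disk0[1-5].img
    chmod 660 /oradata/asm_disk0[1-5].img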
+#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.diskGroup.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# The disk discovery string to be used to discover the disks used create a ASM DiskGroup +# +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=/oracle/asm/* +# For Windows based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK* +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm* +oracle.install.asm.diskGroup.diskDiscoveryString=/oradata/asm_* + +#------------------------------------------------------------------------------- +# Password for ASMSNMP account +# ASMSNMP account is used by Oracle Enterprise Manager to monitor Oracle ASM instances +#------------------------------------------------------------------------------- +oracle.install.asm.monitorPassword=Oracle_12c + +#------------------------------------------------------------------------------- +# GIMR Storage data ASM DiskGroup +# Applicable only when +# oracle.install.asm.configureGIMRDataDG=true +# Example: oracle.install.asm.GIMRDG.name=MGMT +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.name= + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (only if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.gimrDG.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.redundancy= + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.gimrDG.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the GIMR storage data ASM disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. 
+# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create GIMR data ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.gimrDG.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create GIMR data ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.gimrDG.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# Configure AFD - ASM Filter Driver +# Applicable only for FLEX_ASM_STORAGE option +# Specify 'true' if you want to configure AFD, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.asm.configureAFD=false +#------------------------------------------------------------------------------- +# Configure RHPS - Rapid Home Provisioning Service +# Applicable only for DOMAIN cluster configuration +# Specify 'true' if you want to configure RHP service, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.configureRHPS=false + +################################################################################ +# # +# SECTION H - UPGRADE # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify whether to ignore down nodes during upgrade operation. 
+# Value should be 'true' to ignore down nodes otherwise specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.ignoreDownNodes=false +################################################################################ +# # +# MANAGEMENT OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing Oracle Grid Infrastructure +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +# 2. NONE -If you do not want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +#------------------------------------------------------------------------------- +oracle.install.config.managementOption=NONE + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsPort=0 + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminPassword= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.executeRootScript=false + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. 
+# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.configMethod=ROOT +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoUserName= +#-------------------------------------------------------------------------------------- +# Specify the nodes batch map. +# +# This should be a comma separated list of node:batch pairs. +# During upgrade, you can sequence the automatic execution of root scripts +# by pooling the nodes into batches. +# A maximum of three batches can be specified. +# Installer will execute the root scripts on all the nodes in one batch before +# proceeding to next batch. +# Root script execution on the local node must be in Batch 1. +# Only one type of node role can be used for each batch. +# Root script execution should be done first in all HUB nodes and then, when +# existent, in all the LEAF nodes. +# +# Examples: +# 1. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:2,HUBNode3:2,LEAFNode4:3 +# 2. oracle.install.crs.config.batchinfo=HUBNode1:1,LEAFNode2:2,LEAFNode3:2,LEAFNode4:2 +# 3. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:1,LEAFNode3:2,LEAFNode4:3 +# +# Applicable only for UPGRADE install option. +#-------------------------------------------------------------------------------------- +oracle.install.crs.config.batchinfo= +################################################################################ +# # +# APPLICATION CLUSTER OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the Virtual hostname to configure virtual access for your Application +# The value to be specified for Virtual hostname is optional. +#------------------------------------------------------------------------------- +oracle.install.crs.app.applicationAddress= +################################################################################# +# # +# DELETE NODE OPTIONS # +# # +################################################################################# + +#-------------------------------------------------------------------------------- +# Specify the node names to delete nodes from cluster. +# Delete node will be performed only for the remote nodes from the cluster. 
+#-------------------------------------------------------------------------------- +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid_addnode.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid_addnode.rsp new file mode 100644 index 0000000000..3ad05ef82c --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid_addnode.rsp @@ -0,0 +1,672 @@ +############################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +############################################################################### + +############################################################################### +## ## +## Instructions to fill this response file ## +## To register and configure 'Grid Infrastructure for Cluster' ## +## - Fill out sections A,B,C,D,E,F and G ## +## - Fill out section G if OCR and voting disk should be placed on ASM ## +## ## +## To register and configure 'Grid Infrastructure for Standalone server' ## +## - Fill out sections A,B and G ## +## ## +## To register software for 'Grid Infrastructure' ## +## - Fill out sections A,B and D ## +## - Provide the cluster nodes in section D when choosing CRS_SWONLY as ## +## installation option in section A ## +## ## +## To upgrade clusterware and/or Automatic storage management of earlier ## +## releases ## +## - Fill out sections A,B,C,D and H ## +## ## +## To add more nodes to the cluster ## +## - Fill out sections A and D ## +## - Provide the cluster nodes in section D when choosing CRS_ADDNODE as ## +## installation option in section A ## +## ## +############################################################################### + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v18.0.0 + +############################################################################### +# # +# SECTION A - BASIC # +# # +############################################################################### + + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION=###INVENTORY### + +#------------------------------------------------------------------------------- +# Specify the installation option. 
+# Allowed values: CRS_CONFIG or HA_CONFIG or UPGRADE or CRS_SWONLY or HA_SWONLY
+# - CRS_CONFIG : To register home and configure Grid Infrastructure for cluster
+# - HA_CONFIG : To register home and configure Grid Infrastructure for stand alone server
+# - UPGRADE : To register home and upgrade clusterware software of earlier release
+# - CRS_SWONLY : To register Grid Infrastructure Software home (can be configured for cluster
+# or stand alone server later)
+# - HA_SWONLY : To register Grid Infrastructure Software home (can be configured for stand
+# alone server later. This is only supported on Windows.)
+# - CRS_ADDNODE : To add more nodes to the cluster
+# - CRS_DELETE_NODE : To delete nodes from the cluster
+#-------------------------------------------------------------------------------
+oracle.install.option=CRS_ADDNODE
+
+#-------------------------------------------------------------------------------
+# Specify the complete path of the Oracle Base.
+#-------------------------------------------------------------------------------
+ORACLE_BASE=###GRID_BASE###
+
+################################################################################
+# #
+# SECTION B - GROUPS #
+# #
+# The following three groups need to be assigned for all GI installations. #
+# OSDBA and OSOPER can be the same or different. OSASM must be different #
+# than the other two. #
+# The value to be specified for OSDBA, OSOPER and OSASM group is only for #
+# Unix based Operating System. #
+# These groups are not required for upgrades, as they will be determined #
+# from the Oracle home to upgrade. #
+# #
+################################################################################
+#-------------------------------------------------------------------------------
+# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges.
+#-------------------------------------------------------------------------------
+oracle.install.asm.OSDBA=asmdba
+
+#-------------------------------------------------------------------------------
+# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges.
+# The value to be specified for OSOPER group is optional.
+# Value should not be provided if configuring Client Cluster - i.e. storageOption=CLIENT_ASM_STORAGE.
+#-------------------------------------------------------------------------------
+oracle.install.asm.OSOPER=asmoper
+
+#-------------------------------------------------------------------------------
+# The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This
+# must be different than the previous two.
+#------------------------------------------------------------------------------- +oracle.install.asm.OSASM=asmadmin + +################################################################################ +# # +# SECTION C - SCAN # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the type of SCAN configuration for the cluster +# Allowed values : LOCAL_SCAN and SHARED_SCAN +#------------------------------------------------------------------------------- +oracle.install.crs.config.scanType= + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_SCAN is being configured for cluster +# Specify the path to the SCAN client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.SCANClientDataFile= + +#------------------------------------------------------------------------------- +# Specify a name for SCAN +# Applicable if LOCAL_SCAN is being configured for the cluster +# If you choose to configure the cluster with GNS with Auto assigned Node VIPs(DHCP),then the scanName should be specified in the format of 'SCAN name.Cluster name.GNS sub-domain' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.scanName= + +#------------------------------------------------------------------------------- +# Specify a unused port number for SCAN service +#------------------------------------------------------------------------------- + +oracle.install.crs.config.gpnp.scanPort= + +################################################################################ +# # +# SECTION D - CLUSTER & GNS # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the required cluster configuration +# Allowed values: STANDALONE, DOMAIN, MEMBERDB, MEMBERAPP +#------------------------------------------------------------------------------- +oracle.install.crs.config.ClusterConfiguration= + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure the cluster as Extended, else +# specify 'false' +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.configureAsExtendedCluster=false + + +#------------------------------------------------------------------------------- +# Specify the Member Cluster Manifest file +# +# Applicable only for MEMBERDB and MEMBERAPP cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.memberClusterManifestFile= + +#------------------------------------------------------------------------------- +# Specify a name for the Cluster you are creating. +# +# The maximum length allowed for clustername is 15 characters. The name can be +# any combination of lower and uppercase alphabets (A - Z), (0 - 9), hyphen(-) +# and underscore(_). 
+#
+# Applicable only for STANDALONE and DOMAIN cluster configuration
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.clusterName=
+
+#-------------------------------------------------------------------------------
+# Applicable only for STANDALONE, DOMAIN, MEMBERDB cluster configuration.
+# Specify 'true' if you would like to configure Grid Naming Service(GNS), else
+# specify 'false'
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.gpnp.configureGNS=false
+
+#-------------------------------------------------------------------------------
+# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to configure GNS.
+# Specify 'true' if you would like to assign SCAN name VIP and Node VIPs by DHCP
+# , else specify 'false'
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.autoConfigureClusterNodeVIP=false
+
+#-------------------------------------------------------------------------------
+# Applicable only if you choose to configure GNS.
+# Specify the type of GNS configuration for cluster
+# Allowed values are: CREATE_NEW_GNS and USE_SHARED_GNS
+# Only USE_SHARED_GNS value is allowed for MEMBERDB cluster configuration.
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS
+
+#-------------------------------------------------------------------------------
+# Applicable only if SHARED_GNS is being configured for cluster
+# Specify the path to the GNS client data file
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.gpnp.gnsClientDataFile=
+
+#-------------------------------------------------------------------------------
+# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to
+# configure GNS for this cluster oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS
+# Specify the GNS subdomain and an unused virtual hostname for GNS service
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.gpnp.gnsSubDomain=
+oracle.install.crs.config.gpnp.gnsVIPAddress=
+
+#-------------------------------------------------------------------------------
+# Specify the list of sites - only if configuring an Extended Cluster
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.sites=
+
+#-------------------------------------------------------------------------------
+# Specify the list of nodes that have to be configured to be part of the cluster.
+#
+# The list should be a comma-separated list of tuples. Each tuple should be a
+# colon-separated string that contains
+# - 1 field if you have chosen CRS_SWONLY as installation option, or
+# - 1 field if configuring an Application Cluster, or
+# - 3 fields if configuring a Flex Cluster
+# - 3 fields if adding more nodes to the configured cluster, or
+# - 4 fields if configuring an Extended Cluster
+#
+# The fields should be ordered as follows:
+# 1. The first field should be the public node name.
+# 2. The second field should be the virtual host name
+# (Should be specified as AUTO if you have chosen 'auto configure for VIP'
+# i.e. autoConfigureClusterNodeVIP=true)
+# 3. The third field indicates the role of node (HUB,LEAF).
This has to
+# be provided only if Flex Cluster is being configured.
+# For Extended Cluster only HUB should be specified for all nodes
+# 4. The fourth field indicates the site designation for the node. To be specified only if configuring an Extended Cluster.
+# The 2nd and 3rd fields are not applicable if you have chosen CRS_SWONLY as installation option
+# The 2nd and 3rd fields are not applicable if configuring an Application Cluster
+#
+# Examples
+# For registering GI for a cluster software: oracle.install.crs.config.clusterNodes=node1,node2
+# For adding more nodes to the configured cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF
+# For configuring Application Cluster: oracle.install.crs.config.clusterNodes=node1,node2
+# For configuring Flex Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB,node2:node2-vip:LEAF
+# For configuring Extended Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:HUB:site1,node2:node2-vip:HUB:site2
+# You can specify a range of nodes in the tuple using colon separated fields of format
+# hostnameprefix:lowerbound-upperbound:hostnamesuffix:vipsuffix:role of node
+#
+#-------------------------------------------------------------------------------
+#oracle.install.crs.config.clusterNodes=###PUBLIC_HOSTNAME###:###HOSTNAME_VIP###:HUB
+oracle.install.crs.config.clusterNodes=###CRS_CONFIG_NODES###
+
+#-------------------------------------------------------------------------------
+# The value should be comma separated strings where each string is as shown below
+# InterfaceName:SubnetAddress:InterfaceType
+# where InterfaceType can be either "1", "2", "3", "4", or "5"
+# InterfaceType stands for the following values
+# - 1 : PUBLIC
+# - 2 : PRIVATE
+# - 3 : DO NOT USE
+# - 4 : ASM
+# - 5 : ASM & PRIVATE
+#
+# For example: eth0:140.87.24.0:1,eth1:10.2.1.0:2,eth2:140.87.52.0:3
+#
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.networkInterfaceList=
+
+#------------------------------------------------------------------------------
+# Create a separate ASM DiskGroup to store GIMR data.
+# Specify 'true' if you would like to separate GIMR data from clusterware data,
+# else specify 'false'
+# Value should be 'true' for DOMAIN cluster configurations
+# Value can be true/false for STANDALONE cluster configurations.
+#------------------------------------------------------------------------------ +oracle.install.asm.configureGIMRDataDG=false + +################################################################################ +# # +# SECTION E - STORAGE # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for Oracle Cluster Registry(OCR) and Voting +# Disks files +# - FLEX_ASM_STORAGE +# - CLIENT_ASM_STORAGE +# +# Applicable only for MEMBERDB cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.storageOption= +################################################################################ +# # +# SECTION F - IPMI # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure Intelligent Power Management interface +# (IPMI), else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.useIPMI=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure IPMI +# i.e. oracle.install.crs.config.useIPMI=true +# Specify the username and password for using IPMI service +#------------------------------------------------------------------------------- +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +################################################################################ +# # +# SECTION G - ASM # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# ASM Storage Type +# Allowed values are : ASM and ASM_ON_NAS +# ASM_ON_NAS applicable only if +# oracle.install.crs.config.ClusterConfiguration=STANDALONE +#------------------------------------------------------------------------------- +oracle.install.asm.storageOption=ASM + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing OCR/VDSK +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store OCR/VDSK files +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.ocrLocation= +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup on NAS to store GIMR data +# Specify 'true' if you would like to separate GIMR data with clusterware data, else +# specify 'false' +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +#------------------------------------------------------------------------------ +oracle.install.asmOnNAS.configureGIMRDataDG=false + +#------------------------------------------------------------------------------- +# NAS location to create ASM disk group for storing GIMR data +# Specify the NAS location where you want the ASM disk group to be created +# to be used to store the GIMR database +# Applicable only if oracle.install.asm.storageOption=ASM_ON_NAS +# and oracle.install.asmOnNAS.configureGIMRDataDG=true 
+#------------------------------------------------------------------------------- +oracle.install.asmOnNAS.gimrLocation= + +#------------------------------------------------------------------------------- +# Password for SYS user of Oracle ASM +#------------------------------------------------------------------------------- +oracle.install.asm.SYSASMPassword= + +#------------------------------------------------------------------------------- +# The ASM DiskGroup +# +# Example: oracle.install.asm.diskGroup.name=data +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.name=DATA + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (required if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.diskGroup.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.redundancy= + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.diskGroup.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. +# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create a ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.diskGroup.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-disk1,,/dev/asm-disk2,,/dev/asm-disk3, +oracle.install.asm.diskGroup.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create a ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.disks=/dev/asm-disk1,/dev/asm-disk2,/dev/asm-disk3 +oracle.install.asm.diskGroup.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. 
+# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.diskGroup.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# The disk discovery string to be used to discover the disks used create a ASM DiskGroup +# +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=/oracle/asm/* +# For Windows based Operating System: +# oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK* +# +#------------------------------------------------------------------------------- +#oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm* +oracle.install.asm.diskGroup.diskDiscoveryString= + +#------------------------------------------------------------------------------- +# Password for ASMSNMP account +# ASMSNMP account is used by Oracle Enterprise Manager to monitor Oracle ASM instances +#------------------------------------------------------------------------------- +oracle.install.asm.monitorPassword= + +#------------------------------------------------------------------------------- +# GIMR Storage data ASM DiskGroup +# Applicable only when +# oracle.install.asm.configureGIMRDataDG=true +# Example: oracle.install.asm.GIMRDG.name=MGMT +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.name= + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX# - EXTENDED (only if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.gimrDG.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.redundancy= + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.gimrDG.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the GIMR storage data ASM disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. 
+# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create GIMR data ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.gimrDG.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create GIMR data ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.gimrDG.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# Configure AFD - ASM Filter Driver +# Applicable only for FLEX_ASM_STORAGE option +# Specify 'true' if you want to configure AFD, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.asm.configureAFD=false +#------------------------------------------------------------------------------- +# Configure RHPS - Rapid Home Provisioning Service +# Applicable only for DOMAIN cluster configuration +# Specify 'true' if you want to configure RHP service, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.configureRHPS=false + +################################################################################ +# # +# SECTION H - UPGRADE # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify whether to ignore down nodes during upgrade operation. 
+# Value should be 'true' to ignore down nodes otherwise specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.ignoreDownNodes=false +################################################################################ +# # +# MANAGEMENT OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing Oracle Grid Infrastructure +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +# 2. NONE -If you do not want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +#------------------------------------------------------------------------------- +oracle.install.config.managementOption=NONE + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsPort=0 + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminPassword= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.executeRootScript=false + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. 
+# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.configMethod=ROOT +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoUserName= +#-------------------------------------------------------------------------------------- +# Specify the nodes batch map. +# +# This should be a comma separated list of node:batch pairs. +# During upgrade, you can sequence the automatic execution of root scripts +# by pooling the nodes into batches. +# A maximum of three batches can be specified. +# Installer will execute the root scripts on all the nodes in one batch before +# proceeding to next batch. +# Root script execution on the local node must be in Batch 1. +# Only one type of node role can be used for each batch. +# Root script execution should be done first in all HUB nodes and then, when +# existent, in all the LEAF nodes. +# +# Examples: +# 1. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:2,HUBNode3:2,LEAFNode4:3 +# 2. oracle.install.crs.config.batchinfo=HUBNode1:1,LEAFNode2:2,LEAFNode3:2,LEAFNode4:2 +# 3. oracle.install.crs.config.batchinfo=HUBNode1:1,HUBNode2:1,LEAFNode3:2,LEAFNode4:3 +# +# Applicable only for UPGRADE install option. +#-------------------------------------------------------------------------------------- +oracle.install.crs.config.batchinfo= +################################################################################ +# # +# APPLICATION CLUSTER OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the Virtual hostname to configure virtual access for your Application +# The value to be specified for Virtual hostname is optional. +#------------------------------------------------------------------------------- +oracle.install.crs.app.applicationAddress= +################################################################################# +# # +# DELETE NODE OPTIONS # +# # +################################################################################# + +#-------------------------------------------------------------------------------- +# Specify the node names to delete nodes from cluster. +# Delete node will be performed only for the remote nodes from the cluster. 
+#-------------------------------------------------------------------------------- +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid_addnode_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid_addnode_21c.rsp new file mode 100644 index 0000000000..9aa74d2c44 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid_addnode_21c.rsp @@ -0,0 +1,67 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 +INVENTORY_LOCATION=###INVENTORY### +oracle.install.option=CRS_ADDNODE +ORACLE_BASE=###GRID_BASE### +oracle.install.asm.OSDBA=asmdba +oracle.install.asm.OSOPER=asmoper +oracle.install.asm.OSASM=asmadmin +oracle.install.crs.config.scanType= +oracle.install.crs.config.SCANClientDataFile= +oracle.install.crs.config.gpnp.scanName= +oracle.install.crs.config.gpnp.scanPort= +oracle.install.crs.config.ClusterConfiguration= +oracle.install.crs.config.configureAsExtendedCluster=false +oracle.install.crs.config.clusterName= +oracle.install.crs.config.gpnp.configureGNS=false +oracle.install.crs.config.autoConfigureClusterNodeVIP=false +oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS +oracle.install.crs.config.gpnp.gnsClientDataFile= +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= +oracle.install.crs.config.sites= +oracle.install.crs.config.clusterNodes=###CRS_CONFIG_NODES### +oracle.install.crs.config.networkInterfaceList= +oracle.install.crs.config.storageOption= +oracle.install.crs.exascale.vault.name= +oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations= +oracle.install.crs.config.sharedFileSystemStorage.ocrLocations= +oracle.install.asm.ClientDataFile= +oracle.install.crs.config.useIPMI=false +oracle.install.crs.config.ipmi.bmcBinpath= +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +oracle.install.asm.SYSASMPassword= +oracle.install.asm.diskGroup.name=DATA +oracle.install.asm.diskGroup.redundancy= +oracle.install.asm.diskGroup.AUSize=1 +oracle.install.asm.diskGroup.FailureGroups= +oracle.install.asm.diskGroup.disksWithFailureGroupNames= +oracle.install.asm.diskGroup.disks= +oracle.install.asm.diskGroup.quorumFailureGroupNames= +oracle.install.asm.diskGroup.diskDiscoveryString= +oracle.install.asm.monitorPassword= +oracle.install.asm.configureAFD=false +oracle.install.crs.configureRHPS=false +oracle.install.crs.config.ignoreDownNodes=false +oracle.install.crs.configureGIMR= +oracle.install.crs.configureRemoteGIMR= +oracle.install.crs.RemoteGIMRCredFile= +oracle.install.asm.configureGIMRDataDG= +oracle.install.asm.gimrDG.name= +oracle.install.asm.gimrDG.redundancy= +oracle.install.asm.gimrDG.AUSize=1 +oracle.install.asm.gimrDG.FailureGroups= +oracle.install.asm.gimrDG.disksWithFailureGroupNames= +oracle.install.asm.gimrDG.disks= +oracle.install.asm.gimrDG.quorumFailureGroupNames= +oracle.install.config.managementOption=NONE +oracle.install.config.omsHost= +oracle.install.config.omsPort=0 +oracle.install.config.emAdminUser= +oracle.install.config.emAdminPassword= +oracle.install.crs.rootconfig.executeRootScript=false +oracle.install.crs.rootconfig.configMethod=ROOT +oracle.install.crs.rootconfig.sudoPath= +oracle.install.crs.rootconfig.sudoUserName= +oracle.install.crs.config.batchinfo= +oracle.install.crs.deleteNode.nodes= diff --git 
a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid_sw_install_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid_sw_install_21c.rsp new file mode 100644 index 0000000000..8f50e83bb0 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/grid_sw_install_21c.rsp @@ -0,0 +1,661 @@ +############################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. ## +## ## +############################################################################### + +############################################################################### +## ## +## Instructions to fill this response file ## +## To register and configure 'Grid Infrastructure for Cluster' ## +## - Fill out sections A,B,C,D,E,F and G ## +## - Fill out section G if OCR and voting disk should be placed on ASM ## +## ## +## To register and configure 'Grid Infrastructure for Standalone server' ## +## - Fill out sections A,B and G ## +## ## +## To register software for 'Grid Infrastructure' ## +## - Fill out sections A,B and D ## +## - Provide the cluster nodes in section D when choosing CRS_SWONLY as ## +## installation option in section A ## +## ## +## To upgrade clusterware and/or Automatic storage management of earlier ## +## releases ## +## - Fill out sections A,B,C,D and H ## +## ## +## To add more nodes to the cluster ## +## - Fill out sections A and D ## +## - Provide the cluster nodes in section D when choosing CRS_ADDNODE as ## +## installation option in section A ## +## ## +############################################################################### + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 + +############################################################################### +# # +# SECTION A - BASIC # +# # +############################################################################### + + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION=###INVENTORY### + +#------------------------------------------------------------------------------- +# Specify the installation option. 
+# Allowed values: CRS_CONFIG or HA_CONFIG or UPGRADE or CRS_SWONLY or HA_SWONLY
+# - CRS_CONFIG : To register home and configure Grid Infrastructure for cluster
+# - HA_CONFIG : To register home and configure Grid Infrastructure for stand alone server
+# - UPGRADE : To register home and upgrade clusterware software of earlier release
+# - CRS_SWONLY : To register Grid Infrastructure Software home (can be configured for cluster
+# or stand alone server later)
+# - HA_SWONLY : To register Grid Infrastructure Software home (can be configured for stand
+# alone server later. This is only supported on Windows.)
+# - CRS_ADDNODE : To add more nodes to the cluster
+# - CRS_DELETE_NODE : To delete nodes from the cluster
+#-------------------------------------------------------------------------------
+oracle.install.option=###INSTALL_TYPE###
+
+#-------------------------------------------------------------------------------
+# Specify the complete path of the Oracle Base.
+#-------------------------------------------------------------------------------
+ORACLE_BASE=###GRID_BASE###
+
+################################################################################
+# #
+# SECTION B - GROUPS #
+# #
+# The following three groups need to be assigned for all GI installations. #
+# OSDBA and OSOPER can be the same or different. OSASM must be different #
+# than the other two. #
+# The value to be specified for OSDBA, OSOPER and OSASM group is only for #
+# Unix based Operating System. #
+# These groups are not required for upgrades, as they will be determined #
+# from the Oracle home to upgrade. #
+# #
+################################################################################
+#-------------------------------------------------------------------------------
+# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges.
+#-------------------------------------------------------------------------------
+oracle.install.asm.OSDBA=asmdba
+
+#-------------------------------------------------------------------------------
+# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges.
+# The value to be specified for OSOPER group is optional.
+# Value should not be provided if configuring Client Cluster - i.e. storageOption=CLIENT_ASM_STORAGE.
+#-------------------------------------------------------------------------------
+oracle.install.asm.OSOPER=asmoper
+
+#-------------------------------------------------------------------------------
+# The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This
+# must be different than the previous two.
+#------------------------------------------------------------------------------- +oracle.install.asm.OSASM=asmadmin + +################################################################################ +# # +# SECTION C - SCAN # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the type of SCAN configuration for the cluster +# Allowed values : LOCAL_SCAN and SHARED_SCAN +#------------------------------------------------------------------------------- +oracle.install.crs.config.scanType=LOCAL_SCAN + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_SCAN is being configured for cluster +# Specify the path to the SCAN client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.SCANClientDataFile= + +#------------------------------------------------------------------------------- +# Specify a name for SCAN +# Applicable if LOCAL_SCAN is being configured for the cluster +# If you choose to configure the cluster with GNS with Auto assigned Node VIPs(DHCP),then the scanName should be specified in the format of 'SCAN name.Cluster name.GNS sub-domain' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.scanName= + +#------------------------------------------------------------------------------- +# Specify a unused port number for SCAN service +#------------------------------------------------------------------------------- + +oracle.install.crs.config.gpnp.scanPort= + + +################################################################################ +# # +# SECTION D - CLUSTER & GNS # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the required cluster configuration +# Allowed values: STANDALONE, DOMAIN +#------------------------------------------------------------------------------- +oracle.install.crs.config.ClusterConfiguration= + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure the cluster as Extended, else +# specify 'false' +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.configureAsExtendedCluster= + + +#------------------------------------------------------------------------------- +# Specify a name for the Cluster you are creating. +# +# The maximum length allowed for clustername is 63 characters. The name can be +# any combination of lower and uppercase alphabets (A - Z), (0 - 9) and hyphens (-). +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterName= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE, DOMAIN cluster configuration. 
+# Specify 'true' if you would like to configure Grid Naming Service(GNS), else
+# specify 'false'
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.gpnp.configureGNS=false
+
+#-------------------------------------------------------------------------------
+# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to configure GNS.
+# Specify 'true' if you would like to assign SCAN name VIP and Node VIPs by DHCP
+# , else specify 'false'
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.autoConfigureClusterNodeVIP=false
+
+#-------------------------------------------------------------------------------
+# Applicable only if you choose to configure GNS.
+# Specify the type of GNS configuration for cluster
+# Allowed values are: CREATE_NEW_GNS and USE_SHARED_GNS
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS
+
+#-------------------------------------------------------------------------------
+# Applicable only if SHARED_GNS is being configured for cluster
+# Specify the path to the GNS client data file
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.gpnp.gnsClientDataFile=
+
+#-------------------------------------------------------------------------------
+# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to
+# configure GNS for this cluster oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS
+# Specify the GNS subdomain and an unused virtual hostname for GNS service
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.gpnp.gnsSubDomain=
+oracle.install.crs.config.gpnp.gnsVIPAddress=
+
+#-------------------------------------------------------------------------------
+# Specify the list of sites - only if configuring an Extended Cluster
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.sites=
+
+#-------------------------------------------------------------------------------
+# Specify the list of nodes that have to be configured to be part of the cluster.
+#
+# The list should be a comma-separated list of tuples. Each tuple should be a
+# colon-separated string that contains
+# - 1 field if you have chosen CRS_SWONLY as installation option, or
+# - 2 fields if configuring a Flex Cluster
+# - 2 fields if adding more nodes to the configured cluster, or
+# - 3 fields if configuring an Extended Cluster
+#
+# The fields should be ordered as follows:
+# 1. The first field should be the public node name.
+# 2. The second field should be the virtual host name
+# (Should be specified as AUTO if you have chosen 'auto configure for VIP'
+# i.e. autoConfigureClusterNodeVIP=true)
+# 3. The third field indicates the site designation for the node. To be specified only if configuring an Extended Cluster.
+# Only the 1st field is applicable if you have chosen CRS_SWONLY as installation option + +# +# Examples +# For registering GI for a cluster software: oracle.install.crs.config.clusterNodes=node1,node2 +# For adding more nodes to the configured cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip,node2:node2-vip +# For configuring Flex Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip,node2:node2-vip +# For configuring Extended Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:site1,node2:node2-vip:site2 +# You can specify a range of nodes in the tuple using colon separated fields of format +# hostnameprefix:lowerbound-upperbound:hostnamesuffix:vipsuffix +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterNodes=###HOSTNAME### + +#------------------------------------------------------------------------------- +# The value should be a comma separated strings where each string is as shown below +# InterfaceName:SubnetAddress:InterfaceType +# where InterfaceType can be either "1", "2", "3", "4", or "5" +# InterfaceType stand for the following values +# - 1 : PUBLIC +# - 2 : PRIVATE +# - 3 : DO NOT USE +# - 4 : ASM +# - 5 : ASM & PRIVATE +# +# For example: eth0:140.87.24.0:1,eth1:10.2.1.0:2,eth2:140.87.52.0:3 +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.networkInterfaceList= + +################################################################################ +# # +# SECTION E - STORAGE # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for Oracle Cluster Registry(OCR) and Voting +# Disks files. Only applicable for Standalone cluster. +# - FLEX_ASM_STORAGE +# - CLIENT_ASM_STORAGE +# - FILE_SYSTEM_STORAGE +# - EXASCALE_STORAGE +# +# Option FILE_SYSTEM_STORAGE is only for STANDALONE cluster configuration. +#------------------------------------------------------------------------------- +oracle.install.crs.config.storageOption= +#------------------------------------------------------------------------------- +# Specify the vault name if EXASCALE_STORAGE is selected as storage option. +# Example: +# oracle.install.crs.exascale.vault.name=myvault +#------------------------------------------------------------------------------- +oracle.install.crs.exascale.vault.name= +#------------------------------------------------------------------------------- +# These properties are applicable only if FILE_SYSTEM_STORAGE is chosen for +# storing OCR and voting disk +# Specify the location(s) for OCR and voting disks +# Three(3) or one(1) location(s) should be specified for OCR and voting disk, +# separated by commas. +# Example: +# For Unix based Operating System: +# oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations=/oradbocfs/storage/vdsk1,/oradbocfs/storage/vdsk2,/oradbocfs/storage/vdsk3 +# oracle.install.crs.config.sharedFileSystemStorage.ocrLocations=/oradbocfs/storage/ocr1,/oradbocfs/storage/ocr2,/oradbocfs/storage/ocr3 +# For Windows based Operating System OCR/VDSK on shared storage is not supported. 
+#------------------------------------------------------------------------------- +oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations= +oracle.install.crs.config.sharedFileSystemStorage.ocrLocations= + +#------------------------------------------------------------------------------- +# Applicable only if configuring CLIENT_ASM_STORAGE for OCR/Voting Disk storage +# Specify the path to Client ASM Data file +#------------------------------------------------------------------------------- +oracle.install.asm.ClientDataFile= +################################################################################ +# # +# SECTION F - IPMI # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to configure Intelligent Power Management interface +# (IPMI), else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.useIPMI=false + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure IPMI +# i.e. oracle.install.crs.config.useIPMI=true +# Specify the location of the ipmiutil binary +# Specify the username and password for using IPMI service +#------------------------------------------------------------------------------- +oracle.install.crs.config.ipmi.bmcBinpath= +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= + +################################################################################ +# # +# SECTION G - ASM # +# # +################################################################################ + + +#------------------------------------------------------------------------------- +# Password for SYS user of Oracle ASM +#------------------------------------------------------------------------------- +oracle.install.asm.SYSASMPassword= + +#------------------------------------------------------------------------------- +# The ASM DiskGroup +# +# Example: oracle.install.asm.diskGroup.name=data +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.name= + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX +# - EXTENDED (required if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.diskGroup.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.redundancy= + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. +# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.diskGroup.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. 
+# Else just specify as list of failure group names
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.FailureGroups=
+
+#-------------------------------------------------------------------------------
+# List of disks and their failure groups to create an ASM DiskGroup
+# (Use this if each of the disks has an associated failure group)
+# Failure Groups are not required if oracle.install.asm.diskGroup.redundancy=EXTERNAL
+# Example:
+# For Unix based Operating System:
+# oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName
+# For Windows based Operating System:
+# oracle.install.asm.diskGroup.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.disksWithFailureGroupNames=
+
+#-------------------------------------------------------------------------------
+# List of disks to create an ASM DiskGroup
+# (Use this variable only if failure groups configuration is not required)
+# Example:
+# For Unix based Operating System:
+# oracle.install.asm.diskGroup.disks=/oracle/asm/disk1,/oracle/asm/disk2
+# For Windows based Operating System:
+# oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.disks=
+
+#-------------------------------------------------------------------------------
+# List of failure groups to be marked as QUORUM.
+# Quorum failure groups contain only voting disk data, no user data is stored
+# Example:
+# oracle.install.asm.diskGroup.quorumFailureGroupNames=FGName1,FGName2
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.quorumFailureGroupNames=
+#-------------------------------------------------------------------------------
+# The disk discovery string to be used to discover the disks used to create an ASM DiskGroup
+#
+# Example:
+# For Unix based Operating System:
+# oracle.install.asm.diskGroup.diskDiscoveryString=/oracle/asm/*
+# For Windows based Operating System:
+# oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK*
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.diskDiscoveryString=
+
+#-------------------------------------------------------------------------------
+# Password for ASMSNMP account
+# ASMSNMP account is used by Oracle Enterprise Manager to monitor Oracle ASM instances
+#-------------------------------------------------------------------------------
+oracle.install.asm.monitorPassword=
+
+#-------------------------------------------------------------------------------
+# Configure AFD - ASM Filter Driver
+# Applicable only for FLEX_ASM_STORAGE option
+# Specify 'true' if you want to configure AFD, else specify 'false'
+#-------------------------------------------------------------------------------
+oracle.install.asm.configureAFD=false
+#-------------------------------------------------------------------------------
+# Configure RHPS - Rapid Home Provisioning Service
+# Applicable only for DOMAIN cluster configuration
+# Specify 'true' if you want to configure RHP service, else specify 'false'
+#-------------------------------------------------------------------------------
+oracle.install.crs.configureRHPS=false
+
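The ###...### tokens in this response file (for example, ###HOSTNAME### in the clusterNodes entry above) are build-time placeholders; placeholders of this kind are rewritten with sed by the install scripts added later in this change (see installGridBinaries.sh) before the installer is invoked. A minimal sketch of that substitution, assuming a hypothetical response-file path and inventory location (the real scripts operate on "$INSTALL_SCRIPTS"/"$GRID_SW_INSTALL_RSP"):

#!/bin/bash
# Sketch only: fill in build-time placeholders in a copy of a grid setup response file.
RSP_FILE=/tmp/gridsetup.rsp                                            # assumed example path
NODE_NAME=$(hostname)
sed -i -e "s|###HOSTNAME###|${NODE_NAME}|g" "${RSP_FILE}"
sed -i -e "s|###INVENTORY###|/u01/app/oraInventory|g" "${RSP_FILE}"    # assumed inventory path
# Warn if any placeholder is still unresolved before the file is handed to gridSetup.sh.
if grep -qE '###[A-Z_]+###' "${RSP_FILE}"; then
  echo "WARNING: unresolved placeholders remain in ${RSP_FILE}"
fi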
+################################################################################ +# # +# SECTION H - UPGRADE # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify whether to ignore down nodes during upgrade operation. +# Value should be 'true' to ignore down nodes otherwise specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.ignoreDownNodes= + +################################################################################ +# # +# SECTION I - GIMR # +# # +################################################################################ + +#------------------------------------------------------------------------------ +# Specify 'true' if you would like to configure Grid Infrastructure Management +# Repository (GIMR), else specify 'false'. Applicable only if CRS_CONFIG is +# chosen as install option and STANDALONE is chosen as cluster configuration. +# If you want to use or configure +# Local GIMR : oracle.install.crs.configureGIMR=true and oracle.install.crs.configureRemoteGIMR=false +# Remote GIMR : oracle.install.crs.configureGIMR=true, oracle.install.crs.configureRemoteGIMR=true +# and oracle.install.crs.RemoteGIMRCredFile= path of the GIMR cred file +# No GIMR : oracle.install.crs.configureGIMR=false +#------------------------------------------------------------------------------ +oracle.install.crs.configureGIMR= +oracle.install.crs.configureRemoteGIMR= +oracle.install.crs.RemoteGIMRCredFile= + +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup to store GIMR data. +# Specify 'true' if you would like to separate GIMR data with clusterware data, +# else specify 'false' +# Value should be 'true' for DOMAIN cluster configurations +# Value can be true/false for STANDALONE cluster configurations. +#------------------------------------------------------------------------------ +oracle.install.asm.configureGIMRDataDG= + +#------------------------------------------------------------------------------- +# GIMR Storage data ASM DiskGroup +# Applicable only when +# oracle.install.asm.configureGIMRDataDG=true +# Example: oracle.install.asm.GIMRDG.name=MGMT +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.name= + +#------------------------------------------------------------------------------- +# Redundancy level to be used by ASM. +# It can be one of the following +# - NORMAL +# - HIGH +# - EXTERNAL +# - FLEX +# - EXTENDED (only if oracle.install.crs.config.ClusterConfiguration=EXTENDED) +# Example: oracle.install.asm.gimrDG.redundancy=NORMAL +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.redundancy= + +#------------------------------------------------------------------------------- +# Allocation unit size to be used by ASM. 
+# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.gimrDG.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.AUSize=1 + +#------------------------------------------------------------------------------- +# Failure Groups for the GIMR storage data ASM disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. +# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create GIMR data ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.gimrDG.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create GIMR data ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.gimrDG.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.quorumFailureGroupNames= +################################################################################ +# # +# MANAGEMENT OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing Oracle Grid Infrastructure +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +# 2. NONE -If you do not want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +#------------------------------------------------------------------------------- +oracle.install.config.managementOption=NONE + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. 
+# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsPort=0 + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminPassword= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.executeRootScript=false + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. +# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.configMethod= +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. +# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# Applicable only when SUDO configuration method was chosen. +# Note:For Grid Infrastructure for Standalone server installations,the sudo user name must be the username of the user performing the installation. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoUserName= +#-------------------------------------------------------------------------------------- +# Specify the nodes batch map. +# +# This should be a comma separated list of node:batch pairs. +# During upgrade, you can sequence the automatic execution of root scripts +# by pooling the nodes into batches. +# A maximum of three batches can be specified. 
+# Installer will execute the root scripts on all the nodes in one batch before +# proceeding to next batch. +# Root script execution on the local node must be in Batch 1. +# Examples: +# 1. oracle.install.crs.config.batchinfo=Node1:1,Node2:2,Node3:2,Node4:3 +# 2. oracle.install.crs.config.batchinfo=Node1:1,Node2:2,Node3:2,Node4:2 +# 3. oracle.install.crs.config.batchinfo=Node1:1,Node2:1,Node3:2,Node4:3 +# +# Applicable only for UPGRADE install option. +#-------------------------------------------------------------------------------------- +oracle.install.crs.config.batchinfo= +################################################################################# +# # +# DELETE NODE OPTIONS # +# # +################################################################################# + +#-------------------------------------------------------------------------------- +# Specify the node names to delete nodes from cluster. +# Delete node will be performed only for the remote nodes from the cluster. +#-------------------------------------------------------------------------------- +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/gridsetup_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/gridsetup_21c.rsp new file mode 100644 index 0000000000..d982d76f52 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/gridsetup_21c.rsp @@ -0,0 +1,67 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 +INVENTORY_LOCATION=###INVENTORY### +oracle.install.option=CRS_CONFIG +ORACLE_BASE=###GRID_BASE### +oracle.install.asm.OSDBA=asmdba +oracle.install.asm.OSOPER= +oracle.install.asm.OSASM=asmadmin +oracle.install.crs.config.scanType=###SCAN_TYPE### +oracle.install.crs.config.SCANClientDataFile=###SHARED_SCAN_FILE### +oracle.install.crs.config.gpnp.scanName=###SCAN_NAME### +oracle.install.crs.config.gpnp.scanPort=###SCAN_PORT### +oracle.install.crs.config.ClusterConfiguration=###CLUSTER_TYPE### +oracle.install.crs.config.configureAsExtendedCluster=false +oracle.install.crs.config.clusterName=###CLUSTER_NAME### +oracle.install.crs.config.gpnp.configureGNS=###CONFIGURE_GNS### +oracle.install.crs.config.autoConfigureClusterNodeVIP=###DHCP_CONF### +oracle.install.crs.config.gpnp.gnsOption=###GNS_OPTIONS### +oracle.install.crs.config.gpnp.gnsClientDataFile= +oracle.install.crs.config.gpnp.gnsSubDomain=###GNS_SUBDOMAIN### +oracle.install.crs.config.gpnp.gnsVIPAddress=###GNSVIP_HOSTNAME### +oracle.install.crs.config.sites= +oracle.install.crs.config.clusterNodes=###CRS_CONFIG_NODES### +oracle.install.crs.config.networkInterfaceList=###NETWORK_STRING### +oracle.install.crs.config.storageOption=###STORAGE_OPTIONS_FOR_MEMBERDB### +oracle.install.crs.exascale.vault.name= +oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations= +oracle.install.crs.config.sharedFileSystemStorage.ocrLocations= +oracle.install.asm.ClientDataFile= +oracle.install.crs.config.useIPMI= +oracle.install.crs.config.ipmi.bmcBinpath= +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +oracle.install.asm.SYSASMPassword=###PASSWORD### +oracle.install.asm.diskGroup.name=###DB_ASM_DISKGROUP### +oracle.install.asm.diskGroup.redundancy=###ASM_REDUNDANCY### +oracle.install.asm.diskGroup.AUSize=4 +oracle.install.asm.diskGroup.FailureGroups=###ASM_DG_FAILURE_GROUP### 
+oracle.install.asm.diskGroup.disksWithFailureGroupNames=###ASM_DISKGROUP_FG_DISKS### +oracle.install.asm.diskGroup.disks=###ASM_DISKGROUP_DISKS### +oracle.install.asm.diskGroup.quorumFailureGroupNames= +oracle.install.asm.diskGroup.diskDiscoveryString=###ASM_DISCOVERY_STRING### +oracle.install.asm.monitorPassword=###PASSWORD### +oracle.install.asm.configureAFD=false +oracle.install.crs.configureRHPS=false +oracle.install.crs.config.ignoreDownNodes= +oracle.install.crs.configureGIMR= +oracle.install.crs.configureRemoteGIMR= +oracle.install.crs.RemoteGIMRCredFile= +oracle.install.asm.configureGIMRDataDG= +oracle.install.asm.gimrDG.name=###GIMR_DG_NAME### +oracle.install.asm.gimrDG.redundancy=###GIMR_DG_REDUNDANCY### +oracle.install.asm.gimrDG.AUSize=1 +oracle.install.asm.gimrDG.FailureGroups=###GIMR_DG_FAILURE_GROUP### +oracle.install.asm.gimrDG.disksWithFailureGroupNames=###GIMR_DISKGROUP_FG_DISKS### +oracle.install.asm.gimrDG.disks=###GIMR_DISKGROUP_DISKS### +oracle.install.asm.gimrDG.quorumFailureGroupNames= +oracle.install.config.managementOption=NONE +oracle.install.config.omsHost= +oracle.install.config.omsPort=0 +oracle.install.config.emAdminUser= +oracle.install.config.emAdminPassword= +oracle.install.crs.rootconfig.executeRootScript=false +oracle.install.crs.rootconfig.configMethod=ROOT +oracle.install.crs.rootconfig.sudoPath= +oracle.install.crs.rootconfig.sudoUserName= +oracle.install.crs.config.batchinfo= +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/gridsetup_21cv1.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/gridsetup_21cv1.rsp new file mode 100644 index 0000000000..78cdbd650a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/gridsetup_21cv1.rsp @@ -0,0 +1,653 @@ +############################################################################### +## Copyright(c) Oracle Corporation 1998,2025. All rights reserved. ## +## ## +## Specify values for the variables listed below to customize ## +## your installation. ## +## ## +## Each variable is associated with a comment. The comment ## +## can help to populate the variables with the appropriate ## +## values. ## +## ## +## IMPORTANT NOTE: This file contains plain text passwords and ## +## should be secured to have read permission only by oracle user ## +## or db administrator who owns this installation. 
## +## ## +############################################################################### + +############################################################################### +## ## +## Instructions to fill this response file ## +## To register and configure 'Grid Infrastructure for Cluster' ## +## - Fill out sections A,B,C,D,E,F and G ## +## - Fill out section G if OCR and voting disk should be placed on ASM ## +## ## +## To register and configure 'Grid Infrastructure for Standalone server' ## +## - Fill out sections A,B and G ## +## ## +## To register software for 'Grid Infrastructure' ## +## - Fill out sections A,B and D ## +## - Provide the cluster nodes in section D when choosing CRS_SWONLY as ## +## installation option in section A ## +## ## +## To upgrade clusterware and/or Automatic storage management of earlier ## +## releases ## +## - Fill out sections A,B,C,D and H ## +## ## +## To add more nodes to the cluster ## +## - Fill out sections A and D ## +## - Provide the cluster nodes in section D when choosing CRS_ADDNODE as ## +## installation option in section A ## +## ## +############################################################################### + +#------------------------------------------------------------------------------ +# Do not change the following system generated value. +#------------------------------------------------------------------------------ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 + +############################################################################### +# # +# SECTION A - BASIC # +# # +############################################################################### + + +#------------------------------------------------------------------------------- +# Specify the location which holds the inventory files. +# This is an optional parameter if installing on +# Windows based Operating System. +#------------------------------------------------------------------------------- +INVENTORY_LOCATION= + +#------------------------------------------------------------------------------- +# Specify the installation option. +# Allowed values: CRS_CONFIG or HA_CONFIG or UPGRADE or CRS_SWONLY or HA_SWONLY +# - CRS_CONFIG : To register home and configure Grid Infrastructure for cluster +# - HA_CONFIG : To register home and configure Grid Infrastructure for stand alone server +# - UPGRADE : To register home and upgrade clusterware software of earlier release +# - CRS_SWONLY : To register Grid Infrastructure Software home (can be configured for cluster +# or stand alone server later) +# - HA_SWONLY : To register Grid Infrastructure Software home (can be configured for stand +# alone server later. This is only supported on Windows.) +# - CRS_ADDNODE : To add more nodes to the cluster +# - CRS_DELETE_NODE : To delete nodes to the cluster +#------------------------------------------------------------------------------- +oracle.install.option= + +#------------------------------------------------------------------------------- +# Specify the complete path of the Oracle Base. +#------------------------------------------------------------------------------- +ORACLE_BASE= + +################################################################################ +# # +# SECTION B - GROUPS # +# # +# The following three groups need to be assigned for all GI installations. # +# OSDBA and OSOPER can be the same or different. OSASM must be different # +# than the other two. 
# +# The value to be specified for OSDBA, OSOPER and OSASM group is only for # +# Unix based Operating System. # +# These groups are not required for upgrades, as they will be determined # +# from the Oracle home to upgrade. # +# # +################################################################################ +#------------------------------------------------------------------------------- +# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. +#------------------------------------------------------------------------------- +oracle.install.asm.OSDBA= + +#------------------------------------------------------------------------------- +# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. +# The value to be specified for OSOPER group is optional. +# Value should not be provided if configuring Client Cluster - i.e. storageOption=CLIENT_ASM_STORAGE. +#------------------------------------------------------------------------------- +oracle.install.asm.OSOPER= + +#------------------------------------------------------------------------------- +# The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This +# must be different than the previous two. +#------------------------------------------------------------------------------- +oracle.install.asm.OSASM= + +################################################################################ +# # +# SECTION C - SCAN # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the type of SCAN configuration for the cluster +# Allowed values : LOCAL_SCAN and SHARED_SCAN +#------------------------------------------------------------------------------- +oracle.install.crs.config.scanType= + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_SCAN is being configured for cluster +# Specify the path to the SCAN client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.SCANClientDataFile= + +#------------------------------------------------------------------------------- +# Specify a name for SCAN +# Applicable if LOCAL_SCAN is being configured for the cluster +# If you choose to configure the cluster with GNS with Auto assigned Node VIPs(DHCP),then the scanName should be specified in the format of 'SCAN name.Cluster name.GNS sub-domain' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.scanName= + +#------------------------------------------------------------------------------- +# Specify a unused port number for SCAN service +#------------------------------------------------------------------------------- + +oracle.install.crs.config.gpnp.scanPort= + +################################################################################ +# # +# SECTION D - CLUSTER & GNS # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify the required cluster configuration +# Allowed values: STANDALONE, DOMAIN, MEMBERDB, MEMBERAPP +#------------------------------------------------------------------------------- +oracle.install.crs.config.ClusterConfiguration= + +#------------------------------------------------------------------------------- +# Specify 'true' if you would like to 
configure the cluster as Extended, else +# specify 'false' +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.configureAsExtendedCluster= + + +#------------------------------------------------------------------------------- +# Specify the Member Cluster Manifest file +# +# Applicable only for MEMBERDB and MEMBERAPP cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.memberClusterManifestFile= + +#------------------------------------------------------------------------------- +# Specify a name for the Cluster you are creating. +# +# The maximum length allowed for clustername is 63 characters. The name can be +# any combination of lower and uppercase alphabets (A - Z), (0 - 9) and hyphens (-). +# +# Applicable only for STANDALONE and DOMAIN cluster configuration +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterName= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE, DOMAIN, MEMBERDB cluster configuration. +# Specify 'true' if you would like to configure Grid Naming Service(GNS), else +# specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.configureGNS= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to configure GNS. +# Specify 'true' if you would like to assign SCAN name VIP and Node VIPs by DHCP +# , else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.autoConfigureClusterNodeVIP= + +#------------------------------------------------------------------------------- +# Applicable only if you choose to configure GNS. +# Specify the type of GNS configuration for cluster +# Allowed values are: CREATE_NEW_GNS and USE_SHARED_GNS +# Only USE_SHARED_GNS value is allowed for MEMBERDB cluster configuration. 
+#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsOption= + +#------------------------------------------------------------------------------- +# Applicable only if SHARED_GNS is being configured for cluster +# Specify the path to the GNS client data file +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsClientDataFile= + +#------------------------------------------------------------------------------- +# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to +# configure GNS for this cluster oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS +# Specify the GNS subdomain and an unused virtual hostname for GNS service +#------------------------------------------------------------------------------- +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= + +#------------------------------------------------------------------------------- +# Specify the list of sites - only if configuring an Extended Cluster +#------------------------------------------------------------------------------- +oracle.install.crs.config.sites= + +#------------------------------------------------------------------------------- +# Specify the list of nodes that have to be configured to be part of the cluster. +# +# The list should a comma-separated list of tuples. Each tuple should be a +# colon-separated string that contains +# - 1 field if you have chosen CRS_SWONLY as installation option, or +# - 1 field if configuring an Application Cluster, or +# - 3 fields if configuring a Flex Cluster +# - 3 fields if adding more nodes to the configured cluster, or +# - 4 fields if configuring an Extended Cluster +# +# The fields should be ordered as follows: +# 1. The first field should be the public node name. +# 2. The second field should be the virtual host name +# (Should be specified as AUTO if you have chosen 'auto configure for VIP' +# i.e. autoConfigureClusterNodeVIP=true) +# 3. The third field indicates the site designation for the node. To be specified only if configuring an Extended Cluster. 
+# Only the 1st field is applicable if you have chosen CRS_SWONLY as installation option +# Only the 1st field is applicable if configuring an Application Cluster +# +# Examples +# For registering GI for a cluster software: oracle.install.crs.config.clusterNodes=node1,node2 +# For adding more nodes to the configured cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip,node2:node2-vip +# For configuring Application Cluster: oracle.install.crs.config.clusterNodes=node1,node2 +# For configuring Flex Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip,node2:node2-vip +# For configuring Extended Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:site1,node2:node2-vip:site2 +# You can specify a range of nodes in the tuple using colon separated fields of format +# hostnameprefix:lowerbound-upperbound:hostnamesuffix:vipsuffix:role of node +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.clusterNodes= + +#------------------------------------------------------------------------------- +# The value should be a comma separated strings where each string is as shown below +# InterfaceName:SubnetAddress:InterfaceType +# where InterfaceType can be either "1", "2", "3", "4", or "5" +# InterfaceType stand for the following values +# - 1 : PUBLIC +# - 2 : PRIVATE +# - 3 : DO NOT USE +# - 4 : ASM +# - 5 : ASM & PRIVATE +# +# For example: eth0:140.87.24.0:1,eth1:10.2.1.0:2,eth2:140.87.52.0:3 +# +#------------------------------------------------------------------------------- +oracle.install.crs.config.networkInterfaceList= + +#------------------------------------------------------------------------------ +# Specify 'true' if you would like to configure Grid Infrastructure Management +# Repository (GIMR), else specify 'false'. +# This option is only applicable when CRS_CONFIG is chosen as install option, +# and STANDALONE is chosen as cluster configuration. +#------------------------------------------------------------------------------ +oracle.install.crs.configureGIMR= + +#------------------------------------------------------------------------------ +# Create a separate ASM DiskGroup to store GIMR data. +# Specify 'true' if you would like to separate GIMR data with clusterware data, +# else specify 'false' +# Value should be 'true' for DOMAIN cluster configurations +# Value can be true/false for STANDALONE cluster configurations. +#------------------------------------------------------------------------------ +oracle.install.asm.configureGIMRDataDG= + +################################################################################ +# # +# SECTION E - STORAGE # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the type of storage to use for Oracle Cluster Registry(OCR) and Voting +# Disks files. Only applicable for Standalone and MemberDB cluster. +# - FLEX_ASM_STORAGE +# - CLIENT_ASM_STORAGE +# - FILE_SYSTEM_STORAGE +# +# Option FILE_SYSTEM_STORAGE is only for STANDALONE cluster configuration. 
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.storageOption=
+
+#-------------------------------------------------------------------------------
+# These properties are applicable only if FILE_SYSTEM_STORAGE is chosen for
+# storing OCR and voting disk
+# Specify the location(s) for OCR and voting disks
+# Three(3) or one(1) location(s) should be specified for OCR and voting disk,
+# separated by commas.
+# Example:
+# For Unix based Operating System:
+# oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations=/oradbocfs/storage/vdsk1,/oradbocfs/storage/vdsk2,/oradbocfs/storage/vdsk3
+# oracle.install.crs.config.sharedFileSystemStorage.ocrLocations=/oradbocfs/storage/ocr1,/oradbocfs/storage/ocr2,/oradbocfs/storage/ocr3
+# For Windows based Operating System OCR/VDSK on shared storage is not supported.
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations=
+oracle.install.crs.config.sharedFileSystemStorage.ocrLocations=
+################################################################################
+#                                                                              #
+#                               SECTION F - IPMI                               #
+#                                                                              #
+################################################################################
+
+#-------------------------------------------------------------------------------
+# Specify 'true' if you would like to configure Intelligent Power Management interface
+# (IPMI), else specify 'false'
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.useIPMI=
+
+#-------------------------------------------------------------------------------
+# Applicable only if you choose to configure IPMI
+# i.e. oracle.install.crs.config.useIPMI=true
+# Specify the username and password for using IPMI service
+#-------------------------------------------------------------------------------
+oracle.install.crs.config.ipmi.bmcUsername=
+oracle.install.crs.config.ipmi.bmcPassword=
+################################################################################
+#                                                                              #
+#                                SECTION G - ASM                               #
+#                                                                              #
+################################################################################
+
+
+#-------------------------------------------------------------------------------
+# Password for SYS user of Oracle ASM
+#-------------------------------------------------------------------------------
+oracle.install.asm.SYSASMPassword=
+
+#-------------------------------------------------------------------------------
+# The ASM DiskGroup
+#
+# Example: oracle.install.asm.diskGroup.name=data
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.name=
+
+#-------------------------------------------------------------------------------
+# Redundancy level to be used by ASM.
+# It can be one of the following
+# - NORMAL
+# - HIGH
+# - EXTERNAL
+# - FLEX
+# - EXTENDED (required if oracle.install.crs.config.ClusterConfiguration=EXTENDED)
+# Example: oracle.install.asm.diskGroup.redundancy=NORMAL
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.redundancy=
+
+#-------------------------------------------------------------------------------
+# Allocation unit size to be used by ASM.
+# It can be one of the following values +# - 1 +# - 2 +# - 4 +# - 8 +# - 16 +# Example: oracle.install.asm.diskGroup.AUSize=4 +# size unit is MB +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.AUSize= + +#------------------------------------------------------------------------------- +# Failure Groups for the disk group +# If configuring for Extended cluster specify as list of "failure group name:site" +# tuples. +# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create a ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.diskGroup.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create a ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.diskGroup.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.diskGroup.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. 
+# Quorum failure groups contain only voting disk data, no user data is stored
+# Example:
+# oracle.install.asm.diskGroup.quorumFailureGroupNames=FGName1,FGName2
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.quorumFailureGroupNames=
+#-------------------------------------------------------------------------------
+# The disk discovery string to be used to discover the disks used to create an ASM DiskGroup
+#
+# Example:
+# For Unix based Operating System:
+# oracle.install.asm.diskGroup.diskDiscoveryString=/oracle/asm/*
+# For Windows based Operating System:
+# oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK*
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.diskGroup.diskDiscoveryString=
+
+#-------------------------------------------------------------------------------
+# Password for ASMSNMP account
+# ASMSNMP account is used by Oracle Enterprise Manager to monitor Oracle ASM instances
+#-------------------------------------------------------------------------------
+oracle.install.asm.monitorPassword=
+
+#-------------------------------------------------------------------------------
+# GIMR Storage data ASM DiskGroup
+# Applicable only when
+# oracle.install.asm.configureGIMRDataDG=true
+# Example: oracle.install.asm.GIMRDG.name=MGMT
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.gimrDG.name=
+
+#-------------------------------------------------------------------------------
+# Redundancy level to be used by ASM.
+# It can be one of the following
+# - NORMAL
+# - HIGH
+# - EXTERNAL
+# - FLEX
+# - EXTENDED (only if oracle.install.crs.config.ClusterConfiguration=EXTENDED)
+# Example: oracle.install.asm.gimrDG.redundancy=NORMAL
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.gimrDG.redundancy=
+
+#-------------------------------------------------------------------------------
+# Allocation unit size to be used by ASM.
+# It can be one of the following values
+# - 1
+# - 2
+# - 4
+# - 8
+# - 16
+# Example: oracle.install.asm.gimrDG.AUSize=4
+# size unit is MB
+#
+#-------------------------------------------------------------------------------
+oracle.install.asm.gimrDG.AUSize=
+
+#-------------------------------------------------------------------------------
+# Failure Groups for the GIMR storage data ASM disk group
+# If configuring for Extended cluster specify as list of "failure group name:site"
+# tuples.
+# Else just specify as list of failure group names +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.FailureGroups= + +#------------------------------------------------------------------------------- +# List of disks and their failure groups to create GIMR data ASM DiskGroup +# (Use this if each of the disks have an associated failure group) +# Failure Groups are not required if oracle.install.asm.gimrDG.redundancy=EXTERNAL +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disksWithFailureGroupNames= + +#------------------------------------------------------------------------------- +# List of disks to create GIMR data ASM DiskGroup +# (Use this variable only if failure groups configuration is not required) +# Example: +# For Unix based Operating System: +# oracle.install.asm.gimrDG.disks=/oracle/asm/disk1,/oracle/asm/disk2 +# For Windows based Operating System: +# oracle.install.asm.gimrDG.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 +# +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.disks= + +#------------------------------------------------------------------------------- +# List of failure groups to be marked as QUORUM. +# Quorum failure groups contain only voting disk data, no user data is stored +# Example: +# oracle.install.asm.gimrDG.quorumFailureGroupNames=FGName1,FGName2 +#------------------------------------------------------------------------------- +oracle.install.asm.gimrDG.quorumFailureGroupNames= + +#------------------------------------------------------------------------------- +# Configure AFD - ASM Filter Driver +# Applicable only for FLEX_ASM_STORAGE option +# Specify 'true' if you want to configure AFD, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.asm.configureAFD= +#------------------------------------------------------------------------------- +# Configure RHPS - Rapid Home Provisioning Service +# Applicable only for DOMAIN cluster configuration +# Specify 'true' if you want to configure RHP service, else specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.configureRHPS= + +################################################################################ +# # +# SECTION H - UPGRADE # +# # +################################################################################ +#------------------------------------------------------------------------------- +# Specify whether to ignore down nodes during upgrade operation. 
+# Value should be 'true' to ignore down nodes otherwise specify 'false' +#------------------------------------------------------------------------------- +oracle.install.crs.config.ignoreDownNodes= +################################################################################ +# # +# MANAGEMENT OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the management option to use for managing Oracle Grid Infrastructure +# Options are: +# 1. CLOUD_CONTROL - If you want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +# 2. NONE -If you do not want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control. +#------------------------------------------------------------------------------- +oracle.install.config.managementOption= + +#------------------------------------------------------------------------------- +# Specify the OMS host to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsHost= + +#------------------------------------------------------------------------------- +# Specify the OMS port to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.omsPort= + +#------------------------------------------------------------------------------- +# Specify the EM Admin user name to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminUser= + +#------------------------------------------------------------------------------- +# Specify the EM Admin password to use to connect to Cloud Control. +# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL +#------------------------------------------------------------------------------- +oracle.install.config.emAdminPassword= +################################################################################ +# # +# Root script execution configuration # +# # +################################################################################ + +#------------------------------------------------------------------------------------------------------- +# Specify the root script execution mode. +# +# - true : To execute the root script automatically by using the appropriate configuration methods. +# - false : To execute the root script manually. +# +# If this option is selected, password should be specified on the console. +#------------------------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.executeRootScript= + +#-------------------------------------------------------------------------------------- +# Specify the configuration method to be used for automatic root script execution. +# +# Following are the possible choices: +# - ROOT +# - SUDO +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.configMethod= +#-------------------------------------------------------------------------------------- +# Specify the absolute path of the sudo program. 
+# +# Applicable only when SUDO configuration method was chosen. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoPath= + +#-------------------------------------------------------------------------------------- +# Specify the name of the user who is in the sudoers list. +# Applicable only when SUDO configuration method was chosen. +# Note:For Grid Infrastructure for Standalone server installations,the sudo user name must be the username of the user performing the installation. +#-------------------------------------------------------------------------------------- +oracle.install.crs.rootconfig.sudoUserName= +#-------------------------------------------------------------------------------------- +# Specify the nodes batch map. +# +# This should be a comma separated list of node:batch pairs. +# During upgrade, you can sequence the automatic execution of root scripts +# by pooling the nodes into batches. +# A maximum of three batches can be specified. +# Installer will execute the root scripts on all the nodes in one batch before +# proceeding to next batch. +# Root script execution on the local node must be in Batch 1. +# +# Examples: +# 1. oracle.install.crs.config.batchinfo=Node1:1,Node2:2,Node3:2,Node4:3 +# 2. oracle.install.crs.config.batchinfo=Node1:1,Node2:2,Node3:2,Node4:2 +# 3. oracle.install.crs.config.batchinfo=Node1:1,Node2:1,Node3:2,Node4:3 +# +# Applicable only for UPGRADE install option. +#-------------------------------------------------------------------------------------- +oracle.install.crs.config.batchinfo= +################################################################################ +# # +# APPLICATION CLUSTER OPTIONS # +# # +################################################################################ + +#------------------------------------------------------------------------------- +# Specify the Virtual hostname to configure virtual access for your Application +# The value to be specified for Virtual hostname is optional. +#------------------------------------------------------------------------------- +oracle.install.crs.app.applicationAddress= +################################################################################# +# # +# DELETE NODE OPTIONS # +# # +################################################################################# + +#-------------------------------------------------------------------------------- +# Specify the node names to delete nodes from cluster. +# Delete node will be performed only for the remote nodes from the cluster. +#-------------------------------------------------------------------------------- +oracle.install.crs.deleteNode.nodes= diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/initsh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/initsh new file mode 100755 index 0000000000..288be9b92c --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/initsh @@ -0,0 +1,15 @@ +#!/bin/bash +############################# + +# Copyright 2025, Oracle Corporation and/or affiliates. All rights reserved. 
+
+# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl
+
+# Author: paramdeep.saini@oracle.com
+
+echo "Creating env variables file /etc/rac_env_vars"
+/bin/bash -c "cat /proc/1/environ | tr '\0' '\n' > /etc/rac_env_vars"
+/bin/bash -c "sed -i -e 's/^/export /' /etc/rac_env_vars"
+
+echo "Starting Systemd"
+exec /lib/systemd/systemd
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/installDBBinaries.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/installDBBinaries.sh
new file mode 100755
index 0000000000..c898a24df1
--- /dev/null
+++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/installDBBinaries.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# LICENSE UPL 1.0
+#
+# Copyright (c) 2018,2025 Oracle and/or its affiliates.
+#
+# Since: December, 2018
+# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com
+# Description: Installing Oracle DB software
+#
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
+#
+
+EDITION=$1
+
+# Check whether edition has been passed on
+if [ "$EDITION" == "" ]; then
+  echo "ERROR: No edition has been passed on!"
+  echo "Please specify the correct edition!"
+  exit 1;
+fi;
+
+# Check whether correct edition has been passed on
+# shellcheck disable=SC2166
+if [ "$EDITION" != "EE" -a "$EDITION" != "SE2" ]; then
+  echo "ERROR: Wrong edition has been passed on!"
+  echo "Edition $EDITION is not a valid edition!"
+  exit 1;
+fi;
+
+# Check whether DB_BASE is set
+if [ "$DB_BASE" == "" ]; then
+  echo "ERROR: DB_BASE has not been set!"
+  echo "You have to have the DB_BASE environment variable set to a valid value!"
+  exit 1;
+fi;
+
+# Check whether DB_HOME is set
+if [ "$DB_HOME" == "" ]; then
+  echo "ERROR: DB_HOME has not been set!"
+  echo "You have to have the DB_HOME environment variable set to a valid value!"
+  exit 1;
+fi;
+
+# Replace place holders
+# ---------------------
+sed -i -e "s|###ORACLE_EDITION###|$EDITION|g" "$INSTALL_SCRIPTS"/"$DB_INSTALL_RSP" && \
+sed -i -e "s|###DB_BASE###|$DB_BASE|g" "$INSTALL_SCRIPTS"/"$DB_INSTALL_RSP" && \
+sed -i -e "s|###DB_HOME###|$DB_HOME|g" "$INSTALL_SCRIPTS"/"$DB_INSTALL_RSP" && \
+sed -i -e "s|###INVENTORY###|$INVENTORY|g" "$INSTALL_SCRIPTS"/"$DB_INSTALL_RSP"
+
+export ORACLE_HOME=${DB_HOME}
+export PATH=${ORACLE_HOME}/bin:/bin:/sbin:/usr/bin
+export LD_LIBRARY_PATH=${ORACLE_HOME}/lib:/lib:/usr/lib
+
+# Set up the SSH directory for the DB user when it differs from the grid user
+if [ "${DB_USER}" != "${GRID_USER}" ]; then
+mkdir -p /home/"${DB_USER}"/.ssh && \
+chmod 700 /home/"${DB_USER}"/.ssh
+fi
+
+
+# Install Oracle binaries
+# shellcheck disable=SC2015
+unzip -q "$INSTALL_SCRIPTS"/"$INSTALL_FILE_2" -d "$DB_HOME" && \
+"$DB_HOME"/runInstaller -silent -force -waitforcompletion -responsefile "$INSTALL_SCRIPTS"/"$DB_INSTALL_RSP" -ignorePrereqFailure || true
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/installGridBinaries.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/installGridBinaries.sh
new file mode 100755
index 0000000000..ebab8e91d8
--- /dev/null
+++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/installGridBinaries.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+# LICENSE UPL 1.0
+#
+# Copyright (c) 2018,2025 Oracle and/or its affiliates.
+#
+# Since: December, 2018
+# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com
+# Description: Install grid software inside the container.
+#
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
+# + +EDITION=$1 +# shellcheck disable=SC2034 +PATCH_NUMBER=$2 + +# Check whether edition has been passed on +if [ "$EDITION" == "" ]; then + echo "ERROR: No edition has been passed on!" + echo "Please specify the correct edition!" + exit 1; +fi; + +# Check whether correct edition has been passed on +if [ "$EDITION" != "EE" ]; then + echo "ERROR: Wrong edition has been passed on!" + echo "Edition $EDITION is no a valid edition!" + exit 1; +fi; + +# Check whether GRID_BASE is set +if [ "$GRID_BASE" == "" ]; then + echo "ERROR: GRID_BASE has not been set!" + echo "You have to have the GRID_BASE environment variable set to a valid value!" + exit 1; +fi; + +# Check whether GRID_HOME is set +if [ "$GRID_HOME" == "" ]; then + echo "ERROR: GRID_HOME has not been set!" + echo "You have to have the GRID_HOME environment variable set to a valid value!" + exit 1; +fi; + + +temp_var1=`hostname` + +# Replace place holders +# --------------------- +sed -i -e "s|###HOSTNAME###|$temp_var1|g" "$INSTALL_SCRIPTS"/"$GRID_SW_INSTALL_RSP" && \ +sed -i -e "s|###INSTALL_TYPE###|CRS_SWONLY|g" "$INSTALL_SCRIPTS"/"$GRID_SW_INSTALL_RSP" && \ +sed -i -e "s|###GRID_BASE###|$GRID_BASE|g" "$INSTALL_SCRIPTS"/"$GRID_SW_INSTALL_RSP" && \ +sed -i -e "s|###INVENTORY###|$INVENTORY|g" "$INSTALL_SCRIPTS"/"$GRID_SW_INSTALL_RSP" + +# Install Oracle binaries +mkdir -p /home/grid/.ssh && \ +chmod 700 /home/grid/.ssh && \ +unzip -q "$INSTALL_SCRIPTS"/"$INSTALL_FILE_1" -d "$GRID_HOME" && \ +"$GRID_HOME"/gridSetup.sh -silent -responseFile "$INSTALL_SCRIPTS"/"$GRID_SW_INSTALL_RSP" -ignorePrereqFailure || true diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/runOracle.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/runOracle.sh new file mode 100755 index 0000000000..f1ff5cc3e3 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/runOracle.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Runs the Oracle RAC Database inside the container +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +if [ -f /etc/rac_env_vars ]; then +source /etc/rac_env_vars +fi + +################################### +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # +############# MAIN ################ +# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # +################################### + +if [ -z ${BASE_DIR} ]; then + BASE_DIR=/opt/scripts/startup/scripts +else + BASE_DIR=$SCRIPT_DIR/scripts +fi + +if [ -z ${MAIN_SCRIPT} ]; then + SCRIPT_NAME="main.py" +fi + +if [ -z ${EXECUTOR} ]; then + EXECUTOR="python3" +fi +# shellcheck disable=SC2164 +cd $BASE_DIR +$EXECUTOR $SCRIPT_NAME + +# Tail on alert log and wait (otherwise container will exit) \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupDB.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupDB.sh new file mode 100755 index 0000000000..053d234541 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupDB.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: paramdeep.saini@oracle.com +# Description: Sets up the unix environment for DB installation. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
+# + +# Create Directories +if [ "${SLIMMING}x" != 'truex' ]; then + mkdir -p "$DB_BASE" + mkdir -p "$DB_HOME" +fi + +usermod -g oinstall -G oinstall,dba,oper,backupdba,dgdba,kmdba,asmdba,asmoper,racdba,asmadmin "${DB_USER}" + +chmod 775 "$INSTALL_SCRIPTS" + + +if [ "${SLIMMING}x" != 'truex' ]; then + chown -R "${DB_USER}":oinstall "$DB_BASE" + chown -R "${DB_USER}":oinstall "$DB_HOME" + chown -R "${DB_USER}":oinstall "$INSTALL_SCRIPTS" + echo "export PATH=$DB_PATH" >> /home/"${DB_USER}"/.bashrc + echo "export LD_LIBRARY_PATH=$DB_LD_LIBRARY_PATH" >> /home/"${DB_USER}"/.bashrc + echo "export SCRIPT_DIR=$SCRIPT_DIR" >> /home/"${DB_USER}"/.bashrc + echo "export GRID_HOME=$GRID_HOME" >> /home/"${DB_USER}"/.bashrc + echo "export DB_BASE=$DB_BASE" >> /home/"${DB_USER}"/.bashrc + echo "export DB_HOME=$DB_HOME" >> /home/"${DB_USER}"/.bashrc +fi + +if [ "${SLIMMING}x" != 'truex' ]; then + if [ "${DB_USER}" == "${GRID_USER}" ]; then + sed -i '/PATH=/d' /home/"${DB_USER}"/.bashrc + echo "export PATH=$GRID_HOME/bin:$DB_PATH" >> /home/"${DB_USER}"/.bashrc + echo "export LD_LIBRARY_PATH=$GRID_HOME/lib:$DB_LD_LIBRARY_PATH" >> /home/"${DB_USER}"/.bashrc + fi +fi \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupGrid.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupGrid.sh new file mode 100755 index 0000000000..f37cf8c01a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupGrid.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: paramdeep.saini@oracle.com +# Description: Sets up the unix environment for Grid installation. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# +# shellcheck disable=SC2034 +EDITION=$1 + +# Create Directories +if [ "${SLIMMING}x" != 'truex' ] ; then + mkdir -p "$GRID_BASE" + mkdir -p "$GRID_HOME" +fi + +groupadd -g 54334 asmadmin +groupadd -g 54335 asmdba +groupadd -g 54336 asmoper +useradd -u 54332 -g oinstall -G oinstall,asmadmin,asmdba,asmoper,racdba,dba "${GRID_USER}" + +chmod 666 /etc/sudoers +echo "${DB_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +echo "${GRID_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +chmod 440 /etc/sudoers + +if [ "${SLIMMING}x" != 'truex' ] ; then + chown -R "${GRID_USER}":oinstall "$GRID_BASE" + chown -R "${GRID_USER}":oinstall "$GRID_HOME" + mkdir -p "$INVENTORY" + chown -R "${GRID_USER}":oinstall "$INVENTORY" + # shellcheck disable=SC2129 + echo "export PATH=$GRID_PATH" >> /home/"${GRID_USER}"/.bashrc + echo "export LD_LIBRARY_PATH=$GRID_LD_LIBRARY_PATH" >> /home/"${GRID_USER}"/.bashrc + echo "export SCRIPT_DIR=$SCRIPT_DIR" >> /home/"${GRID_USER}"/.bashrc + echo "export GRID_HOME=$GRID_HOME" >> /home/"${GRID_USER}"/.bashrc + echo "export GRID_BASE=$GRID_BASE" >> /home/"${GRID_USER}"/.bashrc + echo "export DB_HOME=$DB_HOME" >> /home/"${GRID_USER}"/.bashrc +fi \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupLinuxEnv.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupLinuxEnv.sh new file mode 100755 index 0000000000..31f7394caa --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupLinuxEnv.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. 
+# +# Since: January, 2018 +# Author: paramdeep.saini@oracle.com +# Description: Sets up the unix environment for DB installation. +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +# Setup filesystem and oracle user +# Adjust file permissions, go to /opt/oracle as user 'oracle' to proceed with Oracle installation +# ------------------------------------------------------------ +## Use OCI yum repos on OCI instead of public yum +region=$(curl --noproxy '*' -sfm 3 -H "Authorization: Bearer Oracle" http://169.254.169.254/opc/v2/instance/ | sed -nE 's/^ *"regionIdentifier": "([^"]+)".*/\1/p') +if [ -n "$region" ]; then + echo "Detected OCI Region: $region" + for proxy in $(printenv | grep -i _proxy | cut -d= -f1); do unset $proxy; done + echo "-$region" > /etc/yum/vars/ociregion +fi + +mkdir /asmdisks && \ +mkdir /responsefiles && \ +chmod ug+x /opt/scripts/startup/*.sh && \ +yum -y install systemd oracle-database-preinstall-21c vim passwd expect sudo passwd openssl openssh-server hostname python3 lsof rsync && \ +yum clean all diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupSSH.expect b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupSSH.expect new file mode 100644 index 0000000000..627dedff34 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/21.3.0/setupSSH.expect @@ -0,0 +1,45 @@ +#!/usr/bin/expect -f +# LICENSE UPL 1.0 +# +# Copyright (c) 2018,2025 Oracle and/or its affiliates. +# +# Since: January, 2018 +# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com +# Description: Setup SSH between nodes +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# + +set username [lindex $argv 0]; +set script_loc [lindex $argv 1]; +set cluster_nodes [lindex $argv 2]; +set ssh_pass [lindex $argv 3]; + +set timeout 120 + +# Procedure to setup ssh from server +proc sshproc { ssh_pass } { + expect { + # Send password at 'Password' prompt and tell expect to continue(i.e. exp_continue) + -re "\[P|p]assword:" { exp_send "$ssh_pass\r" + exp_continue } + # Tell expect stay in this 'expect' block and for each character that SCP prints while doing the copy + # reset the timeout counter back to 0. + -re . { exp_continue } + timeout { return 1 } + eof { return 0 } + } +} + +# Execute sshUserSetup.sh Script +set ssh_cmd "$script_loc/sshUserSetup.sh -user $username -hosts \"${cluster_nodes}\" -logfile /tmp/${username}_SetupSSH.log -advanced -exverify -noPromptPassphrase -confirm" + +eval spawn $ssh_cmd +set ssh_results [sshproc $ssh_pass] + +if { $ssh_results == 0 } { + exit 0 +} + +# Error attempting SSH, so exit with non-zero status +exit 1 diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/buildContainerImage.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/buildContainerImage.sh new file mode 100755 index 0000000000..8bd77e9a92 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/buildContainerImage.sh @@ -0,0 +1,176 @@ +#!/bin/bash +# +# Since: November, 2018 +# Author: paramdeep.saini@oracle.com +# Description: Build script for building RAC container image +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. +# +# Copyright (c) 2014,2025 Oracle and/or its affiliates. 
+# + +usage() { + cat << EOF + +Usage: buildContainerImage.sh -v [version] -t [image_name:tag] [-o] [-i] +It builds a container image for a DNS server + +Parameters: + -v: version to build + -i: ignores the MD5 checksums + -t: user defined image name and tag (e.g., image_name:tag) + -o: passes on container build option (e.g., --build-arg SLIMMIMG=true for slim) + +LICENSE UPL 1.0 + +Copyright (c) 2014,2025 Oracle and/or its affiliates. + +EOF + exit 0 +} + +# Validate packages +checksumPackages() { + if hash md5sum 2>/dev/null; then + echo "Checking if required packages are present and valid..." + md5sum -c ${VERSION}/Checksum + # shellcheck disable=SC2181 + if [ "$?" -ne 0 ]; then + echo "MD5 for required packages to build this image did not match!" + echo "Make sure to download missing files in folder $VERSION." + # shellcheck disable=SC2320 + exit $? + fi + else + echo "Ignored MD5 sum, 'md5sum' command not available."; + fi +} + +############## +#### MAIN #### +############## + +if [ "$#" -eq 0 ]; then + usage; +fi + +# Parameters +VERSION="12.2.0.1" +SKIPMD5=0 +DOCKEROPS="" +IMAGE_NAME="" +SLIM="false" +DOCKEROPS=" --build-arg SLIMMING=false" + +while getopts "hiv:o:t:" optname; do + case "$optname" in + "h") + usage + ;; + "i") + SKIPMD5=1 + ;; + "v") + VERSION="$OPTARG" + ;; + "o") + DOCKEROPS="$OPTARG" + if [[ "$DOCKEROPS" != *"--build-arg SLIMMING="* ]]; then + DOCKEROPS+=" --build-arg SLIMMING=false" + SLIM="false" + fi + if [[ "$OPTARG" == *"--build-arg SLIMMING=true"* ]]; then + SLIM="true" + fi + ;; + "t") + IMAGE_NAME="$OPTARG" + ;; + "?") + usage; + ;; + *) + # Should not occur + echo "Unknown error while processing options inside buildContainerImage.sh" + ;; + esac +done + +# Oracle Database Image Name +if [ "${IMAGE_NAME}"x = "x" ] && [ "${SLIM}" == "true" ]; then + IMAGE_NAME="oracle/database-rac:${VERSION}-slim" +elif [ "${IMAGE_NAME}"x = "x" ] && [ "${SLIM}" == "false" ]; then + IMAGE_NAME="oracle/database-rac:${VERSION}" +else + echo "Image name is passed as an variable" +fi + + echo "Container Image set to : ${IMAGE_NAME}" + +# Go into version folder +#cd "$VERSION" || exit + +if [ ! "$SKIPMD5" -eq 1 ]; then + checksumPackages +else + echo "Ignored MD5 checksum." +fi +echo "==========================" +echo "DOCKER info:" +docker info +echo "==========================" + +# Proxy settings +PROXY_SETTINGS="" +# shellcheck disable=SC2154 +if [ "${http_proxy}" != "" ]; then + PROXY_SETTINGS="$PROXY_SETTINGS --build-arg http_proxy=${http_proxy}" +fi +# shellcheck disable=SC2154 +if [ "${https_proxy}" != "" ]; then + PROXY_SETTINGS="$PROXY_SETTINGS --build-arg https_proxy=${https_proxy}" +fi +# shellcheck disable=SC2154 +if [ "${ftp_proxy}" != "" ]; then + PROXY_SETTINGS="$PROXY_SETTINGS --build-arg ftp_proxy=${ftp_proxy}" +fi +# shellcheck disable=SC2154 +if [ "${no_proxy}" != "" ]; then + PROXY_SETTINGS="$PROXY_SETTINGS --build-arg no_proxy=${no_proxy}" +fi +# shellcheck disable=SC2154 +if [ "$PROXY_SETTINGS" != "" ]; then + echo "Proxy settings were found and will be used during the build." +fi + +# ################## # +# BUILDING THE IMAGE # +# ################## # +echo "Building image '$IMAGE_NAME' ..." + +# BUILD THE IMAGE (replace all environment variables) +BUILD_START=$(date '+%s') +# shellcheck disable=SC2086 +docker build --force-rm=true --no-cache=true ${DOCKEROPS} ${PROXY_SETTINGS} --build-arg VERSION="${VERSION}" -t ${IMAGE_NAME} -f "${VERSION}"/Containerfile . || { + echo "There was an error building the image." 
+ exit 1 +} +BUILD_END=$(date '+%s') +# shellcheck disable=SC2154,SC2003 +BUILD_ELAPSED=$((BUILD_END - BUILD_START)) + +echo "" +# shellcheck disable=SC2181,SC2320 +if [ $? -eq 0 ]; then +cat << EOF + Oracle Database container Image for Real Application Clusters (RAC) version $VERSION is ready to be extended: + + --> $IMAGE_NAME + + Build completed in $BUILD_ELAPSED seconds. + +EOF + +else + echo "Oracle Database Real Application Clusters Container Image was NOT successfully created. Check the output and correct any reported problems with the container build operation." +fi diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/cmdExec b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/cmdExec new file mode 100755 index 0000000000..c06148708c --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/cmdExec @@ -0,0 +1,22 @@ +#!/bin/bash +############################# + +# Copyright 2025, Oracle Corporation and/or affiliates. All rights reserved. + +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl + +# Author: paramdeep.saini@oracle.com +TIMESTAMP=`date "+%Y-%m-%d"` +LOGFILE="/tmp/oracle_rac_cmd_${TIMESTAMP}.log" +# shellcheck disable=SC2046,SC2068 +echo $(date -u) " : " $@ >> $LOGFILE +# shellcheck disable=SC2124 +cmd=$@ + +$cmd + +if [ $? -eq 0 ]; then + exit 0 +else + exit 127 +fi diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/main.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/main.py old mode 100644 new mode 100755 similarity index 80% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/main.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/main.py index cc8d0a1c3f..2829eff1df --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/main.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/main.py @@ -1,14 +1,14 @@ #!/usr/bin/python ############################# -# Copyright 2020 - 2024, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020 - 2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com # Contributor: saurabh.ahuja@oracle.com ############################ """ -This is the main file which calls other file to setup the sharding. +This is the main file which calls other file to setup the real application clusters. 
""" from oralogger import * @@ -22,7 +22,7 @@ def main(): # Checking Comand line Args opts="" try: - opts, args = getopt.getopt(sys.argv[1:], '', ['help','resetpassword=','delracnode=','addtns=', 'checkracinst=', 'checkgilocal=','checkdbrole=','checkracdb=','checkracstatus','checkconnstr=','checkpdbconnstr=','setupdblsnr=','setuplocallsnr=','checkdbsvc=','modifydbsvc=','checkdbversion=','updatelsnrendp=','updateasmcount=','modifyscan=','updateasmdevices=']) + opts, args = getopt.getopt(sys.argv[1:], '', ['help','resetpassword=','delracnode=','addtns=', 'checkracinst=', 'checkgilocal=','checkdbrole=','checkracdb=','checkracstatus','checkconnstr=','checkpdbconnstr=','setupdblsnr=','setuplocallsnr=','checkdbsvc=','modifydbsvc=','checkdbversion=','updatelsnrendp=','updateasmcount=','modifyscan=','updateasmdevices=','getasmdiskgroup=','getasmdisks=','getdgredundancy=','getasminstname=','getasminststatus=']) except getopt.GetoptError: pass @@ -42,7 +42,7 @@ def main(): stdout_handler.nextHandler = file_handler file_handler.nextHandler = console_handler console_handler.nextHandler = PassHandler() - + ocommon = OraCommon(oralogger,stdout_handler,oenv) for opt, arg in opts: @@ -224,6 +224,56 @@ def main(): oenv.update_key("OP_TYPE","miscops") else: oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--getasmdiskgroups'): + file_name = oenv.logfile_name("LIST_ASMDG") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("LIST_ASMDG",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--getasmdisks'): + file_name = oenv.logfile_name("LIST_ASMDISKS") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("LIST_ASMDISKS",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--getdgredundancy'): + file_name = oenv.logfile_name("LIST_ASMDGREDUNDANCY") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("LIST_ASMDGREDUNDANCY",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--getasminstname'): + file_name = oenv.logfile_name("LIST_ASMINSTNAME") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("LIST_ASMINSTNAME",arg) + oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") + elif opt in ('--getasminststatus'): + file_name = oenv.logfile_name("LIST_ASMINSTSTATUS") + oralogger.filename_ = file_name + ocommon.log_info_message("=======================================================================",file_name) + oenv.add_custom_variable("LIST_ASMINSTSTATUS",arg) + 
oenv.add_custom_variable("CUSTOM_RUN_FLAG","true") + if ocommon.check_key("OP_TYPE",oenv.get_env_dict()): + oenv.update_key("OP_TYPE","miscops") + else: + oenv.add_custom_variable("OP_TYPE","miscops") elif opt in ('--updatelsnrendp'): file_name = oenv.logfile_name("UPDATE_LISTENERENDP") oralogger.filename_ = file_name @@ -251,4 +301,4 @@ def main(): # Using the special variable if __name__=="__main__": - main() + main() diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraasmca.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraasmca.py old mode 100644 new mode 100755 similarity index 98% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraasmca.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraasmca.py index 05a6cc98dd..ca456d1425 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraasmca.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraasmca.py @@ -1,7 +1,7 @@ #!/usr/bin/python ############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2021-2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracommon.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oracommon.py similarity index 96% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracommon.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oracommon.py index 394cee6f39..a1d463445c 100755 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracommon.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oracommon.py @@ -1,7 +1,7 @@ #!/usr/bin/python ############################# -# Copyright 2020, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ @@ -1345,7 +1345,16 @@ def set_asmdisk_perm(self,key,eflag): if self.disk_exists(device): msg='''Changing device permission {0}'''.format(device) self.log_info_message(msg,self.file_name) - cmd='''chmod 660 {0};chown grid:asmadmin {0}'''.format(device) + oraversion=self.get_rsp_version("INSTALL",None) + version = oraversion.split(".", 1)[0].strip() + self.log_info_message("disk" + version, self.file_name) + + if int(version) == 19 or int(version) == 21: + cmd = '''chmod 660 {0};chown grid:asmadmin {0}'''.format(device) + else: + cmd = '''chmod 660 {0};chown grid:asmdba {0}'''.format(device) + + self.log_info_message("Executing command:" + cmd , self.file_name) output,error,retcode=self.execute_cmd(cmd,None,None) self.check_os_err(output,error,retcode,True) else: @@ -1356,7 +1365,7 @@ def set_asmdisk_perm(self,key,eflag): self.log_error_message(key + " is not passed. 
Exiting....",self.file_name) self.prog_exit("None") -######## sCLeanup the disks ############### +######## CLeanup the disks ############### def asm_disk_cleanup(self,disk): """ This function cleanup the ASM Disks @@ -1557,7 +1566,7 @@ def get_rsp_version(self,key,node): return vdata ######### Check if GI is already installed on this machine ########### - def check_gi_installed(self,retcode1,gihome,giuser): + def check_gi_installed(self,retcode1,gihome,giuser,node,oinv): """ Check if the Gi is installed on this machine """ @@ -1575,6 +1584,7 @@ def check_gi_installed(self,retcode1,gihome,giuser): if not status: return False else: + self.run_orainstsh_local(giuser,node,oinv) status=self.start_crs(gihome,giuser) if status: return True @@ -1730,6 +1740,9 @@ def get_inst_sid(self,dbuser,dbhome,osid,hostname): """ return the sid """ + if self.check_key("CRS_GPC",self.ora_env_dict): + return osid + path='''/usr/bin:/bin:/sbin:/usr/local/sbin:{0}/bin'''.format(dbhome) ldpath='''{0}/lib:/lib:/usr/lib'''.format(dbhome) cmd='''su - {5} -c "export ORACLE_HOME={0};export PATH={1};export LD_LIBRARY_PATH={2}; {0}/bin/srvctl status database -d {3} | grep {4}"'''.format(dbhome,path,ldpath,osid,hostname,dbuser) @@ -2374,6 +2387,7 @@ def check_dbinst(self): dbname,osid,dbuname=self.getdbnameinfo() hostname = self.get_public_hostname() inst_sid=self.get_inst_sid(osuser,dbhome,osid,hostname) + connect_str=self.get_sqlplus_str(dbhome,inst_sid,osuser,"sys",None,None,None,None,None,None,None) if inst_sid: status=self.get_dbinst_status(osuser,dbhome,inst_sid,connect_str) @@ -3154,17 +3168,17 @@ def updateasmcount(self,giuser,gihome,asmcount): else: return False - def updateasmdevices(self,giuser,gihome,diskname,diskgroup,processtype): + def updateasmdevices(self, giuser, gihome, diskname, diskgroup, processtype): """ - Update ASM devices, handle addition or deletion + Update ASM devices, handle addition or deletion. 
""" - retcode=1 + retcode = 1 if processtype == "addition": - cmd = '''su - {0} -c "{1}/bin/asmcmd add disk -g {2} -d {3}"'''.format(giuser, gihome, diskgroup, diskname) + cmd = '''su - {0} -c "{1}/bin/asmca -silent -addDisk -diskGroupName {2} -disk {3}"'''.format(giuser, gihome, diskgroup, diskname) output, error, retcode = self.execute_cmd(cmd, None, None) self.check_os_err(output, error, retcode, None) elif processtype == "deletion": - cmd = '''su - {0} -c "{1}/bin/asmcmd drop disk -g {2} -d {3}"'''.format(giuser, gihome, diskgroup, diskname) + cmd = '''su - {0} -c "{1}/bin/asmca -silent -removeDisk -diskGroupName {2} -disk {3}"'''.format(giuser, gihome, diskgroup, diskname) output, error, retcode = self.execute_cmd(cmd, None, None) self.check_os_err(output, error, retcode, None) if retcode == 0: @@ -3183,3 +3197,99 @@ def updatelistenerendp(self,giuser,gihome,listenername,portlist): return True else: return False + + def get_asmsid(self,giuser,gihome): + """ + get the asm sid details + """ + sid=None + cmd='''su - {0} -c "{1}/bin/olsnodes -n"'''.format(giuser,gihome) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + if retcode == 0: + pubhost=self.get_public_hostname() + for line in output.splitlines(): + if pubhost in line: + nodeid = line.split() + if len(nodeid) == 2: + sid="+ASM" + nodeid[1] + break + if sid is not None: + self.log_info_message("ASM sid set to :" + sid,self.file_name) + return sid + else: + return None + + def check_asminst(self,giuser,gihome): + """ + check asm instance + """ + sid=self.get_asmsid(giuser,gihome) + if sid is not None: + sqlpluslogincmd=self.get_sqlplus_str(gihome,sid,giuser,"sys",None,None,None,sid,None,None,None) + sqlcmd=""" + set heading off + set feedback off + set term off + SET NEWPAGE NONE + select status from v$instance; + exit; + """ + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,True) + if "STARTED" in ''.join(output.upper()): + return 0 + else: + return 1 + + def get_asmdg(self,giuser,gihome): + """ + get the asm dg list + """ + sid=self.get_asmsid(giuser,gihome) + if sid is not None: + sqlpluslogincmd=self.get_sqlplus_str(gihome,sid,giuser,"sys",None,None,None,sid,None,None,None) + sqlcmd=""" + set heading off + set feedback off + set term off + SET NEWPAGE NONE + select name from v$asm_diskgroup; + """ + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,True) + return output.strip().replace('\n',',') + + def get_asmdgrd(self,giuser,gihome,dg): + """ + get the asm disk redudancy + """ + sid=self.get_asmsid(giuser,gihome) + if sid is not None: + sqlpluslogincmd=self.get_sqlplus_str(gihome,sid,giuser,"sys",None,None,None,sid,None,None,None) + sqlcmd=""" + set heading off + set feedback off + set term off + SET NEWPAGE NONE + select type from v$asm_diskgroup where upper(name)=upper('{0}'); + """.format(dg) + output,error,retcode=self.run_sqlplus(sqlpluslogincmd,sqlcmd,None) + self.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name) + self.check_sql_err(output,error,retcode,True) + return output + + def get_asmdsk(self,giuser,gihome,dg): + """ + check asm disks based on dg group + """ + 
sid=self.get_asmsid(giuser,gihome) + cmd='''su - {0} -c "asmcmd lsdsk -G {1} --suppressheader --member"'''.format(giuser,dg) + output,error,retcode=self.execute_cmd(cmd,None,None) + self.check_os_err(output,error,retcode,None) + if retcode == 0: + return output.strip().replace('\n',',') + else: + return "ERROR OCCURRED" diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracvu.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oracvu.py old mode 100644 new mode 100755 similarity index 98% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracvu.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oracvu.py index f287084fda..f7b2b29401 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oracvu.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oracvu.py @@ -1,10 +1,10 @@ #!/usr/bin/python ############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com -############################ +############################# """ This file contains to the code call different classes objects based on setup type diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraenv.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraenv.py old mode 100644 new mode 100755 similarity index 87% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraenv.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraenv.py index 4ee8270012..0512a44f06 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraenv.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraenv.py @@ -1,7 +1,7 @@ #!/usr/bin/python ############################# -# Copyright 2020, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ @@ -155,6 +155,18 @@ def logfile_name(file_type): OraEnv.__env_var_dict["LOG_FILE_NAME"] = OraEnv.logdir__ + "/oracle_update_asmdevices_status.log" elif file_type == "UPDATE_LISTENERENDP": OraEnv.__env_var_dict["LOG_FILE_NAME"] = OraEnv.logdir__ + "/oracle_update_listenerendp_status.log" + elif file_type == "LIST_ASMDG": + OraEnv.__env_var_dict["LOG_FILE_NAME"] = OraEnv.logdir__ + "/oracle_list_asmdg_status.log" + elif file_type == "LIST_ASMDISKS": + OraEnv.__env_var_dict["LOG_FILE_NAME"] = OraEnv.logdir__ + "/oracle_list_asmdisks_status.log" + elif file_type == "LIST_ASMDGREDUNDANCY": + OraEnv.__env_var_dict["LOG_FILE_NAME"] = OraEnv.logdir__ + "/oracle_list_asmdgredudancy_status.log" + elif file_type == "LIST_ASMINSTNAME": + OraEnv.__env_var_dict["LOG_FILE_NAME"] = OraEnv.logdir__ + "/oracle_list_asminstname_status.log" + elif file_type == "LIST_ASMINSTSTATUS": + OraEnv.__env_var_dict["LOG_FILE_NAME"] = OraEnv.logdir__ + "/oracle_list_amsinst_status.log" + elif file_type == "UPDATE_LISTENERENDP": + OraEnv.__env_var_dict["LOG_FILE_NAME"] = OraEnv.logdir__ + "/oracle_update_listenerendp_status.log" else: pass diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orafactory.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/orafactory.py old mode 100644 new mode 100755 similarity index 81% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orafactory.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/orafactory.py index 0734d9718a..54c7e0bdaa --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orafactory.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/orafactory.py @@ -1,7 +1,7 @@ #!/usr/bin/python ############################# -# Copyright 2020, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ @@ -12,6 +12,7 @@ import os import sys +import re sys.path.insert(0, "/opt/scripts/startup/scripts") @@ -96,9 +97,35 @@ def get_ora_objs(self): self.ora_env_dict=self.ocommon.add_key("OP_TYPE","nosetup",self.ora_env_dict) msg="OP_TYPE variable is set to default nosetup. No value passed as an enviornment variable." 
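+        # Summary of the version handling added below (assumes GRID_RESPONSE_FILE
+        # points at a readable response file): oracle.install.responseFileVersion
+        # is read, the major version is taken from its "vNN" suffix (defaulting
+        # to 23 when nothing matches or no response file is present), and for
+        # 19/21 the pre-23c parser update_pre_23c_gi_env_vars_from_rspfile() is
+        # called instead of update_gi_env_vars_from_rspfile().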
self.ocommon.log_info_message(msg,self.file_name) + #default version as 0 integer, will read from rsp file + version=0 + if self.ocommon.check_key("GRID_RESPONSE_FILE",self.ora_env_dict): + gridrsp=self.ora_env_dict["GRID_RESPONSE_FILE"] + self.ocommon.log_info_message("GRID_RESPONSE_FILE parameter is set and file location is:" + gridrsp ,self.file_name) - ## Calling this function from here to make sure INSTALL_NODE is set - self.ocommon.update_gi_env_vars_from_rspfile() + if os.path.isfile(gridrsp): + with open(gridrsp) as fp: + for line in fp: + if len(line.split("=")) == 2: + key=(line.split("=")[0]).strip() + value=(line.split("=")[1]).strip() + self.ocommon.log_info_message("KEY and Value pair set to: " + key + ":" + value ,self.file_name) + if key == "oracle.install.responseFileVersion": + match = re.search(r'v(\d{2})', value) + if match: + version=int(match.group(1)) + else: + # Default to version 23 if no match is found + version=23 + #print version in logs + msg="Version detected in response file is {0}".format(version) + self.ocommon.log_info_message(msg,self.file_name) + ## Calling this function from here to make sure INSTALL_NODE is set + if version == int(19) or version == int(21): + self.ocommon.update_pre_23c_gi_env_vars_from_rspfile() + else: + # default to read when its either set as 23 in response file or if response file is not present + self.ocommon.update_gi_env_vars_from_rspfile() # Check the OP_TYPE value and call objects based on it value install_node,pubhost=self.ocommon.get_installnode() if install_node.lower() == pubhost.lower(): @@ -112,17 +139,17 @@ def get_ora_objs(self): oracdb = OraRacProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh) self.ocommon.log_info_message(msg,self.file_name) ofactory_obj.append(oracdb) - elif self.ora_env_dict["OP_TYPE"] == 'catalog': + elif self.ora_env_dict["OP_TYPE"] in ['setuprac,catalog','catalog,setuprac']: msg="Creating and calling instance to prov RAC DB for catalog setup" oracdb = OraRacProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh) self.ocommon.log_info_message(msg,self.file_name) ofactory_obj.append(oracdb) - elif self.ora_env_dict["OP_TYPE"] == 'primaryshard': + elif self.ora_env_dict["OP_TYPE"] in ['setuprac,primaryshard','primaryshard,setuprac']: msg="Creating and calling instance to prov RAC DB for primary shard" oracdb = OraRacProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh) self.ocommon.log_info_message(msg,self.file_name) ofactory_obj.append(oracdb) - elif self.ora_env_dict["OP_TYPE"] == 'standbyshard': + elif self.ora_env_dict["OP_TYPE"] in ['setuprac,standbyshard','standbyshard,setuprac']: msg="Creating and calling instance to prov RAC DB for standby shard setup" oracdb = OraRacProv(self.ologger,self.ohandler,self.oenv,self.ocommon,self.ocvu,self.osetupssh) self.ocommon.log_info_message(msg,self.file_name) diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragiadd.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oragiadd.py old mode 100644 new mode 100755 similarity index 99% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragiadd.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oragiadd.py index fad68ad383..fbbfe394b7 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragiadd.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oragiadd.py @@ -1,7 +1,7 
@@ #!/usr/bin/python ############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ @@ -265,7 +265,7 @@ def prepare_responsefile(self): oracle.install.crs.rootconfig.configMethod=ROOT oracle.install.asm.configureAFD=false oracle.install.crs.rootconfig.executeRootScript=false - oracle.install.crs.configureRHPS={3} + oracle.install.crs.configureRHPS=false '''.format(obase,invloc,clunodes,oraversion,"false") # fdata="\n".join([s for s in rspdata.split("\n") if s]) else: diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragiprov.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oragiprov.py old mode 100644 new mode 100755 similarity index 95% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragiprov.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oragiprov.py index 065057a1c8..8f00de04ae --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragiprov.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oragiprov.py @@ -1,10 +1,9 @@ #!/usr/bin/python ############################# -# Copyright 2021-2024, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com -# Contributor: saurabh.ahuja@oracle.com ############################ """ @@ -321,6 +320,7 @@ def prepare_responsefile(self): ## Variable Assignments clusterusage="GENERAL_PURPOSE" if self.ocommon.check_key("CRS_GPC",self.ora_env_dict) else "RAC" + crsconfig="HA_CONFIG" if self.ocommon.check_key("CRS_GPC",self.ora_env_dict) else "CRS_CONFIG" if clusterusage != "GENERAL_PURPOSE": scanname=self.ora_env_dict["SCAN_NAME"] scanport=self.ora_env_dict["SCAN_PORT"] if self.ocommon.check_key("SCAN_PORT",self.ora_env_dict) else "1521" @@ -347,19 +347,21 @@ def prepare_responsefile(self): version=oraversion.split(".",1)[0].strip() self.ocommon.log_info_message("disk" + version, self.file_name) if int(version) < 23: - return self.get_responsefile(obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,disksWithFGNames,oraversion,gridrsp,netmasklist) + if self.ocommon.check_key("CRS_GPC",self.ora_env_dict): + clsnodes=None + return self.get_responsefile(obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,disksWithFGNames,oraversion,gridrsp,netmasklist,crsconfig) else: return self.get_23c_responsefile(obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,disksWithFGNames,oraversion,gridrsp,netmasklist,clusterusage) - def get_responsefile(self,obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,disksWithFGNames,oraversion,gridrsp,netmasklist): + def 
get_responsefile(self,obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,disksWithFGNames,oraversion,gridrsp,netmasklist,crsconfig): """ This function prepare the response file if no response file passed """ self.ocommon.log_info_message("I am in get_responsefile", self.file_name) rspdata=''' oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v{15} - oracle.install.option=CRS_CONFIG + oracle.install.option={19} ORACLE_BASE={0} INVENTORY_LOCATION={1} oracle.install.asm.OSDBA=asmdba @@ -388,7 +390,7 @@ def get_responsefile(self,obase,invloc,scanname,scanport,clutype,cluname,clunode oracle.install.config.managementOption=NONE oracle.install.crs.configureRHPS={16} oracle.install.crs.config.ClusterConfiguration={17} - '''.format(obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,oraversion,"false","STANDALONE",disksWithFGNames) + '''.format(obase,invloc,scanname,scanport,clutype,cluname,clunodes,nwiface,gimrflag,passwd,dgname,dgred,fgname,asmdisk,asmstr,oraversion,"false","STANDALONE",disksWithFGNames,crsconfig) # fdata="\n".join([s for s in rspdata.split("\n") if s]) self.ocommon.write_file(gridrsp,rspdata) if os.path.isfile(gridrsp): @@ -484,20 +486,19 @@ def run_rootsh(self): self.mythread.clear() mythreads=[] for node in pub_nodes.split(" "): - self.ocommon.log_info_message("Running root.sh on node " + node,self.file_name) - thread=Process(target=self.run_rootsh_on_node,args=(node,giuser,gihome)) - #thread.setDaemon(True) - mythreads.append(thread) - thread.start() - -# for thread in mythreads: -# thread.start() -# sleep(10) -# self.ocommon.log_info_message("Starting root.sh thread ",self.file_name) - - for thread in mythreads: # iterates over the threads - thread.join() # waits until the thread has finished wor - self.ocommon.log_info_message("Joining the root.sh thread ",self.file_name) + oraversion=self.ocommon.get_rsp_version("INSTALL",None) + version = oraversion.split(".", 1)[0].strip() + self.ocommon.log_info_message("oraversion" + version, self.file_name) + if int(version) == 19 or int(version) == 21: + self.run_rootsh_on_node(node,giuser,gihome) + else: + self.ocommon.log_info_message("Running root.sh on node " + node,self.file_name) + thread=Process(target=self.run_rootsh_on_node,args=(node,giuser,gihome)) + mythreads.append(thread) + thread.start() + for thread in mythreads: # iterates over the threads + thread.join() # waits until the thread has finished wor + self.ocommon.log_info_message("Joining the root.sh thread ",self.file_name) def run_rootsh_on_node(self,node,giuser,gihome): """ diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragridadd.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oragridadd.py old mode 100644 new mode 100755 similarity index 95% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragridadd.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oragridadd.py index a4885b3ac9..10927e5225 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oragridadd.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oragridadd.py @@ -1,7 +1,7 @@ #!/usr/bin/python ############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oralogger.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oralogger.py old mode 100644 new mode 100755 similarity index 98% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oralogger.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oralogger.py index 552fedc7b2..dc02b287f0 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oralogger.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oralogger.py @@ -1,7 +1,7 @@ #!/usr/bin/python ############################# -# Copyright 2020, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramachine.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oramachine.py old mode 100644 new mode 100755 similarity index 96% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramachine.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oramachine.py index bffbedfd9f..98db72c775 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramachine.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oramachine.py @@ -1,7 +1,7 @@ #!/usr/bin/python ############################# -# Copyright 2020, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramiscops.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oramiscops.py old mode 100644 new mode 100755 similarity index 89% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramiscops.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oramiscops.py index 4df8d4944a..3395afc78c --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oramiscops.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oramiscops.py @@ -1,9 +1,9 @@ #!/usr/bin/python ############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl -# Author: sanjay.singh@oracle.com,paramdeep.saini@oracle.com +# Author: paramdeep.saini@oracle.com ############################ """ @@ -143,15 +143,36 @@ def setup(self): self.updateasmcount() else: pass - if self.ocommon.check_key("UPDATE_ASMDEVICES",self.ora_env_dict): - self.updateasmdevices() - else: - pass if self.ocommon.check_key("UPDATE_LISTENERENDP",self.ora_env_dict): self.updatelistenerendp() else: pass - + if self.ocommon.check_key("LIST_ASMDG",self.ora_env_dict): + self.listasmdg() + else: + pass + if self.ocommon.check_key("LIST_ASMDISKS",self.ora_env_dict): + self.listasmdisks() + else: + pass + if self.ocommon.check_key("LIST_ASMDGREDUNDANCY",self.ora_env_dict): + self.listasmdgredundancy() + else: + pass + if self.ocommon.check_key("LIST_ASMINSTNAME",self.ora_env_dict): + self.listasminstname() + else: + pass + if self.ocommon.check_key("LIST_ASMINSTSTATUS",self.ora_env_dict): + self.listasminststatus() + else: + pass + if self.ocommon.check_key("UPDATE_ASMDEVICES",self.ora_env_dict): + self.updateasmdevices() + else: + pass + + ct = datetime.datetime.now() ets = ct.timestamp() totaltime=ets - bts @@ -595,7 +616,53 @@ def updateasmcount(self): msg='''ASM Counts Details is now updated to {0}'''.format(asmcount) status="UPDATE_ASMCOUNT_UPDATED_SUCCESSFULLY" self.ocommon.log_info_message(msg,self.file_name) + print(status) + + def process_listenerendpoint_params(self,key): + """ + check listenerendpoint params + """ + status="" + msg="" + listenername=None + portlist=None + + self.ocommon.log_info_message("processing listenerendpoint params {0}".format(key),self.file_name) + cvar_str=self.ora_env_dict[key] + self.ocommon.log_info_message("processing listenerendpoint params {0}".format(cvar_str),self.file_name) + cvar_str=cvar_str.replace('"', '') + try: + cvar_dict = dict(item.split("=") for item in cvar_str.split(";") if "=" in item) + except ValueError as e: + self.ocommon.prog_exit("Error occurred") + for ckey in cvar_dict.keys(): + if ckey == 'lsnrname': + listenername = cvar_dict[ckey] + if ckey == 'portlist': + portlist = cvar_dict[ckey] + return listenername,portlist + + def updatelistenerendp(self): + """ + update listener end points details + """ + status="" + msg="" + giuser,gihome,obase,invloc=self.ocommon.get_gi_params() + self.ocommon.log_info_message("updating listener end points details params",self.file_name) + listenername,portlist=self.process_listenerendpoint_params("UPDATE_LISTENERENDP") + retvalue=self.ocommon.updatelistenerendp(giuser,gihome,listenername,portlist) + if not retvalue: + status="UPDATE_LISTENERENDPOINT_NOT_UPDATED" + msg='''Listener {0} End Point Details is not updated to portlist {1}'''.format(listenername,portlist) + self.ocommon.log_info_message(msg,self.file_name) print(status) + self.ocommon.prog_exit("Error occurred") + else: + msg='''Listener End Point Details is now updated to listenername-> {0} portlist-> {1}'''.format(listenername,portlist) + status="UPDATE_LISTENERENDPOINT_UPDATED_SUCCESSFULLY" + self.ocommon.log_info_message(msg,self.file_name) + print(status) def process_asmdevices_params(self,key): """ @@ -645,50 +712,77 @@ def updateasmdevices(self): status="UPDATE_ASMDEVICES_UPDATED_SUCCESSFULLY" self.ocommon.log_info_message(msg,self.file_name) print(status) - - def process_listenerendpoint_params(self,key): + + def listasmdg(self): """ - check listenerendpoint params + getting the ams details """ status="" msg="" - 
listenername=None - portlist=None - - self.ocommon.log_info_message("processing listenerendpoint params {0}".format(key),self.file_name) - cvar_str=self.ora_env_dict[key] - self.ocommon.log_info_message("processing listenerendpoint params {0}".format(cvar_str),self.file_name) - cvar_str=cvar_str.replace('"', '') - try: - cvar_dict = dict(item.split("=") for item in cvar_str.split(";") if "=" in item) - except ValueError as e: - self.ocommon.prog_exit("Error occurred") - for ckey in cvar_dict.keys(): - if ckey == 'lsnrname': - listenername = cvar_dict[ckey] - if ckey == 'portlist': - portlist = cvar_dict[ckey] - return listenername,portlist - - def updatelistenerendp(self): + giuser,gihome,obase,invloc=self.ocommon.get_gi_params() + self.ocommon.log_info_message("getting the asm diskgroup list",self.file_name) + retvalue=self.ocommon.check_asminst(giuser,gihome) + if retvalue == 0: + dglist=self.ocommon.get_asmdg(giuser,gihome) + print(dglist) + else: + print("NOT READY") + + def listasmdisks(self): """ - update listener end points details + getting the ams details """ status="" msg="" giuser,gihome,obase,invloc=self.ocommon.get_gi_params() - self.ocommon.log_info_message("updating listener end points details params",self.file_name) - listenername,portlist=self.process_listenerendpoint_params("UPDATE_LISTENERENDP") - retvalue=self.ocommon.updatelistenerendp(giuser,gihome,listenername,portlist) - if not retvalue: - status="UPDATE_LISTENERENDPOINT_NOT_UPDATED" - msg='''Listener {0} End Point Details is not updated to portlist {1}'''.format(listenername,portlist) - self.ocommon.log_info_message(msg,self.file_name) - print(status) - self.ocommon.prog_exit("Error occurred") + self.ocommon.log_info_message("getting the asm diskgroup list",self.file_name) + dg=self.ora_env_dict["LIST_ASMDISKS"] + retvalue=self.ocommon.check_asminst(giuser,gihome) + if retvalue == 0: + dsklist=self.ocommon.get_asmdsk(giuser,gihome,dg) + print(dsklist) else: - msg='''Listener End Point Details is now updated to listenername-> {0} portlist-> {1}'''.format(listenername,portlist) - status="UPDATE_LISTENERENDPOINT_UPDATED_SUCCESSFULLY" - self.ocommon.log_info_message(msg,self.file_name) - print(status) - \ No newline at end of file + print("NOT READY") + + def listasmdgredundancy(self): + """ + getting the asm disk redundancy + """ + status="" + msg="" + giuser,gihome,obase,invloc=self.ocommon.get_gi_params() + self.ocommon.log_info_message("getting the asm diskgroup list",self.file_name) + dg=self.ora_env_dict["LIST_ASMDGREDUNDANCY"] + retvalue=self.ocommon.check_asminst(giuser,gihome) + if retvalue == 0: + asmdgrd=self.ocommon.get_asmdgrd(giuser,gihome,dg) + print(asmdgrd) + else: + print("NOT READY") + + + def listasminststatus(self): + """ + getting the asm instance status + """ + status="" + msg="" + giuser,gihome,obase,invloc=self.ocommon.get_gi_params() + retvalue=self.ocommon.check_asminst(giuser,gihome) + if retvalue == 0: + print("STARTED") + else: + print("NOT_STARTED") + + def listasminstname(self): + """ + getting the asm disk redundancy + """ + status="" + msg="" + giuser,gihome,obase,invloc=self.ocommon.get_gi_params() + sid=self.ocommon.get_asmsid(giuser,gihome) + if sid is not None: + print(sid) + else: + print("NOT READY") diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracadd.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracadd.py old mode 100644 new mode 100755 similarity index 99% rename from 
OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracadd.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracadd.py index 6db43726fb..6d6c810d70 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracadd.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracadd.py @@ -1,7 +1,7 @@ #!/usr/bin/python ############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracdel.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracdel.py old mode 100644 new mode 100755 similarity index 98% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracdel.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracdel.py index 94dcca2eb0..4eab77a816 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracdel.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracdel.py @@ -1,9 +1,9 @@ #!/usr/bin/python ############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl -# Author: sanjay.singh@oracle.com,paramdeep.saini@oracle.com +# Author: paramdeep.saini@oracle.com ############################ """ @@ -216,9 +216,11 @@ def del_gihome_main(self,hostname): """ giuser,gihome,gbase,oinv=self.ocommon.get_gi_params() self.ocommon.log_info_message("gi params " + gihome ,self.file_name) + hostname=self.ocommon.get_public_hostname() + node=hostname if self.ocommon.check_key("DEL_GIHOME",self.ora_env_dict): retcode1=self.ocvu.check_home(hostname,gihome,giuser) - status=self.ocommon.check_gi_installed(retcode1,gihome,giuser) + status=self.ocommon.check_gi_installed(retcode1,gihome,giuser,node,oinv) if status: self.del_gihome() else: diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracprov.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracprov.py similarity index 95% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracprov.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracprov.py index e590693af0..e7584a3878 100755 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracprov.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracprov.py @@ -1,9 +1,9 @@ #!/usr/bin/python ############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl -# Author: sanjay.singh@oracle.com,paramdeep.saini@oracle.com +# Author: paramdeep.saini@oracle.com ############################ """ @@ -162,11 +162,23 @@ def perform_ssh_setup(self): Perform ssh setup """ #if not self.ocommon.detect_k8s_env(): - if not self.ocommon.check_key("SSH_PRIVATE_KEY",self.ora_env_dict) and not self.ocommon.check_key("SSH_PUBLIC_KEY",self.ora_env_dict): - dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() - self.osetupssh.setupssh(dbuser,dbhome,"INSTALL") - #if self.ocommon.check_key("VERIFY_SSH",self.ora_env_dict): + pub_nodes,vip_nodes,priv_nodes=self.ocommon.process_cluster_vars("CRS_NODES") + crs_nodes=pub_nodes.replace(" ",",") + crs_nodes_list=crs_nodes.split(",") + if len(crs_nodes_list) == 1: + self.ocommon.log_info_message("Cluster size=1. Node=" + crs_nodes_list[0],self.file_name) + user=self.ora_env_dict["DB_USER"] + cmd='''su - {0} -c "/bin/rm -rf ~/.ssh ; sleep 1; /bin/ssh-keygen -t rsa -q -N \'\' -f ~/.ssh/id_rsa ; sleep 1; /bin/ssh-keyscan {1} > ~/.ssh/known_hosts 2>/dev/null ; sleep 1; /bin/cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys"'''.format(user,crs_nodes_list[0]) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + else: + if not self.ocommon.check_key("SSH_PRIVATE_KEY",self.ora_env_dict) and not self.ocommon.check_key("SSH_PUBLIC_KEY",self.ora_env_dict): + dbuser,dbhome,dbase,oinv=self.ocommon.get_db_params() + self.osetupssh.setupssh(dbuser,dbhome,"INSTALL") + #if self.ocommon.check_key("VERIFY_SSH",self.ora_env_dict): #self.osetupssh.verifyssh(dbuser,"INSTALL") + else: + self.ocommon.log_info_message("SSH setup must be already completed during env setup as this this env variables SSH_PRIVATE_KEY and SSH_PUBLIC_KEY are set.",self.file_name) def db_sw_install(self): """ diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracstdby.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracstdby.py old mode 100644 new mode 100755 similarity index 99% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracstdby.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracstdby.py index f97238e3d8..ad91f25d3b --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/oraracstdby.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/oraracstdby.py @@ -1,7 +1,7 @@ #!/usr/bin/python ############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasetupenv.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/orasetupenv.py old mode 100644 new mode 100755 similarity index 99% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasetupenv.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/orasetupenv.py index d9c18db73c..cdf3c2a407 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasetupenv.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/orasetupenv.py @@ -1,7 +1,7 @@ #!/usr/bin/python ############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ @@ -83,13 +83,13 @@ def setup(self): self.check_systemd() self.set_ping_permission() self.set_common_script() - self.set_asmdev_perm() self.add_domain_search() self.add_dns_servers() self.populate_etchosts("localhost") self.populate_user_profiles() #self.setup_ssh_for_k8s() self.setup_gi_sw() + self.set_asmdev_perm() self.reset_grid_user_passwd() self.setup_db_sw() self.adjustMemlockLimits() @@ -114,6 +114,8 @@ def populate_env_vars(self): """ self.ocommon.populate_rac_env_vars() if self.ocommon.check_key("CRS_GPC",self.ora_env_dict): + if self.ocommon.ora_env_dict["CRS_GPC"].lower() == 'true': + self.ora_env_dict=self.ocommon.add_key("DB_CONFIG_TYPE","SINGLE",self.ora_env_dict) pubnode=self.ocommon.get_public_hostname() crs_nodes="pubhost="+pubnode if not self.ocommon.check_key("CRS_NODES",self.ora_env_dict): @@ -782,7 +784,7 @@ def set_banner(self): retcode1=self.ocvu.check_home(pubhostname,gihome,giuser) if retcode1 == 0: self.ora_env_dict=self.ocommon.add_key("GI_HOME_INSTALLED_FLAG","true",self.ora_env_dict) - status=self.ocommon.check_gi_installed(retcode1,gihome,giuser) + status=self.ocommon.check_gi_installed(retcode1,gihome,giuser,pubhostname,invloc) if status: msg="Grid is already installed on this machine" self.ocommon.log_info_message(self.ocommon.print_banner(msg),self.file_name) diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasshsetup.py b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/orasshsetup.py old mode 100644 new mode 100755 similarity index 70% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasshsetup.py rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/orasshsetup.py index 4091ff9137..e451730d42 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/orasshsetup.py +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/orasshsetup.py @@ -1,7 +1,7 @@ #!/usr/bin/python ############################# -# Copyright 2021, Oracle Corporation and/or affiliates. All rights reserved. +# Copyright 2020-2025, Oracle Corporation and/or affiliates. All rights reserved. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl # Author: paramdeep.saini@oracle.com ############################ @@ -80,10 +80,13 @@ def setupssh(self,user,ohome,ctype): This function setup the ssh between user as SKIP_SSH_SETUP flag is not set """ self.ocommon.reset_os_password(user) - password=self.ocommon.get_os_password() + passwd=self.ocommon.get_os_password() + password=passwd.replace("\n", "") giuser,gihome,gibase,oinv=self.ocommon.get_gi_params() expect=self.ora_env_dict["EXPECT"] if self.ocommon.check_key("EXPECT",self.ora_env_dict) else "/bin/expect" script_dir=self.ora_env_dict["SSHSCR_DIR"] if self.ocommon.check_key("SSHSCR_DIR",self.ora_env_dict) else "/opt/scripts/startup/scripts" + + sshscr=self.ora_env_dict["SSHSCR"] if self.ocommon.check_key("SSHSCR",self.ora_env_dict) else "bin/cluvfy" if user == 'grid': sshscr="runcluvfy.sh" @@ -94,44 +97,44 @@ def setupssh(self,user,ohome,ctype): sshscr="runcluvfy.sh" cluster_nodes="" + # Run ssh-keyscan for each node + oraversion=self.ocommon.get_rsp_version("INSTALL",None) + version = oraversion.split(".", 1)[0].strip() if ctype == 'INSTALL': cluster_nodes=self.ocommon.get_cluster_nodes() cluster_nodes = cluster_nodes.replace(" ",",") i=0 while i < 5: + self.ocommon.log_info_message('''SSH setup in progress. Count set to {0}'''.format(i),self.file_name) self.ocommon.set_mask_str(password.strip()) - self.ocommon.log_info_message('''SSH setup in progress. Count set to {0}'''.format(i),self.file_name) - cmd='''su - {0} -c "echo \"{4}\" | {1}/{2} comp admprv -n {3} -o user_equiv -fixup"'''.format(user,gihome,sshscr,cluster_nodes,'HIDDEN_STRING') - output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) - self.ocommon.check_os_err(output,error,retcode,None) - self.ocommon.unset_mask_str() - retcode=self.verifyssh(user,gihome,sshscr,cluster_nodes) + if int(version) == 19 or int(version) == 21: + self.performsshsetup(user,gihome,sshscr,cluster_nodes,version,password,i,expect,script_dir) + else: + self.performsshsetup(user,gihome,sshscr,cluster_nodes,version,password,i,expect,script_dir) + retcode=self.verifyssh(user,gihome,sshscr,cluster_nodes,version) if retcode == 0: - break + break else: i = i + 1 self.ocommon.log_info_message('''SSH setup verification failed. Trying again..''',self.file_name) - elif ctype == 'ADDNODE': cluster_nodes=self.ocommon.get_cluster_nodes() cluster_nodes = cluster_nodes.replace(" ",",") exiting_cls_node=self.ocommon.get_existing_clu_nodes(True) new_nodes=cluster_nodes + "," + exiting_cls_node - cmd='''su - {0} -c "rm -rf ~/.ssh ; mkdir -p ~/.ssh ; chmod 700 ~/.ssh"'''.format(user) output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) self.ocommon.check_os_err(output,error,retcode,False) - - i=0 while i < 5: - self.ocommon.set_mask_str(password.strip()) - self.ocommon.log_info_message('''SSH setup in progress. 
Count set to {0}'''.format(i),self.file_name) - cmd='''su - {0} -c "echo \"{4}\" | {1}/{2} comp admprv -n {3} -o user_equiv -fixup"'''.format(user,gihome,sshscr,new_nodes,'HIDDEN_STRING') - output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) - self.ocommon.check_os_err(output,error,retcode,None) - self.ocommon.unset_mask_str() - retcode=self.verifyssh(user,gihome,sshscr,new_nodes) + # Run ssh-keyscan for each node + for node in cluster_nodes.split(","): + self.ocommon.log_info_message(f"Adding {node} to known_hosts.", self.file_name) + keyscan_cmd = '''su - {0} -c "ssh-keyscan -H {1} >> ~/.ssh/known_hosts"'''.format(user, node) + keyscan_output, keyscan_error, keyscan_retcode = self.ocommon.execute_cmd(keyscan_cmd, None, None) + self.ocommon.check_os_err(keyscan_output, keyscan_error, keyscan_retcode, False) + self.performsshsetup(user,gihome,sshscr,new_nodes,version,password,i,expect,script_dir) + retcode=self.verifyssh(user,gihome,sshscr,new_nodes,version) if retcode == 0: break else: @@ -140,16 +143,48 @@ def setupssh(self,user,ohome,ctype): else: cluster_nodes=self.ocommon.get_cluster_nodes() - def verifyssh(self,user,gihome,sshscr,cls_nodes): + def verifyssh(self,user,gihome,sshscr,cls_nodes,version): """ This function setup the ssh between user as SKIP_SSH_SETUP flag is not set """ self.ocommon.log_info_message("Verifying SSH between nodes " + cls_nodes, self.file_name) - cls_nodes = cls_nodes.replace(" ",",") - cmd='''su - {0} -c "{1}/{2} comp admprv -n {3} -o user_equiv -sshonly -verbose"'''.format(user,gihome,sshscr,cls_nodes) - output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) - self.ocommon.check_os_err(output,error,retcode,None) - return retcode + retcode1=0 + if int(version) == 19 or int(version) == 21: + nodes_list=cls_nodes.split(" ") + for node in nodes_list: + cmd='''su - {0} -c "ssh -o BatchMode=yes -o ConnectTimeout=5 {0}@{1} echo ok 2>&1"'''.format(user,node) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + if retcode != 0: + retcode1=255 + else: + cls_nodes = cls_nodes.replace(" ",",") + cmd='''su - {0} -c "{1}/{2} comp admprv -n {3} -o user_equiv -sshonly -verbose"'''.format(user,gihome,sshscr,cls_nodes) + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + retcode1=retcode + + return retcode1 + + def performsshsetup(self,user,gihome,sshscr,cls_nodes,version,password,counter,expect,script_dir): + """ + This functions set the ssh between cluster nodes + """ + self.ocommon.set_mask_str(password.strip()) + self.ocommon.log_info_message('''SSH setup in progress. 
Count set to {0}'''.format(counter),self.file_name) + if int(version) == 19 or int(version) == 21: + sshscr="setupSSH.expect" + cluster_nodes = cls_nodes.replace(","," ") + sshcmd='''su - {0} -c "{1} {2}/{3} {0} \\"{4}/oui/prov/resources/scripts\\" \\"{5}\\" \\"{6}\\""'''.format(user,expect,script_dir,sshscr,gihome,cluster_nodes,'HIDDEN_STRING') + sshcmd_output, sshcmd_error, sshcmd_retcode = self.ocommon.execute_cmd(sshcmd, None, None) + self.ocommon.check_os_err(sshcmd_output, sshcmd_error, sshcmd_retcode, False) + else: + cmd='''su - {0} -c "echo \"{4}\" | {1}/{2} comp admprv -n {3} -o user_equiv -fixup"'''.format(user,gihome,sshscr,cls_nodes,'HIDDEN_STRING') + output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) + self.ocommon.check_os_err(output,error,retcode,None) + + self.ocommon.unset_mask_str() + def setupsshusekey(self,user,ohome,ctype): """ @@ -163,6 +198,8 @@ def setupsshusekey(self,user,ohome,ctype): new_nodes=self.ocommon.get_cluster_nodes() existing_cls_node=self.ocommon.get_existing_clu_nodes(None) giuser,gihome,gibase,oinv=self.ocommon.get_gi_params() + oraversion=self.ocommon.get_rsp_version("INSTALL",None) + version = oraversion.split(".", 1)[0].strip() sshscr=self.ora_env_dict["SSHSCR"] if self.ocommon.check_key("SSHSCR",self.ora_env_dict) else "bin/cluvfy" if user == 'grid': sshscr="runcluvfy.sh" @@ -180,7 +217,6 @@ def setupsshusekey(self,user,ohome,ctype): for node1 in cluster_nodes.split(" "): for node in cluster_nodes.split(" "): i=1 - #cmd='''su - {0} -c "ssh-keyscan -H {1} >> /home/{0}/.ssh/known_hosts"'''.format(user,node,ohome) cmd='''su - {0} -c "ssh -o StrictHostKeyChecking=no -x -l {0} {3} \\"ssh-keygen -R {1};ssh -o StrictHostKeyChecking=no -x -l {0} {1} \\\"/bin/sh -c true\\\"\\""''' .format(user,node,ohome,node1) output,error,retcode=self.ocommon.execute_cmd(cmd,None,None) self.ocommon.check_os_err(output,error,retcode,None) @@ -195,7 +231,7 @@ def setupsshusekey(self,user,ohome,ctype): time.sleep(5) i=i+1 - retcode=self.verifyssh(user,gihome,sshscr,new_nodes) + retcode=self.verifyssh(user,gihome,sshscr,new_nodes,version) def setupsshdirs(self,user,ohome,ctype): """ diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/setupSSH.expect b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/setupSSH.expect old mode 100644 new mode 100755 similarity index 95% rename from OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/setupSSH.expect rename to OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/setupSSH.expect index 73178da636..69848cfbb7 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/common/scripts/setupSSH.expect +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/scripts/setupSSH.expect @@ -1,7 +1,7 @@ #!/usr/bin/expect -f # LICENSE UPL 1.0 # -# Copyright (c) 1982-2018 Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1982-2025 Oracle and/or its affiliates. All rights reserved. # # Since: January, 2018 # Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/setup_rac_host.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/setup_rac_host.sh new file mode 100755 index 0000000000..b854aab05a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/setup_rac_host.sh @@ -0,0 +1,740 @@ +#!/bin/bash +############################# +# Copyright 2020-2025, Oracle Corporation and/or affiliates. 
All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl +# Author: paramdeep.saini@oracle.com +############################ +NODEDIRS=0 +SLIMENV=0 +IGNOREOSVERSION=0 +validate_environment_variables() { + local podman_compose_file="$1" + # shellcheck disable=SC2207,SC2016 + local env_variables=($(grep -oP '\${\K[^}]*' "$podman_compose_file" | sort -u)) + local missing_variables=() + + for var in "${env_variables[@]}"; do + if [[ -z "${!var}" ]]; then + missing_variables+=("$var") + fi + done + + if [ ${#missing_variables[@]} -eq 0 ]; then + echo "All required environment variables are present and exported." + return 0 + else + echo "The following required environment variables from podman-compose.yml(or may be wrong podman-compose.yml?) are missing or not exported:" + printf '%s\n' "${missing_variables[@]}" + return 1 + fi +} +# Function to set up environment variables +setup_nfs_variables() { + export HEALTHCHECK_INTERVAL=60s + export HEALTHCHECK_TIMEOUT=120s + export HEALTHCHECK_RETRIES=240 + export RACNODE1_CONTAINER_NAME=racnodep1 + export RACNODE1_HOST_NAME=racnodep1 + export RACNODE1_PUBLIC_IP=10.0.20.170 + export RACNODE1_CRS_PRIVATE_IP1=192.168.17.170 + export RACNODE1_CRS_PRIVATE_IP2=192.168.18.170 + export INSTALL_NODE=racnodep1 + export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c + export CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" + export SCAN_NAME=racnodepc1-scan + export CRS_ASM_DISCOVERY_STRING="/oradata" + export CRS_ASM_DEVICE_LIST="/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img" + export RACNODE2_CONTAINER_NAME=racnodep2 + export RACNODE2_HOST_NAME=racnodep2 + export RACNODE2_PUBLIC_IP=10.0.20.171 + export RACNODE2_CRS_PRIVATE_IP1=192.168.17.171 + export RACNODE2_CRS_PRIVATE_IP2=192.168.18.171 + export DNS_CONTAINER_NAME=rac-dnsserver + export DNS_HOST_NAME=racdns + export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" + export RAC_NODE_NAME_PREFIXD="racnoded" + export RAC_NODE_NAME_PREFIXP="racnodep" + export DNS_DOMAIN=example.info + export PUBLIC_NETWORK_NAME="rac_pub1_nw" + export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" + export PRIVATE1_NETWORK_NAME="rac_priv1_nw" + export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" + export PRIVATE2_NETWORK_NAME="rac_priv2_nw" + export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" + export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc + export KEY_SECRET_FILE=/opt/.secrets/key.pem + export DNS_PUBLIC_IP=10.0.20.25 + export DNS_PRIVATE1_IP=192.168.17.25 + export DNS_PRIVATE2_IP=192.168.18.25 + export CMAN_CONTAINER_NAME=racnodepc1-cman + export CMAN_HOST_NAME=racnodepc1-cman + export CMAN_IMAGE_NAME="localhost/oracle/client-cman:21.3.0" + export CMAN_PUBLIC_IP=10.0.20.15 + export CMAN_PUBLIC_HOSTNAME="racnodepc1-cman" + export DB_HOSTDETAILS="HOST=racnodepc1-scan:RULE_ACT=accept,HOST=racnodep1:IP=10.0.20.170" + export STORAGE_CONTAINER_NAME="racnode-storage" + export STORAGE_HOST_NAME="racnode-storage" + export STORAGE_IMAGE_NAME="localhost/oracle/rac-storage-server:latest" + export ORACLE_DBNAME="ORCLCDB" + export STORAGE_PUBLIC_IP=10.0.20.80 + export NFS_STORAGE_VOLUME="/scratch/stage/rac-storage/$ORACLE_DBNAME" + export DB_SERVICE=service:soepdb + + if [ -f /etc/selinux/config ]; then + # Check SELinux state + selinux_state=$(grep -E '^SELINUX=' /etc/selinux/config | cut -d= -f2) + + if [[ "$selinux_state" == "enforcing" || "$selinux_state" == 
"permissive" || "$selinux_state" == "targeted" ]]; then + echo "SELinux is enabled with state: $selinux_state. Proceeding with installation." + else + echo "SELinux is either disabled or in an unknown state: $selinux_state. Skipping installation." + echo "INFO: NFS Environment variables setup completed successfully." + return 0 + fi + else + echo "/etc/selinux/config not found. Skipping SELinux check." + echo "INFO: NFS Environment variables setup completed successfully." + return 0 + fi + + +# Create rac-storage.te file +cat < /var/opt/rac-storage.te +module rac-storage 1.0; + +require { + type container_init_t; + type hugetlbfs_t; + type nfsd_fs_t; + type rpc_pipefs_t; + type default_t; + type kernel_t; + class filesystem mount; + class filesystem unmount; + class file { read write open }; + class dir { read watch }; + class bpf { map_create map_read map_write }; + class system module_request; + class fifo_file { open read write }; +} + +#============= container_init_t ============== +allow container_init_t hugetlbfs_t:filesystem mount; +allow container_init_t nfsd_fs_t:filesystem mount; +allow container_init_t rpc_pipefs_t:filesystem mount; +allow container_init_t nfsd_fs_t:file { read write open }; +allow container_init_t nfsd_fs_t:dir { read watch }; +allow container_init_t rpc_pipefs_t:dir { read watch }; +allow container_init_t rpc_pipefs_t:fifo_file { open read write }; +allow container_init_t rpc_pipefs_t:filesystem unmount; +allow container_init_t self:bpf map_create; +allow container_init_t self:bpf { map_read map_write }; +allow container_init_t default_t:dir read; +allow container_init_t kernel_t:system module_request; +EOF + + # Change directory to /var/opt + cd /var/opt || { echo "Failed to change directory to /var/opt. Exiting."; exit 1; } + + # Make the policy module + make -f /usr/share/selinux/devel/Makefile rac-storage.pp || { echo "Failed to make rac-storage.pp. Exiting."; exit 1; } + + # Install the policy module + semodule -i rac-storage.pp || { echo "Failed to install rac-storage.pp. Exiting."; exit 1; } + + # List installed modules and grep for rac-storage + semodule -l | grep rac-storage + + echo "INFO: NFS Environment variables setup completed successully." 
+ return 0 +} +setup_blockdevices_variables(){ + export HEALTHCHECK_INTERVAL=60s + export HEALTHCHECK_TIMEOUT=120s + export HEALTHCHECK_RETRIES=240 + export RACNODE1_CONTAINER_NAME=racnodep1 + export RACNODE1_HOST_NAME=racnodep1 + export RACNODE1_PUBLIC_IP=10.0.20.170 + export RACNODE1_CRS_PRIVATE_IP1=192.168.17.170 + export RACNODE1_CRS_PRIVATE_IP2=192.168.18.170 + export INSTALL_NODE=racnodep1 + export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c + export CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" + export SCAN_NAME=racnodepc1-scan + export ASM_DEVICE1="/dev/asm-disk1" + export ASM_DEVICE2="/dev/asm-disk2" + export CRS_ASM_DEVICE_LIST="${ASM_DEVICE1},${ASM_DEVICE2}" + export ASM_DISK1="/dev/oracleoci/oraclevdd" + export ASM_DISK2="/dev/oracleoci/oraclevde" + export CRS_ASM_DISCOVERY_STRING="/dev/asm-disk*" + export RACNODE2_CONTAINER_NAME=racnodep2 + export RACNODE2_HOST_NAME=racnodep2 + export RACNODE2_PUBLIC_IP=10.0.20.171 + export RACNODE2_CRS_PRIVATE_IP1=192.168.17.171 + export RACNODE2_CRS_PRIVATE_IP2=192.168.18.171 + export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc + export KEY_SECRET_FILE=/opt/.secrets/key.pem + export DNS_CONTAINER_NAME=rac-dnsserver + export DNS_HOST_NAME=racdns + export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" + export RAC_NODE_NAME_PREFIXD="racnoded" + export RAC_NODE_NAME_PREFIXP="racnodep" + export DNS_DOMAIN=example.info + export PUBLIC_NETWORK_NAME="rac_pub1_nw" + export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" + export PRIVATE1_NETWORK_NAME="rac_priv1_nw" + export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" + export PRIVATE2_NETWORK_NAME="rac_priv2_nw" + export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" + export DNS_PUBLIC_IP=10.0.20.25 + export DNS_PRIVATE1_IP=192.168.17.25 + export DNS_PRIVATE2_IP=192.168.18.25 + export CMAN_CONTAINER_NAME=racnodepc1-cman + export CMAN_HOST_NAME=racnodepc1-cman + export CMAN_IMAGE_NAME="localhost/oracle/client-cman:21.3.0" + export CMAN_PUBLIC_IP=10.0.20.15 + export CMAN_PUBLIC_HOSTNAME="racnodepc1-cman" + export DB_HOSTDETAILS="HOST=racnodepc1-scan:RULE_ACT=accept,HOST=racnodep1:IP=10.0.20.170" + export DB_SERVICE=service:soepdb + echo "INFO: BlockDevices Environment variables setup completed successfully." + return 0 +} + + +# Function to set up DNS Podman container +setup_dns_container() { + podman-compose up -d ${DNS_CONTAINER_NAME} + success_message_line="DNS Server IS READY TO USE" + last_lines="" + start_time=$(date +%s) + + # Monitor logs until success message is found or timeout occurs + while true; do + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) + + if [ $elapsed_time -ge 600 ]; then + # If 10 minutes elapsed, print a timeout message and exit + echo "ERROR: Success message not found in DNS Container logs after 10 minutes." >&2 + break + fi + + # Read the last 5 lines from the logs + last_lines=$(podman logs --tail 5 "${DNS_CONTAINER_NAME}" 2>&1) + + # Check if the success message is present in the output + if echo "$last_lines" | grep -q "$success_message_line"; then + echo "###########################################" + echo "INFO: DNS Container is setup successfully." 
+ echo "###########################################" + break + fi + + # Print the last 10 lines from the logs + echo "$last_lines" >&2 + + # Sleep for a short duration before checking logs again + sleep 15 + done + return 0 +} + +setup_rac_container() { + podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE1_CONTAINER_NAME} + podman-compose stop ${RACNODE1_CONTAINER_NAME} + + podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE2_CONTAINER_NAME} + podman-compose stop ${RACNODE2_CONTAINER_NAME} + + podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} + podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} + podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} + + podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} + podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} + podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} + + podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE1_PUBLIC_IP} ${RACNODE1_CONTAINER_NAME} + podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP1} ${RACNODE1_CONTAINER_NAME} + podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP2} ${RACNODE1_CONTAINER_NAME} + + podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE2_PUBLIC_IP} ${RACNODE2_CONTAINER_NAME} + podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP1} ${RACNODE2_CONTAINER_NAME} + podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP2} ${RACNODE2_CONTAINER_NAME} + + podman-compose start ${RACNODE1_CONTAINER_NAME} + podman-compose start ${RACNODE2_CONTAINER_NAME} + + RAC_LOG="/tmp/orod/oracle_rac_setup.log" + success_message_line="ORACLE RAC DATABASE IS READY TO USE" + last_lines="" + start_time=$(date +%s) + + # Monitor logs until success message is found or timeout occurs + while true; do + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) + + if [ $elapsed_time -ge 3600 ]; then + # If 60 minutes elapsed, print a timeout message and exit + echo "ERROR: Success message not found in the logs after 60 minutes." >&2 + break + fi + + # Read the last 10 lines from the logs + last_lines=$(podman exec ${RACNODE1_CONTAINER_NAME} /bin/bash -c "tail -n 10 $RAC_LOG" 2>&1) + + # Check if the success message is present in the output + if echo "$last_lines" | grep -q "$success_message_line"; then + echo "###############################################" + echo "INFO: Oracle RAC Containers setup successfully." 
+ echo "###############################################" + break + fi + + # Print the last 10 lines from the logs + echo "$last_lines" >&2 + + # Sleep for a short duration before checking logs again + sleep 15 + done + return 0 + +} + +setup_storage_container() { + export ORACLE_DBNAME=ORCLCDB + mkdir -p $NFS_STORAGE_VOLUME + rm -rf $NFS_STORAGE_VOLUME/asm_disk0* + podman rm -f ${STORAGE_CONTAINER_NAME} + podman-compose --podman-run-args="-t -i --systemd=always" up -d ${STORAGE_CONTAINER_NAME} + STOR_LOG="/tmp/storage_setup.log" + export_message_line1="Export list for racnode-storage:" + export_message_line2="/oradata *" + last_lines="" + start_time=$(date +%s) + # Monitor logs until export message is found or timeout occurs + while true; do + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) + + if [ $elapsed_time -ge 1800 ]; then + # If 30 minutes elapsed, print a timeout message and exit + echo "ERROR: Successful message not found in the storage container logs after 30 minutes." >&2 + break + fi + # Read the last 10 lines from the logs + last_lines=$(podman exec ${STORAGE_CONTAINER_NAME} tail -n 10 "$STOR_LOG" 2>&1) + # Check if both lines of the export message are present in the output + if echo "$last_lines" | grep -q "$export_message_line1" && echo "$last_lines" | grep -q "$export_message_line2"; then + echo "############################################################" + echo "INFO: NFS Storage Container exporting /oradata successfully." + echo "############################################################" + break + fi + # Print the last 10 lines from the logs + echo "$last_lines" >&2 + # Sleep for a short duration before checking logs again + sleep 15 + done + podman volume inspect racstorage &> /dev/null && podman volume rm racstorage + sleep 5 + podman volume create --driver local \ + --opt type=nfs \ + --opt o=addr=$STORAGE_PUBLIC_IP,rw,bg,hard,tcp,vers=3,timeo=600,rsize=32768,wsize=32768,actimeo=0 \ + --opt device=$STORAGE_PUBLIC_IP:/oradata \ + racstorage + return 0 +} + + +setup_cman_container() { + podman-compose up -d ${CMAN_CONTAINER_NAME} + success_message_line="CONNECTION MANAGER IS READY TO USE" + last_lines="" + start_time=$(date +%s) + + # Monitor logs until success message is found or timeout occurs + while true; do + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) + + if [ $elapsed_time -ge 600 ]; then + # If 60 minutes elapsed, print a timeout message and exit + echo "ERROR: Success message not found in CMAN Container logs after 10 minutes." >&2 + break + fi + + # Read the last 10 lines from the logs + last_lines=$(podman logs --tail 5 "${CMAN_CONTAINER_NAME}" 2>&1) + + # Check if the success message is present in the output + if echo "$last_lines" | grep -q "$success_message_line"; then + echo "###########################################" + echo "INFO: CMAN Container is setup successfully." 
+ echo "###########################################" + break + fi + + # Print the last 10 lines from the logs + echo "$last_lines" >&2 + + # Sleep for a short duration before checking logs again + sleep 15 + done + return 0 +} + +setup_rac_networks() { + podman network create --driver=bridge --subnet=${PUBLIC_NETWORK_SUBNET} ${PUBLIC_NETWORK_NAME} + podman network create --driver=bridge --subnet=${PRIVATE1_NETWORK_SUBNET} ${PRIVATE1_NETWORK_NAME} --disable-dns --internal + podman network create --driver=bridge --subnet=${PRIVATE2_NETWORK_SUBNET} ${PRIVATE2_NETWORK_NAME} --disable-dns --internal + echo "INFO: Oracle RAC Container Networks setup successfully" + return 0 +} + + +function DisplayUsage(){ + echo "Usage : + $0 [<-slimenv> <-nodedirs=dir1,dir2,...,dirn>] [-ignoreOSVersion] [-blockdevices-env|-cleanup|-dns|-networks|-nfs-env|-prepare-rac-env|-rac|-storage] [-help]" + return 0 +} + +# Function to check if a command is available +check_command() { + if ! command -v "$1" &>/dev/null; then + return 1 + fi +} + +# Function to install Podman +install_podman() { + if ! check_command podman; then + echo "INFO: Podman is not installed. Installing..." + sudo dnf install -y podman + else + echo "INFO: Podman is already installed." + fi + return 0 +} + +# Function to install Podman-Compose +install_podman_compose() { + if ! check_command podman-compose; then + echo "INFO: Podman-Compose is not installed. Installing..." + # Enable EPEL repository for Oracle Linux 8 + sudo dnf config-manager --enable ol8_developer_EPEL + # Install Podman-Compose + sudo dnf install -y podman-compose + else + echo "INFO: Podman-Compose is already installed." + fi + return 0 +} + +function setupSELinuxContext(){ + + dnf install selinux-policy-devel -y + [ -f /var/opt/rac-podman.te ] && cp /var/opt/rac-podman.te /var/opt/rac-podman.te.ORG + [ -f /var/opt/rac-podman.te ] && rm -rf /var/opt/rac-podman.te + cat > /var/opt/rac-podman.te < /dev/null; then + echo "INFO: Deleting existing secret $secret_name..." + # shellcheck disable=SC2086 + podman secret rm $secret_name + fi + + # Create the new secret + echo "INFO: Creating new secret $secret_name..." + # shellcheck disable=SC2086 + podman secret create $secret_name $file_path +} + +create_secrets() { + # Check if RAC_SECRET environment variable is defined + if [ -z "$RAC_SECRET" ]; then + echo "ERROR: RAC_SECRET environment variable is not defined." + return 1 + fi + mkdir -p /opt/.secrets/ + # shellcheck disable=SC2086 + echo $RAC_SECRET > /opt/.secrets/pwdfile.txt + # shellcheck disable=SC2164 + cd /opt/.secrets + openssl genrsa -out key.pem + openssl rsa -in key.pem -out key.pub -pubout + openssl pkeyutl -in pwdfile.txt -out pwdfile.enc -pubin -inkey key.pub -encrypt + rm -rf /opt/.secrets/pwdfile.txt + # Delete and create secrets + delete_and_create_secret "pwdsecret" "/opt/.secrets/pwdfile.enc" + delete_and_create_secret "keysecret" "/opt/.secrets/key.pem" + echo "INFO: Secrets created." + # shellcheck disable=SC2164 + cd - + return 0 +} + +check_system_resources() { + # Check swap space in GB + swap_space=$(free -g | grep Swap | awk '{print $2}') + if [ "$swap_space" -ge 16 ]; then + echo "INFO: Swap space is sufficient ($swap_space GB)." + else + echo "ERROR: Swap space is insufficient ($swap_space GB). Minimum 32 GB required." + return 1 + fi + + # Check physical memory (RAM) in GB + total_memory=$(free -g | grep Mem | awk '{print $2}') + if [ "$total_memory" -ge 16 ]; then + echo "INFO: Physical memory is sufficient ($total_memory GB)." 
+ else + echo "ERROR: Physical memory is insufficient ($total_memory GB). Minimum 32 GB required." + return 1 + fi + + # Both swap space and physical memory meet the requirements + return 0 +} + +setup_host_prepreq(){ + kernelVersionSupported=1 + # shellcheck disable=SC2317 + # shellcheck disable=SC2006 + OSVersion=`grep "Oracle Linux Server release 8" /etc/oracle-release` + OSstatus=$? + if [ ${OSstatus} -eq 0 ]; then + OSVersionSupported=1 + else + OSVersionSupported=0 + fi + + echo "INFO: Setting Podman env on OS [${OSVersion}]" + # shellcheck disable=SC2006,SC2086 + kernelVersion=`uname -r | cut -d. -f1,2` + # shellcheck disable=SC2006,SC2086 + majorKernelVersion=`echo ${kernelVersion} | cut -d. -f1` + # shellcheck disable=SC2006,SC2086 + minorKernelVersion=`echo ${kernelVersion} | cut -d. -f2` + + echo "Running on Kernel [${kernelVersion}]" +# shellcheck disable=SC2006,SC2086 + if [ ${majorKernelVersion} -lt 5 ]; then + kernelVersionSupported=0 + fi +# shellcheck disable=SC2086 + if [ $majorKernelVersion -eq 5 ]; then + # shellcheck disable=SC2086 + if [ ${minorKernelVersion} -lt 14 ]; then + kernelVersionSupported=0 + fi + fi +# shellcheck disable=SC2166 + if [ $OSVersionSupported -eq 0 -o $kernelVersionSupported -eq 0 ]; then + if [ ${IGNOREOSVERSION} == "0" ]; then + echo "ERROR: OSVersion=${OSVersion}.. KernelVersion=${kernelVersion}. Exiting." + return 1 + fi + fi + + echo "Setting kernel parameters in /etc/sysctl.conf" + sed -i '/fs.aio-max-nr=/d' /etc/sysctl.conf + sed -i '/fs.file-max=/d' /etc/sysctl.conf + sed -i '/net.core.rmem_max=/d' /etc/sysctl.conf + sed -i '/net.core.rmem_default=/d' /etc/sysctl.conf + sed -i '/net.core.wmem_max=/d' /etc/sysctl.conf + sed -i '/net.core.wmem_default=/d' /etc/sysctl.conf + sed -i '/vm.nr_hugepages=/d' /etc/sysctl.conf + + echo -e "fs.aio-max-nr=1048576\nfs.file-max=6815744\nnet.core.rmem_max=4194304\nnet.core.rmem_default=262144\nnet.core.wmem_max=1048576\nnet.core.wmem_default=262144\nvm.nr_hugepages=16384" >> /etc/sysctl.conf + + if [ ${SLIMENV} -eq 1 ]; then + echo "INFO: Slim environment specified" + if [ ${NODEDIRS} -eq 0 ]; then + echo "ERROR: Missing NodeDirs for SlimEnv. Exiting" + DisplayUsage + return 1 + fi + # shellcheck disable=SC2006,SC2001,SC2086 + nodeHomeDirs=`echo ${node_dirs} | sed -e 's/.*?=\(.*\)/\1/g'` + # shellcheck disable=SC2162 + IFS=',' read -a nodeHomeValues <<< "${nodeHomeDirs}" + for nodeHome in "${nodeHomeValues[@]}" + do + echo "INFO: Creating directory $nodeHome" + # shellcheck disable=SC2086 + mkdir -p $nodeHome + done + fi + + if [ ${OSVersionSupported} -eq 1 ]; then + echo "INFO: Starting chronyd service" + systemctl start chronyd + fi +# shellcheck disable=SC2002 + cat /sys/devices/system/clocksource/clocksource0/available_clocksource | grep tsc + # shellcheck disable=SC2181 + if [ $? -eq 0 ]; then + echo "INFO: Setting current clocksource" + echo "tsc">/sys/devices/system/clocksource/clocksource0/current_clocksource + cat /sys/devices/system/clocksource/clocksource0/current_clocksource + + sed -i -e 's/\(GRUB_CMDLINE_LINUX=.*\)"/\1 tsc"/g' ./grub + else + echo "INFO: clock source [tsc] not available on the system" + fi + + df -h /dev/shm + + # shellcheck disable=SC2006 + freeSHM=`df -h /dev/shm | tail -n +2 | awk '{ print $4 }'` + echo "INFO: Available shm = [${freeSHM}]" + # shellcheck disable=SC2086,SC2060,SC2006 + freeSHM=`echo ${freeSHM} | tr -d [:alpha:]` + # shellcheck disable=SC2129,SC2086 + if [ ${freeSHM} -lt 4 ]; then + echo "ERROR: Low free space [${freeSHM}] in /dev/shm. 
Need at least 4GB space. Exiting." + DisplayUsage + return 1 + fi + install_podman + install_podman_compose + # shellcheck disable=SC2006 + selinux_state=$(grep -E '^SELINUX=' /etc/selinux/config | cut -d= -f2) + if [[ "$selinux_state" == "enforcing" || "$selinux_state" == "permissive" || "$selinux_state" == "targeted" ]]; then + echo "INFO: SELinux Enabled. Setting up SELinux Context" + setupSELinuxContext + else + echo "INFO: SELinux Disabled." + fi + create_secrets || return 1 + check_system_resources || return 1 + echo "INFO: Finished setting up the pre-requisites for Podman-Host" + return 0 +} + +cleanup_env(){ + podman rm -f ${DNS_CONTAINER_NAME} + podman rm -f ${STORAGE_CONTAINER_NAME} + podman rm -f $RACNODE1_CONTAINER_NAME + podman rm -f $RACNODE2_CONTAINER_NAME + podman rm -f ${CMAN_CONTAINER_NAME} + podman network inspect $PUBLIC_NETWORK_NAME &> /dev/null && podman network rm $PUBLIC_NETWORK_NAME + podman network inspect $PRIVATE1_NETWORK_NAME &> /dev/null && podman network rm $PRIVATE1_NETWORK_NAME + podman network inspect $PRIVATE2_NETWORK_NAME &> /dev/null && podman network rm $PRIVATE2_NETWORK_NAME + podman volume inspect racstorage &> /dev/null && podman volume rm racstorage + echo "INFO: Oracle Container RAC Environment Cleanup Successfully" + return 0 +} + +while [ $# -gt 0 ]; do + case "$1" in + -slimenv) + SLIMENV=1 + ;; + -nodedirs=*) + NODEDIRS=1 + node_dirs="${1#*=}" + ;; + -ignoreOSVersion) + IGNOREOSVERSION=1 + ;; + -help|-h) + DisplayUsage + ;; + -nfs-env) + setup_nfs_variables || echo "ERROR: Oracle RAC Environment Variables for NFS devices setup has failed." + ;; + -blockdevices-env) + setup_blockdevices_variables || echo "ERROR: Oracle RAC Environment variables for Block devices setup has failed." + ;; + -dns) + validate_environment_variables podman-compose.yml || exit 1 + setup_dns_container || echo "ERROR: Oracle RAC DNS Container Setup has failed." + ;; + -rac) + validate_environment_variables podman-compose.yml || exit 1 + setup_rac_container || echo "ERROR: Oracle RAC Container Setup has failed." + ;; + -storage) + validate_environment_variables podman-compose.yml || exit 1 + setup_storage_container || echo "ERROR: Oracle RAC Storage Container Setup has failed." + ;; + -cman) + validate_environment_variables podman-compose.yml || exit 1 + setup_cman_container || echo "ERROR: Oracle RAC Connection Manager Container Setup has failed." + ;; + -cleanup) + validate_environment_variables podman-compose.yml || exit 1 + cleanup_env || echo "ERROR: Oracle RAC Environment Cleanup Setup has failed." + ;; + -networks) + validate_environment_variables podman-compose.yml || exit 1 + setup_rac_networks || echo "ERROR: Oracle RAC Container Networks setup has failed." + ;; + -prepare-rac-env) + setup_host_prepreq || echo "ERROR: Oracle RAC preparation setups have failed." + ;; + *) + printf "***************************\n" + # shellcheck disable=SC2059 + printf "* Error: Invalid argument [$1] specified.*\n" + printf "***************************\n" + DisplayUsage + ;; + esac + shift +done \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/CLEANUP.md b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/CLEANUP.md new file mode 100644 index 0000000000..0f0a30cc39 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/CLEANUP.md @@ -0,0 +1,38 @@ +# Cleanup Oracle RAC Database Container Environment +To clean up the Oracle Real Application Clusters (Oracle RAC) environment, complete the following commands. 
+ +```bash +podman inspect rac-dnsserver &> /dev/null && podman rm -f rac-dnsserver +podman inspect racnode-storage &> /dev/null && podman rm -f racnode-storage +podman inspect racnodep1 &> /dev/null && podman rm -f racnodep1 +podman inspect racnodep2 &> /dev/null && podman rm -f racnodep2 +podman inspect racnodepc1-cman &> /dev/null && podman rm -f racnodepc1-cman +podman network inspect rac_pub1_nw &> /dev/null && podman network rm rac_pub1_nw +podman network inspect rac_priv1_nw &> /dev/null && podman network rm rac_priv1_nw +podman network inspect rac_priv2_nw &> /dev/null && podman network rm rac_priv2_nw +podman volume inspect racstorage &> /dev/null && podman volume rm racstorage +``` + +If you have set up the container environment to use block devices, then clean up the ASM Disks: +```bash +dd if=/dev/zero of=/dev/oracleoci/oraclevdd bs=8k count=10000 +dd if=/dev/zero of=/dev/oracleoci/oraclevde bs=8k count=10000 +``` +If you have set up the container environment using an Oracle Slim Image, then clean up the data folders: +```bash +rm -rf /scratch/rac/cluster01/node1/* +rm -rf /scratch/rac/cluster01/node2/* +``` + +If you have set up the container environment with User Defined Response files, then clean up the response files: +```bash +rm -rf /scratch/common_scripts/podman/rac/* +``` + +## License + +All scripts and files hosted in this repository that are required to build the container images are, unless otherwise noted, released under UPL 1.0 license. + +## Copyright + +Copyright (c) 2014-2025 Oracle and/or its affiliates. \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/CONNECTING.md b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/CONNECTING.md new file mode 100644 index 0000000000..7c3c44f5dd --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/CONNECTING.md @@ -0,0 +1,191 @@ +# Connecting to an Oracle RAC Database +Follow this document to validate and connect to Oracle RAC Container Database. + +## Using this documentation +- [Connecting to an Oracle RAC Database](#connecting-to-an-oracle-rac-database) + - [Using this documentation](#using-this-documentation) + - [Validating Oracle RAC Containers](#validating-oracle-rac-containers) + - [Validating Oracle Grid Infrastructure](#validating-oracle-grid-infrastructure) + - [Validating Oracle RAC Database](#validating-oracle-rac-database) + - [Debugging Oracle RAC Containers](#debugging-oracle-rac-containers) + - [Client Connection](#client-connection) + - [License](#license) + - [Copyright](#copyright) + +## Validating Oracle RAC Containers +First Validate if Container is healthy or not by running- +```bash +podman ps -a + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +598385416fd7 localhost/oracle/rac-dnsserver:latest /bin/sh -c exec $... 55 minutes ago Up 55 minutes (healthy) rac-dnsserver +835e3d113898 localhost/oracle/rac-storage-server:latest 55 minutes ago Up 55 minutes (healthy) racnode-storage +9ba7bbee9095 localhost/oracle/database-rac:21c 52 minutes ago Up 52 minutes (healthy) racnodep1 +ebbf520b0c95 localhost/oracle/database-rac:21c 52 minutes ago Up 52 minutes (healthy) racnodep2 +36df843594d9 localhost/oracle/client-cman:21.3.0 /bin/sh -c exec $... 12 minutes ago Up 12 minutes (healthy) 0.0.0.0:1521->1521/tcp racnodepc1-cman +``` + +Look for `(healthy)` next to container names under `STATUS` section. 
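As a quick illustration (a sketch only: the SCAN name, domain, and database name below are the sample defaults used throughout these files, and `<password>` must be replaced with your own SYSTEM password), a client machine with Oracle client tools installed and DNS resolution for the SCAN could connect as follows:

```bash
# Illustrative only: racnodepc1-scan.example.info and ORCLCDB are the sample
# defaults used in these files; substitute your SCAN, service, and password.
sqlplus system/<password>@//racnodepc1-scan.example.info:1521/ORCLCDB
```

The sections below describe the validation and connection steps in detail.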
+ +To connect to the container execute following command: +```bash +podman exec -i -t racnodep1 /bin/bash +``` +## Validating Oracle Grid Infrastructure +Validate if Oracle Grid is up and running from within Container- +```bash +su - grid +#Verify the status of Oracle Clusterware stack: +[grid@racnodep1 ~]$ crsctl check cluster -all +************************************************************** +racnodep1: +CRS-4537: Cluster Ready Services is online +CRS-4529: Cluster Synchronization Services is online +CRS-4533: Event Manager is online +************************************************************** +racnodep2: +CRS-4537: Cluster Ready Services is online +CRS-4529: Cluster Synchronization Services is online +CRS-4533: Event Manager is online +************************************************************** + +[grid@racnodep1 u01]$ crsctl check crs +CRS-4638: Oracle High Availability Services is online +CRS-4537: Cluster Ready Services is online +CRS-4529: Cluster Synchronization Services is online +CRS-4533: Event Manager is online + +[grid@racnodep1 u01]$ crsctl stat res -t +-------------------------------------------------------------------------------- +Name Target State Server State details +-------------------------------------------------------------------------------- +Local Resources +-------------------------------------------------------------------------------- +ora.LISTENER.lsnr + ONLINE ONLINE racnodep1 STABLE + ONLINE ONLINE racnodep2 STABLE +ora.chad + ONLINE ONLINE racnodep1 STABLE + ONLINE ONLINE racnodep2 STABLE +ora.helper + OFFLINE OFFLINE racnodep1 STABLE + OFFLINE OFFLINE racnodep2 STABLE +ora.net1.network + ONLINE ONLINE racnodep1 STABLE + ONLINE ONLINE racnodep2 STABLE +ora.ons + ONLINE ONLINE racnodep1 STABLE + ONLINE ONLINE racnodep2 STABLE +-------------------------------------------------------------------------------- +Cluster Resources +-------------------------------------------------------------------------------- +ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup) + 1 ONLINE ONLINE racnodep1 STABLE + 2 ONLINE ONLINE racnodep2 STABLE +ora.ASMNET2LSNR_ASM.lsnr(ora.asmgroup) + 1 ONLINE ONLINE racnodep1 STABLE + 2 ONLINE ONLINE racnodep2 STABLE +ora.DATA.dg(ora.asmgroup) + 1 ONLINE ONLINE racnodep1 STABLE + 2 ONLINE ONLINE racnodep2 STABLE +ora.LISTENER_SCAN1.lsnr + 1 ONLINE ONLINE racnodep1 STABLE +ora.LISTENER_SCAN2.lsnr + 1 ONLINE ONLINE racnodep1 STABLE +ora.LISTENER_SCAN3.lsnr + 1 ONLINE ONLINE racnodep2 STABLE +ora.asm(ora.asmgroup) + 1 ONLINE ONLINE racnodep1 Started,STABLE + 2 ONLINE ONLINE racnodep2 Started,STABLE +ora.asmnet1.asmnetwork(ora.asmgroup) + 1 ONLINE ONLINE racnodep1 STABLE + 2 ONLINE ONLINE racnodep2 STABLE +ora.asmnet2.asmnetwork(ora.asmgroup) + 1 ONLINE ONLINE racnodep1 STABLE + 2 ONLINE ONLINE racnodep2 STABLE +ora.cdp1.cdp + 1 ONLINE ONLINE racnodep1 STABLE +ora.cdp2.cdp + 1 ONLINE ONLINE racnodep1 STABLE +ora.cdp3.cdp + 1 ONLINE ONLINE racnodep2 STABLE +ora.cvu + 1 ONLINE ONLINE racnodep1 STABLE +ora.orclcdb.db + 1 ONLINE ONLINE racnodep1 Open,HOME=/u01/app/o + racle/product/23ai/db + home_1,STABLE + 2 ONLINE ONLINE racnodep2 Open,HOME=/u01/app/o + racle/product/23ai/db + home_1,STABLE +ora.orclcdb.orclpdb.pdb + 1 ONLINE ONLINE racnodep1 READ WRITE,STABLE + 2 ONLINE ONLINE racnodep2 READ WRITE,STABLE +ora.orclcdb.soepdb.svc + 1 ONLINE ONLINE racnodep1 STABLE + 2 ONLINE ONLINE racnodep2 STABLE +ora.racnodep1.vip + 1 ONLINE ONLINE racnodep1 STABLE +ora.racnodep2.vip + 1 ONLINE ONLINE racnodep2 STABLE +ora.rhpserver + 1 OFFLINE OFFLINE STABLE 
+ora.scan1.vip + 1 ONLINE ONLINE racnodep1 STABLE +ora.scan2.vip + 1 ONLINE ONLINE racnodep1 STABLE +ora.scan3.vip + 1 ONLINE ONLINE racnodep2 STABLE +-------------------------------------------------------------------------------- + +/u01/app/21c/grid/bin/olsnodes -n +racnodep1 1 +racnodep2 2 +``` +## Validating Oracle RAC Database +Validate Oracle RAC Database from within Container- +```bash +su - oracle + +#Confirm the status of Oracle Database instances: +[oracle@racnodep1 ~]$ srvctl status database -d ORCLCDB +Instance ORCLCDB1 is running on node racnodep1 +Instance ORCLCDB2 is running on node racnodep2 + +# Validate network configuration and connectivity: +[oracle@racnodep1 ~]$ srvctl config scan +SCAN name: racnodepc1-scan, Network: 1 +Subnet IPv4: 10.0.20.0/255.255.255.0/eth0, static +Subnet IPv6: +SCAN 1 IPv4 VIP: 10.0.20.237 +SCAN VIP is enabled. +SCAN 2 IPv4 VIP: 10.0.20.238 +SCAN VIP is enabled. +SCAN 3 IPv4 VIP: 10.0.20.236 +SCAN VIP is enabled. +``` + +## Debugging Oracle RAC Containers +If the install fails for any reason, log in to container using the above command and check `/tmp/orod/oracle_rac_setup.log`. You can also review the Grid Infrastructure logs located at `$GRID_BASE/diag/crs` and check for failure logs. If the failure occurred during the database creation then check the database logs. + + +## Client Connection +* If you are using the podman network created using MACVLAN driver, and you have configured DNS appropriately, then you can connect using the public Single Client Access (SCAN) listener directly from any external client. To connect with the SCAN, use the following connection string, where `` is the SCAN name for the database, and `` is the database system identifier: + + ```bash + system/@//:1521/ + ``` + +* If you are using a connection manager and exposed the port 1521 on the host, then connect from an external client using the following connection string, where `` is the host container, and `` is the database system identifier: + + ```bash + system/@//:1521/ + ``` +* If you are using bridge driver and not using connection manager, you need to connect application to the same bridge network which you are using for Oracle RAC. +## License + +All scripts and files hosted in this repository which are required to build the container images are, unless otherwise noted, released under UPL 1.0 license. + +## Copyright + +Copyright (c) 2014-2025 Oracle and/or its affiliates. \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/DELETION.md b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/DELETION.md new file mode 100644 index 0000000000..1a0faa830a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/DELETION.md @@ -0,0 +1,31 @@ +# Deleting a Node from Existing Oracle RAC on Container Cluster +First identify the node you want to remove from RAC Container Cluster, then login to container and execute below- +```bash +cd /opt/scripts/startup/scripts/ +python3 main.py --delracnode="del_rachome=true;del_gridnode=true" +``` +E.g In this example we will delete racnodep3 from a cluster of 3 nodes viz. racnodep1,racnodep2, racnodep3. 
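Before removing the node, you can optionally confirm the current cluster membership from one of the surviving nodes (illustrative command; the Grid home path shown is the 21c default used in these files, so adjust it if your Grid home differs):

```bash
podman exec -it racnodep1 /u01/app/21c/grid/bin/olsnodes -n
```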
+```bash +podman exec -it racnodep3 bash +cd /opt/scripts/startup/scripts/ +python3 main.py --delracnode="del_rachome=true;del_gridnode=true" +``` +Validate racnodep3 is deleted successfully from Oracle RAC on Container Cluster - +```bash +podman exec -it racnodep1 bash +[root@racnodep1 bin]# /u01/app/23.3.0/grid/bin/olsnodes -n +racnodep1 1 +racnodep2 2 +``` +Now racnodep3 container can be removed by running command- +```bash +podman rm -f racnodep3 +``` + +## License + +All scripts and files hosted in this repository which are required to build the container images are, unless otherwise noted, released under UPL 1.0 license. + +## Copyright + +Copyright (c) 2014-2025 Oracle and/or its affiliates. \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/ENVIRONMENTVARIABLES.md b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/ENVIRONMENTVARIABLES.md new file mode 100644 index 0000000000..6d80e7de2d --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/ENVIRONMENTVARIABLES.md @@ -0,0 +1,55 @@ +# Environment Variables Explained for Oracle RAC on Podman + +This section provides information about the environment variables that can be used when creating Oracle RAC on Containers. + +| Environment Variable | Mandatory/Optional | Usage | Description | +|--------------------------|---------------------|------------|--------------------------------------------------------------| +| DNS_SERVERS | Mandatory | All | Specify the comma-separated list of DNS server IP addresses where both Oracle RAC nodes are resolved. | +| OP_TYPE | Mandatory | All | Specify the operation type. It can accept setuprac/setupgrid/addgridnode/racaddnode/setupracstandby. | +| CRS_NODES | Mandatory | All | Specify the CRS nodes in the format pubhost:pubhost1,viphost:viphost1;pubhost:pubhost2,viphost:viphost2. You can add as many hosts separated by semicolon. publhost and viphost are separated by comma. | +| SCAN_NAME | Mandatory | All | Specify the SCAN name. | +| CRS_ASM_DEVICE_LIST | Mandatory | All | Specify the ASM disk lists. | +| PUBLIC_HOSTS_DOMAIN | Optional | All | Specify public domain where RAC Containers are resolving to. | +| CRS_ASM_DISCOVERY_STRING | Optional | All | Specify the discovery string for ASM. | +| ORACLE_SID | Optional | All | Default value set to ORCLCDB. | +| ORACLE_PDB | Optional | All | Default value set to ORCLPDB. | +| ORACLE_CHARACTERSET | Optional | All | Default value set to AL32UTF8. | +| PWD_KEY | Mandatory | All | Pass the podman secret name for the key used while generating podman secrets. Default set to keysecret. | +| DB_PWD_FILE | Mandatory | All | Pass the podman secret name for the Oracle RAC Database to be used while generating podman secrets. Default set to pwdsecret. | +| INIT_SGA_SIZE | Optional | All | Set this environment variable when you want to set the size of SGA for RAC containers. | +| INIT_PGA_SIZE | Optional | All | Set this environment variable when you want to set the size of PGA for RAC containers. | +| CRS_PRIVATE_IP1 | Mandatory | All | Set this environment variable when you want to set the private IP for the first private network for RAC container. | +| CRS_PRIVATE_IP2 | Mandatory | All | Set this environment variable when you want to set the private IP for the second private network for RAC container. | +| INSTALL_NODE | Mandatory | All | Set this environment variable to the new Oracle node where the actual RAC cluster installation will happen. e.g., racnodep1/racnodep3 etc. 
| +| EXISTING_CLS_NODE | Mandatory | Mandatory only during Node Addition to existing RAC Cluster | This is set during addition of node to Existing RAC Cluster. Set this environment variable to existing Oracle RAC node e.g., racnodep1, racnodep2. | +| DB_ASM_DEVICE_LIST | Optional | All | Comma-separated list of ASM disk names with their full paths. | +| RECO_ASM_DEVICE_LIST | Optional | All | Comma-separated list of ASM disk names with their full paths. | +| DB_DATA_FILE_DEST | Optional | All | Name of the diskgroup where database data files will be stored. | +| DB_RECOVERY_FILE_DEST | Optional | All | Name of the diskgroup where database recovery files (archivelogs) will be stored. | +| CMAN_HOST | Optional | All | Specify the host for Oracle Connection Manager (CMAN). Default value is set to racnodepc1-cman. | +| CMAN_PORT | Optional | All | Specify the port for Oracle Connection Manager (CMAN). Default port is set to 1521. | +| DB_UNIQUE_NAME | Mandatory | Standby (DG Setup) | Specify the unique name for the standby database. | +| PRIMARY_DB_SCAN_NAME | Mandatory | Standby (DG Setup) | Specify the SCAN name of the primary database. | +| CRS_ASM_DISKGROUP | Mandatory | Standby (DG Setup) | Specify the ASM diskgroup for the standby database. | +| PRIMARY_DB_UNIQUE_NAME | Mandatory | Standby (DG Setup) | Specify the unique name of the primary database. | +| PRIMARY_DB_NAME | Mandatory | Standby (DG Setup) | Specify the name of the primary database. | +| DB_BLOCK_CHECKSUM | Mandatory | Primary and Standby (DG Setup) | Specify the type of DB block checksum to use. | +| DB_SERVICE | Optional | All | Specify the database service. Format: service:soepdb. | +| GRID_HOME | Mandatory | Setup using Slim Image | Path to Oracle Grid Infrastructure home directory. Default value is `/u01/app/21c/grid`. | +| GRID_BASE | Mandatory | Setup using Slim Image | Path to the base directory of Oracle Grid Infrastructure. Default value is `/u01/app/grid`. | +| DB_HOME | Mandatory | Setup using Slim Image | Path to Oracle Database home directory. Default value is `/u01/app/oracle/product/21c/dbhome_1`. | +| DB_BASE | Mandatory | Setup using Slim Image | Path to the base directory of Oracle Database. Default value is `/u01/app/oracle`. | +| INVENTORY | Mandatory | Setup using Slim Image | Path to the Oracle Inventory directory. Default value is `/u01/app/oraInventory`. | +| STAGING_SOFTWARE_LOC | Mandatory | Setup using Slim Image | Location where the Oracle software zip files are staged. Default value is `/scratch/software/21c/goldimages/240308`. | +| GRID_SW_ZIP_FILE | Mandatory | Setup using Slim Image | Name of the Oracle Grid Infrastructure software zip file. Default value is `LINUX.X64_213000_grid_home.zip`. | +| DB_SW_ZIP_FILE | Mandatory | Setup using Slim Image | Name of the Oracle Database software zip file. Default value is `LINUX.X64_213000_db_home.zip`. | +| GRID_RESPONSE_FILE | Mandatory | Setup using User Defined Response Files | Path to the Oracle Grid Infrastructure response file. Default value is `/tmp/grid_21c.rsp`. | +| DBCA_RESPONSE_FILE | Mandatory | Setup using User Defined Response Files | Path to the Oracle Database Configuration Assistant (DBCA) response file. Default value is `/tmp/dbca_21c.rsp`. | + +## License + +All scripts and files hosted in this repository which are required to build the container images are, unless otherwise noted, released under UPL 1.0 license. + +## Copyright + +Copyright (c) 2014-2025 Oracle and/or its affiliates. 
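For illustration, the following sketch shows how a few of these variables might be passed when creating an Oracle RAC node container. The values are the sample defaults used elsewhere in these files, and this is not a complete deployment command: a real deployment also needs the network, storage, secret, and resource options covered in the deployment documents.

```bash
# Hypothetical excerpt only: variable names come from the table above and the
# values are the sample defaults used in these files.
podman create -t -i \
  --hostname racnodep1 \
  --name racnodep1 \
  -e DNS_SERVERS=10.0.20.25 \
  -e OP_TYPE=setuprac \
  -e CRS_NODES="pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip" \
  -e SCAN_NAME=racnodepc1-scan \
  -e INSTALL_NODE=racnodep1 \
  -e CRS_PRIVATE_IP1=192.168.17.170 \
  -e CRS_PRIVATE_IP2=192.168.18.170 \
  -e CRS_ASM_DEVICE_LIST=/dev/asm-disk1,/dev/asm-disk2 \
  -e CRS_ASM_DISCOVERY_STRING="/dev/asm-disk*" \
  -e ORACLE_SID=ORCLCDB \
  -e ORACLE_PDB=ORCLPDB \
  -e PWD_KEY=keysecret \
  -e DB_PWD_FILE=pwdsecret \
  localhost/oracle/database-rac:21c
```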
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/README_1.md b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/README_1.md new file mode 100644 index 0000000000..278834c2f6 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/README_1.md @@ -0,0 +1,1078 @@ +# Oracle Real Application Clusters in Linux Containers + +Learn about container deployment options for Oracle Real Application Clusters (Oracle RAC) Release 21c (21.3) + +## Overview of Running Oracle RAC in Containers + +Oracle Real Application Clusters (Oracle RAC) is an option to the award-winning Oracle Database Enterprise Edition. Oracle RAC is a cluster database with a shared cache architecture that overcomes the limitations of traditional shared-nothing and shared-disk approaches to provide highly scalable and available database solutions for all business applications. +Oracle RAC uses Oracle Clusterware as a portable cluster software that allows clustering of independent servers so that they cooperate as a single system and Oracle Automatic Storage Management (Oracle ASM) to provide simplified storage management that is consistent across all servers and storage platforms. +Oracle Clusterware and Oracle ASM are part of the Oracle Grid Infrastructure, which bundles both solutions in an easy to deploy software package. + +For more information on Oracle RAC Database 21c refer to the [Oracle Database documentation](http://docs.oracle.com/en/database/). + +## Using this Image + +To create an Oracle RAC environment, complete these steps in order: + +- [Oracle Real Application Clusters in Linux Containers](#oracle-real-application-clusters-in-linux-containers) + - [Overview of Running Oracle RAC in Containers](#overview-of-running-oracle-rac-in-containers) + - [Using this Image](#using-this-image) + - [Section 1 : Prerequisites for running Oracle RAC in containers](#section-1--prerequisites-for-running-oracle-rac-in-containers) + - [Section 2: Building Oracle RAC Database Container Images](#section-2-building-oracle-rac-database-container-images) + - [Oracle RAC Container Image for Docker](#oracle-rac-container-image-for-docker) + - [Oracle RAC Container Image for Podman](#oracle-rac-container-image-for-podman) + - [Section 3: Network and Password Management](#section-3--network-and-password-management) + - [Section 4: Oracle RAC on Docker](#section-4-oracle-rac-on-docker) + - [Section 4.1 : Prerequisites for Running Oracle RAC on Docker](#section-41--prerequisites-for-running-oracle-rac-on-docker) + - [Section 4.2: Setup Oracle RAC Container on Docker](#section-42-setup-oracle-rac-container-on-docker) + - [Deploying Oracle RAC on Container with Block Devices on Docker](#deploying-oracle-rac-on-container-with-block-devices-on-docker) + - [Deploying Oracle RAC on Container With Oracle RAC Storage Container](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container) + - [Assign networks to Oracle RAC docker containers](#assign-networks-to-oracle-rac-docker-containers) + - [Start the first docker container](#start-the-first-docker-container) + - [Connect to the Oracle RAC docker container](#connect-to-the-oracle-rac-docker-container) + - [Section 4.3: Adding an Oracle RAC Node using a Docker Container](#section-43-adding-an-oracle-rac-node-using-a-docker-container) + - [Deploying Oracle RAC Additional Node on Container with Block Devices on Docker](#deploying-oracle-rac-additional-node-on-container-with-block-devices-on-docker) + - [Deploying Oracle RAC Additional Node on Container 
with Oracle RAC Storage Container on Docker](#deploying-oracle-rac-additional-node-on-container-with-oracle-rac-storage-container-on-docker) + - [Assign Network to additional Oracle RAC docker container](#assign-network-to-additional-oracle-rac-docker-container) + - [Start Oracle RAC docker container](#start-oracle-rac-docker-container) + - [Connect to the Oracle RAC docker container](#connect-to-the-oracle-rac-docker-container) + - [Section 5: Oracle RAC on Podman](#section-5-oracle-rac-on-podman) + - [Section 5.1 : Prerequisites for Running Oracle RAC on Podman](#section-51--prerequisites-for-running-oracle-rac-on-podman) + - [Section 5.2: Setup RAC Containers on Podman](#section-52-setup-rac-containers-on-podman) + - [Deploying Oracle RAC Containers with Block Devices on Podman](#deploying-oracle-rac-containers-with-block-devices-on-podman) + - [Deploying Oracle RAC on Container With Oracle RAC Storage Container on Podman](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container-on-podman) + - [Assign networks to Oracle RAC podman containers](#assign-networks-to-oracle-rac-podman-containers) + - [Start the first podman container](#start-the-first-podman-container) + - [Connect to the Oracle RAC container](#connect-to-the-oracle-rac-podman-container) + - [Section 5.3: Adding a Oracle RAC Node using a container on Podman](#section-53-adding-a-oracle-rac-node-using-a-container-on-podman) + - [Deploying Oracle RAC Additional Node on Container with Block Devices on Podman](#deploying-oracle-rac-additional-node-on-container-with-block-devices-on-podman) + - [Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Podman](#deploying-oracle-rac-additional-node-on-container-with-oracle-rac-storage-container-on-podman) + - [Assign Network to additional Oracle RAC podman container](#assign-network-to-additional-oracle-rac-podman-container) + - [Start Oracle RAC podman container](#start-oracle-rac-podman-container) + - [Section 6: Connecting to an Oracle RAC Database](#section-6-connecting-to-an-oracle-rac-database) + - [Section 7: Environment Variables for the First Node](#section-7-environment-variables-for-the-first-node) + - [Section 8: Environment Variables for the Second and Subsequent Nodes](#section-8-environment-variables-for-the-second-and-subsequent-nodes) + - [Section 9: Building a Patched Oracle RAC Container Image](#section-9-building-a-patched-oracle-rac-container-image) + - [Section 10 : Sample Container Files for Older Releases](#section-10--sample-container-files-for-older-releases) + - [Docker](#docker-container-files) + - [Podman](#podman-container-files) + - [Section 11 : Support](#section-11--support) + - [Docker](#docker-support) + - [Podman](#podman-support) + - [Section 12 : License](#section-12--license) + - [Section 11 : Copyright](#section-11--copyright) + +## Section 1 : Prerequisites for running Oracle RAC in containers + +Before you proceed to section two, you must complete each of the steps listed in this section. + +To review the resource requirements for Oracle RAC, see Oracle Database 21c Release documentation [Oracle Grid Infrastructure Installation and Upgrade Guide](https://docs.oracle.com/en/database/oracle/oracle-database/21/cwlin/index.html) + +Complete each of the following prerequisites: + +1. Ensure that each container that you will deploy as part of your cluster meets the minimum hardware requirements for Oracle RAC and Oracle Grid Infrastructure software. +2. 
Ensure all data files, control files, redo log files, and the server parameter file (`SPFILE`) used by the Oracle RAC database reside on shared storage that is accessible by all the Oracle RAC database instances. An Oracle RAC database is a shared-everything database, so each Oracle RAC Node must have the same access. +3. Configure the following addresses manually in your DNS. + + - Public IP address for each container + - Private IP address for each container + - Virtual IP address for each container + - Three single client access name (SCAN) addresses for the cluster. +4. Block storage: If you are planning to use block devices for shared storage, then allocate block devices for OCR, voting and database files. +5. NFS storage: If you are planning to use NFS storage for OCR, Voting Disk and Database files, then configure NFS storage and export at least one NFS mount. You can also use `/docker-images/OracleDatabase/RAC/OracleRACStorageServer` container for shared file system on NFS. +6. Set`/etc/sysctl.conf`parameters: For Oracle RAC, you must set following parameters at host level in `/etc/sysctl.conf`: + + ```INI + fs.aio-max-nr = 1048576 + fs.file-max = 6815744 + net.core.rmem_max = 4194304 + net.core.rmem_default = 262144 + net.core.wmem_max = 1048576 + net.core.wmem_default = 262144 + net.core.rmem_default = 262144 + ``` + +7. List and reload parameters: After the `/etc/sysctl.conf` file is modified, run the following commands: + + ```bash + sysctl -a + sysctl -p + ``` + +8. To resolve VIPs and SCAN IPs, we are using a DNS container in this guide. Before proceeding to the next step, create a [DNS server container](../OracleDNSServer/README.md). +If you have a pre-configured DNS server in your environment, then you can replace `-e DNS_SERVERS=172.16.1.25`, `--dns=172.16.1.25`, `-e DOMAIN=example.com` and `--dns-search=example.com` parameters in **Section 2: Building Oracle RAC Database Podman Install Images** with the `DOMAIN_NAME` and `DNS_SERVER` based on your environment. +You must ensure that you have the`Podman-docker` package installed on your OL8 Podman host to run the command using the `docker` utility. + +9. If you are running RAC on Podman, you need to make sure you have installed the `podman-docker` rpm so that podman commands can be run using `docker` utility. +10. The Oracle RAC `Dockerfile` does not contain any Oracle software binaries. Download the following software from the [Oracle Technology Network](https://www.oracle.com/technetwork/database/enterprise-edition/downloads/index.html) and stage them under `/docker-images/OracleDatabase/RAC/OracleRealApplicationCluster/containerfiles/` folder. + + - Oracle Database 21c Grid Infrastructure (21.3) for Linux x86-64 + - Oracle Database 21c (21.3) for Linux x86-64 + + - If you are deploying Oracle RAC on Podman then execute following, otherwise skip to next section. + - Because Oracle RAC on Podman is supported on Release 21c (21.7) or later, you must download the grid release update (RU) from [support.oracle.com](https://support.oracle.com/portal/). In this case, we downloaded RU `34155589`. + + - Download the following one-off patches for release 21.7 from [support.oracle.com](https://support.oracle.com/portal/) + - `34339952` + - `32869666` + +**Notes** + +- If you are planning to use a `DNSServer` container for SCAN IPs, VIPs resolution, then configure the DNSServer. For testing purposes only, use the Oracle `DNSServer` image to deploy a container providing DNS resolutions. 
Please check [OracleDNSServer](../OracleDNSServer/README.md) for details. +- `OracleRACStorageServer` docker image can be used only for testing purpose. Please check [OracleRACStorageServer](../OracleRACStorageServer/README.md) for details. +- To run Oracle RAC using Podman on multiple hosts, refer [Podman macvlan network](https://docs.podman.io/en/latest/markdown/podman-network-create.1.html). +To run Oracle RAC using Oracle Container Runtime for Docker on multiple hosts, refer [Docker macvlan network](https://docs.docker.com/network/macvlan/). +- If the Docker or Podman bridge network is not available outside your host, you can use the Oracle Connection Manager [CMAN image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleConnectionManager) to access the Oracle RAC Database from outside the host. + +## Section 2: Building Oracle RAC Database Container Images + +**IMPORTANT :** This section assumes that you have gone through all the prerequisites in Section 1 and completed all the steps, based on your environment. Do not uncompress the binaries and patches. + +To assist in building the images, you can use the [`buildContainerImage.sh`](https://github.com/oracle/docker-images/blob/master/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/buildContainerImage.sh) script. See the following for instructions and usage. + +### Oracle RAC Container Image for Docker +If you are planing to deploy Oracle RAC container image on Podman, skip to the section [Oracle RAC Container Image for Podman](#oracle-rac-container-image-for-podman). + +```bash +./buildContainerImage.sh -v -o '--build-arg BASE_OL_IMAGE=oraclelinux:7' -i + +# Example: Building Oracle RAC Docker Image +./buildContainerImage.sh -v 21.3.0 -o '--build-arg BASE_OL_IMAGE=oraclelinux:7' -i +``` +***Note*** +- `IGNORE_PREREQ` is default `false` while building full image, if you want to skip this during dbca/grid installation or basically set `-ignorePrereq` while building the container image, set this to `true`. + +### Oracle RAC Container Image for Podman +If you are planing to deploy Oracle RAC container image on Docker, skip to the section [Oracle RAC Container Image for Docker](#oracle-rac-container-image-for-docker). + + ```bash + ./buildContainerImage.sh -v -o '--build-arg BASE_OL_IMAGE=oraclelinux:8' -i + + # Example: Building Oracle RAC Full image + ./buildContainerImage.sh -v 21.3.0 -o '--build-arg BASE_OL_IMAGE=oraclelinux:8' -i + ``` +- After the `21.3.0` Oracle RAC container image is built, start building a patched image with the download 21.7 RU and one-offs. To build the patch image, refer [Example of how to create a patched database image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch). + +**Notes** + +- The resulting images will contain the Oracle Grid Infrastructure binaries and Oracle RAC Database binaries. +- If you are behind a proxy wall, then you must set the `https_proxy` environment variable based on your environment before building the image. + +## Section 3: Network and Password Management + +1. Before you start the installation, you must plan your private and public network. You can create a network bridge on every container host so containers running within that host can communicate with each other. +For example, create `rac_pub1_nw` for the public network (`172.16.1.0/24`) and `rac_priv1_nw` (`192.168.17.0/24`) for a private network. You can use any network subnet for testing. 
In this document we reference the public network on `172.16.1.0/24` and the private network on `192.168.17.0/24`. + + ```bash + # docker network create --driver=bridge --subnet=172.16.1.0/24 rac_pub1_nw + # docker network create --driver=bridge --subnet=192.168.17.0/24 rac_priv1_nw + ``` + +- To run Oracle RAC using Oracle Container Runtime for Docker on multiple hosts, you will need to create a [Docker macvlan network](https://docs.docker.com/network/macvlan/) using the following commands: + + ```bash + # docker network create -d macvlan --subnet=172.16.1.0/24 --gateway=172.16.1.1 -o parent=eth0 rac_pub1_nw + # docker network create -d macvlan --subnet=192.168.17.0/24 --gateway=192.168.17.1 -o parent=eth1 rac_priv1_nw + ``` + +2. Specify the secret volume for resetting the grid, oracle, and database user password during node creation or node addition. The volume can be a shared volume among all the containers. For example: + + ```bash + # mkdir /opt/.secrets/ + ``` +- On the Docker or Podman host, generate a key file by running `openssl rand -hex 64 -out /opt/.secrets/pwd.key`. +- Edit the `/opt/.secrets/common_os_pwdfile` and seed the password for the grid, oracle and database users. For this deployment scenario, it will be a common password for the grid, oracle, and database users. Run the command: + + ```bash + # openssl enc -aes-256-cbc -salt -in /opt/.secrets/common_os_pwdfile -out /opt/.secrets/common_os_pwdfile.enc -pass file:/opt/.secrets/pwd.key + # rm -f /opt/.secrets/common_os_pwdfile + ``` +3. Create `rac_host_file` on both Podman and Docker hosts: + + ```bash + # mkdir /opt/containers/ + # touch /opt/containers/rac_host_file + ``` + +**Notes** + +- To run Oracle RAC using Podman on multiple hosts, refer [Podman macvlan network](https://docs.podman.io/en/latest/markdown/podman-network-create.1.html). +To run Oracle RAC using Oracle Container Runtime for Docker on multiple hosts, refer [Docker macvlan network](https://docs.docker.com/network/macvlan/). +- If the Docker or Podman bridge network is not available outside your host, you can use the Oracle Connection Manager [CMAN image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleConnectionManager) to access the Oracle RAC Database from outside the host. +- If you want to specify a different password for each of the user accounts, then create three different files, encrypt them under `/opt/.secrets`, and pass the file name to the container using the environment variable. Environment variables can be ORACLE_PWD_FILE for the oracle user, GRID_PWD_FILE for the grid user, and DB_PWD_FILE for the database password. +- If you want to use a common password for the oracle, grid, and database users, then you can assign a password file name to the COMMON_OS_PWD_FILE environment variable. + +## Section 4: Oracle RAC on Docker + +If you are deploying Oracle RAC on Podman, skip to the [Section 5: Oracle RAC on Podman](#section-5-oracle-rac-on-podman). + +**Note** Oracle RAC is supported for production use on Docker starting with Oracle Database 21c (21.3). On earlier releases, Oracle RAC on Docker is supported for development and test environments. To deploy Oracle RAC on Docker, use the pre-built images available on the Oracle Container Registry. Execute the following steps in a given order to deploy RAC on Docker: + +To create an Oracle RAC environment on Docker, complete each of these steps in order.
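+
+As a quick, optional sanity check before you begin, you can confirm that the Oracle RAC image from Section 2 and the networks from Section 3 are present on the Docker host. This is only a sketch; the image and network names below are the defaults used throughout this guide:
+
+```bash
+# docker images oracle/database-rac     # expect the 21.3.0 tag built in Section 2 or pulled from the registry
+# docker network ls | grep rac_         # expect rac_pub1_nw and rac_priv1_nw created in Section 3
+```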
+ +### Section 4.1 : Prerequisites for Running Oracle RAC on Docker + +To run Oracle RAC on Docker, you must install and configure [Oracle Container Runtime for Docker](https://docs.oracle.com/cd/E52668_01/E87205/html/index.html) on Oracle Linux 7. You must have sufficient space on docker file system (`/var/lib/docker`), configured with the Docker OverlayFS storage driver option `overlay2`. + +**IMPORTANT:** Completing prerequisite steps is a requirement for successful configuration. + +Complete each prerequisite step in order, customized for your environment. + +1. Verify that you have enough memory and CPU resources available for all containers. For this `README.md`, we used the following configuration: + + - 2 Docker hosts + - CPU Cores: 1 Socket with 4 cores, with 2 threads for each core Intel® Xeon® Platinum 8167M CPU at 2.00 GHz + - RAM: 60GB + - Swap memory: 32 GB + - Oracle Linux 7.9 or later with the Unbreakable Enterprise Kernel 6: 5.4.17-2102.200.13.el7uek.x86_64. + +2. Oracle RAC must run certain processes in real-time mode. To run processes inside a container in real-time mode, you must make changes to the Docker configuration files. For details, see the [`dockerd` documentation](https://docs.docker.com/engine/reference/commandline/dockerd/#examples). Edit the Docker Daemon based on Docker version: + + - Check the Docker version. In the following output, the Oracle `docker-engine` version is 19.3. + + ```bash + rpm -qa | grep docker + docker-cli-19.03.11.ol-9.el7.x86_64 + docker-engine-19.03.11.ol-9.el7.x86_64 + ``` + + - If Oracle `docker-engine` version is greater than or equal to 19.3: Edit `/usr/lib/systemd/system/docker.service` and add additional parameters in the `[Service]` section for the `dockerd` daemon: + + ```bash + ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --cpu-rt-runtime=950000 + ``` + + - If Oracle docker-engine version is less than 19.3: Edit `/etc/sysconfig/docker` and add following + + ```bash + OPTIONS='--selinux-enabled --cpu-rt-runtime=950000' + ``` + +3. After you have modified the `dockerd` daemon, reload the daemon with the changes you have made: + + ```bash + systemctl daemon-reload + systemctl stop docker + systemctl start docker + ``` + +### Section 4.2: Setup Oracle RAC Container on Docker + +This section provides step by step procedure to deploy Oracle RAC on container with block devices and storage container. To understand the details of environment variable, refer For the details of environment variables [Section 7: Environment Variables for the First Node](#section-7-environment-variables-for-the-first-node) + +Refer the [Section 3: Network and Password Management](#section-3--network-and-password-management) and setup the network on a container host based on your Oracle RAC environment. If you have already done the setup, ignore and proceed further. + +#### Deploying Oracle RAC on Container with Block Devices on Docker + +If you are using an NFS volume, skip to the section [Deploying Oracle RAC on Container With Oracle RAC Storage Container](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container). + +Make sure the ASM devices do not have any existing file system. To clear any other file system from the devices, use the following command: + + ```bash + # dd if=/dev/zero of=/dev/xvde bs=8k count=100000 + ``` + +Repeat for each shared block device. In the preceding example, `/dev/xvde` is a shared Xen virtual block device. + +Now create the Oracle RAC container using the image. 
You can use the following example to create a container: + + ```bash + # docker create -t -i \ + --hostname racnode1 \ + --volume /boot:/boot:ro \ + --volume /dev/shm \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --device=/dev/xvde:/dev/asm_disk1 \ + --device=/dev/xvdf:/dev/asm_disk2 \ + --privileged=false \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + -e DNS_SERVERS="172.16.1.25" \ + -e NODE_VIP=172.16.1.160 \ + -e VIP_HOSTNAME=racnode1-vip \ + -e PRIV_IP=192.168.17.150 \ + -e PRIV_HOSTNAME=racnode1-priv \ + -e PUBLIC_IP=172.16.1.150 \ + -e PUBLIC_HOSTNAME=racnode1 \ + -e SCAN_NAME=racnode-scan \ + -e OP_TYPE=INSTALL \ + -e DOMAIN=example.com \ + -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ + -e ASM_DISCOVERY_DIR=/dev \ + -e CMAN_HOSTNAME=racnode-cman1 \ + -e CMAN_IP=172.16.1.15 \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + --restart=always --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ + --cpu-rt-runtime=95000 --ulimit rtprio=99 \ + --name racnode1 \ + oracle/database-rac:21.3.0 + ``` + +**Note:** Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. + +#### Deploying Oracle RAC on Container With Oracle RAC Storage Container + +If you are using block devices, skip to the section [Deploying Oracle RAC on Container with Block Devices on Docker](#deploying-oracle-rac-on-container-with-block-devices-on-docker) + +Now create the Oracle RAC container using the image. You can use the following example to create a container: + + ```bash + # docker create -t -i \ + --hostname racnode1 \ + --volume /boot:/boot:ro \ + --volume /dev/shm \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --privileged=false \ + --volume racstorage:/oradata \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + -e DNS_SERVERS="172.16.1.25" \ + -e NODE_VIP=172.16.1.160 \ + -e VIP_HOSTNAME=racnode1-vip \ + -e PRIV_IP=192.168.17.150 \ + -e PRIV_HOSTNAME=racnode1-priv \ + -e PUBLIC_IP=172.16.1.150 \ + -e PUBLIC_HOSTNAME=racnode1 \ + -e SCAN_NAME=racnode-scan \ + -e OP_TYPE=INSTALL \ + -e DOMAIN=example.com \ + -e ASM_DISCOVERY_DIR=/oradata \ + -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ + -e CMAN_HOSTNAME=racnode-cman1 \ + -e CMAN_IP=172.16.1.15 \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + --restart=always \ + --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --name racnode1 \ + oracle/database-rac:21.3.0 + ``` + +**Notes:** + +- Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. +- You must have created the `racstorage` volume before the creation of the Oracle RAC Container. For details, please refer [OracleRACStorageServer](../OracleRACStorageServer/README.md). 
+- For details about the available environment variables, refer the [Section 7](#section-7-environment-variables-for-the-first-node). + +#### Assign networks to Oracle RAC docker containers + +You need to assign the Docker networks created in section 1 to containers. Execute the following commands: + + ```bash + # docker network disconnect bridge racnode1 + # docker network connect rac_pub1_nw --ip 172.16.1.150 racnode1 + # docker network connect rac_priv1_nw --ip 192.168.17.150 racnode1 + ``` + +#### Start the first docker container + +To start the first container, run the following command: + + ```bash + # docker start racnode1 + ``` + +It can take at least 40 minutes or longer to create the first node of the cluster. To check the logs, use the following command from another terminal session: + + ```bash + # docker logs -f racnode1 + ``` + +You should see the database creation success message at the end: + + ```bash + #################################### + ORACLE RAC DATABASE IS READY TO USE! + #################################### + ``` + +#### Connect to the Oracle RAC docker container + +To connect to the container execute the following command: + +```bash +# docker exec -i -t racnode1 /bin/bash +``` + +If the install fails for any reason, log in to the container using the preceding command and check `/tmp/orod.log`. You can also review the Grid Infrastructure logs located at `$GRID_BASE/diag/crs` and check for failure logs. If the failure occurred during the database creation then check the database logs. + +### Section 4.3: Adding an Oracle RAC Node using a Docker Container + +Before proceeding to the next step, ensure Oracle Grid Infrastructure is running and the Oracle RAC Database is open as per instructions in [Section 4.2: Setup Oracle RAC on Docker](#section-42-setup-oracle-rac-container-on-docker). Otherwise, the node addition process will fail. + +Refer the [Section 3: Network and Password Management](#section-3--network-and-password-management) and setup the network on a container host based on your Oracle RAC environment. If you have already done the setup, ignore and proceed further. + +To understand the details of environment variable, refer For the details of environment variables [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes) + +Reset the password on the existing Oracle RAC node for SSH setup between an existing node in the cluster and the new node. Password must be the same on all the nodes for the `grid` and `oracle` users. Execute the following command on an existing node of the cluster. + +```bash +docker exec -i -t -u root racnode1 /bin/bash +sh /opt/scripts/startup/resetOSPassword.sh --help +sh /opt/scripts/startup/resetOSPassword.sh --op_type reset_grid_oracle --pwd_file common_os_pwdfile.enc --secret_volume /run/secrets --pwd_key_file pwd.key +``` + +**Note:** If you do not have a common secret volume among Oracle RAC containers, populate the password file with the same password that you have used on the new node, encrypt the file, and execute `resetOSPassword.sh` on the existing node of the cluster. 
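+
+For reference, the following is a minimal sketch of seeding and encrypting that password file on the existing node's host, reusing the `pwd.key` generated in Section 3. The seed password shown is a placeholder; substitute your own value:
+
+```bash
+# echo "<your_common_password>" > /opt/.secrets/common_os_pwdfile
+# openssl enc -aes-256-cbc -salt -in /opt/.secrets/common_os_pwdfile -out /opt/.secrets/common_os_pwdfile.enc -pass file:/opt/.secrets/pwd.key
+# rm -f /opt/.secrets/common_os_pwdfile
+```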
+ +#### Deploying Oracle RAC Additional Node on Container with Block Devices on Docker + +If you are using an NFS volume, skip to the section [Deploying Oracle RAC on Container with Oracle RAC Storage Container on Docker](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container) + +To create additional nodes, use the following command: + +```bash +# docker create -t -i \ + --hostname racnode2 \ + --volume /dev/shm \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /boot:/boot:ro \ + --dns-search=example.com \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --device=/dev/xvde:/dev/asm_disk1 \ + --device=/dev/zvdf:/dev/asm_disk2 \ + --privileged=false \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + -e DNS_SERVERS="172.16.1.25" \ + -e EXISTING_CLS_NODES=racnode1 \ + -e NODE_VIP=172.16.1.161 \ + -e VIP_HOSTNAME=racnode2-vip \ + -e PRIV_IP=192.168.17.151 \ + -e PRIV_HOSTNAME=racnode2-priv \ + -e PUBLIC_IP=172.16.1.151 \ + -e PUBLIC_HOSTNAME=racnode2 \ + -e DOMAIN=example.com \ + -e SCAN_NAME=racnode-scan \ + -e ASM_DISCOVERY_DIR=/dev \ + -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ + -e ORACLE_SID=ORCLCDB \ + -e OP_TYPE=ADDNODE \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --restart=always \ + --name racnode2 \ + oracle/database-rac:21.3.0 +``` + +For details of all environment variables and parameters, refer to [Section 7](#section-7-environment-variables-for-the-first-node). + +#### Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Docker + +If you are using physical block devices for shared storage, skip to [Deploying Oracle RAC on Container with Block Devices on Docker](#deploying-oracle-rac-on-container-with-block-devices-on-docker) + +Use the existing `racstorage:/oradata` volume when creating the additional container using the image. + +For example: + +```bash +# docker create -t -i \ + --hostname racnode2 \ + --volume /dev/shm \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /boot:/boot:ro \ + --dns-search=example.com \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --privileged=false \ + --volume racstorage:/oradata \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + -e DNS_SERVERS="172.16.1.25" \ + -e EXISTING_CLS_NODES=racnode1 \ + -e NODE_VIP=172.16.1.161 \ + -e VIP_HOSTNAME=racnode2-vip \ + -e PRIV_IP=192.168.17.151 \ + -e PRIV_HOSTNAME=racnode2-priv \ + -e PUBLIC_IP=172.16.1.151 \ + -e PUBLIC_HOSTNAME=racnode2 \ + -e DOMAIN=example.com \ + -e SCAN_NAME=racnode-scan \ + -e ASM_DISCOVERY_DIR=/oradata \ + -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ + -e ORACLE_SID=ORCLCDB \ + -e OP_TYPE=ADDNODE \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + --tmpfs=/run -v /sys/fs/cgroup:/sys/fs/cgroup:ro \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --restart=always \ + --name racnode2 \ + oracle/database-rac:21.3.0 +``` + +**Notes:** + +- You must have created **racstorage** volume before the creation of the Oracle RAC container. +- You can change env variables such as IPs and ORACLE_PWD based on your env. 
For details about the env variables, refer the section 8. + +#### Assign Network to additional Oracle RAC docker container + +Connect the private and public networks you created earlier to the container: + +```bash +# docker network disconnect bridge racnode2 +# docker network connect rac_pub1_nw --ip 172.16.1.151 racnode2 +# docker network connect rac_priv1_nw --ip 192.168.17.151 racnode2 +``` + +#### Start Oracle RAC docker container + +Start the container + +```bash +# docker start racnode2 +``` + +To check the database logs, tail the logs using the following command: + +```bash +# docker logs -f racnode2 +``` + +You should see the database creation success message at the end. + +```text +#################################### +ORACLE RAC DATABASE IS READY TO USE! +#################################### +``` + +#### Connect to the Oracle RAC container on Additional Node + +To connect to the container execute the following command: + +```bash +# docker exec -i -t racnode2 /bin/bash +``` + +If the node addition fails, log in to the container using the preceding command and review `/tmp/orod.log`. You can also review the Grid Infrastructure logs i.e. `$GRID_BASE/diag/crs` and check for failure logs. If the node creation has failed during the database creation process, then check DB logs. + +## Section 5: Oracle RAC on Podman + +If you are deploying Oracle RAC On Docker, skip to [Section 4: Oracle RAC on Docker](#section-4-oracle-rac-on-docker) + +**Note** Oracle RAC is supported for production use on Podman starting with Oracle Database 19c (19.16), and Oracle Database 21c (21.7). You can deploy Oracle RAC on Podman using the pre-built images available on Oracle Container Registry. Execute the following steps in a given order to deploy RAC on Podman: + +To create an Oracle RAC environment on Podman, complete each of these steps in order. + +### Section 5.1 : Prerequisites for Running Oracle RAC on Podman + +You must install and configure [Podman release 4.0.2](https://docs.oracle.com/en/operating-systems/oracle-linux/Podman/) or later on Oracle Linux 8.5 or later to run Oracle RAC on Podman. + +**IMPORTANT:** Completing prerequisite steps is a requirement for successful configuration. + +Complete each prerequisite step in order, customized for your environment. + +1. Verify that you have enough memory and CPU resources available for all containers. In this `README.md` for Podman, we used the following configuration: + + - 2 Podman hosts + - CPU Cores: 1 Socket with 4 cores, with 2 threads for each core Intel® Xeon® Platinum 8167M CPU at 2.00 GHz + - RAM: 60 GB + - Swap memory: 32 GB + - Oracle Linux 8.5 (Linux-x86-64) with the Unbreakable Enterprise Kernel 6: `5.4.17-2136.300.7.el8uek.x86_64`. + +2. Oracle RAC must run certain processes in real-time mode. To run processes inside a container in real-time mode, populate the real-time CPU budgeting on machine restarts. 
Create a `oneshot` systemd service as follows: + + - Create a file `/etc/systemd/system/Podman-rac-cgroup.service` + - Append the following lines: + + ```INI + [Unit] + Description=Populate Cgroups with real time chunk on machine restart + After=multi-user.target + [Service] + Type=oneshot + ExecStart=/bin/bash -c "/bin/echo 950000 > /sys/fs/cgroup/cpu,cpuacct/machine.slice/cpu.rt_runtime_us && /bin/systemctl restart podman-restart.service" + StandardOutput=journal + CPUAccounting=yes + Slice=machine.slice + [Install] + WantedBy=multi-user.target + ``` + + - After creating the file `/etc/systemd/system/Podman-rac-cgroup.service` with the lines appended in the preceding step, reload systemd and enable the services using the following steps: + + ```bash + systemctl daemon-reload + systemctl enable Podman-rac-cgroup.service + systemctl enable podman-restart.service + systemctl start Podman-rac-cgroup.service + ``` + +3. If SELinux is enabled on the Podman host, then you must create an SELinux policy for Oracle RAC on Podman. For details about this procedure, see "How to Configure Podman for SELinux Mode" in the publication [Oracle Real Application Clusters Installation Guide for Podman Oracle Linux x86-64](https://docs.oracle.com/en/database/oracle/oracle-database/21/racpd/target-configuration-oracle-rac-podman.html#GUID-59138DF8-3781-4033-A38F-E0466884D008). + +### Section 5.2: Setup RAC Containers on Podman +This section provides a step-by-step procedure to deploy Oracle RAC in containers with block devices or a storage container. For the details of the environment variables, refer to [Section 7: Environment Variables for the First Node](#section-7-environment-variables-for-the-first-node). + +Refer to [Section 3: Network and Password Management](#section-3--network-and-password-management) and set up the network on the container host based on your Oracle RAC environment. If you have already done the setup, ignore this step and proceed further. + +#### Deploying Oracle RAC Containers with Block Devices on Podman + +If you are using an NFS volume, skip to the section [Deploying Oracle RAC on Container With Oracle RAC Storage Container on Podman](#deploying-oracle-rac-on-container-with-oracle-rac-storage-container-on-podman). + +Make sure the ASM devices do not have any existing file system. To clear any other file system from the devices, use the following command: + + ```bash + # dd if=/dev/zero of=/dev/xvde bs=8k count=100000 + ``` + +Repeat for each shared block device. In the preceding example, `/dev/xvde` is a shared Xen virtual block device. + +Now create the Oracle RAC container using the image. For the details of environment variables, refer to Section 7.
You can use the following example to create a container: + + ```bash + # podman create -t -i \ + --hostname racnode1 \ + --volume /boot:/boot:ro \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --device=/dev/xvde:/dev/asm_disk1 \ + --device=/dev/xvdf:/dev/asm_disk2 \ + --privileged=false \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + --cap-add=AUDIT_WRITE \ + --cap-add=AUDIT_CONTROL \ + -e DNS_SERVERS="172.16.1.25" \ + -e NODE_VIP=172.16.1.160 \ + -e VIP_HOSTNAME=racnode1-vip \ + -e PRIV_IP=192.168.17.150 \ + -e PRIV_HOSTNAME=racnode1-priv \ + -e PUBLIC_IP=172.16.1.150 \ + -e PUBLIC_HOSTNAME=racnode1 \ + -e SCAN_NAME=racnode-scan \ + -e OP_TYPE=INSTALL \ + -e DOMAIN=example.com \ + -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ + -e ASM_DISCOVERY_DIR=/dev \ + -e CMAN_HOSTNAME=racnode-cman1 \ + -e CMAN_IP=172.16.1.15 \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + --restart=always \ + --systemd=always \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --name racnode1 \ + localhost/oracle/database-rac:21.3.0-21.7.0 + ``` + +**Note:** Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. + +#### Deploying Oracle RAC on Container With Oracle RAC Storage Container on Podman + +If you are using block devices, skip to the section [Deploying RAC Containers with Block Devices on Podman](#deploying-oracle-rac-containers-with-block-devices-on-podman) +Now create the Oracle RAC container using the image. You can use the following example to create a container: + + ```bash + # podman create -t -i \ + --hostname racnode1 \ + --volume /boot:/boot:ro \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --privileged=false \ + --volume racstorage:/oradata \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + --cap-add=AUDIT_WRITE \ + --cap-add=AUDIT_CONTROL \ + -e DNS_SERVERS="172.16.1.25" \ + -e NODE_VIP=172.16.1.160 \ + -e VIP_HOSTNAME=racnode1-vip \ + -e PRIV_IP=192.168.17.150 \ + -e PRIV_HOSTNAME=racnode1-priv \ + -e PUBLIC_IP=172.16.1.150 \ + -e PUBLIC_HOSTNAME=racnode1 \ + -e SCAN_NAME=racnode-scan \ + -e OP_TYPE=INSTALL \ + -e DOMAIN=example.com \ + -e ASM_DISCOVERY_DIR=/oradata \ + -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ + -e CMAN_HOSTNAME=racnode-cman1 \ + -e CMAN_IP=172.16.1.15 \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + --restart=always \ + --systemd=always \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --name racnode1 \ + localhost/oracle/database-rac:21.3.0-21.7.0 + ``` + +**Notes:** + +- Change environment variables such as `NODE_IP`, `PRIV_IP`, `PUBLIC_IP`, `ASM_DEVICE_LIST`, `PWD_FILE`, and `PWD_KEY` based on your environment. Also, ensure you use the correct device names on each host. +- You must have created the `racstorage` volume before the creation of the Oracle RAC Container. For details about the available environment variables, refer the [Section 7](#section-7-environment-variables-for-the-first-node). 
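+
+As noted above, the `racstorage` volume must already exist on the Podman host before you create the container. One quick way to verify it is shown in the following sketch (output details vary by Podman release):
+
+```bash
+# podman volume ls | grep racstorage
+# podman volume inspect racstorage
+```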
+ +#### Assign networks to Oracle RAC podman containers + +You need to assign the Podman networks created in section 1 to containers. Execute the following commands: + + ```bash + # podman network disconnect bridge racnode1 + # podman network connect rac_pub1_nw --ip 172.16.1.150 racnode1 + # podman network connect rac_priv1_nw --ip 192.168.17.150 racnode1 + ``` + +#### Start the first podman container + +To start the first container, run the following command: + + ```bash + # podman start racnode1 + ``` + +It can take at least 40 minutes or longer to create the first node of the cluster. To check the database logs, tail the logs using the following command: + +```bash +podman exec racnode1 /bin/bash -c "tail -f /tmp/orod.log" +``` + +You should see the database creation success message at the end. + +```text +#################################### +ORACLE RAC DATABASE IS READY TO USE! +#################################### +``` + +#### Connect to the Oracle RAC podman container + +To connect to the container execute the following command: + +```bash +# podman exec -i -t racnode1 /bin/bash +``` + +If the install fails for any reason, log in to the container using the preceding command and check `/tmp/orod.log`. You can also review the Grid Infrastructure logs located at `$GRID_BASE/diag/crs` and check for failure logs. If the failure occurred during the database creation then check the database logs. + +### Section 5.3: Adding a Oracle RAC Node using a container on Podman + +Before proceeding to the next step, ensure Oracle Grid Infrastructure is running and the Oracle RAC Database is open as per instructions in [Section 5.2: Setup RAC Containers on Podman](#section-52-setup-rac-containers-on-podman). Otherwise, the node addition process will fail. + +Refer the [Section 3: Network and Password Management](#section-3--network-and-password-management) and setup the network on a container host based on your Oracle RAC environment. If you have already done the setup, ignore and proceed further. + +To understand the details of environment variable, refer For the details of environment variables [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes). + + +Reset the password on the existing Oracle RAC node for SSH setup between an existing node in the cluster and the new node. Password must be the same on all the nodes for the `grid` and `oracle` users. Execute the following command on an existing node of the cluster. + +```bash +podman exec -i -t -u root racnode1 /bin/bash +sh /opt/scripts/startup/resetOSPassword.sh --help +sh /opt/scripts/startup/resetOSPassword.sh --op_type reset_grid_oracle --pwd_file common_os_pwdfile.enc --secret_volume /run/secrets --pwd_key_file pwd.key +``` + +**Note:** If you do not have a common secret volume among Oracle RAC containers, populate the password file with the same password that you have used on the new node, encrypt the file, and execute `resetOSPassword.sh` on the existing node of the cluster. + +#### Deploying Oracle RAC Additional Node on Container with Block Devices on Podman + +If you are using an NFS volume, skip to the section [Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Podman](#deploying-oracle-rac-additional-node-on-container-with-oracle-rac-storage-container-on-podman). 
+ +To create additional nodes, use the following command: + +```bash +# podman create -t -i \ + --hostname racnode2 \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /boot:/boot:ro \ + --dns-search=example.com \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --device=/dev/xvde:/dev/asm_disk1 \ + --device=/dev/zvdf:/dev/asm_disk2 \ + --privileged=false \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + --cap-add=AUDIT_CONTROL \ + --cap-add=AUDIT_WRITE \ + -e DNS_SERVERS="172.16.1.25" \ + -e EXISTING_CLS_NODES=racnode1 \ + -e NODE_VIP=172.16.1.161 \ + -e VIP_HOSTNAME=racnode2-vip \ + -e PRIV_IP=192.168.17.151 \ + -e PRIV_HOSTNAME=racnode2-priv \ + -e PUBLIC_IP=172.16.1.151 \ + -e PUBLIC_HOSTNAME=racnode2 \ + -e DOMAIN=example.com \ + -e SCAN_NAME=racnode-scan \ + -e ASM_DISCOVERY_DIR=/dev \ + -e ASM_DEVICE_LIST=/dev/asm_disk1,/dev/asm_disk2 \ + -e ORACLE_SID=ORCLCDB \ + -e OP_TYPE=ADDNODE \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + --systemd=always \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --restart=always \ + --name racnode2 \ + localhost/oracle/database-rac:21.3.0-21.7.0 +``` + +For details of all environment variables and parameters, refer to [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes). + +#### Deploying Oracle RAC Additional Node on Container with Oracle RAC Storage Container on Podman + +If you are using physical block devices for shared storage, skip to [Deploying Oracle RAC Additional Node on Container with Block Devices on Podman](#deploying-oracle-rac-additional-node-on-container-with-block-devices-on-podman). + +Use the existing `racstorage:/oradata` volume when creating the additional container using the image. + +For example: + +```bash +# podman create -t -i \ + --hostname racnode2 \ + --tmpfs /dev/shm:rw,exec,size=4G \ + --volume /boot:/boot:ro \ + --dns-search=example.com \ + --volume /opt/containers/rac_host_file:/etc/hosts \ + --volume /opt/.secrets:/run/secrets:ro \ + --dns=172.16.1.25 \ + --dns-search=example.com \ + --privileged=false \ + --volume racstorage:/oradata \ + --cap-add=SYS_NICE \ + --cap-add=SYS_RESOURCE \ + --cap-add=NET_ADMIN \ + --cap-add=AUDIT_WRITE \ + --cap-add=AUDIT_CONTROL \ + -e DNS_SERVERS="172.16.1.25" \ + -e EXISTING_CLS_NODES=racnode1 \ + -e NODE_VIP=172.16.1.161 \ + -e VIP_HOSTNAME=racnode2-vip \ + -e PRIV_IP=192.168.17.151 \ + -e PRIV_HOSTNAME=racnode2-priv \ + -e PUBLIC_IP=172.16.1.151 \ + -e PUBLIC_HOSTNAME=racnode2 \ + -e DOMAIN=example.com \ + -e SCAN_NAME=racnode-scan \ + -e ASM_DISCOVERY_DIR=/oradata \ + -e ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ + -e ORACLE_SID=ORCLCDB \ + -e OP_TYPE=ADDNODE \ + -e COMMON_OS_PWD_FILE=common_os_pwdfile.enc \ + -e PWD_KEY=pwd.key \ + --systemd=always \ + --cpu-rt-runtime=95000 \ + --ulimit rtprio=99 \ + --restart=always \ + --name racnode2 \ + localhost/oracle/database-rac:21.3.0-21.7.0 +``` + +**Notes:** + +- You must have created **racstorage** volume before the creation of the Oracle RAC container. +- You can change env variables such as IPs and ORACLE_PWD based on your env. For details about the env variables, refer the [Section 8](#section-8-environment-variables-for-the-second-and-subsequent-nodes). 
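+
+Before assigning networks and starting the new container, it can help to confirm that Oracle Grid Infrastructure and the database are healthy on the existing node, as required at the beginning of Section 5.3. The following sketch opens a shell in the existing container and runs the checks inside it; the Grid home path `/u01/app/21.3.0/grid` is an assumption, so adjust it to match your image:
+
+```bash
+# podman exec -i -t racnode1 /bin/bash
+# /u01/app/21.3.0/grid/bin/crsctl check cluster -all      # Clusterware stack is up on all nodes
+# /u01/app/21.3.0/grid/bin/olsnodes -n                    # existing cluster membership
+# /u01/app/21.3.0/grid/bin/srvctl status database -d ORCLCDB   # database instances are running
+```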
+ +#### Assign Network to additional Oracle RAC podman container + +Connect the private and public networks you created earlier to the container: + +```bash +# podman network disconnect bridge racnode2 +# podman network connect rac_pub1_nw --ip 172.16.1.151 racnode2 +# podman network connect rac_priv1_nw --ip 192.168.17.151 racnode2 +``` + +#### Start Oracle RAC podman container + +Start the container + +```bash +# podman start racnode2 +``` + +To check the database logs, tail the logs using the following command: + +```bash +podman exec racnode2 /bin/bash -c "tail -f /tmp/orod.log" +``` + +You should see the database creation success message at the end. + +```text +#################################### +ORACLE RAC DATABASE IS READY TO USE! +#################################### +``` + +## Section 6: Connecting to an Oracle RAC Database + +**IMPORTANT:** This section assumes that you have successfully created an Oracle RAC cluster using the preceding sections. + +If you are using a connection manager and exposed the port 1521 on the host, then connect from an external client using the following connection string, where `` is the host container, and `` is the database system identifier: + +```bash +system/@//:1521/ +``` + +If you are using the bridge created using MACVLAN driver, and you have configured DNS appropriately, then you can connect using the public Single Client Access (SCAN) listener directly from any external client. To connect with the SCAN, use the following connection string, where `` is the SCAN name for the database, and `` is the database system identifier: + +```bash +system/@//:1521/ +``` + +## Section 7: Environment Variables for the First Node + +This section provides information about the environment variables that can be used when creating the first node of a cluster. + +```bash +OP_TYPE=###Specify the Operation TYPE. It can accept 2 values INSTALL OR ADDNODE#### +NODE_VIP=####Specify the Node VIP### +VIP_HOSTNAME=###Specify the VIP hostname### +PRIV_IP=###Specify the Private IP### +PRIV_HOSTNAME=###Specify the Private Hostname### +PUBLIC_IP=###Specify the public IP### +PUBLIC_HOSTNAME=###Specify the public hostname### +SCAN_NAME=###Specify the scan name### +ASM_DEVICE_LIST=###Specify the ASM Disk lists. +SCAN_IP=###Specify this if you do not have DNS server### +DOMAIN=###Default value set to example.com### +PASSWORD=###OS password will be generated by openssl### +CLUSTER_NAME=###Default value set to racnode-c#### +ORACLE_SID=###Default value set to ORCLCDB### +ORACLE_PDB=###Default value set to ORCLPDB### +ORACLE_PWD=###Default value set to generated by openssl random password### +ORACLE_CHARACTERSET=###Default value set AL32UTF8### +DEFAULT_GATEWAY=###Default gateway. You need this env variable if containers will be running on multiple hosts.#### +CMAN_HOSTNAME=###Connection Manager Host Name### +CMAN_IP=###Connection manager Host IP### +ASM_DISCOVERY_DIR=####ASM disk location insdie the container. By default it is /dev###### +COMMON_OS_PWD_FILE=###Pass the file name to setup grid and oracle user password. If you specify ORACLE_PWD_FILE, GRID_PWD_FILE, and DB_PWD_FILE then you do not need to specify this env variable### +ORACLE_PWD_FILE=###Pass the file name to set the password for oracle user.### +GRID_PWD_FILE=###Pass the file name to set the password for grid user.### +DB_PWD_FILE=###Pass the file name to set the password for DB user i.e. 
sys.### +REMOVE_OS_PWD_FILES=###Set this env variable to true to remove pwd key file and password file after resetting password.### +CONTAINER_DB_FLAG=###Default value is set to true to create container database. Set this to false if you do not want to create container database.### +``` + +## Section 8: Environment Variables for the Second and Subsequent Nodes + +This section provides the details about the environment variables that can be used for all additional nodes added to an existing cluster. + +```bash +OP_TYPE=###Specify the Operation TYPE. It can accept 2 values INSTALL OR ADDNODE### +EXISTING_CLS_NODES=###Specify the Existing Node of the cluster which you want to join. If you have 2 nodes in the cluster and you are trying to add the third node then specify existing 2 nodes of the clusters and separate them by comma.#### +NODE_VIP=###Specify the Node VIP### +VIP_HOSTNAME=###Specify the VIP hostname### +PRIV_IP=###Specify the Private IP### +PRIV_HOSTNAME=###Specify the Private Hostname### +PUBLIC_IP=###Specify the public IP### +PUBLIC_HOSTNAME=###Specify the public hostname### +SCAN_NAME=###Specify the scan name### +SCAN_IP=###Specify this if you do not have DNS server### +ASM_DEVICE_LIST=###Specify the ASM Disk lists. +DOMAIN=###Default value set to example.com### +ORACLE_SID=###Default value set to ORCLCDB### +DEFAULT_GATEWAY=###Default gateway. You need this env variable if containers will be running on multiple hosts.#### +CMAN_HOSTNAME=###Connection Manager Host Name### +CMAN_IP=###Connection manager Host IP### +ASM_DISCOVERY_DIR=####ASM disk location inside the container. By default it is /dev###### +COMMON_OS_PWD_FILE=###You need to pass the file name to setup grid and oracle user password. If you specify ORACLE_PWD_FILE, GRID_PWD_FILE, and DB_PWD_FILE then you do not need to specify this env variable### +ORACLE_PWD_FILE=###You need to pass the file name to set the password for oracle user.### +GRID_PWD_FILE=###You need to pass the file name to set the password for grid user.### +DB_PWD_FILE=###You need to pass the file name to set the password for DB user i.e. sys.### +REMOVE_OS_PWD_FILES=###You need to set this to true to remove pwd key file and password file after resetting password.### +``` + +## Section 9: Building a Patched Oracle RAC Container Image + +If you want to build a patched image based on a base 21.3.0 container image, then refer to the GitHub page [Example of how to create a patched database image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch). + +## Section 10 : Sample Container Files for Older Releases + +### Docker Container Files + +This project offers sample container files for Oracle Grid Infrastructure and Oracle Real Application Clusters for dev and test: + +* Oracle Database 19c Oracle Grid Infrastructure (19.3) for Linux x86-64 +* Oracle Database 19c (19.3) for Linux x86-64 +* Oracle Database 18c Oracle Grid Infrastructure (18.3) for Linux x86-64 +* Oracle Database 18c (18.3) for Linux x86-64 +* Oracle Database 12c Release 2 Oracle Grid Infrastructure (12.2.0.1.0) for Linux x86-64 +* Oracle Database 12c Release 2 (12.2.0.1.0) Enterprise Edition for Linux x86-64 + + **Notes:** + +* Note that the Oracle RAC on Docker Container releases are supported only for test and development environments, but not for production environments. 
+* If you are planning to build and deploy Oracle RAC 18.3.0, you need to download Oracle 18.3.0 Grid Infrastructure and Oracle Database 18.3.0 Database. You also need to download Patch# p28322130_183000OCWRU_Linux-x86-64.zip from [Oracle Technology Network](https://www.oracle.com/technetwork/database/database-technologies/clusterware/downloads/docker-4418413.html). +Stage it under containerfiles/18.3.0 folder. +* If you are planning to build and deploy Oracle RAC 12.2.0.1, you need to download Oracle 12.2.0.1 Grid Infrastructure and Oracle Database 12.2.0.1 Database. You also need to download Patch# p27383741_122010_Linux-x86-64.zip from [Oracle Technology Network](https://www.oracle.com/technetwork/database/database-technologies/clusterware/downloads/docker-4418413.html). +Stage it under containerfiles/12.2.0.1 folder. + +### Podman Container Files + +This project offers sample container files for Oracle Grid Infrastructure and Oracle Real Application Clusters for dev and test: + +* Oracle Database 19c Oracle Grid Infrastructure (19.3) for Linux x86-64 +* Oracle Database 19c (19.3) for Linux x86-64 + + **Notes:** + +* Because Oracle RAC on Podman is supported on 19c from 19.16 or later, you must download the grid release update (RU) from [support.oracle.com](https://support.oracle.com/portal/). In this case, we downloaded RU `34130714`. +* Download following one-offs for 19.16 from [support.oracle.com](https://support.oracle.com/portal/) + * `34339952` + * `32869666` +* Before starting the next step, you must edit `docker-images/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/19.3.0/Dockerfile`, change `oraclelinux:7-slim` to `oraclelinux:8`, and save the file. +* You must add `CV_ASSUME_DISTID=OEL8` inside the `Dockerfile` as an env variable. + +* Once the `19.3.0` Oracle RAC on Podman image is built, start building patched image with the download 19.16 RU and one-offs. To build the patch the image, refer [Example of how to create a patched database image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch). +* Make changes in `/opt/containers/envfile` as per 19c `Dockerfile`. You need to change all the contents based on 19c such as `GRID_HOME`, `ORACLE_HOME` and `ADDNODE_RSP` which you have used in `Dockerfile` while building the image. + +## Section 11 : Support + +### Docker Support + +At the time of this release, Oracle RAC on Docker is supported only on Oracle Linux 7. To see current details, refer the [Real Application Clusters Installation Guide for Docker Containers Oracle Linux x86-64](https://docs.oracle.com/en/database/oracle/oracle-database/21/racdk/oracle-rac-on-docker.html). + +### Podman Support + +At the time of this release, Oracle RAC on Podman is supported for Oracle Linux 8.5 later. To see current Linux support certifications, refer [Oracle RAC on Podman Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/install-and-upgrade.html) + +## Section 12 : License + +To download and run Oracle Grid and Database, regardless of whether inside or outside a container, you must download the binaries from the Oracle website and accept the license indicated on that page. + +All scripts and files hosted in this repository which are required to build the container images are, unless otherwise noted, released under UPL 1.0 license. + +## Section 11 : Copyright + +Copyright (c) 2014-2022 Oracle and/or its affiliates. 
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/developers/ENVVARIABLESCOMPOSE.md b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/developers/ENVVARIABLESCOMPOSE.md new file mode 100644 index 0000000000..1821f037f3 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/developers/ENVVARIABLESCOMPOSE.md @@ -0,0 +1,60 @@ +# Environment Variables Explained for Oracle RAC on Podman Compose + +Learn about the environment variables (env variables) that you can use when creating a two-node Oracle Real Application Clusters (Oracle RAC) cluster. + +| Variable Name | Description | +|----------------------------|-----------------------------------------------------------------------------| +| DNS_PUBLIC_IP | Default set to `10.0.20.25`. Set this env variable when you want to set DNS container public IP address where both Oracle RAC nodes are resolved. | +| DNS_CONTAINER_NAME | Default set to `rac-dnsserver`. Set this env variable when you want to set a name for the DNS container. | +| DNS_HOST_NAME | Default set to `racdns`. Set this env variable when you want to set the DNS container host name. | +| DNS_IMAGE_NAME | Default set to `"localhost/oracle/rac-dnsserver:latest"`. Set this env variable when you want to set the DNS image name. | +| RAC_NODE_NAME_PREFIXP | Default set to `racnodep`. Set this env variable when you want to use a different prefix for DNS podman container resolutions. | +| DNS_DOMAIN | Default set to `example.info`. Set this env variable when you want to set the DNS domain. | +| PUBLIC_NETWORK_NAME | Default set to `rac_pub1_nw`. Set this env variable when you want to set the public podman network name for the Oracle RAC cluster. | +| PUBLIC_NETWORK_SUBNET | Default set to `10.0.20.0/24`. Set this env variable when you want to set the public network subnet. | +| PRIVATE1_NETWORK_NAME | Default set to `rac_priv1_nw`. Set this env variable when you want to specify the first private network name. | +| PRIVATE1_NETWORK_SUBNET | Default set to `192.168.17.0/24`. Set this env variable when you want to set the first private network subnet. | +| PRIVATE2_NETWORK_NAME | Default set to `rac_priv2_nw`. Set this env variable when you want to specify the second private network name. | +| PRIVATE2_NETWORK_SUBNET | Default set to `192.168.18.0/24`. Set this env variable when you want to set the second private network subnet. | +| RACNODE1_CONTAINER_NAME | Default set to `racnodep1`. Set this env variable when you want to specify the container name for the first Oracle RAC container. | +| RACNODE1_HOST_NAME | Default set to `racnodep1`. Set this env variable when you want to specify host name for the first RAC container. | +| RACNODE1_PUBLIC_IP | Default set to `10.0.20.170`. Set this env variable when you want to set the public IP for the first Oracle RAC container. | +| RACNODE1_CRS_PRIVATE_IP1 | Default set to `192.168.17.170`. Set this env variable when you want to set the private IP for the first private network of the first Oracle RAC container. | +| RACNODE1_CRS_PRIVATE_IP2 | Default set to `192.168.18.170`. Set this env variable when you want to set the private IP for the second private network of the first Oracle RAC container. | +| INSTALL_NODE | Default set to `racnodep1`. Set this env variable to any of the RAC containers. Note: This value will remain the same across the Oracle RAC Cluster for both nodes where the actual Oracle RAC cluster installation occurs. | +| RAC_IMAGE_NAME | Default set to `localhost/oracle/database-rac:21.0.0`. 
Set this env variable when you want to specify the Oracle RAC Image name. |
+| CRS_NODES | Default set to `"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip"`. Set this env variable to a value with the same format used here for all the Oracle RAC cluster nodes in the cluster setup. |
+| SCAN_NAME | Default set to `racnodepc1-scan`. Set this env variable when you want to specify a resolvable SCAN name from the DNS. |
+| CRS_ASM_DISCOVERY_STRING | With NFS storage devices, the default is set to `/oradata`. With block devices, the default is set to `/dev/asm-disk*`. This value specifies the discovery string for ASM. Do not change this unless you have modified `podman-compose.yml` to find a different discovery string. |
+| CRS_ASM_DEVICE_LIST | Default set to `/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img`. This device list is used with NFS storage devices. Do not change this value. |
+| ASM_DISK1 | Default set to `/dev/oracleoci/oraclevdd`. Set this env variable for block device setup when you want to specify the first ASM disk. |
+| ASM_DISK2 | Default set to `/dev/oracleoci/oraclevde`. Set this env variable for block device setup when you want to specify the second ASM disk. |
+| RACNODE2_CONTAINER_NAME | Default set to `racnodep2`. Set this env variable when you want to set the container name for the second Oracle RAC container. |
+| RACNODE2_HOST_NAME | Default set to `racnodep2`. Set this env variable when you want to set the host name for the second Oracle RAC container. |
+| RACNODE2_PUBLIC_IP | Default set to `10.0.20.171`. Set this env variable when you want to set the public IP for the second Oracle RAC container. |
+| RACNODE2_CRS_PRIVATE_IP1 | Default set to `192.168.17.171`. Set this env variable when you want to set the first private IP for the second Oracle RAC container. |
+| RACNODE2_CRS_PRIVATE_IP2 | Default set to `192.168.18.171`. Set this env variable when you want to set the second private IP for the second Oracle RAC container. |
+| PWD_SECRET_FILE | Default set to `/opt/.secrets/pwdfile.enc`. Do not change this value. |
+| KEY_SECRET_FILE | Default set to `/opt/.secrets/key.pem`. Do not change this value. |
+| CMAN_CONTAINER_NAME | Default set to `racnodepc1-cman`. Set this env variable when you want to set a connection manager container name. |
+| CMAN_HOST_NAME | Default set to `racnodepc1-cman`. Set this env variable when you want to set the hostname for the connection manager container. |
+| CMAN_IMAGE_NAME | Default set to `"localhost/oracle/client-cman:21.0.0"`. Set this env variable when you want to set the connection manager image name. |
+| CMAN_PUBLIC_IP | Default set to `10.0.20.15`. Set this env variable when you want to set the public IP for the connection manager container. |
+| CMAN_PUBLIC_HOSTNAME | Default set to `racnodepc1-cman`. Set this env variable when you want to set the public hostname for the connection manager container. |
+| DB_HOSTDETAILS | Default set to `HOST=racnodepc1-scan:RULE_ACT=accept,HOST=racnodep1:IP=10.0.20.170`. Set this env variable when you want to use the connection manager container to set details for the database host. |
+| STORAGE_CONTAINER_NAME | Default set to `racnode-storage`. Set this env variable when you want to set the container name of the storage container. |
+| STORAGE_HOST_NAME | Default set to `racnode-storage`. Set this env variable when you want to set the host name for the storage container.
| +| STORAGE_IMAGE_NAME | Default set to `"localhost/oracle/rac-storage-server:latest"`. Set this env variable when you want to set the storage image name. | +| ORACLE_DBNAME | Default set to `ORCLCDB`. Set this env variable when you want to set the Oracle RAC database name. | +| STORAGE_PRIVATE_IP | Default set to `192.168.17.80`. Set this env variable when you want to set the private IP for the storage container. | +| NFS_STORAGE_VOLUME | Default set to `/scratch/stage/rac-storage/$ORACLE_DBNAME`. Set this env variable when you want to specify the path used by the NFS storage container. The path location must contain at least 50 GB of space. | +| DB_SERVICE | Default set to `service:soepdb`. Set this env variable when you want to specify the database service you are creating, using the format of <_service_:_nameofservice_>. | +| EXISTING_CLS_NODE | Default set to `"racnodep1,racnodep2"` This environment variable is used only during node addition. | + +## License + +All scripts and files hosted in this repository that are required to build the container images are, unless otherwise noted, released under UPL 1.0 license. + +## Copyright + +Copyright (c) 2014-2025 Oracle and/or its affiliates. diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/developers/OTHERS.md b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/developers/OTHERS.md new file mode 100644 index 0000000000..956ff3eac7 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/developers/OTHERS.md @@ -0,0 +1,251 @@ +# Oracle Real Application Clusters in Linux Containers for Developers + +Learn about container deployment options for Oracle Real Application Clusters (Oracle RAC) Release 21c (v21.3). + +## Overview of Running Oracle RAC in Containers + +Oracle Real Application Clusters (Oracle RAC) is an option for the award-winning Oracle Database Enterprise Edition. Oracle RAC is a cluster database with a shared cache architecture that overcomes the limitations of traditional shared-nothing and shared-disk approaches to provide highly scalable and available database solutions for all business applications. + +Oracle RAC uses Oracle Clusterware as a portable cluster software that allows clustering of independent servers so that they cooperate as a single system, and Oracle Automatic Storage Management (Oracle ASM) to provide simplified storage management that is consistent across all servers and storage platforms. +Oracle Clusterware and Oracle ASM are part of the Oracle Grid Infrastructure, which bundles both solutions in an easy-to-deploy software package. + +For more information on Oracle RAC Database 21c, refer to the [Oracle Database documentation](http://docs.oracle.com/en/database/). + +This guide helps you install Oracle RAC on Containers on Host Machines as explained in detail below. With the current release, you prepare the host machine, build or use pre-built Oracle RAC Container Images v21.3, and set up Oracle RAC on Single or Multiple Host machines with Oracle ASM. +In this installation guide, we use [Podman](https://docs.podman.io/en/v3.0/) to create Oracle RAC Containers and manage them. 
+ +## Using this Documentation +To create an Oracle RAC environment, follow these steps: + +- [Oracle Real Application Clusters in Linux Containers for Developers](#oracle-real-application-clusters-in-linux-containers-for-developers) + - [Overview of Running Oracle RAC in Containers](#overview-of-running-oracle-rac-in-containers) + - [Using this Documentation](#using-this-documentation) + - [Preparation Steps for Running Oracle RAC in Containers](#preparation-steps-for-running-oracle-rac-database-in-containers) + - [Getting Oracle RAC Database Container Images](#getting-oracle-rac-database-container-images) + - [Building Oracle RAC Database Container Image](#building-oracle-rac-database-container-image) + - [Building Oracle RAC Database Container Slim Image](#building-oracle-rac-database-container-slim-image) + - [Network Management](#network-management) + - [Password Management](#password-management) + - [Oracle RAC on Containers Deployment Scenarios](#oracle-rac-on-containers-deployment-scenarios) + - [Oracle RAC Containers on Podman](#oracle-rac-containers-on-podman) + - [Setup Using Oracle RAC Image](#1-setup-using-oracle-rac-container-image) + - [Setup Using Oracle RAC Slim Image](#2-setup-using-oracle-rac-container-slim-image) + - [Connecting to an Oracle RAC Database](#connecting-to-an-oracle-rac-database) + - [Deletion of Node from Oracle RAC Cluster](#deletion-of-node-from-oracle-rac-cluster) + - [Building a Patched Oracle RAC Container Image](#building-a-patched-oracle-rac-container-image) + - [Sample Container Files for Older Releases](#sample-container-files-for-older-releases) + - [Cleanup](#cleanup) + - [Support](#support) + - [License](#license) + - [Copyright](#copyright) + +## Preparation Steps for Running Oracle RAC Database in Containers + +Before you proceed to the next section, you must complete each of the steps listed in this section and complete the following prerequisites. + +* Refer to the following sections in the publication [Oracle Real Application Clusters Installation Guide](https://docs.oracle.com/cd/F39414_01/racpd/oracle-real-application-clusters-installation-guide-podman-oracle-linux-x86-64.pdf) for Podman Oracle Linux x86-64 to complete the preparation steps for Oracle RAC on Container deployment: + + * Overview of Oracle RAC on Podman + * Host Preparation for Oracle RAC on Podman + * Podman Host Server Configuration + * Podman Containers and Oracle RAC Nodes + * Provisioning the Podman Host Server + * Podman Host Preparation + * Preparing for Podman Container Installation + * Installing Podman Engine + * Allocating Linux Resources for Oracle Grid Infrastructure Deployment + * How to Configure Podman for SELinux Mode +* Install `git` from dnf or yum repository and clone the git repo. We clone this repo on a path called `` and refer here. +* If you are planning to use NFS storage for OCR, Voting Disk, and Database files, then configure NFS storage and export at least one NFS mount. You can also use the `/docker-images/OracleDatabase/RAC/OracleRACStorageServer` container for the shared file system on NFS. Refer [OracleRACStorageServer](../OracleRACStorageServer/README.md). + +* If SELinux is enabled on the Podman host, you must create an SELinux policy for Oracle RAC on Podman. 
For details about this procedure, see `How to Configure Podman for SELinux Mode` in the publication [Oracle Real Application Clusters Installation Guide for Podman Oracle Linux x86-64](https://docs.oracle.com/en/database/oracle/oracle-database/21/racpd/target-configuration-oracle-rac-podman.html#GUID-59138DF8-3781-4033-A38F-E0466884D008).
+Also, when you are performing the installation using any files from a Podman host machine where SELinux is enabled, make sure that they are labeled correctly with the `container_file_t` context. You can use `ls -lZ ` to see the security context set on files.
+
+* To resolve VIPs and SCAN IPs, in this guide we use a DNS container. Before proceeding to the next step, create a [DNS server container](../OracleDNSServer/README.md).
+If you have a preconfigured DNS server in your environment, then you can replace the `-e DNS_SERVERS=10.0.20.25`, `--dns=10.0.20.25`, `-e DOMAIN=example.info`, and `--dns-search=example.info` parameters in the examples in this guide with the `DOMAIN_NAME` and `DNS_SERVER` based on your environment.
+
+* The Oracle RAC `Containerfile` does not contain any Oracle software binaries. If you are planning to build Oracle RAC Container Images as described in the next section, download the following software from the [Oracle Technology Network](https://www.oracle.com/technetwork/database/enterprise-edition/downloads/index.html).
+However, if you are using pre-built RAC images from the Oracle Container Registry, you can skip this step.
+ - Oracle Grid Infrastructure 21c (21.3) for Linux x86-64
+ - Oracle Database 21c (21.3) for Linux x86-64
+
+**Notes**
+
+- **For testing purposes only**, use the Oracle `DNSServer` Image to deploy a container providing DNS resolution. Refer to [OracleDNSServer](../OracleDNSServer/README.md) for details.
+- The `OracleRACStorageServer` container image can be used **only for testing purposes**. Refer to [OracleRACStorageServer](../OracleRACStorageServer/README.md) for details.
+- If the Podman bridge network is not available outside your host, you can use the Oracle Connection Manager [CMAN image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/RAC/OracleConnectionManager) to access the Oracle RAC Database from outside the host.
+
+## Getting Oracle RAC Database Container Images
+
+Oracle RAC is supported for production use on Podman starting with Oracle Database 19c (19.16) and Oracle Database 21c (21.7). You can also deploy Oracle RAC on Podman using the pre-built images available on the Oracle Container Registry.
+Refer to [this documentation](https://docs.oracle.com/en/operating-systems/oracle-linux/docker/docker-UsingDockerRegistries.html#docker-registry) for details on using the Oracle Container Registry.
+
+Example of pulling an Oracle RAC Image from the Oracle Container Registry:
+```bash
+# For Oracle RAC Container Image
+podman pull container-registry.oracle.com/database/rac_ru:21.16
+podman tag container-registry.oracle.com/database/rac_ru:21.16 localhost/oracle/database-rac:21.3.0
+```
+
+If you are using pre-built Oracle RAC images from the Oracle Container Registry, then you can skip the section that follows, where we build the container images.
+
+If you want to build the latest Oracle RAC Image from this GitHub repository instead of using a pre-built image, then follow these instructions.
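+
+For example, a minimal sketch of fetching the repository before running the build (the clone location `/scratch` is only an illustrative choice; use any staging directory with enough space):
+
+```bash
+# Clone the repository that contains the Oracle RAC container build files,
+# then change into the directory that holds buildContainerImage.sh.
+cd /scratch
+git clone https://github.com/oracle/docker-images.git
+cd docker-images/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles
+```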
+
+**IMPORTANT:** This section assumes that you have completed all of the prerequisites in [Preparation Steps for running Oracle RAC Database in containers](#preparation-steps-for-running-oracle-rac-database-in-containers), based on your environment. Ensure that you do not uncompress the binaries and patches manually before building the Oracle RAC Image.
+
+To assist in building the images, you can use the [`buildContainerImage.sh`](./containerfiles/buildContainerImage.sh) script. See the following for instructions and usage.
+
+### Building Oracle RAC Database Container Image
+
+In this document, Oracle RAC Database Container Image refers to an Oracle RAC Database Container Image with the Oracle Grid Infrastructure and Oracle Database software binaries installed during Oracle RAC Podman image creation. The resulting images will contain the Oracle Grid Infrastructure and Oracle RAC Database software binaries.
+Before you begin, you must download the grid and database binaries and stage them under `/docker-images/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles/`.
+
+```bash
+ ./buildContainerImage.sh -v 
+```
+Example: Building an Oracle RAC image for version 21.3.0:
+```bash
+ ./buildContainerImage.sh -v 21.3.0
+```
+
+### Building Oracle RAC Database Container Slim Image
+In this document, an Oracle RAC container slim image refers to a container image that does not include the Oracle Grid Infrastructure and Oracle Database software installed during the Oracle RAC image creation. To build an Oracle RAC slim image that does not contain the Oracle RAC Database and Oracle Grid Infrastructure software, run the following command:
+```bash
+ ./buildContainerImage.sh -v  -i -o '--build-arg SLIMMING=true'
+```
+ Example: Building an Oracle RAC slim image for version 21.3.0:
+ ```bash
+ ./buildContainerImage.sh -v 21.3.0 -i -o '--build-arg SLIMMING=true'
+ ```
+ To build an Oracle RAC slim image, you must use `--build-arg SLIMMING=true`.
+ To change the base image for building Oracle RAC images, you must use `--build-arg BASE_OL_IMAGE=oraclelinux:9`.
+
+**Notes**
+- Usage of `./buildContainerImage.sh`:
+  ```text
+  -v: version to build
+  -i: ignore the MD5 checksums
+  -t: user-defined image name and tag (e.g., image_name:tag). Default is set to `oracle/database-rac:` for the RAC image and `oracle/database-rac:-slim` for the RAC slim image.
+  -o: passes on a container build option (e.g., --build-arg SLIMMING=true for the slim image, --build-arg BASE_OL_IMAGE=oraclelinux:9 to change the base image). The default is "--build-arg SLIMMING=false"
+  ```
+- Ensure that you have enough space in `/var/lib/containers` while building the Oracle RAC image. Also, if required, use `export TMPDIR=` so that Podman uses another folder as its temporary cache location instead of the default `/tmp` location.
+- After the `21.3.0` Oracle RAC container image is built, to apply the 21c RU and build the 21c patched image, refer to [Example of how to create a patched database image](./samples/applypatch/README.md).
+- If you are behind a proxy wall, then you must set the `https_proxy` or `http_proxy` environment variable based on your environment before building the image.
+- In the slim image case, the resulting images will not contain the Oracle Grid Infrastructure binaries and Oracle RAC Database binaries.
+
+## Network Management
+
+Before you start the installation, you must plan your private and public network.
Refer to section `Podman Host Preparation` in the publication [Oracle Real Application Clusters Installation Guide](https://docs.oracle.com/cd/F39414_01/racpd/oracle-real-application-clusters-installation-guide-podman-oracle-linux-x86-64.pdf) for Podman Oracle Linux x86-64. +You can create a `network bridge` on every container host so containers running within that host can communicate with each other. For example: create `rac_pub1_nw` for the public network (`10.0.20.0/24`) and `rac_priv1_nw` (`192.168.17.0/24`) for a private network. +You can use any network subnet for testing. In this document we define the public network on `10.0.20.0/24` and the private network on `192.168.17.0/24`. + +```bash + podman network create --driver=bridge --subnet=10.0.20.0/24 rac_pub1_nw + podman network create --driver=bridge --subnet=192.168.17.0/24 rac_priv1_nw --disable-dns --internal + podman network create --driver=bridge --subnet=192.168.18.0/24 rac_priv2_nw --disable-dns --internal + +``` + +- To run Oracle RAC using Oracle Container Runtime for Docker on multiple hosts, you must create one of the following: + +a. [Podman macvlan network](https://docs.podman.io/en/latest/markdown/podman-network-create.1.html) using the following commands: + +```bash + podman network create -d macvlan --subnet=10.0.20.0/24 --gateway=10.0.20.1 -o parent=ens5 rac_pub1_nw + podman network create -d macvlan --subnet=192.168.17.0/24 --gateway=192.168.17.1 -o parent=ens6 rac_priv1_nw --disable-dns --internal + podman network create -d macvlan --subnet=192.168.18.0/24 --gateway=192.168.18.1 -o parent=ens7 rac_priv2_nw --disable-dns --internal +``` + + +b. [Podman ipvlan network](https://docs.docker.com/network/drivers/ipvlan/) using the following commands: +```bash + podman network create -d ipvlan --subnet=10.0.20.0/24 -o parent=ens5 rac_pub1_nw + podman network create -d ipvlan --subnet=192.168.17.0/24 -o parent=ens6 rac_priv1_nw --disable-dns --internal + podman network create -d ipvlan --subnet=192.168.18.0/24 -o parent=ens7 rac_priv2_nw --disable-dns --internal + ``` + +## Password Management +- Specify the secret volume for resetting the grid, oracle, and database user password during node creation or node addition. The volume can be a shared volume among all the containers. For example: + +```bash +mkdir /opt/.secrets/ +``` +- Generate a password file - Edit the `/opt/.secrets/pwdfile.txt` and seed the password for the grid, oracle, and database users. For this deployment scenario, it will be a common password for the grid, oracle, and database users. Run the command: + +```bash +cd /opt/.secrets +openssl genrsa -out key.pem +openssl rsa -in key.pem -out key.pub -pubout +openssl pkeyutl -in pwdfile.txt -out pwdfile.enc -pubin -inkey key.pub -encrypt +rm -rf /opt/.secrets/pwdfile.txt +``` +- Oracle recommends using Podman secrets inside the containers. To create Podman secrets, run the following command: + +```bash +podman secret create pwdsecret /opt/.secrets/pwdfile.enc +podman secret create keysecret /opt/.secrets/key.pem + +podman secret ls +ID NAME DRIVER CREATED UPDATED +7eb7f573905283c808bdabaff keysecret file 13 hours ago 13 hours ago +e3ac963fd736d8bc01dcd44dd pwdsecret file 13 hours ago 13 hours ago + +podman secret inspect +``` +Notes: +- In this example we use `pwdsecret` as the common password for SSH setup between containers for the oracle, grid, and Oracle RAC database users. Also, `keysecret` is used to extract secrets inside the Oracle RAC Containers. 
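+
+For reference, a minimal sketch of seeding the `/opt/.secrets/pwdfile.txt` file mentioned above before it is encrypted with the `openssl` commands (the password value shown is only a placeholder; substitute your own):
+
+```bash
+# Seed a common password for the grid, oracle, and database users,
+# and restrict access to the file before it is encrypted.
+echo "MyComplexPassword##1" > /opt/.secrets/pwdfile.txt
+chmod 600 /opt/.secrets/pwdfile.txt
+```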
+ +## Oracle RAC on Containers Deployment Scenarios +Oracle RAC can be deployed with various scenarios, such as using podman vs podman-compose, NFS vs Block Devices, Oracle RAC Container Image vs Slim Image, with User Defined Response files, and so on. All are covered in detail in the instructions that follow. + +### Oracle RAC Containers on Podman +#### [1. Setup Using Oracle RAC Container Image](./rac-container/racimage/README.md) +#### [2. Setup Using Oracle RAC Container Slim Image](./rac-container/racslimimage/README.md) + +### Oracle RAC Containers on Podman Compose +#### [1. Setup Using Oracle RAC Container Image](../samples/rac-compose/racimage/README.md) +#### [2. Setup Using Oracle RAC Container Slim Image](../samples/rac-compose/racslimimage/README.md) + +## Connecting to an Oracle RAC Database + +**IMPORTANT:** This section assumes that you have successfully created an Oracle RAC cluster using the preceding sections. +Refer to the [README](./CONNECTING.md) for instructions on how to connect to the Oracle RAC Database. + +## Deletion of Node from Oracle RAC Cluster +Refer to [README](./DELETION.md) for instructions on how to delete a Node from Existing Oracle RAC Container Cluster. + +## Building a Patched Oracle RAC Container Image + +If you want to build a patched image based on a base 21.3.0 container image, then refer to the GitHub page [Example of how to create a patched database image](./samples/applypatch/README.md). + +## Sample Container Files for Older Releases + +This project offers example container files for Oracle Grid Infrastructure and Oracle Real Application Clusters for dev and test: + +* Oracle Database 21c Oracle Grid Infrastructure (21.3) for Linux x86-64 +* Oracle Database 21c (21.3) for Linux x86-64 +* Oracle Database 19c Oracle Grid Infrastructure (19.3) for Linux x86-64 +* Oracle Database 19c (19.3) for Linux x86-64 +* Oracle Database 18c Oracle Grid Infrastructure (18.3) for Linux x86-64 +* Oracle Database 18c (18.3) for Linux x86-64 +* Oracle Database 12c Release 2 Oracle Grid Infrastructure (12.2.0.1.0) for Linux x86-64 +* Oracle Database 12c Release 2 (12.2.0.1.0) Enterprise Edition for Linux x86-64 + +To install older releases of Oracle RAC on Podman or Oracle RAC on Docker, refer to the [README.md](./README_1.md) + +## Cleanup +Refer to [README](./CLEANUP.md) for instructions on how to connect to an Oracle RAC Database Container Environment. + +## Support + +At the time of this release, Oracle RAC on Podman is supported for Oracle Linux 9.3 or later. To see the current Linux support certifications, refer to [Oracle RAC on Podman Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/install-and-upgrade.html) + +## License + +To download and run Oracle Grid Infrastructure and Oracle Database, regardless of whether inside or outside a container, you must download the binaries from the Oracle website and accept the license indicated on that page. + +All scripts and files hosted in this repository that are required to build the container images are, unless otherwise noted, released under a UPL 1.0 license. + +## Copyright + +Copyright (c) 2014-2025 Oracle and/or its affiliates. 
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/developers/README.md b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/developers/README.md new file mode 100644 index 0000000000..1bcb281402 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/developers/README.md @@ -0,0 +1,300 @@ +# Oracle Real Application Clusters in Linux Containers for Developers + +Learn about container deployment options for Oracle Real Application Clusters (Oracle RAC) Release 21c (v26.0) + +## Overview of Running Oracle RAC in Containers + +Oracle Real Application Clusters (Oracle RAC) is an option for the award-winning Oracle Database Enterprise Edition. Oracle RAC is a cluster database with a shared cache architecture that overcomes the limitations of traditional shared-nothing and shared-disk approaches to provide highly scalable and available database solutions for all business applications. +Oracle RAC uses Oracle Clusterware as a portable cluster software that allows clustering of independent servers so that they cooperate as a single system and Oracle Automatic Storage Management (Oracle ASM) to provide simplified storage management that is consistent across all servers and storage platforms. +Oracle Clusterware and Oracle ASM are part of the Oracle Grid Infrastructure, which bundles both solutions in an easy-to-deploy software package. For more information on Oracle RAC Database 21c refer to the [Oracle Database documentation](http://docs.oracle.com/en/database/). + +This guide helps you install Oracle RAC on Containers on Host Machines as explained in detail below. With the current release, you prepare the host machine, build or use pre-built Oracle RAC Container Images v26.0, and setup Oracle RAC on Single or Multiple Host machines with Oracle ASM. +In this installation guide, we use [Podman](https://docs.podman.io/en/v3.0/) to create Oracle RAC Containers and manage them. + +## Using this Documentation +To create an Oracle RAC environment, follow these steps: + +- [Oracle Real Application Clusters in Linux Containers for Developers](#oracle-real-application-clusters-in-linux-containers-for-developers) + - [Overview of Running Oracle RAC in Containers](#overview-of-running-oracle-rac-in-containers) + - [Using this Documentation](#using-this-documentation) + - [Before you begin](#before-you-begin) + - [QuickStart](#quickstart) + - [Getting Oracle RAC Database Container Images](#getting-oracle-rac-database-container-images) + - [Networking in Oracle RAC Podman Container Environment](#networking-in-oracle-rac-podman-container-environment) + - [Deploy Oracle RAC 2 Node Environment with NFS Storage Container](#deploy-oracle-rac-2-node-environment-with-nfs-storage-container) + - [Deploy Oracle RAC 2 Node Environment with BlockDevices](#deploy-oracle-rac-2-node-environment-with-blockdevices) + - [Validating Oracle RAC Environment](#validating-oracle-rac-environment) + - [Connecting to an Oracle RAC Database](#connecting-to-an-oracle-rac-database) + - [Environment Variables Explained for above 2 Node RAC on Podman Compose](#environment-variables-explained-for-above-2-node-rac-on-podman-compose) + - [Cleanup](#cleanup) + - [Support](#support) + - [License](#license) + - [Copyright](#copyright) + +## Before you begin +- Before proceeding further, the below prerequisites related to the Oracle RAC (Real Application Cluster) Podman host Environment need to be setup as a preparation steps for the Podman host machine for Oracle RAC Containers. 
For more details related to the preparation of the host machine, refer to [Preparation Steps for running Oracle RAC Database in containers](../../README.md#preparation-steps-for-running-oracle-rac-database-in-containers). +We have pre-created script `setup_rac_host.sh` which will prepare the podman host with the following pre-requisites- + - Validate Host machine for supported Os version(OL >9.3), Kernel(>UEKR7), Memory(>32GB), Swap(>32GB), shm(>4GB) etc. + - Update /etc/sysctl.conf + - Setup node directories for Slim Image + - Setup chronyd service + - Setup tsc clock (if available). + - Install Podman + - Install Podman Compose + - Setup and Load SELinux modules + - Create Oracle RAC Podman secrets + +**Note :** All below steps or commands in this QuickStart needs to be run as a `sudo` or `root` user. +* In this quickstart, our working directory is `/docker-images/OracleDatabase/RAC/OracleRealApplicationClusters/containerfiles` from where all commands are executed. +* Set `secret-password` of your choice below, which is going to be used as a password for the Oracle RAC Container environment. + Execute below command- + ```bash + export RAC_SECRET= + ``` + +- To prepare podman host machine using a pre-created script, copy the file `setup_rac_host.sh` from [/docker-images/OracleDatabase/RAC/ +OracleRealApplicationClusters/containerfiles/setup_rac_host.sh](../containerfiles/setup_rac_host.sh) and execute below - + ```bash + ./setup_rac_host.sh -prepare-rac-env + ``` + Logs- + ```bash + INFO: Finished setting up the pre-requisites for Podman-Host + ``` + +## Getting Oracle RAC Database Container Images + +Oracle RAC is supported for production use on Podman starting with Oracle Database 19c (19.16), and Oracle Database 21c (21.7). You can also deploy Oracle RAC on Podman using the pre-built images available on the Oracle Container Registry. +Refer [this documentation](https://docs.oracle.com/en/operating-systems/oracle-linux/docker/docker-UsingDockerRegistries.html#docker-registry) for details on using Oracle Container Registry and [Getting Oracle RAC Database Container Images](../../README.md#getting-oracle-rac-database-container-images) + +Example of pulling an Oracle RAC Image from the Oracle Container Registry: +```bash +# For Oracle RAC Container Image- +podman pull container-registry.oracle.com/database/rac_ru:21.16 +podman tag container-registry.oracle.com/database/rac_ru:21.16 localhost/oracle/database-rac:21c +``` + +**Notes** +- Use the Oracle `DNSServer` Image to deploy a container providing DNS resolutions. Refer [OracleDNSServer](../../../OracleDNSServer/README.md) +- `OracleRACStorageServer` container image can be used for deploy Oracle RAC with NFS Storage. Refer [OracleRACStorageServer](../../../OracleRACStorageServer/README.md) for details. +- If the Podman bridge network is not available outside your host, you can use the Oracle Connection Manager [CMAN image](../../../OracleConnectionManager/README.md) to access the Oracle RAC Database from outside the host. 
+ +- When Podman Images are ready like the below example used in this quickstart developer guide, you can proceed to the next steps- + ```bash + podman images + localhost/oracle/client-cman 21.3.0 7b095637d7b6 About a minute ago 2.08 GB + localhost/oracle/database-rac 21c dcda5cf71b23 12 hours ago 9.33 GB + localhost/oracle/rac-storage-server latest d233b08a8aed 12 hours ago 443 MB + localhost/oracle/rac-dnsserver latest 7d2301d7ea53 13 hours ago 279 MB + ``` + + +## QuickStart +To become familiar with Oracle RAC on Containers, Oracle recommends that you first start with this QuickStart. + +After you become familiar with Oracle RAC on Containers, you can explore more advanced setups, deployments, features, and so on, as explained in detail in the [Oracle Real Application Clusters](../../../OracleRealApplicationClusters/README.md) + +* To resolve VIPs and SCAN IPs, in this guide we use a DNS container. Before proceeding to the next step, create a [DNS server container](../OracleDNSServer/README.md). +If you have a preconfigured DNS server in your environment, then you can replace `-e DNS_SERVERS=10.0.20.25`, `--dns=10.0.20.25`, `-e DOMAIN=example.info` and `--dns-search=example.info` parameters in the examples in this guide with the `DOMAIN_NAME` and `DNS_SERVER` based on your environment. + +## Networking in Oracle RAC Podman Container Environment +- In this Quick Start, we will create below subnets for Oracle RAC Podman Container Environment- + + | Network Name | Subnet CIDR | Description | + |----------------|--------------|--------------------------------------| + | rac_pub1_nw | 10.0.20.0/24 | Public network for Oracle RAC Podman Container Environment | + | rac_priv1_nw | 192.168.17.0/24 | First private network for Oracle RAC Podman Container Environment | + | rac_priv2_nw | 192.168.18.0/24 | Second private network for Oracle RAC Podman Container Environment | + +## Deploy Oracle RAC 2 Node Environment with NFS Storage Container +- Copy `podman-compose.yml` file from this [/docker-images/OracleDatabase/RAC/ +OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/nfsdevices/podman-compose.yml](../samples/rac-compose/racimage/withoutresponsefiles/nfsdevices/podman-compose.yml) in your working directory. +- Execute the below command from your working directory to export the required environment variables required by the compose file in this quickstart- + ```bash + source ./setup_rac_host.sh -nfs-env + ``` + Logs - + ```bash + INFO: NFS Environment variables setup completed successfully. + ``` + Note: In this example, `DB_SERVICE` is set to as default as an example. If you want to change to a different name, set like below - + ```bash + export DB_SERVICE=service: + ``` + + Note: + - In this example, we have used the below path for NFS Storage Volume. This path must have a minimum 100GB of free space. 
If you want to change it, export by changing it as per your environment before proceeding further - + ```bash + export ORACLE_DBNAME=ORCLCDB + export NFS_STORAGE_VOLUME="/scratch/stage/rac-storage/$ORACLE_DBNAME" + ``` + - If SELinux host is enabled on the machine then execute the following- + ```bash + semanage fcontext -a -t container_file_t /scratch/stage/rac-storage/$ORACLE_DBNAME + restorecon -v /scratch/stage/rac-storage/$ORACLE_DBNAME + ``` +- Execute below to create Podman Networks specific to RAC in this quickstart- + ```bash + ./setup_rac_host.sh -networks + ``` + Logs - + ```bash + INFO: Oracle RAC Container Networks setup successfully + ``` +- Execute below to deploy DNS Containers- + ```bash + ./setup_rac_host.sh -dns + ``` + Logs - + ```bash + ########################################## + INFO: DNS Container is setup successfully. + ########################################## + ``` +- Execute below to deploy Storage Containers- + + ```bash + ./setup_rac_host.sh -storage + ``` + Logs- + ```bash + ############################################################ + INFO: NFS Storage Container exporting /oradata successfully. + ############################################################ + racstorage + ``` +- Execute below to deploy Oracle RAC Containers- + ```bash + ./setup_rac_host.sh -rac + ``` + Logs- + ```bash + ############################################### + INFO: Oracle RAC Containers setup successfully. + ############################################### + ``` +- Optional: If the Podman bridge network is not available outside your host, you can use the Oracle Connection Manager to access the Oracle RAC Database from outside the host. Execute below if you want to deploy CMAN Container as well- + ```bash + ./setup_rac_host.sh -cman + ``` + Logs- + ```bash + ########################################### + INFO: CMAN Container is setup successfully. + ########################################### + ``` +- If you want to cleanup the RAC Container environment, then execute below- + ```bash + ./setup_rac_host.sh -cleanup + ``` + This will cleanup Oracle RAC Containers, Oracle Storage Volume, Oracle RAC Podman Networks, etc. + + Logs- + ```bash + INFO: Oracle Container RAC Environment Cleanup Successfully + ``` + +## Deploy Oracle RAC 2 Node Environment with BlockDevices + +- Copy `podman-compose.yml` file from [/docker-images/OracleDatabase/RAC/ +OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/blockdevices/podman-compose.yml](../samples/rac-compose/racimage/withoutresponsefiles/blockdevices/podman-compose.yml) in your working directory. +- Execute the below command to export the required environment variables required by the compose file in this quickstart- + ```bash + source ./setup_rac_host.sh -blockdevices-env + ``` + Logs- + ```bash + INFO: BlockDevices Environment variables setup completed successfully. + ``` + Note: In this example, DB_SERVICE is set to service:soepdb. If you want to change to a different name, set it like `export DB_SERVICE=service:` + + Note: In this example, we have used the below asm disks. 
If you want to change it, export by changing it as per your environment before proceeding further - + ```bash + export ASM_DISK1="/dev/oracleoci/oraclevdd" + export ASM_DISK2="/dev/oracleoci/oraclevde" + ``` +- Execute below to create Podman Networks specific to RAC in this quickstart- + ```bash + ./setup_rac_host.sh -networks + ``` + Logs- + ```bash + INFO: Oracle RAC Container Networks setup successfully + ``` + +- Execute below to deploy DNS Containers- + ```bash + ./setup_rac_host.sh -dns + ``` + Logs- + ```bash + ########################################## + INFO: DNS Container is setup successfully. + ########################################## + ``` +- Execute below to deploy Oracle RAC Containers- + ```bash + ./setup_rac_host.sh -rac + ``` + Logs- + ```bash + ############################################### + INFO: Oracle RAC Containers setup successfully. + ############################################### + ``` +- Optional: If the Podman bridge network is not available outside your host, you can use the Oracle Connection Manager to access the Oracle RAC Database from outside the host. Execute below if you want to deploy CMAN Container as well- + ```bash + ./setup_rac_host.sh -cman + ``` + Logs- + ```bash + ########################################### + INFO: CMAN Container is setup successfully. + ########################################### + ``` +- If you want to Cleanup the RAC Container environment , then execute the below- + ```bash + ./setup_rac_host.sh -cleanup + ``` + This will cleanup Oracle RAC Containers, Oracle RAC Podman Networks, etc. + Logs- + ```bash + INFO: Oracle Container RAC Environment Cleanup Successfully + ``` + +## Validating Oracle RAC Environment +You can validate if the environment is healthy by running the below command- +```bash +podman ps -a + +58642afb20eb localhost/oracle/rac-dnsserver:latest /bin/sh -c exec $... 23 hours ago Up 23 hours (healthy) rac-dnsserver +a192f4e9092a localhost/oracle/database-rac:21c 10 hours ago Up 10 hours (healthy) racnodep1 +745679457df5 localhost/oracle/database-rac:21c 10 hours ago Up 10 hours (healthy) racnodep2 +``` +Note: +- Look for `(healthy)` next to container names under the `STATUS` section. + +## Environment Variables Explained for above 2 Node RAC on Podman Compose +Refer to [Environment Variables Explained for Oracle RAC on Podman Compose](./ENVVARIABLESCOMPOSE.md) for the explanation of all the environment variables related to Oracle RAC on Podman Compose. Change or Set these environment variables as per your environment. + +## Connecting to an Oracle RAC Database + +**IMPORTANT:** This section assumes that you have successfully created an Oracle RAC cluster using the preceding sections. +Refer to the [README](../CONNECTING.md) for instructions on how to connect to the Oracle RAC Database. + +## Cleanup +Refer to [README](../CLEANUP.md) for instructions on how to cleanup an Oracle RAC Database Container Environment. + +## Support + +At the time of this release, Oracle RAC on Podman is supported for Oracle Linux 9.3 later. To see current Linux support certifications, refer [Oracle RAC on Podman Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/install-and-upgrade.html) + +## License + +To download and run Oracle Grid and Database, regardless of whether inside or outside a container, you must download the binaries from the Oracle website and accept the license indicated on that page. 
+
+All scripts and files hosted in this repository that are required to build the container images are, unless otherwise noted, released under a UPL 1.0 license.
+
+## Copyright
+
+Copyright (c) 2014-2025 Oracle and/or its affiliates.
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/orestart/README.md b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/orestart/README.md
new file mode 100644
index 0000000000..78e0c03f51
--- /dev/null
+++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/orestart/README.md
@@ -0,0 +1,263 @@
+# Oracle Database on Oracle Restart
+
+After you build your Oracle RAC Database Container Image, you can use this image to deploy an Oracle database on Oracle Restart. Oracle Restart improves the availability of your Oracle Database. When you install Oracle Restart, various Oracle components can be automatically restarted after a hardware or software failure, or whenever your database host computer restarts.
+You can choose to deploy Oracle Database on Oracle Restart on block devices, as demonstrated in detail in this document.
+
+Refer to [Getting Oracle RAC Database Container Images](../../../OracleRealApplicationClusters/README.md#getting-oracle-rac-database-container-images) for obtaining Oracle RAC Container Images.
+
+- [Oracle Database on Oracle Restart](#oracle-database-on-oracle-restart)
+ - [Section 1: Prerequisites for Setting up Oracle Restart using Oracle RAC Container Image](#section-1-prerequisites-for-setting-up-oracle-restart-using-oracle-rac-container-image)
+ - [Section 2: Deploying Oracle Restart using Oracle RAC Image](#section-2-deploying-oracle-restart-using-oracle-rac-image)
+ - [Section 2.1.1: Deploying With Block Devices](#section-211-deploying-with-block-devices)
+ - [Section 3: Attach the network to the container](#section-3-attach-the-network-to-the-container)
+ - [Section 4: Start the container](#section-4-start-the-container)
+ - [Section 5: Validate the Oracle Restart Environment](#section-5-validate-the-oracle-restart-environment)
+ - [Section 6: Connecting to Oracle Restart Environment](#section-6-connecting-to-oracle-restart-environment)
+ - [Section 7: Environment Variables Explained for Oracle Database Restart](#section-7-environment-variables-explained-for-oracle-database-restart)
+ - [Cleanup](#cleanup)
+ - [Support](#support)
+ - [License](#license)
+ - [Copyright](#copyright)
+
+
+## Section 1: Prerequisites for Setting up Oracle Restart using Oracle RAC Container Image
+
+Refer to [Preparation Steps for running Oracle RAC Database in containers](../../../OracleRealApplicationClusters/README.md#preparation-steps-for-running-oracle-rac-database-in-containers) to prepare the Podman host machine. Once these prerequisites are complete, you can proceed further.
+
+Ensure that you have created at least one block device with at least 50 GB of storage space that can be accessed by Oracle Restart. You can create more block devices in accordance with your requirements and pass those environment variables and devices to the `podman create` command.
+
+Ensure that the ASM devices do not have any existing file system. To clear any existing file system from the devices, use the following command:
+```bash
+dd if=/dev/zero of=/dev/oracleoci/oraclevdd bs=8k count=10000
+```
+Repeat this command on each shared block device. In this example command, `/dev/oracleoci/oraclevdd` is a shared KVM virtual block device.
+
+For Oracle Restart, you do not need SCANs and VIPs, in comparison to an Oracle RAC cluster.
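+
+Because the `dd` step above must be repeated for every shared block device, a short loop such as the following can be used; the device paths here are examples only, so list the devices that you are actually presenting to the container:
+
+```bash
+# Wipe any existing file system header from each shared block device.
+for disk in /dev/oracleoci/oraclevdd /dev/oracleoci/oraclevde; do
+  dd if=/dev/zero of="${disk}" bs=8k count=10000
+done
+```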
Environment variables that are needed to setup Oracle Restart areas explained in [Section 7: Environment Variables Explained for Oracle Database Restart](#section-7-environment-variables-explained-for-oracle-database-restart) + +**NOTE:** In this example, the Oracle Restart is deployed with DNS server running in a podman container. Please refer [here](../../../OracleDNSServer/README.md) for the documentation. + +### Export Environment Variables for Oracle Database Restart + +```bash +#######COMMON VARIABLE###### +export CRS_ASM_DEVICE_LIST=/dev/asm-disk1 +export DB_ASM_DEVICE_LIST=/dev/asm-disk2 +export RECO_ASM_DEVICE_LIST=/dev/asm-disk3 +export DEVICE="--device=/dev/oracleoci/oraclevdd:/dev/asm-disk1" +export DOMAIN=example.info +export DNS_SERVER_IP=10.0.20.25 +export IMAGE_NAME=oracle/database-rac:21.16.0 +export PUB_BRIDGE=rac_pub1_nw + +######ORACLE RESTART Variable###### +export GPCNODE=dbmc1 +export GPCNODE_PUB_IP=10.0.20.195 +``` + +## Section 2: Deploying Oracle Restart using Oracle RAC Image +### Section 2.1.1: Deploying With Block Devices + +```bash +podman create -t -i \ +--hostname ${GPCNODE} \ +--dns-search ${DOMAIN} \ +--dns ${DNS_SERVER_IP} \ +--shm-size 4G \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--secret pwdsecret \ +--secret keysecret \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +-e DNS_SERVERS=${DNS_SERVER_IP} \ +-e DB_SERVICE="service:soepdb" \ +-e PUBLIC_HOSTS_DOMAIN=${DOMAIN} \ +-e DB_NAME=ORCLCDB \ +-e ORACLE_PDB_NAME=ORCLPDB \ +-e INIT_SGA_SIZE=3G \ +-e INIT_PGA_SIZE=2G \ +-e INSTALL_NODE=${GPCNODE} \ +-e DB_PWD_FILE=pwdsecret \ +-e PWD_KEY=keysecret \ +${DEVICE} \ +-e CRS_ASM_DEVICE_LIST=${CRS_ASM_DEVICE_LIST} \ +-e OP_TYPE=setuprac \ +-e CRS_GPC="true" \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name ${GPCNODE} \ +${IMAGE_NAME} +``` + +## Section 3: Attach the network to the container + +```bash +podman network disconnect podman ${GPCNODE} +podman network connect ${PUB_BRIDGE} --ip ${GPCNODE_PUB_IP} ${GPCNODE} +``` + +## Section 4: Start the container + +Run the following commands to start the container: + +```bash +podman start ${GPCNODE} +``` + +It can take approximately 20 minutes or longer to create and start the Oracle Restart setup . To check the logs, use the following command from another terminal session: + +```bash +podman exec ${GPCNODE} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +When the database configuration is complete, you should see a message similar to the following: + +```bash +################################### +ORACLE RAC DATABASE IS READY TO USE +################################### +``` + +## Section 5: Validate the Oracle Restart Environment +To validate if the environment is healthy, run the following command: +```bash +podman ps -a + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +131b86004040 localhost/oracle/rac-dnsserver:latest /bin/sh -c exec $... 3 days ago Up 3 days (healthy) rac-dnsserver +e010e1122e99 localhost/oracle/database-rac:21.16.0 podman network di... 3 hours ago Up 3 hours (healthy) dbmc1 +``` +**Note:** +- Look for `(healthy)` next to container names under the `STATUS` section. 
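+
+If you prefer to wait for the container to report a healthy status from a script rather than re-running `podman ps` manually, a simple polling loop such as the following can be used (it assumes `${GPCNODE}` is still exported from the earlier steps):
+
+```bash
+# Poll until the Oracle Restart container reports a healthy status.
+# ${GPCNODE} is the container name exported earlier in this guide.
+until podman ps --filter "name=${GPCNODE}" --format "{{.Status}}" | grep -q "healthy"; do
+  echo "Waiting for ${GPCNODE} to become healthy ..."
+  sleep 60
+done
+echo "${GPCNODE} is healthy."
+```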
+ +## Section 6: Connecting to Oracle Restart Environment + +**IMPORTANT:** Before you connnect to the environment, you must first successfully create an Oracle Restart Environment as described in the preceding sections. + +To connect to the container execute following command: +```bash +podman exec -i -t ${GPCNODE} /bin/bash +``` +### Validating Oracle Grid Infrastructure +Validate if Oracle Grid Infrastructure Stack is up and running from within container: +```bash +# Verify the status of Oracle Restart stack: +su - grid +#Verify the status of Oracle Clusterware stack: +[grid@dbmc1 ~]$ crsctl check cluster -all +************************************************************** +dbmc1: +CRS-4537: Cluster Ready Services is online +CRS-4529: Cluster Synchronization Services is online +CRS-4533: Event Manager is online +************************************************************** + +[grid@dbmc1 u01]$ crsctl check crs +CRS-4638: Oracle High Availability Services is online +CRS-4537: Cluster Ready Services is online +CRS-4529: Cluster Synchronization Services is online +CRS-4533: Event Manager is online + +[grid@dbmc1 u01]$ crsctl stat res -t +-------------------------------------------------------------------------------- +Name Target State Server State details +-------------------------------------------------------------------------------- +Local Resources +-------------------------------------------------------------------------------- +ora.LISTENER.lsnr + ONLINE ONLINE dbmc1 STABLE +ora.net1.network + ONLINE ONLINE dbmc1 STABLE +ora.ons + ONLINE ONLINE dbmc1 STABLE +-------------------------------------------------------------------------------- +Cluster Resources +-------------------------------------------------------------------------------- +ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup) + 1 ONLINE ONLINE dbmc1 STABLE +ora.DATA.dg(ora.asmgroup) + 1 ONLINE ONLINE dbmc1 STABLE +ora.asm(ora.asmgroup) + 1 ONLINE ONLINE dbmc1 Started,STABLE +ora.asmnet1.asmnetwork(ora.asmgroup) + 1 ONLINE ONLINE dbmc1 STABLE +ora.cvu + 1 ONLINE ONLINE dbmc1 STABLE +ora.orclcdb.db + 1 ONLINE ONLINE dbmc1 Open,HOME=/u01/app/o + racle/product/21c/d + bhome_1,STABLE +ora.orclcdb.orclcdb_orclpdb.svc + 1 ONLINE ONLINE dbmc1 STABLE +ora.orclcdb.orclpdb.pdb + 1 ONLINE ONLINE dbmc1 READ WRITE,STABLE +ora.orclcdb.soepdb.svc + 1 ONLINE ONLINE dbmc1 STABLE +ora.dbmc1.vip + 1 ONLINE ONLINE dbmc1 STABLE +-------------------------------------------------------------------------------- + +[grid@dbmc1 ~]$ /u01/app/21c/grid/bin/olsnodes -n +dbmc1 1 +``` +### Validating Oracle Restart Database +Validate Oracle Restart Database from within Container- +```bash +su - oracle + +#Confirm the status of Oracle Database instances: +[oracle@dbmc1 ~]$ srvctl status database -d ORCLCDB +Instance ORCLCDB is running on node dbmc1 +``` + + +## Section 7: Environment Variables Explained for Oracle Database Restart +| Variable | Default Value | Description | +|------------------------|-----------------------------|----------------------------------------------------------| +| CRS_ASM_DEVICE_LIST | /dev/asm-disk1 | Path to the ASM device for CRS | +| DB_ASM_DEVICE_LIST | /dev/asm-disk2 | Path to the ASM device for the database | +| RECO_ASM_DEVICE_LIST | /dev/asm-disk3 | Path to the ASM device for recovery | +| DEVICE | --device=/dev/oracleoci/oraclevdd:/dev/asm-disk1 | Device mapping for Docker container | +| DOMAIN | example.info | Domain name for the environment | +| DNS_SERVER_IP | 10.0.20.25 | IP address of the DNS server | +| IMAGE_NAME | 
oracle/database-rac:21.16.0 | Name of the container image for Oracle RAC |
+| PUB_BRIDGE | rac_pub1_nw | Name of the public bridge network interface |
+| GPCNODE | dbmc1 | Hostname of the GPC host |
+| GPCNODE_PUB_IP | 10.0.20.195 | Public IP address of the GPC node |
+
+## Cleanup
+Execute the below commands to clean up the Oracle Restart container environment:
+```bash
+podman rm -f ${GPCNODE}
+podman network inspect rac_pub1_nw &> /dev/null && podman network rm rac_pub1_nw
+```
+
+Clean up the ASM disks:
+```bash
+dd if=/dev/zero of=/dev/oracleoci/oraclevdd bs=8k count=10000
+```
+
+## Support
+
+At the time of this release, Oracle Restart is supported on Podman for Oracle Linux 8.5 or later. To review the current Linux support certifications, see the [Oracle RAC on Podman Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/install-and-upgrade.html).
+
+## License
+
+To download and run Oracle Grid and Database, regardless of whether inside or outside a container, you must download the binaries from the Oracle website and accept the license indicated on that page.
+
+All scripts and files hosted in this repository that are required to build the container images are, unless otherwise noted, released under a UPL 1.0 license.
+
+## Copyright
+
+Copyright (c) 2014-2025 Oracle and/or its affiliates.
\ No newline at end of file
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/README.md b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/README.md
new file mode 100644
index 0000000000..38c365c41c
--- /dev/null
+++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/README.md
@@ -0,0 +1,762 @@
+# Oracle RAC on Podman using Oracle RAC Image
+
+Refer to the following instructions to set up Oracle RAC on Podman using an Oracle RAC Image for various scenarios.
+ +- [Oracle RAC on Podman using Oracle RAC Image](#oracle-rac-on-podman-using-oracle-rac-image) + - [Section 1: Prerequisites for Setting up Oracle RAC on Container using Oracle RAC Image](#section-1-prerequisites-for-setting-up-oracle-rac-database-on-containers-using-oracle-rac-image) + - [Section 2: Deploying Two-node Oracle RAC on Podman using Oracle RAC Image](#section-2-deploying-two-node-oracle-rac-on-podman-using-oracle-rac-image) + - [Section 2.1: Deploying Two-Node Oracle RAC on Podman Using Oracle RAC image Without Using Response Files](#section-21-deploying-two-node-oracle-rac-on-podman-using-an-oracle-rac-image-without-using-response-files) + - [Section 2.1.1: Deploying With Block Devices](#section-211-deploying-with-block-devices) + - [Section 2.1.2: Deploying with NFS Storage Devices](#section-212-deploying-with-nfs-storage-devices) + - [Section 2.2: Deploying Two-node Oracle RAC on Podman Using Oracle RAC Image with User-defined response files](#section-22-deploying-two-node-oracle-rac-setup-on-podman-using-oracle-rac-image-using-user-defined-response-files) + - [Section 2.2.1: Deploying With block devices](#section-221-deploying-with-blockdevices) + - [Section 2.2.2: Deploying with NFS storage devices](#section-222-deploying-with-nfs-storage-devices) + - [Section 3: Attach the Network to Containers](#section-3-attach-the-network-to-containers) + - [Attach the network to racnodep1](#attach-the-network-to-racnodep1) + - [Attach the network to racnodep2](#attach-the-network-to-racnodep2) + - [Section 4: Start the Containers](#section-4-start-the-containers) + - [Section 5: Validate the Oracle RAC Environment](#section-5-validate-the-oracle-rac-environment) + - [Section 6: Connecting to Oracle RAC environment](#section-6-connecting-to-oracle-rac-environment) + - [Section 7: Example of Node Addition to Oracle RAC Database Based on Oracle RAC Image with block devices](#section-7-example-of-node-addition-to-oracle-rac-database-based-on-oracle-rac-image-with-block-devices) + - [Section 7.1: Example of node addition to Oracle RAC Database based on Oracle RAC image without Response File](#section-71-example-of-node-addition-to-oracle-rac-database-based-on-oracle-rac-image-without-response-file) + - [Section 8: Example of Node Addition to Oracle RAC Database Based on Oracle RAC Image with NFS Storage Devices](#section-8-example-of-node-addition-to-oracle-rac-database-based-on-oracle-rac-image-with-nfs-storage-devices) + - [Section 8.1: Example of node addition to Oracle RAC Database based on Oracle RAC Image without Response File](#section-81-example-of-node-addition-to-oracle-rac-database-based-on-oracle-rac-image-without-response-file) + - [Environment Variables for Oracle RAC on Containers](#environment-variables-for-oracle-rac-on-containers) + - [Cleanup](#cleanup) + - [Support](#support) + - [License](#license) + - [Copyright](#copyright) + +## Oracle RAC Setup on Podman using Oracle RAC Image + +You can deploy multi-node Oracle RAC Database using Oracle RAC Database Container Images either on block devices or on NFS storage devices. You can also choose to deploy the images either by using Response Files that you define, or without using response files. All of these are demonstrated in detail in this document. + +## Section 1: Prerequisites for Setting up Oracle RAC Database on containers using Oracle RAC image +**IMPORTANT:** Complete all of the steps specified in this section (customize for your environment) before you proceed to the next section. 
Completing prerequisite steps is a requirement for successful configuration. + + +* Complete the [Preparation Steps for running Oracle RAC Database in containers](../../../README.md#preparation-steps-for-running-oracle-rac-database-in-containers) +* If you are planning to use Oracle Connection Manager, then create an Oracle Connection Manager container image. See [Oracle Connection Manager in Linux Containers](../../../../OracleConnectionManager/README.md) +* Ensure the Oracle RAC Database Container Image is present. You can either pull the image from the Oracle Container Registry by following [Getting Oracle RAC Database Container Images](../../../README.md#getting-oracle-rac-database-container-images), or you can create the Oracle RAC Container Patched image by following [Building a Patched Oracle RAC Container Image](../../../README.md#building-a-patched-oracle-rac-container-image) +```bash +# podman images|grep database-rac +localhost/oracle/database-rac 21c 41239091d2ac 16 minutes ago 20.2 GB +``` +* Configure the [Network Management](../../../README.md#network-management). +* Configure the [Password Management](../../../README.md#password-management). + +## Section 2: Deploying Two-node Oracle RAC on Podman Using Oracle RAC Image + +Use the following instructions to set up Oracle RAC Database on Podman using an Oracle RAC Database Container image for various scenarios, such as deploying with user-defined response files or deploying without user-defined response files. Oracle RAC Database setup can also be done either on block devices or on NFS storage devices. + +### Section 2.1: Deploying Two-node Oracle RAC on Podman using an Oracle RAC image without using response files + +To set up Oracle RAC Database on Podman using an Oracle RAC Database Container Image without providing response files, complete these steps. + +#### Section 2.1.1: Deploying With Block Devices +##### Section 2.1.1.1: Prerequisites for setting up Oracle RAC with block devices + +Ensure that you have created at least one block device with at least 50 GB of storage space that can be accessed by two Oracle RAC Nodes, and can be shared between them. You can create more block devices in accordance with your requirements, and pass those devices and the corresponding environment variables to the `podman create` command, as well as to the Oracle Grid Infrastructure (grid) response files. + +**Note:** You can skip this step if you are planning to use NFS storage devices. + +Ensure that the ASM devices do not have any existing file system. To clear any existing file system from the devices, use the following command: + +```bash +dd if=/dev/zero of=/dev/oracleoci/oraclevdd bs=8k count=10000 +``` + +Repeat this command on each shared block device. In this example command, `/dev/oracleoci/oraclevdd` is a shared KVM virtual block device. + +##### Section 2.1.1.2: Create Oracle RAC Containers + +Create the Oracle RAC containers using the Oracle RAC Database Container Image.
For details about environment variables, see [Environment Variables Explained](#environment-variables-for-oracle-rac-on-containers) + +You can use the following example to create a container on host `racnodep1`: + +```bash +podman create -t -i \ +--hostname racnodep1 \ +--dns-search "example.info" \ +--dns 10.0.20.25 \ +--shm-size 4G \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \ +--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--secret pwdsecret \ +--secret keysecret \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +-e DNS_SERVERS="10.0.20.25" \ +-e DB_SERVICE=service:soepdb \ +-e CRS_PRIVATE_IP1=192.168.17.170 \ +-e CRS_PRIVATE_IP2=192.168.18.170 \ +-e CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" \ +-e SCAN_NAME=racnodepc1-scan \ +-e INIT_SGA_SIZE=3G \ +-e INIT_PGA_SIZE=2G \ +-e INSTALL_NODE=racnodep1 \ +-e DB_PWD_FILE=pwdsecret \ +-e PWD_KEY=keysecret \ +--device=/dev/oracleoci/oraclevdd:/dev/asm-disk1 \ +--device=/dev/oracleoci/oraclevde:/dev/asm-disk2 \ +-e CRS_ASM_DEVICE_LIST=/dev/asm-disk1,/dev/asm-disk2 \ +-e OP_TYPE=setuprac \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name racnodep1 \ +localhost/oracle/database-rac:21c +``` + +To create another container with hostname `racnodep2`, use the following command: +```bash +podman create -t -i \ +--hostname racnodep2 \ +--dns-search "example.info" \ +--dns 10.0.20.25 \ +--shm-size 4G \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \ +--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--secret pwdsecret \ +--secret keysecret \ +-e DNS_SERVERS="10.0.20.25" \ +-e DB_SERVICE=service:soepdb \ +-e CRS_PRIVATE_IP1=192.168.17.171 \ +-e CRS_PRIVATE_IP2=192.168.18.171 \ +-e CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" \ +-e SCAN_NAME=racnodepc1-scan \ +-e INIT_SGA_SIZE=3G \ +-e INIT_PGA_SIZE=2G \ +-e INSTALL_NODE=racnodep1 \ +-e DB_PWD_FILE=pwdsecret \ +-e PWD_KEY=keysecret \ +--device=/dev/oracleoci/oraclevdd:/dev/asm-disk1 \ +--device=/dev/oracleoci/oraclevde:/dev/asm-disk2 \ +-e CRS_ASM_DEVICE_LIST=/dev/asm-disk1,/dev/asm-disk2 \ +-e OP_TYPE=setuprac \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name racnodep2 \ +localhost/oracle/database-rac:21c +``` +#### Section 2.1.2: Deploying with NFS Storage Devices + +##### Section 2.1.2.1: Prerequisites for setting up Oracle RAC with NFS storage devices + +* Create an NFS Volume to be used for ASM Devices for Oracle RAC. 
See the section `Configuring NFS for Storage for Oracle RAC on Podman` in [Oracle Real Application Clusters Installation Guide for Podman](https://docs.oracle.com/cd/F39414_01/racpd/oracle-real-application-clusters-installation-guide-podman-oracle-linux-x86-64.pdf) for more details. + +**Note:** You can skip this step if you are planning to use block devices for storage. +* Make sure the ASM NFS Storage devices do not have any existing file system. + +##### Section 2.1.2.2: Create Oracle RAC Containers +Create the Oracle RAC Database containers using the Oracle RAC Database Container Image. For details about environment variables, see [Environment Variables Explained](#environment-variables-for-oracle-rac-on-containers). You can use the following example to create a container on host `racnodep1`: + +```bash +podman create -t -i \ +--hostname racnodep1 \ +--dns-search "example.info" \ +--dns 10.0.20.25 \ +--shm-size 4G \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \ +--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--secret pwdsecret \ +--secret keysecret \ +-e DNS_SERVERS="10.0.20.25" \ +-e DB_SERVICE=service:soepdb \ +-e CRS_PRIVATE_IP1=192.168.17.170 \ +-e CRS_PRIVATE_IP2=192.168.18.170 \ +-e CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" \ +-e SCAN_NAME=racnodepc1-scan \ +-e INIT_SGA_SIZE=3G \ +-e INIT_PGA_SIZE=2G \ +-e INSTALL_NODE=racnodep1 \ +-e DB_PWD_FILE=pwdsecret \ +-e PWD_KEY=keysecret \ +--volume racstorage:/oradata \ +-e CRS_ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ +-e CRS_ASM_DISCOVERY_STRING="/oradata/asm_disk*" \ +-e OP_TYPE=setuprac \ +-e ASM_ON_NAS=True \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name racnodep1 \ +localhost/oracle/database-rac:21c +``` + +To create another container on host `racnodep2`, use the following command: +```bash +podman create -t -i \ +--hostname racnodep2 \ +--dns-search "example.info" \ +--dns 10.0.20.25 \ +--shm-size 4G \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \ +--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--secret pwdsecret \ +--secret keysecret \ +-e DNS_SERVERS="10.0.20.25" \ +-e DB_SERVICE=service:soepdb \ +-e CRS_PRIVATE_IP1=192.168.17.171 \ +-e CRS_PRIVATE_IP2=192.168.18.171 \ +-e CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" \ +-e SCAN_NAME=racnodepc1-scan \ +-e INIT_SGA_SIZE=3G \ +-e INIT_PGA_SIZE=2G \ +-e INSTALL_NODE=racnodep1 \ +-e DB_PWD_FILE=pwdsecret \ +-e PWD_KEY=keysecret \ +--volume racstorage:/oradata \ +-e 
CRS_ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ +-e CRS_ASM_DISCOVERY_STRING="/oradata/asm_disk*" \ +-e OP_TYPE=setuprac \ +-e ASM_ON_NAS=True \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name racnodep2 \ +localhost/oracle/database-rac:21c +``` + +### Section 2.2: Deploying Two-Node Oracle RAC Setup on Podman using Oracle RAC Image Using User Defined Response files + +Follow these instructions to set up Oracle RAC on Podman using the Oracle RAC image with user-defined response files. + +#### Section 2.2.1: Deploying With BlockDevices + +##### Prerequisites for setting up Oracle RAC with User-Defined files +- On the shared folder between both RAC nodes, create a file named [grid_setup_21c.rsp](withresponsefiles/blockdevices/grid_setup_21c.rsp). In this example, we copy the file to `/scratch/common_scripts/podman/rac/grid_setup_21c.rsp` +- On the shared folder between both RAC nodes, create a file named [dbca_21c.rsp](withresponsefiles/dbca_21c.rsp). In this example, we copy the file to `/scratch/common_scripts/podman/rac/dbca_21c.rsp` +- If SELinux is enabled on the host machine, then also run the following commands: + ```bash + semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/grid_setup_21c.rsp + restorecon -v /scratch/common_scripts/podman/rac/grid_setup_21c.rsp + semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/dbca_21c.rsp + restorecon -v /scratch/common_scripts/podman/rac/dbca_21c.rsp + ``` + **Note:** Passwords defined in the response files are overwritten by the passwords defined using `podman secret`, to avoid exposing passwords in plain text. +You can skip this step if you are not planning to use **User Defined Response Files for RAC**.
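+ +For reference, the `pwdsecret` and `keysecret` values passed with `--secret` in the examples below are ordinary Podman secrets, prepared as part of the Password Management step referenced in Section 1. The following is only a minimal, hypothetical sketch of how such secrets might be created; the file paths and the encryption approach are assumptions, and the Password Management documentation is the authoritative procedure: + +```bash +# Hypothetical sketch only; follow the Password Management steps referenced in Section 1. +mkdir -p /opt/.secrets +openssl rand -out /opt/.secrets/pwd.key -hex 64                          # encryption key (assumed approach) +echo "<db_password>" > /opt/.secrets/common_os_pwdfile                   # temporary plain-text password +openssl enc -aes-256-cbc -salt -in /opt/.secrets/common_os_pwdfile \ +  -out /opt/.secrets/common_os_pwdfile.enc -pass file:/opt/.secrets/pwd.key +rm -f /opt/.secrets/common_os_pwdfile                                    # remove the plain-text copy +podman secret create pwdsecret /opt/.secrets/common_os_pwdfile.enc +podman secret create keysecret /opt/.secrets/pwd.key +```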
+ +Create first Oracle RAC Container `racnodep1`: +```bash +podman create -t -i \ +--hostname racnodep1 \ +--dns-search "example.info" \ +--dns 10.0.20.25 \ +--shm-size 4G \ +--volume /scratch/common_scripts/podman/rac/grid_setup_21c.rsp:/tmp/grid_21c.rsp \ +--volume /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \ +--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--secret pwdsecret \ +--secret keysecret \ +-e DNS_SERVERS="10.0.20.25" \ +-e DB_SERVICE=service:soepdb \ +-e GRID_RESPONSE_FILE=/tmp/grid_21c.rsp \ +-e DBCA_RESPONSE_FILE=/tmp/dbca_21c.rsp \ +-e CRS_PRIVATE_IP1=192.168.17.170 \ +-e CRS_PRIVATE_IP2=192.168.18.170 \ +-e CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" \ +-e SCAN_NAME=racnodepc1-scan \ +-e INIT_SGA_SIZE=3G \ +-e INIT_PGA_SIZE=2G \ +-e INSTALL_NODE=racnodep1 \ +-e DB_PWD_FILE=pwdsecret \ +-e PWD_KEY=keysecret \ +--device=/dev/oracleoci/oraclevdd:/dev/asm-disk1 \ +--device=/dev/oracleoci/oraclevde:/dev/asm-disk2 \ +-e CRS_ASM_DEVICE_LIST=/dev/asm-disk1,/dev/asm-disk2 \ +-e OP_TYPE=setuprac \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name racnodep1 \ +localhost/oracle/database-rac:21c +``` + +Create another Oracle RAC container `racnodep2`: +```bash +podman create -t -i \ +--hostname racnodep2 \ +--dns-search "example.info" \ +--dns 10.0.20.25 \ +--shm-size 4G \ +--volume /scratch/common_scripts/podman/rac/grid_setup_21c.rsp:/tmp/grid_21c.rsp \ +--volume /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \ +--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--secret pwdsecret \ +--secret keysecret \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +-e DNS_SERVERS="10.0.20.25" \ +-e DB_SERVICE=service:soepdb \ +-e GRID_RESPONSE_FILE=/tmp/grid_21c.rsp \ +-e DBCA_RESPONSE_FILE=/tmp/dbca_21c.rsp \ +-e CRS_PRIVATE_IP1=192.168.17.171 \ +-e CRS_PRIVATE_IP2=192.168.18.171 \ +-e CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" \ +-e SCAN_NAME=racnodepc1-scan \ +-e INIT_SGA_SIZE=3G \ +-e INIT_PGA_SIZE=2G \ +-e INSTALL_NODE=racnodep1 \ +-e DB_PWD_FILE=pwdsecret \ +-e PWD_KEY=keysecret \ +--device=/dev/oracleoci/oraclevdd:/dev/asm-disk1 \ +--device=/dev/oracleoci/oraclevde:/dev/asm-disk2 \ +-e CRS_ASM_DEVICE_LIST=/dev/asm-disk1,/dev/asm-disk2 \ +-e OP_TYPE=setuprac \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name racnodep2 \ +localhost/oracle/database-rac:21c +``` +#### Section 2.2.2: Deploying with NFS storage devices + +##### Prerequisites for setting up Oracle RAC with User-Defined Files +- Create a NFS Volume to be used for ASM 
Devices for Oracle RAC. See the section `Configuring NFS for Storage for Oracle RAC on Podman` in [Oracle Real Application Clusters Installation Guide for Podman](https://docs.oracle.com/cd/F39414_01/racpd/oracle-real-application-clusters-installation-guide-podman-oracle-linux-x86-64.pdf) for more details. + + **Note:** You can skip this step if you are planning to use block devices for storage. + +- Make sure the ASM NFS Storage devices do not have any existing file system. +- On the shared folder between both Oracle RAC nodes, create a file named [grid_setup_21c.rsp](withresponsefiles/nfsdevices/grid_setup_21c.rsp). In this example, we copy the file to `/scratch/common_scripts/podman/rac/grid_setup_21c.rsp` +- On the shared folder between both RAC nodes, create a file named [dbca_21c.rsp](withresponsefiles/dbca_21c.rsp). In this example, we copy the file to `/scratch/common_scripts/podman/rac/dbca_21c.rsp` +- If SELinux is enabled on the host machine, then also run the following commands: + ```bash + semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/grid_setup_21c.rsp + restorecon -v /scratch/common_scripts/podman/rac/grid_setup_21c.rsp + semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/dbca_21c.rsp + restorecon -v /scratch/common_scripts/podman/rac/dbca_21c.rsp + ``` +**Note:** You can skip this step if you are not planning to deploy with user-defined Response Files for Oracle RAC. + +Create the first Oracle RAC Container. In this example, the hostname is `racnodep1` + +```bash +podman create -t -i \ +--hostname racnodep1 \ +--dns-search "example.info" \ +--dns 10.0.20.25 \ +--shm-size 4G \ +--volume /scratch/common_scripts/podman/rac/grid_setup_21c.rsp:/tmp/grid_21c.rsp \ +--volume /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \ +--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--secret pwdsecret \ +--secret keysecret \ +-e DNS_SERVERS="10.0.20.25" \ +-e DB_SERVICE=service:soepdb \ +-e GRID_RESPONSE_FILE=/tmp/grid_21c.rsp \ +-e DBCA_RESPONSE_FILE=/tmp/dbca_21c.rsp \ +-e CRS_PRIVATE_IP1=192.168.17.170 \ +-e CRS_PRIVATE_IP2=192.168.18.170 \ +-e CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" \ +-e SCAN_NAME=racnodepc1-scan \ +-e INIT_SGA_SIZE=3G \ +-e INIT_PGA_SIZE=2G \ +-e INSTALL_NODE=racnodep1 \ +-e DB_PWD_FILE=pwdsecret \ +-e PWD_KEY=keysecret \ +--volume racstorage:/oradata \ +-e CRS_ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ +-e CRS_ASM_DISCOVERY_STRING="/oradata/asm_disk*" \ +-e OP_TYPE=setuprac \ +-e ASM_ON_NAS=True \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name racnodep1 \ +localhost/oracle/database-rac:21c +``` + +Create another Oracle RAC container.
In this example, the hostname is `racnodep2` +```bash +podman create -t -i \ +--hostname racnodep2 \ +--dns-search "example.info" \ +--dns 10.0.20.25 \ +--shm-size 4G \ +--volume /scratch/common_scripts/podman/rac/grid_setup_21c.rsp:/tmp/grid_21c.rsp \ +--volume /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \ +--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--secret pwdsecret \ +--secret keysecret \ +-e DNS_SERVERS="10.0.20.25" \ +-e DB_SERVICE=service:soepdb \ +-e GRID_RESPONSE_FILE=/tmp/grid_21c.rsp \ +-e DBCA_RESPONSE_FILE=/tmp/dbca_21c.rsp \ +-e CRS_PRIVATE_IP1=192.168.17.171 \ +-e CRS_PRIVATE_IP2=192.168.18.171 \ +-e CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" \ +-e SCAN_NAME=racnodepc1-scan \ +-e INIT_SGA_SIZE=3G \ +-e INIT_PGA_SIZE=2G \ +-e INSTALL_NODE=racnodep1 \ +-e DB_PWD_FILE=pwdsecret \ +-e PWD_KEY=keysecret \ +--volume racstorage:/oradata \ +-e CRS_ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ +-e CRS_ASM_DISCOVERY_STRING="/oradata/asm_disk*" \ +-e OP_TYPE=setuprac \ +-e ASM_ON_NAS=True \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name racnodep2 \ +localhost/oracle/database-rac:21c +``` +**Note:** +- To use this example, change the environment variables based on your environment. See [Environment Variables for Oracle RAC on Containers](#environment-variables-for-oracle-rac-on-containers) for more details. +- In the example that follows, we use a podman bridge network with one public and two private networks. For this reason,`--sysctl 'net.ipv4.conf.eth1.rp_filter=2' --sysctl 'net.ipv4.conf.eth2.rp_filter=2` is required when we use two private networks. If your use case is different, then this sysctl configuration for the Podman Bridge can be ignored. +- If you are planning to place database files such as datafiles and archivelogs on different diskgroups, then you must pass these parameters: `DB_ASM_DEVICE_LIST`, `RECO_ASM_DEVICE_LIST`,`DB_DATA_FILE_DEST`, `DB_RECOVERY_FILE_DEST`. For more information, see [Environment Variables for Oracle RAC on Containers](#environment-variables-for-oracle-rac-on-containers). + +## Section 3: Attach the Network to Containers + +You must assign the podman networks created based on the preceding examples. Complete the following tasks: + +### Attach the network to racnodep1 + +```bash +podman network disconnect podman racnodep1 +podman network connect rac_pub1_nw --ip 10.0.20.170 racnodep1 +podman network connect rac_priv1_nw --ip 192.168.17.170 racnodep1 +podman network connect rac_priv2_nw --ip 192.168.18.170 racnodep1 +``` + +### Attach the network to racnodep2 + +```bash +podman network disconnect podman racnodep2 +podman network connect rac_pub1_nw --ip 10.0.20.171 racnodep2 +podman network connect rac_priv1_nw --ip 192.168.17.171 racnodep2 +podman network connect rac_priv2_nw --ip 192.168.18.171 racnodep2 +``` + +## Section 4: Start the containers + +You need to start the containers. 
Run the following commands: + +```bash +podman start racnodep1 +podman start racnodep2 +``` + +It can take approximately 20 minutes or longer to create and set up a two-node Oracle RAC Database on Containers. To check the logs, use the following command from another terminal session: + +```bash +podman exec racnodep1 /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +When the database configuration is complete, you should see a message, similar to the following, on the installing node i.e. `racnodep1` in this case: + +```bash +#################################### +ORACLE RAC DATABASE IS READY TO USE! +#################################### +``` + +Note: +- If you see any error related to files mounted on a container volume not detected in the podman logs, then make sure they are labeled correctly with the `container_file_t` context. You can use `ls -lZ ` to see the security context set on files. + + For example: + ```bash + semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/dbca_21c.rsp + restorecon -vF /scratch/common_scripts/podman/rac/dbca_21c.rsp + ls -lZ /scratch/common_scripts/podman/rac/dbca_21c.rsp + ``` + +## Section 5: Validate the Oracle RAC Environment +To validate if the environment is healthy, run the following command: +```bash +podman ps -a + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +f1345fd4047b localhost/oracle/rac-dnsserver:latest /bin/sh -c exec $... 8 hours ago Up 8 hours (healthy) rac-dnsserver +2f42e49758d1 localhost/oracle/database-rac:21c 46 minutes ago Up 37 minutes (healthy) racnodep1 +a27fceea9fe6 localhost/oracle/database-rac:21c 46 minutes ago Up 37 minutes (healthy) racnodep2 +``` +**Note:** +- Look for `(healthy)` next to container names under the `STATUS` section. + +## Section 6: Connecting to Oracle RAC Environment + +**IMPORTANT:** Before you connnect to the environment, you must first successfully create an Oracle RAC Database as described in the preceding sections. +See [Connecting to an Oracle RAC Database](../../CONNECTING.md) for instructions on how to connect to the Oracle RAC Database. + +## Section 7: Example of Node Addition to Oracle RAC Database Based on Oracle RAC Image with Block Devices + +### Section 7.1: Example of node addition to Oracle RAC Database based on Oracle RAC Image without Response File +The following is an example of how to add an additional node to the existing Oracle RAC two-node cluster using the Oracle RAC Database Container Image and without user-defined response files. + +Create additional container for the new Oracle RAC Database Node. 
In this example, we create the container with hostname `racnodep3`: +```bash +podman create -t -i \ +--hostname racnodep3 \ +--dns-search "example.info" \ +--dns 10.0.20.25 \ +--shm-size 4G \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \ +--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--secret pwdsecret \ +--secret keysecret \ +-e DNS_SERVERS="10.0.20.25" \ +-e DB_SERVICE=service:soepdb \ +-e CRS_PRIVATE_IP1=192.168.17.172 \ +-e CRS_PRIVATE_IP2=192.168.18.172 \ +-e CRS_NODES="\"pubhost:racnodep3,viphost:racnodep3-vip\"" \ +-e SCAN_NAME=racnodepc1-scan \ +-e INIT_SGA_SIZE=3G \ +-e INIT_PGA_SIZE=2G \ +-e DB_PWD_FILE=pwdsecret \ +-e PWD_KEY=keysecret \ +--device=/dev/oracleoci/oraclevdd:/dev/asm-disk1 \ +--device=/dev/oracleoci/oraclevde:/dev/asm-disk2 \ +-e CRS_ASM_DEVICE_LIST=/dev/asm-disk1,/dev/asm-disk2 \ +-e OP_TYPE=racaddnode \ +-e EXISTING_CLS_NODE="racnodep1,racnodep2" \ +-e INSTALL_NODE=racnodep3 \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name racnodep3 \ +localhost/oracle/database-rac:21c +``` + +Attach the networks to the new container and start the container: +```bash +podman network disconnect podman racnodep3 +podman network connect rac_pub1_nw --ip 10.0.20.172 racnodep3 +podman network connect rac_priv1_nw --ip 192.168.17.172 racnodep3 +podman network connect rac_priv2_nw --ip 192.168.18.172 racnodep3 +podman start racnodep3 +``` + +Monitor the new container logs using below command: +```bash +podman exec racnodep3 /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` +When the Oracle RAC container has completed being set up, you should see a message similar to the following: +```bash +======================================================== +Oracle Database ORCLCDB3 is up and running on racnodep3. +======================================================== +``` + +## Section 8: Example of Node Addition to Oracle RAC Database Based on Oracle RAC Image with NFS Storage Devices + +### Section 8.1: Example of node addition to Oracle RAC Database based on Oracle RAC Image without Response File +In the following example, we add an additional node to the existing Oracle RAC two-node cluster using the Oracle RAC Database Container Image without user-defined response files. + +Create additional container for the new Oracle RAC Database Node. 
In this example, the hostname is `racnodep3` + +```bash +podman create -t -i \ +--hostname racnodep3 \ +--dns-search "example.info" \ +--dns 10.0.20.25 \ +--shm-size 4G \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \ +--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--secret pwdsecret \ +--secret keysecret \ +-e DNS_SERVERS="10.0.20.25" \ +-e DB_SERVICE=service:soepdb \ +-e CRS_PRIVATE_IP1=192.168.17.172 \ +-e CRS_PRIVATE_IP2=192.168.18.172 \ +-e CRS_NODES="\"pubhost:racnodep3,viphost:racnodep3-vip\"" \ +-e SCAN_NAME=racnodepc1-scan \ +-e INIT_SGA_SIZE=3G \ +-e INIT_PGA_SIZE=2G \ +--volume racstorage:/oradata \ +-e CRS_ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img \ +-e CRS_ASM_DISCOVERY_STRING="/oradata/asm_disk*" \ +-e OP_TYPE=racaddnode \ +-e EXISTING_CLS_NODE="racnodep1,racnodep2" \ +-e INSTALL_NODE=racnodep3 \ +-e ASM_ON_NAS=True \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name racnodep3 \ +localhost/oracle/database-rac:21c +``` + +Attach the networks to the new container and start the container: +```bash +podman network disconnect podman racnodep3 +podman network connect rac_pub1_nw --ip 10.0.20.172 racnodep3 +podman network connect rac_priv1_nw --ip 192.168.17.172 racnodep3 +podman network connect rac_priv2_nw --ip 192.168.18.172 racnodep3 +podman start racnodep3 +``` +Monitor the new container logs using the following command: +```bash +podman exec racnodep3 /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +When the Oracle RAC container setup is complete, you should see a message similar to the following: +```bash +======================================================== +Oracle Database ORCLCDB3 is up and running on racnodep3. +======================================================== +``` + +## Environment Variables for Oracle RAC on Containers +For an explanation of all of the environment variables used with Oracle RAC on Podman, see [Environment Variables Explained for Oracle RAC on Podman](../../../docs/ENVIRONMENTVARIABLES.md). Change or set these environment variables as required for the configuration of your environment. + +## Cleanup +For instructions on cleaning up the Oracle RAC Database Container Environment, see the [README](../../../docs/CLEANUP.md). + +## Support + +At the time of this release, Oracle RAC on Podman is supported for Oracle Linux 8.10 or later. To review the current Linux support certifications, see [Oracle RAC on Podman Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/install-and-upgrade.html) + +## License + +To download and run Oracle Grid and Database, regardless of whether inside or outside a container, you must download the binaries from the Oracle website and accept the license indicated on that page. + +All scripts and files hosted in this repository that are required to build the container images are, unless otherwise noted, released under a UPL 1.0 license. + +## Copyright + +Copyright (c) 2014-2025 Oracle and/or its affiliates.
\ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/blockdevices/addition/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/blockdevices/addition/podman-compose.yml new file mode 100644 index 0000000000..6ca4172d63 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/blockdevices/addition/podman-compose.yml @@ -0,0 +1,76 @@ +--- +version: "3" +networks: + rac_pub1_nw: + name: ${PUBLIC_NETWORK_NAME} + external: true + rac_priv1_nw: + name: ${PRIVATE1_NETWORK_NAME} + external: true + rac_priv2_nw: + name: ${PRIVATE2_NETWORK_NAME} + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +services: + racnodep3: + container_name: ${RACNODE3_CONTAINER_NAME} + hostname: ${RACNODE3_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - ${GRID_RESPONSE_FILE}:/tmp/grid_21ai.rsp + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + PRIVATE_IP1_LIST: ${RACNODE3_PRIVATE_IP1_LIST} + PRIVATE_IP2_LIST: ${RACNODE3_PRIVATE_IP2_LIST} + DEFAULT_GATEWAY: ${DEFAULT_GATEWAY} + INSTALL_NODE: ${INSTALL_NODE} + OP_TYPE: racaddnode + EXISTING_CLS_NODE: ${EXISTING_CLS_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + PROFILE_FLAG: "true" + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_DIR: ${CRS_ASM_DISCOVERY_DIR} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + GRID_RESPONSE_FILE: /tmp/grid_21ai.rsp + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD-SHELL", "if [ `cat /tmp/orod/oracle_rac_setup.log | grep -c 'ORACLE RAC DATABASE IS READY TO USE'` -ge 1 ]; then exit 0; else exit 1; fi"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/blockdevices/grid_setup_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/blockdevices/grid_setup_21c.rsp new file mode 100644 index 0000000000..c7ffe19d4a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/blockdevices/grid_setup_21c.rsp @@ -0,0 +1,64 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 +INVENTORY_LOCATION=/u01/app/oraInventory +oracle.install.option=CRS_CONFIG +ORACLE_BASE=/u01/app/grid +oracle.install.asm.OSDBA=dba +oracle.install.asm.OSOPER= +oracle.install.asm.OSASM=asmadmin +oracle.install.crs.config.scanType=LOCAL_SCAN +oracle.install.crs.config.SCANClientDataFile= +oracle.install.crs.config.gpnp.scanName=racnodepc1-scan +oracle.install.crs.config.gpnp.scanPort=1521 +oracle.install.crs.config.ClusterConfiguration=STANDALONE 
+oracle.install.crs.config.configureAsExtendedCluster=false +oracle.install.crs.config.memberClusterManifestFile= +oracle.install.crs.config.clusterName=rac01cluster +oracle.install.crs.config.gpnp.configureGNS= +oracle.install.crs.config.autoConfigureClusterNodeVIP=false +oracle.install.crs.config.gpnp.gnsOption= +oracle.install.crs.config.gpnp.gnsClientDataFile= +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= +oracle.install.crs.config.sites= +oracle.install.crs.config.clusterNodes=racnodep1:racnodep1-vip:HUB,racnodep2:racnodep2-vip:HUB +oracle.install.crs.config.networkInterfaceList=eth0:10.0.20.0:1,eth1:192.168.17.0:5,eth2:192.168.18.0:5 +oracle.install.asm.configureGIMRDataDG=false +oracle.install.crs.config.storageOption= +oracle.install.crs.config.useIPMI=false +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +oracle.install.asm.storageOption=ASM +oracle.install.asmOnNAS.ocrLocation= +oracle.install.asmOnNAS.configureGIMRDataDG=false +oracle.install.asmOnNAS.gimrLocation= +oracle.install.asm.SYSASMPassword=ORacle__21c +oracle.install.asm.diskGroup.name=DATA +oracle.install.asm.diskGroup.redundancy=EXTERNAL +oracle.install.asm.diskGroup.AUSize=4 +oracle.install.asm.diskGroup.FailureGroups= +oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-disk1,,/dev/asm-disk2, +oracle.install.asm.diskGroup.disks=/dev/asm-disk1,/dev/asm-disk2 +oracle.install.asm.diskGroup.quorumFailureGroupNames= +oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm* +oracle.install.asm.monitorPassword=ORacle__21c +oracle.install.asm.gimrDG.name= +oracle.install.asm.gimrDG.redundancy= +oracle.install.asm.gimrDG.AUSize=1 +oracle.install.asm.gimrDG.FailureGroups= +oracle.install.asm.gimrDG.disksWithFailureGroupNames= +oracle.install.asm.gimrDG.disks= +oracle.install.asm.gimrDG.quorumFailureGroupNames= +oracle.install.asm.configureAFD=false +oracle.install.crs.configureRHPS=false +oracle.install.crs.config.ignoreDownNodes=false +oracle.install.config.managementOption=NONE +oracle.install.config.omsHost= +oracle.install.config.omsPort=0 +oracle.install.config.emAdminUser= +oracle.install.config.emAdminPassword= +oracle.install.crs.rootconfig.executeRootScript=false +oracle.install.crs.rootconfig.configMethod=ROOT +oracle.install.crs.rootconfig.sudoPath= +oracle.install.crs.rootconfig.sudoUserName= +oracle.install.crs.config.batchinfo= +oracle.install.crs.deleteNode.nodes= \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/blockdevices/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/blockdevices/podman-compose.yml new file mode 100644 index 0000000000..a74819d4a8 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/blockdevices/podman-compose.yml @@ -0,0 +1,188 @@ +--- +version: "3" +networks: + rac_pub1_nw: + name: ${PUBLIC_NETWORK_NAME} + driver: bridge + ipam: + driver: default + config: + - subnet: "${PUBLIC_NETWORK_SUBNET}" + rac_priv1_nw: + name: ${PRIVATE1_NETWORK_NAME} + driver: bridge + ipam: + driver: default + config: + - subnet: "${PRIVATE1_NETWORK_SUBNET}" + rac_priv2_nw: + name: ${PRIVATE2_NETWORK_NAME} + driver: bridge + ipam: + driver: default + config: + - subnet: "${PRIVATE2_NETWORK_SUBNET}" +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} 
+services: + rac-dnsserver: + container_name: ${DNS_CONTAINER_NAME} + hostname: ${DNS_HOST_NAME} + image: ${DNS_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + environment: + SETUP_DNS_CONFIG_FILES: "setup_true" + DOMAIN_NAME: ${DNS_DOMAIN} + RAC_NODE_NAME_PREFIXD: ${RAC_NODE_NAME_PREFIXD} + RAC_NODE_NAME_PREFIXP: ${RAC_NODE_NAME_PREFIXP} + WEBMIN_ENABLED: false + SETUP_DNS_CONFIG_FILES: "setup_true" + cap_add: + - AUDIT_WRITE + healthcheck: + test: ["CMD-SHELL", "if [ `cat /tmp/orod.log | grep -c 'DNS Server IS READY TO USE'` -ge 1 ]; then exit 0; else exit 1; fi"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + privileged: false + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + racnodep1: + container_name: ${RACNODE1_CONTAINER_NAME} + hostname: ${RACNODE1_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - ${GRID_RESPONSE_FILE}:/tmp/grid_21c.rsp + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + PRIVATE_IP1_LIST: ${RACNODE1_PRIVATE_IP1_LIST} + PRIVATE_IP2_LIST: ${RACNODE1_PRIVATE_IP2_LIST} + DEFAULT_GATEWAY: ${DEFAULT_GATEWAY} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_DIR: ${CRS_ASM_DISCOVERY_DIR} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + GRID_RESPONSE_FILE: /tmp/grid_21c.rsp + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD-SHELL", "if [ `cat /tmp/orod/oracle_rac_setup.log | grep -c 'ORACLE RAC DATABASE IS READY TO USE'` -ge 1 ]; then exit 0; else exit 1; fi"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodep2: + container_name: ${RACNODE2_CONTAINER_NAME} + hostname: ${RACNODE2_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - ${GRID_RESPONSE_FILE}:/tmp/grid_21c.rsp + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + PRIVATE_IP1_LIST: ${RACNODE2_PRIVATE_IP1_LIST} + PRIVATE_IP2_LIST: ${RACNODE2_PRIVATE_IP2_LIST} + DEFAULT_GATEWAY: ${DEFAULT_GATEWAY} + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_DIR: ${CRS_ASM_DISCOVERY_DIR} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + GRID_RESPONSE_FILE: /tmp/grid_21c.rsp + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - 
"${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + racnodepc1-cman: + container_name: ${CMAN_CONTAINER_NAME} + hostname: ${CMAN_HOST_NAME} + image: ${CMAN_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + dns: ${DNS_PUBLIC_IP} + environment: + DOMAIN_NAME: ${DNS_DOMAIN} + PUBLIC_IP: ${CMAN_PUBLIC_IP} + PUBLIC_HOSTNAME: ${CMAN_PUBLIC_HOSTNAME} + DB_HOSTDETAILS: ${DB_HOSTDETAILS} + privileged: false + ports: + - 1521:1521 + networks: + rac_pub1_nw: + ipv4_address: ${CMAN_PUBLIC_IP} + cap_add: + - AUDIT_WRITE + healthcheck: + test: ["CMD-SHELL", "if [ `cat /tmp/orod.log | grep -c 'CONNECTION MANAGER IS READY TO USE'` -ge 1 ]; then exit 0; else exit 1; fi"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/dbca_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/dbca_21c.rsp new file mode 100644 index 0000000000..e36b5e55d7 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/dbca_21c.rsp @@ -0,0 +1,58 @@ +responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v21.0.0 +gdbName=ORCLCDB +sid=ORCLCDB +databaseConfigType=RAC +RACOneNodeServiceName= +policyManaged=false +managementPolicy= +createServerPool=false +serverPoolName= +cardinality= +force=false +pqPoolName= +pqCardinality= +createAsContainerDatabase=true +numberOfPDBs=1 +pdbName=ORCLPDB +useLocalUndoForPDBs=true +pdbAdminPassword=ORacle__21c +nodelist=racnodep1,racnodep2 +templateName={ORACLE_HOME}/assistants/dbca/templates/General_Purpose.dbc +sysPassword=ORacle__21c +systemPassword=ORacle__21c +oracleHomeUserPassword= +emConfiguration= +runCVUChecks=true +dbsnmpPassword=ORacle__21c +omsHost= +omsPort= +emUser= +emPassword= +dvConfiguration=false +dvUserName= +dvUserPassword= +dvAccountManagerName= +dvAccountManagerPassword= +olsConfiguration=false +datafileJarLocation={ORACLE_HOME}/assistants/dbca/templates/ +datafileDestination=+DATA/{DB_UNIQUE_NAME}/ +recoveryAreaDestination= +storageType=ASM +diskGroupName=+DATA/{DB_UNIQUE_NAME}/ +asmsnmpPassword= +recoveryGroupName= +characterSet=AL32UTF8 +nationalCharacterSet=AL16UTF16 +registerWithDirService= +dirServiceUserName= +dirServicePassword= +walletPassword= +listeners=LISTENER +variablesFile= +variables=DB_UNIQUE_NAME=ORCLCDB,ORACLE_BASE=/u01/app/oracle,PDB_NAME=ORCLPDB,DB_NAME=ORCLCDB,ORACLE_HOME=/u01/app/oracle/product/21c/dbhome_1,SID=ORCLCDB +initParams=audit_trail=none,audit_sys_operations=false,remote_login_passwordfile=exclusive +sampleSchema=false +memoryPercentage=40 +databaseType=MULTIPURPOSE +automaticMemoryManagement=false +totalMemory=5000 \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/nfsdevices/grid_setup_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/nfsdevices/grid_setup_21c.rsp new file mode 100644 index 0000000000..16062dd6cb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racimage/withresponsefiles/nfsdevices/grid_setup_21c.rsp @@ -0,0 +1,64 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 +INVENTORY_LOCATION=/u01/app/oraInventory +oracle.install.option=CRS_CONFIG +ORACLE_BASE=/u01/app/grid 
+oracle.install.asm.OSDBA=dba +oracle.install.asm.OSOPER= +oracle.install.asm.OSASM=asmadmin +oracle.install.crs.config.scanType=LOCAL_SCAN +oracle.install.crs.config.SCANClientDataFile= +oracle.install.crs.config.gpnp.scanName=racnodepc1-scan +oracle.install.crs.config.gpnp.scanPort=1521 +oracle.install.crs.config.ClusterConfiguration=STANDALONE +oracle.install.crs.config.configureAsExtendedCluster=false +oracle.install.crs.config.memberClusterManifestFile= +oracle.install.crs.config.clusterName=rac01cluster +oracle.install.crs.config.gpnp.configureGNS= +oracle.install.crs.config.autoConfigureClusterNodeVIP=false +oracle.install.crs.config.gpnp.gnsOption= +oracle.install.crs.config.gpnp.gnsClientDataFile= +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= +oracle.install.crs.config.sites= +oracle.install.crs.config.clusterNodes=racnodep1:racnodep1-vip:HUB,racnodep2:racnodep2-vip:HUB +oracle.install.crs.config.networkInterfaceList=eth0:10.0.20.0:1,eth1:192.168.17.0:5,eth2:192.168.18.0:5 +oracle.install.asm.configureGIMRDataDG=false +oracle.install.crs.config.storageOption= +oracle.install.crs.config.useIPMI=false +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +oracle.install.asm.storageOption=ASM +oracle.install.asmOnNAS.ocrLocation= +oracle.install.asmOnNAS.configureGIMRDataDG=false +oracle.install.asmOnNAS.gimrLocation= +oracle.install.asm.SYSASMPassword=ORacle__21c +oracle.install.asm.diskGroup.name=DATA +oracle.install.asm.diskGroup.redundancy=EXTERNAL +oracle.install.asm.diskGroup.AUSize=4 +oracle.install.asm.diskGroup.FailureGroups= +oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oradata/asm_disk01.img,,/oradata/asm_disk02.img,,/oradata/asm_disk03.img,,/oradata/asm_disk04.img,,/oradata/asm_disk05.im +oracle.install.asm.diskGroup.disks=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img +oracle.install.asm.diskGroup.quorumFailureGroupNames= +oracle.install.asm.diskGroup.diskDiscoveryString=/oradata/asm_disk* +oracle.install.asm.monitorPassword=ORacle__21c +oracle.install.asm.gimrDG.name= +oracle.install.asm.gimrDG.redundancy= +oracle.install.asm.gimrDG.AUSize=1 +oracle.install.asm.gimrDG.FailureGroups= +oracle.install.asm.gimrDG.disksWithFailureGroupNames= +oracle.install.asm.gimrDG.disks= +oracle.install.asm.gimrDG.quorumFailureGroupNames= +oracle.install.asm.configureAFD=false +oracle.install.crs.configureRHPS=false +oracle.install.crs.config.ignoreDownNodes=false +oracle.install.config.managementOption=NONE +oracle.install.config.omsHost= +oracle.install.config.omsPort=0 +oracle.install.config.emAdminUser= +oracle.install.config.emAdminPassword= +oracle.install.crs.rootconfig.executeRootScript=false +oracle.install.crs.rootconfig.configMethod=ROOT +oracle.install.crs.rootconfig.sudoPath= +oracle.install.crs.rootconfig.sudoUserName= +oracle.install.crs.config.batchinfo= +oracle.install.crs.deleteNode.nodes= \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/README.md b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/README.md new file mode 100644 index 0000000000..4b7cfaf03a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/README.md @@ -0,0 +1,701 @@ +# Oracle RAC on Podman using Slim Image +=============================================================== + +Refer below 
instructions for the setup of Oracle RAC on Podman using Slim Image for various scenarios. + +- [Oracle RAC on Podman using Slim Image](#oracle-rac-on-podman-using-slim-image) + - [Section 1: Prerequisites for Setting up Oracle RAC on Container Using Slim Image](#section-1-prerequisites-for-setting-up-oracle-rac-on-container-using-slim-image) + - [Section 2: Deploying 2 Node Oracle RAC Setup on Podman Using Slim Image](#section-2-deploying-2-node-oracle-rac-setup-on-podman-using-slim-image) + - [Section 2.1: Deploying 2 Node Oracle RAC Setup on Podman Using Slim Image Without using response files](#section-21-deploying-2-node-oracle-rac-setup-on-podman-using-slim-image-without-using-response-files) + - [Section 2.1.1: Deploying With BlockDevices](#section-211-deploying-with-blockdevices) + - [Section 2.1.2: Deploying with NFS Storage Devices](#section-212-deploying-with-nfs-storage-devices) + - [Section 2.2: Deploying 2 Node Oracle RAC Setup on Podman Using Slim Image Using User Defined response files](#section-22-deploying-2-node-oracle-rac-setup-on-podman-using-slim-image-using-user-defined-response-files) + - [Section 2.2.1: Deploying with BlockDevices](#section-221-deploying-with-blockdevices) + - [Section 2.2.2: Deploying with NFS Storage Devices](#section-222-deploying-with-nfs-storage-devices) + - [Section 3: Attach the Network to Containers](#section-3-attach-the-network-to-containers) + - [Attach the Network to racnodep1](#attach-the-network-to-racnodep1) + - [Attach the Network to racnodep2](#attach-the-network-to-racnodep2) + - [Section 4: Start the Containers](#section-4-start-the-containers) + - [Section 5: Validation Oracle RAC Environment](#section-5-validating-oracle-rac-environment) + - [Section 6: Connecting to Oracle RAC Environment](#section-6-connecting-to-oracle-rac-environment) + - [Section 7: Sample of Addition of Nodes to Oracle RAC Containers based on Slim Image](#section-7-sample-of-addition-of-nodes-to-oracle-rac-containers-based-on-slim-image) + - [Section 7.1: Sample of Addition of Nodes to Oracle RAC Containers based on Slim Image Without Response File](#section-71-sample-of-addition-of-nodes-to-oracle-rac-containers-based-on-slim-image-without-response-file) + - [Section 8: Sample of Addition of Nodes to Oracle RAC Containers based on Oracle RAC Slim Image with NFS Storage Devices](#section-8-sample-of-addition-of-nodes-to-oracle-rac-containers-based-on-oracle-rac-slim-image-with-nfs-storage-devices) + - [Section 8.1: Sample of Addition of Nodes to Oracle RAC Containers based on Oracle RAC Image Without Response File](#section-81-sample-of-addition-of-nodes-to-oracle-rac-containers-based-on-oracle-rac-image-without-response-file) + - [Section 9: Environment Variables for Oracle RAC on Containers](#section-9-environment-variables-for-oracle-rac-on-containers) + - [Cleanup](#cleanup) + - [Support](#support) + - [License](#license) + - [Copyright](#copyright) + +## Oracle RAC Setup on Podman using Slim Image + +Users can deploy multi-node Oracle RAC Database Setup using Oracle RAC Database Container Slim Image either on Block Devices or NFS storage Devices and with or without using User Defined Response Files. All of these are demonstrated in detail in this document. + +## Section 1: Prerequisites for Setting up Oracle RAC on Container using Slim Image +**IMPORTANT:** Execute all the steps specified in this section (customize for your environment) before you proceed to the next section. 
Completing prerequisite steps is a requirement for successful configuration. + +* Complete the [Preparation Steps for running Oracle RAC Database in containers](../../../README.md#preparation-steps-for-running-oracle-rac-database-in-containers) +* If you are planning to use Oracle Connection Manager, then create an Oracle Connection Manager container image. See the [Oracle Connection Manager in Linux Containers](../../../../OracleConnectionManager/README.md) +* Make sure the Oracle RAC Database Container Slim Image is present as shown below. If you have not created the Oracle RAC Database Container Slim Image, follow [Building Oracle RAC Database Container Slim Image](../../../README.md#building-oracle-rac-database-container-slim-image) + ```bash + # podman images|grep database-rac + localhost/oracle/database-rac 21c-slim bf6ae21ccd5a 8 hours ago 517 MB + ``` +* Configure the [Network Management](../../../README.md#network-management). +* Configure the [Password Management](../../../README.md#password-management). + +* Prepare empty directories on the host for the two nodes, similar to the following. These directories are mounted to the Oracle RAC containers and are used later, during container creation, to install the Oracle RAC software binaries: + ```bash + mkdir -p /scratch/rac/cluster01/node1 + rm -rf /scratch/rac/cluster01/node1/* + + mkdir -p /scratch/rac/cluster01/node2 + rm -rf /scratch/rac/cluster01/node2/* + ``` + +* Make sure the downloaded Oracle RAC software is staged and available for both RAC nodes. In the following example, the Oracle RAC software is staged at `/scratch/software/21c/goldimages`: + ```bash + ls /scratch/software/21c/goldimages + LINUX.X64_213000_db_home.zip LINUX.X64_213000_grid_home.zip + ``` +* If SELinux is enabled on the host machine, then also run the following: + ```bash + semanage fcontext -a -t container_file_t /scratch/rac/cluster01/node1 + restorecon -v /scratch/rac/cluster01/node1 + semanage fcontext -a -t container_file_t /scratch/rac/cluster01/node2 + restorecon -v /scratch/rac/cluster01/node2 + semanage fcontext -a -t container_file_t /scratch/software/21c/goldimages/LINUX.X64_213000_grid_home.zip + restorecon -v /scratch/software/21c/goldimages/LINUX.X64_213000_grid_home.zip + semanage fcontext -a -t container_file_t /scratch/software/21c/goldimages/LINUX.X64_213000_db_home.zip + restorecon -v /scratch/software/21c/goldimages/LINUX.X64_213000_db_home.zip + ``` + +## Section 2: Deploying 2 Node Oracle RAC Setup on Podman using Slim Image + +Follow the instructions below to set up Oracle RAC Database on Podman using the Slim Image for various scenarios, such as deploying with or without user-defined response files. Oracle RAC setup can also be done either on block devices or on NFS storage devices. + +### Section 2.1: Deploying 2 Node Oracle RAC Setup on Podman using Slim Image Without using response files + +Follow the instructions below to set up Oracle RAC on Podman using the Slim Image without providing response files. + +#### Section 2.1.1: Deploying With BlockDevices +##### Section 2.1.1.1: Prerequisites for setting up Oracle RAC with Block Devices + +- Make sure you have created at least one block device with 50 GB of storage space that can be accessed by both RAC nodes and shared between them. You can create more block devices as per your requirements, and pass the corresponding devices and environment variables to the `podman create` command, as well as to the grid response files (if you are using them). You can skip this step if you are planning to use **NFS storage devices**.
+ + Make sure the ASM devices do not have any existing file system. To clear any existing file system from the devices, use the following command: + ```bash + dd if=/dev/zero of=/dev/oracleoci/oraclevdd bs=8k count=10000 + ``` + Repeat the cleanup disk for each shared block device. In the preceding example, `/dev/oracleoci/oraclevdd` is a shared KVM virtual block device. + +- In this example, we are going to use environment variables passed in a file called [envfile_racnodep1](withoutresponsefiles/blockdevices/envfile_racnodep1) & [envfile_racnodep2](withoutresponsefiles/blockdevices/envfile_racnodep2) and mounted to rac node containers. +In this example, files `envfile_racnodep1` and `envfile_racnodep2` are placed under `/scratch/common_scripts/podman/rac` on container host. + +- If SELinux is enabled on machine then execute the following as well- + ```bash + semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/envfile_racnodep1 + restorecon -v /scratch/common_scripts/podman/rac/envfile_racnodep1 + semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/envfile_racnodep2 + restorecon -v /scratch/common_scripts/podman/rac/envfile_racnodep2 + ``` + +###### Section 2.1.1.2: Create Oracle RAC Containers +Now create the Oracle RAC containers using the Oracle RAC Database Container Slim Image. For the details of environment variables, refer to [Environment Variables Explained](#section-9-environment-variables-for-oracle-rac-on-containers) + +**Note**: Before creating the containers, you need to make sure you have edited the file `/scratch/common_scripts/podman/rac/envfile_racnodep1` and set the variables based on your environment. + +You can use the following example to create a container on host `racnodep1`: +```bash +podman create -t -i \ +--hostname racnodep1 \ +--dns-search "example.info" \ +--dns 10.0.20.25 \ +--shm-size 4G \ +--volume /scratch/rac/cluster01/node1:/u01 \ +--volume /scratch/common_scripts/podman/rac/envfile_racnodep1:/etc/rac_env_vars/envfile \ +--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \ +--volume /scratch:/scratch \ +--secret pwdsecret \ +--secret keysecret \ +--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \ +--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \ +--cpuset-cpus 0-1 \ +--memory 16G \ +--memory-swap 32G \ +--sysctl kernel.shmall=2097152 \ +--sysctl "kernel.sem=250 32000 100 128" \ +--sysctl kernel.shmmax=8589934592 \ +--sysctl kernel.shmmni=4096 \ +--cap-add=SYS_RESOURCE \ +--cap-add=NET_ADMIN \ +--cap-add=SYS_NICE \ +--cap-add=AUDIT_WRITE \ +--cap-add=AUDIT_CONTROL \ +--cap-add=NET_RAW \ +--device=/dev/oracleoci/oraclevdd:/dev/asm-disk1 \ +--device=/dev/oracleoci/oraclevde:/dev/asm-disk2 \ +--restart=always \ +--ulimit rtprio=99 \ +--systemd=always \ +--name racnodep1 \ + localhost/oracle/database-rac:21c-slim + ``` + **Note**: Before creating the containers, you need to make sure you have edited the file `/scratch/common_scripts/podman/rac/envfile_racnodep2` and set the variables based on your enviornment. 
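+ +For illustration only, such an envfile is a plain list of `KEY=value` assignments that the container reads from `/etc/rac_env_vars/envfile`. The following hypothetical fragment uses only variables already shown with `-e` in the Oracle RAC image examples earlier in this document; the linked sample envfiles are the source of truth and also contain additional slim-image-specific variables (for example, the locations of the staged grid and database software) that are not repeated here: + +```bash +# Hypothetical envfile fragment for racnodep2 (adjust every value for your environment) +DNS_SERVERS=10.0.20.25 +DB_SERVICE=service:soepdb +CRS_PRIVATE_IP1=192.168.17.171 +CRS_PRIVATE_IP2=192.168.18.171 +CRS_NODES="pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip" +SCAN_NAME=racnodepc1-scan +INSTALL_NODE=racnodep1 +CRS_ASM_DEVICE_LIST=/dev/asm-disk1,/dev/asm-disk2 +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret +OP_TYPE=setuprac +```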
+
+Create the second Oracle RAC container:
+```bash
+podman create -t -i \
+--hostname racnodep2 \
+--dns-search "example.info" \
+--dns 10.0.20.25 \
+--shm-size 4G \
+--secret pwdsecret \
+--secret keysecret \
+--volume /scratch/rac/cluster01/node2:/u01 \
+--volume /scratch/common_scripts/podman/rac/envfile_racnodep2:/etc/rac_env_vars/envfile \
+--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \
+--volume /scratch:/scratch \
+--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \
+--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \
+--cpuset-cpus 0-1 \
+--memory 16G \
+--memory-swap 32G \
+--sysctl kernel.shmall=2097152 \
+--sysctl "kernel.sem=250 32000 100 128" \
+--sysctl kernel.shmmax=8589934592 \
+--sysctl kernel.shmmni=4096 \
+--cap-add=SYS_RESOURCE \
+--cap-add=NET_ADMIN \
+--cap-add=SYS_NICE \
+--cap-add=AUDIT_WRITE \
+--cap-add=AUDIT_CONTROL \
+--cap-add=NET_RAW \
+--device=/dev/oracleoci/oraclevdd:/dev/asm-disk1 \
+--device=/dev/oracleoci/oraclevde:/dev/asm-disk2 \
+--restart=always \
+--ulimit rtprio=99 \
+--systemd=always \
+--name racnodep2 \
+localhost/oracle/database-rac:21c-slim
+```
+
+#### Section 2.1.2: Deploying with NFS Storage Devices
+##### Section 2.1.2.1: Prerequisites for setting up Oracle RAC with NFS Storage Devices
+* Create an NFS volume to be used for ASM devices for Oracle RAC. See [Configuring NFS for Storage for Oracle RAC on Podman](https://review.us.oracle.com/review2/Review.html#reviewId=467473;scope=document;status=open,fixed;documentId=4229197) for more details. **Note:** You can skip this step if you are planning to use block devices for storage.
+
+* Make sure the ASM NFS storage devices do not have any existing file system.
+
+* In this example, the environment variables are passed in the files [envfile_racnodep1](withoutresponsefiles/nfsdevices/envfile_racnodep1) and [envfile_racnodep2](withoutresponsefiles/nfsdevices/envfile_racnodep2), which are mounted to the RAC node containers. In this example, the files are created under the `/scratch/common_scripts/podman/rac` path.
+
+* If SELinux is enabled on the host machine, then also execute the following:
+  ```bash
+  semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/envfile_racnodep1
+  restorecon -v /scratch/common_scripts/podman/rac/envfile_racnodep1
+  semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/envfile_racnodep2
+  restorecon -v /scratch/common_scripts/podman/rac/envfile_racnodep2
+  ```
+##### Section 2.1.2.2: Create Oracle RAC Containers
+Now create the Oracle RAC containers using the Oracle RAC Database Container Slim Image. For the details of the environment variables, refer to [Environment Variables Explained](#section-9-environment-variables-for-oracle-rac-on-containers).
+
+**Note**: Before creating the containers, make sure you have edited the file `/scratch/common_scripts/podman/rac/envfile_racnodep1` and set the variables based on your environment.
+
+You can use the following example to create the first Oracle RAC container:
+```bash
+podman create -t -i \
+--hostname racnodep1 \
+--dns-search "example.info" \
+--dns 10.0.20.25 \
+--shm-size 4G \
+--secret pwdsecret \
+--secret keysecret \
+--volume /scratch/rac/cluster01/node1:/u01 \
+--volume /scratch/common_scripts/podman/rac/envfile_racnodep1:/etc/rac_env_vars/envfile \
+--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \
+--volume /scratch:/scratch \
+--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \
+--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \
+--cpuset-cpus 0-1 \
+--memory 16G \
+--memory-swap 32G \
+--sysctl kernel.shmall=2097152 \
+--sysctl "kernel.sem=250 32000 100 128" \
+--sysctl kernel.shmmax=8589934592 \
+--sysctl kernel.shmmni=4096 \
+--cap-add=SYS_RESOURCE \
+--cap-add=NET_ADMIN \
+--cap-add=SYS_NICE \
+--cap-add=AUDIT_WRITE \
+--cap-add=AUDIT_CONTROL \
+--cap-add=NET_RAW \
+--volume racstorage:/oradata \
+--restart=always \
+--ulimit rtprio=99 \
+--systemd=always \
+--name racnodep1 \
+localhost/oracle/database-rac:21c-slim
+```
+
+**Note**: Before creating the containers, make sure you have edited the file `/scratch/common_scripts/podman/rac/envfile_racnodep2` and set the variables based on your environment.
+
+Create the second Oracle RAC container:
+
+```bash
+podman create -t -i \
+--hostname racnodep2 \
+--dns-search "example.info" \
+--dns 10.0.20.25 \
+--shm-size 4G \
+--secret pwdsecret \
+--secret keysecret \
+--volume /scratch/rac/cluster01/node2:/u01 \
+--volume /scratch/common_scripts/podman/rac/envfile_racnodep2:/etc/rac_env_vars/envfile \
+--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \
+--volume /scratch:/scratch \
+--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \
+--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \
+--cpuset-cpus 0-1 \
+--memory 16G \
+--memory-swap 32G \
+--sysctl kernel.shmall=2097152 \
+--sysctl "kernel.sem=250 32000 100 128" \
+--sysctl kernel.shmmax=8589934592 \
+--sysctl kernel.shmmni=4096 \
+--cap-add=SYS_RESOURCE \
+--cap-add=NET_ADMIN \
+--cap-add=SYS_NICE \
+--cap-add=AUDIT_WRITE \
+--cap-add=AUDIT_CONTROL \
+--cap-add=NET_RAW \
+--volume racstorage:/oradata \
+--restart=always \
+--ulimit rtprio=99 \
+--systemd=always \
+--name racnodep2 \
+localhost/oracle/database-rac:21c-slim
+```
+
+### Section 2.2: Deploying 2 Node Oracle RAC Setup on Podman using Slim Image Using User-Defined Response Files
+#### Section 2.2.1: Deploying With Block Devices
+##### Section 2.2.1.1: Prerequisites for setting up Oracle RAC using User-Defined Response Files with Block Devices
+- On the shared folder between both RAC nodes, copy the file [grid_setup_new_21c.rsp](withresponsefiles/blockdevices/grid_setup_new_21c.rsp) to `/scratch/common_scripts/podman/rac/`.
+- Also, prepare a database response file similar to [dbca_21c.rsp](withresponsefiles/dbca_21c.rsp).
+- In the example below, all environment variables passed to the containers are captured in a separate envfile that is mounted to both RAC nodes. Create the envfiles [envfile_racnodep1](withresponsefiles/blockdevices/envfile_racnodep1) and [envfile_racnodep2](withresponsefiles/blockdevices/envfile_racnodep2) for both nodes in the directory `/scratch/common_scripts/podman/rac/`.
+- If SELinux is enabled on the host machine, then also execute the following:
+  ```bash
+  semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp
+  restorecon -v /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp
+  semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/dbca_21c.rsp
+  restorecon -v /scratch/common_scripts/podman/rac/dbca_21c.rsp
+  semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/envfile_racnodep1
+  restorecon -v /scratch/common_scripts/podman/rac/envfile_racnodep1
+  semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/envfile_racnodep2
+  restorecon -v /scratch/common_scripts/podman/rac/envfile_racnodep2
+  ```
+  Note: Passwords defined in the response files are overwritten by the passwords defined through `podman secret`, because passwords in response files are exposed as plain text.
+You can skip this step if you are not planning to use **User Defined Response Files for RAC**.
+
+Follow the instructions below to set up Oracle RAC on Podman using the Slim Image with user-defined response files.
+
+**Note**: Before creating the containers, make sure you have edited the file `/scratch/common_scripts/podman/rac/envfile_racnodep1` and set the variables based on your environment.
+
+You can use the following example to create the first Oracle RAC container:
+
+```bash
+podman create -t -i \
+--hostname racnodep1 \
+--dns-search "example.info" \
+--dns 10.0.20.25 \
+--shm-size 4G \
+--secret pwdsecret \
+--secret keysecret \
+--volume /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp:/tmp/grid_21c.rsp \
+--volume /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp \
+--volume /scratch/rac/cluster01/node1:/u01 \
+--volume /scratch:/scratch \
+--volume /scratch/common_scripts/podman/rac/envfile_racnodep1:/etc/rac_env_vars/envfile \
+--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \
+--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \
+--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \
+--cpuset-cpus 0-1 \
+--memory 16G \
+--memory-swap 32G \
+--sysctl kernel.shmall=2097152 \
+--sysctl "kernel.sem=250 32000 100 128" \
+--sysctl kernel.shmmax=8589934592 \
+--sysctl kernel.shmmni=4096 \
+--cap-add=SYS_RESOURCE \
+--cap-add=NET_ADMIN \
+--cap-add=SYS_NICE \
+--cap-add=AUDIT_WRITE \
+--cap-add=AUDIT_CONTROL \
+--cap-add=NET_RAW \
+--device=/dev/oracleoci/oraclevdd:/dev/asm-disk1 \
+--device=/dev/oracleoci/oraclevde:/dev/asm-disk2 \
+--restart=always \
+--ulimit rtprio=99 \
+--systemd=always \
+--name racnodep1 \
+localhost/oracle/database-rac:21c-slim
+```
+
+**Note**: Before creating the containers, make sure you have edited the file `/scratch/common_scripts/podman/rac/envfile_racnodep2` and set the variables based on your environment.
+
+To create the second container, use the following command:
+
+```bash
+podman create -t -i \
+--hostname racnodep2 \
+--dns-search "example.info" \
+--dns 10.0.20.25 \
+--shm-size 4G \
+--secret pwdsecret \
+--secret keysecret \
+--volume /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp:/tmp/grid_21c.rsp \
+--volume /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp \
+--volume /scratch/rac/cluster01/node2:/u01 \
+--volume /scratch:/scratch \
+--volume /scratch/common_scripts/podman/rac/envfile_racnodep2:/etc/rac_env_vars/envfile \
+--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \
+--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \
+--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \
+--cpuset-cpus 0-1 \
+--memory 16G \
+--memory-swap 32G \
+--sysctl kernel.shmall=2097152 \
+--sysctl "kernel.sem=250 32000 100 128" \
+--sysctl kernel.shmmax=8589934592 \
+--sysctl kernel.shmmni=4096 \
+--cap-add=SYS_RESOURCE \
+--cap-add=NET_ADMIN \
+--cap-add=SYS_NICE \
+--cap-add=AUDIT_WRITE \
+--cap-add=AUDIT_CONTROL \
+--cap-add=NET_RAW \
+--device=/dev/oracleoci/oraclevdd:/dev/asm-disk1 \
+--device=/dev/oracleoci/oraclevde:/dev/asm-disk2 \
+--restart=always \
+--ulimit rtprio=99 \
+--systemd=always \
+--name racnodep2 \
+localhost/oracle/database-rac:21c-slim
+```
+
+#### Section 2.2.2: Deploying with NFS Storage Devices
+##### Section 2.2.2.1: Prerequisites for setting up Oracle RAC using User-Defined Response Files with NFS Devices
+- Create an NFS volume to be used for ASM devices for Oracle RAC. See [Configuring NFS for Storage for Oracle RAC on Podman](https://review.us.oracle.com/review2/Review.html#reviewId=467473;scope=document;status=open,fixed;documentId=4229197) for more details. **Note:** You can skip this step if you are planning to use block devices for storage.
+
+- Make sure the ASM NFS storage devices do not have any existing file system.
+- On the shared folder between both RAC nodes, create a file named [grid_setup_new_21c.rsp](withresponsefiles/nfsdevices/grid_setup_new_21c.rsp), similar to the linked example, inside the directory `/scratch/common_scripts/podman/rac/`.
+- Also, prepare a database response file similar to [dbca_21c.rsp](withresponsefiles/dbca_21c.rsp) inside the directory `/scratch/common_scripts/podman/rac/`.
+- In the example below, all environment variables passed to the containers are captured in a separate envfile that is mounted to both RAC nodes.
+
+  Create the envfiles [envfile_racnodep1](withresponsefiles/nfsdevices/envfile_racnodep1) and [envfile_racnodep2](withresponsefiles/nfsdevices/envfile_racnodep2) for both nodes in the directory `/scratch/common_scripts/podman/rac/`.
+- If SELinux is enabled on the host machine, then also execute the following:
+  ```bash
+  semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp
+  restorecon -v /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp
+  semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/dbca_21c.rsp
+  restorecon -v /scratch/common_scripts/podman/rac/dbca_21c.rsp
+  semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/envfile_racnodep1
+  restorecon -v /scratch/common_scripts/podman/rac/envfile_racnodep1
+  semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/envfile_racnodep2
+  restorecon -v /scratch/common_scripts/podman/rac/envfile_racnodep2
+  ```
+You can skip this step if you are not planning to use **User Defined Response Files for RAC**.
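+
+Optionally, before creating the containers, you can confirm that the `container_file_t` label was actually applied to the response files and envfiles. This is a minimal check only, assuming the same file paths used in the preceding steps:
+
+```bash
+# Each file should show the container_file_t SELinux context in its label.
+ls -lZ /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp \
+       /scratch/common_scripts/podman/rac/dbca_21c.rsp \
+       /scratch/common_scripts/podman/rac/envfile_racnodep1 \
+       /scratch/common_scripts/podman/rac/envfile_racnodep2
+```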
+
+Follow the instructions below to set up Oracle RAC on Podman using the Slim Image with user-defined response files.
+
+**Note**: Before creating the containers, make sure you have edited the file `/scratch/common_scripts/podman/rac/envfile_racnodep1` and set the variables based on your environment.
+
+You can use the following example to create the first Oracle RAC container:
+```bash
+podman create -t -i \
+--hostname racnodep1 \
+--dns-search "example.info" \
+--dns 10.0.20.25 \
+--shm-size 4G \
+--secret pwdsecret \
+--secret keysecret \
+--volume /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp:/tmp/grid_21c.rsp \
+--volume /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp \
+--volume /scratch/rac/cluster01/node1:/u01 \
+--volume /scratch:/scratch \
+--volume /scratch/common_scripts/podman/rac/envfile_racnodep1:/etc/rac_env_vars/envfile \
+--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \
+--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \
+--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \
+--cpuset-cpus 0-1 \
+--memory 16G \
+--memory-swap 32G \
+--sysctl kernel.shmall=2097152 \
+--sysctl "kernel.sem=250 32000 100 128" \
+--sysctl kernel.shmmax=8589934592 \
+--sysctl kernel.shmmni=4096 \
+--cap-add=SYS_RESOURCE \
+--cap-add=NET_ADMIN \
+--cap-add=SYS_NICE \
+--cap-add=AUDIT_WRITE \
+--cap-add=AUDIT_CONTROL \
+--cap-add=NET_RAW \
+--volume racstorage:/oradata \
+--restart=always \
+--ulimit rtprio=99 \
+--systemd=always \
+--name racnodep1 \
+localhost/oracle/database-rac:21c-slim
+```
+
+**Note**: Before creating the containers, make sure you have edited the file `/scratch/common_scripts/podman/rac/envfile_racnodep2` and set the variables based on your environment.
+
+To create the second container, use the following command:
+
+```bash
+podman create -t -i \
+--hostname racnodep2 \
+--dns-search "example.info" \
+--dns 10.0.20.25 \
+--shm-size 4G \
+--secret pwdsecret \
+--secret keysecret \
+--volume /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp:/tmp/grid_21c.rsp \
+--volume /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp \
+--volume /scratch/rac/cluster01/node2:/u01 \
+--volume /scratch:/scratch \
+--volume /scratch/common_scripts/podman/rac/envfile_racnodep2:/etc/rac_env_vars/envfile \
+--health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \
+--sysctl 'net.ipv4.conf.eth1.rp_filter=2' \
+--sysctl 'net.ipv4.conf.eth2.rp_filter=2' \
+--cpuset-cpus 0-1 \
+--memory 16G \
+--memory-swap 32G \
+--sysctl kernel.shmall=2097152 \
+--sysctl "kernel.sem=250 32000 100 128" \
+--sysctl kernel.shmmax=8589934592 \
+--sysctl kernel.shmmni=4096 \
+--cap-add=SYS_RESOURCE \
+--cap-add=NET_ADMIN \
+--cap-add=SYS_NICE \
+--cap-add=AUDIT_WRITE \
+--cap-add=AUDIT_CONTROL \
+--cap-add=NET_RAW \
+--volume racstorage:/oradata \
+--restart=always \
+--ulimit rtprio=99 \
+--systemd=always \
+--name racnodep2 \
+localhost/oracle/database-rac:21c-slim
+```
+**Note:**
+- Change environment variables based on your environment. Refer to [Section 9: Environment Variables for Oracle RAC on Containers](#section-9-environment-variables-for-oracle-rac-on-containers) for more details.
+- The examples above use a Podman bridge network with one public and two private networks. The `--sysctl 'net.ipv4.conf.eth1.rp_filter=2' --sysctl 'net.ipv4.conf.eth2.rp_filter=2'` settings are required only when two private networks are used; otherwise, they can be omitted.
+
+- If you are planning to place database files such as datafiles and archive logs in different disk groups, then you need to pass these parameters: `DB_ASM_DEVICE_LIST`, `RECO_ASM_DEVICE_LIST`, `DB_DATA_FILE_DEST`, `DB_RECOVERY_FILE_DEST`. Refer to [Section 9: Environment Variables for Oracle RAC on Containers](#section-9-environment-variables-for-oracle-rac-on-containers) for more details.
+
+## Section 3: Attach the network to containers
+
+Assign the Podman networks created in the preceding sections to the containers. Execute the following commands:
+
+### Attach the network to racnodep1
+
+```bash
+podman network disconnect podman racnodep1
+podman network connect rac_pub1_nw --ip 10.0.20.170 racnodep1
+podman network connect rac_priv1_nw --ip 192.168.17.170 racnodep1
+podman network connect rac_priv2_nw --ip 192.168.18.170 racnodep1
+```
+### Attach the network to racnodep2
+
+```bash
+podman network disconnect podman racnodep2
+podman network connect rac_pub1_nw --ip 10.0.20.171 racnodep2
+podman network connect rac_priv1_nw --ip 192.168.17.171 racnodep2
+podman network connect rac_priv2_nw --ip 192.168.18.171 racnodep2
+```
+## Section 4: Start the containers
+
+Start the containers. Execute the following commands:
+
+```bash
+podman start racnodep1
+podman start racnodep2
+```
+
+It can take 20 minutes or longer to create and set up the 2-node Oracle RAC environment. To check the logs, use the following command from another terminal session:
+
+```bash
+podman exec racnodep1 /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log"
+```
+
+You should see the database creation success message at the end:
+```bash
+####################################
+ORACLE RAC DATABASE IS READY TO USE!
+####################################
+```
+
+Note:
+- If the Podman logs show errors about files mounted on a container volume not being detected, then make sure the files are labeled correctly with the `container_file_t` context. You can use `ls -lZ` to see the security context set on files.
+  For example:
+  ```bash
+  semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/dbca_21c.rsp
+  restorecon -vF /scratch/common_scripts/podman/rac/dbca_21c.rsp
+  ls -lZ /scratch/common_scripts/podman/rac/dbca_21c.rsp
+  ```
+
+## Section 5: Validating Oracle RAC Environment
+You can validate that the environment is healthy by running the following command:
+```bash
+podman ps -a
+
+CONTAINER ID  IMAGE                                    COMMAND               CREATED         STATUS                   PORTS   NAMES
+f1345fd4047b  localhost/oracle/rac-dnsserver:latest    /bin/sh -c exec $...  8 hours ago     Up 8 hours (healthy)             rac-dnsserver
+2f42e49758d1  localhost/oracle/database-rac:21c-slim                         46 minutes ago  Up 37 minutes (healthy)          racnodep1
+a27fceea9fe6  localhost/oracle/database-rac:21c-slim                         46 minutes ago  Up 37 minutes (healthy)          racnodep2
+```
+Note:
+- Look for `(healthy)` next to the container names in the `STATUS` column.
+
+## Section 6: Connecting to Oracle RAC Environment
+
+**IMPORTANT:** This section assumes that you have successfully created an Oracle RAC cluster using the preceding sections.
+Refer to [README](./docs/CONNECTING.md) for instructions on how to connect to the Oracle RAC Database.
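+
+As a quick connectivity check, you can also run SQL*Plus from one of the RAC containers against the SCAN listener. The following is a minimal sketch only; it assumes the sample values used in this guide (SCAN name `racnodepc1-scan`, listener port `1521`, the `soepdb` service, and the Oracle home `/u01/app/oracle/product/21c/dbhome_1` from the sample envfiles), and `<system_password>` is a placeholder for the password you configured through the Podman secret:
+
+```bash
+# Connect to the soepdb service through the SCAN listener from inside racnodep1.
+podman exec -i -t racnodep1 /bin/bash -c \
+  "export ORACLE_HOME=/u01/app/oracle/product/21c/dbhome_1; \
+   \$ORACLE_HOME/bin/sqlplus system/<system_password>@//racnodepc1-scan:1521/soepdb"
+```
+For the full set of connection options, refer to the CONNECTING.md README referenced above.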
+
+## Section 7: Sample of Addition of Nodes to Oracle RAC Containers based on Slim Image
+### Section 7.1: Sample of Addition of Nodes to Oracle RAC Containers based on Slim Image Without Response File
+The following is an example of adding one more node to the existing 2-node Oracle RAC cluster using the Slim Image, without user-defined response files:
+- Create the envfile [envfile_racnodep3](withoutresponsefiles/blockdevices/envfile_racnodep3) for the additional node and place it at `/scratch/common_scripts/podman/rac/envfile_racnodep3`.
+
+**Note**: Before creating the container, make sure you have edited the file `/scratch/common_scripts/podman/rac/envfile_racnodep3` and set the variables based on your environment.
+
+- Prepare a directory for the additional node:
+  ```bash
+  mkdir -p /scratch/rac/cluster01/node3
+  rm -rf /scratch/rac/cluster01/node3/*
+  ```
+- Create the additional Oracle RAC container:
+  ```bash
+  podman create -t -i \
+  --hostname racnodep3 \
+  --dns-search "example.info" \
+  --dns 10.0.20.25 \
+  --shm-size 4G \
+  --secret pwdsecret \
+  --secret keysecret \
+  --volume /scratch/rac/cluster01/node3:/u01 \
+  --volume /scratch/common_scripts/podman/rac/envfile_racnodep3:/etc/rac_env_vars/envfile \
+  --health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \
+  --volume /scratch:/scratch \
+  --cpuset-cpus 0-1 \
+  --memory 16G \
+  --memory-swap 32G \
+  --sysctl kernel.shmall=2097152 \
+  --sysctl "kernel.sem=250 32000 100 128" \
+  --sysctl kernel.shmmax=8589934592 \
+  --sysctl kernel.shmmni=4096 \
+  --sysctl 'net.ipv4.conf.eth1.rp_filter=2' \
+  --sysctl 'net.ipv4.conf.eth2.rp_filter=2' \
+  --cap-add=SYS_RESOURCE \
+  --cap-add=NET_ADMIN \
+  --cap-add=SYS_NICE \
+  --cap-add=AUDIT_WRITE \
+  --cap-add=AUDIT_CONTROL \
+  --cap-add=NET_RAW \
+  --device=/dev/oracleoci/oraclevdd:/dev/asm-disk1 \
+  --device=/dev/oracleoci/oraclevde:/dev/asm-disk2 \
+  --restart=always \
+  --ulimit rtprio=99 \
+  --systemd=always \
+  --name racnodep3 \
+  localhost/oracle/database-rac:21c-slim
+
+  podman network disconnect podman racnodep3
+  podman network connect rac_pub1_nw --ip 10.0.20.172 racnodep3
+  podman network connect rac_priv1_nw --ip 192.168.17.172 racnodep3
+  podman network connect rac_priv2_nw --ip 192.168.18.172 racnodep3
+  podman start racnodep3
+  podman exec racnodep3 /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log"
+  ```
+  On successful addition of the node, you will see a message similar to the following:
+  ```bash
+  ========================================================
+  Oracle Database ORCLCDB3 is up and running on racnodep3.
+  ========================================================
+  ```
+
+## Section 8: Sample of Addition of Nodes to Oracle RAC Containers based on Oracle RAC Slim Image with NFS Storage Devices
+
+### Section 8.1: Sample of Addition of Nodes to Oracle RAC Containers based on Oracle RAC Slim Image Without Response File
+The following is an example of adding one more node to the existing 2-node Oracle RAC cluster using the Oracle RAC Slim Image and NFS storage devices, without user-defined response files.
+**Note**: Before creating the container, make sure you have edited the file `/scratch/common_scripts/podman/rac/envfile_racnodep3` and set the variables based on your environment.
+
+- Prepare a directory for the additional node:
+  ```bash
+  mkdir -p /scratch/rac/cluster01/node3
+  rm -rf /scratch/rac/cluster01/node3/*
+  ```
+- Create the additional Oracle RAC container:
+  ```bash
+  podman create -t -i \
+  --hostname racnodep3 \
+  --dns-search "example.info" \
+  --dns 10.0.20.25 \
+  --shm-size 4G \
+  --secret pwdsecret \
+  --secret keysecret \
+  --volume /scratch/rac/cluster01/node3:/u01 \
+  --volume /scratch/common_scripts/podman/rac/envfile_racnodep3:/etc/rac_env_vars/envfile \
+  --health-cmd "/bin/python3 /opt/scripts/startup/scripts/main.py --checkracstatus" \
+  --volume /scratch:/scratch \
+  --cpuset-cpus 0-1 \
+  --memory 16G \
+  --memory-swap 32G \
+  --sysctl kernel.shmall=2097152 \
+  --sysctl "kernel.sem=250 32000 100 128" \
+  --sysctl kernel.shmmax=8589934592 \
+  --sysctl kernel.shmmni=4096 \
+  --sysctl 'net.ipv4.conf.eth1.rp_filter=2' \
+  --sysctl 'net.ipv4.conf.eth2.rp_filter=2' \
+  --cap-add=SYS_RESOURCE \
+  --cap-add=NET_ADMIN \
+  --cap-add=SYS_NICE \
+  --cap-add=AUDIT_WRITE \
+  --cap-add=AUDIT_CONTROL \
+  --cap-add=NET_RAW \
+  --volume racstorage:/oradata \
+  --restart=always \
+  --ulimit rtprio=99 \
+  --systemd=always \
+  --name racnodep3 \
+  localhost/oracle/database-rac:21c-slim
+
+  podman network disconnect podman racnodep3
+  podman network connect rac_pub1_nw --ip 10.0.20.172 racnodep3
+  podman network connect rac_priv1_nw --ip 192.168.17.172 racnodep3
+  podman network connect rac_priv2_nw --ip 192.168.18.172 racnodep3
+  podman start racnodep3
+  podman exec racnodep3 /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log"
+
+  ========================================================
+  Oracle Database ORCLCDB3 is up and running on racnodep3.
+  ========================================================
+  ```
+
+## Section 9: Environment Variables for Oracle RAC on Containers
+Refer to [Environment Variables Explained for Oracle RAC on Podman Compose](../../../docs/ENVIRONMENTVARIABLES.md) for an explanation of all the environment variables related to Oracle RAC on containers. Change or set these environment variables according to your environment.
+
+## Cleanup
+Refer to [README](../../../docs/CLEANUP.md) for instructions on how to clean up the Oracle RAC Database Container Environment.
+
+## Support
+
+At the time of this release, Oracle RAC on Podman is supported on Oracle Linux 8.10 or later. To see current Linux support certifications, refer to the [Oracle RAC on Podman Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/install-and-upgrade.html).
+
+## License
+
+To download and run Oracle Grid and Database, regardless of whether inside or outside a container, you must download the binaries from the Oracle website and accept the license indicated on that page.
+
+All scripts and files hosted in this repository that are required to build the container images are, unless otherwise noted, released under a UPL 1.0 license.
+
+## Copyright
+
+Copyright (c) 2014-2025 Oracle and/or its affiliates.
\ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/blockdevices/envfile_racnodep1 b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/blockdevices/envfile_racnodep1 new file mode 100644 index 0000000000..9c9e217aed --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/blockdevices/envfile_racnodep1 @@ -0,0 +1,22 @@ +DNS_SERVERS=10.0.20.25 +CRS_PRIVATE_IP1=192.168.17.170 +CRS_PRIVATE_IP2=192.168.18.170 +CRS_NODES=pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip +GRID_HOME=/u01/app/21c/grid +GRID_BASE=/u01/app/grid +DB_HOME=/u01/app/oracle/product/21c/dbhome_1 +DB_BASE=/u01/app/oracle +INVENTORY=/u01/app/oraInventory +STAGING_SOFTWARE_LOC=/scratch/software/21c/goldimages +GRID_SW_ZIP_FILE=LINUX.X64_213000_grid_home.zip +DB_SW_ZIP_FILE=LINUX.X64_213000_db_home.zip +SCAN_NAME=racnodepc1-scan +INIT_SGA_SIZE=3G +INIT_PGA_SIZE=2G +INSTALL_NODE=racnodep1 +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret +DB_NAME=ORCLCDB +CRS_ASM_DEVICE_LIST=/dev/asm-disk1,/dev/asm-disk2 +OP_TYPE=setuprac +DB_SERVICE=service:soepdb \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/blockdevices/envfile_racnodep2 b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/blockdevices/envfile_racnodep2 new file mode 100644 index 0000000000..d13fea7e4a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/blockdevices/envfile_racnodep2 @@ -0,0 +1,22 @@ +DNS_SERVERS=10.0.20.25 +CRS_PRIVATE_IP1=192.168.17.171 +CRS_PRIVATE_IP2=192.168.18.171 +CRS_NODES=pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip +GRID_HOME=/u01/app/21c/grid +GRID_BASE=/u01/app/grid +DB_HOME=/u01/app/oracle/product/21c/dbhome_1 +DB_BASE=/u01/app/oracle +INVENTORY=/u01/app/oraInventory +STAGING_SOFTWARE_LOC=/scratch/software/21c/goldimages +GRID_SW_ZIP_FILE=LINUX.X64_213000_grid_home.zip +DB_SW_ZIP_FILE=LINUX.X64_213000_db_home.zip +SCAN_NAME=racnodepc1-scan +INIT_SGA_SIZE=3G +INIT_PGA_SIZE=2G +INSTALL_NODE=racnodep1 +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret +DB_NAME=ORCLCDB +CRS_ASM_DEVICE_LIST=/dev/asm-disk1,/dev/asm-disk2 +OP_TYPE=setuprac +DB_SERVICE=service:soepdb \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/blockdevices/envfile_racnodep3 b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/blockdevices/envfile_racnodep3 new file mode 100644 index 0000000000..7b2d518ed6 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/blockdevices/envfile_racnodep3 @@ -0,0 +1,24 @@ +DNS_SERVERS=10.0.20.25 +CRS_PRIVATE_IP1=192.168.17.172 +CRS_PRIVATE_IP2=192.168.18.172 +CRS_NODES=pubhost:racnodep3,viphost:racnodep3-vip +GRID_HOME=/u01/app/21c/grid +GRID_BASE=/u01/app/grid +DB_HOME=/u01/app/oracle/product/21c/dbhome_1 +DB_BASE=/u01/app/oracle +INVENTORY=/u01/app/oraInventory +STAGING_SOFTWARE_LOC=/scratch/software/21c/goldimages +GRID_SW_ZIP_FILE=LINUX.X64_213000_grid_home.zip +DB_SW_ZIP_FILE=LINUX.X64_213000_db_home.zip +SCAN_NAME=racnodepc1-scan +INIT_SGA_SIZE=3G +INIT_PGA_SIZE=2G 
+INSTALL_NODE=racnodep3 +EXISTING_CLS_NODE=racnodep1,racnodep2 +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret +DB_NAME=ORCLCDB +CRS_ASM_DEVICE_LIST=/dev/asm-disk1,/dev/asm-disk2 +OP_TYPE=racaddnode +DB_SERVICE=service:soepdb +IGNORE_CRS_PREREQS=true \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/nfsdevices/envfile_racnodep1 b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/nfsdevices/envfile_racnodep1 new file mode 100644 index 0000000000..fac508e931 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/nfsdevices/envfile_racnodep1 @@ -0,0 +1,24 @@ +DNS_SERVERS=10.0.20.25 +CRS_PRIVATE_IP1=192.168.17.170 +CRS_PRIVATE_IP2=192.168.18.170 +CRS_NODES=pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip +GRID_HOME=/u01/app/21c/grid +GRID_BASE=/u01/app/grid +DB_HOME=/u01/app/oracle/product/21c/dbhome_1 +DB_BASE=/u01/app/oracle +INVENTORY=/u01/app/oraInventory +STAGING_SOFTWARE_LOC=/scratch/software/21c/goldimages +GRID_SW_ZIP_FILE=LINUX.X64_213000_grid_home.zip +DB_SW_ZIP_FILE=LINUX.X64_213000_db_home.zip +SCAN_NAME=racnodepc1-scan +INIT_SGA_SIZE=3G +INIT_PGA_SIZE=2G +INSTALL_NODE=racnodep1 +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret +DB_NAME=ORCLCDB +CRS_ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img +CRS_ASM_DISCOVERY_STRING=/oradata/asm_disk* +ASM_ON_NAS=True +OP_TYPE=setuprac +DB_SERVICE=service:soepdb \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/nfsdevices/envfile_racnodep2 b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/nfsdevices/envfile_racnodep2 new file mode 100644 index 0000000000..75682d1365 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/nfsdevices/envfile_racnodep2 @@ -0,0 +1,24 @@ +DNS_SERVERS=10.0.20.25 +CRS_PRIVATE_IP1=192.168.17.171 +CRS_PRIVATE_IP2=192.168.18.171 +CRS_NODES=pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip +GRID_HOME=/u01/app/21c/grid +GRID_BASE=/u01/app/grid +DB_HOME=/u01/app/oracle/product/21c/dbhome_1 +DB_BASE=/u01/app/oracle +INVENTORY=/u01/app/oraInventory +STAGING_SOFTWARE_LOC=/scratch/software/21c/goldimages +GRID_SW_ZIP_FILE=LINUX.X64_213000_grid_home.zip +DB_SW_ZIP_FILE=LINUX.X64_213000_db_home.zip +SCAN_NAME=racnodepc1-scan +INIT_SGA_SIZE=3G +INIT_PGA_SIZE=2G +INSTALL_NODE=racnodep1 +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret +DB_NAME=ORCLCDB +CRS_ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img +CRS_ASM_DISCOVERY_STRING=/oradata/asm_disk* +ASM_ON_NAS=True +OP_TYPE=setuprac +DB_SERVICE=service:soepdb \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/nfsdevices/envfile_racnodep3 b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/nfsdevices/envfile_racnodep3 new file mode 100644 index 0000000000..4f79c0edb2 --- /dev/null +++ 
b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withoutresponsefiles/nfsdevices/envfile_racnodep3 @@ -0,0 +1,26 @@ +DNS_SERVERS=10.0.20.25 +CRS_PRIVATE_IP1=192.168.17.172 +CRS_PRIVATE_IP2=192.168.18.172 +CRS_NODES=pubhost:racnodep3,viphost:racnodep3-vip +GRID_HOME=/u01/app/21c/grid +GRID_BASE=/u01/app/grid +DB_HOME=/u01/app/oracle/product/21c/dbhome_1 +DB_BASE=/u01/app/oracle +INVENTORY=/u01/app/oraInventory +STAGING_SOFTWARE_LOC=/scratch/software/21c/goldimages +GRID_SW_ZIP_FILE=LINUX.X64_213000_grid_home.zip +DB_SW_ZIP_FILE=LINUX.X64_213000_db_home.zip +SCAN_NAME=racnodepc1-scan +INIT_SGA_SIZE=3G +INIT_PGA_SIZE=2G +INSTALL_NODE=racnodep3 +EXISTING_CLS_NODE=racnodep1,racnodep2 +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret +DB_NAME=ORCLCDB +CRS_ASM_DEVICE_LIST=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img +CRS_ASM_DISCOVERY_STRING=/oradata/asm_disk* +ASM_ON_NAS=True +OP_TYPE=racaddnode +DB_SERVICE=service:soepdb +IGNORE_CRS_PREREQS=true \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/dbca_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/dbca_21c.rsp new file mode 100644 index 0000000000..c8b0e201e2 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/dbca_21c.rsp @@ -0,0 +1,58 @@ +responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v21.0.0 +gdbName=ORCLCDB +sid=ORCLCDB +databaseConfigType=RAC +RACOneNodeServiceName= +policyManaged=false +managementPolicy= +createServerPool=false +serverPoolName= +cardinality= +force=false +pqPoolName= +pqCardinality= +createAsContainerDatabase=true +numberOfPDBs=1 +pdbName=ORCLPDB +useLocalUndoForPDBs=true +pdbAdminPassword=ORacle__21c +nodelist=racnodep3 +templateName={ORACLE_HOME}/assistants/dbca/templates/General_Purpose.dbc +sysPassword=ORacle__21c +systemPassword=ORacle__21c +oracleHomeUserPassword= +emConfiguration= +runCVUChecks=true +dbsnmpPassword=ORacle__21c +omsHost= +omsPort= +emUser= +emPassword= +dvConfiguration=false +dvUserName= +dvUserPassword= +dvAccountManagerName= +dvAccountManagerPassword= +olsConfiguration=false +datafileJarLocation={ORACLE_HOME}/assistants/dbca/templates/ +datafileDestination=+DATA/{DB_UNIQUE_NAME}/ +recoveryAreaDestination= +storageType=ASM +diskGroupName=+DATA/{DB_UNIQUE_NAME}/ +asmsnmpPassword= +recoveryGroupName= +characterSet=AL32UTF8 +nationalCharacterSet=AL16UTF16 +registerWithDirService= +dirServiceUserName= +dirServicePassword= +walletPassword= +listeners=LISTENER +variablesFile= +variables=DB_UNIQUE_NAME=ORCLCDB,ORACLE_BASE=/u01/app/oracle,PDB_NAME=ORCLPDB,DB_NAME=ORCLCDB,ORACLE_HOME=/u01/app/oracle/product/21.3.0/dbhome_1,SID=ORCLCDB +initParams=audit_trail=none,audit_sys_operations=false,remote_login_passwordfile=exclusive +sampleSchema=false +memoryPercentage=40 +databaseType=MULTIPURPOSE +automaticMemoryManagement=false +totalMemory=5000 \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/envfile_racnodep3 b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/envfile_racnodep3 new file mode 100644 index 0000000000..06d095a250 --- 
/dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/envfile_racnodep3 @@ -0,0 +1,17 @@ +CRS_PRIVATE_IP1=192.168.17.172 +CRS_PRIVATE_IP2=192.168.18.172 +GRID_HOME=/u01/app/21c/grid +DEFAULT_GATEWAY=172.20.1.1 +COPY_GRID_SOFTWARE=true +STAGING_SOFTWARE_LOC=/scratch/software/21c/goldimages +GRID_SW_ZIP_FILE=LINUX.X64_213000_grid_home.zip +COPY_DB_SOFTWARE=true +DB_SW_ZIP_FILE=LINUX.X64_213000_db_home.zip +OP_TYPE=setuprac +GRID_RESPONSE_FILE=/tmp/grid_21c.rsp +DBCA_RESPONSE_FILE=/tmp/dbca_21c.rsp +OP_TYPE=racaddnode +DB_NAME=ORCLCDB +EXISTING_CLS_NODE=racnodep1,racnodep2 +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/grid_setup_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/grid_setup_21c.rsp new file mode 100644 index 0000000000..7165f956ef --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/grid_setup_21c.rsp @@ -0,0 +1,63 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 +INVENTORY_LOCATION=/u01/app/oraInventory +installOption=CRS_CONFIG +ORACLE_BASE=/u01/app/grid +clusterUsage=RAC +zeroDowntimeGIPatching= +skipDriverUpdate= +OSDBA=asmdba +OSOPER=asmoper +OSASM=asmadmin +scanType= +scanClientDataFile= +scanName=racnodepc1-scan +scanPort=1521 +configureAsExtendedCluster= +clusterName=racnode-c +configureGNS= +configureDHCPAssignedVIPs= +gnsSubDomain= +gnsVIPAddress= +sites= +clusterNodes=racnodep3:racnodep3-vip:HUB +networkInterfaceList=eth0:172.20.1.0:1,eth1:192.168.17.0:5,eth2:192.168.18.0:5 +storageOption= +votingFilesLocations= +ocrLocations= +clientDataFile= +useIPMI= +bmcBinpath= +bmcUsername= +bmcPassword= +sysasmPassword=ORacle__21c +diskGroupName=DATA +redundancy=EXTERNAL +auSize= +failureGroups= +disksWithFailureGroupNames=/dev/asm-disk1,,/dev/asm-disk2, +diskList=/dev/asm-disk1,/dev/asm-disk2 +quorumFailureGroupNames= +diskString=/dev/asm* +asmsnmpPassword=ORacle__21c +configureAFD=false +ignoreDownNodes=false +configureBackupDG= +backupDGName= +backupDGRedundancy= +backupDGAUSize= +backupDGFailureGroups= +backupDGDisksWithFailureGroupNames= +backupDGDiskList= +backupDGQuorumFailureGroups= +managementOption= +omsHost= +omsPort= +emAdminUser= +emadminPassword= +executeRootScript=false +configMethod=ROOT +sudoPath= +sudoUserName= +batchInfo= +nodesToDelete= +enableAutoFixup= \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/podman-compose.yml new file mode 100644 index 0000000000..399fba7b78 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/addition/podman-compose.yml @@ -0,0 +1,77 @@ +--- +version: "3" +networks: + rac_pub1_nw: + name: ${PUBLIC_NETWORK_NAME} + external: true + rac_priv1_nw: + name: ${PRIVATE1_NETWORK_NAME} + external: true + rac_priv2_nw: + name: ${PRIVATE2_NETWORK_NAME} + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} 
+services: + racnodep3: + container_name: ${RACNODE3_CONTAINER_NAME} + hostname: ${RACNODE3_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - /scratch/rac/cluster01/node3:/u01 + - /scratch:/scratch + - ${DB_RESPONSE_FILE}:/tmp/dbca_21c.rsp + - ${GRID_RESPONSE_FILE}:/tmp/grid_21c.rsp + environment: + PRIVATE_IP1_LIST: ${RACNODE3_PRIVATE_IP1_LIST} + PRIVATE_IP2_LIST: ${RACNODE3_PRIVATE_IP2_LIST} + DEFAULT_GATEWAY: ${DEFAULT_GATEWAY} + GRID_HOME: /u01/app/21c/grid + COPY_GRID_SOFTWARE: true + STAGING_SOFTWARE_LOC: ${STAGING_SOFTWARE_LOC} + GRID_SW_ZIP_FILE: LINUX.X64_213000_grid_home.zip + COPY_DB_SOFTWARE: true + DB_SW_ZIP_FILE: LINUX.X64_213000_db_home.zip + GRID_RESPONSE_FILE: /tmp/grid_21c.rsp + DBCA_RESPONSE_FILE: /tmp/dbca_21c.rsp + OP_TYPE: racaddnode + EXISTING_CLS_NODE: ${EXISTING_CLS_NODE} + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD-SHELL", "if [ `cat /tmp/orod/oracle_rac_setup.log | grep -c 'ORACLE RAC DATABASE IS READY TO USE'` -ge 1 ]; then exit 0; else exit 1; fi"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/envfile_racnodep1 b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/envfile_racnodep1 new file mode 100644 index 0000000000..8ac6f400f8 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/envfile_racnodep1 @@ -0,0 +1,15 @@ +DNS_SERVERS=10.0.20.25 +CRS_PRIVATE_IP1=192.168.17.170 +CRS_PRIVATE_IP2=192.168.18.170 +GRID_HOME=/u01/app/21c/grid +STAGING_SOFTWARE_LOC=/scratch/software/21c/goldimages +GRID_SW_ZIP_FILE=LINUX.X64_213000_grid_home.zip +DB_SW_ZIP_FILE=LINUX.X64_213000_db_home.zip +OP_TYPE=setuprac +SCAN_NAME=racnodepc1-scan +INSTALL_NODE=racnodep1 +GRID_RESPONSE_FILE=/tmp/grid_21c.rsp +DBCA_RESPONSE_FILE=/tmp/dbca_21c.rsp +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret +DB_SERVICE=service:soepdb \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/envfile_racnodep2 b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/envfile_racnodep2 new file mode 100644 index 0000000000..7a9e3e570b --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/envfile_racnodep2 @@ -0,0 +1,15 @@ +DNS_SERVERS=10.0.20.25 +CRS_PRIVATE_IP1=192.168.17.171 +CRS_PRIVATE_IP2=192.168.18.171 +GRID_HOME=/u01/app/21c/grid +STAGING_SOFTWARE_LOC=/scratch/software/21c/goldimages +GRID_SW_ZIP_FILE=LINUX.X64_213000_grid_home.zip +DB_SW_ZIP_FILE=LINUX.X64_213000_db_home.zip +OP_TYPE=setuprac +SCAN_NAME=racnodepc1-scan 
+INSTALL_NODE=racnodep1 +GRID_RESPONSE_FILE=/tmp/grid_21c.rsp +DBCA_RESPONSE_FILE=/tmp/dbca_21c.rsp +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret +DB_SERVICE=service:soepdb \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/grid_setup_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/grid_setup_21c.rsp new file mode 100644 index 0000000000..c7ffe19d4a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/blockdevices/grid_setup_21c.rsp @@ -0,0 +1,64 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 +INVENTORY_LOCATION=/u01/app/oraInventory +oracle.install.option=CRS_CONFIG +ORACLE_BASE=/u01/app/grid +oracle.install.asm.OSDBA=dba +oracle.install.asm.OSOPER= +oracle.install.asm.OSASM=asmadmin +oracle.install.crs.config.scanType=LOCAL_SCAN +oracle.install.crs.config.SCANClientDataFile= +oracle.install.crs.config.gpnp.scanName=racnodepc1-scan +oracle.install.crs.config.gpnp.scanPort=1521 +oracle.install.crs.config.ClusterConfiguration=STANDALONE +oracle.install.crs.config.configureAsExtendedCluster=false +oracle.install.crs.config.memberClusterManifestFile= +oracle.install.crs.config.clusterName=rac01cluster +oracle.install.crs.config.gpnp.configureGNS= +oracle.install.crs.config.autoConfigureClusterNodeVIP=false +oracle.install.crs.config.gpnp.gnsOption= +oracle.install.crs.config.gpnp.gnsClientDataFile= +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= +oracle.install.crs.config.sites= +oracle.install.crs.config.clusterNodes=racnodep1:racnodep1-vip:HUB,racnodep2:racnodep2-vip:HUB +oracle.install.crs.config.networkInterfaceList=eth0:10.0.20.0:1,eth1:192.168.17.0:5,eth2:192.168.18.0:5 +oracle.install.asm.configureGIMRDataDG=false +oracle.install.crs.config.storageOption= +oracle.install.crs.config.useIPMI=false +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +oracle.install.asm.storageOption=ASM +oracle.install.asmOnNAS.ocrLocation= +oracle.install.asmOnNAS.configureGIMRDataDG=false +oracle.install.asmOnNAS.gimrLocation= +oracle.install.asm.SYSASMPassword=ORacle__21c +oracle.install.asm.diskGroup.name=DATA +oracle.install.asm.diskGroup.redundancy=EXTERNAL +oracle.install.asm.diskGroup.AUSize=4 +oracle.install.asm.diskGroup.FailureGroups= +oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-disk1,,/dev/asm-disk2, +oracle.install.asm.diskGroup.disks=/dev/asm-disk1,/dev/asm-disk2 +oracle.install.asm.diskGroup.quorumFailureGroupNames= +oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm* +oracle.install.asm.monitorPassword=ORacle__21c +oracle.install.asm.gimrDG.name= +oracle.install.asm.gimrDG.redundancy= +oracle.install.asm.gimrDG.AUSize=1 +oracle.install.asm.gimrDG.FailureGroups= +oracle.install.asm.gimrDG.disksWithFailureGroupNames= +oracle.install.asm.gimrDG.disks= +oracle.install.asm.gimrDG.quorumFailureGroupNames= +oracle.install.asm.configureAFD=false +oracle.install.crs.configureRHPS=false +oracle.install.crs.config.ignoreDownNodes=false +oracle.install.config.managementOption=NONE +oracle.install.config.omsHost= +oracle.install.config.omsPort=0 +oracle.install.config.emAdminUser= +oracle.install.config.emAdminPassword= +oracle.install.crs.rootconfig.executeRootScript=false 
+oracle.install.crs.rootconfig.configMethod=ROOT +oracle.install.crs.rootconfig.sudoPath= +oracle.install.crs.rootconfig.sudoUserName= +oracle.install.crs.config.batchinfo= +oracle.install.crs.deleteNode.nodes= \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/dbca_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/dbca_21c.rsp new file mode 100644 index 0000000000..e36b5e55d7 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/dbca_21c.rsp @@ -0,0 +1,58 @@ +responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v21.0.0 +gdbName=ORCLCDB +sid=ORCLCDB +databaseConfigType=RAC +RACOneNodeServiceName= +policyManaged=false +managementPolicy= +createServerPool=false +serverPoolName= +cardinality= +force=false +pqPoolName= +pqCardinality= +createAsContainerDatabase=true +numberOfPDBs=1 +pdbName=ORCLPDB +useLocalUndoForPDBs=true +pdbAdminPassword=ORacle__21c +nodelist=racnodep1,racnodep2 +templateName={ORACLE_HOME}/assistants/dbca/templates/General_Purpose.dbc +sysPassword=ORacle__21c +systemPassword=ORacle__21c +oracleHomeUserPassword= +emConfiguration= +runCVUChecks=true +dbsnmpPassword=ORacle__21c +omsHost= +omsPort= +emUser= +emPassword= +dvConfiguration=false +dvUserName= +dvUserPassword= +dvAccountManagerName= +dvAccountManagerPassword= +olsConfiguration=false +datafileJarLocation={ORACLE_HOME}/assistants/dbca/templates/ +datafileDestination=+DATA/{DB_UNIQUE_NAME}/ +recoveryAreaDestination= +storageType=ASM +diskGroupName=+DATA/{DB_UNIQUE_NAME}/ +asmsnmpPassword= +recoveryGroupName= +characterSet=AL32UTF8 +nationalCharacterSet=AL16UTF16 +registerWithDirService= +dirServiceUserName= +dirServicePassword= +walletPassword= +listeners=LISTENER +variablesFile= +variables=DB_UNIQUE_NAME=ORCLCDB,ORACLE_BASE=/u01/app/oracle,PDB_NAME=ORCLPDB,DB_NAME=ORCLCDB,ORACLE_HOME=/u01/app/oracle/product/21c/dbhome_1,SID=ORCLCDB +initParams=audit_trail=none,audit_sys_operations=false,remote_login_passwordfile=exclusive +sampleSchema=false +memoryPercentage=40 +databaseType=MULTIPURPOSE +automaticMemoryManagement=false +totalMemory=5000 \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/nfsdevices/envfile_racnodep1 b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/nfsdevices/envfile_racnodep1 new file mode 100644 index 0000000000..69944faa5e --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/nfsdevices/envfile_racnodep1 @@ -0,0 +1,17 @@ +DNS_SERVERS=10.0.20.25 +CRS_PRIVATE_IP1=192.168.17.170 +CRS_PRIVATE_IP2=192.168.18.170 +GRID_HOME=/u01/app/21c/grid +DEFAULT_GATEWAY=10.0.20.1 +STAGING_SOFTWARE_LOC=/scratch/software/21c/goldimages +GRID_SW_ZIP_FILE=LINUX.X64_213000_grid_home.zip +DB_SW_ZIP_FILE=LINUX.X64_213000_db_home.zip +OP_TYPE=setuprac +SCAN_NAME=racnodepc1-scan +INSTALL_NODE=racnodep1 +GRID_RESPONSE_FILE=/tmp/grid_21c.rsp +DBCA_RESPONSE_FILE=/tmp/dbca_21c.rsp +ASM_ON_NAS=True +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret +DB_SERVICE=service:soepdb \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/nfsdevices/envfile_racnodep2 
b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/nfsdevices/envfile_racnodep2 new file mode 100644 index 0000000000..360aefdede --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/nfsdevices/envfile_racnodep2 @@ -0,0 +1,17 @@ +DNS_SERVERS=10.0.20.25 +CRS_PRIVATE_IP1=192.168.17.171 +CRS_PRIVATE_IP2=192.168.18.171 +GRID_HOME=/u01/app/21c/grid +DEFAULT_GATEWAY=10.0.20.1 +STAGING_SOFTWARE_LOC=/scratch/software/21c/goldimages +GRID_SW_ZIP_FILE=LINUX.X64_213000_grid_home.zip +DB_SW_ZIP_FILE=LINUX.X64_213000_db_home.zip +OP_TYPE=setuprac +SCAN_NAME=racnodepc1-scan +INSTALL_NODE=racnodep1 +GRID_RESPONSE_FILE=/tmp/grid_21c.rsp +DBCA_RESPONSE_FILE=/tmp/dbca_21c.rsp +ASM_ON_NAS=True +DB_PWD_FILE=pwdsecret +PWD_KEY=keysecret +DB_SERVICE=service:soepdb \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/nfsdevices/grid_setup_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/nfsdevices/grid_setup_21c.rsp new file mode 100644 index 0000000000..6ecd64dc55 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/docs/rac-container/racslimimage/withresponsefiles/nfsdevices/grid_setup_21c.rsp @@ -0,0 +1,64 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 +INVENTORY_LOCATION=/u01/app/oraInventory +oracle.install.option=CRS_CONFIG +ORACLE_BASE=/u01/app/grid +oracle.install.asm.OSDBA=dba +oracle.install.asm.OSOPER= +oracle.install.asm.OSASM=asmadmin +oracle.install.crs.config.scanType=LOCAL_SCAN +oracle.install.crs.config.SCANClientDataFile= +oracle.install.crs.config.gpnp.scanName=racnodepc1-scan +oracle.install.crs.config.gpnp.scanPort=1521 +oracle.install.crs.config.ClusterConfiguration=STANDALONE +oracle.install.crs.config.configureAsExtendedCluster=false +oracle.install.crs.config.memberClusterManifestFile= +oracle.install.crs.config.clusterName=rac01cluster +oracle.install.crs.config.gpnp.configureGNS= +oracle.install.crs.config.autoConfigureClusterNodeVIP=false +oracle.install.crs.config.gpnp.gnsOption= +oracle.install.crs.config.gpnp.gnsClientDataFile= +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= +oracle.install.crs.config.sites= +oracle.install.crs.config.clusterNodes=racnodep1:racnodep1-vip:HUB,racnodep2:racnodep2-vip:HUB +oracle.install.crs.config.networkInterfaceList=eth0:10.0.20.0:1,eth1:192.168.17.0:5,eth2:192.168.18.0:5 +oracle.install.asm.configureGIMRDataDG=false +oracle.install.crs.config.storageOption= +oracle.install.crs.config.useIPMI=false +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +oracle.install.asm.storageOption=ASM +oracle.install.asmOnNAS.ocrLocation= +oracle.install.asmOnNAS.configureGIMRDataDG=false +oracle.install.asmOnNAS.gimrLocation= +oracle.install.asm.SYSASMPassword=ORacle__21c +oracle.install.asm.diskGroup.name=DATA +oracle.install.asm.diskGroup.redundancy=EXTERNAL +oracle.install.asm.diskGroup.AUSize=4 +oracle.install.asm.diskGroup.FailureGroups= +oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oradata/asm_disk01.img,,/oradata/asm_disk02.img,,/oradata/asm_disk03.img,,/oradata/asm_disk04.img,,/oradata/asm_disk05.img 
+oracle.install.asm.diskGroup.disks=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img +oracle.install.asm.diskGroup.quorumFailureGroupNames= +oracle.install.asm.diskGroup.diskDiscoveryString=/oradata/asm_disk* +oracle.install.asm.monitorPassword=ORacle__21c +oracle.install.asm.gimrDG.name= +oracle.install.asm.gimrDG.redundancy= +oracle.install.asm.gimrDG.AUSize=1 +oracle.install.asm.gimrDG.FailureGroups= +oracle.install.asm.gimrDG.disksWithFailureGroupNames= +oracle.install.asm.gimrDG.disks= +oracle.install.asm.gimrDG.quorumFailureGroupNames= +oracle.install.asm.configureAFD=false +oracle.install.crs.configureRHPS=false +oracle.install.crs.config.ignoreDownNodes=false +oracle.install.config.managementOption=NONE +oracle.install.config.omsHost= +oracle.install.config.omsPort=0 +oracle.install.config.emAdminUser= +oracle.install.config.emAdminPassword= +oracle.install.crs.rootconfig.executeRootScript=false +oracle.install.crs.rootconfig.configMethod=ROOT +oracle.install.crs.rootconfig.sudoPath= +oracle.install.crs.rootconfig.sudoUserName= +oracle.install.crs.config.batchinfo= +oracle.install.crs.deleteNode.nodes= \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/README.md b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/README.md index 86c9ca297b..ad4b964271 100644 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/README.md +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/README.md @@ -15,8 +15,11 @@ Example of how to create a 2-node RAC based on Docker Compose. You can create a racpodmancompose ---------------- -Example of how to create a 2-node RAC based on Podman Compose. You can create a single-node RAC using Podman Compose based on your environment. For details, please refer to [README.MD of racpodmancompose](./racpodmancompose/README.md). +Example of how to create 2 node Oracle RAC Setup on **Podman Compose** using Oracle RAC image or RAC slim image, with or without User Defined Response files. You can also create multinode rac using responsefiles based on your environment. + +Refer [Podman Compose using Oracle RAC container image](./rac-compose/racimage/README.md) for details in order to setup 2 node Oracle RAC Setup on Podman Compose using Oracle RAC Container Image. +Refer [Podman Compose using Oracle RAC slim image](./rac-compose/racslimimage/README.md) for details in order to setup 2 node Oracle RAC Setup on Podman Compose using Oracle RAC Slim Image. Copyright --------- -Copyright (c) 2014-2024 Oracle and/or its affiliates. All rights reserved. \ No newline at end of file +Copyright (c) 2014-2025 Oracle and/or its affiliates. All rights reserved. 
\ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/README.md b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/README.md index 39cea6e80a..51b05f9850 100644 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/README.md +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/README.md @@ -1,35 +1,14 @@ -# Example of how to create a patched database image - +# Example of how to create an Oracle RAC Database Container Patched Image ============================================= - -- [Example of how to create a patched database image](#example-of-how-to-create-a-patched-database-image) - - [Build Oracle RAC Slim and Base Image](#build-oracle-rac-slim-and-base-image) - - [The patch structure](#the-patch-structure) - - [Installing the patch](#installing-the-patch) - - [Copyright](#copyright) - -## Build Oracle RAC Slim and Base Image - -- Create RAC slim image based on the version you want build the patched images. This image will be used during multi-stage build to minimize the size requirements. - - Change directory to `/docker-images/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles` - - Build the RAC slim image - - ```bash - ./buildContainerImage.sh -v -i -p -o '--build-arg BASE_OL_IMAGE=oraclelinux:8 --build-arg SLIMMING=true' - - Example: - ./buildContainerImage.sh -v 21.3.0 -i -p -o '--build-arg BASE_OL_IMAGE=oraclelinux:8 --build-arg SLIMMING=true' - ``` - -**Note**: For Docker, you need to change `BASE_OL_IMAGE` to `oraclelinux:7-slim`. - -- If you have not already built the base Oracle RAC image, you need to build it by following the [README.md](../../../OracleRealApplicationClusters/README.md). Once you have built the base Oracle RAC image, you can create a patched version of it. In order to build such an image you will have to provide the patch zip file. +## Pre-requisites +After you build your base Oracle RAC image following the [README.md](../../../OracleRealApplicationClusters/README.md#building-oracle-rac-database-container-image), it is mandatory to create **Oracle RAC Slim image** following [README.md](../../../OracleRealApplicationClusters/README.md#building-oracle-rac-database-container-slim-image), then you can create a patched version of it. +To build a patched image, you must provide the patch zip file. **Notes:** -- Some patches require a newer version of `OPatch`, the Oracle Interim Patch Installer utility. It is highly recommended, you always update opatch with the new version. -- You can only patch 19.3.0 and above using this script. -- The scripts will automatically install a newer OPatch version, if provided. +* Some patches require a newer version of `OPatch`, the Oracle Interim Patch Installer utility. Oracle highly recommends that you always update opatch with the new version. +* You can only patch releases 19.3.0 or later using this script. +* The scripts automatically install a newer OPatch version, if provided. ## The patch structure @@ -52,19 +31,19 @@ The scripts used in this example rely on following directory structure: p6880880*.zip (optional, OPatch zip file) ``` -**patches:** The working directory for patch installation. -**grid:**: The directory containing patches(Release Update) for Oracle Grid Infrastructure. -**oracle**: The directory containing patches(Release Update) for Oracle RAC Home and Database -**001**: The directory containing the patch(Release Update) zip file. 
+**patches:** The working directory for patch installation.
+**grid**: The directory containing patches (Release Update) for Oracle Grid Infrastructure.
+**oracle**: The directory containing patches (Release Update) for Oracle Real Application Clusters (Oracle RAC) and Oracle Database.
+**001**: The directory containing the patch (Release Update) zip file.
 **00N**: The second, third, ... directory containing the second, third, ... patch zip file.
-This is useful if you want to install multiple patches at once. The script will go into each of these directories in the numbered order and apply the patches.
-**Important**: It is up to the user to guarantee the patch order, if any.
+These directories are useful if you want to install multiple patches at once. The script will go into each of these directories in the numbered order and apply the patches.
+**Important**: It is up to you to guarantee the patch order, if any order is required.

-### Installing the patch
+## Installing the patch

-- If you have multiple patches to be applied at once, add more sub directories following the numbering scheme of 002, 003, 004, 005, 00N.
-- If you have a new version of OPatch, put the OPatch zip file directly into the patches directory. Do not change the name of the zip file!
-- A utility script named `buildPatchedContainerImage.sh` has been provided to assist with building the patched image:
+* If you have multiple patches that you want to apply at once, then add more subdirectories following the numbering scheme of 002, 003, 004, 005, 00_N_.
+* If you have a new version of OPatch, then put the OPatch zip file directly into the patches directory. **Do not change the name of the OPatch zip file**.
+* A utility script named `buildPatchedContainerImage.sh` is provided to assist with building the patched image:

  ```bash
  [oracle@localhost applypatch]# ./buildPatchedContainerImage.sh -h
@@ -77,17 +56,29 @@ This is useful if you want to install multiple patches at once. The script will
  -o: passes on container build option
  -p: patch label to be used for the tag
  ```
-
- - Following is an example of building patched image using 21.3.0. Note that `BASE_RAC_IMAGE=oracle/database-rac:21.3.0` set to 21.3.0. You need to set BASE_RAC_IMAGE based on your environment.
+* The following is an example of building a patched image using 21.3.0. Note that `BASE_RAC_IMAGE=oracle/database-rac:21.3.0` is set to 21.3.0. You must set BASE_RAC_IMAGE and RAC_SLIM_IMAGE based on your environment.

  ```bash
-  ./buildPatchedContainerImage.sh -v 21.3.0 -p 21.7.0 -o '--build-arg BASE_RAC_IMAGE=localhost/oracle/database-rac:21.3.0 --build-arg RAC_SLIM_IMAGE=localhost/oracle/database-rac:21.3.0-slim'
+  # ./buildPatchedContainerImage.sh -v 21.3.0 -p 21.16.0 -o '--build-arg BASE_RAC_IMAGE=localhost/oracle/database-rac:21.3.0 --build-arg RAC_SLIM_IMAGE=localhost/oracle/database-rac:21.3.0-slim'
  ```

-**Important:** It is not supported to apply patches on already existing databases. You will have to create a new, patched database container image. You can use the PDB unplug/plug functionality to carry over your PDB into the patched container database!
+Logs-
+```bash
+  Oracle Database container image for Real Application Clusters (RAC) version 21.3.0 is ready to be extended:
+
+    --> oracle/database-rac:21.3.0-21.16.0
+
+  Build completed in 1419 seconds.
+```
+Once the Oracle RAC patched image is built, retag it as shown below; it is referenced as 21c in this [README](../../README.md) documentation.
+```bash +podman tag localhost/oracle/database-rac:21.3.0-21.16.0 localhost/oracle/database-rac:21c +``` + +**Important:** It is not supported to apply patches on already existing databases. You must create a new, patched database container image. You can use the PDB unplug/plug functionality to carry over your PDB into the patched container database. -**Notes**: If you are trying to patch the image on OL8 on PODMAN host, you must have `podman-docker` package installed on your PODMAN host. +**Notes**: If you are trying to patch the image on Oracle Linux 8 (OL8) on the PODMAN host, then you must have the `podman-docker` package installed on your PODMAN host. ## Copyright -Copyright (c) 2014-2024 Oracle and/or its affiliates. All rights reserved. +Copyright (c) 2014-2025 Oracle and/or its affiliates. All rights reserved. diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/buildPatchedContainerImage.sh b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/buildPatchedContainerImage.sh index b9a2d113ec..7779e9f818 100755 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/buildPatchedContainerImage.sh +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/buildPatchedContainerImage.sh @@ -11,7 +11,6 @@ It builds a patched RAC container image Parameters: -v: version to build - Choose one of: $(for i in $(ls -d */); do echo -n "${i%%/} "; done) -o: passes on container build option -p: patch label to be used for the tag @@ -32,8 +31,11 @@ if [ "$#" -eq 0 ]; then fi # Parameters +# shellcheck disable=SC2034 ENTERPRISE=0 +# shellcheck disable=SC2034 STANDARD=0 +# shellcheck disable=SC2034 LATEST="latest" VERSION='x' PATCHLABEL="patch" @@ -68,26 +70,28 @@ done IMAGE_NAME="oracle/database-rac:$VERSION-$PATCHLABEL" # Go into version folder +# shellcheck disable=SC2164 cd latest # Proxy settings PROXY_SETTINGS="" +# shellcheck disable=SC2154 if [ "${http_proxy}" != "" ]; then PROXY_SETTINGS="$PROXY_SETTINGS --build-arg http_proxy=${http_proxy}" fi - +# shellcheck disable=SC2154 if [ "${https_proxy}" != "" ]; then PROXY_SETTINGS="$PROXY_SETTINGS --build-arg https_proxy=${https_proxy}" fi - +# shellcheck disable=SC2154 if [ "${ftp_proxy}" != "" ]; then PROXY_SETTINGS="$PROXY_SETTINGS --build-arg ftp_proxy=${ftp_proxy}" fi - +# shellcheck disable=SC2154 if [ "${no_proxy}" != "" ]; then PROXY_SETTINGS="$PROXY_SETTINGS --build-arg no_proxy=${no_proxy}" fi - +# shellcheck disable=SC2154 if [ "$PROXY_SETTINGS" != "" ]; then echo "Proxy settings were found and will be used during the build." fi @@ -99,15 +103,23 @@ echo "Building image '$IMAGE_NAME' ..." # BUILD THE IMAGE (replace all environment variables) BUILD_START=$(date '+%s') -docker build --force-rm=true --no-cache=true $DOCKEROPS $PROXY_SETTINGS -t $IMAGE_NAME -f Dockerfile . || { +docker build --no-cache=true $DOCKEROPS $PROXY_SETTINGS -t env -f ContainerfileEnv . +# shellcheck disable=SC2046 +docker cp $(docker create --name env-070125 --rm env):/tmp/.env ./ +# shellcheck disable=SC2046 +docker build --no-cache=true $DOCKEROPS \ + --build-arg GRID_HOME=$(grep GRID_HOME .env | cut -d '=' -f2) \ + --build-arg DB_HOME=$(grep DB_HOME .env | cut -d '=' -f2) $PROXY_SETTINGS -t $IMAGE_NAME -f Containerfile . || { echo "There was an error building the image." exit 1 } +docker rmi -f env-070125 + BUILD_END=$(date '+%s') BUILD_ELAPSED=`expr $BUILD_END - $BUILD_START` echo "" - +# shellcheck disable=SC2320 if [ $? 
-eq 0 ]; then cat << EOF Oracle Database container image for Real Application Clusters (RAC) version $VERSION is ready to be extended: diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/latest/Dockerfile b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/latest/Containerfile similarity index 55% rename from OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/latest/Dockerfile rename to OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/latest/Containerfile index b71c0ee04c..46cd29abee 100644 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/latest/Dockerfile +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/latest/Containerfile @@ -5,7 +5,7 @@ # ORACLE DOCKERFILES PROJECT # -------------------------- # This is the Dockerfile for a patched Oracle Database 21c Release 3 -# +# # REQUIREMETNS FOR THIS IMAGE # ---------------------------------- # The oracle/rac-database:21.3.0 image has to exist @@ -13,28 +13,19 @@ # HOW TO BUILD THIS IMAGE # ----------------------- # Put the downloaded patch(es) into the sub folders patch/0NN -# Run: -# $ docker build -f Dockerfile -t oracle/rac-database:21.3.0- . +# Run: +# $ docker build -f Dockerfile -t oracle/rac-database:21.3.0- . # -# hadolint global ignore=DL3006,DL3025 -ARG BASE_RAC_IMAGE=oracle/database-rac:21.3.0 +ARG BASE_RAC_IMAGE=localhost/oracle/database-rac:19.3.0 +ARG RAC_SLIM_IMAGE=localhost/oracle/database-rac:19.3.0-slim # Pull base image # --------------- +# hadolint ignore=DL3006 FROM $BASE_RAC_IMAGE as builder -ARG RAC_SLIM_IMAGE - -# Labels -# ------ -LABEL "provider"="Oracle" \ - "issues"="https://github.com/oracle/docker-images/issues" \ - "maintainer"="paramdeep Saini " \ - "volume.setup.location1"="/opt/scripts" \ - "volume.startup.location1"="/opt/scripts/startup" \ - "port.listener"="1521" \ - "port.oemexpress"="5500" # Argument to control removal of components not needed after db software installation +ARG SLIMMING=false ARG PATCH_DIR="patches" ARG DB_EDITION="EE" ARG USER="root" @@ -42,7 +33,8 @@ ARG WORKDIR="/rac-work-dir" # Environment variables required for this build (do NOT change) # ------------------------------------------------------------- -USER $USER +# hadolint ignore=DL3002 +USER root ENV PATCH_DIR=$PATCH_DIR \ GRID_PATCH_FILE="applyGridPatches.sh" \ @@ -51,7 +43,7 @@ ENV PATCH_DIR=$PATCH_DIR \ DB_USER="oracle" \ USER=$USER \ WORKDIR=$WORKDIR \ - GRID_USER="grid" + GRID_USER="grid" # Use second ENV so that variable get substituted ENV PATCH_INSTALL_DIR=/tmp/patches @@ -74,43 +66,59 @@ RUN chown -R grid:oinstall $PATCH_INSTALL_DIR/*.sh && \ USER oracle RUN $PATCH_INSTALL_DIR/$DB_PATCH_FILE $PATCH_INSTALL_DIR +# hadolint ignore=DL3002 +USER root -USER $USER - -RUN "$PATCH_INSTALL_DIR"/"$FIXUP_PREQ_FILE" && \ - cp "$PATCH_INSTALL_DIR"/"$FIXUP_PREQ_FILE" "$SCRIPT_DIR"/"$FIXUP_PREQ_FILE" && \ +RUN $PATCH_INSTALL_DIR/$FIXUP_PREQ_FILE && \ rm -rf /etc/oracle && \ - rm -rf "$PATCH_INSTALL_DIR" - -############################################# -# ------------------------------------------- -# Start new stage for grid/DB with Slim image -# ------------------------------------------- -############################################# - -FROM $RAC_SLIM_IMAGE as final -ARG USER + rm -rf $PATCH_INSTALL_DIR + +##################### +# hadolint ignore=DL3006 +FROM $RAC_SLIM_IMAGE AS final + +# Define build-time arguments +ARG GRID_HOME +ARG DB_HOME + +#Set environment variables using build arguments +ENV 
GRID_BASE=/u01/app/grid \ + GRID_HOME=$GRID_HOME \ + DB_BASE=/u01/app/oracle \ + DB_HOME=$DB_HOME \ + INSTALL_SCRIPTS=/opt/scripts/install \ + SCRIPT_DIR=/opt/scripts/startup \ + RAC_SCRIPTS_DIR="scripts" + +ENV GRID_PATH=$GRID_HOME/bin:$GRID_HOME/OPatch/:$GRID_HOME/perl/bin:/usr/sbin:/bin:/sbin \ + DB_PATH=$DB_HOME/bin:$DB_HOME/OPatch/:$DB_HOME/perl/bin:/usr/sbin:/bin:/sbin \ + GRID_LD_LIBRARY_PATH=$GRID_HOME/lib:/usr/lib:/lib \ + DB_LD_LIBRARY_PATH=$DB_HOME/lib:/usr/lib:/lib + +# Run some basic command to verify the environment variables (optional) +RUN echo "GRID_BASE=$GRID_BASE" && \ + echo "GRID_HOME=$GRID_HOME" && \ + echo "DB_BASE=$DB_BASE" && \ + echo "DB_HOME=$DB_HOME" + +RUN if [ -d "/u01" ]; then \ + rm -rf /u01 && \ + :; \ +fi COPY --from=builder /u01 /u01 -RUN mkdir -p /tmp/scripts -COPY --from=builder $SCRIPT_DIR /tmp/scripts -RUN cp -rn /tmp/scripts/* $SCRIPT_DIR/ && \ - rm -rf /tmp/scripts - -RUN chmod 755 "$SCRIPT_DIR"/* && \ - "$INVENTORY"/orainstRoot.sh && \ - "$GRID_HOME"/root.sh && \ - "$DB_HOME"/root.sh && \ - "$SCRIPT_DIR"/"$FIXUP_PREQ_FILE" && \ - cp "$SCRIPT_DIR"/"$INITSH" /usr/bin/"$INITSH" && \ - chmod 755 /usr/bin/"$INITSH" && \ - rm -f "$SCRIPT_DIR"/"$FIXUP_PREQ_FILE" - -USER $USER -WORKDIR $WORKDIR +USER ${USER} VOLUME ["/common_scripts"] +WORKDIR $WORKDIR +HEALTHCHECK --interval=2m --start-period=30m \ + CMD "$SCRIPT_DIR/scripts/main.py --checkracinst=true" >/dev/null || exit 1 +#Fix SID detection +# hadolint ignore=SC2086 +RUN $INVENTORY/orainstRoot.sh && \ + $GRID_HOME/root.sh && \ + $DB_HOME/root.sh # Define default command to start Oracle Grid and RAC Database setup. - +# hadolint ignore=DL3025 ENTRYPOINT /usr/bin/$INITSH \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/latest/ContainerfileEnv b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/latest/ContainerfileEnv new file mode 100644 index 0000000000..c6e78351fa --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/applypatch/latest/ContainerfileEnv @@ -0,0 +1,9 @@ +# Stage 1: Base Stage with Environment Variables +ARG BASE_RAC_IMAGE=localhost/oracle/database-rac:19.3.0 +FROM $BASE_RAC_IMAGE + +# Write the environment variables to a .env file +RUN echo "GRID_HOME=$GRID_HOME" >> /tmp/.env && \ + echo "DB_HOME=$DB_HOME" >> /tmp/.env + +ENTRYPOINT ["/bin/bash"] \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/customracdb/README.md b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/customracdb/README.md index a94edecaa4..1d85419f89 100644 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/customracdb/README.md +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/customracdb/README.md @@ -654,4 +654,4 @@ ORACLE RAC DATABASE IS READY TO USE! ## Copyright -Copyright (c) 2014-2019 Oracle and/or its affiliates. All rights reserved. +Copyright (c) 2014-2025 Oracle and/or its affiliates. All rights reserved. 
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/README.md b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/README.md
new file mode 100644
index 0000000000..b3ac2c9e9f
--- /dev/null
+++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/README.md
@@ -0,0 +1,801 @@
+# Oracle RAC on Podman Compose using Oracle RAC Image
+===============================================================
+
+Refer to the instructions below to set up Oracle RAC on Podman using the Oracle RAC image for the various scenarios covered in this guide.
+
+- [Oracle RAC on Podman Compose using Oracle RAC Image](#oracle-rac-on-podman-compose-using-oracle-rac-image)
+  - [Section 1: Prerequisites for Setting up Oracle RAC on Container using Oracle RAC Image](#section-1-prerequisites-for-setting-up-oracle-rac-on-container-using-oracle-rac-image)
+  - [Section 2: Setup Oracle RAC Containers with Oracle RAC Image using Podman Compose Files](#section-2-setup-oracle-rac-containers-with-oracle-rac-image-using-podman-compose-files)
+    - [Section 2.1: Deploying With BlockDevices](#section-21-deploying-with-blockdevices)
+      - [Section 2.1.1: Setup Without Using User Defined Response files](#section-211-setup-without-using-user-defined-response-files)
+      - [Section 2.1.2: Setup Using User Defined Response files](#section-212-setup-using-user-defined-response-files)
+    - [Section 2.2: Deploying With NFS Storage Devices](#section-22-deploying-with-nfs-storage-devices)
+      - [Section 2.2.1: Setup Without Using User Defined Response files](#section-221-setup-without-using-user-defined-response-files)
+      - [Section 2.2.2: Setup Using User Defined Response files](#section-222-setup-using-user-defined-response-files)
+  - [Section 3: Sample of Addition of Nodes to Oracle RAC Containers based on Oracle RAC Image](#section-3-sample-of-addition-of-nodes-to-oracle-rac-containers-based-on-oracle-rac-image)
+    - [Section 3.1: Sample of Addition of Nodes to Oracle RAC Containers using Podman Compose based on Oracle RAC Image with BlockDevices](#section-31-sample-of-addition-of-nodes-to-oracle-rac-containers-using-podman-compose-based-on-oracle-rac-image-with-blockdevices)
+    - [Section 3.2: Sample of Addition of Nodes to Oracle RAC Containers using Podman Compose based on Oracle RAC Image with NFS Storage Devices](#section-32-sample-of-addition-of-nodes-to-oracle-rac-containers-using-podman-compose-based-on-oracle-rac-image-with-nfs-storage-devices)
+  - [Section 4: Environment Variables for Oracle RAC on Podman Compose](#section-4-environment-variables-for-oracle-rac-on-podman-compose)
+  - [Section 5: Validating Oracle RAC Environment](#section-5-validating-oracle-rac-environment)
+  - [Section 6: Connecting to Oracle RAC Environment](#section-6-connecting-to-oracle-rac-environment)
+  - [Cleanup](#cleanup)
+  - [Support](#support)
+  - [License](#license)
+  - [Copyright](#copyright)
+
+## Oracle RAC Setup on Podman Compose using Oracle RAC Image
+
+You can deploy a multi-node Oracle RAC setup using the Oracle RAC image on either block devices or NFS storage devices, with or without user-defined response files. All of these scenarios are discussed in detail in the sections below.
+
+## Section 1: Prerequisites for Setting up Oracle RAC on Container using Oracle RAC Image
+**IMPORTANT:** Execute all the steps specified in this section (customized for your environment) before you proceed to the next section. Completing the prerequisite steps is a requirement for successful configuration.
+ + +* Execute the [Preparation Steps for running Oracle RAC database in containers](../../../README.md#preparation-steps-for-running-oracle-rac-database-in-containers) +* Create Oracle Connection Manager on Container image and container if the IPs are not available on user network.Please refer [RAC Oracle Connection Manager README.MD](../../../../OracleConnectionManager/README.md). +* Ensure the Oracle RAC Image is present. You can either pull ru image from the Oracle Container Registry by following [Building Oracle RAC Database Container Images](../../../README.md#getting-oracle-rac-database-container-images), or you can create the Oracle RAC Container Patched image by following [Building Oracle RAC Database Container Images](../../../README.md#building-a-patched-oracle-rac-container-image) +```bash +# podman images|grep database-rac +localhost/oracle/database-rac 21c 41239091d2ac 16 minutes ago 20.2 GB +``` +* Execute the [Network](../../../README.md#network-management). +* Execute the [Password Management](../../../README.md#password-management). +* `podman-compose` is part of [ol8_developer_EPEL](https://yum.oracle.com/repo/OracleLinux/ol8/developer/EPEL/x86_64/index.html). Enable `ol8_developer_EPEL` repository and install `podman-compose` as below- + ```bash + sudo dnf config-manager --enable ol8_developer_EPEL + sudo dnf install -y podman-compose + ``` +In order to setup 2 Node RAC containers using Podman compose, please make sure pre-requisites are completed before proceeding further - + +## Section 2: Setup Oracle RAC Containers with Oracle RAC Image using Podman Compose Files +### Section 2.1: Deploying With BlockDevices +#### Section 2.1.1: Setup Without Using User Defined Response files +Export the required environment variables required by `podman-compose.yml` file - +```bash +export HEALTHCHECK_INTERVAL=60s +export HEALTHCHECK_TIMEOUT=120s +export HEALTHCHECK_RETRIES=240 +export RACNODE1_CONTAINER_NAME=racnodep1 +export RACNODE1_HOST_NAME=racnodep1 +export RACNODE1_PUBLIC_IP=10.0.20.170 +export RACNODE1_CRS_PRIVATE_IP1=192.168.17.170 +export RACNODE1_CRS_PRIVATE_IP2=192.168.18.170 +export INSTALL_NODE=racnodep1 +export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c +export DEFAULT_GATEWAY="10.0.20.1" +export CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" +export SCAN_NAME=racnodepc1-scan +export ASM_DEVICE1="/dev/asm-disk1" +export ASM_DEVICE2="/dev/asm-disk2" +export CRS_ASM_DEVICE_LIST="${ASM_DEVICE1},${ASM_DEVICE2}" +export ASM_DISK1="/dev/oracleoci/oraclevdd" +export ASM_DISK2="/dev/oracleoci/oraclevde" +export RACNODE2_CONTAINER_NAME=racnodep2 +export RACNODE2_HOST_NAME=racnodep2 +export RACNODE2_PUBLIC_IP=10.0.20.171 +export RACNODE2_CRS_PRIVATE_IP1=192.168.17.171 +export RACNODE2_CRS_PRIVATE_IP2=192.168.18.171 +export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc +export KEY_SECRET_FILE=/opt/.secrets/key.pem +export DNS_CONTAINER_NAME=rac-dnsserver +export DNS_HOST_NAME=racdns +export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" +export RAC_NODE_NAME_PREFIXD="racnoded" +export RAC_NODE_NAME_PREFIXP="racnodep" +export DNS_DOMAIN=example.info +export PUBLIC_NETWORK_NAME="rac_pub1_nw" +export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" +export PRIVATE1_NETWORK_NAME="rac_priv1_nw" +export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" +export PRIVATE2_NETWORK_NAME="rac_priv2_nw" +export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" +export DNS_PUBLIC_IP=10.0.20.25 +export DNS_PRIVATE1_IP=192.168.17.25 +export DNS_PRIVATE2_IP=192.168.18.25 
+export CMAN_CONTAINER_NAME=racnode-cman +export CMAN_HOST_NAME=racnode-cman1 +export CMAN_IMAGE_NAME="localhost/oracle/client-cman:21.3.0" +export CMAN_PUBLIC_IP=10.0.20.15 +export CMAN_PUBLIC_HOSTNAME="racnode-cman1" +export DB_HOSTDETAILS="HOST=racnodepc1-scan:RULE_ACT=accept,HOST=racnodep1:IP=10.0.20.170" +export DB_SERVICE=service:soepdb +``` + +Create compose file named [podman-compose.yml](./withoutresponsefiles/blockdevices/podman-compose.yml) in your working directory. + +Create podman networks- +```bash +podman network create --driver=bridge --subnet=${PUBLIC_NETWORK_SUBNET} ${PUBLIC_NETWORK_NAME} +podman network create --driver=bridge --subnet=${PRIVATE1_NETWORK_SUBNET} ${PRIVATE1_NETWORK_NAME} --disable-dns +podman network create --driver=bridge --subnet=${PRIVATE2_NETWORK_SUBNET} ${PRIVATE2_NETWORK_NAME} --disable-dns +``` + +Bring up DNS Containers- +```bash +podman-compose up -d ${DNS_CONTAINER_NAME} +podman-compose logs ${DNS_CONTAINER_NAME} +``` +DNS Container Logs- +```bash +podman-compose logs ${DNS_CONTAINER_NAME} +03-28-2024 07:46:59 UTC : : ################################################ +03-28-2024 07:46:59 UTC : : DNS Server IS READY TO USE! +03-28-2024 07:46:59 UTC : : ################################################ + +``` +Bring up RAC Containers- +```bash +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE1_CONTAINER_NAME} +podman-compose stop ${RACNODE1_CONTAINER_NAME} + +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE2_CONTAINER_NAME} +podman-compose stop ${RACNODE2_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE1_PUBLIC_IP} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP1} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP2} ${RACNODE1_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE2_PUBLIC_IP} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP1} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP2} ${RACNODE2_CONTAINER_NAME} + +podman-compose start ${RACNODE1_CONTAINER_NAME} +podman-compose start ${RACNODE2_CONTAINER_NAME} +podman exec ${RACNODE1_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +Successful Message when RAC container is setup properly- +```bash +=================================== +ORACLE RAC DATABASE IS READY TO USE +=================================== +``` + +(Optionally) Bring up CMAN Container- +```bash +podman-compose up -d ${CMAN_CONTAINER_NAME} + +podman-compose logs -f ${CMAN_CONTAINER_NAME} +################################################ + CONNECTION MANAGER IS READY TO USE! 
+################################################ +``` +#### Section 2.1.2: Setup Using User Defined Response files +Make sure you completed pre-requisites step to install Podman Compose on required Podman Host Machines. + +On the shared folder between both RAC nodes, copy file named [grid_setup_new_21c.rsp](withresponsefiles/nfsdevices/grid_setup_new_21c.rsp) to shared location e.g `/scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp`. You can skip this step if you are planing to not to use **User Defined Response Files for RAC**. + +If SELinux host is enable on machine then execute the following as well - +```bash +semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp +restorecon -v /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp +semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/dbca_21c.rsp +restorecon -v /scratch/common_scripts/podman/rac/dbca_21c.rsp +``` +Now, Export the required environment variables required by `podman-compose.yml` file - +```bash +export HEALTHCHECK_INTERVAL=60s +export HEALTHCHECK_TIMEOUT=120s +export HEALTHCHECK_RETRIES=240 +export RACNODE1_CONTAINER_NAME=racnodep1 +export RACNODE1_HOST_NAME=racnodep1 +export RACNODE1_PUBLIC_IP=10.0.20.170 +export RACNODE1_CRS_PRIVATE_IP1=192.168.17.170 +export RACNODE1_CRS_PRIVATE_IP2=192.168.18.170 +export INSTALL_NODE=racnodep1 +export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c +export DEFAULT_GATEWAY="10.0.20.1" +export CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" +export SCAN_NAME=racnodepc1-scan +export ASM_DEVICE1="/dev/asm-disk1" +export ASM_DEVICE2="/dev/asm-disk2" +export CRS_ASM_DEVICE_LIST="${ASM_DEVICE1},${ASM_DEVICE2}" +export ASM_DISK1="/dev/oracleoci/oraclevdd" +export ASM_DISK2="/dev/oracleoci/oraclevde" +export RACNODE2_CONTAINER_NAME=racnodep2 +export RACNODE2_HOST_NAME=racnodep2 +export RACNODE2_PUBLIC_IP=10.0.20.171 +export RACNODE2_CRS_PRIVATE_IP1=192.168.17.171 +export RACNODE2_CRS_PRIVATE_IP2=192.168.18.171 +export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc +export KEY_SECRET_FILE=/opt/.secrets/key.pem +export DNS_CONTAINER_NAME=rac-dnsserver +export DNS_HOST_NAME=racdns +export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" +export RAC_NODE_NAME_PREFIXP="racnodep" +export DNS_DOMAIN=example.info +export PUBLIC_NETWORK_NAME="rac_pub1_nw" +export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" +export PRIVATE1_NETWORK_NAME="rac_priv1_nw" +export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" +export PRIVATE2_NETWORK_NAME="rac_priv2_nw" +export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" +export DNS_PUBLIC_IP=10.0.20.25 +export DNS_PRIVATE1_IP=192.168.17.25 +export DNS_PRIVATE2_IP=192.168.18.25 +export CMAN_CONTAINER_NAME=racnodepc1-cman +export CMAN_HOST_NAME=racnodepc1-cman +export CMAN_IMAGE_NAME="localhost/oracle/client-cman:21.3.0" +export CMAN_PUBLIC_IP=10.0.20.15 +export CMAN_PUBLIC_HOSTNAME="racnodepc1-cman" +export DB_HOSTDETAILS="HOST=racnodepc1-scan:RULE_ACT=accept,HOST=racnodep1:IP=10.0.20.170" +export GRID_RESPONSE_FILE="/scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp" +export DB_SERVICE=service:soepdb +``` +Create podman networks- +```bash +podman network create --driver=bridge --subnet=${PUBLIC_NETWORK_SUBNET} ${PUBLIC_NETWORK_NAME} +podman network create --driver=bridge --subnet=${PRIVATE1_NETWORK_SUBNET} ${PRIVATE1_NETWORK_NAME} --disable-dns +podman network create --driver=bridge --subnet=${PRIVATE2_NETWORK_SUBNET} ${PRIVATE2_NETWORK_NAME} --disable-dns +``` 
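+
+Optionally, before creating the compose file you can confirm that the three networks exist with the expected subnets. This is just a quick sanity check; it assumes the network names and variables exported above:
+```bash
+podman network ls | grep -E 'rac_pub1_nw|rac_priv1_nw|rac_priv2_nw'
+podman network inspect ${PUBLIC_NETWORK_NAME} | grep -i subnet
+```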
+ +Create compose file named [podman-compose.yml](./withresponsefiles/blockdevices/podman-compose.yml) in your working directory. + +Bring up DNS Containers- +```bash +podman-compose up -d ${DNS_CONTAINER_NAME} +podman-compose stop ${DNS_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${DNS_PUBLIC_IP} ${DNS_CONTAINER_NAME} +podman-compose start ${DNS_CONTAINER_NAME} +``` + +Successful logs when DNS container comes up- +```bash +podman-compose logs ${DNS_CONTAINER_NAME} +################################################ + DNS Server IS READY TO USE! +################################################ +``` +Bring up RAC Containers- +```bash +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE1_CONTAINER_NAME} +podman-compose stop ${RACNODE1_CONTAINER_NAME} + +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE2_CONTAINER_NAME} +podman-compose stop ${RACNODE2_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE1_PUBLIC_IP} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP1} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP2} ${RACNODE1_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE2_PUBLIC_IP} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP1} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP2} ${RACNODE2_CONTAINER_NAME} + +podman-compose start ${RACNODE1_CONTAINER_NAME} +podman-compose start ${RACNODE2_CONTAINER_NAME} +podman exec ${RACNODE1_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +Successful Message when RAC container is setup properly- +```bash +=================================== +ORACLE RAC DATABASE IS READY TO USE +=================================== +``` + +Bring up CMAN Container- +```bash +podman-compose up -d ${CMAN_CONTAINER_NAME} + +podman-compose logs -f ${CMAN_CONTAINER_NAME} +################################################ + CONNECTION MANAGER IS READY TO USE! +################################################ +``` + +### Section 2.2: Deploying With NFS Storage Devices +#### Section 2.2.1: Setup Without Using User Defined Response files +Make sure you completed pre-requisites step to install Podman Compose on required Podman Host Machines. 
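+
+A quick, optional way to confirm that Podman and Podman Compose are installed on the host before proceeding:
+```bash
+podman --version
+podman-compose --version
+```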
+ +Create placeholder for NFS storage and make sure it is empty - + +```bash +export ORACLE_DBNAME=ORCLCDB +mkdir -p /scratch/stage/rac-storage/$ORACLE_DBNAME +rm -rf /scratch/stage/rac-storage/ORCLCDB/asm_disk0* +``` + +Now, Export the required environment variables required by `podman-compose.yml` file - + +```bash +export HEALTHCHECK_INTERVAL=60s +export HEALTHCHECK_TIMEOUT=120s +export HEALTHCHECK_RETRIES=240 +export RACNODE1_CONTAINER_NAME=racnodep1 +export RACNODE1_HOST_NAME=racnodep1 +export RACNODE1_PUBLIC_IP=10.0.20.170 +export RACNODE1_CRS_PRIVATE_IP1=192.168.17.170 +export RACNODE1_CRS_PRIVATE_IP2=192.168.18.170 +export INSTALL_NODE=racnodep1 +export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c +export DEFAULT_GATEWAY="10.0.20.1" +export CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" +export SCAN_NAME=racnodepc1-scan +export CRS_ASM_DISCOVERY_STRING="/oradata" +export CRS_ASM_DEVICE_LIST="/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img" +export RACNODE2_CONTAINER_NAME=racnodep2 +export RACNODE2_HOST_NAME=racnodep2 +export RACNODE2_PUBLIC_IP=10.0.20.171 +export RACNODE2_CRS_PRIVATE_IP1=192.168.17.171 +export RACNODE2_CRS_PRIVATE_IP2=192.168.18.171 +export DNS_CONTAINER_NAME=rac-dnsserver +export DNS_HOST_NAME=racdns +export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" +export RAC_NODE_NAME_PREFIXP="racnodep" +export DNS_DOMAIN=example.info +export PUBLIC_NETWORK_NAME="rac_pub1_nw" +export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" +export PRIVATE1_NETWORK_NAME="rac_priv1_nw" +export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" +export PRIVATE2_NETWORK_NAME="rac_priv2_nw" +export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" +export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc +export KEY_SECRET_FILE=/opt/.secrets/key.pem +export DNS_PUBLIC_IP=10.0.20.25 +export DNS_PRIVATE1_IP=192.168.17.25 +export DNS_PRIVATE2_IP=192.168.18.25 +export CMAN_CONTAINER_NAME=racnode-cman +export CMAN_HOST_NAME=racnode-cman1 +export CMAN_IMAGE_NAME="localhost/oracle/client-cman:21.3.0" +export CMAN_PUBLIC_IP=10.0.20.15 +export CMAN_PUBLIC_HOSTNAME="racnode-cman1" +export DB_HOSTDETAILS="HOST=racnodepc1-scan:RULE_ACT=accept,HOST=racnodep1:IP=10.0.20.170" +export STORAGE_CONTAINER_NAME="racnode-storage" +export STORAGE_HOST_NAME="racnode-storage" +export STORAGE_IMAGE_NAME="localhost/oracle/rac-storage-server:latest" +export ORACLE_DBNAME="ORCLCDB" +export STORAGE_PUBLIC_IP=10.0.20.80 +export NFS_STORAGE_VOLUME="/scratch/stage/rac-storage/$ORACLE_DBNAME" +export DB_SERVICE=service:soepdb +``` +Create podman networks- +```bash +podman network create --driver=bridge --subnet=${PUBLIC_NETWORK_SUBNET} ${PUBLIC_NETWORK_NAME} +podman network create --driver=bridge --subnet=${PRIVATE1_NETWORK_SUBNET} ${PRIVATE1_NETWORK_NAME} --disable-dns +podman network create --driver=bridge --subnet=${PRIVATE2_NETWORK_SUBNET} ${PRIVATE2_NETWORK_NAME} --disable-dns +``` + +Create compose file named [podman-compose.yml](./withoutresponsefiles/nfsdevices/podman-compose.yml) in your working directory. + + +Bring up DNS Containers- +```bash +podman-compose up -d ${DNS_CONTAINER_NAME} +podman-compose logs ${DNS_CONTAINER_NAME} +``` +Logs- +```bash +04-03-2024 13:22:54 UTC : : ################################################ +04-03-2024 13:22:54 UTC : : DNS Server IS READY TO USE! 
+04-03-2024 13:22:54 UTC : : ##################################### +``` + +Bring up Storage Container- +```bash +podman-compose --podman-run-args="-t -i --systemd=always" up -d ${STORAGE_CONTAINER_NAME} +podman-compose exec ${STORAGE_CONTAINER_NAME} tail -f /tmp/storage_setup.log +``` +Logs- +```bash +Export list for racnode-storage: +/oradata * +################################################# + Setup Completed +################################################# +``` + +Create NFS volume- +```bash +podman volume create --driver local \ +--opt type=nfs \ +--opt o=addr=10.0.20.80,rw,bg,hard,tcp,vers=3,timeo=600,rsize=32768,wsize=32768,actimeo=0 \ +--opt device=10.0.20.80:/oradata \ +racstorage +``` +Bring up RAC Containers- +```bash +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE1_CONTAINER_NAME} +podman-compose stop ${RACNODE1_CONTAINER_NAME} + +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE2_CONTAINER_NAME} +podman-compose stop ${RACNODE2_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE1_PUBLIC_IP} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP1} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP2} ${RACNODE1_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE2_PUBLIC_IP} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP1} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP2} ${RACNODE2_CONTAINER_NAME} + +podman-compose start ${RACNODE1_CONTAINER_NAME} +podman-compose start ${RACNODE2_CONTAINER_NAME} +podman exec ${RACNODE1_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +Successful Message when RAC container is setup properly- +```bash +=================================== +ORACLE RAC DATABASE IS READY TO USE +=================================== +``` + +(Optionally) Bring up CMAN Container- +```bash +podman-compose up -d ${CMAN_CONTAINER_NAME} + +podman-compose logs -f ${CMAN_CONTAINER_NAME} +################################################ + CONNECTION MANAGER IS READY TO USE! +################################################ +``` +#### Section 2.2.2: Setup Using User Defined Response files +Make sure you completed pre-requisites step to install Podman Compose on required Podman Host Machines. 
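+
+Optionally, you can check the SELinux mode on the host before deciding whether the relabeling steps below are needed (the document runs those steps only when SELinux is enabled on the host):
+```bash
+getenforce
+```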
+ +If SELinux is enabled in your host machine then execute the following as well - +```bash +semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp +restorecon -v /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp +semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/dbca_21c.rsp +restorecon -v /scratch/common_scripts/podman/rac/dbca_21c.rsp +``` +Create placeholder for NFS storage and make sure it is empty - + +```bash +export ORACLE_DBNAME=ORCLCDB +mkdir -p /scratch/stage/rac-storage/$ORACLE_DBNAME +rm -rf /scratch/stage/rac-storage/ORCLCDB/asm_disk0* +``` + +On the shared folder between both RAC nodes, copy file name [grid_setup_new_21c.rsp](withresponsefiles/nfsdevices/grid_setup_new_21c.rsp) to shared location e.g `/scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp`. You can skip this step if you are planing to not to use **User Defined Response Files for RAC**. +If SELinux host is enable on machine then execute the following as well - +```bash +semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp +restorecon -v /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp +semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/dbca_21c.rsp +restorecon -v /scratch/common_scripts/podman/rac/dbca_21c.rsp +``` +Now, Export the required environment variables required by `podman-compose.yml` file - +```bash +export HEALTHCHECK_INTERVAL=60s +export HEALTHCHECK_TIMEOUT=120s +export HEALTHCHECK_RETRIES=240 +export RACNODE1_CONTAINER_NAME=racnodep1 +export RACNODE1_HOST_NAME=racnodep1 +export RACNODE1_PUBLIC_IP=10.0.20.170 +export RACNODE1_CRS_PRIVATE_IP1=192.168.17.170 +export RACNODE1_CRS_PRIVATE_IP2=192.168.18.170 +export INSTALL_NODE=racnodep1 +export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c +export DEFAULT_GATEWAY="10.0.20.1" +export CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" +export SCAN_NAME=racnodepc1-scan +export CRS_ASM_DISCOVERY_STRING="/oradata" +export CRS_ASM_DEVICE_LIST="/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img" +export RACNODE2_CONTAINER_NAME=racnodep2 +export RACNODE2_HOST_NAME=racnodep2 +export RACNODE2_PUBLIC_IP=10.0.20.171 +export RACNODE2_CRS_PRIVATE_IP1=192.168.17.171 +export RACNODE2_CRS_PRIVATE_IP2=192.168.18.171 +export DNS_CONTAINER_NAME=rac-dnsserver +export DNS_HOST_NAME=racdns +export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" +export RAC_NODE_NAME_PREFIXP="racnodep" +export DNS_DOMAIN=example.info +export PUBLIC_NETWORK_NAME="rac_pub1_nw" +export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" +export PRIVATE1_NETWORK_NAME="rac_priv1_nw" +export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" +export PRIVATE2_NETWORK_NAME="rac_priv2_nw" +export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" +export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc +export KEY_SECRET_FILE=/opt/.secrets/key.pem +export DNS_PUBLIC_IP=10.0.20.25 +export DNS_PRIVATE1_IP=192.168.17.25 +export DNS_PRIVATE2_IP=192.168.18.25 +export CMAN_CONTAINER_NAME=racnode-cman +export CMAN_HOST_NAME=racnode-cman1 +export CMAN_IMAGE_NAME="localhost/oracle/client-cman:21.3.0" +export CMAN_PUBLIC_IP=10.0.20.15 +export CMAN_PUBLIC_HOSTNAME="racnode-cman1" +export DB_HOSTDETAILS="HOST=racnodepc1-scan:RULE_ACT=accept,HOST=racnodep1:IP=10.0.20.170" +export STORAGE_CONTAINER_NAME="racnode-storage" +export STORAGE_HOST_NAME="racnode-storage" +export 
STORAGE_IMAGE_NAME="localhost/oracle/rac-storage-server:latest" +export ORACLE_DBNAME="ORCLCDB" +export STORAGE_PUBLIC_IP=10.0.20.80 +export NFS_STORAGE_VOLUME="/scratch/stage/rac-storage/$ORACLE_DBNAME" +export GRID_RESPONSE_FILE="/scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp" +export DB_SERVICE=service:soepdb +``` + +Create podman networks- +```bash +podman network create --driver=bridge --subnet=${PUBLIC_NETWORK_SUBNET} ${PUBLIC_NETWORK_NAME} +podman network create --driver=bridge --subnet=${PRIVATE1_NETWORK_SUBNET} ${PRIVATE1_NETWORK_NAME} --disable-dns +podman network create --driver=bridge --subnet=${PRIVATE2_NETWORK_SUBNET} ${PRIVATE2_NETWORK_NAME} --disable-dns +``` + +Create compose file named [podman-compose.yml](./withresponsefiles/nfsdevices/podman-compose.yml) in your working directory. + +Bring up DNS Containers- +```bash +podman-compose up -d ${DNS_CONTAINER_NAME} +podman-compose stop ${DNS_CONTAINER_NAME} +podman network disconnect ${PUBLIC_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${DNS_PUBLIC_IP} ${DNS_CONTAINER_NAME} +podman-compose start ${DNS_CONTAINER_NAME} +``` + +Successful logs when DNS container comes up- +```bash +podman-compose logs ${DNS_CONTAINER_NAME} +################################################ + DNS Server IS READY TO USE! +################################################ +``` + +Bring up Storage Container- +```bash +podman-compose --podman-run-args="-t -i --systemd=always" up -d ${STORAGE_CONTAINER_NAME} +podman-compose exec ${STORAGE_CONTAINER_NAME} tail -f /tmp/storage_setup.log + +Export list for racnode-storage: +/oradata * +################################################# + Setup Completed +################################################# +``` + +Create NFS volume- +```bash +podman volume create --driver local \ +--opt type=nfs \ +--opt o=addr=10.0.20.80,rw,bg,hard,tcp,vers=3,timeo=600,rsize=32768,wsize=32768,actimeo=0 \ +--opt device=10.0.20.80:/oradata \ +racstorage +``` +Bring up RAC Containers- +```bash +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE1_CONTAINER_NAME} +podman-compose stop ${RACNODE1_CONTAINER_NAME} + +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE2_CONTAINER_NAME} +podman-compose stop ${RACNODE2_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE1_PUBLIC_IP} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP1} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP2} ${RACNODE1_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE2_PUBLIC_IP} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP1} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP2} ${RACNODE2_CONTAINER_NAME} + 
+podman-compose start ${RACNODE1_CONTAINER_NAME} +podman-compose start ${RACNODE2_CONTAINER_NAME} +podman exec ${RACNODE1_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +Successful Message when RAC container is setup properly- +```bash +=================================== +ORACLE RAC DATABASE IS READY TO USE +=================================== +``` + +(Optionally) Bring up CMAN Container- +```bash +podman-compose up -d ${CMAN_CONTAINER_NAME} +podman-compose logs -f ${CMAN_CONTAINER_NAME} +################################################ + CONNECTION MANAGER IS READY TO USE! +################################################ +``` +## Section 3: Sample of Addition of Nodes to Oracle RAC Containers based on Oracle RAC Image + +### Section 3.1: Sample of Addition of Nodes to Oracle RAC Containers using Podman Compose based on Oracle RAC Image with BlockDevices + +Below is an example to add one more node to existing Oracle RAC 2 node cluster using Oracle RAC Image and with user defined files using podman compose file - + +Create compose file named [podman-compose.yml](./withoutresponsefiles/blockdevices/addition/podman-compose.yml) in your working directory. + +Export the required environment variables required by `podman-compose.yml` file - +```bash +export HEALTHCHECK_INTERVAL=60s +export HEALTHCHECK_TIMEOUT=120s +export HEALTHCHECK_RETRIES=240 +export RACNODE3_CONTAINER_NAME=racnodep3 +export RACNODE3_HOST_NAME=racnodep3 +export RACNODE3_PUBLIC_IP=10.0.20.172 +export RACNODE3_CRS_PRIVATE_IP1=192.168.17.172 +export RACNODE3_CRS_PRIVATE_IP2=192.168.18.172 +export INSTALL_NODE=racnodep3 +export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c +export DEFAULT_GATEWAY="10.0.20.1" +export CRS_NODES="\"pubhost:racnodep3,viphost:racnodep3-vip\"" +export EXISTING_CLS_NODE="racnodep1,racnodep2" +export SCAN_NAME=racnodepc1-scan +export ASM_DEVICE1="/dev/asm-disk1" +export ASM_DEVICE2="/dev/asm-disk2" +export CRS_ASM_DEVICE_LIST="${ASM_DEVICE1},${ASM_DEVICE2}" +export ASM_DISK1="/dev/oracleoci/oraclevdd" +export ASM_DISK2="/dev/oracleoci/oraclevde" +export DNS_CONTAINER_NAME=rac-dnsserver +export DNS_HOST_NAME=racdns +export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" +export RAC_NODE_NAME_PREFIXD="racnoded" +export RAC_NODE_NAME_PREFIXP="racnodep" +export DNS_DOMAIN=example.info +export PUBLIC_NETWORK_NAME="rac_pub1_nw" +export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" +export PRIVATE1_NETWORK_NAME="rac_priv1_nw" +export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" +export PRIVATE2_NETWORK_NAME="rac_priv2_nw" +export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" +export DNS_PUBLIC_IP=10.0.20.25 +export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc +export KEY_SECRET_FILE=/opt/.secrets/key.pem +export DB_SERVICE=service:soepdb +``` +Bring up RAC Containers- +```bash +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE3_CONTAINER_NAME} +podman-compose stop ${RACNODE3_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE3_PUBLIC_IP} ${RACNODE3_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE3_CRS_PRIVATE_IP1} ${RACNODE3_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE3_CRS_PRIVATE_IP2} 
${RACNODE3_CONTAINER_NAME} + +podman-compose start ${RACNODE3_CONTAINER_NAME} +podman exec ${RACNODE3_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +Successful Message when RAC container is setup properly- +```bash +======================================================== +Oracle Database ORCLCDB3 is up and running on racnodep3. +======================================================== +``` + +### Section 3.2: Sample of Addition of Nodes to Oracle RAC Containers using Podman Compose based on Oracle RAC Image with NFS Storage Devices +Below is the example to add one more node to existing Oracle RAC 2 node cluster using Oracle RAC Image and with user defined files using podman compose file - + +Create compose file named [podman-compose.yml](./withoutresponsefiles/nfsdevices/addition/podman-compose.yml) in your working directory. + + +Export the required environment variables required by `podman-compose.yml` file - +```bash +export HEALTHCHECK_INTERVAL=60s +export HEALTHCHECK_TIMEOUT=120s +export HEALTHCHECK_RETRIES=240 +export RACNODE3_CONTAINER_NAME=racnodep3 +export RACNODE3_HOST_NAME=racnodep3 +export RACNODE3_PUBLIC_IP=10.0.20.172 +export RACNODE3_CRS_PRIVATE_IP1=192.168.17.172 +export RACNODE3_CRS_PRIVATE_IP2=192.168.18.172 +export INSTALL_NODE=racnodep3 +export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c +export DEFAULT_GATEWAY="10.0.20.1" +export CRS_NODES="\"pubhost:racnodep3,viphost:racnodep3-vip\"" +export EXISTING_CLS_NODE="racnodep1,racnodep2" +export SCAN_NAME=racnodepc1-scan +export CRS_ASM_DISCOVERY_STRING="/oradata" +export CRS_ASM_DEVICE_LIST="/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img" +export DNS_CONTAINER_NAME=rac-dnsserver +export DNS_HOST_NAME=racdns +export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" +export RAC_NODE_NAME_PREFIXP="racnodep" +export DNS_DOMAIN=example.info +export PUBLIC_NETWORK_NAME="rac_pub1_nw" +export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" +export PRIVATE1_NETWORK_NAME="rac_priv1_nw" +export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" +export PRIVATE2_NETWORK_NAME="rac_priv2_nw" +export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" +export DNS_PUBLIC_IP=10.0.20.25 +export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc +export KEY_SECRET_FILE=/opt/.secrets/key.pem +export CMAN_CONTAINER_NAME=racnodepc1-cman +export CMAN_HOST_NAME=racnodepc1-cman +export CMAN_PUBLIC_IP=10.0.20.15 +export CMAN_PUBLIC_HOSTNAME="racnodepc1-cman" +export DB_SERVICE=service:soepdb +``` +Bring up RAC Containers- +```bash +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE3_CONTAINER_NAME} +podman-compose stop ${RACNODE3_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE3_PUBLIC_IP} ${RACNODE3_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE3_CRS_PRIVATE_IP1} ${RACNODE3_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE3_CRS_PRIVATE_IP2} ${RACNODE3_CONTAINER_NAME} + +podman-compose start ${RACNODE3_CONTAINER_NAME} +podman exec ${RACNODE3_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +Successful Message when RAC container is setup properly- +```bash 
+========================================================
+Oracle Database ORCLCDB3 is up and running on racnodep3.
+========================================================
+```
+## Section 4: Environment Variables for Oracle RAC on Podman Compose
+Refer to [Environment Variables Explained for Oracle RAC on Podman Compose](../../../docs/ENVVARIABLESCOMPOSE.md) for an explanation of all the environment variables related to Oracle RAC on Podman Compose. Change or set these environment variables to suit your environment.
+
+## Section 5: Validating Oracle RAC Environment
+You can validate that the Oracle RAC container environment is healthy by running the command below-
+```bash
+podman ps -a
+CONTAINER ID  IMAGE                                    COMMAND               CREATED         STATUS                   PORTS  NAMES
+f1345fd4047b  localhost/oracle/rac-dnsserver:latest    /bin/sh -c exec $...  8 hours ago     Up 8 hours (healthy)            rac-dnsserver
+2f42e49758d1  localhost/oracle/database-rac:21c                              46 minutes ago  Up 37 minutes (healthy)         racnodep1
+a27fceea9fe6  localhost/oracle/database-rac:21c                              46 minutes ago  Up 37 minutes (healthy)         racnodep2
+```
+Note:
+- Look for `(healthy)` next to the container names under the `STATUS` column.
+
+## Section 6: Connecting to Oracle RAC Environment
+
+**IMPORTANT:** This section assumes that you have successfully created an Oracle RAC cluster using the preceding sections.
+Refer to [README](../../../docs/CONNECTING.md) for instructions on how to connect to the Oracle RAC database.
+
+## Cleanup
+Refer to [README](../../../docs/CLEANUP.md) for instructions on how to clean up the Oracle RAC database container environment.
+
+## Support
+
+At the time of this release, Oracle RAC on Podman is supported for Oracle Linux 8.10 or later. To see current Linux support certifications, refer to the [Oracle RAC on Podman Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/install-and-upgrade.html).
+
+## License
+
+To download and run Oracle Grid and Database, regardless of whether inside or outside a container, you must download the binaries from the Oracle website and accept the license indicated on that page.
+
+All scripts and files hosted in this repository which are required to build the container images are, unless otherwise noted, released under the UPL 1.0 license.
+
+## Copyright
+
+Copyright (c) 2014-2025 Oracle and/or its affiliates.
diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/blockdevices/addition/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/blockdevices/addition/podman-compose.yml new file mode 100644 index 0000000000..f3df64cace --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/blockdevices/addition/podman-compose.yml @@ -0,0 +1,73 @@ +--- +version: "3" +networks: + rac_pub1_nw: + external: true + rac_priv1_nw: + external: true + rac_priv2_nw: + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +services: + racnodep3: + container_name: ${RACNODE3_CONTAINER_NAME} + hostname: ${RACNODE3_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - racstorage:/oradata + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE3_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE3_CRS_PRIVATE_IP2} + INSTALL_NODE: ${INSTALL_NODE} + OP_TYPE: racaddnode + EXISTING_CLS_NODE: ${EXISTING_CLS_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + ASM_ON_NAS: True + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/blockdevices/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/blockdevices/podman-compose.yml new file mode 100644 index 0000000000..8c9c23dcbd --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/blockdevices/podman-compose.yml @@ -0,0 +1,172 @@ +--- +version: "3" +networks: + rac_pub1_nw: + external: true + rac_priv1_nw: + external: true + rac_priv2_nw: + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +services: + rac-dnsserver: + container_name: ${DNS_CONTAINER_NAME} + hostname: ${DNS_HOST_NAME} + image: ${DNS_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + environment: + SETUP_DNS_CONFIG_FILES: "setup_true" + DOMAIN_NAME: ${DNS_DOMAIN} + RAC_NODE_NAME_PREFIXP: ${RAC_NODE_NAME_PREFIXP} + WEBMIN_ENABLED: false + SETUP_DNS_CONFIG_FILES: "setup_true" + cap_add: + - AUDIT_WRITE + healthcheck: + test: ["CMD-SHELL", "pgrep named"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + privileged: false + networks: + rac_pub1_nw: + ipv4_address: 
${DNS_PUBLIC_IP} + racnodep1: + container_name: ${RACNODE1_CONTAINER_NAME} + hostname: ${RACNODE1_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE1_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE1_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodep2: + container_name: ${RACNODE2_CONTAINER_NAME} + hostname: ${RACNODE2_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE2_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE2_CRS_PRIVATE_IP2} + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_STRING: ${CRS_ASM_DISCOVERY_STRING} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodepc1-cman: + container_name: ${CMAN_CONTAINER_NAME} + hostname: ${CMAN_HOST_NAME} + image: ${CMAN_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + dns: ${DNS_PUBLIC_IP} + environment: + DOMAIN_NAME: ${DNS_DOMAIN} + PUBLIC_IP: ${CMAN_PUBLIC_IP} + PUBLIC_HOSTNAME: ${CMAN_PUBLIC_HOSTNAME} + DB_HOSTDETAILS: ${DB_HOSTDETAILS} + privileged: false + ports: + - 1521:1521 + networks: + rac_pub1_nw: + ipv4_address: ${CMAN_PUBLIC_IP} + cap_add: + - AUDIT_WRITE + - NET_RAW + healthcheck: + test: ["CMD-SHELL", "pgrep -f 'cmadmin'"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git 
a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/nfsdevices/addition/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/nfsdevices/addition/podman-compose.yml new file mode 100644 index 0000000000..d1b3af742e --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/nfsdevices/addition/podman-compose.yml @@ -0,0 +1,75 @@ +--- +version: "3" +networks: + rac_pub1_nw: + external: true + rac_priv1_nw: + external: true + rac_priv2_nw: + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +volumes: + racstorage: + external: true +services: + racnodep3: + container_name: ${RACNODE3_CONTAINER_NAME} + hostname: ${RACNODE3_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - racstorage:/oradata + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE3_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE3_CRS_PRIVATE_IP2} + INSTALL_NODE: ${INSTALL_NODE} + OP_TYPE: racaddnode + EXISTING_CLS_NODE: ${EXISTING_CLS_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_STRING: "/oradata" + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + ASM_ON_NAS: True + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/nfsdevices/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/nfsdevices/podman-compose.yml new file mode 100644 index 0000000000..bf2143bab4 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withoutresponsefiles/nfsdevices/podman-compose.yml @@ -0,0 +1,198 @@ +--- +version: "3" +networks: + rac_pub1_nw: + external: true + rac_priv1_nw: + external: true + rac_priv2_nw: + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +volumes: + racstorage: + external: true +services: + rac-dnsserver: + container_name: ${DNS_CONTAINER_NAME} + hostname: ${DNS_HOST_NAME} + image: ${DNS_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + environment: + SETUP_DNS_CONFIG_FILES: "setup_true" + DOMAIN_NAME: ${DNS_DOMAIN} + RAC_NODE_NAME_PREFIXP: ${RAC_NODE_NAME_PREFIXP} + WEBMIN_ENABLED: false + SETUP_DNS_CONFIG_FILES: "setup_true" + cap_add: + - AUDIT_WRITE + healthcheck: + test: ["CMD-SHELL", "pgrep named"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + privileged: false + networks: + 
rac_pub1_nw: + ipv4_address: ${DNS_PUBLIC_IP} + racnode-storage: + container_name: ${STORAGE_CONTAINER_NAME} + hostname: ${STORAGE_HOST_NAME} + image: ${STORAGE_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + volumes: + - ${NFS_STORAGE_VOLUME}:/oradata + cap_add: + - SYS_ADMIN + - AUDIT_WRITE + - NET_ADMIN + restart: always + healthcheck: + test: + - CMD-SHELL + - /bin/bash -c "ls -lrt /oradata/ && showmount -e | grep '/oradata'" + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + networks: + rac_pub1_nw: + ipv4_address: ${STORAGE_PUBLIC_IP} + racnodep1: + container_name: ${RACNODE1_CONTAINER_NAME} + hostname: ${RACNODE1_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - racstorage:/oradata + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE1_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE1_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_STRING: "/oradata" + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + ASM_ON_NAS: True + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodep2: + container_name: ${RACNODE2_CONTAINER_NAME} + hostname: ${RACNODE2_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - racstorage:${CRS_ASM_DISCOVERY_STRING} + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE2_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE2_CRS_PRIVATE_IP2} + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_STRING: "/oradata" + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + ASM_ON_NAS: True + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodepc1-cman: + container_name: ${CMAN_CONTAINER_NAME} + hostname: ${CMAN_HOST_NAME} + image: ${CMAN_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + dns: ${DNS_PUBLIC_IP} + environment: + 
DOMAIN_NAME: ${DNS_DOMAIN} + PUBLIC_IP: ${CMAN_PUBLIC_IP} + PUBLIC_HOSTNAME: ${CMAN_PUBLIC_HOSTNAME} + DB_HOSTDETAILS: ${DB_HOSTDETAILS} + privileged: false + ports: + - 1521:1521 + networks: + rac_pub1_nw: + ipv4_address: ${CMAN_PUBLIC_IP} + cap_add: + - AUDIT_WRITE + - NET_RAW + healthcheck: + test: ["CMD-SHELL", "pgrep -f 'cmadmin'"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/blockdevices/grid_setup_new_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/blockdevices/grid_setup_new_21c.rsp new file mode 100644 index 0000000000..c7ffe19d4a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/blockdevices/grid_setup_new_21c.rsp @@ -0,0 +1,64 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 +INVENTORY_LOCATION=/u01/app/oraInventory +oracle.install.option=CRS_CONFIG +ORACLE_BASE=/u01/app/grid +oracle.install.asm.OSDBA=dba +oracle.install.asm.OSOPER= +oracle.install.asm.OSASM=asmadmin +oracle.install.crs.config.scanType=LOCAL_SCAN +oracle.install.crs.config.SCANClientDataFile= +oracle.install.crs.config.gpnp.scanName=racnodepc1-scan +oracle.install.crs.config.gpnp.scanPort=1521 +oracle.install.crs.config.ClusterConfiguration=STANDALONE +oracle.install.crs.config.configureAsExtendedCluster=false +oracle.install.crs.config.memberClusterManifestFile= +oracle.install.crs.config.clusterName=rac01cluster +oracle.install.crs.config.gpnp.configureGNS= +oracle.install.crs.config.autoConfigureClusterNodeVIP=false +oracle.install.crs.config.gpnp.gnsOption= +oracle.install.crs.config.gpnp.gnsClientDataFile= +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= +oracle.install.crs.config.sites= +oracle.install.crs.config.clusterNodes=racnodep1:racnodep1-vip:HUB,racnodep2:racnodep2-vip:HUB +oracle.install.crs.config.networkInterfaceList=eth0:10.0.20.0:1,eth1:192.168.17.0:5,eth2:192.168.18.0:5 +oracle.install.asm.configureGIMRDataDG=false +oracle.install.crs.config.storageOption= +oracle.install.crs.config.useIPMI=false +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +oracle.install.asm.storageOption=ASM +oracle.install.asmOnNAS.ocrLocation= +oracle.install.asmOnNAS.configureGIMRDataDG=false +oracle.install.asmOnNAS.gimrLocation= +oracle.install.asm.SYSASMPassword=ORacle__21c +oracle.install.asm.diskGroup.name=DATA +oracle.install.asm.diskGroup.redundancy=EXTERNAL +oracle.install.asm.diskGroup.AUSize=4 +oracle.install.asm.diskGroup.FailureGroups= +oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-disk1,,/dev/asm-disk2, +oracle.install.asm.diskGroup.disks=/dev/asm-disk1,/dev/asm-disk2 +oracle.install.asm.diskGroup.quorumFailureGroupNames= +oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm* +oracle.install.asm.monitorPassword=ORacle__21c +oracle.install.asm.gimrDG.name= +oracle.install.asm.gimrDG.redundancy= +oracle.install.asm.gimrDG.AUSize=1 +oracle.install.asm.gimrDG.FailureGroups= +oracle.install.asm.gimrDG.disksWithFailureGroupNames= +oracle.install.asm.gimrDG.disks= +oracle.install.asm.gimrDG.quorumFailureGroupNames= +oracle.install.asm.configureAFD=false +oracle.install.crs.configureRHPS=false +oracle.install.crs.config.ignoreDownNodes=false 
+oracle.install.config.managementOption=NONE +oracle.install.config.omsHost= +oracle.install.config.omsPort=0 +oracle.install.config.emAdminUser= +oracle.install.config.emAdminPassword= +oracle.install.crs.rootconfig.executeRootScript=false +oracle.install.crs.rootconfig.configMethod=ROOT +oracle.install.crs.rootconfig.sudoPath= +oracle.install.crs.rootconfig.sudoUserName= +oracle.install.crs.config.batchinfo= +oracle.install.crs.deleteNode.nodes= \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/blockdevices/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/blockdevices/podman-compose.yml new file mode 100644 index 0000000000..c5bdf2bd39 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/blockdevices/podman-compose.yml @@ -0,0 +1,176 @@ +--- +version: "3" +networks: + rac_pub1_nw: + external: true + rac_priv1_nw: + external: true + rac_priv2_nw: + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +services: + rac-dnsserver: + container_name: ${DNS_CONTAINER_NAME} + hostname: ${DNS_HOST_NAME} + image: ${DNS_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + environment: + SETUP_DNS_CONFIG_FILES: "setup_true" + DOMAIN_NAME: ${DNS_DOMAIN} + RAC_NODE_NAME_PREFIXP: ${RAC_NODE_NAME_PREFIXP} + WEBMIN_ENABLED: false + SETUP_DNS_CONFIG_FILES: "setup_true" + cap_add: + - AUDIT_WRITE + healthcheck: + test: ["CMD-SHELL", "pgrep named"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + privileged: false + networks: + rac_pub1_nw: + ipv4_address: ${DNS_PUBLIC_IP} + racnodep1: + container_name: ${RACNODE1_CONTAINER_NAME} + hostname: ${RACNODE1_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - ${GRID_RESPONSE_FILE}:/tmp/grid_21c.rsp + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE1_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE1_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + GRID_RESPONSE_FILE: /tmp/grid_21c.rsp + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodep2: + container_name: ${RACNODE2_CONTAINER_NAME} + hostname: ${RACNODE2_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - 
${GRID_RESPONSE_FILE}:/tmp/grid_21c.rsp + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE2_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE2_CRS_PRIVATE_IP2} + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + GRID_RESPONSE_FILE: /tmp/grid_21c.rsp + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodepc1-cman: + container_name: ${CMAN_CONTAINER_NAME} + hostname: ${CMAN_HOST_NAME} + image: ${CMAN_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + dns: ${DNS_PUBLIC_IP} + environment: + DOMAIN_NAME: ${DNS_DOMAIN} + PUBLIC_IP: ${CMAN_PUBLIC_IP} + PUBLIC_HOSTNAME: ${CMAN_PUBLIC_HOSTNAME} + DB_HOSTDETAILS: ${DB_HOSTDETAILS} + privileged: false + ports: + - 1521:1521 + networks: + rac_pub1_nw: + ipv4_address: ${CMAN_PUBLIC_IP} + cap_add: + - AUDIT_WRITE + healthcheck: + test: ["CMD-SHELL", "pgrep -f 'cmadmin'"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/nfsdevices/grid_setup_new_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/nfsdevices/grid_setup_new_21c.rsp new file mode 100644 index 0000000000..16062dd6cb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/nfsdevices/grid_setup_new_21c.rsp @@ -0,0 +1,64 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 +INVENTORY_LOCATION=/u01/app/oraInventory +oracle.install.option=CRS_CONFIG +ORACLE_BASE=/u01/app/grid +oracle.install.asm.OSDBA=dba +oracle.install.asm.OSOPER= +oracle.install.asm.OSASM=asmadmin +oracle.install.crs.config.scanType=LOCAL_SCAN +oracle.install.crs.config.SCANClientDataFile= +oracle.install.crs.config.gpnp.scanName=racnodepc1-scan +oracle.install.crs.config.gpnp.scanPort=1521 +oracle.install.crs.config.ClusterConfiguration=STANDALONE +oracle.install.crs.config.configureAsExtendedCluster=false +oracle.install.crs.config.memberClusterManifestFile= +oracle.install.crs.config.clusterName=rac01cluster +oracle.install.crs.config.gpnp.configureGNS= +oracle.install.crs.config.autoConfigureClusterNodeVIP=false +oracle.install.crs.config.gpnp.gnsOption= +oracle.install.crs.config.gpnp.gnsClientDataFile= +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= +oracle.install.crs.config.sites= +oracle.install.crs.config.clusterNodes=racnodep1:racnodep1-vip:HUB,racnodep2:racnodep2-vip:HUB 
+oracle.install.crs.config.networkInterfaceList=eth0:10.0.20.0:1,eth1:192.168.17.0:5,eth2:192.168.18.0:5 +oracle.install.asm.configureGIMRDataDG=false +oracle.install.crs.config.storageOption= +oracle.install.crs.config.useIPMI=false +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +oracle.install.asm.storageOption=ASM +oracle.install.asmOnNAS.ocrLocation= +oracle.install.asmOnNAS.configureGIMRDataDG=false +oracle.install.asmOnNAS.gimrLocation= +oracle.install.asm.SYSASMPassword=ORacle__21c +oracle.install.asm.diskGroup.name=DATA +oracle.install.asm.diskGroup.redundancy=EXTERNAL +oracle.install.asm.diskGroup.AUSize=4 +oracle.install.asm.diskGroup.FailureGroups= +oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oradata/asm_disk01.img,,/oradata/asm_disk02.img,,/oradata/asm_disk03.img,,/oradata/asm_disk04.img,,/oradata/asm_disk05.im +oracle.install.asm.diskGroup.disks=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img +oracle.install.asm.diskGroup.quorumFailureGroupNames= +oracle.install.asm.diskGroup.diskDiscoveryString=/oradata/asm_disk* +oracle.install.asm.monitorPassword=ORacle__21c +oracle.install.asm.gimrDG.name= +oracle.install.asm.gimrDG.redundancy= +oracle.install.asm.gimrDG.AUSize=1 +oracle.install.asm.gimrDG.FailureGroups= +oracle.install.asm.gimrDG.disksWithFailureGroupNames= +oracle.install.asm.gimrDG.disks= +oracle.install.asm.gimrDG.quorumFailureGroupNames= +oracle.install.asm.configureAFD=false +oracle.install.crs.configureRHPS=false +oracle.install.crs.config.ignoreDownNodes=false +oracle.install.config.managementOption=NONE +oracle.install.config.omsHost= +oracle.install.config.omsPort=0 +oracle.install.config.emAdminUser= +oracle.install.config.emAdminPassword= +oracle.install.crs.rootconfig.executeRootScript=false +oracle.install.crs.rootconfig.configMethod=ROOT +oracle.install.crs.rootconfig.sudoPath= +oracle.install.crs.rootconfig.sudoUserName= +oracle.install.crs.config.batchinfo= +oracle.install.crs.deleteNode.nodes= \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/nfsdevices/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/nfsdevices/podman-compose.yml new file mode 100644 index 0000000000..3816a34ca7 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racimage/withresponsefiles/nfsdevices/podman-compose.yml @@ -0,0 +1,200 @@ +--- +version: "3" +networks: + rac_pub1_nw: + external: true + rac_priv1_nw: + external: true + rac_priv2_nw: + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +volumes: + racstorage: + external: true +services: + rac-dnsserver: + container_name: ${DNS_CONTAINER_NAME} + hostname: ${DNS_HOST_NAME} + image: ${DNS_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + environment: + SETUP_DNS_CONFIG_FILES: "setup_true" + DOMAIN_NAME: ${DNS_DOMAIN} + RAC_NODE_NAME_PREFIXP: ${RAC_NODE_NAME_PREFIXP} + WEBMIN_ENABLED: false + SETUP_DNS_CONFIG_FILES: "setup_true" + cap_add: + - AUDIT_WRITE + healthcheck: + test: ["CMD-SHELL", "pgrep named"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + privileged: false + networks: + rac_pub1_nw: + ipv4_address: ${DNS_PUBLIC_IP} + racnode-storage: + container_name: ${STORAGE_CONTAINER_NAME} + hostname: 
${STORAGE_HOST_NAME} + image: ${STORAGE_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + volumes: + - ${NFS_STORAGE_VOLUME}:/oradata + cap_add: + - SYS_ADMIN + - AUDIT_WRITE + - NET_ADMIN + restart: always + healthcheck: + test: + - CMD-SHELL + - /bin/bash -c "ls -lrt /oradata/ && showmount -e | grep '/oradata'" + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + networks: + rac_pub1_nw: + ipv4_address: ${STORAGE_PUBLIC_IP} + racnodep1: + container_name: ${RACNODE1_CONTAINER_NAME} + hostname: ${RACNODE1_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - racstorage:/oradata + - ${GRID_RESPONSE_FILE}:/tmp/grid_21c.rsp + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE1_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE1_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_STRING: "/oradata" + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + ASM_ON_NAS: True + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodep2: + container_name: ${RACNODE2_CONTAINER_NAME} + hostname: ${RACNODE2_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - racstorage:${CRS_ASM_DISCOVERY_STRING} + - ${GRID_RESPONSE_FILE}:/tmp/grid_21c.rsp + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE2_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE2_CRS_PRIVATE_IP2} + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_STRING: "/oradata" + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + ASM_ON_NAS: True + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodepc1-cman: + container_name: ${CMAN_CONTAINER_NAME} + hostname: ${CMAN_HOST_NAME} + image: ${CMAN_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + dns: ${DNS_PUBLIC_IP} + environment: + DOMAIN_NAME: ${DNS_DOMAIN} + 
PUBLIC_IP: ${CMAN_PUBLIC_IP} + PUBLIC_HOSTNAME: ${CMAN_PUBLIC_HOSTNAME} + DB_HOSTDETAILS: ${DB_HOSTDETAILS} + privileged: false + ports: + - 1521:1521 + networks: + rac_pub1_nw: + ipv4_address: ${CMAN_PUBLIC_IP} + cap_add: + - AUDIT_WRITE + - NET_RAW + healthcheck: + test: ["CMD-SHELL", "pgrep -f 'cmadmin'"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/README.md b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/README.md new file mode 100644 index 0000000000..3fe29c452f --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/README.md @@ -0,0 +1,832 @@ +# Oracle RAC on Podman Compose using Slim Image +=============================================================== + +Refer below instructions for setup of Oracle RAC on Podman Compose using Slim Image for various scenarios. + +- [Oracle RAC on Podman Compose using Slim Image](#oracle-rac-on-podman-compose-using-slim-image) + - [Section 1 : Prerequisites for Setting up Oracle RAC on Container Using Slim Image](#section-1-prerequisites-for-setting-up-oracle-rac-on-container-using-slim-image) + - [Section 2: Setup Oracle RAC Containers with Slim Image using Podman Compose Files](#section-2-setup-oracle-rac-containers-with-slim-image-using-podman-compose-files) + - [Section 2.1: Deploying With BlockDevices](#section-21-deploying-with-blockdevices) + - [Section 2.1.1: Setup Without Using User Defined Response files](#section-211-setup-without-using-user-defined-response-files) + - [Section 2.1.2: Setup Using User Defined Response files](#section-212-setup-using-user-defined-response-files) + - [Section 2.2: Deploying With NFS Storage Devices](#section-22-deploying-with-nfs-storage-devices) + - [Section 2.2.1: Setup Without Using User Defined Response files](#section-221-setup-without-using-user-defined-response-files) + - [Section 2.2.2: Setup Using User Defined Response files](#section-222-setup-using-user-defined-response-files) + - [Section 3: Sample of Addition of Nodes to Oracle RAC Containers based on Slim Image](#section-3-sample-of-addition-of-nodes-to-oracle-rac-containers-based-on-slim-image) + - [Section 3.1: Sample of Addition of Nodes to Oracle RAC Containers using Podman Compose based on Oracle RAC Slim Image with BlockDevices](#section-31-sample-of-addition-of-nodes-to-oracle-rac-containers-using-podman-compose-based-on-oracle-rac-slim-image-with-blockdevices) + - [Section 3.2: Sample of Addition of Nodes to Oracle RAC Containers using Podman Compose based on Oracle RAC Slim Image with NFS Storage Devices](#section-32-sample-of-addition-of-nodes-to-oracle-rac-containers-using-podman-compose-based-on-oracle-rac-slim-image-with-nfs-storage-devices) + - [Section 4: Environment Variables for Oracle RAC on Podman Compose](#section-4-environment-variables-for-oracle-rac-on-podman-compose) + - [Section 5: Validating Oracle RAC Environment](#section-5-validating-oracle-rac-environment) + - [Section 6: Connecting to Oracle RAC Environment](#section-6-connecting-to-oracle-rac-environment) + - [Cleanup](#cleanup) + - [Support](#support) + - [License](#license) + - [Copyright](#copyright) + +## Oracle RAC Setup on Podman Compose using Slim Image + +You can deploy multi node Oracle RAC Setup using Slim Image either on Block Devices or NFS storage Devices by using User Defined Response 
Files, or without them. Each of these scenarios is discussed in detail in the sections below. +## Section 1: Prerequisites for Setting up Oracle RAC on Container using Slim Image +**IMPORTANT:** Execute all the steps specified in this section before you proceed to the next section. Completing the prerequisite steps is required for a successful configuration. + +* Execute the [Preparation Steps for running Oracle RAC Database in Containers](../../../README.md#preparation-steps-for-running-oracle-rac-database-in-containers) +* Create the Oracle Connection Manager on Container image and container if the IPs are not available on the user network. Please refer to [RAC Oracle Connection Manager README.MD](../../../../OracleConnectionManager/README.md). +* Make sure the Oracle RAC Slim Image is present as shown below. If you have not created the Oracle RAC Container image yet, execute [Section 2.1: Building Oracle RAC Database Slim Image](../../../README.md). + ```bash + # podman images|grep database-rac + localhost/oracle/database-rac 21.3.0-slim bf6ae21ccd5a 8 hours ago 517 MB + ``` +Retag it as shown below, because this guide refers to the image as `localhost/oracle/database-rac:21c-slim` throughout: +```bash +podman tag localhost/oracle/database-rac:21.3.0-slim localhost/oracle/database-rac:21c-slim +``` + +* Complete the [Network Management](../../../README.md#network-management) steps. +* Complete the [Password Management](../../../README.md#password-management) steps. +* `podman-compose` is part of [ol8_developer_EPEL](https://yum.oracle.com/repo/OracleLinux/ol8/developer/EPEL/x86_64/index.html). Enable the `ol8_developer_EPEL` repository and install `podman-compose` as shown below: + ```bash + dnf config-manager --enable ol8_developer_EPEL + dnf install -y podman-compose + ``` +* Prepare empty host paths for the 2 nodes, similar to the example below. These paths are mounted into the Oracle RAC node containers during container creation and are used for installing the Oracle RAC software binaries: + ```bash + mkdir -p /scratch/rac/cluster01/node1 + rm -rf /scratch/rac/cluster01/node1/* + + mkdir -p /scratch/rac/cluster01/node2 + rm -rf /scratch/rac/cluster01/node2/* + ``` + +* Make sure the downloaded Oracle RAC software is staged at a location available to both RAC nodes. 
In below example, we have staged Oracle RAC software at location `/scratch/software/21c/goldimages` + ```bash + ls /scratch/software/21c/goldimages + LINUX.X64_213000_db_home.zip LINUX.X64_213000_grid_home.zip + ``` +* If SELinux is enabled on the host machine then execute the following as well - + ```bash + semanage fcontext -a -t container_file_t /scratch/rac/cluster01/node1 + restorecon -v /scratch/rac/cluster01/node1 + semanage fcontext -a -t container_file_t /scratch/rac/cluster01/node2 + restorecon -v /scratch/rac/cluster01/node2 + semanage fcontext -a -t container_file_t /scratch/software/21c/goldimages/LINUX.X64_213000_grid_home.zip + restorecon -v /scratch/software/21c/goldimages/LINUX.X64_213000_grid_home.zip + semanage fcontext -a -t container_file_t /scratch/software/21c/goldimages/LINUX.X64_213000_db_home.zip + restorecon -v /scratch/software/21c/goldimages/LINUX.X64_213000_db_home.zip + ``` +In order to setup 2 Node RAC containers using Podman compose, please make sure pre-requisites are completed before proceeding further - + +## Section 2: Setup Oracle RAC Containers with Slim Image using Podman Compose Files + +### Section 2.1: Deploying With BlockDevices + +#### Section 2.1.1: Setup Without Using User Defined Response files +Make sure you completed pre-requisites step to install Podman Compose on required Podman Host Machines. + +Now, Export the required environment variables required by `podman-compose.yml` file - +```bash +export HEALTHCHECK_INTERVAL=60s +export HEALTHCHECK_TIMEOUT=120s +export HEALTHCHECK_RETRIES=240 +export RACNODE1_CONTAINER_NAME=racnodep1 +export RACNODE1_HOST_NAME=racnodep1 +export RACNODE1_PUBLIC_IP=10.0.20.170 +export RACNODE1_CRS_PRIVATE_IP1=192.168.17.170 +export RACNODE1_CRS_PRIVATE_IP2=192.168.18.170 +export INSTALL_NODE=racnodep1 +export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c-slim +export DEFAULT_GATEWAY="10.0.20.1" +export CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" +export SCAN_NAME=racnodepc1-scan +export ASM_DEVICE1="/dev/asm-disk1" +export ASM_DEVICE2="/dev/asm-disk2" +export CRS_ASM_DEVICE_LIST="${ASM_DEVICE1},${ASM_DEVICE2}" +export ASM_DISK1="/dev/oracleoci/oraclevdd" +export ASM_DISK2="/dev/oracleoci/oraclevde" +export CRS_ASM_DISCOVERY_STRING="/dev/asm*" +export STAGING_SOFTWARE_LOC="/scratch/software/21c/goldimages/" +export RACNODE2_CONTAINER_NAME=racnodep2 +export RACNODE2_HOST_NAME=racnodep2 +export RACNODE2_PUBLIC_IP=10.0.20.171 +export RACNODE2_CRS_PRIVATE_IP1=192.168.17.171 +export RACNODE2_CRS_PRIVATE_IP2=192.168.18.171 +export DNS_CONTAINER_NAME=rac-dnsserver +export DNS_HOST_NAME=racdns +export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" +export RAC_NODE_NAME_PREFIXP="racnodep" +export DNS_DOMAIN=example.info +export PUBLIC_NETWORK_NAME="rac_pub1_nw" +export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" +export PRIVATE1_NETWORK_NAME="rac_priv1_nw" +export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" +export PRIVATE2_NETWORK_NAME="rac_priv2_nw" +export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" +export DNS_PUBLIC_IP=10.0.20.25 +export DNS_PRIVATE1_IP=192.168.17.25 +export DNS_PRIVATE2_IP=192.168.18.25 +export CMAN_CONTAINER_NAME=racnodepc1-cman +export CMAN_HOST_NAME=racnodepc1-cman +export CMAN_IMAGE_NAME="localhost/oracle/client-cman:21.3.0" +export CMAN_PUBLIC_IP=10.0.20.15 +export CMAN_PUBLIC_HOSTNAME="racnodepc1-cman" +export DB_HOSTDETAILS="HOST=racnodepc1-scan:RULE_ACT=accept,HOST=racnodep1:IP=10.0.20.170" +export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc +export 
KEY_SECRET_FILE=/opt/.secrets/key.pem +export DB_SERVICE=service:soepdb +``` +Create podman networks- +```bash +podman network create --driver=bridge --subnet=${PUBLIC_NETWORK_SUBNET} ${PUBLIC_NETWORK_NAME} +podman network create --driver=bridge --subnet=${PRIVATE1_NETWORK_SUBNET} ${PRIVATE1_NETWORK_NAME} --disable-dns --internal +podman network create --driver=bridge --subnet=${PRIVATE2_NETWORK_SUBNET} ${PRIVATE2_NETWORK_NAME} --disable-dns --internal +``` +Create compose file named [podman-compose.yml](./withoutresponsefiles/blockdevices/podman-compose.yml) in your working directory. + + +Bring up DNS Containers- +```bash +podman-compose up -d ${DNS_CONTAINER_NAME} +podman-compose stop ${DNS_CONTAINER_NAME} +podman network disconnect ${PUBLIC_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${DNS_PUBLIC_IP} ${DNS_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${DNS_PRIVATE1_IP} ${DNS_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${DNS_PRIVATE2_IP} ${DNS_CONTAINER_NAME} +podman-compose start ${DNS_CONTAINER_NAME} +``` +Bring up RAC Containers- +```bash +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE1_CONTAINER_NAME} +podman-compose stop ${RACNODE1_CONTAINER_NAME} + +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE2_CONTAINER_NAME} +podman-compose stop ${RACNODE2_CONTAINER_NAME} + +rm -rf /scratch/rac/cluster01/node1/* +rm -rf /scratch/rac/cluster01/node2/* + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE1_PUBLIC_IP} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP1} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP2} ${RACNODE1_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE2_PUBLIC_IP} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP1} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP2} ${RACNODE2_CONTAINER_NAME} + +podman-compose start ${RACNODE1_CONTAINER_NAME} +podman-compose start ${RACNODE2_CONTAINER_NAME} +podman exec ${RACNODE1_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +Successful Message when RAC container is setup properly- +```bash +=================================== +ORACLE RAC DATABASE IS READY TO USE +=================================== +``` + +Bring up CMAN Container- +```bash +podman-compose up -d ${CMAN_CONTAINER_NAME} +``` + +Successful Message when CMAN container is setup properly- +```bash +################################################ +CONNECTION MANAGER IS READY TO USE! 
+################################################ +``` +#### Section 2.1.2: Setup Using User Defined Response files +* In the folder shared between both RAC nodes, create a file named `grid_setup_new_21c.rsp`, similar to the one below, inside the directory `/scratch/common_scripts/podman/rac/`. The same content is also saved in this [grid_setup_new_21c.rsp](withresponsefiles/blockdevices/grid_setup_new_21c.rsp) file. +* Also, prepare a database response file similar to this [dbca_21c.rsp](./dbca_21c.rsp). +* If SELinux is enabled on the host machine, then execute the following as well: + ```bash + semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp + restorecon -v /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp + semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/dbca_21c.rsp + restorecon -v /scratch/common_scripts/podman/rac/dbca_21c.rsp + ``` +You can skip these steps if you are not planning to use **User Defined Response Files for RAC**. + +Now export the environment variables required by the `podman-compose.yml` file: +```bash +export HEALTHCHECK_INTERVAL=60s +export HEALTHCHECK_TIMEOUT=120s +export HEALTHCHECK_RETRIES=240 +export RACNODE1_CONTAINER_NAME=racnodep1 +export RACNODE1_HOST_NAME=racnodep1 +export RACNODE1_PUBLIC_IP=10.0.20.170 +export RACNODE1_CRS_PRIVATE_IP1=192.168.17.170 +export RACNODE1_CRS_PRIVATE_IP2=192.168.18.170 +export INSTALL_NODE=racnodep1 +export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c-slim +export STAGING_SOFTWARE_LOC="/scratch/software/21c/goldimages/" +export DEFAULT_GATEWAY="10.0.20.1" +export ASM_DEVICE1="/dev/asm-disk1" +export ASM_DEVICE2="/dev/asm-disk2" +export CRS_ASM_DEVICE_LIST="${ASM_DEVICE1},${ASM_DEVICE2}" +export ASM_DISK1="/dev/oracleoci/oraclevdd" +export ASM_DISK2="/dev/oracleoci/oraclevde" +export RACNODE2_CONTAINER_NAME=racnodep2 +export RACNODE2_HOST_NAME=racnodep2 +export RACNODE2_PUBLIC_IP=10.0.20.171 +export RACNODE2_CRS_PRIVATE_IP1=192.168.17.171 +export RACNODE2_CRS_PRIVATE_IP2=192.168.18.171 +export DNS_CONTAINER_NAME=rac-dnsserver +export DNS_HOST_NAME=racdns +export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" +export RAC_NODE_NAME_PREFIXP="racnodep" +export DNS_DOMAIN=example.info +export PUBLIC_NETWORK_NAME="rac_pub1_nw" +export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" +export PRIVATE1_NETWORK_NAME="rac_priv1_nw" +export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" +export PRIVATE2_NETWORK_NAME="rac_priv2_nw" +export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" +export DNS_PUBLIC_IP=10.0.20.25 +export CMAN_CONTAINER_NAME=racnodepc1-cman +export CMAN_HOST_NAME=racnodepc1-cman +export CMAN_IMAGE_NAME="localhost/oracle/client-cman:21.3.0" +export CMAN_PUBLIC_IP=10.0.20.15 +export CMAN_PUBLIC_HOSTNAME="racnodepc1-cman" +export DB_HOSTDETAILS="HOST=racnodepc1-scan:RULE_ACT=accept,HOST=racnodep1:IP=10.0.20.170" +export GRID_RESPONSE_FILE="/scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp" +export DB_RESPONSE_FILE="/scratch/common_scripts/podman/rac/dbca_21c.rsp" +export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc +export KEY_SECRET_FILE=/opt/.secrets/key.pem +export DB_SERVICE=service:soepdb +``` +Create the Podman networks: +```bash +podman network create --driver=bridge --subnet=${PUBLIC_NETWORK_SUBNET} ${PUBLIC_NETWORK_NAME} +podman network create --driver=bridge --subnet=${PRIVATE1_NETWORK_SUBNET} ${PRIVATE1_NETWORK_NAME} --disable-dns --internal +podman network create --driver=bridge --subnet=${PRIVATE2_NETWORK_SUBNET} ${PRIVATE2_NETWORK_NAME} --disable-dns --internal 
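+# Optional sanity check (not part of the original steps; assumes the network names exported above are unchanged): +# confirm that all three networks exist before creating the compose file. +podman network ls --format '{{.Name}}' | grep -E 'rac_pub1_nw|rac_priv1_nw|rac_priv2_nw' 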
+``` +Create compose file named [podman-compose.yml](./withresponsefiles/blockdevices/podman-compose.yml) in your working directory. + + +Bring up DNS Containers- +```bash +podman-compose up -d ${DNS_CONTAINER_NAME} +podman-compose stop ${DNS_CONTAINER_NAME} +podman network disconnect ${PUBLIC_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${DNS_PUBLIC_IP} ${DNS_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${DNS_PRIVATE1_IP} ${DNS_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${DNS_PRIVATE2_IP} ${DNS_CONTAINER_NAME} +podman-compose start ${DNS_CONTAINER_NAME} +``` +Bring up RAC Containers- +```bash +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE1_CONTAINER_NAME} +podman-compose stop ${RACNODE1_CONTAINER_NAME} +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE2_CONTAINER_NAME} +podman-compose stop ${RACNODE2_CONTAINER_NAME} +rm -rf /scratch/rac/cluster01/node1/* +rm -rf /scratch/rac/cluster01/node2/* +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE1_PUBLIC_IP} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP1} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP2} ${RACNODE1_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE2_PUBLIC_IP} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP1} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP2} ${RACNODE2_CONTAINER_NAME} + +podman-compose start ${RACNODE1_CONTAINER_NAME} +podman-compose start ${RACNODE2_CONTAINER_NAME} +podman exec ${RACNODE1_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +Successful Message when RAC container is setup properly- +```bash +=================================== +ORACLE RAC DATABASE IS READY TO USE +=================================== +``` + +Bring up CMAN Container- +```bash +podman-compose up -d ${CMAN_CONTAINER_NAME} +``` + +Successful Message when CMAN container is setup properly- +```bash +################################################ +CONNECTION MANAGER IS READY TO USE! 
+################################################ +``` +### Section 2.2: Deploying With NFS Storage Devices +#### Section 2.2.1: Setup Without Using User Defined Response files + +Create placeholder for NFS storage and make sure it is empty - + + ```bash + export ORACLE_DBNAME=ORCLCDB + mkdir -p /scratch/stage/rac-storage/$ORACLE_DBNAME + rm -rf /scratch/stage/rac-storage/ORCLCDB/asm_disk0* + ``` + +Now, Export the required environment variables required by `podman-compose.yml` file - +```bash +export HEALTHCHECK_INTERVAL=60s +export HEALTHCHECK_TIMEOUT=120s +export HEALTHCHECK_RETRIES=240 +export RACNODE1_CONTAINER_NAME=racnodep1 +export RACNODE1_HOST_NAME=racnodep1 +export RACNODE1_PUBLIC_IP=10.0.20.170 +export RACNODE1_CRS_PRIVATE_IP1=192.168.17.170 +export RACNODE1_CRS_PRIVATE_IP2=192.168.18.170 +export INSTALL_NODE=racnodep1 +export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c-slim +export STAGING_SOFTWARE_LOC="/scratch/software/21c/goldimages/" +export DEFAULT_GATEWAY="10.0.20.1" +export CRS_NODES="\"pubhost:racnodep1,viphost:racnodep1-vip;pubhost:racnodep2,viphost:racnodep2-vip\"" +export SCAN_NAME=racnodepc1-scan +export CRS_ASM_DISCOVERY_STRING="/oradata" +export CRS_ASM_DEVICE_LIST="/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img" +export RACNODE2_CONTAINER_NAME=racnodep2 +export RACNODE2_HOST_NAME=racnodep2 +export RACNODE2_PUBLIC_IP=10.0.20.171 +export RACNODE2_CRS_PRIVATE_IP1=192.168.17.171 +export RACNODE2_CRS_PRIVATE_IP2=192.168.18.171 +export DNS_CONTAINER_NAME=rac-dnsserver +export DNS_HOST_NAME=racdns +export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" +export RAC_NODE_NAME_PREFIXP="racnodep" +export DNS_DOMAIN=example.info +export PUBLIC_NETWORK_NAME="rac_pub1_nw" +export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" +export PRIVATE1_NETWORK_NAME="rac_priv1_nw" +export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" +export PRIVATE2_NETWORK_NAME="rac_priv2_nw" +export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" +export DNS_PUBLIC_IP=10.0.20.25 +export CMAN_CONTAINER_NAME=racnodepc1-cman +export CMAN_HOST_NAME=racnodepc1-cman +export CMAN_IMAGE_NAME="localhost/oracle/client-cman:21.3.0" +export CMAN_PUBLIC_IP=10.0.20.15 +export CMAN_PUBLIC_HOSTNAME="racnodepc1-cman" +export DB_HOSTDETAILS="HOST=racnodepc1-scan:RULE_ACT=accept,HOST=racnodep1:IP=10.0.20.170" +export STORAGE_CONTAINER_NAME="racnode-storage" +export STORAGE_HOST_NAME="racnode-storage" +export STORAGE_IMAGE_NAME="localhost/oracle/rac-storage-server:latest" +export ORACLE_DBNAME="ORCLCDB" +export STORAGE_PUBLIC_IP=10.0.20.80 +export NFS_STORAGE_VOLUME="/scratch/stage/rac-storage/$ORACLE_DBNAME" +export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc +export KEY_SECRET_FILE=/opt/.secrets/key.pem +export DB_SERVICE=service:soepdb +``` +Create podman networks- +```bash +podman network create --driver=bridge --subnet=${PUBLIC_NETWORK_SUBNET} ${PUBLIC_NETWORK_NAME} +podman network create --driver=bridge --subnet=${PRIVATE1_NETWORK_SUBNET} ${PRIVATE1_NETWORK_NAME} --disable-dns --internal +podman network create --driver=bridge --subnet=${PRIVATE2_NETWORK_SUBNET} ${PRIVATE2_NETWORK_NAME} --disable-dns --internal +``` +Create compose file named [podman-compose.yml](./withoutresponsefiles/nfsdevices/podman-compose.yml) in your working directory. 
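+Before bringing the services up, you can optionally confirm that the variables referenced by this compose file are set in your current shell. This is a minimal sanity check and not part of the original flow; the variable names below assume the exports from the previous step. +```bash +for v in DNS_CONTAINER_NAME STORAGE_CONTAINER_NAME NFS_STORAGE_VOLUME RACNODE1_CONTAINER_NAME RACNODE2_CONTAINER_NAME CMAN_CONTAINER_NAME; do +  printf '%s=%s\n' "$v" "${!v:-<NOT SET>}" +done +``` 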
+ + +Bring up DNS Containers- +```bash +podman-compose up -d ${DNS_CONTAINER_NAME} +podman-compose stop ${DNS_CONTAINER_NAME} +podman network disconnect ${PUBLIC_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${DNS_PUBLIC_IP} ${DNS_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${DNS_PRIVATE1_IP} ${DNS_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${DNS_PRIVATE2_IP} ${DNS_CONTAINER_NAME} +podman-compose start ${DNS_CONTAINER_NAME} +``` + +Bring up Storage Container- +```bash +podman-compose --podman-run-args="-t -i --systemd=always" up -d ${STORAGE_CONTAINER_NAME} +podman-compose exec ${STORAGE_CONTAINER_NAME} tail -f /tmp/storage_setup.log + +Export list for racnode-storage: +/oradata * +################################################# + Setup Completed +################################################# +``` + +Create NFS volume- +```bash +podman volume create --driver local \ +--opt type=nfs \ +--opt o=addr=10.0.20.80,rw,bg,hard,tcp,vers=3,timeo=600,rsize=32768,wsize=32768,actimeo=0 \ +--opt device=10.0.20.80:/oradata \ +racstorage +``` +Bring up RAC Containers- +```bash +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE1_CONTAINER_NAME} +podman-compose stop ${RACNODE1_CONTAINER_NAME} +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE2_CONTAINER_NAME} +podman-compose stop ${RACNODE2_CONTAINER_NAME} +rm -rf /scratch/rac/cluster01/node1/* +rm -rf /scratch/rac/cluster01/node2/* +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE1_PUBLIC_IP} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP1} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP2} ${RACNODE1_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE2_PUBLIC_IP} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP1} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP2} ${RACNODE2_CONTAINER_NAME} + +podman-compose start ${RACNODE1_CONTAINER_NAME} +podman-compose start ${RACNODE2_CONTAINER_NAME} +podman exec ${RACNODE1_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log" +``` + +Successful Message when RAC container is setup properly- +```bash +=================================== +ORACLE RAC DATABASE IS READY TO USE +=================================== +``` + +Bring up CMAN Container- +```bash +podman-compose up -d ${CMAN_CONTAINER_NAME} + +podman-compose logs -f ${CMAN_CONTAINER_NAME} +################################################ + CONNECTION MANAGER IS READY TO USE! 
+################################################ +``` +#### Section 2.2.2: Setup Using User Defined Response files + +* Create a placeholder for the NFS storage and make sure it is empty: + + ```bash + export ORACLE_DBNAME=ORCLCDB + mkdir -p /scratch/stage/rac-storage/$ORACLE_DBNAME + rm -rf /scratch/stage/rac-storage/ORCLCDB/asm_disk0* + ``` +* Into the folder shared between both RAC nodes, for example `/scratch/common_scripts/podman/rac`, copy the file named [grid_setup_new_21c.rsp](withresponsefiles/nfsdevices/grid_setup_new_21c.rsp). +* Also copy [dbca_21c.rsp](./dbca_21c.rsp) into `/scratch/common_scripts/podman/rac`. +* If SELinux is enabled on the host machine, then execute the following as well: + ```bash + semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp + restorecon -v /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp + semanage fcontext -a -t container_file_t /scratch/common_scripts/podman/rac/dbca_21c.rsp + restorecon -v /scratch/common_scripts/podman/rac/dbca_21c.rsp + ``` + +You can skip these steps if you are not planning to use **User Defined Response Files for RAC**. + +Now export the environment variables required by the `podman-compose.yml` file: +```bash +export HEALTHCHECK_INTERVAL=60s +export HEALTHCHECK_TIMEOUT=120s +export HEALTHCHECK_RETRIES=240 +export RACNODE1_CONTAINER_NAME=racnodep1 +export RACNODE1_HOST_NAME=racnodep1 +export RACNODE1_PUBLIC_IP=10.0.20.170 +export RACNODE1_CRS_PRIVATE_IP1=192.168.17.170 +export RACNODE1_CRS_PRIVATE_IP2=192.168.18.170 +export INSTALL_NODE=racnodep1 +export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c-slim +export STAGING_SOFTWARE_LOC="/scratch/software/21c/goldimages/" +export DEFAULT_GATEWAY="10.0.20.1" +export SCAN_NAME=racnodepc1-scan +export RACNODE2_CONTAINER_NAME=racnodep2 +export RACNODE2_HOST_NAME=racnodep2 +export RACNODE2_PUBLIC_IP=10.0.20.171 +export RACNODE2_CRS_PRIVATE_IP1=192.168.17.171 +export RACNODE2_CRS_PRIVATE_IP2=192.168.18.171 +export DNS_CONTAINER_NAME=rac-dnsserver +export DNS_HOST_NAME=racdns +export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest" +export RAC_NODE_NAME_PREFIXP="racnodep" +export DNS_DOMAIN=example.info +export PUBLIC_NETWORK_NAME="rac_pub1_nw" +export PUBLIC_NETWORK_SUBNET="10.0.20.0/24" +export PRIVATE1_NETWORK_NAME="rac_priv1_nw" +export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24" +export PRIVATE2_NETWORK_NAME="rac_priv2_nw" +export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24" +export DNS_PUBLIC_IP=10.0.20.25 +export CMAN_CONTAINER_NAME=racnodepc1-cman +export CMAN_HOST_NAME=racnodepc1-cman +export CMAN_IMAGE_NAME="localhost/oracle/client-cman:21.3.0" +export CMAN_PUBLIC_IP=10.0.20.15 +export CMAN_PUBLIC_HOSTNAME="racnodepc1-cman" +export DB_HOSTDETAILS="HOST=racnodepc1-scan:RULE_ACT=accept,HOST=racnodep1:IP=10.0.20.170" +export STORAGE_CONTAINER_NAME="racnode-storage" +export STORAGE_HOST_NAME="racnode-storage" +export STORAGE_IMAGE_NAME="localhost/oracle/rac-storage-server:latest" +export ORACLE_DBNAME="ORCLCDB" +export STORAGE_PUBLIC_IP=10.0.20.80 +export NFS_STORAGE_VOLUME="/scratch/stage/rac-storage/$ORACLE_DBNAME" +export GRID_RESPONSE_FILE="/scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp" +export DB_RESPONSE_FILE="/scratch/common_scripts/podman/rac/dbca_21c.rsp" +export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc +export KEY_SECRET_FILE=/opt/.secrets/key.pem +export DB_SERVICE=service:soepdb +``` +Create the Podman networks: +```bash +podman network create --driver=bridge --subnet=${PUBLIC_NETWORK_SUBNET} ${PUBLIC_NETWORK_NAME} +podman 
network create --driver=bridge --subnet=${PRIVATE1_NETWORK_SUBNET} ${PRIVATE1_NETWORK_NAME} --disable-dns --internal +podman network create --driver=bridge --subnet=${PRIVATE2_NETWORK_SUBNET} ${PRIVATE2_NETWORK_NAME} --disable-dns --internal +``` +Create compose file named [podman-compose.yml](./withresponsefiles/nfsdevices/podman-compose.yml) in your working directory. +Bring up DNS Containers- +```bash +podman-compose up -d ${DNS_CONTAINER_NAME} +podman-compose stop ${DNS_CONTAINER_NAME} +podman network disconnect ${PUBLIC_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${DNS_CONTAINER_NAME} +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${DNS_PUBLIC_IP} ${DNS_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${DNS_PRIVATE1_IP} ${DNS_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${DNS_PRIVATE2_IP} ${DNS_CONTAINER_NAME} +podman-compose start ${DNS_CONTAINER_NAME} +``` + +Successful logs when DNS container comes up- +```bash +podman-compose logs ${DNS_CONTAINER_NAME} +################################################ + DNS Server IS READY TO USE! +################################################ +``` +Bring up Storage Container- +```bash +podman-compose --podman-run-args="-t -i --systemd=always" up -d ${STORAGE_CONTAINER_NAME} +podman-compose exec ${STORAGE_CONTAINER_NAME} tail -f /tmp/storage_setup.log + +Export list for racnode-storage: +/oradata * +################################################# + Setup Completed +################################################# +``` + +Create NFS volume- +```bash +podman volume create --driver local \ +--opt type=nfs \ +--opt o=addr=10.0.20.80,rw,bg,hard,tcp,vers=3,timeo=600,rsize=32768,wsize=32768,actimeo=0 \ +--opt device=10.0.20.80:/oradata \ +racstorage +``` + +Bring up RAC Containers- +```bash +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE1_CONTAINER_NAME} +podman-compose stop ${RACNODE1_CONTAINER_NAME} + +podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE2_CONTAINER_NAME} +podman-compose stop ${RACNODE2_CONTAINER_NAME} + +rm -rf /scratch/rac/cluster01/node1/* +rm -rf /scratch/rac/cluster01/node2/* + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE1_CONTAINER_NAME} + +podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} +podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE2_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE1_PUBLIC_IP} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP1} ${RACNODE1_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE1_CRS_PRIVATE_IP2} ${RACNODE1_CONTAINER_NAME} + +podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE2_PUBLIC_IP} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP1} ${RACNODE2_CONTAINER_NAME} +podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE2_CRS_PRIVATE_IP2} ${RACNODE2_CONTAINER_NAME} + +podman-compose start 
${RACNODE1_CONTAINER_NAME}
+podman-compose start ${RACNODE2_CONTAINER_NAME}
+podman exec ${RACNODE1_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log"
+```
+
+Successful message when the RAC container is set up properly-
+```bash
+===================================
+ORACLE RAC DATABASE IS READY TO USE
+===================================
+```
+
+(Optional) Bring up CMAN Container-
+```bash
+podman-compose up -d ${CMAN_CONTAINER_NAME}
+
+podman-compose logs -f ${CMAN_CONTAINER_NAME}
+################################################
+ CONNECTION MANAGER IS READY TO USE!
+################################################
+```
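+
+As an optional, illustrative sanity check (a sketch only: the OS user, Oracle home path, and database name below are assumptions taken from the values used elsewhere in this guide, so adjust them to your setup), you can ask Oracle Clusterware for the database status from the first RAC container:
+```bash
+# Illustrative only: assumes the 'oracle' OS user inside the container, the 21c
+# database home used in this guide, and the default database name ORCLCDB.
+podman exec ${RACNODE1_CONTAINER_NAME} su - oracle -c "/u01/app/oracle/product/21c/dbhome_1/bin/srvctl status database -d ORCLCDB"
+```
+The expected output is one line per instance, each reporting that it is running on its node.
+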
+## Section 3: Sample of Addition of Nodes to Oracle RAC Containers based on Slim Image
+
+* Before you proceed to add an additional node, create a placeholder directory for it -
+  ```bash
+  mkdir -p /scratch/rac/cluster01/node3
+  rm -rf /scratch/rac/cluster01/node3/*
+  ```
+* If SELinux is enabled on your machine, then also execute the following -
+  ```bash
+  semanage fcontext -a -t container_file_t /scratch/rac/cluster01/node3
+  restorecon -v /scratch/rac/cluster01/node3
+  ```
+
+### Section 3.1: Sample of Addition of Nodes to Oracle RAC Containers using Podman Compose based on Oracle RAC Slim Image with BlockDevices
+
+Below is an example of adding one more node to an existing two-node Oracle RAC cluster using the Oracle RAC slim image and user-defined files with a Podman Compose file -
+
+Create a compose file named [podman-compose.yml](./withoutresponsefiles/blockdevices/addition/podman-compose.yml) in your working directory.
+
+Export the environment variables required by the `podman-compose.yml` file -
+```bash
+export HEALTHCHECK_INTERVAL=60s
+export HEALTHCHECK_TIMEOUT=120s
+export HEALTHCHECK_RETRIES=240
+export RACNODE3_CONTAINER_NAME=racnodep3
+export RACNODE3_HOST_NAME=racnodep3
+export RACNODE3_PUBLIC_IP=10.0.20.172
+export RACNODE3_CRS_PRIVATE_IP1=192.168.17.172
+export RACNODE3_CRS_PRIVATE_IP2=192.168.18.172
+export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c-slim
+export DEFAULT_GATEWAY="10.0.20.1"
+export CRS_NODES=pubhost:racnodep3,viphost:racnodep3-vip
+export SCAN_NAME=racnodepc1-scan
+export ASM_DEVICE1="/dev/asm-disk1"
+export ASM_DEVICE2="/dev/asm-disk2"
+export CRS_ASM_DEVICE_LIST="${ASM_DEVICE1},${ASM_DEVICE2}"
+export ASM_DISK1="/dev/oracleoci/oraclevdd"
+export ASM_DISK2="/dev/oracleoci/oraclevde"
+export STAGING_SOFTWARE_LOC="/scratch/software/21c/goldimages/"
+export DNS_DOMAIN=example.info
+export PUBLIC_NETWORK_NAME="rac_pub1_nw"
+export PRIVATE1_NETWORK_NAME="rac_priv1_nw"
+export PRIVATE2_NETWORK_NAME="rac_priv2_nw"
+export DNS_PUBLIC_IP=10.0.20.25
+export OP_TYPE=racaddnode
+export DB_NAME=ORCLCDB
+export INSTALL_NODE=racnodep3
+export EXISTING_CLS_NODE=racnodep1,racnodep2
+export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc
+export KEY_SECRET_FILE=/opt/.secrets/key.pem
+export DB_SERVICE=service:soepdb
+```
+Bring up RAC Containers-
+```bash
+podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE3_CONTAINER_NAME}
+podman-compose stop ${RACNODE3_CONTAINER_NAME}
+
+podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME}
+podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME}
+podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME}
+
+podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE3_PUBLIC_IP} ${RACNODE3_CONTAINER_NAME}
+podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE3_CRS_PRIVATE_IP1} ${RACNODE3_CONTAINER_NAME}
+podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE3_CRS_PRIVATE_IP2} ${RACNODE3_CONTAINER_NAME}
+
+podman-compose start ${RACNODE3_CONTAINER_NAME}
+podman exec ${RACNODE3_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log"
+```
+
+Successful message when the RAC container is set up properly-
+```bash
+========================================================
+Oracle Database ORCLCDB3 is up and running on racnodep3.
+========================================================
+```
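+
+To confirm that the new node has joined the cluster, you can optionally run a quick check such as the illustrative one below (the Grid home path is an assumption based on the `GRID_HOME` value used in the compose files in this guide):
+```bash
+# Illustrative only: assumes the Grid home /u01/app/21c/grid configured in this guide.
+podman exec ${RACNODE3_CONTAINER_NAME} /u01/app/21c/grid/bin/olsnodes -n -s
+```
+All three nodes (racnodep1, racnodep2, and racnodep3) should be listed with the status `Active`.
+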
+### Section 3.2: Sample of Addition of Nodes to Oracle RAC Containers using Podman Compose based on Oracle RAC Slim Image with NFS Storage Devices
+
+Below is an example of adding one more node to an existing two-node Oracle RAC cluster using the Oracle RAC slim image and user-defined files with a Podman Compose file -
+
+Create a compose file named [podman-compose.yml](./withoutresponsefiles/nfsdevices/addition/podman-compose.yml) in your working directory.
+
+Export the environment variables required by the `podman-compose.yml` file -
+```bash
+export HEALTHCHECK_INTERVAL=60s
+export HEALTHCHECK_TIMEOUT=120s
+export HEALTHCHECK_RETRIES=240
+export RACNODE3_CONTAINER_NAME=racnodep3
+export RACNODE3_HOST_NAME=racnodep3
+export RACNODE3_PUBLIC_IP=10.0.20.172
+export RACNODE3_CRS_PRIVATE_IP1=192.168.17.172
+export RACNODE3_CRS_PRIVATE_IP2=192.168.18.172
+export INSTALL_NODE=racnodep3
+export RAC_IMAGE_NAME=localhost/oracle/database-rac:21c-slim
+export DEFAULT_GATEWAY="10.0.20.1"
+export CRS_NODES="\"pubhost:racnodep3,viphost:racnodep3-vip\""
+export EXISTING_CLS_NODE="racnodep1,racnodep2"
+export SCAN_NAME=racnodepc1-scan
+export CRS_ASM_DISCOVERY_STRING="/oradata"
+export CRS_ASM_DEVICE_LIST="/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img"
+export DNS_CONTAINER_NAME=rac-dnsserver
+export DNS_HOST_NAME=racdns
+export DNS_IMAGE_NAME="oracle/rac-dnsserver:latest"
+export RAC_NODE_NAME_PREFIXP="racnodep"
+export STAGING_SOFTWARE_LOC="/scratch/software/21c/goldimages/"
+export DNS_DOMAIN=example.info
+export PUBLIC_NETWORK_NAME="rac_pub1_nw"
+export PUBLIC_NETWORK_SUBNET="10.0.20.0/24"
+export PRIVATE1_NETWORK_NAME="rac_priv1_nw"
+export PRIVATE1_NETWORK_SUBNET="192.168.17.0/24"
+export PRIVATE2_NETWORK_NAME="rac_priv2_nw"
+export PRIVATE2_NETWORK_SUBNET="192.168.18.0/24"
+export DNS_PUBLIC_IP=10.0.20.25
+export PWD_SECRET_FILE=/opt/.secrets/pwdfile.enc
+export KEY_SECRET_FILE=/opt/.secrets/key.pem
+export CMAN_CONTAINER_NAME=racnodepc1-cman
+export CMAN_HOST_NAME=racnodepc1-cman1
+export CMAN_PUBLIC_IP=10.0.20.15
+export CMAN_PUBLIC_HOSTNAME="racnodepc1-cman1"
+export DB_SERVICE=service:soepdb
+```
+Bring up RAC Containers-
+```bash
+podman-compose --podman-run-args="-t -i --systemd=always --cpuset-cpus 0-1 --memory 16G --memory-swap 32G" up -d ${RACNODE3_CONTAINER_NAME}
+podman-compose stop ${RACNODE3_CONTAINER_NAME}
+
+podman network disconnect ${PUBLIC_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME}
+podman network disconnect ${PRIVATE1_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME}
+podman network disconnect ${PRIVATE2_NETWORK_NAME} ${RACNODE3_CONTAINER_NAME}
+
+podman network connect ${PUBLIC_NETWORK_NAME} --ip ${RACNODE3_PUBLIC_IP} ${RACNODE3_CONTAINER_NAME}
+podman network connect ${PRIVATE1_NETWORK_NAME} --ip ${RACNODE3_CRS_PRIVATE_IP1} ${RACNODE3_CONTAINER_NAME}
+podman network connect ${PRIVATE2_NETWORK_NAME} --ip ${RACNODE3_CRS_PRIVATE_IP2} ${RACNODE3_CONTAINER_NAME}
+
+podman-compose start ${RACNODE3_CONTAINER_NAME}
+podman exec ${RACNODE3_CONTAINER_NAME} /bin/bash -c "tail -f /tmp/orod/oracle_rac_setup.log"
+```
+
+Successful message when the RAC container is set up properly-
+```bash
+========================================================
+Oracle Database ORCLCDB3 is up and running on racnodep3.
+========================================================
+```
+
+## Section 4: Environment Variables for Oracle RAC on Podman Compose
+
+Refer to [Environment Variables Explained for Oracle RAC on Podman Compose](../../../docs/ENVVARIABLESCOMPOSE.md) for an explanation of all the environment variables related to Oracle RAC on Podman Compose. Change or set these environment variables as needed for your environment.
+
+## Section 5: Validating Oracle RAC Environment
+You can validate whether the environment is healthy by running the command below-
+```bash
+podman ps -a
+
+CONTAINER ID  IMAGE                                    COMMAND               CREATED         STATUS                   PORTS       NAMES
+f1345fd4047b  localhost/oracle/rac-dnsserver:latest    /bin/sh -c exec $...  8 hours ago     Up 8 hours (healthy)                 rac-dnsserver
+2f42e49758d1  localhost/oracle/database-rac:21c-slim                         46 minutes ago  Up 37 minutes (healthy)              racnodep1
+a27fceea9fe6  localhost/oracle/database-rac:21c-slim                         46 minutes ago  Up 37 minutes (healthy)              racnodep2
+```
+Note:
+- Look for `(healthy)` next to the container names in the `STATUS` column.
+
+## Section 6: Connecting to Oracle RAC Environment
+
+**IMPORTANT:** This section assumes that you have successfully created an Oracle RAC cluster using the preceding sections.
+Refer to [README](../../../docs/CONNECTING.md) for instructions on how to connect to the Oracle RAC Database.
+
+## Cleanup
+Refer to [README](../../../docs/CLEANUP.md) for instructions on how to clean up the Oracle RAC Database container environment.
+
+## Support
+
+At the time of this release, Oracle RAC on Podman is supported on Oracle Linux 8.10 or later. To see current Linux support certifications, refer to the [Oracle RAC on Podman Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/install-and-upgrade.html).
+
+## License
+
+To download and run Oracle Grid Infrastructure and Oracle Database, regardless of whether inside or outside a container, you must download the binaries from the Oracle website and accept the license indicated on that page.
+
+All scripts and files hosted in this repository which are required to build the container images are, unless otherwise noted, released under the UPL 1.0 license.
+
+## Copyright
+
+Copyright (c) 2014-2025 Oracle and/or its affiliates.
\ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/dbca_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/dbca_21c.rsp new file mode 100644 index 0000000000..e36b5e55d7 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/dbca_21c.rsp @@ -0,0 +1,58 @@ +responseFileVersion=/oracle/assistants/rspfmt_dbca_response_schema_v21.0.0 +gdbName=ORCLCDB +sid=ORCLCDB +databaseConfigType=RAC +RACOneNodeServiceName= +policyManaged=false +managementPolicy= +createServerPool=false +serverPoolName= +cardinality= +force=false +pqPoolName= +pqCardinality= +createAsContainerDatabase=true +numberOfPDBs=1 +pdbName=ORCLPDB +useLocalUndoForPDBs=true +pdbAdminPassword=ORacle__21c +nodelist=racnodep1,racnodep2 +templateName={ORACLE_HOME}/assistants/dbca/templates/General_Purpose.dbc +sysPassword=ORacle__21c +systemPassword=ORacle__21c +oracleHomeUserPassword= +emConfiguration= +runCVUChecks=true +dbsnmpPassword=ORacle__21c +omsHost= +omsPort= +emUser= +emPassword= +dvConfiguration=false +dvUserName= +dvUserPassword= +dvAccountManagerName= +dvAccountManagerPassword= +olsConfiguration=false +datafileJarLocation={ORACLE_HOME}/assistants/dbca/templates/ +datafileDestination=+DATA/{DB_UNIQUE_NAME}/ +recoveryAreaDestination= +storageType=ASM +diskGroupName=+DATA/{DB_UNIQUE_NAME}/ +asmsnmpPassword= +recoveryGroupName= +characterSet=AL32UTF8 +nationalCharacterSet=AL16UTF16 +registerWithDirService= +dirServiceUserName= +dirServicePassword= +walletPassword= +listeners=LISTENER +variablesFile= +variables=DB_UNIQUE_NAME=ORCLCDB,ORACLE_BASE=/u01/app/oracle,PDB_NAME=ORCLPDB,DB_NAME=ORCLCDB,ORACLE_HOME=/u01/app/oracle/product/21c/dbhome_1,SID=ORCLCDB +initParams=audit_trail=none,audit_sys_operations=false,remote_login_passwordfile=exclusive +sampleSchema=false +memoryPercentage=40 +databaseType=MULTIPURPOSE +automaticMemoryManagement=false +totalMemory=5000 \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/blockdevices/addition/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/blockdevices/addition/podman-compose.yml new file mode 100644 index 0000000000..8af9e406f8 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/blockdevices/addition/podman-compose.yml @@ -0,0 +1,87 @@ +--- +version: "3" +networks: + rac_pub1_nw: + name: ${PUBLIC_NETWORK_NAME} + external: true + rac_priv1_nw: + name: ${PRIVATE1_NETWORK_NAME} + external: true + rac_priv2_nw: + name: ${PRIVATE2_NETWORK_NAME} + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +services: + racnodep3: + container_name: ${RACNODE3_CONTAINER_NAME} + hostname: ${RACNODE3_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - /scratch/rac/cluster01/node3:/u01 + - /scratch:/scratch + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE3_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE3_CRS_PRIVATE_IP2} + OP_TYPE: racaddnode + INSTALL_NODE: ${INSTALL_NODE} + EXISTING_CLS_NODE: ${EXISTING_CLS_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + 
CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_DIR: ${CRS_ASM_DISCOVERY_DIR} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + GRID_HOME: /u01/app/21c/grid + GRID_BASE: /u01/app/grid + DB_HOME: /u01/app/oracle/product/21c/dbhome_1 + DB_BASE: /u01/app/oracle + INVENTORY: /u01/app/oraInventory + STAGING_SOFTWARE_LOC: ${STAGING_SOFTWARE_LOC} + GRID_SW_ZIP_FILE: LINUX.X64_213000_grid_home.zip + DB_SW_ZIP_FILE: LINUX.X64_213000_db_home.zip + DB_NAME: ORCLCDB + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/blockdevices/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/blockdevices/podman-compose.yml new file mode 100644 index 0000000000..8f940ed41c --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/blockdevices/podman-compose.yml @@ -0,0 +1,196 @@ +--- +version: "3" +networks: + rac_pub1_nw: + external: true + rac_priv1_nw: + external: true + rac_priv2_nw: + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +services: + rac-dnsserver: + container_name: ${DNS_CONTAINER_NAME} + hostname: ${DNS_HOST_NAME} + image: ${DNS_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + environment: + SETUP_DNS_CONFIG_FILES: "setup_true" + DOMAIN_NAME: ${DNS_DOMAIN} + RAC_NODE_NAME_PREFIXP: ${RAC_NODE_NAME_PREFIXP} + WEBMIN_ENABLED: false + SETUP_DNS_CONFIG_FILES: "setup_true" + cap_add: + - AUDIT_WRITE + healthcheck: + test: ["CMD-SHELL", "pgrep named"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + privileged: false + networks: + rac_pub1_nw: + ipv4_address: ${DNS_PUBLIC_IP} + racnodep1: + container_name: ${RACNODE1_CONTAINER_NAME} + hostname: ${RACNODE1_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - /scratch/rac/cluster01/node1:/u01 + - /scratch:/scratch + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE1_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE1_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + GRID_HOME: /u01/app/21c/grid + GRID_BASE: /u01/app/grid + DB_HOME: /u01/app/oracle/product/21c/dbhome_1 + DB_BASE: /u01/app/oracle + INVENTORY: /u01/app/oraInventory + STAGING_SOFTWARE_LOC: ${STAGING_SOFTWARE_LOC} + GRID_SW_ZIP_FILE: LINUX.X64_213000_grid_home.zip + DB_SW_ZIP_FILE: LINUX.X64_213000_db_home.zip + DB_NAME: 
ORCLCDB + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodep2: + container_name: ${RACNODE2_CONTAINER_NAME} + hostname: ${RACNODE2_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - /scratch/rac/cluster01/node2:/u01 + - /scratch:/scratch + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE2_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE2_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + GRID_HOME: /u01/app/21c/grid + GRID_BASE: /u01/app/grid + DB_HOME: /u01/app/oracle/product/21c/dbhome_1 + DB_BASE: /u01/app/oracle + INVENTORY: /u01/app/oraInventory + STAGING_SOFTWARE_LOC: ${STAGING_SOFTWARE_LOC} + GRID_SW_ZIP_FILE: LINUX.X64_213000_grid_home.zip + DB_SW_ZIP_FILE: LINUX.X64_213000_db_home.zip + DB_NAME: ORCLCDB + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodepc1-cman: + container_name: ${CMAN_CONTAINER_NAME} + hostname: ${CMAN_HOST_NAME} + image: ${CMAN_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + dns: ${DNS_PUBLIC_IP} + environment: + DOMAIN_NAME: ${DNS_DOMAIN} + PUBLIC_IP: ${CMAN_PUBLIC_IP} + PUBLIC_HOSTNAME: ${CMAN_PUBLIC_HOSTNAME} + DB_HOSTDETAILS: ${DB_HOSTDETAILS} + privileged: false + ports: + - 1521:1521 + networks: + rac_pub1_nw: + ipv4_address: ${CMAN_PUBLIC_IP} + cap_add: + - AUDIT_WRITE + - NET_RAW + healthcheck: + test: ["CMD-SHELL", "pgrep -f 'cmadmin'"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/nfsdevices/addition/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/nfsdevices/addition/podman-compose.yml new file mode 
100644 index 0000000000..c6227aa8d3 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/nfsdevices/addition/podman-compose.yml @@ -0,0 +1,89 @@ +--- +version: "3" +networks: + rac_pub1_nw: + name: ${PUBLIC_NETWORK_NAME} + external: true + rac_priv1_nw: + name: ${PRIVATE1_NETWORK_NAME} + external: true + rac_priv2_nw: + name: ${PRIVATE2_NETWORK_NAME} + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +volumes: + racstorage: + external: true +services: + racnodep3: + container_name: ${RACNODE3_CONTAINER_NAME} + hostname: ${RACNODE3_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - /scratch/rac/cluster01/node3:/u01 + - /scratch:/scratch + - racstorage:/oradata + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE3_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE3_CRS_PRIVATE_IP2} + OP_TYPE: racaddnode + INSTALL_NODE: ${INSTALL_NODE} + EXISTING_CLS_NODE: ${EXISTING_CLS_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_DIR: ${CRS_ASM_DISCOVERY_DIR} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + GRID_HOME: /u01/app/21c/grid + GRID_BASE: /u01/app/grid + DB_HOME: /u01/app/oracle/product/21c/dbhome_1 + DB_BASE: /u01/app/oracle + INVENTORY: /u01/app/oraInventory + STAGING_SOFTWARE_LOC: ${STAGING_SOFTWARE_LOC} + GRID_SW_ZIP_FILE: LINUX.X64_213000_grid_home.zip + DB_SW_ZIP_FILE: LINUX.X64_213000_db_home.zip + DB_NAME: ORCLCDB + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + ASM_ON_NAS: True + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/nfsdevices/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/nfsdevices/podman-compose.yml new file mode 100644 index 0000000000..5950c162da --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withoutresponsefiles/nfsdevices/podman-compose.yml @@ -0,0 +1,221 @@ +--- +version: "3" +networks: + rac_pub1_nw: + external: true + rac_priv1_nw: + external: true + rac_priv2_nw: + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +volumes: + racstorage: + external: true +services: + rac-dnsserver: + container_name: ${DNS_CONTAINER_NAME} + hostname: ${DNS_HOST_NAME} + image: ${DNS_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + environment: + SETUP_DNS_CONFIG_FILES: "setup_true" + DOMAIN_NAME: ${DNS_DOMAIN} + RAC_NODE_NAME_PREFIXP: ${RAC_NODE_NAME_PREFIXP} + WEBMIN_ENABLED: false + SETUP_DNS_CONFIG_FILES: "setup_true" + 
cap_add: + - AUDIT_WRITE + healthcheck: + test: ["CMD-SHELL", "pgrep named"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + privileged: false + networks: + rac_pub1_nw: + ipv4_address: ${DNS_PUBLIC_IP} + racnode-storage: + container_name: ${STORAGE_CONTAINER_NAME} + hostname: ${STORAGE_HOST_NAME} + image: ${STORAGE_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + volumes: + - ${NFS_STORAGE_VOLUME}:/oradata + cap_add: + - SYS_ADMIN + - AUDIT_WRITE + - NET_ADMIN + restart: always + healthcheck: + test: + - CMD-SHELL + - /bin/bash -c "ls -lrt /oradata/ && showmount -e | grep '/oradata'" + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + networks: + rac_pub1_nw: + ipv4_address: ${STORAGE_PUBLIC_IP} + racnodep1: + container_name: ${RACNODE1_CONTAINER_NAME} + hostname: ${RACNODE1_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - /scratch/rac/cluster01/node1:/u01 + - /scratch:/scratch + - racstorage:/oradata + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE1_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE1_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_STRING: "/oradata" + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + GRID_HOME: /u01/app/21c/grid + GRID_BASE: /u01/app/grid + DB_HOME: /u01/app/oracle/product/21c/dbhome_1 + DB_BASE: /u01/app/oracle + INVENTORY: /u01/app/oraInventory + STAGING_SOFTWARE_LOC: ${STAGING_SOFTWARE_LOC} + GRID_SW_ZIP_FILE: LINUX.X64_213000_grid_home.zip + DB_SW_ZIP_FILE: LINUX.X64_213000_db_home.zip + DB_NAME: ORCLCDB + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + ASM_ON_NAS: True + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodep2: + container_name: ${RACNODE2_CONTAINER_NAME} + hostname: ${RACNODE2_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - /scratch/rac/cluster01/node2:/u01 + - /scratch:/scratch + - racstorage:/oradata + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE2_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE2_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + CRS_NODES: ${CRS_NODES} + SCAN_NAME: ${SCAN_NAME} + CRS_ASM_DEVICE_LIST: ${CRS_ASM_DEVICE_LIST} + CRS_ASM_DISCOVERY_STRING: "/oradata" + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + GRID_HOME: /u01/app/21c/grid + GRID_BASE: /u01/app/grid + DB_HOME: /u01/app/oracle/product/21c/dbhome_1 + DB_BASE: /u01/app/oracle + INVENTORY: /u01/app/oraInventory + STAGING_SOFTWARE_LOC: ${STAGING_SOFTWARE_LOC} + GRID_SW_ZIP_FILE: 
LINUX.X64_213000_grid_home.zip + DB_SW_ZIP_FILE: LINUX.X64_213000_db_home.zip + DB_NAME: ORCLCDB + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + ASM_ON_NAS: True + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodepc1-cman: + container_name: ${CMAN_CONTAINER_NAME} + hostname: ${CMAN_HOST_NAME} + image: ${CMAN_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + dns: ${DNS_PUBLIC_IP} + environment: + DOMAIN_NAME: ${DNS_DOMAIN} + PUBLIC_IP: ${CMAN_PUBLIC_IP} + PUBLIC_HOSTNAME: ${CMAN_PUBLIC_HOSTNAME} + DB_HOSTDETAILS: ${DB_HOSTDETAILS} + privileged: false + ports: + - 1521:1521 + networks: + rac_pub1_nw: + ipv4_address: ${CMAN_PUBLIC_IP} + cap_add: + - AUDIT_WRITE + - NET_RAW + healthcheck: + test: ["CMD-SHELL", "pgrep -f 'cmadmin'"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/blockdevices/grid_setup_new_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/blockdevices/grid_setup_new_21c.rsp new file mode 100644 index 0000000000..c7ffe19d4a --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/blockdevices/grid_setup_new_21c.rsp @@ -0,0 +1,64 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 +INVENTORY_LOCATION=/u01/app/oraInventory +oracle.install.option=CRS_CONFIG +ORACLE_BASE=/u01/app/grid +oracle.install.asm.OSDBA=dba +oracle.install.asm.OSOPER= +oracle.install.asm.OSASM=asmadmin +oracle.install.crs.config.scanType=LOCAL_SCAN +oracle.install.crs.config.SCANClientDataFile= +oracle.install.crs.config.gpnp.scanName=racnodepc1-scan +oracle.install.crs.config.gpnp.scanPort=1521 +oracle.install.crs.config.ClusterConfiguration=STANDALONE +oracle.install.crs.config.configureAsExtendedCluster=false +oracle.install.crs.config.memberClusterManifestFile= +oracle.install.crs.config.clusterName=rac01cluster +oracle.install.crs.config.gpnp.configureGNS= +oracle.install.crs.config.autoConfigureClusterNodeVIP=false +oracle.install.crs.config.gpnp.gnsOption= +oracle.install.crs.config.gpnp.gnsClientDataFile= +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= +oracle.install.crs.config.sites= +oracle.install.crs.config.clusterNodes=racnodep1:racnodep1-vip:HUB,racnodep2:racnodep2-vip:HUB +oracle.install.crs.config.networkInterfaceList=eth0:10.0.20.0:1,eth1:192.168.17.0:5,eth2:192.168.18.0:5 +oracle.install.asm.configureGIMRDataDG=false +oracle.install.crs.config.storageOption= +oracle.install.crs.config.useIPMI=false +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +oracle.install.asm.storageOption=ASM +oracle.install.asmOnNAS.ocrLocation= 
+oracle.install.asmOnNAS.configureGIMRDataDG=false +oracle.install.asmOnNAS.gimrLocation= +oracle.install.asm.SYSASMPassword=ORacle__21c +oracle.install.asm.diskGroup.name=DATA +oracle.install.asm.diskGroup.redundancy=EXTERNAL +oracle.install.asm.diskGroup.AUSize=4 +oracle.install.asm.diskGroup.FailureGroups= +oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-disk1,,/dev/asm-disk2, +oracle.install.asm.diskGroup.disks=/dev/asm-disk1,/dev/asm-disk2 +oracle.install.asm.diskGroup.quorumFailureGroupNames= +oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm* +oracle.install.asm.monitorPassword=ORacle__21c +oracle.install.asm.gimrDG.name= +oracle.install.asm.gimrDG.redundancy= +oracle.install.asm.gimrDG.AUSize=1 +oracle.install.asm.gimrDG.FailureGroups= +oracle.install.asm.gimrDG.disksWithFailureGroupNames= +oracle.install.asm.gimrDG.disks= +oracle.install.asm.gimrDG.quorumFailureGroupNames= +oracle.install.asm.configureAFD=false +oracle.install.crs.configureRHPS=false +oracle.install.crs.config.ignoreDownNodes=false +oracle.install.config.managementOption=NONE +oracle.install.config.omsHost= +oracle.install.config.omsPort=0 +oracle.install.config.emAdminUser= +oracle.install.config.emAdminPassword= +oracle.install.crs.rootconfig.executeRootScript=false +oracle.install.crs.rootconfig.configMethod=ROOT +oracle.install.crs.rootconfig.sudoPath= +oracle.install.crs.rootconfig.sudoUserName= +oracle.install.crs.config.batchinfo= +oracle.install.crs.deleteNode.nodes= \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/blockdevices/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/blockdevices/podman-compose.yml new file mode 100644 index 0000000000..02fcc6b43c --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/blockdevices/podman-compose.yml @@ -0,0 +1,190 @@ +--- +version: "3" +networks: + rac_pub1_nw: + external: true + rac_priv1_nw: + external: true + rac_priv2_nw: + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +services: + rac-dnsserver: + container_name: ${DNS_CONTAINER_NAME} + hostname: ${DNS_HOST_NAME} + image: ${DNS_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + environment: + SETUP_DNS_CONFIG_FILES: "setup_true" + DOMAIN_NAME: ${DNS_DOMAIN} + RAC_NODE_NAME_PREFIXP: ${RAC_NODE_NAME_PREFIXP} + WEBMIN_ENABLED: false + SETUP_DNS_CONFIG_FILES: "setup_true" + cap_add: + - AUDIT_WRITE + healthcheck: + test: ["CMD-SHELL", "pgrep named"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + privileged: false + networks: + rac_pub1_nw: + ipv4_address: ${DNS_PUBLIC_IP} + racnodep1: + container_name: ${RACNODE1_CONTAINER_NAME} + hostname: ${RACNODE1_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - /scratch/rac/cluster01/node1:/u01 + - /scratch:/scratch + - /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp + - /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp:/tmp/grid_21c.rsp + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE1_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE1_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + SCAN_NAME: ${SCAN_NAME} + 
INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + GRID_HOME: /u01/app/21c/grid + STAGING_SOFTWARE_LOC: ${STAGING_SOFTWARE_LOC} + GRID_SW_ZIP_FILE: LINUX.X64_213000_grid_home.zip + DB_SW_ZIP_FILE: LINUX.X64_213000_db_home.zip + GRID_RESPONSE_FILE: /tmp/grid_21c.rsp + DBCA_RESPONSE_FILE: /tmp/dbca_21c.rsp + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodep2: + container_name: ${RACNODE2_CONTAINER_NAME} + hostname: ${RACNODE2_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - /scratch/rac/cluster01/node2:/u01 + - /scratch:/scratch + - /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp + - /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp:/tmp/grid_21c.rsp + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE2_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE2_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + SCAN_NAME: ${SCAN_NAME} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + GRID_HOME: /u01/app/21c/grid + STAGING_SOFTWARE_LOC: ${STAGING_SOFTWARE_LOC} + GRID_SW_ZIP_FILE: LINUX.X64_213000_grid_home.zip + DB_SW_ZIP_FILE: LINUX.X64_213000_db_home.zip + GRID_RESPONSE_FILE: /tmp/grid_21c.rsp + DBCA_RESPONSE_FILE: /tmp/dbca_21c.rsp + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + DB_SERVICE: ${DB_SERVICE} + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + devices: + - "${ASM_DISK1}:${ASM_DEVICE1}" + - "${ASM_DISK2}:${ASM_DEVICE2}" + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodepc1-cman: + container_name: ${CMAN_CONTAINER_NAME} + hostname: ${CMAN_HOST_NAME} + image: ${CMAN_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + dns: ${DNS_PUBLIC_IP} + environment: + DOMAIN_NAME: ${DNS_DOMAIN} + PUBLIC_IP: ${CMAN_PUBLIC_IP} + PUBLIC_HOSTNAME: ${CMAN_PUBLIC_HOSTNAME} + DB_HOSTDETAILS: ${DB_HOSTDETAILS} + privileged: false + ports: + - 1521:1521 + networks: + rac_pub1_nw: + ipv4_address: ${CMAN_PUBLIC_IP} + cap_add: + - AUDIT_WRITE + - NET_RAW + healthcheck: + test: ["CMD-SHELL", "pgrep -f 'cmadmin'"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git 
a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/nfsdevices/grid_setup_new_21c.rsp b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/nfsdevices/grid_setup_new_21c.rsp new file mode 100644 index 0000000000..16062dd6cb --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/nfsdevices/grid_setup_new_21c.rsp @@ -0,0 +1,64 @@ +oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v21.0.0 +INVENTORY_LOCATION=/u01/app/oraInventory +oracle.install.option=CRS_CONFIG +ORACLE_BASE=/u01/app/grid +oracle.install.asm.OSDBA=dba +oracle.install.asm.OSOPER= +oracle.install.asm.OSASM=asmadmin +oracle.install.crs.config.scanType=LOCAL_SCAN +oracle.install.crs.config.SCANClientDataFile= +oracle.install.crs.config.gpnp.scanName=racnodepc1-scan +oracle.install.crs.config.gpnp.scanPort=1521 +oracle.install.crs.config.ClusterConfiguration=STANDALONE +oracle.install.crs.config.configureAsExtendedCluster=false +oracle.install.crs.config.memberClusterManifestFile= +oracle.install.crs.config.clusterName=rac01cluster +oracle.install.crs.config.gpnp.configureGNS= +oracle.install.crs.config.autoConfigureClusterNodeVIP=false +oracle.install.crs.config.gpnp.gnsOption= +oracle.install.crs.config.gpnp.gnsClientDataFile= +oracle.install.crs.config.gpnp.gnsSubDomain= +oracle.install.crs.config.gpnp.gnsVIPAddress= +oracle.install.crs.config.sites= +oracle.install.crs.config.clusterNodes=racnodep1:racnodep1-vip:HUB,racnodep2:racnodep2-vip:HUB +oracle.install.crs.config.networkInterfaceList=eth0:10.0.20.0:1,eth1:192.168.17.0:5,eth2:192.168.18.0:5 +oracle.install.asm.configureGIMRDataDG=false +oracle.install.crs.config.storageOption= +oracle.install.crs.config.useIPMI=false +oracle.install.crs.config.ipmi.bmcUsername= +oracle.install.crs.config.ipmi.bmcPassword= +oracle.install.asm.storageOption=ASM +oracle.install.asmOnNAS.ocrLocation= +oracle.install.asmOnNAS.configureGIMRDataDG=false +oracle.install.asmOnNAS.gimrLocation= +oracle.install.asm.SYSASMPassword=ORacle__21c +oracle.install.asm.diskGroup.name=DATA +oracle.install.asm.diskGroup.redundancy=EXTERNAL +oracle.install.asm.diskGroup.AUSize=4 +oracle.install.asm.diskGroup.FailureGroups= +oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oradata/asm_disk01.img,,/oradata/asm_disk02.img,,/oradata/asm_disk03.img,,/oradata/asm_disk04.img,,/oradata/asm_disk05.im +oracle.install.asm.diskGroup.disks=/oradata/asm_disk01.img,/oradata/asm_disk02.img,/oradata/asm_disk03.img,/oradata/asm_disk04.img,/oradata/asm_disk05.img +oracle.install.asm.diskGroup.quorumFailureGroupNames= +oracle.install.asm.diskGroup.diskDiscoveryString=/oradata/asm_disk* +oracle.install.asm.monitorPassword=ORacle__21c +oracle.install.asm.gimrDG.name= +oracle.install.asm.gimrDG.redundancy= +oracle.install.asm.gimrDG.AUSize=1 +oracle.install.asm.gimrDG.FailureGroups= +oracle.install.asm.gimrDG.disksWithFailureGroupNames= +oracle.install.asm.gimrDG.disks= +oracle.install.asm.gimrDG.quorumFailureGroupNames= +oracle.install.asm.configureAFD=false +oracle.install.crs.configureRHPS=false +oracle.install.crs.config.ignoreDownNodes=false +oracle.install.config.managementOption=NONE +oracle.install.config.omsHost= +oracle.install.config.omsPort=0 +oracle.install.config.emAdminUser= +oracle.install.config.emAdminPassword= +oracle.install.crs.rootconfig.executeRootScript=false 
+oracle.install.crs.rootconfig.configMethod=ROOT +oracle.install.crs.rootconfig.sudoPath= +oracle.install.crs.rootconfig.sudoUserName= +oracle.install.crs.config.batchinfo= +oracle.install.crs.deleteNode.nodes= \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/nfsdevices/podman-compose.yml b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/nfsdevices/podman-compose.yml new file mode 100644 index 0000000000..240c1ed983 --- /dev/null +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/rac-compose/racslimimage/withresponsefiles/nfsdevices/podman-compose.yml @@ -0,0 +1,219 @@ +--- +version: "3" +networks: + rac_pub1_nw: + external: true + rac_priv1_nw: + external: true + rac_priv2_nw: + external: true +secrets: + pwdsecret: + file: ${PWD_SECRET_FILE} + keysecret: + file: ${KEY_SECRET_FILE} +volumes: + racstorage: + external: true +services: + rac-dnsserver: + container_name: ${DNS_CONTAINER_NAME} + hostname: ${DNS_HOST_NAME} + image: ${DNS_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + environment: + SETUP_DNS_CONFIG_FILES: "setup_true" + DOMAIN_NAME: ${DNS_DOMAIN} + RAC_NODE_NAME_PREFIXP: ${RAC_NODE_NAME_PREFIXP} + WEBMIN_ENABLED: false + SETUP_DNS_CONFIG_FILES: "setup_true" + cap_add: + - AUDIT_WRITE + healthcheck: + test: ["CMD-SHELL", "pgrep named"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + privileged: false + networks: + rac_pub1_nw: + ipv4_address: ${DNS_PUBLIC_IP} + racnode-storage: + container_name: ${STORAGE_CONTAINER_NAME} + hostname: ${STORAGE_HOST_NAME} + image: ${STORAGE_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + dns: ${DNS_PUBLIC_IP} + volumes: + - ${NFS_STORAGE_VOLUME}:/oradata + environment: + DNS_SERVER: ${DNS_PUBLIC_IP} + DOMAIN: ${DNS_DOMAIN} + cap_add: + - SYS_ADMIN + - AUDIT_WRITE + - NET_ADMIN + restart: always + healthcheck: + test: + - CMD-SHELL + - /bin/bash -c "ls -lrt /oradata/ && showmount -e | grep '/oradata'" + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + networks: + rac_pub1_nw: + ipv4_address: ${STORAGE_PUBLIC_IP} + racnodep1: + container_name: ${RACNODE1_CONTAINER_NAME} + hostname: ${RACNODE1_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - /scratch/rac/cluster01/node1:/u01 + - /scratch:/scratch + - /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp + - racstorage:/oradata + - /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp:/tmp/grid_21c.rsp + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE1_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE1_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + SCAN_NAME: ${SCAN_NAME} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + GRID_HOME: /u01/app/21c/grid + STAGING_SOFTWARE_LOC: ${STAGING_SOFTWARE_LOC} + GRID_SW_ZIP_FILE: LINUX.X64_213000_grid_home.zip + DB_SW_ZIP_FILE: LINUX.X64_213000_db_home.zip + GRID_RESPONSE_FILE: /tmp/grid_21c.rsp + DBCA_RESPONSE_FILE: /tmp/dbca_21c.rsp + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + ASM_ON_NAS: True + DB_SERVICE: ${DB_SERVICE} + RESET_FAILED_SYSTEMD: true + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 
'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodep2: + container_name: ${RACNODE2_CONTAINER_NAME} + hostname: ${RACNODE2_HOST_NAME} + image: ${RAC_IMAGE_NAME} + restart: always + dns: ${DNS_PUBLIC_IP} + dns_search: ${DNS_DOMAIN} + shm_size: 4G + secrets: + - pwdsecret + - keysecret + volumes: + - /scratch/rac/cluster01/node2:/u01 + - /scratch:/scratch + - /scratch/common_scripts/podman/rac/dbca_21c.rsp:/tmp/dbca_21c.rsp + - /scratch/common_scripts/podman/rac/grid_setup_new_21c.rsp:/tmp/grid_21c.rsp + - racstorage:/oradata + environment: + DNS_SERVERS: ${DNS_PUBLIC_IP} + CRS_PRIVATE_IP1: ${RACNODE2_CRS_PRIVATE_IP1} + CRS_PRIVATE_IP2: ${RACNODE2_CRS_PRIVATE_IP2} + OP_TYPE: setuprac + INSTALL_NODE: ${INSTALL_NODE} + SCAN_NAME: ${SCAN_NAME} + INIT_SGA_SIZE: 3G + INIT_PGA_SIZE: 2G + GRID_HOME: /u01/app/21c/grid + STAGING_SOFTWARE_LOC: ${STAGING_SOFTWARE_LOC} + GRID_SW_ZIP_FILE: LINUX.X64_213000_grid_home.zip + DB_SW_ZIP_FILE: LINUX.X64_213000_db_home.zip + GRID_RESPONSE_FILE: /tmp/grid_21c.rsp + DBCA_RESPONSE_FILE: /tmp/dbca_21c.rsp + DB_PWD_FILE: pwdsecret + PWD_KEY: keysecret + CMAN_HOST: ${CMAN_HOST_NAME} + CMAN_PORT: 1521 + ASM_ON_NAS: True + DB_SERVICE: ${DB_SERVICE} + RESET_FAILED_SYSTEMD: true + sysctls: + - kernel.shmall=2097152 + - kernel.shmmax=8589934592 + - kernel.shmmni=4096 + - 'kernel.sem=250 32000 100 128' + - 'net.ipv4.conf.eth1.rp_filter=2' + - 'net.ipv4.conf.eth2.rp_filter=2' + ulimits: + rtprio: 99 + cap_add: + - SYS_RESOURCE + - NET_ADMIN + - SYS_NICE + - AUDIT_WRITE + - AUDIT_CONTROL + - NET_RAW + networks: + - rac_pub1_nw + - rac_priv1_nw + - rac_priv2_nw + healthcheck: + test: ["CMD", "/bin/python3", "/opt/scripts/startup/scripts/main.py", "--checkracstatus"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} + racnodepc1-cman: + container_name: ${CMAN_CONTAINER_NAME} + hostname: ${CMAN_HOST_NAME} + image: ${CMAN_IMAGE_NAME} + dns_search: ${DNS_DOMAIN} + dns: ${DNS_PUBLIC_IP} + environment: + DOMAIN_NAME: ${DNS_DOMAIN} + PUBLIC_IP: ${CMAN_PUBLIC_IP} + PUBLIC_HOSTNAME: ${CMAN_PUBLIC_HOSTNAME} + DB_HOSTDETAILS: ${DB_HOSTDETAILS} + privileged: false + ports: + - 1521:1521 + networks: + rac_pub1_nw: + ipv4_address: ${CMAN_PUBLIC_IP} + cap_add: + - AUDIT_WRITE + - NET_RAW + healthcheck: + test: ["CMD-SHELL", "pgrep -f 'cmadmin'"] + interval: ${HEALTHCHECK_INTERVAL} + timeout: ${HEALTHCHECK_TIMEOUT} + retries: ${HEALTHCHECK_RETRIES} \ No newline at end of file diff --git a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/racdockercompose/README.md b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/racdockercompose/README.md index f881df0ad3..ba4464767b 100644 --- a/OracleDatabase/RAC/OracleRealApplicationClusters/samples/racdockercompose/README.md +++ b/OracleDatabase/RAC/OracleRealApplicationClusters/samples/racdockercompose/README.md @@ -487,4 +487,4 @@ docker rmi -f oracle/rac-dnsserver:latest oracle/rac-storage-server:19.3.0 oracl ## Copyright -Copyright (c) 2014-2024 Oracle and/or its affiliates. All rights reserved. 
\ No newline at end of file +Copyright (c) 2014-2025 Oracle and/or its affiliates. All rights reserved. \ No newline at end of file diff --git a/OracleDatabase/RAC/README.md b/OracleDatabase/RAC/README.md index b29fe10b0e..7634a8e3bb 100644 --- a/OracleDatabase/RAC/README.md +++ b/OracleDatabase/RAC/README.md @@ -1,38 +1,48 @@ # Oracle Database on Containers -Sample build files to facilitate installation, configuration, and environment setup for DevOps users. For more information about Oracle Database please see the [Oracle Database Online Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/index.html). +This guide contains details of build files to facilitate installation, configuration, and environment setup of an Oracle Real Application Clusters (Oracle RAC) Oracle Database on Containers for DevOps users. For more information about Oracle Database, see [_Oracle Database Online Documentation_](https://docs.oracle.com/en/database/oracle/oracle-database/index.html). -## How to build Oracle RAC container image and access in your environment +## How to build an Oracle RAC container image and access in your environment -Please review README of following sections in a given order. After reviewing the README of each section, you can skip the image/container creation if you do not meet the requirement. +Review the README of the following sections in the order given. After reviewing the README of each section, use the build files, and skip the image or container creation steps that do not match your requirements. -* Please review following points before you proceed to next sections: - * For better performance, it is good to use BTRFS file system for Docker storage on the Docker host. Please refer to [Oracle Container Runtime for Docker Documentation](https://docs.oracle.com/cd/E52668_01/E87205/html/index.html) - * Install and configure Docker Engine on Oracle Linux 7 to run RAC on Docker. - * For Oracle RAC setup in this document, we have used public network on 172.16.1.0/24 and the private network on 192.168.17.0/24. - * If you plan to use different public and private network in your environment, please gather details for following IPs: +* Review the following points before you proceed to the next sections: + * Review the [Oracle Container Runtime for Podman documentation](https://docs.oracle.com/en/learn/run-containers-podman/index.html#introduction) + * To run Oracle RAC on Podman, install and configure the Podman engine on Oracle Linux 8. + * For the Oracle RAC setup in this document, we have configured the public network on 10.0.20.0/24 and the private network on 192.168.17.0/24 and 192.168.18.0/24. + * If you plan to use different public and private networks in your environment, then obtain the configuration you need for the following IPs: * Public IP address for each OracleRealApplicationClusters container. * Private IP address for each OracleRealApplicationClusters container. * Virtual IP address for each OracleRealApplicationClusters container. - * If you have DNS then collect three single client access name (SCAN) addresses for the cluster. For details, please refer to [Installing Oracle Grid Infrastructure Guide](https://docs.oracle.com/en/database/oracle/oracle-database/18/cwlin/index.html). If you do not have DNS server, you can use single scan IP along with scan name for testing purpose. - * Public IP for OracleConnectionManager container. - * Private IP for OracleRACStorageServer container. - * You must have internet connectivity for yum. 
+ * If you have DNS then collect three single client access name (SCAN) addresses for the cluster. For details, see [Installing Oracle Grid Infrastructure Guide](https://docs.oracle.com/en/database/oracle/oracle-database/21/cwlin/index.html). + * (Optional) Public IP for OracleConnectionManager container. + * Ensure to have internet connectivity for DNF Package Manager. ## OracleConnectionManager Provides Docker build files to create an Oracle Connection Manager container image. If you are planing to run RAC containers on single host and RAC containers IPs are not accessible on your network, you can use connection manager image to access RAC database on your network. For more details, see [OracleConnectionManager/README.md](./OracleConnectionManager/README.md). -## OracleRACStorageServer +## Oracle Restart +Provides Details to create Oracle Database on Oracle Restart. For more details, see [OracleRealApplicationClusters/docs/orestart/README.md](./OracleRealApplicationClusters/docs/orestart/README.md) -Provides Docker build files to create an NFS based Storage Server for Oracle RAC. If you do not have block storage or NAS device for Oracle RAC to store OCR/Voting files and Datafiles, you can use OracleRACStorageServer container image to provide shared storage. For more details, see [OracleRACStorageServer/README.md](./OracleRACStorageServer/README.md). +## Oracle Real ApplicationClusters -## OracleRACDNSServer +Provides Podman build files to create an Oracle RAC Database container image. For more details, see [OracleRealApplicationClusters/README.md](./OracleRealApplicationClusters/README.md) -Provides Docker build files to create a local DNS Server container for Oracle RAC On Docker. This container based DNS server provides IP addresses and the hostname resolution for the containers on the host. For more details, see [OracleRACDNSServer/README.md](./OracleDNSServer/README.md). -## OracleRealApplicationClusters +## Oracle Real Application Clusters for Developers -Provides Docker build files to create an Oracle RAC Database container image. For more details, see [OracleRealApplicationClusters/README.md](./OracleRealApplicationClusters/README.md). +Provides Details to create an Oracle RAC Database for a rapid deployment to build CI/CD pipeline. -**Note:** Please make sure that you have reviewed the README of OracleConnectionManager and OracleRACStorageServer sections and created the images/container based on your env before you review the README of OracleRealApplicationClusters. +You need to review `OracleRACDNSServer` and `OracleRACStorageServer` sections, create the images and containers based on your environment configuration before you proceed to `Oracle Real Application Clusters For Developers` section. + +* **OracleRACDNSServer Container** + + Provides Podman build files to create a local DNS Server container for Oracle RAC on Podman. This container-based DNS server provides IP addresses and the hostname resolution for the containers on the host. For more details, see [OracleRACDNSServer/README.md](./OracleDNSServer/README.md). + +* **OracleRACStorageServer Container** + + Provides Podman build files to create an NFS-based storage server for Oracle RAC. If you do not have a block storage or NAS device for Oracle RAC to store OCR, Voting files and Datafiles, then you can use the Oracle RAC Storage Server Container Image to provide shared storage. For more details, see [OracleRACStorageServer/README.md](./OracleRACStorageServer/README.md). 
+ +* **Oracle Real Application Clusters for Developers** + Provides details to create an Oracle RAC Database container image for developers. For more details, see [OracleRealApplicationClusters/docs/developers/README.md](./OracleRealApplicationClusters/docs/developers/README.md) \ No newline at end of file diff --git a/OracleDatabase/README.md b/OracleDatabase/README.md index 7128b5d07a..511129859f 100644 --- a/OracleDatabase/README.md +++ b/OracleDatabase/README.md @@ -7,5 +7,5 @@ Provides Docker build files to create an Oracle Database Single Instance Docker... ## Oracle Sharding Provides terraform scripts to deploy Oracle Sharding in Oracle Cloud with Oracle Database Cloud Service, Docker build files and Sharding on OKE. For more details, see [oracle/db-sharding](https://github.com/oracle/db-sharding). -## RAC -Provides Docker build files to create an Oracle RAC Database docker image. For more details, see [RAC/README.md](./RAC/README.md). +## RAC +Provides Podman build files to create an Oracle RAC Database Podman image. For more details, see [RAC/README.md](./RAC/README.md).