diff --git a/docs_user/modules/proc_configuring-networking-for-control-plane-services.adoc b/docs_user/modules/proc_configuring-networking-for-control-plane-services.adoc index 37b1ad8e8..5b6072350 100644 --- a/docs_user/modules/proc_configuring-networking-for-control-plane-services.adoc +++ b/docs_user/modules/proc_configuring-networking-for-control-plane-services.adoc @@ -53,9 +53,9 @@ spec: "ipam": { "type": "whereabouts", "range": "172.17.0.0/24", - "range_start": "172.17.0.20", <1> - "range_end": "172.17.0.50", <2> - "exclude": [ <3> + "range_start": "172.17.0.20", + "range_end": "172.17.0.50", + "exclude": [ "172.17.0.24/32", "172.17.0.44/31" ] @@ -63,9 +63,9 @@ spec: } ---- + -<1> Defines the start of the IP range. -<2> Defines the end of the IP range. -<3> Excludes part of the IP range. This example excludes IP addresses `172.17.0.24/32` and `172.17.0.44/31` from the allocation pool. +* `spec.config.ipam.range_start` defines the start of the IP range. +* `spec.config.ipam.range_end` defines the end of the IP range. +* `spec.config.ipam.exclude` excludes part of the IP range. This example excludes IP addresses `172.17.0.24/32` and `172.17.0.44/31` from the allocation pool. . If your {OpenStackShort} services require load balancer IP addresses, define the pools for these services in an `IPAddressPool` CR. 
For example: + diff --git a/docs_user/modules/proc_deploying-backend-services.adoc b/docs_user/modules/proc_deploying-backend-services.adoc index 2c883acdb..e81643174 100644 --- a/docs_user/modules/proc_deploying-backend-services.adoc +++ b/docs_user/modules/proc_deploying-backend-services.adoc @@ -142,7 +142,7 @@ metadata: name: openstack spec: secret: osp-secret - storageClass: <1> + storageClass: barbican: enabled: false @@ -167,7 +167,7 @@ spec: annotations: metallb.universe.tf/address-pool: ctlplane metallb.universe.tf/allow-shared-ip: ctlplane - metallb.universe.tf/loadBalancerIPs: 192.168.122.80 <2> + metallb.universe.tf/loadBalancerIPs: spec: type: LoadBalancer @@ -213,7 +213,7 @@ spec: secret: osp-secret replicas: 3 storageRequest: 5G - openstack-cell1: <3> + openstack-cell1: secret: osp-secret replicas: 3 storageRequest: 5G @@ -270,7 +270,7 @@ spec: metadata: annotations: metallb.universe.tf/address-pool: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.85 + metallb.universe.tf/loadBalancerIPs: spec: type: LoadBalancer rabbitmq-cell1: @@ -281,7 +281,7 @@ spec: metadata: annotations: metallb.universe.tf/address-pool: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.86 + metallb.universe.tf/loadBalancerIPs: spec: type: LoadBalancer @@ -293,7 +293,7 @@ spec: metadata: annotations: metallb.universe.tf/address-pool: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.87 + metallb.universe.tf/loadBalancerIPs: spec: type: LoadBalancer rabbitmq-cell3: @@ -304,12 +304,12 @@ spec: metadata: annotations: metallb.universe.tf/address-pool: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.88 + metallb.universe.tf/loadBalancerIPs: spec: type: LoadBalancer telemetry: enabled: false - tls: <4> + tls: podLevel: enabled: false ingress: @@ -326,10 +326,28 @@ spec: EOF ---- + -<1> Select an existing `` in your {OpenShiftShort} cluster. -<2> Replace `` with the LoadBalancer IP address. 
-<3> This example provides the required infrastructure database and messaging services for 3 Compute cells named `cell1`, `cell2`, and `cell3`. Adjust the values for fields such as `replicas`, `storage`, or `storageRequest`, for each Compute cell as needed. -<4> If you enabled TLS-e in your {OpenStackShort} environment, in the `spec:tls` section set `tls` to the following: +where: + +:: +Specifies an existing storage class in your {OpenShiftShort} cluster. + +:: +Specifies the LoadBalancer IP address. If you use IPv6, change the load balancer IPs to the IPs in your environment, for example: ++ +---- +... +metallb.universe.tf/allow-shared-ip: ctlplane +metallb.universe.tf/loadBalancerIPs: fd00:aaaa::80 +... +metallb.universe.tf/address-pool: internalapi +metallb.universe.tf/loadBalancerIPs: fd00:bbbb::85 +... +metallb.universe.tf/address-pool: internalapi +metallb.universe.tf/loadBalancerIPs: fd00:bbbb::86 +---- + +* `galera.openstack-cell1` provides the required infrastructure database and messaging services for the Compute cells, for example, `cell1`, `cell2`, and `cell3`. Adjust the values for fields such as `replicas`, `storage`, or `storageRequest`, for each Compute cell as needed. +* `spec.tls` specifies whether TLS-e is enabled. If you enabled TLS-e in your {OpenStackShort} environment, set `tls` to the following: + ---- spec: @@ -351,22 +369,6 @@ spec: customIssuer: rootca-internal enabled: true ---- -+ -[NOTE] -==== -If you use IPv6, change the load balancer IPs to the IPs in your environment, for example: ----- -... -metallb.universe.tf/allow-shared-ip: ctlplane -metallb.universe.tf/loadBalancerIPs: fd00:aaaa::80 -... -metallb.universe.tf/address-pool: internalapi -metallb.universe.tf/loadBalancerIPs: fd00:bbbb::85 -... 
-metallb.universe.tf/address-pool: internalapi -metallb.universe.tf/loadBalancerIPs: fd00:bbbb::86 ----- -==== endif::[] ifeval::["{build_variant}" == "ospdo"] @@ -382,7 +384,7 @@ metadata: name: openstack spec: secret: osp-secret - storageClass: <1> + storageClass: barbican: @@ -406,9 +408,9 @@ spec: service: metadata: annotations: - metallb.universe.tf/address-pool: <2> + metallb.universe.tf/address-pool: metallb.universe.tf/allow-shared-ip: - metallb.universe.tf/loadBalancerIPs: <3> + metallb.universe.tf/loadBalancerIPs: spec: type: LoadBalancer options: @@ -453,7 +455,7 @@ spec: secret: osp-secret replicas: 3 storageRequest: 5G - openstack-cell1: <4> + openstack-cell1: secret: osp-secret replicas: 3 storageRequest: 5G @@ -492,7 +494,7 @@ spec: ovndbcluster-nb: replicas: 3 dbType: NB - networkAttachment: <5> + networkAttachment: ovndbcluster-sb: replicas: 3 dbType: SB @@ -549,7 +551,7 @@ spec: telemetry: enabled: false - tls: <6> + tls: podLevel: enabled: false ingress: @@ -567,12 +569,34 @@ spec: EOF ---- + -<1> Select an existing `` in your {OpenShiftShort} cluster. -<2> Replace `` with the name of your network definition. -<3> Replace `` with the LoadBalancer IP address. -<4> This example provides the required infrastructure database and messaging services for 3 Compute cells named `cell1`, `cell2`, and `cell3`. Adjust the values for fields such as `replicas`, `storage`, or `storageRequest`, for each Compute cell as needed. -<5> Replace `` with the name of your network. -<6> If you enabled TLS-e in your {OpenStackShort} environment, in the `spec:tls` section set `tls` to the following: +where: + +:: +Specifies an existing storage class in your {OpenShiftShort} cluster. + +:: +Specifies name of your network definition. + +:: +Specifies the LoadBalancer IP address. If you use IPv6, change the load balancer IPs to the IPs in your environment, for example: ++ +---- +... 
+metallb.universe.tf/allow-shared-ip: ctlplane +metallb.universe.tf/loadBalancerIPs: fd00:aaaa::80 +... +metallb.universe.tf/address-pool: internalapi +metallb.universe.tf/loadBalancerIPs: fd00:bbbb::85 +... +metallb.universe.tf/address-pool: internalapi +metallb.universe.tf/loadBalancerIPs: fd00:bbbb::86 +---- + +:: +Specifies the name of your network. + +* `galera.openstack-cell1` provides the required infrastructure database and messaging services for the Compute cells, for example, `cell1`, `cell2`, and `cell3`. Adjust the values for fields such as `replicas`, `storage`, or `storageRequest`, for each Compute cell as needed. +* `spec.tls` specifies whether TLS-e is enabled. If you enabled TLS-e in your {OpenStackShort} environment, set `tls` to the following: + ---- spec: @@ -594,22 +618,6 @@ spec: customIssuer: rootca-internal enabled: true ---- - -[NOTE] -==== -If you use IPv6, change the load balancer IPs to the IPs in your environment, for example: ----- -... -metallb.universe.tf/allow-shared-ip: ctlplane -metallb.universe.tf/loadBalancerIPs: fd00:aaaa::80 -... -metallb.universe.tf/address-pool: internalapi -metallb.universe.tf/loadBalancerIPs: fd00:bbbb::85 -... 
-metallb.universe.tf/address-pool: internalapi -metallb.universe.tf/loadBalancerIPs: fd00:bbbb::86 ----- -==== endif::[] diff --git a/docs_user/modules/proc_deploying-file-systems-service-control-plane.adoc b/docs_user/modules/proc_deploying-file-systems-service-control-plane.adoc index a41db59ca..239d1c284 100644 --- a/docs_user/modules/proc_deploying-file-systems-service-control-plane.adoc +++ b/docs_user/modules/proc_deploying-file-systems-service-control-plane.adoc @@ -71,7 +71,7 @@ spec: annotations: metallb.universe.tf/address-pool: internalapi metallb.universe.tf/allow-shared-ip: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.80 <1> + metallb.universe.tf/loadBalancerIPs: <172.17.0.80> spec: type: LoadBalancer manilaScheduler: @@ -85,16 +85,16 @@ spec: host = hostgroup [cephfs] driver_handles_share_servers=False - share_backend_name=cephfs <2> + share_backend_name=cephfs share_driver=manila.share.drivers.cephfs.driver.CephFSDriver cephfs_conf_path=/etc/ceph/ceph.conf cephfs_auth_id=openstack cephfs_cluster_name=ceph cephfs_volume_mode=0755 cephfs_protocol_helper_type=CEPHFS - networkAttachments: <3> + networkAttachments: - storage - extraMounts: <4> + extraMounts: - name: v1 region: r1 extraVol: @@ -112,10 +112,10 @@ spec: __EOF__ ---- + -<1> If you use IPv6, change the load balancer IP to the load balancer IP in your environment, for example, `metallb.universe.tf/loadBalancerIPs: fd00:bbbb::80`. -<2> Ensure that the names of the back ends (`share_backend_name`) are the same as they were in {OpenStackShort} {rhos_prev_ver}. -<3> Ensure that you specify the appropriate storage management network in the `networkAttachments` section. For example, the `manilaShares` instance with the CephFS back-end driver is connected to the `storage` network. -<4> If you need to add extra files to any of the services, you can use `extraMounts`. 
For example, when using {Ceph}, you can add the {rhos_component_storage_file} Ceph user's keyring file as well as the `ceph.conf` configuration file. +* `metallb.universe.tf/loadBalancerIPs: <172.17.0.80>` specifies the load balancer IP in your environment. If you use IPv6, change the load balancer IP to the load balancer IP in your environment, for example, `metallb.universe.tf/loadBalancerIPs: fd00:bbbb::80`. +* `share_backend_name` specifies the names of the back ends to use in {rhos_long}. Ensure that the names of the back ends are the same as they were in {OpenStackShort} {rhos_prev_ver}. +* `networkAttachments` specifies the appropriate storage management network. For example, the `manilaShares` instance with the CephFS back-end driver is connected to the `storage` network. +* `extraMounts` specifies additional files to add to any of the services. For example, when using {Ceph}, you can add the {rhos_component_storage_file} Ceph user's keyring file as well as the `ceph.conf` configuration file. 
+ The following example patch file uses CephFS through NFS: + @@ -141,7 +141,7 @@ spec: annotations: metallb.universe.tf/address-pool: internalapi metallb.universe.tf/allow-shared-ip: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + metallb.universe.tf/loadBalancerIPs: <172.17.0.80> spec: type: LoadBalancer manilaScheduler: diff --git a/docs_user/modules/proc_deploying-the-bare-metal-provisioning-service.adoc b/docs_user/modules/proc_deploying-the-bare-metal-provisioning-service.adoc index 5d780bbf0..95e8cb51e 100644 --- a/docs_user/modules/proc_deploying-the-bare-metal-provisioning-service.adoc +++ b/docs_user/modules/proc_deploying-the-bare-metal-provisioning-service.adoc @@ -60,7 +60,7 @@ spec: annotations: metallb.universe.tf/address-pool: internalapi metallb.universe.tf/allow-shared-ip: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.80 <1> + metallb.universe.tf/loadBalancerIPs: <1> spec: type: LoadBalancer ironicConductors: @@ -100,7 +100,10 @@ spec: ' ---- + -<1> If you use IPv6, change the load balancer IP to the load balancer IP in your environment, for example, `metallb.universe.tf/loadBalancerIPs: fd00:bbbb::80`. +where: + +:: +Specifies the load balancer IP in your environment. If you use IPv6, change the load balancer IP to the load balancer IP in your environment, for example, `metallb.universe.tf/loadBalancerIPs: fd00:bbbb::80`. . Wait for the {bare_metal} control plane services CRs to become ready: + diff --git a/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc b/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc index c6a374faf..b5c63f547 100644 --- a/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc +++ b/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc @@ -33,11 +33,11 @@ $ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{"name": "internalapi-static","ips": ["172. 
$ MARIADB_RUN_OVERRIDES="$OSPDO_MARIADB_CLIENT_ANNOTATIONS" endif::[] -$ CELLS="default cell1 cell2" <1> +$ CELLS="default cell1 cell2" $ DEFAULT_CELL_NAME="cell3" $ RENAMED_CELLS="cell1 cell2 $DEFAULT_CELL_NAME" -$ CHARACTER_SET=utf8 # <2> +$ CHARACTER_SET=utf8 # $ COLLATION=utf8_general_ci $ declare -A PODIFIED_DB_ROOT_PASSWORD @@ -70,14 +70,14 @@ $ for CELL in $(echo $CELLS); do > done $ declare -A SOURCE_MARIADB_IP -$ SOURCE_MARIADB_IP[default]=** <3> -$ SOURCE_MARIADB_IP[cell1]=** <4> -$ SOURCE_MARIADB_IP[cell2]=** <5> +$ SOURCE_MARIADB_IP[default]=** +$ SOURCE_MARIADB_IP[cell1]=** +$ SOURCE_MARIADB_IP[cell2]=** # ... $ declare -A SOURCE_GALERA_MEMBERS_DEFAULT $ SOURCE_GALERA_MEMBERS_DEFAULT=( -> ["standalone.localdomain"]=172.17.0.100 <6> +> ["standalone.localdomain"]=172.17.0.100 > # [...]=... > ) $ declare -A SOURCE_GALERA_MEMBERS_CELL1 @@ -90,13 +90,12 @@ $ SOURCE_GALERA_MEMBERS_CELL2=( > ) ---- + -<1> `CELLS` and `RENAMED_CELLS` represent changes that are going to be made after you import the databases. The `default` cell takes a new name from `DEFAULT_CELL_NAME`. -In a multi-cell adoption scenario, `default` cell might retain its original 'default' name as well. -<2> The `CHARACTER_SET` variable and collation should match the source database. If they do not match, then foreign key relationships break for any tables that are created in the future as part of database sync. -<3> Add data in `SOURCE_MARIADB_IP[*]= ...` for each cell that is defined in `CELLS`. Provide records for the cell names and VIP addresses of MariaDB Galera clusters. -<4> Replace `` with the VIP of your galera cell1 cluster. -<5> Replace `` with the VIP of your galera cell2 cluster, and so on. -<6> For each cell defined in `CELLS`, in `SOURCE_GALERA_MEMBERS_CELL`, add the names of the MariaDB Galera cluster members and its IP address. Replace `["standalone.localdomain"]="172.17.0.100"` with the real hosts data. 
+* `CELLS` and `RENAMED_CELLS` represent changes that are going to be made after you import the databases. The `default` cell takes a new name from `DEFAULT_CELL_NAME`. In a multi-cell adoption scenario, `default` cell might retain its original 'default' name as well. +* `CHARACTER_SET` and `COLLATION` should match the source database. If they do not match, then foreign key relationships break for any tables that are created in the future as part of the database sync. +* `SOURCE_MARIADB_IP[*]= ...` includes the data for each cell that is defined in `CELLS`. Provide records for the cell names and VIP addresses of MariaDB Galera clusters. +* `` defines the VIP of your galera cell1 cluster. +* `` defines the VIP of your galera cell2 cluster, and so on. +* `SOURCE_GALERA_MEMBERS_CELL` defines the names of the MariaDB Galera cluster members and their IP address for each cell defined in `CELLS`. Replace `["standalone.localdomain"]="172.17.0.100"` with the real hosts data. [NOTE] A standalone {OpenStackPreviousInstaller} environment only creates a 'default' cell, which should be the only `CELLS` value in this case. The `DEFAULT_CELL_NAME` value should be `cell1`. 
@@ -259,18 +258,18 @@ $ for CELL in $(echo $CELLS); do > RCELL=$CELL > [ "$CELL" = "default" ] && RCELL=$DEFAULT_CELL_NAME > oc rsh mariadb-copy-data << EOF -> declare -A db_name_map <1> +> declare -A db_name_map > db_name_map['nova']="nova_$RCELL" > db_name_map['ovs_neutron']='neutron' > db_name_map['ironic-inspector']='ironic_inspector' -> declare -A db_cell_map <2> +> declare -A db_cell_map > db_cell_map['nova']="nova_$DEFAULT_CELL_NAME" -> db_cell_map["nova_$RCELL"]="nova_$RCELL" <3> -> declare -A db_server_map <4> +> db_cell_map["nova_$RCELL"]="nova_$RCELL" +> declare -A db_server_map > db_server_map['default']=${PODIFIED_MARIADB_IP['super']} > db_server_map["nova"]=${PODIFIED_MARIADB_IP[$DEFAULT_CELL_NAME]} > db_server_map["nova_$RCELL"]=${PODIFIED_MARIADB_IP[$RCELL]} -> declare -A db_server_password_map <5> +> declare -A db_server_password_map > db_server_password_map['default']=${PODIFIED_DB_ROOT_PASSWORD['super']} > db_server_password_map["nova"]=${PODIFIED_DB_ROOT_PASSWORD[$DEFAULT_CELL_NAME]} > db_server_password_map["nova_$RCELL"]=${PODIFIED_DB_ROOT_PASSWORD[$RCELL]} @@ -284,7 +283,7 @@ $ for CELL in $(echo $CELLS); do > target=super > else > target=$RCELL -> fi <6> +> fi > renamed_db_file="\${target}_new.\${db_name}.sql" > mv -f \${db_file} \${renamed_db_file} > if [[ -v "db_name_map[\${db_name}]" ]]; then @@ -316,12 +315,12 @@ $ for CELL in $(echo $CELLS); do > done ---- + -<1> Defines which common databases to rename when importing them. -<2> Defines which cells databases to import, and how to rename them, if needed. -<3> Omits importing special `cell0` databases of the cells, as its contents cannot be consolidated during adoption. -<4> Defines which databases to import into which servers, usually dedicated for cells. -<5> Defines the root passwords map for database servers. You can only use the same password for now. -<6> Assigns which databases to import into which hosts when extracting databases from the `default` cell. 
+* `db_name_map` defines which common databases to rename when importing them. +* `db_cell_map` defines which cells databases to import, and how to rename them, if needed. +* `db_cell_map["nova_$RCELL"]="nova_$RCELL"` omits importing special `cell0` databases of the cells, as its contents cannot be consolidated during adoption. +* `db_server_map` defines which databases to import into which servers, usually dedicated for cells. +* `db_server_password_map` defines the root passwords map for database servers. You can only use the same password for now. +* `renamed_db_file="\${target}_new.\${db_name}.sql"` assigns which databases to import into which hosts when extracting databases from the `default` cell. .Verification @@ -337,9 +336,9 @@ $ set -u $ dbs=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p"${PODIFIED_DB_ROOT_PASSWORD['super']}" -e 'SHOW databases;') $ echo $dbs | grep -Eq '\bkeystone\b' && echo "OK" || echo "CHECK FAILED" $ echo $dbs | grep -Eq '\bneutron\b' && echo "OK" || echo "CHECK FAILED" -$ echo "${PULL_OPENSTACK_CONFIGURATION_DATABASES[@]}" | grep -Eq '\bovs_neutron\b' && echo "OK" || echo "CHECK FAILED" <1> +$ echo "${PULL_OPENSTACK_CONFIGURATION_DATABASES[@]}" | grep -Eq '\bovs_neutron\b' && echo "OK" || echo "CHECK FAILED" $ novadb_mapped_cells=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p"${PODIFIED_DB_ROOT_PASSWORD['super']}" \ -> nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;') <2> +> nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;') $ uuidf='\S{8,}-\S{4,}-\S{4,}-\S{4,}-\S{12,}' $ default=$(printf "%s\n" "$PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS" | sed -rn "s/^($uuidf)\s+default\b.*$/\1/p") $ difference=$(diff -ZNua \ @@ -352,25 +351,25 @@ $ if [ "$DEFAULT_CELL_NAME" != "default" ]; then > else > [ "x$difference" = "x" ] && echo "OK" || echo "CHECK FAILED" > fi -$ for CELL in $(echo $RENAMED_CELLS); do <3> +$ for 
CELL in $(echo $RENAMED_CELLS); do > RCELL=$CELL > [ "$CELL" = "$DEFAULT_CELL_NAME" ] && RCELL=default > set +u > . ~/.source_cloud_exported_variables_$RCELL > set -u -> c1dbs=$(oc exec openstack-$CELL-galera-0 -c galera -- mysql -rs -uroot -p${PODIFIED_DB_ROOT_PASSWORD[$CELL]} -e 'SHOW databases;') <4> +> c1dbs=$(oc exec openstack-$CELL-galera-0 -c galera -- mysql -rs -uroot -p${PODIFIED_DB_ROOT_PASSWORD[$CELL]} -e 'SHOW databases;') > echo $c1dbs | grep -Eq "\bnova_${CELL}\b" && echo "OK" || echo "CHECK FAILED" > novadb_svc_records=$(oc exec openstack-$CELL-galera-0 -c galera -- mysql -rs -uroot -p${PODIFIED_DB_ROOT_PASSWORD[$CELL]} \ > nova_$CELL -e "select host from services where services.binary='nova-compute' and deleted=0 order by host asc;") -> diff -Z <(echo "x$novadb_svc_records") <(echo "x${PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[@]}") && echo "OK" || echo "CHECK FAILED" <5> +> diff -Z <(echo "x$novadb_svc_records") <(echo "x${PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[@]}") && echo "OK" || echo "CHECK FAILED" > done ---- + -<1> Ensures that the {networking_first_ref} database is renamed from `ovs_neutron`. -<2> Ensures that the `default` cell is renamed to `$DEFAULT_CELL_NAME`, and the cell UUIDs are retained. -<3> Ensures that the registered Compute services names have not changed. -<4> Ensures {compute_service} cells databases are extracted to separate database servers, and renamed from `nova` to `nova_cell`. -<5> Ensures that the registered {compute_service} name has not changed. +* `echo "${PULL_OPENSTACK_CONFIGURATION_DATABASES[@]}" | grep -Eq '\bovs_neutron\b' && echo "OK" || echo "CHECK FAILED"` ensures that the {networking_first_ref} database is renamed from `ovs_neutron`. +* `nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;')` ensures that the `default` cell is renamed to `$DEFAULT_CELL_NAME`, and the cell UUIDs are retained. 
+* `for CELL in $(echo $RENAMED_CELLS); do` ensures that the registered Compute services names have not changed. +* `c1dbs=$(oc exec openstack-$CELL-galera-0 -c galera -- mysql -rs -uroot -p${PODIFIED_DB_ROOT_PASSWORD[$CELL]} -e 'SHOW databases;')` ensures {compute_service} cells databases are extracted to separate database servers, and renamed from `nova` to `nova_cell`. +* `diff -Z <(echo "x$novadb_svc_records") <(echo "x${PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[@]}") && echo "OK" || echo "CHECK FAILED"` ensures that the registered {compute_service} name has not changed. . Delete the `mariadb-data` pod and the `mariadb-copy-data` persistent volume claim that contains the database backup: + diff --git a/docs_user/modules/proc_migrating-ovn-data.adoc b/docs_user/modules/proc_migrating-ovn-data.adoc index 8a2acf078..261f8b85a 100644 --- a/docs_user/modules/proc_migrating-ovn-data.adoc +++ b/docs_user/modules/proc_migrating-ovn-data.adoc @@ -106,7 +106,7 @@ ifeval::["{build_variant}" == "ospdo"] endif::[] spec: ifeval::["{build_variant}" == "ospdo"] - nodeName: '{{ }}' <1> + nodeName: '{{ }}' endif::[] containers: - image: $OVSDB_IMAGE @@ -136,7 +136,10 @@ EOF ---- + ifeval::["{build_variant}" == "ospdo"] -<1> Replace `` with the {OpenStackShort} node that contains the Controller node. +where: + +:: +Specifies the {OpenStackShort} node that contains the Controller node. endif::[] . Wait for the pod to be ready: @@ -285,10 +288,10 @@ spec: template: ovnController: nicMappings: - physNet: NIC <1> + physNet: NIC ---- + -<1> `physNet` is the name of your physical network. `NIC` is the name of the physical interface that is connected to your physical network. +* `physNet` defines the name of your physical network. `NIC` is the name of the physical interface that is connected to your physical network. + [NOTE] Running OVN gateways on {OpenShiftShort} nodes might be prone to data plane downtime during Open vSwitch upgrades. 
Consider running OVN gateways on dedicated `Networker` data plane nodes for production deployments instead. diff --git a/docs_user/modules/proc_migrating-the-rgw-backends.adoc b/docs_user/modules/proc_migrating-the-rgw-backends.adoc index 38220688d..80a1a9983 100644 --- a/docs_user/modules/proc_migrating-the-rgw-backends.adoc +++ b/docs_user/modules/proc_migrating-the-rgw-backends.adoc @@ -73,24 +73,24 @@ This example assumes that `172.17.3.0/24` is the `storage` network. ---- --- networks: -- 172.17.3.0/24<1> +- 172.17.3.0/24 placement: - label: rgw <2> + label: rgw service_id: rgw service_name: rgw.rgw service_type: rgw spec: - rgw_frontend_port: 8090 <3> + rgw_frontend_port: 8090 rgw_realm: default rgw_zone: default - rgw_frontend_ssl_certificate: ... <4> + rgw_frontend_ssl_certificate: ... ssl: true ---- + -<1> Add the storage network where the RGW back ends are deployed. -<2> Replace the Controller nodes with the `label: rgw` label. -<3> Change the `rgw_frontend_port` value to `8090` to avoid conflicts with the Ceph ingress daemon. -<4> Optional: if TLS is enabled, add the SSL certificate and key concatenation as described in link:{configuring-storage}/assembly_configuring-red-hat-ceph-storage-as-the-backend-for-rhosp-storage#proc_ceph-configure-rgw-with-tls_ceph-back-end[Configuring RGW with TLS for an external Red Hat Ceph Storage cluster] in _{configuring-storage-t}_. +* `networks` defines the storage network where the RGW back ends are deployed. +* `placement.label: rgw` replaces the Controller nodes with the `rgw` label. +* `spec.rgw_frontend_port` specifies the value as `8090` to avoid conflicts with the Ceph ingress daemon. 
+* `spec.rgw_frontend_ssl_certificate` defines the SSL certificate and key concatenation if TLS is enabled as described in link:{configuring-storage}/assembly_configuring-red-hat-ceph-storage-as-the-backend-for-rhosp-storage#proc_ceph-configure-rgw-with-tls_ceph-back-end[Configuring RGW with TLS for an external Red Hat Ceph Storage cluster] in _{configuring-storage-t}_. . Apply the new RGW spec by using the orchestrator CLI: + diff --git a/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc b/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc index bc4bbd924..162ce3c65 100644 --- a/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc +++ b/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc @@ -95,7 +95,7 @@ $ cat celltemplates >> oscp-patch.yaml ---- cell: novaComputeTemplates: - : <1> + : customServiceConfig: | [DEFAULT] host = @@ -105,7 +105,10 @@ $ cat celltemplates >> oscp-patch.yaml ... ---- + -<1> Replace `` with the hostname of the node that is running the `ironic` Compute driver in the source cloud of `cell`. +where: + +:: +Specifies the hostname of the node that is running the `ironic` Compute driver in the source cloud of `cell`. . Apply the patch file: + diff --git a/docs_user/modules/proc_pulling-configuration-from-a-tripleo-deployment.adoc b/docs_user/modules/proc_pulling-configuration-from-a-tripleo-deployment.adoc index 6c7962b75..a6b8d4837 100644 --- a/docs_user/modules/proc_pulling-configuration-from-a-tripleo-deployment.adoc +++ b/docs_user/modules/proc_pulling-configuration-from-a-tripleo-deployment.adoc @@ -76,14 +76,14 @@ Repeat this step for each {OpenStackShort} service that you want to disable or e ---- services: ovs_external_ids: - hosts: <1> + hosts: - standalone - service_command: "ovs-vsctl list Open_vSwitch . 
| grep external_ids | awk -F ': ' '{ print $2; }'" <2> - cat_output: true <3> + service_command: "ovs-vsctl list Open_vSwitch . | grep external_ids | awk -F ': ' '{ print $2; }'" + cat_output: true path: - ovs_external_ids.json - config_mapping: <4> - ovn-bridge-mappings: edpm_ovn_bridge_mappings <5> + config_mapping: + ovn-bridge-mappings: edpm_ovn_bridge_mappings ovn-bridge: edpm_ovn_bridge ovn-encap-type: edpm_ovn_encap_type ovn-monitor-all: ovn_monitor_all @@ -94,11 +94,11 @@ services: [NOTE] You must correctly configure an SSH configuration file or equivalent for non-standard services, such as OVS. The `ovs_external_ids` service does not run in a container, and the OVS data is stored on each host of your cloud, for example, `controller_1/controller_2/`, and so on. + -<1> The list of hosts, for example, `compute-1`, `compute-2`. -<2> The command that runs against the hosts. -<3> Os-diff gets the output of the command and stores the output in a file that is specified by the key path. -<4> Provides a mapping between, in this example, the data plane custom resource definition and the `ovs-vsctl` output. -<5> The `edpm_ovn_bridge_mappings` variable must be a list of strings, for example, `["datacentre:br-ex"]`. +* `hosts` specifies the list of hosts, for example, `compute-1`, `compute-2`. +* `service_command: "ovs-vsctl list Open_vSwitch . | grep external_ids | awk -F ': ' '{ print $2; }'"` runs against the hosts. +* `cat_output: true` provides os-diff with the output of the command and stores the output in a file that is specified by the key path. +* `config_mapping` provides a mapping between, in this example, the data plane custom resource definition and the `ovs-vsctl` output. +* `ovn-bridge-mappings: edpm_ovn_bridge_mappings` must be a list of strings, for example, `["datacentre:br-ex"]`. .. 
Compare the values: + diff --git a/docs_user/modules/proc_retrieving-topology-specific-service-configuration.adoc b/docs_user/modules/proc_retrieving-topology-specific-service-configuration.adoc index d2115c76e..53d0837ec 100644 --- a/docs_user/modules/proc_retrieving-topology-specific-service-configuration.adoc +++ b/docs_user/modules/proc_retrieving-topology-specific-service-configuration.adoc @@ -207,7 +207,7 @@ export PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS=$($CONTROLLER1_SSH + ---- $ unset SRIOV_AGENTS -$ declare -xA SRIOV_AGENTS <1> +$ declare -xA SRIOV_AGENTS $ for CELL in $(echo $CELLS); do > RCELL=$CELL > [ "$CELL" = "$DEFAULT_CELL_NAME" ] && RCELL=default @@ -236,7 +236,7 @@ $ for CELL in $(echo $CELLS); do $ chmod 0600 ~/.source_cloud_exported_variables* ---- + -<1> If `neutron-sriov-nic-agent` agents are running in your {OpenStackShort} deployment, get the configuration to use for the data plane adoption. +* `declare -xA SRIOV_AGENTS` gets the `neutron-sriov-nic-agent` configuration to use for the data plane adoption if `neutron-sriov-nic-agent` agents are running in your {OpenStackShort} deployment. .Next steps diff --git a/docs_user/modules/proc_stopping-infrastructure-management-and-compute-services.adoc b/docs_user/modules/proc_stopping-infrastructure-management-and-compute-services.adoc index cd10f65d0..977e37629 100644 --- a/docs_user/modules/proc_stopping-infrastructure-management-and-compute-services.adoc +++ b/docs_user/modules/proc_stopping-infrastructure-management-and-compute-services.adoc @@ -27,18 +27,18 @@ ifeval::["{build}" == "downstream"] CONTROLLER1_SSH="ssh -i root@" # ... endif::[] -# ... <1> +# ... 
ifeval::["{build}" != "downstream"] EDPM_PRIVATEKEY_PATH="~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa" endif::[] ifeval::["{build}" == "downstream"] -EDPM_PRIVATEKEY_PATH="" <2> +EDPM_PRIVATEKEY_PATH="" endif::[] ---- + -<1> In the `CONTROLLER_SSH` settings, provide SSH connection details for all Controller nodes, including cell Controller nodes, of the source {OpenStackPreviousInstaller} cloud. +* `CONTROLLER_SSH` defines the SSH connection details for all Controller nodes, including cell Controller nodes, of the source {OpenStackPreviousInstaller} cloud. ifeval::["{build}" == "downstream"] -<2> Replace `` with the path to your SSH key. +* `` defines the path to your SSH key. endif::[] .Procedure diff --git a/docs_user/modules/proc_stopping-openstack-services.adoc b/docs_user/modules/proc_stopping-openstack-services.adoc index 6dd564f74..930461e6b 100644 --- a/docs_user/modules/proc_stopping-openstack-services.adoc +++ b/docs_user/modules/proc_stopping-openstack-services.adoc @@ -36,7 +36,7 @@ ifeval::["{build}" != "downstream"] CONTROLLER1_SSH="ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.100" endif::[] ifeval::["{build}" == "downstream"] -CONTROLLER1_SSH="ssh -i ** root@**" <1> +CONTROLLER1_SSH="ssh -i ** root@**" CONTROLLER2_SSH="ssh -i ** root@**" CONTROLLER3_SSH="ssh -i ** root@**" endif::[] @@ -51,15 +51,16 @@ CONTROLLER2_SSH="ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@1 CONTROLLER3_SSH="ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.109" endif::[] ifeval::["{build}" == "downstream"] -CONTROLLER1_SSH="ssh -i ** root@**" <2> +CONTROLLER1_SSH="ssh -i ** root@**" CONTROLLER2_SSH="ssh -i ** root@**" CONTROLLER3_SSH="ssh -i ** root@**" # ... endif::[] ---- ifeval::["{build}" == "downstream"] -<1> Replace `` with the path to your SSH key. -<2> Replace ` IP>` with IP addresses of all Controller nodes. ++ +* `` defines the path to your SSH key. 
+* ` IP>` defines the IP addresses of all Controller nodes. endif::[] .Procedure diff --git a/docs_user/modules/proc_using-new-subnet-ranges.adoc b/docs_user/modules/proc_using-new-subnet-ranges.adoc index 9b24e92cf..79b0201b5 100644 --- a/docs_user/modules/proc_using-new-subnet-ranges.adoc +++ b/docs_user/modules/proc_using-new-subnet-ranges.adoc @@ -29,6 +29,7 @@ endif::[] . Configure link local routes on the existing deployment nodes for the control plane subnets. This is done through {OpenStackPreviousInstaller} configuration: + +[subs=+quotes] ---- network_config: - type: ovs_bridge @@ -36,11 +37,12 @@ network_config: routes: - ip_netmask: 0.0.0.0/0 next_hop: 192.168.1.1 - - ip_netmask: 172.31.0.0/24 <1> - next_hop: 192.168.1.100 <2> + - *ip_netmask: 172.31.0.0/24* + *next_hop: 192.168.1.100* ---- -<1> The new control plane subnet. -<2> The control plane IP address of the existing data plane node. ++ +* `ip_netmask` defines the new control plane subnet. +* `next_hop` defines the control plane IP address of the existing data plane node. + Repeat this configuration for other networks that need to use different subnets for the new and existing parts of the deployment. @@ -82,11 +84,12 @@ the risk of network disruption. You must enforce the changes by setting the . You also must configure link local routes to existing deployment on {rhos_long} worker nodes. This is achieved by adding `routes` entries to the `NodeNetworkConfigurationPolicy` CRs for each network. For example: + ---- - - destination: 192.168.122.0/24 <1> - next-hop-interface: ospbr <2> + - destination: 192.168.122.0/24 + next-hop-interface: ospbr ---- -<1> The original subnet of the isolated network on the data plane. -<2> The {rhocp_long} worker network interface that corresponds to the isolated network on the data plane. ++ +* `destination` defines the original subnet of the isolated network on the data plane. 
+* `next-hop-interface` defines the {rhocp_long} worker network interface that corresponds to the isolated network on the data plane. + As a result, the following route is added to your {OpenShiftShort} nodes: +