diff --git a/hooks/playbooks/adoption_bgp_post_overcloud.yaml b/hooks/playbooks/adoption_bgp_post_overcloud.yaml
new file mode 100644
index 0000000000..39bec0fee2
--- /dev/null
+++ b/hooks/playbooks/adoption_bgp_post_overcloud.yaml
@@ -0,0 +1,37 @@
+---
+# Copyright Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
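+# Drops the temporary default route (via 192.168.111.1) that the
+# adoption_bgp_pre_overcloud hook added before the overcloud deployment.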
+- name: BGP post_overcloud hook
+ hosts: "{{ cifmw_target_host | default('localhost') }}"
+ gather_facts: false
+ tasks:
+ - name: Remove default route from OC nodes
+ delegate_to: "{{ _vm }}"
+ become: true
+ ansible.builtin.command:
+ cmd: ip route del default via 192.168.111.1
+ failed_when: false
+ loop: >-
+ {{
+ _vm_groups['osp-r0-computes'] | list +
+ _vm_groups['osp-r0-controllers'] | list +
+ _vm_groups['osp-r1-computes'] | list +
+ _vm_groups['osp-r1-controllers'] | list +
+ _vm_groups['osp-r2-computes'] | list +
+ _vm_groups['osp-r2-controllers'] | list
+ }}
+ loop_control:
+ loop_var: _vm
diff --git a/hooks/playbooks/adoption_bgp_pre_overcloud.yaml b/hooks/playbooks/adoption_bgp_pre_overcloud.yaml
new file mode 100644
index 0000000000..e4d561e469
--- /dev/null
+++ b/hooks/playbooks/adoption_bgp_pre_overcloud.yaml
@@ -0,0 +1,36 @@
+---
+# Copyright Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
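+# Adds a default route via 192.168.111.1 on every overcloud node for the
+# duration of the overcloud deployment; the adoption_bgp_post_overcloud hook
+# removes it again once the deployment is finished.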
+- name: BGP pre_overcloud hook
+ hosts: "{{ cifmw_target_host | default('localhost') }}"
+ gather_facts: false
+ tasks:
+ - name: Add default route to OC nodes
+ delegate_to: "{{ _vm }}"
+ become: true
+ ansible.builtin.command:
+ cmd: ip route add default via 192.168.111.1
+ loop: >-
+ {{
+ _vm_groups['osp-r0-computes'] | list +
+ _vm_groups['osp-r0-controllers'] | list +
+ _vm_groups['osp-r1-computes'] | list +
+ _vm_groups['osp-r1-controllers'] | list +
+ _vm_groups['osp-r2-computes'] | list +
+ _vm_groups['osp-r2-controllers'] | list
+ }}
+ loop_control:
+ loop_var: _vm
diff --git a/playbooks/bgp/prepare-bgp-spines-leaves.yaml b/playbooks/bgp/prepare-bgp-spines-leaves.yaml
index 02cc851648..53bea8643a 100644
--- a/playbooks/bgp/prepare-bgp-spines-leaves.yaml
+++ b/playbooks/bgp/prepare-bgp-spines-leaves.yaml
@@ -1,4 +1,60 @@
---
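+# Boot the pre-created fabric VMs (leaves, spines and the router) on the
+# hypervisor, register them in the in-memory inventory and wait for SSH
+# before the configuration plays below run.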
+- name: Start spines, leaves and routers and add them to the inventory
+ hosts: localhost
+ vars:
+ leafs_list:
+ - leaf-0
+ - leaf-1
+ - leaf-2
+ - leaf-3
+ - leaf-4
+ - leaf-5
+ spines_list:
+ - spine-0
+ - spine-1
+ routers_list:
+ - router-0
+ fabric_list: "{{ leafs_list + spines_list + routers_list }}"
+ tasks:
+ - name: Start spine and leaf VMs
+ delegate_to: hypervisor
+ become: true
+ community.libvirt.virt:
+ name: "cifmw-{{ item }}"
+ state: running
+ loop: "{{ fabric_list }}"
+
+ - name: Add leafs group to inventory
+ when: '"leafs" not in groups'
+ ansible.builtin.add_host:
+ name: "{{ item }}.utility"
+ group: leafs
+ loop: "{{ leafs_list }}"
+
+ - name: Add spines group to inventory
+ when: '"spines" not in groups'
+ ansible.builtin.add_host:
+ name: "{{ item }}.utility"
+ group: spines
+ loop: "{{ spines_list }}"
+
+ - name: Add routers group to inventory
+ when: '"routers" not in groups'
+ ansible.builtin.add_host:
+ name: "{{ item }}.utility"
+ group: routers
+ loop: "{{ routers_list }}"
+
+ - name: Check SSH connectivity
+ delegate_to: hypervisor
+ ansible.builtin.wait_for:
+ port: 22
+ host: "{{ item }}.utility"
+ search_regex: OpenSSH
+ delay: 10
+ timeout: 120
+ loop: "{{ fabric_list }}"
+
- name: Common spines and leaves configuration
hosts: "spines,leafs{{ router_bool | default(false) | ternary(',routers', '') }}"
tasks:
@@ -183,10 +239,13 @@
autoconnect: true
conn_name: "{{ item }}"
type: ethernet
+ ifname: "{{ router_downlink_ifs[loop_index | int] }}"
method4: disabled
method6: link-local
state: present
loop: "{{ router_downlink_conns }}"
+ loop_control:
+ index_var: loop_index
# uplink router IPv4 is configured for both IPv4 and IPv6 jobs
- name: Configure uplink router connections with nmcli when IPv4
@@ -453,6 +512,8 @@
autoconnect: true
conn_name: "{{ item }}"
ip4: "{{ leaf_ds_ip4 }}/30"
+ type: ethernet
+ ifname: "{{ downlink_ifs_rack3[loop_index | int] }}"
method4: manual
method6: link-local
state: present
@@ -470,6 +531,8 @@
autoconnect: true
conn_name: "{{ item }}"
ip4: "{{ leaf_ds_ip4 }}/30"
+ type: ethernet
+ ifname: "{{ leaf_downlink_ifs[loop_index | int] }}"
method4: manual
method6: link-local
state: present
@@ -513,6 +576,8 @@
conn_name: "{{ item }}"
ip4: "{{ _leaf_ds_ip4 }}/30"
ip6: "{{ _leaf_ds_ip6 }}/126"
+ type: ethernet
+ ifname: "{{ leaf_downlink_ifs[loop_index | int] }}"
method4: manual
method6: manual
state: present
@@ -549,8 +614,12 @@
conn_name: "{{ item }}"
method4: disabled
method6: link-local
+ type: ethernet
+ ifname: "{{ uplink_ifs[loop_index | int] }}"
state: present
loop: "{{ uplink_conns }}"
+ loop_control:
+ index_var: loop_index
- name: Enable FRR Zebra daemon
become: true
diff --git a/roles/adoption_osp_deploy/tasks/config_files.yml b/roles/adoption_osp_deploy/tasks/config_files.yml
index 28eab22e97..e217f9a725 100644
--- a/roles/adoption_osp_deploy/tasks/config_files.yml
+++ b/roles/adoption_osp_deploy/tasks/config_files.yml
@@ -34,7 +34,16 @@
register: _original_config_download
delegate_to: "localhost"
+- name: Copy config download file as is (bgp)
+ when: bgp
+ delegate_to: "osp-undercloud-0"
+ ansible.builtin.copy:
+    content: "{{ _original_config_download['content'] | b64decode | from_yaml | to_nice_yaml(indent=2, sort_keys=false) }}"
+ dest: "{{ ansible_user_dir }}/config_download_{{ _stack.stackname }}.yaml"
+ mode: "0644"
+
- name: Generate config download file
+ when: not bgp
vars:
_ctlplane_net: "{{ cifmw_networking_env_definition.networks.ctlplane }}"
_hostname_map_translation: >-
diff --git a/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml b/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml
index a6913bc271..f23c060df6 100644
--- a/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml
+++ b/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml
@@ -22,6 +22,7 @@
- user_dir
- name: Generate adoption vars file
+ when: not bgp
delegate_to: "controller-0"
vars:
_undercloud_name: "{{ _vm_groups['osp-underclouds'] | first }}"
@@ -44,3 +45,22 @@
src: "adoption_vars.yaml.j2"
dest: "{{ ansible_user_dir }}/adoption_vars.yaml"
mode: "0644"
+
+
+- name: Generate adoption vars file (BGP)
+ when: bgp
+ delegate_to: "controller-0"
+ vars:
+ _undercloud_name: "{{ _vm_groups['osp-underclouds'] | first }}"
+ _undercloud_net: "{{ cifmw_networking_env_definition.instances[_undercloud_name] }}"
+ _undercloud_ip: "{{ _undercloud_net.networks.ctlplaner0[ip_version|default('ip_v4')] }}"
+ _controller_1_name: "{{ _vm_groups['osp-r0-controllers'] | first }}"
+ _controller_1_net: "{{ cifmw_networking_env_definition.instances[_controller_1_name] }}"
+ _controller_1_internalapi_ip: "99.99.0.29"
+ _compute_1_name: "{{ _vm_groups['osp-r0-computes'] | first }}"
+ _compute_1_net: "{{ cifmw_networking_env_definition.instances[_compute_1_name] }}"
+ _compute_1_ip: "{{ _compute_1_net.networks.ctlplaner0[ip_version|default('ip_v4')] }}"
+ ansible.builtin.template:
+ src: "adoption_vars_bgp.yaml.j2"
+ dest: "{{ ansible_user_dir }}/adoption_vars.yaml"
+ mode: "0644"
diff --git a/roles/adoption_osp_deploy/tasks/getent_registry_ips_bgp.yml b/roles/adoption_osp_deploy/tasks/getent_registry_ips_bgp.yml
new file mode 100644
index 0000000000..319ba86ddc
--- /dev/null
+++ b/roles/adoption_osp_deploy/tasks/getent_registry_ips_bgp.yml
@@ -0,0 +1,42 @@
+---
+# Copyright Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Obtain IPs whose routes need to be added to the node (bgp)
+ ansible.builtin.getent:
+ database: ahosts
+ key: "{{ item }}"
+ register: _current_vm_getent
+ loop:
+ - registry.redhat.io
+ - cdn.redhat.com
+ - access.redhat.com
+ - cdn01.quay.io
+
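+# The expression below walks every getent result, flattens the per-host
+# 'getent_ahosts' dicts (whose keys are the resolved addresses), drops the
+# IPv6 entries (anything containing ':') and appends the remaining IPv4
+# addresses to _accumulated_ips.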
+- name: Accumulate IPs from this VM
+ ansible.builtin.set_fact:
+ _accumulated_ips: >-
+ {{
+ _accumulated_ips | default([]) +
+ (
+ _current_vm_getent.results |
+ map(attribute='ansible_facts.getent_ahosts') |
+ map('dict2items') |
+ flatten |
+ map(attribute='key') |
+ reject('match', '.*:.*') |
+ list
+ )
+ }}
diff --git a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml
index d259b8c866..64c772c581 100644
--- a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml
+++ b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml
@@ -137,12 +137,21 @@
script: "{{ _source_cmd }}; {{ _vip_provision_cmd }}"
- name: Create tripleo ansible inventory
+ when: not bgp
delegate_to: "osp-undercloud-0"
ansible.builtin.template:
src: "tripleo-ansible-inventory.yaml.j2"
dest: "{{ ansible_user_dir }}/overcloud-deploy/{{ _overcloud_name }}/tripleo-ansible-inventory.yaml"
mode: "0644"
+ - name: Create tripleo ansible inventory (BGP)
+ when: bgp
+ delegate_to: "osp-undercloud-0"
+ ansible.builtin.template:
+ src: "tripleo-ansible-inventory_bgp.yaml.j2"
+ dest: "{{ ansible_user_dir }}/overcloud-deploy/{{ _overcloud_name }}/tripleo-ansible-inventory.yaml"
+ mode: "0644"
+
- name: Ensure os-net-config and openvswitch is installed in overcloud nodes
become: true
delegate_to: "{{ overcloud_vm }}"
@@ -184,6 +193,90 @@
loop: "{{ _tripleo_nodes_stack[_overcloud_name] }}"
loop_control:
loop_var: overcloud_vm
+ when: not bgp
+
+  - name: Obtain IPs whose routes need to be added to the overcloud nodes (bgp)
+ when: bgp
+ ansible.builtin.include_tasks: getent_registry_ips_bgp.yml
+ args:
+ apply:
+ delegate_to: "{{ _vm }}"
+ loop: "{{ _tripleo_nodes_stack[_overcloud_name] }}"
+ loop_control:
+ loop_var: _vm
+ pause: 1
+
+ - name: Generate os-net-config file for overcloud nodes (bgp)
+ become: true
+ delegate_to: "{{ overcloud_vm }}"
+ vars:
+ _node_net: "{{ cifmw_networking_env_definition.instances[overcloud_vm] }}"
+      _dns_server: "{{ _ctlplane_net[dns_version | default('dns_v4')] }}"
+ _interface_mtu: 1500
+ _ips_for_oc_routes_list: "{{ _accumulated_ips | unique }}"
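+      # Static per-node addressing consumed by os_net_config_overcloud_bgp.yml.j2:
+      # the ctlplane IP, the two /30 point-to-point link addresses (left/right)
+      # and the loopback addresses (main/main6).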
+ vms:
+ osp-r0-compute-0:
+ ctlplane: '192.168.122.100'
+ left: '100.64.0.2'
+ right: '100.65.0.2'
+ main: '99.99.0.2'
+ main6: 'f00d:f00d:f00d:f00d:f00d:f00d:f00d:0004'
+ osp-r0-compute-1:
+ ctlplane: '192.168.122.101'
+ left: '100.64.0.6'
+ right: '100.65.0.6'
+ main: '99.99.0.6'
+ main6: 'f00d:f00d:f00d:f00d:f00d:f00d:f00d:0005'
+ osp-r1-compute-0:
+ ctlplane: '192.168.123.105'
+ left: '100.64.1.2'
+ right: '100.65.1.2'
+ main: '99.99.1.2'
+ main6: 'f00d:f00d:f00d:f00d:f00d:f00d:f00d:0006'
+ osp-r1-compute-1:
+ ctlplane: '192.168.123.106'
+ left: '100.64.1.6'
+ right: '100.65.1.6'
+ main: '99.99.1.6'
+ main6: 'f00d:f00d:f00d:f00d:f00d:f00d:f00d:0007'
+ osp-r2-compute-0:
+ ctlplane: '192.168.124.110'
+ left: '100.64.2.2'
+ right: '100.65.2.2'
+ main: '99.99.2.2'
+ main6: 'f00d:f00d:f00d:f00d:f00d:f00d:f00d:0008'
+ osp-r2-compute-1:
+ ctlplane: '192.168.124.111'
+ left: '100.64.2.6'
+ right: '100.65.2.6'
+ main: '99.99.2.6'
+ main6: 'f00d:f00d:f00d:f00d:f00d:f00d:f00d:0009'
+ osp-r0-controller-0:
+ ctlplane: '192.168.122.140'
+ left: '100.64.0.26'
+ right: '100.65.0.26'
+ main: '99.99.0.29'
+ main6: 'f00d:f00d:f00d:f00d:f00d:f00d:f00d:0001'
+ osp-r1-controller-0:
+ ctlplane: '192.168.123.142'
+ left: '100.64.1.26'
+ right: '100.65.1.26'
+ main: '99.99.1.29'
+ main6: 'f00d:f00d:f00d:f00d:f00d:f00d:f00d:0002'
+ osp-r2-controller-0:
+ ctlplane: '192.168.124.144'
+ left: '100.64.2.26'
+ right: '100.65.2.26'
+ main: '99.99.2.29'
+ main6: 'f00d:f00d:f00d:f00d:f00d:f00d:f00d:0003'
+ ansible.builtin.template:
+ src: "os_net_config_overcloud_bgp.yml.j2"
+ dest: /etc/os-net-config/tripleo_config.yaml
+ mode: "0644"
+ loop: "{{ _tripleo_nodes_stack[_overcloud_name] }}"
+ loop_control:
+ loop_var: overcloud_vm
+ when: bgp
- name: Configure network interfaces for overcloud nodes
become: true
diff --git a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml
index 522fb5ed0a..70b8a7c7d7 100644
--- a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml
+++ b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml
@@ -59,6 +59,9 @@
openstack tripleo container image prepare
default --output-env-file
{{ ansible_user_dir }}/containers-prepare-parameters.yaml
+ {% if cifmw_adoption_osp_deploy_scenario.container_prepare_local_push_dest | default(false) | bool %}
+ --local-push-destination
+ {% endif %}
cifmw.general.ci_script:
output_dir: "{{ cifmw_basedir }}/artifacts"
script: "{{ _container_prepare_cmd }}"
@@ -106,6 +109,7 @@
mode: '0755'
- name: Generate os-net-config file
+ when: not bgp
become: true
vars:
_undercloud_name: "{{ _vm_groups['osp-underclouds'] | first }}"
@@ -122,6 +126,29 @@
dest: /etc/os-net-config/tripleo_config.yaml
mode: "0644"
+ - name: Obtain IPs whose routes need to be added to the undercloud (bgp)
+ when: bgp
+ ansible.builtin.include_tasks: getent_registry_ips_bgp.yml
+
+ - name: Generate os-net-config file (bgp)
+ when: bgp
+ become: true
+ vars:
+ _undercloud_name: "{{ _vm_groups['osp-underclouds'] | first }}"
+ _undercloud_net: "{{ cifmw_networking_env_definition.instances[_undercloud_name] }}"
+ _ctlplane_ip: "{{ _undercloud_net.networks.ctlplaner0[ip_version|default('ip_v4')] }}"
+ _ctlplane_vip: "{{ cifmw_adoption_osp_deploy_scenario.undercloud.ctlplane_vip }}"
+ _ctlplane_net: "{{ cifmw_networking_env_definition.networks.ctlplaner0}}"
+ _dns_server: "{{ _ctlplane_net[dns_version|default('dns_v4')] }}"
+ _gateway_ip: "{{ _ctlplane_net[gw_version|default('gw_v4')] }}"
+ _interface_mtu: "{{ _undercloud_net.networks.ctlplaner0.mtu }}"
+ _ctlplane_cidr: "{{ _undercloud_net.networks.ctlplaner0[prefix_length_version|default('prefix_length_v4')] }}"
+ _ips_for_uc_routes_list: "{{ _accumulated_ips | unique }}"
+ ansible.builtin.template:
+ src: "os_net_config_undercloud_bgp.yml.j2"
+ dest: /etc/os-net-config/tripleo_config.yaml
+ mode: "0644"
+
- name: Copy undercloud.conf file to location to edit it
ansible.builtin.copy:
src: "/usr/share/python-tripleoclient/undercloud.conf.sample"
@@ -130,6 +157,7 @@
mode: "0644"
- name: Add environment specific vars for undercloud
+ when: not bgp
vars:
_undercloud_name: "{{ _vm_groups['osp-underclouds'] | first }}"
_undercloud_net: "{{ cifmw_networking_env_definition.instances[_undercloud_name] }}"
@@ -185,6 +213,33 @@
combine(_env_undercloud, recursive=true, list_merge="append_rp")
}}
+- name: Add environment specific vars for undercloud (bgp)
+ when: bgp
+ vars:
+ _undercloud_name: "{{ _vm_groups['osp-underclouds'] | first }}"
+ _undercloud_net: "{{ cifmw_networking_env_definition.instances[_undercloud_name] }}"
+ _undercloud_ip: "{{ _undercloud_net.networks.ctlplaner0[ip_version|default('ip_v4')] }}"
+ _undercloud_net_prefix: "{{ _undercloud_net.networks.ctlplaner0[prefix_length_version|default('prefix_length_v4')] }}"
+ _ctlplane_cidr: "{{ cifmw_networking_env_definition.networks.ctlplaner0[network_version|default('network_v4')] }}"
+ _interface_mtu: "{{ _undercloud_net.networks.ctlplaner0.mtu }}"
+ _env_undercloud:
+ config:
+ - section: DEFAULT
+ option: undercloud_ntp_servers
+ value: "{{ cifmw_adoption_osp_deploy_ntp_server }}"
+ - section: DEFAULT
+ option: container_images_file
+ value: "{{ ansible_user_dir }}/containers-prepare-parameters.yaml"
+ - section: DEFAULT
+ option: net_config_override
+ value: "/etc/os-net-config/tripleo_config.yaml"
+ ansible.builtin.set_fact:
+ _undercloud_conf: >-
+ {{
+ cifmw_adoption_osp_deploy_scenario.undercloud |
+ combine(_env_undercloud, recursive=true, list_merge="append_rp")
+ }}
+
- name: Copy undercloud overrides file if present and amend undercloud conf
when: cifmw_adoption_osp_deploy_scenario.undercloud.undercloud_parameters_override is defined
vars:
diff --git a/roles/adoption_osp_deploy/templates/adoption_vars_bgp.yaml.j2 b/roles/adoption_osp_deploy/templates/adoption_vars_bgp.yaml.j2
new file mode 100644
index 0000000000..77a769a59d
--- /dev/null
+++ b/roles/adoption_osp_deploy/templates/adoption_vars_bgp.yaml.j2
@@ -0,0 +1,209 @@
+#jinja2: trim_blocks:True, lstrip_blocks:True
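+{# Adoption variables for the BGP layout: the MariaDB and OVN databases are
+   reached through the rack-0 controller loopback (99.99.0.29), and the EDPM
+   node lists are built per rack from the osp-r{0,1,2}-* groups. #}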
+source_mariadb_ip: 99.99.0.29
+source_ovndb_ip: 99.99.0.29
+edpm_node_hostname: {{ _compute_1_name }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+edpm_node_ip: {{ _compute_1_ip }}
+
+edpm_computes: |
+ {% for compute in _vm_groups['osp-r0-computes'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[compute] %}
+ ["{{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplaner0[ip_version|default('ip_v4')] }}"
+ {% endfor %}
+ {% for compute in _vm_groups['osp-r1-computes'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[compute] %}
+ ["{{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplaner1[ip_version|default('ip_v4')] }}"
+ {% endfor %}
+ {% for compute in _vm_groups['osp-r2-computes'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[compute] %}
+ ["{{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplaner2[ip_version|default('ip_v4')] }}"
+ {% endfor %}
+
+
+{% if _vm_groups['osp-r0-networkers'] | default([]) | length > 0 %}
+edpm_networkers: |
+ {% for networker in _vm_groups['osp-r0-networkers'] | default([]) %}
+ {% set node_nets = cifmw_networking_env_definition.instances[networker] %}
+ ["{{ networker }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplaner0[ip_version|default('ip_v4')] }}"
+ {% endfor %}
+ {% for networker in _vm_groups['osp-r1-networkers'] | default([]) %}
+ {% set node_nets = cifmw_networking_env_definition.instances[networker] %}
+ ["{{ networker }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplaner1[ip_version|default('ip_v4')] }}"
+ {% endfor %}
+ {% for networker in _vm_groups['osp-r2-networkers'] | default([]) %}
+ {% set node_nets = cifmw_networking_env_definition.instances[networker] %}
+ ["{{ networker }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplaner2[ip_version|default('ip_v4')] }}"
+ {% endfor %}
+{% endif %}
+
+
+source_galera_members: |
+ {% for controller in _vm_groups['osp-r0-controllers'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[controller] %}
+ ["{{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="99.99.0.29"
+ {% endfor %}
+ {% for controller in _vm_groups['osp-r1-controllers'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[controller] %}
+ ["{{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="99.99.1.29"
+ {% endfor %}
+ {% for controller in _vm_groups['osp-r2-controllers'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[controller] %}
+ ["{{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="99.99.2.29"
+ {% endfor %}
+
+
+{% if _vm_groups['osp-r0-computes'] | default([]) | length > 0 %}
+edpm_nodes:
+ {% for compute in _vm_groups['osp-r0-computes'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[compute] %}
+ {{ compute }}:
+ hostName: {{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ansible:
+ ansibleHost: {{ node_nets.networks.ctlplaner0[ip_version|default('ip_v4')] }}
+ networks:
+ {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %}
+ - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }}
+ name: {{ net }}
+ subnetName: subnet1
+{% if net == 'ctlplaner0' %}
+ defaultRoute: true
+{% endif %}
+ {% endfor %}
+ {% endfor %}
+ {% for compute in _vm_groups['osp-r1-computes'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[compute] %}
+ {{ compute }}:
+ hostName: {{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ansible:
+ ansibleHost: {{ node_nets.networks.ctlplaner1[ip_version|default('ip_v4')] }}
+ networks:
+ {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %}
+ - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }}
+ name: {{ net }}
+ subnetName: subnet1
+{% if net == 'ctlplaner1' %}
+ defaultRoute: true
+{% endif %}
+ {% endfor %}
+ {% endfor %}
+ {% for compute in _vm_groups['osp-r2-computes'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[compute] %}
+ {{ compute }}:
+ hostName: {{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ansible:
+ ansibleHost: {{ node_nets.networks.ctlplaner2[ip_version|default('ip_v4')] }}
+ networks:
+ {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %}
+ - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }}
+ name: {{ net }}
+ subnetName: subnet1
+{% if net == 'ctlplaner2' %}
+ defaultRoute: true
+{% endif %}
+ {% endfor %}
+ {% endfor %}
+{% endif %}
+
+edpm_nodes_networker:
+{% if _vm_groups['osp-r0-networkers'] | default([]) | length > 0 %}
+ {% for networker in _vm_groups['osp-r0-networkers'] | default([]) %}
+ {% set node_nets = cifmw_networking_env_definition.instances[networker] %}
+ {{ networker }}:
+ hostName: {{ networker }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ansible:
+ ansibleHost: {{ node_nets.networks.ctlplaner0[ip_version|default('ip_v4')] }}
+ networks:
+ {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %}
+ - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }}
+ name: {{ net }}
+ subnetName: subnet1
+{% if net == 'ctlplaner0' %}
+ defaultRoute: true
+{% endif %}
+ {% endfor %}
+ {% endfor %}
+ {% for networker in _vm_groups['osp-r1-networkers'] | default([]) %}
+ {% set node_nets = cifmw_networking_env_definition.instances[networker] %}
+ {{ networker }}:
+ hostName: {{ networker }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ansible:
+ ansibleHost: {{ node_nets.networks.ctlplaner1[ip_version|default('ip_v4')] }}
+ networks:
+ {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %}
+ - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }}
+ name: {{ net }}
+ subnetName: subnet1
+{% if net == 'ctlplaner1' %}
+ defaultRoute: true
+{% endif %}
+ {% endfor %}
+ {% endfor %}
+ {% for networker in _vm_groups['osp-r2-networkers'] | default([]) %}
+ {% set node_nets = cifmw_networking_env_definition.instances[networker] %}
+ {{ networker }}:
+ hostName: {{ networker }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ansible:
+ ansibleHost: {{ node_nets.networks.ctlplaner2[ip_version|default('ip_v4')] }}
+ networks:
+ {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %}
+ - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }}
+ name: {{ net }}
+ subnetName: subnet1
+{% if net == 'ctlplaner2' %}
+ defaultRoute: true
+{% endif %}
+ {% endfor %}
+ {% endfor %}
+{% endif %}
+ {% for controller in _vm_groups['osp-r0-controllers'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[controller] %}
+ {{ controller }}:
+ hostName: {{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ansible:
+ ansibleHost: {{ node_nets.networks.ctlplaner0[ip_version|default('ip_v4')] }}
+ networks:
+ {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %}
+ - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }}
+ name: {{ net }}
+ subnetName: subnet1
+{% if net == 'ctlplaner0' %}
+ defaultRoute: true
+{% endif %}
+ {% endfor %}
+ {% endfor %}
+ {% for controller in _vm_groups['osp-r1-controllers'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[controller] %}
+ {{ controller }}:
+ hostName: {{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ansible:
+ ansibleHost: {{ node_nets.networks.ctlplaner1[ip_version|default('ip_v4')] }}
+ networks:
+ {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %}
+ - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }}
+ name: {{ net }}
+ subnetName: subnet1
+{% if net == 'ctlplaner1' %}
+ defaultRoute: true
+{% endif %}
+ {% endfor %}
+ {% endfor %}
+ {% for controller in _vm_groups['osp-r2-controllers'] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[controller] %}
+ {{ controller }}:
+ hostName: {{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ansible:
+ ansibleHost: {{ node_nets.networks.ctlplaner2[ip_version|default('ip_v4')] }}
+ networks:
+ {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %}
+ - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }}
+ name: {{ net }}
+ subnetName: subnet1
+{% if net == 'ctlplaner2' %}
+ defaultRoute: true
+{% endif %}
+ {% endfor %}
+ {% endfor %}
+
+
+upstream_dns: {{ cifmw_networking_env_definition.networks.ctlplane[dns_version|default('dns_v4')] | first }}
+os_cloud_name: {{ cifmw_adoption_osp_deploy_scenario.stacks[0].stackname }}
+standalone_ip: {{ _undercloud_ip }}
diff --git a/roles/adoption_osp_deploy/templates/os_net_config_overcloud_bgp.yml.j2 b/roles/adoption_osp_deploy/templates/os_net_config_overcloud_bgp.yml.j2
new file mode 100644
index 0000000000..5a057bb2c0
--- /dev/null
+++ b/roles/adoption_osp_deploy/templates/os_net_config_overcloud_bgp.yml.j2
@@ -0,0 +1,77 @@
+#jinja2: trim_blocks:True, lstrip_blocks:True
+network_config:
+- type: interface
+ name: nic1
+ mtu: {{ _interface_mtu }}
+ use_dhcp: true
+ defroute: false
+ routes:
+ - ip_netmask: 10.0.0.0/8
+ next_hop: 192.168.111.1
+ - ip_netmask: 23.0.0.0/8
+ next_hop: 192.168.111.1
+ - ip_netmask: 104.0.0.0/8
+ next_hop: 192.168.111.1
+ - ip_netmask: 212.0.0.0/8
+ next_hop: 192.168.111.1
+{% for _ip in _ips_for_oc_routes_list %}
+ - ip_netmask: {{ _ip }}/32
+ next_hop: 192.168.111.1
+{% endfor %}
+- type: interface
+ name: nic2
+ mtu: {{ _interface_mtu }}
+{% if 'r0' in overcloud_vm %}
+ dns_servers: ['192.168.122.1', '192.168.125.1']
+{% elif 'r1' in overcloud_vm %}
+ dns_servers: ['192.168.123.1', '192.168.125.1']
+{% else %}
+ dns_servers: ['192.168.124.1', '192.168.125.1']
+{% endif %}
+ domain: []
+ routes:
+{% if 'r0' in overcloud_vm %}
+ - ip_netmask: 192.168.123.0/24
+ next_hop: 192.168.122.1
+ - ip_netmask: 192.168.124.0/24
+ next_hop: 192.168.122.1
+{% elif 'r1' in overcloud_vm %}
+ - ip_netmask: 192.168.122.0/24
+ next_hop: 192.168.123.1
+ - ip_netmask: 192.168.124.0/24
+ next_hop: 192.168.123.1
+{% else %}
+ - ip_netmask: 192.168.122.0/24
+ next_hop: 192.168.124.1
+ - ip_netmask: 192.168.123.0/24
+ next_hop: 192.168.124.1
+{% endif %}
+ use_dhcp: false
+ addresses:
+ - ip_netmask: {{ vms[overcloud_vm].ctlplane }}/24
+- type: interface
+ name: nic3
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ {{ vms[overcloud_vm].left }}/30
+- type: interface
+ name: nic4
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ {{ vms[overcloud_vm].right }}/30
+- type: interface
+ name: lo
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ {{ vms[overcloud_vm].main }}/32
+ - ip_netmask:
+ {{ vms[overcloud_vm].main6 }}/128
+- type: ovs_bridge
+ name: br-ex
+ use_dhcp: false
+- type: ovs_bridge
+ name: br-vlan
+ use_dhcp: false
diff --git a/roles/adoption_osp_deploy/templates/os_net_config_undercloud_bgp.yml.j2 b/roles/adoption_osp_deploy/templates/os_net_config_undercloud_bgp.yml.j2
new file mode 100644
index 0000000000..917ad41a86
--- /dev/null
+++ b/roles/adoption_osp_deploy/templates/os_net_config_undercloud_bgp.yml.j2
@@ -0,0 +1,77 @@
+#jinja2: trim_blocks:True, lstrip_blocks:True
+{% if ':' in _ctlplane_ip %}
+{% set _ctlplane_ip_cidr = 128 %}
+{% else %}
+{% set _ctlplane_ip_cidr = 32 %}
+{% endif %}
+{% if ':' in _ctlplane_vip %}
+{% set _ctlplane_vip_cidr = 128 %}
+{% else %}
+{% set _ctlplane_vip_cidr = 32 %}
+{% endif %}
+network_config:
+- type: interface
+ name: nic1
+ mtu: {{ _interface_mtu }}
+ use_dhcp: true
+ defroute: false
+ routes:
+ - ip_netmask: 10.0.0.0/8
+ next_hop: 192.168.111.1
+ - ip_netmask: 23.0.0.0/8
+ next_hop: 192.168.111.1
+ - ip_netmask: 104.0.0.0/8
+ next_hop: 192.168.111.1
+ - ip_netmask: 212.0.0.0/8
+ next_hop: 192.168.111.1
+{% for _ip in _ips_for_uc_routes_list %}
+ - ip_netmask: {{ _ip }}/32
+ next_hop: 192.168.111.1
+{% endfor %}
+- type: ovs_bridge
+ name: br-ctlplane
+ mtu: {{ _interface_mtu }}
+ use_dhcp: false
+ dns_servers:
+  {% for _dns_ip in _dns_server | default([]) %}
+ - {{ _dns_ip }}
+ {% endfor %}
+ - {{ _gateway_ip }}
+ domain: []
+ addresses:
+ - ip_netmask: {{ _ctlplane_ip }}/{{ _ctlplane_cidr }}
+ - ip_netmask: {{ _ctlplane_ip }}/{{ _ctlplane_ip_cidr }}
+ - ip_netmask: {{ _ctlplane_vip }}/{{ _ctlplane_vip_cidr }}
+ routes:
+ - ip_netmask: 192.168.123.0/24
+ next_hop: 192.168.122.1
+ - ip_netmask: 192.168.124.0/24
+ next_hop: 192.168.122.1
+ members:
+ - type: interface
+ name: {{ cifmw_adoption_osp_deploy_scenario.undercloud.os_net_config_iface |
+ default('nic2') }}
+ mtu: {{ _interface_mtu }}
+ # force the MAC address of the bridge to this interface
+ primary: true
+- type: interface
+ name: nic3
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ 100.64.0.30/30
+- type: interface
+ name: nic4
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ 100.65.0.30/30
+- type: interface
+ name: lo
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ 99.99.0.33/32
+ - ip_netmask:
+ f00d:f00d:f00d:f00d:f00d:f00d:f00d:25/128
diff --git a/roles/adoption_osp_deploy/templates/tripleo-ansible-inventory_bgp.yaml.j2 b/roles/adoption_osp_deploy/templates/tripleo-ansible-inventory_bgp.yaml.j2
new file mode 100644
index 0000000000..9e947009c4
--- /dev/null
+++ b/roles/adoption_osp_deploy/templates/tripleo-ansible-inventory_bgp.yaml.j2
@@ -0,0 +1,68 @@
+#jinja2: trim_blocks:True, lstrip_blocks:True
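+{# Renders a TripleO-style ansible inventory: one group per role from
+   roles_groups_map, an Undercloud group, the allovercloud and computes meta
+   groups, and a host group named after the overcloud stack. Each node's
+   ansible_host/ctlplane_ip comes from whichever per-rack ctlplane network
+   (ctlplaner0/1/2) it is attached to. #}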
+{% for group, role in cifmw_adoption_osp_deploy_scenario.roles_groups_map.items() %}
+{{ role }}:
+ hosts:
+ {% for node in _vm_groups[group] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[node] %}
+ {{ node }}:
+ {% if 'ctlplaner0' in node_nets.networks %}
+ ansible_host: {{ node_nets.networks.ctlplaner0[ip_version|default('ip_v4')] }}
+ canonical_hostname: {{ node }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ctlplane_ip: {{ node_nets.networks.ctlplaner0[ip_version|default('ip_v4')] }}
+ {% elif 'ctlplaner1' in node_nets.networks %}
+ ansible_host: {{ node_nets.networks.ctlplaner1[ip_version|default('ip_v4')] }}
+ canonical_hostname: {{ node }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ctlplane_ip: {{ node_nets.networks.ctlplaner1[ip_version|default('ip_v4')] }}
+ {% else %}
+ ansible_host: {{ node_nets.networks.ctlplaner2[ip_version|default('ip_v4')] }}
+ canonical_hostname: {{ node }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}
+ ctlplane_ip: {{ node_nets.networks.ctlplaner2[ip_version|default('ip_v4')] }}
+ {% endif %}
+ {% for network_name, net in node_nets.networks.items() %}
+ {% if 'vlan_id' in net %}
+ {% set net_name = ['storage_mgmt'] if network_name == 'storagemgmt' else [network_name] %}
+      {% set net_name = ['internal_api'] if network_name == 'internalapi' else net_name %}
+ {{ net_name[0] }}_ip: {{ net[ip_version|default('ip_v4')] }}
+ {% endif %}
+ {% endfor %}
+ {% endfor %}
+ vars:
+ ansible_ssh_common_args: -o StrictHostKeyChecking=no
+ ansible_ssh_user: zuul
+{% endfor %}
+Undercloud:
+ hosts:
+ undercloud: {}
+ vars:
+ ansible_connection: local
+ ansible_host: localhost
+allovercloud:
+ children:
+ {% for _, role in cifmw_adoption_osp_deploy_scenario.roles_groups_map.items() %}
+ {{ role }}: {}
+ {% endfor %}
+computes:
+ children:
+ {% for group, role in cifmw_adoption_osp_deploy_scenario.roles_groups_map.items() %}
+ {% if 'osp' in group and 'compute' in group %}
+ {{ role }}: {}
+ {% endif %}
+ {% endfor %}
+{{ _overcloud_name }}:
+ hosts:
+ {% for group, _ in cifmw_adoption_osp_deploy_scenario.roles_groups_map.items() %}
+ {% if 'osp' in group and ('compute' in group or 'controller' in group) %}
+ {% for node in _vm_groups[group] %}
+ {% set node_nets = cifmw_networking_env_definition.instances[node] %}
+ {{ node }}:
+ ansible_user: tripleo-admin
+ {% if 'ctlplaner0' in node_nets.networks %}
+ ansible_host: {{ node_nets.networks.ctlplaner0[ip_version|default('ip_v4')] }}
+ {% elif 'ctlplaner1' in node_nets.networks %}
+ ansible_host: {{ node_nets.networks.ctlplaner1[ip_version|default('ip_v4')] }}
+ {% else %}
+ ansible_host: {{ node_nets.networks.ctlplaner2[ip_version|default('ip_v4')] }}
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+ {% endfor %}
diff --git a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl-adoption b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl-adoption
new file mode 120000
index 0000000000..43886fd2df
--- /dev/null
+++ b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl-adoption
@@ -0,0 +1 @@
+bgp-l3-xl
\ No newline at end of file
diff --git a/scenarios/adoption/bgp-l3-xl.yml b/scenarios/adoption/bgp-l3-xl.yml
new file mode 100644
index 0000000000..53870d2a86
--- /dev/null
+++ b/scenarios/adoption/bgp-l3-xl.yml
@@ -0,0 +1,489 @@
+---
+# By default, the OSP VMs run with a default image: usually the latest
+# CentOS Stream 9 upstream, and usually a RHEL 9.4 image downstream,
+# depending on the job configuration.
+#
+# Since the OSP infra must use an older RHEL image, you can override it
+# by setting "osp_base_img_url" to point to the downstream QCOW2 image,
+# and "osp_base_img_sha256" to the SHA256SUM of that image.
+#
+# We can't discover the image automatically: the role/module sets the
+# value globally, and it would clash with the needs of the RHOSO images.
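+#
+# For example (placeholder values):
+#
+#   osp_base_img_url: "https://example.com/images/rhel-9.4-guest.qcow2"
+#   osp_base_img_sha256: "<sha256sum of the QCOW2 above>"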
+
+# Use an anchor to avoid repetition. This block is common to all of the OSP nodes.
+_osp_img_data: &osp_base_conf
+ image_local_dir: "{{ cifmw_basedir }}/images/"
+ disk_file_name: osp-base.qcow2
+ image_url: "{{ osp_base_img_url | default(cifmw_discovered_image_url) }}"
+ sha256_image_name: >-
+ {{ osp_base_img_sha256 | default(cifmw_discovered_hash) }}
+
+cifmw_architecture_scenario: bgp-adoption
+
+
+libvirt_manager_patch_layout:
+ vms:
+ # Let's remove the default computes, since we want to adopt the
+ # OSP ones
+ compute:
+ <<: *osp_base_conf
+ amount: 0
+ osp-undercloud:
+ <<: *osp_base_conf
+ amount: 1
+ memory: 16
+ cpus: 8
+ disksize: 80
+ nets:
+ - ocpbm
+ - r0_tr
+ spineleafnets:
+ - # rack0 - undercloud
+ - "l00-node4"
+ - "l01-node4"
+ osp-r0-controller:
+ <<: *osp_base_conf
+ amount: 1
+ memory: 16
+ cpus: 8
+ disksize: 80
+ nets:
+ - ocpbm
+ - r0_tr
+ spineleafnets:
+ - # rack0 - controller0
+ - "l00-node3"
+ - "l01-node3"
+ osp-r1-controller:
+ <<: *osp_base_conf
+ amount: 1
+ memory: 16
+ cpus: 8
+ disksize: 80
+ nets:
+ - ocpbm
+ - r1_tr
+ spineleafnets:
+ - # rack1 - controller1
+ - "l10-node3"
+ - "l11-node3"
+ osp-r2-controller:
+ <<: *osp_base_conf
+ amount: 1
+ memory: 16
+ cpus: 8
+ disksize: 80
+ nets:
+ - ocpbm
+ - r2_tr
+ spineleafnets:
+ - # rack2 - controller2
+ - "l20-node3"
+ - "l21-node3"
+ osp-r0-compute:
+ <<: *osp_base_conf
+ amount: 2
+ memory: 4
+ cpus: 4
+ disksize: 20
+ nets:
+ - ocpbm
+ - r0_tr
+ spineleafnets:
+ - # rack0 - compute0
+ - "l00-node0"
+ - "l01-node0"
+      - # rack0 - compute1
+ - "l00-node1"
+ - "l01-node1"
+ osp-r1-compute:
+ <<: *osp_base_conf
+ amount: 2
+ memory: 4
+ cpus: 4
+ disksize: 20
+ nets:
+ - ocpbm
+ - r1_tr
+ spineleafnets:
+ - # rack1 - compute0
+ - "l10-node0"
+ - "l11-node0"
+ - # rack1 - compute1
+ - "l10-node1"
+ - "l11-node1"
+ osp-r2-compute:
+ <<: *osp_base_conf
+ amount: 2
+ memory: 4
+ cpus: 4
+ disksize: 20
+ nets:
+ - ocpbm
+ - r2_tr
+ spineleafnets:
+ - # rack2 - compute0
+ - "l20-node0"
+ - "l21-node0"
+ - # rack2 - compute1
+ - "l20-node1"
+ - "l21-node1"
+
+ router:
+ root_part_id: >-
+ {{
+ (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') |
+ ternary(4, 1)
+ }}
+ image_url: "{{ cifmw_discovered_image_url }}"
+ sha256_image_name: "{{ cifmw_discovered_hash }}"
+ image_local_dir: "{{ cifmw_basedir }}/images/"
+ disk_file_name: "base-os.qcow2"
+ amount: 1
+ disksize: 25
+ memory: 4
+ cpus: 2
+ nets: # nets common to all the router nodes
+ - "ocpbm"
+ spineleafnets:
+ - # router - ocp_tester
+ - "s0-rtr"
+ - "s1-rtr"
+ - "rtr-ocp"
+ spine:
+ root_part_id: >-
+ {{
+ (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') |
+ ternary(4, 1)
+ }}
+ image_url: "{{ cifmw_discovered_image_url }}"
+ sha256_image_name: "{{ cifmw_discovered_hash }}"
+ image_local_dir: "{{ cifmw_basedir }}/images/"
+ disk_file_name: "base-os.qcow2"
+ amount: 2
+ disksize: 25
+ memory: 4
+ cpus: 2
+ nets: # nets common to all the spine nodes
+ - "ocpbm"
+ spineleafnets:
+ - # spine0
+ - "l00-s0"
+ - "l01-s0"
+ - "l10-s0"
+ - "l11-s0"
+ - "l20-s0"
+ - "l21-s0"
+ - "s0-rtr"
+ - # spine1
+ - "l00-s1"
+ - "l01-s1"
+ - "l10-s1"
+ - "l11-s1"
+ - "l20-s1"
+ - "l21-s1"
+ - "s1-rtr"
+ leaf:
+ root_part_id: >-
+ {{
+ (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') |
+ ternary(4, 1)
+ }}
+ image_url: "{{ cifmw_discovered_image_url }}"
+ sha256_image_name: "{{ cifmw_discovered_hash }}"
+ image_local_dir: "{{ cifmw_basedir }}/images/"
+ disk_file_name: "base-os.qcow2"
+ amount: 6
+ disksize: 25
+ memory: 4
+ cpus: 2
+ nets: # nets common to all the leaf nodes
+ - "ocpbm"
+ spineleafnets:
+ - # rack0 - leaf00
+ - "l00-s0"
+ - "l00-s1"
+ - "l00-node0"
+ - "l00-node1"
+ - "l00-node2"
+ - "l00-ocp0"
+ - "l00-ocp1"
+ - "l00-ocp2"
+ - "l00-node3"
+ - "l00-node4"
+ - # rack0 - leaf01
+ - "l01-s0"
+ - "l01-s1"
+ - "l01-node0"
+ - "l01-node1"
+ - "l01-node2"
+ - "l01-ocp0"
+ - "l01-ocp1"
+ - "l01-ocp2"
+ - "l01-node3"
+ - "l01-node4"
+ - # rack1 - leaf10
+ - "l10-s0"
+ - "l10-s1"
+ - "l10-node0"
+ - "l10-node1"
+ - "l10-node2"
+ - "l10-ocp0"
+ - "l10-ocp1"
+ - "l10-ocp2"
+ - "l10-node3"
+ - "l10-node4"
+ - # rack1 - leaf11
+ - "l11-s0"
+ - "l11-s1"
+ - "l11-node0"
+ - "l11-node1"
+ - "l11-node2"
+ - "l11-ocp0"
+ - "l11-ocp1"
+ - "l11-ocp2"
+ - "l11-node3"
+ - "l11-node4"
+ - # rack2 - leaf20
+ - "l20-s0"
+ - "l20-s1"
+ - "l20-node0"
+ - "l20-node1"
+ - "l20-node2"
+ - "l20-ocp0"
+ - "l20-ocp1"
+ - "l20-ocp2"
+ - "l20-node3"
+ - "l20-node4"
+ - # rack2 - leaf21
+ - "l21-s0"
+ - "l21-s1"
+ - "l21-node0"
+ - "l21-node1"
+ - "l21-node2"
+ - "l21-ocp0"
+ - "l21-ocp1"
+ - "l21-ocp2"
+ - "l21-node3"
+ - "l21-node4"
+
+
+ # set to zero
+ r0-compute:
+ amount: 0
+ <<: *osp_base_conf
+ r1-compute:
+ amount: 0
+ <<: *osp_base_conf
+ r2-compute:
+ amount: 0
+ <<: *osp_base_conf
+ r0-networker:
+ amount: 0
+ <<: *osp_base_conf
+ r1-networker:
+ amount: 0
+ <<: *osp_base_conf
+ r2-networker:
+ amount: 0
+ <<: *osp_base_conf
+
+
+cifmw_libvirt_manager_default_gw_nets:
+ - ocpbm
+ - r0_tr
+ - r1_tr
+ - r2_tr
+cifmw_networking_mapper_interfaces_info_translations:
+ osp_trunk:
+ - controlplane
+ - ctlplane
+ r0_tr:
+ - ctlplaner0
+ r1_tr:
+ - ctlplaner1
+ r2_tr:
+ - ctlplaner2
+
+networking_mapper_definition_patch:
+ networks:
+ ctlplane:
+ network: "192.168.125.0/24"
+ gateway: "192.168.125.1"
+ dns:
+ - "192.168.122.1"
+ mtu: 1500
+
+ ctlplaner0:
+ network: "192.168.122.0/24"
+ gateway: "192.168.122.1"
+ dns:
+ - "192.168.122.1"
+ mtu: 1500
+
+ ctlplaner1:
+ network: "192.168.123.0/24"
+ gateway: "192.168.123.1"
+ dns:
+ - "192.168.123.1"
+ mtu: 1500
+
+ ctlplaner2:
+ network: "192.168.124.0/24"
+ gateway: "192.168.124.1"
+ dns:
+ - "192.168.124.1"
+ mtu: 1500
+
+ internalapi:
+ network: "172.17.0.0/24"
+ vlan: 20
+ mtu: 1500
+
+ storage:
+ network: "172.18.0.0/24"
+ vlan: 21
+ mtu: 1500
+
+ tenant:
+ network: "172.19.0.0/24"
+ vlan: 22
+ mtu: 1500
+
+ octavia:
+ vlan: 23
+ mtu: 1500
+ network: "172.23.0.0/24"
+
+ # Not really used, but required by architecture
+ # https://github.com/openstack-k8s-operators/architecture/blob/main/lib/networking/netconfig/kustomization.yaml#L28-L36
+ external:
+ network: "192.168.32.0/20"
+ vlan: 99
+ mtu: 1500
+
+ # ensure ranges do not collide even if we don't create these vms
+ group-templates:
+ r0-computes:
+ network-template:
+ range:
+ start: 200
+ length: 5
+ r1-computes:
+ network-template:
+ range:
+ start: 205
+ length: 5
+ r2-computes:
+ network-template:
+ range:
+ start: 210
+ length: 5
+ r0-networkers:
+ network-template:
+ range:
+ start: 215
+ length: 5
+ r1-networkers:
+ network-template:
+ range:
+ start: 220
+ length: 5
+ r2-networkers:
+ network-template:
+ range:
+ start: 225
+ length: 5
+
+ osp-r0-controllers:
+ network-template:
+ range:
+ start: 140
+ length: 2
+ networks:
+ ctlplaner0: {}
+ external:
+ trunk-parent: ctlplaner0
+ internalapi:
+ trunk-parent: ctlplaner0
+ tenant:
+ trunk-parent: ctlplaner0
+ storage:
+ trunk-parent: ctlplaner0
+ osp-r1-controllers:
+ network-template:
+ range:
+ start: 142
+ length: 2
+ networks:
+ ctlplaner1: {}
+ external:
+ trunk-parent: ctlplaner1
+ internalapi:
+ trunk-parent: ctlplaner1
+ tenant:
+ trunk-parent: ctlplaner1
+ storage:
+ trunk-parent: ctlplaner1
+ osp-r2-controllers:
+ network-template:
+ range:
+ start: 144
+ length: 2
+ networks:
+ ctlplaner2: {}
+ external:
+ trunk-parent: ctlplaner2
+ internalapi:
+ trunk-parent: ctlplaner2
+ tenant:
+ trunk-parent: ctlplaner2
+ storage:
+ trunk-parent: ctlplaner2
+ osp-r0-computes:
+ network-template:
+ range:
+ start: 100
+ length: 5
+ networks:
+ ctlplaner0: {}
+ internalapi:
+ trunk-parent: ctlplaner0
+ tenant:
+ trunk-parent: ctlplaner0
+ storage:
+ trunk-parent: ctlplaner0
+ osp-r1-computes:
+ network-template:
+ range:
+ start: 105
+ length: 5
+ networks:
+ ctlplaner1: {}
+ internalapi:
+ trunk-parent: ctlplaner1
+ tenant:
+ trunk-parent: ctlplaner1
+ storage:
+ trunk-parent: ctlplaner1
+ osp-r2-computes:
+ network-template:
+ range:
+ start: 110
+ length: 5
+ networks:
+ ctlplaner2: {}
+ internalapi:
+ trunk-parent: ctlplaner2
+ tenant:
+ trunk-parent: ctlplaner2
+ storage:
+ trunk-parent: ctlplaner2
+ osp-underclouds:
+ network-template:
+ range:
+ start: 95
+ length: 1
+ networks:
+ ctlplaner0: {}
diff --git a/scenarios/reproducers/bgp-l3-xl.yml b/scenarios/reproducers/bgp-l3-xl.yml
index e658f81ce8..9d45b9e6ee 100644
--- a/scenarios/reproducers/bgp-l3-xl.yml
+++ b/scenarios/reproducers/bgp-l3-xl.yml
@@ -47,36 +47,48 @@ cifmw_libvirt_manager_network_interface_types:
l00-node0: network
l00-node1: network
l00-node2: network
+ l00-node3: network
+ l00-node4: network
l00-ocp0: network
l00-ocp1: network
l00-ocp2: network
l01-node0: network
l01-node1: network
l01-node2: network
+ l01-node3: network
+ l01-node4: network
l01-ocp0: network
l01-ocp1: network
l01-ocp2: network
l10-node0: network
l10-node1: network
l10-node2: network
+ l10-node3: network
+ l10-node4: network
l10-ocp0: network
l10-ocp1: network
l10-ocp2: network
l11-node0: network
l11-node1: network
l11-node2: network
+ l11-node3: network
+ l11-node4: network
l11-ocp0: network
l11-ocp1: network
l11-ocp2: network
l20-node0: network
l20-node1: network
l20-node2: network
+ l20-node3: network
+ l20-node4: network
l20-ocp0: network
l20-ocp1: network
l20-ocp2: network
l21-node0: network
l21-node1: network
l21-node2: network
+ l21-node3: network
+ l21-node4: network
l21-ocp0: network
l21-ocp1: network
l21-ocp2: network
@@ -193,6 +205,16 @@ cifmw_libvirt_manager_configuration:
         <name>l00-node2</name>
       </network>
 
+    l00-node3: |
+      <network>
+        <name>l00-node3</name>
+      </network>
+
+    l00-node4: |
+      <network>
+        <name>l00-node4</name>
+      </network>
+
     l00-ocp0: |
       <network>
         <name>l00-ocp0</name>
@@ -223,6 +245,16 @@ cifmw_libvirt_manager_configuration:
         <name>l01-node2</name>
       </network>
 
+    l01-node3: |
+      <network>
+        <name>l01-node3</name>
+      </network>
+
+    l01-node4: |
+      <network>
+        <name>l01-node4</name>
+      </network>
+
     l01-ocp0: |
       <network>
         <name>l01-ocp0</name>
@@ -254,6 +286,16 @@ cifmw_libvirt_manager_configuration:
         <name>l10-node2</name>
       </network>
 
+    l10-node3: |
+      <network>
+        <name>l10-node3</name>
+      </network>
+
+    l10-node4: |
+      <network>
+        <name>l10-node4</name>
+      </network>
+
     l10-ocp0: |
       <network>
         <name>l10-ocp0</name>
@@ -284,6 +326,16 @@ cifmw_libvirt_manager_configuration:
         <name>l11-node2</name>
       </network>
 
+    l11-node3: |
+      <network>
+        <name>l11-node3</name>
+      </network>
+
+    l11-node4: |
+      <network>
+        <name>l11-node4</name>
+      </network>
+
     l11-ocp0: |
       <network>
         <name>l11-ocp0</name>
@@ -315,6 +367,16 @@ cifmw_libvirt_manager_configuration:
         <name>l20-node2</name>
       </network>
 
+    l20-node3: |
+      <network>
+        <name>l20-node3</name>
+      </network>
+
+    l20-node4: |
+      <network>
+        <name>l20-node4</name>
+      </network>
+
     l20-ocp0: |
       <network>
         <name>l20-ocp0</name>
@@ -345,6 +407,16 @@ cifmw_libvirt_manager_configuration:
         <name>l21-node2</name>
       </network>
 
+    l21-node3: |
+      <network>
+        <name>l21-node3</name>
+      </network>
+
+    l21-node4: |
+      <network>
+        <name>l21-node4</name>
+      </network>
+
     l21-ocp0: |
       <network>
         <name>l21-ocp0</name>
@@ -430,7 +502,7 @@ cifmw_libvirt_manager_configuration:
- ocpbm
- osp_trunk
r0-compute: &r0_compute_def
- amount: 2
+ amount: 0
root_part_id: >-
{{
(cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') |
@@ -454,8 +526,12 @@ cifmw_libvirt_manager_configuration:
- "l00-node1"
- "l01-node1"
r1-compute:
- amount: 2
- root_part_id: "{{ cifmw_root_partition_id }}"
+ amount: 0
+ root_part_id: >-
+ {{
+ (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') |
+ ternary(4, 1)
+ }}
uefi: "{{ cifmw_use_uefi }}"
image_url: "{{ cifmw_discovered_image_url }}"
sha256_image_name: "{{ cifmw_discovered_hash }}"
@@ -475,8 +551,12 @@ cifmw_libvirt_manager_configuration:
- "l10-node1"
- "l11-node1"
r2-compute:
- amount: 2
- root_part_id: "{{ cifmw_root_partition_id }}"
+ amount: 0
+ root_part_id: >-
+ {{
+ (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') |
+ ternary(4, 1)
+ }}
uefi: "{{ cifmw_use_uefi }}"
image_url: "{{ cifmw_discovered_image_url }}"
sha256_image_name: "{{ cifmw_discovered_hash }}"
@@ -579,7 +659,7 @@ cifmw_libvirt_manager_configuration:
- "ocpbm"
- "osp_trunk"
ocp_worker:
- amount: 10
+ amount: 7
uefi: true
root_part_id: 4
admin_user: core
@@ -601,28 +681,19 @@ cifmw_libvirt_manager_configuration:
- # rack0 - ocp worker 1
- "l00-ocp1"
- "l01-ocp1"
- - # rack0 - ocp worker 2
- - "l00-ocp2"
- - "l01-ocp2"
- - # rack1 - ocp worker 3
+ - # rack1 - ocp worker 2
- "l10-ocp0"
- "l11-ocp0"
- - # rack1 - ocp worker 4
+ - # rack1 - ocp worker 3
- "l10-ocp1"
- "l11-ocp1"
- - # rack1 - ocp worker 5
- - "l10-ocp2"
- - "l11-ocp2"
- - # rack2 - ocp worker 6
+ - # rack2 - ocp worker 4
- "l20-ocp0"
- "l21-ocp0"
- - # rack2 - ocp worker 7
+ - # rack2 - ocp worker 5
- "l20-ocp1"
- "l21-ocp1"
- - # rack2 - ocp worker 8
- - "l20-ocp2"
- - "l21-ocp2"
- - # router - ocp_tester (worker 9)
+ - # router - ocp_tester (worker 6)
- "rtr-ocp"
router:
amount: 1
@@ -704,6 +775,8 @@ cifmw_libvirt_manager_configuration:
- "l00-ocp0"
- "l00-ocp1"
- "l00-ocp2"
+ - "l00-node3"
+ - "l00-node4"
- # rack0 - leaf01
- "l01-s0"
- "l01-s1"
@@ -713,6 +786,8 @@ cifmw_libvirt_manager_configuration:
- "l01-ocp0"
- "l01-ocp1"
- "l01-ocp2"
+ - "l01-node3"
+ - "l01-node4"
- # rack1 - leaf10
- "l10-s0"
- "l10-s1"
@@ -722,6 +797,7 @@ cifmw_libvirt_manager_configuration:
- "l10-ocp0"
- "l10-ocp1"
- "l10-ocp2"
+ - "l10-node3"
- # rack1 - leaf11
- "l11-s0"
- "l11-s1"
@@ -731,6 +807,7 @@ cifmw_libvirt_manager_configuration:
- "l11-ocp0"
- "l11-ocp1"
- "l11-ocp2"
+ - "l11-node3"
- # rack2 - leaf20
- "l20-s0"
- "l20-s1"
@@ -740,6 +817,7 @@ cifmw_libvirt_manager_configuration:
- "l20-ocp0"
- "l20-ocp1"
- "l20-ocp2"
+ - "l20-node3"
- # rack2 - leaf21
- "l21-s0"
- "l21-s1"
@@ -749,6 +827,7 @@ cifmw_libvirt_manager_configuration:
- "l21-ocp0"
- "l21-ocp1"
- "l21-ocp2"
+ - "l21-node3"
## devscript support for OCP deploy
cifmw_devscripts_config_overrides:
@@ -763,13 +842,12 @@ cifmw_devscripts_enable_ocp_nodes_host_routing: true
# controller-0 as-is and be consumed by the `deploy-va.sh` script.
# Please note, all paths are on the controller-0, meaning managed by the
# Framework. Please do not edit them!
-_arch_repo: "{{ cifmw_architecture_repo }}"
cifmw_architecture_scenario: bgp-l3-xl
cifmw_kustomize_deploy_architecture_examples_path: "examples/dt/"
cifmw_arch_automation_file: "bgp-l3-xl.yaml"
cifmw_architecture_automation_file: >-
{{
- (_arch_repo,
+ (cifmw_architecture_repo,
'automation/vars',
cifmw_arch_automation_file) |
path_join
@@ -777,7 +855,7 @@ cifmw_architecture_automation_file: >-
cifmw_kustomize_deploy_metallb_source_files: >-
{{
- (_arch_repo,
+ (cifmw_architecture_repo,
'examples/dt/bgp-l3-xl/metallb') |
path_join
}}
@@ -795,9 +873,10 @@ pre_deploy:
extra_vars:
num_racks: "{{ num_racks }}"
router_bool: true
- edpm_nodes_per_rack: 3
+ edpm_nodes_per_rack: 5
ocp_nodes_per_rack: 3
router_uplink_ip: 100.64.10.1
+ cifmw_repo_setup_rhos_release_rpm: "{{ cifmw_repo_setup_rhos_release_rpm }}"
# post_deploy:
# - name: BGP computes configuration
@@ -919,6 +998,13 @@ cifmw_networking_definition:
mtu: 1500
tools:
multus:
+ ipv4_routes:
+ - destination: "172.31.0.0/24"
+ gateway: "172.17.0.1"
+ - destination: "192.168.188.0/24"
+ gateway: "172.17.0.1"
+ - destination: "99.99.0.0/16"
+ gateway: "172.17.0.1"
ranges:
- start: 30
end: 70
@@ -937,6 +1023,13 @@ cifmw_networking_definition:
mtu: 1500
tools:
multus:
+ ipv4_routes:
+ - destination: "172.31.0.0/24"
+ gateway: "172.18.0.1"
+ - destination: "192.168.188.0/24"
+ gateway: "172.18.0.1"
+ - destination: "99.99.0.0/16"
+ gateway: "172.18.0.1"
ranges:
- start: 30
end: 70
diff --git a/scenarios/reproducers/networking-definition-bgp.yml b/scenarios/reproducers/networking-definition-bgp.yml
new file mode 100644
index 0000000000..0d9a11d358
--- /dev/null
+++ b/scenarios/reproducers/networking-definition-bgp.yml
@@ -0,0 +1,266 @@
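+---
+# Networking definition for the BGP spine/leaf layout: a shared ctlplane plus
+# one per-rack control plane network (ctlplaner0/1/2), each with its own
+# gateway and DNS, and per-rack group templates for the compute and networker
+# nodes plus the OCP nodes.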
+cifmw_networking_definition:
+ networks:
+ ctlplane:
+ network: "192.168.125.0/24"
+ gateway: "192.168.125.1"
+ dns:
+ - "192.168.122.1"
+ mtu: 1500
+ tools:
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ netconfig:
+ ranges:
+ - start: 100
+ end: 120
+ - start: 150
+ end: 200
+
+ ctlplaner0:
+ network: "192.168.122.0/24"
+ gateway: "192.168.122.1"
+ dns:
+ - "192.168.122.1"
+ mtu: 1500
+ tools:
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ netconfig:
+ ranges:
+ - start: 100
+ end: 130
+ - start: 150
+ end: 200
+
+ ctlplaner1:
+ network: "192.168.123.0/24"
+ gateway: "192.168.123.1"
+ dns:
+ - "192.168.123.1"
+ mtu: 1500
+ tools:
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ netconfig:
+ ranges:
+ - start: 100
+ end: 130
+ - start: 150
+ end: 170
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ ctlplaner2:
+ network: "192.168.124.0/24"
+ gateway: "192.168.124.1"
+ dns:
+ - "192.168.124.1"
+ mtu: 1500
+ tools:
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ netconfig:
+ ranges:
+ - start: 100
+ end: 130
+ - start: 150
+ end: 170
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+
+ internalapi:
+ network: "172.17.0.0/24"
+ vlan: 20
+ mtu: 1500
+ tools:
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+
+ storage:
+ network: "172.18.0.0/24"
+ vlan: 21
+ mtu: 1500
+ tools:
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+
+ tenant:
+ network: "172.19.0.0/24"
+ vlan: 22
+ mtu: 1500
+ tools:
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+
+ octavia:
+ vlan: 23
+ mtu: 1500
+ network: "172.23.0.0/24"
+ tools:
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+
+ # Not really used, but required by architecture
+ # https://github.com/openstack-k8s-operators/architecture/blob/main/lib/networking/netconfig/kustomization.yaml#L28-L36
+ external:
+ network: "192.168.32.0/20"
+ vlan: 99
+ mtu: 1500
+ tools:
+ netconfig:
+ ranges:
+ - start: 130
+ end: 250
+
+ group-templates:
+ r0-computes:
+ network-template:
+ range:
+ start: 100
+ length: 5
+ networks:
+ ctlplaner0: {}
+ internalapi:
+ trunk-parent: ctlplaner0
+ tenant:
+ trunk-parent: ctlplaner0
+ storage:
+ trunk-parent: ctlplaner0
+ r1-computes:
+ network-template:
+ range:
+ start: 110
+ length: 5
+ networks:
+ ctlplaner1: {}
+ internalapi:
+ trunk-parent: ctlplaner1
+ tenant:
+ trunk-parent: ctlplaner1
+ storage:
+ trunk-parent: ctlplaner1
+ r2-computes:
+ network-template:
+ range:
+ start: 120
+ length: 5
+ networks:
+ ctlplaner2: {}
+ internalapi:
+ trunk-parent: ctlplaner2
+ tenant:
+ trunk-parent: ctlplaner2
+ storage:
+ trunk-parent: ctlplaner2
+ r0-networkers:
+ network-template:
+ range:
+ start: 200
+ length: 5
+ networks:
+ ctlplaner0: {}
+ internalapi:
+ trunk-parent: ctlplaner0
+ tenant:
+ trunk-parent: ctlplaner0
+ storage:
+ trunk-parent: ctlplaner0
+ r1-networkers:
+ network-template:
+ range:
+ start: 210
+ length: 5
+ networks:
+ ctlplaner1: {}
+ internalapi:
+ trunk-parent: ctlplaner1
+ tenant:
+ trunk-parent: ctlplaner1
+ storage:
+ trunk-parent: ctlplaner1
+ r2-networkers:
+ network-template:
+ range:
+ start: 220
+ length: 5
+ networks:
+ ctlplaner2: {}
+ internalapi:
+ trunk-parent: ctlplaner2
+ tenant:
+ trunk-parent: ctlplaner2
+ storage:
+ trunk-parent: ctlplaner2
+ ocps:
+ network-template:
+ range:
+ start: 10
+ length: 10
+ networks: {}
+ ocp_workers:
+ network-template:
+ range:
+ start: 20
+ length: 10
+ networks: {}
+
+ instances:
+ controller-0:
+ networks:
+ ctlplane:
+ ip: "192.168.125.9"