diff --git a/roles/devscripts/tasks/130_prep_host.yml b/roles/devscripts/tasks/130_prep_host.yml index a98cb96fe7..38594ac6d9 100644 --- a/roles/devscripts/tasks/130_prep_host.yml +++ b/roles/devscripts/tasks/130_prep_host.yml @@ -40,6 +40,11 @@ - bootstrap ansible.builtin.import_tasks: 135_patch_src.yml +- name: Patch dev-scripts IPv6 VIPs DNS lookup. + tags: + - bootstrap + ansible.builtin.import_tasks: 135a_patch_ipv6_vips.yml + - name: Copy token and pull secret to repo. tags: - bootstrap diff --git a/roles/devscripts/tasks/135a_patch_ipv6_vips.yml b/roles/devscripts/tasks/135a_patch_ipv6_vips.yml new file mode 100644 index 0000000000..a6aa5646f5 --- /dev/null +++ b/roles/devscripts/tasks/135a_patch_ipv6_vips.yml @@ -0,0 +1,56 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# Fix dev-scripts bug where API_VIPS and INGRESS_VIPS use IPv4 DNS lookup even when IPv6 is configured +# This patches the set_api_and_ingress_vip function in network.sh to use AAAA records +# when IP_STACK is v6 or v6v4 (IPv6-only or IPv6-primary dual-stack deployments) +- name: Fix API_VIPS IPv6 DNS lookup in dev-scripts + when: + - "'manage_br_bridge' in cifmw_devscripts_config" + - cifmw_devscripts_config['manage_br_bridge'] == 'n' + - cifmw_devscripts_config.get('ip_stack', 'v4') == 'v6' or + cifmw_devscripts_config.get('external_subnet_v6') is defined + ansible.builtin.replace: + path: "{{ cifmw_devscripts_repo_dir }}/network.sh" + regexp: '^(\s+)API_VIPS=\$\(dig \+noall \+answer "api\.\$\{CLUSTER_DOMAIN\}"\s+\| awk ''\{print \$NF\}''\)$' + replace: |2 + if [[ "${IP_STACK}" == "v6" || "${IP_STACK}" == "v6v4" ]]; then + API_VIPS=$(dig -t AAAA +noall +answer "api.${CLUSTER_DOMAIN}" | awk '{print $NF}') + else + API_VIPS=$(dig +noall +answer "api.${CLUSTER_DOMAIN}" | awk '{print $NF}') + fi + owner: "{{ cifmw_devscripts_user }}" + group: "{{ cifmw_devscripts_user }}" + mode: "0644" + +- name: Fix INGRESS_VIPS IPv6 DNS lookup in dev-scripts + when: + - "'manage_br_bridge' in cifmw_devscripts_config" + - cifmw_devscripts_config['manage_br_bridge'] == 'n' + - cifmw_devscripts_config.get('ip_stack', 'v4') == 'v6' or + cifmw_devscripts_config.get('external_subnet_v6') is defined + ansible.builtin.replace: + path: "{{ cifmw_devscripts_repo_dir }}/network.sh" + regexp: '^(\s+)INGRESS_VIPS=\$\(dig \+noall \+answer "test\.apps\.\$\{CLUSTER_DOMAIN\}"\s+\| awk ''\{print \$NF\}''\)$' + replace: |2 + if [[ "${IP_STACK}" == "v6" || "${IP_STACK}" == "v6v4" ]]; then + INGRESS_VIPS=$(dig -t AAAA +noall +answer "test.apps.${CLUSTER_DOMAIN}" | awk '{print $NF}') + else + INGRESS_VIPS=$(dig +noall +answer "test.apps.${CLUSTER_DOMAIN}" | awk '{print $NF}') + fi + owner: "{{ cifmw_devscripts_user }}" + group: "{{ cifmw_devscripts_user }}" + mode: "0644" diff --git 
a/roles/libvirt_manager/tasks/generate_networking_data.yml b/roles/libvirt_manager/tasks/generate_networking_data.yml index c464a0867e..3f69c76cb1 100644 --- a/roles/libvirt_manager/tasks/generate_networking_data.yml +++ b/roles/libvirt_manager/tasks/generate_networking_data.yml @@ -182,12 +182,12 @@ networks: {{ _lnet_data.name | replace('cifmw_', '') }}: {% if _lnet_data.ranges[0].start_v4 is defined and _lnet_data.ranges[0].start_v4 %} - {% set net_4 = _lnet_data.ranges[0].start_v4 | ansible.utils.ipsubnet(_lnet_data.ranges[0].prefix_length_v4) %} - network-v4: {{ net_4}} + {%- set net_4 = _lnet_data.ranges[0].start_v4 | ansible.utils.ipsubnet(_lnet_data.ranges[0].prefix_length_v4) %} + network-v4: '{{ net_4 }}' {% endif %} {% if _lnet_data.ranges[0].start_v6 is defined and _lnet_data.ranges[0].start_v6 %} - {% set net_6 = _lnet_data.ranges[0].start_v6 | ansible.utils.ipsubnet(_lnet_data.ranges[0].prefix_length_v6) %} - network-v6: {{ net_6 }} + {%- set net_6 = _lnet_data.ranges[0].start_v6 | ansible.utils.ipsubnet(_lnet_data.ranges[0].prefix_length_v6) %} + network-v6: '{{ net_6 }}' {% endif %} group-templates: {% for group in _cifmw_libvirt_manager_layout.vms.keys() if group != 'controller' and @@ -202,12 +202,12 @@ {% if cifmw_networking_definition['group-templates'][_gr ~ 's']['network-template'] is undefined %} {% if net_4 is defined %} range-v4: - start: {{ net_4 | ansible.utils.nthhost(ns.ip_start | int ) }} + start: '{{ net_4 | ansible.utils.nthhost(ns.ip_start | int ) }}' length: {{ _cifmw_libvirt_manager_layout.vms[group].amount | default(1) }} {% endif %} {% if net_6 is defined %} range-v6: - start: {{ net_6 | ansible.utils.nthhost(ns.ip_start | int) }} + start: '{{ net_6 | ansible.utils.nthhost(ns.ip_start | int) }}' length: {{ _cifmw_libvirt_manager_layout.vms[group].amount | default(1) }} {% endif %} {% set ns.ip_start = ns.ip_start|int + (_cifmw_libvirt_manager_layout.vms[group].amount | default(1) | int ) + 1 %} @@ -220,12 +220,12 @@ {{ 
cifmw_libvirt_manager_pub_net }}: {% if net_4 is defined and cifmw_networking_definition['group-templates']['baremetals']['network-template'] is undefined %} range-v4: - start: {{ net_4 | ansible.utils.nthhost(ns.ip_start) }} + start: '{{ net_4 | ansible.utils.nthhost(ns.ip_start) }}' length: {{ cifmw_baremetal_hosts | length }} {% endif %} {% if net_6 is defined and cifmw_networking_definition['group-templates']['baremetals']['network-template'] is undefined %} range-v6: - start: {{ net_6 | ansible.utils.nthhost(ns.ip_start) }} + start: '{{ net_6 | ansible.utils.nthhost(ns.ip_start) }}' length: {{ cifmw_baremetal_hosts | length }} {% endif %} {% endif %} @@ -235,10 +235,10 @@ networks: {{ _lnet_data.name | replace('cifmw_', '') }}: {% if net_4 is defined %} - ip-v4: "{{ net_4 | ansible.utils.nthhost(9) }}" + ip-v4: '{{ net_4 | ansible.utils.nthhost(9) }}' {% endif %} {% if net_6 is defined %} - ip-v6: "{{ net_6 | ansible.utils.nthhost(9) }}" + ip-v6: '{{ net_6 | ansible.utils.nthhost(9) }}' {% endif %} {% endif %} block: diff --git a/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets-ipv6.README.md b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets-ipv6.README.md new file mode 100644 index 0000000000..afbce0ac16 --- /dev/null +++ b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets-ipv6.README.md @@ -0,0 +1,256 @@ +# dt-nfv-ovs-dpdk-sriov-2nodesets-ipv6.yml + +## Overview + +This reproducer is the **IPv6 variant** of `dt-nfv-ovs-dpdk-sriov-2nodesets.yml`. It configures an NFV deployment with OVS-DPDK and SR-IOV support using **IPv6 as the primary IP version** across all OpenStack networks, while maintaining dual-stack configuration on hypervisor bridges for compatibility. 
+ +## Key Features + +- **IPv6 Primary**: All OpenStack networks use IPv6 addressing +- **Dual-Stack ctlplane**: Both IPv4 and IPv6 IPAM for ctlplane network +- **2 NodeSets**: Supports deployments with two different EDPM nodesets (different hardware) +- **OVS-DPDK**: High-performance DPDK-accelerated networking +- **SR-IOV**: Direct hardware access for VMs +- **OpenShift IPv6**: OCP configured with IPv6 networking + +## Network Configuration + +### IPv6 Network Ranges + +| Network | IPv6 Range | VLAN | Purpose | +|--------------|-------------------------|------|---------| +| ctlplane | 2620:cf:cf:aaaa::/64 | - | Control plane (dual-stack) | +| internalapi | 2620:cf:cf:bbbb::/64 | 20 | Internal API | +| storage | 2620:cf:cf:cccc::/64 | 21 | Storage network | +| tenant | 2620:cf:cf:eeee::/64 | 22 | Tenant/overlay network | +| storagemgmt | 2620:cf:cf:dddd::/64 | 23 | Storage management | +| external | 2620:cf:cf:cf02::/64 | - | External network | + +### OpenShift IPv6 Networks + +| Network | IPv6 Range | Purpose | +|------------------|--------------------------|---------| +| provisioning | fd00:1101::/64 | Provisioning network | +| external_subnet | 2620:cf:cf:cf02::/64 | External access | +| service_subnet | 2620:cf:cf:cf03::/112 | OCP services | +| cluster_subnet | fd01::/48 | OCP pod network | + +## Key Differences from IPv4 Version + +### Added Configuration + +1. **Primary IP Version** + ```yaml + cifmw_ci_gen_kustomize_values_primary_ip_version: 6 + ``` + +2. **IPv6 Networking Definition** + - All networks configured with IPv6 ranges + - VLANs preserved from original (20-23) + - MTU settings maintained + +3. **OpenShift IPv6 Configuration** + ```yaml + cifmw_devscripts_config_overrides: + ip_stack: "v6" + provisioning_network: "fd00:1101::/64" + external_subnet_v6: "2620:cf:cf:cf02::/64" + service_subnet_v6: "2620:cf:cf:cf03::/112" + cluster_subnet_v6: "fd01::/48" + ``` + +4. 
**Dual-Stack ctlplane IPAM** + ```yaml + ipam: + type: whereabouts + ipRanges: + - range: "192.168.122.0/24" # IPv4 (compatibility) + - range: "2620:cf:cf:aaaa::/64" # IPv6 (primary) + ``` + +### Unchanged Configuration + +- VM definitions (controller, OCP nodes) +- Libvirt network topology +- BMH configuration +- LVMS setup +- EDPM image configuration + +## Usage + +### In Zuul Jobs + +This reproducer is used by the IPv6 variant of NFV jobs: + +```yaml +- job: + name: uni08theta-rhel9-rhoso18.0-nfv-ovs-dpdk-sriov-ipv6-trunk-patches-2nodesets + vars: + variable_files: + - "{{ ci_framework_src }}/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets-ipv6.yml" + - ... (other variable files) +``` + +### Manual Deployment + +```bash +# Set up your environment +cd ~/ci-framework + +# Create your custom variables file +cat > my-ipv6-vars.yml <<EOF +cifmw_devscripts_ci_token: <your_ci_token> +cifmw_devscripts_pull_secret: <your_pull_secret> +hypervisor: <your_hypervisor_host> +EOF + +# Deploy using this reproducer +ansible-playbook deploy-edpm.yml \ + -e @scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets-ipv6.yml \ + -e @my-ipv6-vars.yml +``` + +## Infrastructure Requirements + +### Switch Configuration + +The physical switch must support IPv6 on all VLANs: + +``` +# Enable IPv6 globally +ipv6 unicast-routing + +# Configure VLAN IPv6 gateways +interface Vlan20 # internalapi + ipv6 address 2620:cf:cf:bbbb::1/64 + ipv6 nd managed-config-flag + ipv6 nd prefix 2620:cf:cf:bbbb::/64 + +interface Vlan21 # storage + ipv6 address 2620:cf:cf:cccc::1/64 + ipv6 nd managed-config-flag + ipv6 nd prefix 2620:cf:cf:cccc::/64 + +interface Vlan22 # tenant + ipv6 address 2620:cf:cf:eeee::1/64 + ipv6 nd managed-config-flag + ipv6 nd prefix 2620:cf:cf:eeee::/64 + +interface Vlan23 # storagemgmt + ipv6 address 2620:cf:cf:dddd::1/64 + ipv6 nd managed-config-flag + ipv6 nd prefix 2620:cf:cf:dddd::/64 + +# Enable MLD snooping (IPv6 multicast) +ipv6 mld snooping +``` + +### Hypervisor Requirements + +- **IPv6 enabled**: `sysctl net.ipv6.conf.all.disable_ipv6=0` +- **IPv6
forwarding**: `sysctl net.ipv6.conf.all.forwarding=1` +- **Bridges with IPv6**: All bridges (ocpbm, ocppr, osp_external, osp_trunk) must have IPv6 addresses +- **Router Advertisements**: Enable RA if using SLAAC + +### DNS Requirements + +- DNS server with IPv6 support (AAAA records) +- Reverse DNS for IPv6 (ip6.arpa zones) +- DNS reachable at 2620:cf:cf:aaaa::1 + +## Topology + +``` + ┌─────────────────┐ + │ Hypervisor │ + │ (dual-stack) │ + └────────┬────────┘ + │ + ┌────────────────────┼────────────────────┐ + │ │ │ + ┌────▼────┐ ┌────▼────┐ ┌────▼────┐ + │ OCP │ │ OCP │ │ OCP │ + │ Master │ │ Master │ │ Master │ + │ #1 │ │ #2 │ │ #3 │ + └─────────┘ └─────────┘ └─────────┘ + │ │ │ + └────────────────────┼────────────────────┘ + │ + ┌────────▼────────┐ + │ controller-0 │ + │ (IPv6: aaaa::9) │ + └────────┬────────┘ + │ + ┌────────────────────┼────────────────────┐ + │ │ │ + ┌────▼────┐ ┌────▼────┐ ┌────▼────┐ + │ Compute │ │ Compute │ │ Compute │ + │ Node │ │ Node │ │ Node │ + │ (Set 1) │ │ (Set 2) │ │ (Set 2) │ + └─────────┘ └─────────┘ └─────────┘ + OVS-DPDK OVS-DPDK OVS-DPDK + SR-IOV SR-IOV SR-IOV +``` + +## Verification + +After deployment, verify IPv6 configuration: + +```bash +# On hypervisor - check IPv6 addresses +ip -6 addr show osp_trunk +# Should show: 2620:cf:cf:aaaa::1/64 (if configured by nmstate) + +# Test connectivity to controller-0 +ping6 2620:cf:cf:aaaa::9 + +# On compute nodes - check IPv6 configuration +ssh heat-admin@ +ip -6 addr show +# Should show IPv6 addresses on bond interfaces + +# Verify OpenStack endpoints use IPv6 +openstack endpoint list +# Should show IPv6 addresses like [2620:cf:cf:aaaa::X] +``` + +## Troubleshooting + +### Common Issues + +**Issue**: Computes can't reach control plane +- **Check**: DHCPv6 or SLAAC working on ctlplane +- **Verify**: `tcpdump -i osp_trunk -n icmp6` + +**Issue**: Services not listening on IPv6 +- **Check**: `cifmw_ci_gen_kustomize_values_primary_ip_version: 6` is set +- **Verify**: `ss -tlnp6` on 
controller + +**Issue**: DNS resolution fails +- **Check**: AAAA records exist for all services +- **Verify**: `dig AAAA keystone.openstack.svc` + +**Issue**: VLANs not working +- **Check**: Switch has IPv6 enabled on VLANs 20-23 +- **Verify**: MLD snooping enabled + +## Related Files + +- **Original IPv4 reproducer**: `dt-nfv-ovs-dpdk-sriov-2nodesets.yml` +- **Job definition**: `zuul.d/ci-framework-rhoso-18-rhel9-trunk-nfv-jobs.yaml` +- **IPv6 UNI scenario example**: `scenarios/uni/uni04delta-ipv6/` + +## References + +- [IPv6 Networking in OpenStack](https://docs.openstack.org/neutron/latest/admin/config-ipv6.html) +- [OVS-DPDK Documentation](https://docs.openvswitch.org/en/latest/topics/dpdk/) +- [SR-IOV Configuration](https://docs.openstack.org/neutron/latest/admin/config-sriov.html) +- CI Framework IPv6 examples + +## Author + +Generated for NFV IPv6 testing scenarios. + +## Version History + +- **1.0** (2025-01-06): Initial IPv6 variant created based on dt-nfv-ovs-dpdk-sriov-2nodesets.yml diff --git a/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets-ipv6.yml b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets-ipv6.yml new file mode 100644 index 0000000000..52741280f6 --- /dev/null +++ b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets-ipv6.yml @@ -0,0 +1,125 @@ +--- +# IPv6 variant of dt-nfv-ovs-dpdk-sriov-2nodesets +# This reproducer configures NFV OVS-DPDK SR-IOV with 2 different nodesets using IPv6 as primary IP + +cifmw_architecture_scenario: "ovs-dpdk-sriov-2nodesets" + +# Automation section. Most of those parameters will be passed to the +# controller-0 as-is and be consumed by the `deploy-va.sh` script. +# Please note, all paths are on the controller-0, meaning managed by the +# Framework. Please do not edit them! 
+_arch_repo: "{{ cifmw_architecture_repo }}" + +# Enable IPv6 as primary IP version +cifmw_ci_gen_kustomize_values_primary_ip_version: 6 + +# HERE if you want to override kustomization, you can uncomment this parameter +# and push the data structure you want to apply. +# cifmw_architecture_user_kustomize: +# stage_0: +# 'network-values': +# data: +# starwars: Obiwan + +# HERE, if you want to stop the deployment loop at any stage, you can uncomment +# the following parameter and update the value to match the stage you want to +# reach. Known stages are: +# pre_kustomize_stage_INDEX +# pre_apply_stage_INDEX +# post_apply_stage_INDEX +# +# cifmw_deploy_architecture_stopper: + +# Libvirt network configuration - maintain compatibility +cifmw_libvirt_manager_net_prefix_add: false +cifmw_libvirt_manager_fixed_networks: + - ocpbm + - ocppr + - osp_external + - osp_trunk + +cifmw_libvirt_manager_configuration: + networks: + ocpbm: | + + ocpbm + + + + ocppr: | + + ocppr + + + + osp_external: | + + osp_external + + + + osp_trunk: | + + osp_trunk + + + + vms: + controller: + uefi: "{{ cifmw_use_uefi }}" + root_part_id: "{{ cifmw_root_partition_id }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - ocpbm + - osp_trunk + ocp: + amount: 3 + uefi: true + root_part_id: 4 + admin_user: core + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "ocp_master" + disksize: "100" + extra_disks_num: 3 + extra_disks_size: "50G" + cpus: 10 + memory: 32 + nets: + - ocppr + - ocpbm + - osp_trunk + - osp_external + +# Note: with that extra_network_names "osp_trunk", we instruct +# devscripts role to create a new network, and associate it to +# the OCP nodes. This one is a "private network", and will hold +# the VLANs used for network isolation. 
+ +# Please create a custom env file to provide: +# cifmw_devscripts_ci_token: +# cifmw_devscripts_pull_secret: + +# Baremetal host configuration +cifmw_config_bmh: true + +# BMH are deployed in a different NS than the secret OSP BMO +# references in each BMH. Metal3 requires the referenced +# secrets to be in the same NS or be allowed to access them +cifmw_devscripts_config_overrides_patch_bmo_watch_all_namespaces: + bmo_watch_all_namespaces: true + +# Use EDPM image for computes +cifmw_update_containers_edpm_image_url: "{{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/edpm-hardened-uefi:{{ cifmw_update_containers_tag }}" + +# Set Logical Volume Manager Storage by default for local storage +cifmw_use_lvms: true +cifmw_lvms_disk_list: + - /dev/vda + - /dev/vdb + - /dev/vdc