Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
754e3cd
ansible-lint: fix yaml[line-length] in roles
ednxzu Dec 13, 2025
8bf992f
ansible-lint: fix yaml[line-length] in ovn-db
ednxzu Dec 13, 2025
8e2f464
ansible-lint: fix yaml[line-length] in ovs-dpdk
ednxzu Dec 13, 2025
852ebb7
ansible-lint: fix yaml[line-length] in placement
ednxzu Dec 13, 2025
42c391c
ansible-lint: fix yaml[line-length] in roles
ednxzu Dec 13, 2025
4939a11
Allow SNI frontend when using a single haproxy VIP
ednxzu Feb 2, 2026
c17a155
Revert "CI: Pin testtools in yet another place"
mnasiadka Feb 27, 2026
3db2fc3
Merge "Revert "CI: Pin testtools in yet another place""
Mar 2, 2026
677494a
Merge "Allow SNI frontend when using a single haproxy VIP"
Mar 2, 2026
c93e9e6
Merge "ansible-lint: fix yaml[line-length] in roles"
Mar 3, 2026
9c43bd2
Merge "ansible-lint: fix yaml[line-length] in ovn-db"
Mar 3, 2026
c097925
Merge "ansible-lint: fix yaml[line-length] in ovs-dpdk"
Mar 3, 2026
0f1cb4c
Merge "ansible-lint: fix yaml[line-length] in placement"
Mar 3, 2026
b9ab5fe
Merge "ansible-lint: fix yaml[line-length] in roles"
Mar 3, 2026
13af6fc
ansible-lint: fix yaml[line-length] in roles
ednxzu Dec 13, 2025
38ce985
ansible-lint: fix yaml[line-length] in rabbitmq
ednxzu Dec 13, 2025
63387f7
ansible-lint: fix yaml[line-length] in skyline
ednxzu Dec 13, 2025
e522602
ansible-lint: fix yaml[line-length] in tacker
ednxzu Dec 13, 2025
a25f42c
ansible-lint: fix yaml[line-length] in globals
ednxzu Dec 13, 2025
0032455
ansible-lint: fix yaml[line-length] in etcd
ednxzu Dec 13, 2025
baca5f2
ansible-lint: fix yaml[line-length] in trove
ednxzu Jan 7, 2026
ad59c9e
Switch from passlib to internal bcrypt filter
mnasiadka Dec 5, 2025
2f1dc3d
ironic: Use baremetal_node_info
mnasiadka Nov 16, 2023
0646710
Merge "ironic: Use baremetal_node_info"
Mar 3, 2026
dc2d1ac
Merge "Switch from passlib to internal bcrypt filter"
Mar 5, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 25 additions & 3 deletions ansible/roles/etcd/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -46,14 +46,36 @@ etcd_bootstrap_service_environment:
ETCD_INITIAL_CLUSTER_STATE: "existing"
ETCD_INITIAL_ADVERTISE_PEER_URLS: "{{ etcd_peer_internal_endpoint }}"
ETCD_INITIAL_CLUSTER_TOKEN: "{{ etcd_cluster_token }}"
ETCD_INITIAL_CLUSTER: "{% for host in groups['etcd_had_volume_True'] %}{{ hostvars[host].ansible_facts.hostname }}={{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_peer_port }},{% endfor %}{{ ansible_facts.hostname }}={{ etcd_protocol }}://{{ 'api' | kolla_address(inventory_hostname) | put_address_in_context('url') }}:{{ etcd_peer_port }}"
ETCD_INITIAL_CLUSTER: >-
{%- set ns = namespace(cluster_members=[]) -%}
{%- for host in groups['etcd_had_volume_True'] -%}
{%- set member = hostvars[host].ansible_facts.hostname ~ '=' ~
etcd_protocol ~ '://' ~
('api' | kolla_address(host) | put_address_in_context('url')) ~ ':' ~
etcd_peer_port -%}
{%- set ns.cluster_members = ns.cluster_members + [member] -%}
{%- endfor -%}
{%- set current_member = ansible_facts.hostname ~ '=' ~
etcd_protocol ~ '://' ~
('api' | kolla_address(inventory_hostname) | put_address_in_context('url')) ~ ':' ~
etcd_peer_port -%}
{%- set ns.cluster_members = ns.cluster_members + [current_member] -%}
{{ ns.cluster_members | join(',') }}
etcd_bootstrap_cluster_environment:
KOLLA_BOOTSTRAP_STATUS: "bootstrap cluster"
ETCD_INITIAL_CLUSTER_STATE: "new"
ETCD_INITIAL_ADVERTISE_PEER_URLS: "{{ etcd_peer_internal_endpoint }}"
ETCD_INITIAL_CLUSTER_TOKEN: "{{ etcd_cluster_token }}"
ETCD_INITIAL_CLUSTER: "{% for host in groups['etcd'] %}{{ hostvars[host].ansible_facts.hostname }}={{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_peer_port }}{% if not loop.last %},{% endif %}{% endfor %}"

ETCD_INITIAL_CLUSTER: >-
{%- set ns = namespace(cluster_members=[]) -%}
{%- for host in groups['etcd'] -%}
{%- set member = hostvars[host].ansible_facts.hostname ~ '=' ~
etcd_protocol ~ '://' ~
('api' | kolla_address(host) | put_address_in_context('url')) ~ ':' ~
etcd_peer_port -%}
{%- set ns.cluster_members = ns.cluster_members + [member] -%}
{%- endfor -%}
{{ ns.cluster_members | join(',') }}
####################
# Docker
####################
Expand Down
22 changes: 20 additions & 2 deletions ansible/roles/etcd/tasks/bootstrap.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,32 @@

- name: Determine whether a new cluster needs bootstrapping
set_fact:
etcd_bootstrap_cluster: "{% for host in groups['etcd'] %}{{ hostvars[host].ansible_facts.hostname }}={{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_peer_port }}{% if not loop.last %},{% endif %}{% endfor %}"
etcd_bootstrap_cluster: >-
{%- set ns = namespace(cluster_members=[]) -%}
{%- for host in groups['etcd'] -%}
{%- set member = hostvars[host].ansible_facts.hostname ~ '=' ~
etcd_protocol ~ '://' ~
('api' | kolla_address(host) | put_address_in_context('url')) ~ ':' ~
etcd_peer_port -%}
{%- set ns.cluster_members = ns.cluster_members + [member] -%}
{%- endfor -%}
{{ ns.cluster_members | join(',') }}
when: not (etcd_cluster_exists | bool)
changed_when: not (etcd_cluster_exists | bool)
notify: Bootstrap etcd cluster

- name: Determine when new services need bootstrapping
set_fact:
etcd_bootstrap_services: "{% for host in groups['etcd_had_volume_False'] %}{{ hostvars[host].ansible_facts.hostname }}={{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_peer_port }}{% if not loop.last %},{% endif %}{% endfor %}"
etcd_bootstrap_services: >-
{%- set ns = namespace(cluster_members=[]) -%}
{%- for host in groups['etcd_had_volume_False'] -%}
{%- set member = hostvars[host].ansible_facts.hostname ~ '=' ~
etcd_protocol ~ '://' ~
('api' | kolla_address(host) | put_address_in_context('url')) ~ ':' ~
etcd_peer_port -%}
{%- set ns.cluster_members = ns.cluster_members + [member] -%}
{%- endfor -%}
{{ ns.cluster_members | join(',') }}
when:
- etcd_cluster_exists | bool
- groups.etcd_had_volume_False is defined
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -121,8 +121,8 @@ backend {{ service_name }}_back
{% set external = haproxy_service.external | default(false) | bool %}
{# Active/passive defaults to false #}
{% set active_passive = haproxy_service.active_passive | default(false) | bool %}
{# Skip anything that is external when the external vip is not enabled #}
{% if haproxy_service.enabled | bool and (not external or haproxy_enable_external_vip | bool) %}
{# Skip anything that is external, unless the external vip is enabled or single external frontend mode is set. #}
{% if haproxy_service.enabled | bool and (not external or haproxy_enable_external_vip | bool or haproxy_single_external_frontend | bool) %}
{# Here we define variables and their defaults #}
{# services can be listening on a different port than haproxy #}
{% set listen_port = haproxy_service.listen_port | default(haproxy_service.port) %}
Expand Down Expand Up @@ -150,7 +150,18 @@ backend {{ service_name }}_back
{{ userlist_macro(haproxy_name, auth_user, auth_pass) }}
{% endif %}
{% if with_frontend %}
{% if not (external | bool and haproxy_single_external_frontend | bool and mode == 'http') %}
{# In single external frontend mode, skip frontends that conflict with or are subsumed
by the external_frontend:
- External http services are handled by the SNI/map frontend.
- External redirect services with no separate VIP duplicate the internal redirect
on the same IP:port.
- Internal http services on the same port as the SNI frontend conflict with it
when there is no separate external VIP (single VIP means same bind address). #}
{% if not (haproxy_single_external_frontend | bool and (
(external | bool and mode == 'http') or
(external | bool and mode == 'redirect' and not haproxy_enable_external_vip | bool) or
(not external | bool and not haproxy_enable_external_vip | bool and mode == 'http' and haproxy_service.port | string == haproxy_single_external_frontend_public_port | string)
)) %}
{{ frontend_macro(haproxy_name, haproxy_service.port, mode, external,
frontend_http_extra, frontend_redirect_extra, frontend_tcp_extra) }}
{% endif %}
Expand Down
45 changes: 16 additions & 29 deletions ansible/roles/ironic/tasks/upgrade.yml
Original file line number Diff line number Diff line change
Expand Up @@ -50,49 +50,36 @@
{{ volumes_dir }}/ironic_dhcp_hosts/_data
when: container_volume_facts.volumes['ironic_inspector_dhcp_hosts'] is defined

- name: Get Ironic API container facts
become: true
vars:
container_name: "{{ ironic_services['ironic-api'].container_name }}"
kolla_container_facts:
action: get_containers
container_engine: "{{ kolla_container_engine }}"
name:
- "{{ container_name }}"
check_mode: false
register: container_facts

- name: Wait for Ironic nodes not to wait
become: true
vars:
container_name: "{{ ironic_services['ironic-api'].container_name }}"
command: >
{{ kolla_container_engine }} exec kolla_toolbox openstack
--os-interface {{ openstack_interface }}
--os-auth-url {{ openstack_auth.auth_url }}
--os-username {{ openstack_auth.username }}
--os-password {{ openstack_auth.password }}
--os-identity-api-version 3
--os-user-domain-name {{ openstack_auth.user_domain_name }}
--os-system-scope "all"
--os-region-name {{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }}{% endif %}
baremetal node list --format json --column "Provisioning State"
ironic_auth:
auth_url: "{{ openstack_auth.auth_url }}"
username: "{{ openstack_auth.username }}"
password: "{{ openstack_auth.password }}"
user_domain_name: "{{ openstack_auth.user_domain_name }}"
system_scope: "all"
kolla_toolbox:
container_engine: "{{ kolla_container_engine }}"
module_name: "openstack.cloud.baremetal_node_info"
module_args:
region_name: "{{ openstack_region_name }}"
auth: "{{ ironic_auth }}"
interface: "{{ openstack_interface }}"
cacert: "{{ openstack_cacert }}"
register: ironic_nodes
changed_when: false
retries: 10
delay: 30
until:
- ironic_nodes is success
- (ironic_nodes.stdout |
from_json |
map(attribute='Provisioning State') |
- (ironic_nodes.nodes |
map(attribute='provision_state') |
select('search', '\\bwait\\b') |
length) == 0
run_once: true
when:
- not ironic_upgrade_skip_wait_check | bool
- container_facts.containers[container_name] is defined

- name: Import tasks from config.yml
import_tasks: config.yml
Expand Down
14 changes: 10 additions & 4 deletions ansible/roles/ovn-controller/tasks/setup-ovs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,21 @@
# Format: physnet1:br1,physnet2:br2
ovn_mappings: "{{ neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}"
# Format: physnet1:00:11:22:33:44:55,physnet2:00:11:22:33:44:56
ovn_macs: "{% for physnet, bridge in neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) %}{{ physnet }}:{{ ovn_base_mac | random_mac(seed=inventory_hostname
+ bridge) }}{% if not loop.last %},{% endif %}{% endfor %}"
ovn_macs: >-
{%- set ns = namespace(macs=[]) -%}
{%- for physnet, bridge in neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) -%}
{%- set mac_entry = physnet ~ ':' ~ (ovn_base_mac | random_mac(seed=inventory_hostname ~ bridge)) -%}
{%- set ns.macs = ns.macs + [mac_entry] -%}
{%- endfor -%}
{{ ns.macs | join(',') }}
ovn_cms_opts: >-
{{
(
[]
+ ( ['enable-chassis-as-gw'] if inventory_hostname in groups['ovn-controller-network'] else [] )
+ ( ['availability-zones=' + neutron_ovn_availability_zones | join(':')] if inventory_hostname in groups['ovn-controller-network'] and neutron_ovn_availability_zones
else [] )
+ ( ['availability-zones=' + neutron_ovn_availability_zones | join(':')]
if inventory_hostname in groups['ovn-controller-network'] and neutron_ovn_availability_zones
else [] )
) | join(',')
}}
become: true
Expand Down
16 changes: 12 additions & 4 deletions ansible/roles/ovn-db/tasks/bootstrap-initial.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,16 @@
block:
- name: Set bootstrap args fact for NB (new cluster)
set_fact:
ovn_nb_db_bootstrap_args: "{% if groups['ovn-nb-db'] | length > 1 and inventory_hostname != groups['ovn-nb-db'][0] %} --db-nb-cluster-remote-addr={{ 'api' | kolla_address(groups['ovn-nb-db'][0]) | put_address_in_context('url') }} {% endif %}"
ovn_nb_db_bootstrap_args: >-
{% if groups['ovn-nb-db'] | length > 1 and inventory_hostname != groups['ovn-nb-db'][0] %}
--db-nb-cluster-remote-addr={{ 'api' | kolla_address(groups['ovn-nb-db'][0]) | put_address_in_context('url') }} {% endif %}
when: groups['ovn-nb-db_leader'] is not defined and groups['ovn-nb-db_follower'] is not defined

- name: Set bootstrap args fact for SB (new cluster)
set_fact:
ovn_sb_db_bootstrap_args: "{% if groups['ovn-sb-db'] | length > 1 and inventory_hostname != groups['ovn-sb-db'][0] %} --db-sb-cluster-remote-addr={{ 'api' | kolla_address(groups['ovn-sb-db'][0]) | put_address_in_context('url') }} {% endif %}"
ovn_sb_db_bootstrap_args: >-
{% if groups['ovn-sb-db'] | length > 1 and inventory_hostname != groups['ovn-sb-db'][0] %}
--db-sb-cluster-remote-addr={{ 'api' | kolla_address(groups['ovn-sb-db'][0]) | put_address_in_context('url') }} {% endif %}
when: groups['ovn-sb-db_leader'] is not defined and groups['ovn-sb-db_follower'] is not defined

- name: Check NB cluster status
Expand Down Expand Up @@ -60,12 +64,16 @@

- name: Set bootstrap args fact for NB (new member)
set_fact:
ovn_nb_db_bootstrap_args: "--db-nb-cluster-remote-addr={{ 'api' | kolla_address(groups.get('ovn-nb-db_leader', groups['ovn-nb-db'])[0] | default()) | put_address_in_context('url') }}"
ovn_nb_db_bootstrap_args: >-
--db-nb-cluster-remote-addr={{ 'api' | kolla_address(groups.get(
'ovn-nb-db_leader', groups['ovn-nb-db'])[0] | default()) | put_address_in_context('url') }}
when: inventory_hostname in groups.get('ovn-nb-db_had_volume_False', '') and groups['ovn-nb-db_leader'] is defined

- name: Set bootstrap args fact for SB (new member)
set_fact:
ovn_sb_db_bootstrap_args: "--db-sb-cluster-remote-addr={{ 'api' | kolla_address(groups.get('ovn-sb-db_leader', groups['ovn-sb-db'])[0] | default()) | put_address_in_context('url') }}"
ovn_sb_db_bootstrap_args: >-
--db-sb-cluster-remote-addr={{ 'api' | kolla_address(groups.get(
'ovn-sb-db_leader', groups['ovn-sb-db'])[0] | default()) | put_address_in_context('url') }}
when: inventory_hostname in groups.get('ovn-sb-db_had_volume_False', '') and groups['ovn-sb-db_leader'] is defined

- name: Import tasks from config.yml
Expand Down
8 changes: 6 additions & 2 deletions ansible/roles/ovs-dpdk/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -41,9 +41,13 @@ ovsdpdk_services:
ovs_bridge_mappings: "{{ neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}"
# Format: eth1:br1,eth2:br2
ovs_port_mappings: "{{ neutron_external_interface.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}"
tunnel_interface_network: "{{ hostvars[inventory_hostname].ansible_facts[dpdk_tunnel_interface]['ipv4']['network'] }}/{{ hostvars[inventory_hostname].ansible_facts[dpdk_tunnel_interface]['ipv4']['netmask'] }}"
# Network of the DPDK tunnel interface in "network/netmask" form
# (e.g. "192.0.2.0/255.255.255.0"); consumed by tunnel_interface_cidr via
# ansible.utils.ipaddr('prefix'). The folded scalar joins the two lines with a
# single space (inside the Jinja expression, so harmless). NOTE: the stray
# trailing '"' left over from the pre-refactor quoted scalar has been removed —
# it was appended verbatim to the rendered value and broke the CIDR lookup.
tunnel_interface_network: >-
  {{ hostvars[inventory_hostname].ansible_facts[dpdk_tunnel_interface]['ipv4']['network'] }}/{{
  hostvars[inventory_hostname].ansible_facts[dpdk_tunnel_interface]['ipv4']['netmask'] }}
tunnel_interface_cidr: "{{ dpdk_tunnel_interface_address }}/{{ tunnel_interface_network | ansible.utils.ipaddr('prefix') }}"
ovs_cidr_mappings: "{% if neutron_bridge_name.split(',') | length != 1 %} {neutron_bridge_name.split(',')[0]}:{{ tunnel_interface_cidr }} {% else %} {{ neutron_bridge_name }}:{{ tunnel_interface_cidr }} {% endif %}"
ovs_cidr_mappings: >-
{% if neutron_bridge_name.split(',') | length != 1 %} {{ neutron_bridge_name.split(',')[0] }}:{{ tunnel_interface_cidr }}
{% else %} {{ neutron_bridge_name }}:{{ tunnel_interface_cidr }} {% endif %}
ovs_mem_channels: 4
ovs_socket_mem: 1024
ovs_hugepage_mountpoint: /dev/hugepages
Expand Down
5 changes: 4 additions & 1 deletion ansible/roles/placement/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,10 @@ placement_api_enable_healthchecks: "{{ enable_container_healthchecks }}"
placement_api_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
placement_api_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
placement_api_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
placement_api_healthcheck_test: ["CMD-SHELL", "healthcheck_curl {{ 'https' if placement_enable_tls_backend | bool else 'http' }}://{{ api_interface_address | put_address_in_context('url') }}:{{ placement_api_listen_port }}"]
placement_api_healthcheck_test:
- "CMD-SHELL"
- healthcheck_curl {{ 'https' if placement_enable_tls_backend | bool else 'http' }}://{{
api_interface_address | put_address_in_context('url') }}:{{ placement_api_listen_port }}
placement_api_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
placement_api_healthcheck:
interval: "{{ placement_api_healthcheck_interval }}"
Expand Down
6 changes: 5 additions & 1 deletion ansible/roles/prometheus-node-exporters/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -54,5 +54,9 @@ prometheus_extra_volumes: "{{ default_extra_volumes }}"
prometheus_node_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
prometheus_cadvisor_extra_volumes: "{{ prometheus_extra_volumes }}"

prometheus_cadvisor_cmdline_extras: "--docker_only --store_container_labels=false --disable_metrics=percpu,referenced_memory,cpu_topology,resctrl,udp,advtcp,sched,hugetlb,memory_numa,tcp,process --housekeeping_interval={{ prometheus_scrape_interval }}"
prometheus_cadvisor_cmdline_extras: >-
--docker_only
--store_container_labels=false
--disable_metrics=percpu,referenced_memory,cpu_topology,resctrl,udp,advtcp,sched,hugetlb,memory_numa,tcp,process
--housekeeping_interval={{ prometheus_scrape_interval }}
prometheus_node_exporter_cmdline_extras: ""
Loading