@type opensearch
- host {{ opensearch_address }}
+ hosts {% for host in groups['opensearch'] %}{{ 'api' | kolla_address(host) }}{% if not loop.last %},{% endif %}{% endfor %}
port {{ opensearch_port }}
scheme {{ fluentd_opensearch_scheme }}
-{% if fluentd_opensearch_path != '' %}
- path {{ fluentd_opensearch_path }}
-{% endif %}
-{% if fluentd_opensearch_scheme == 'https' %}
- ssl_version {{ fluentd_opensearch_ssl_version }}
- ssl_verify {{ fluentd_opensearch_ssl_verify }}
-{% if fluentd_opensearch_cacert | length > 0 %}
- ca_file {{ fluentd_opensearch_cacert }}
-{% endif %}
-{% endif %}
-{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != ''%}
+{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != '' -%}
user {{ fluentd_opensearch_user }}
password {{ fluentd_opensearch_password }}
-{% endif %}
+{%- endif %}
logstash_format true
logstash_prefix {{ opensearch_log_index_prefix }}
reconnect_on_error true
-{% if match_pattern != 'retry_os' %}
+{% if match_pattern != 'retry_os' -%}
retry_tag retry_os
-{% endif %}
+{%- endif %}
request_timeout {{ fluentd_opensearch_request_timeout }}
suppress_type_name true
bulk_message_request_threshold {{ fluentd_bulk_message_request_threshold }}
@type file
-{% if match_pattern == 'retry_os' %}
+{% if match_pattern == 'retry_os' -%}
path /var/lib/fluentd/data/opensearch.buffer/openstack_retry.*
{% else %}
path /var/lib/fluentd/data/opensearch.buffer/openstack.*
-{% endif %}
+{%- endif %}
flush_interval 15s
chunk_limit_size {{ fluentd_buffer_chunk_limit_size }}
diff --git a/ansible/roles/glance/tasks/config.yml b/ansible/roles/glance/tasks/config.yml
index d268e92d32..52ee34a973 100644
--- a/ansible/roles/glance/tasks/config.yml
+++ b/ansible/roles/glance/tasks/config.yml
@@ -30,7 +30,7 @@
glance_policy_file: "{{ glance_policy.results.0.stat.path | basename }}"
glance_policy_file_path: "{{ glance_policy.results.0.stat.path }}"
when:
- - glance_policy.results
+ - glance_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
diff --git a/ansible/roles/gnocchi/defaults/main.yml b/ansible/roles/gnocchi/defaults/main.yml
index e0d3f3c6ad..d185fef648 100644
--- a/ansible/roles/gnocchi/defaults/main.yml
+++ b/ansible/roles/gnocchi/defaults/main.yml
@@ -8,6 +8,7 @@ gnocchi_services:
volumes: "{{ gnocchi_api_default_volumes + gnocchi_api_extra_volumes }}"
dimensions: "{{ gnocchi_api_dimensions }}"
healthcheck: "{{ gnocchi_api_healthcheck }}"
+ wsgi: "gnocchi.wsgi.api:application"
haproxy:
gnocchi_api:
enabled: "{{ enable_gnocchi }}"
@@ -199,3 +200,8 @@ gnocchi_database_enable_tls_internal: "{{ database_enable_tls_internal | bool }}
# Copy certificates
###################
gnocchi_copy_certs: "{{ kolla_copy_ca_into_containers | bool or gnocchi_database_enable_tls_internal | bool }}"
+
+####################
+# WSGI
+####################
+gnocchi_wsgi_provider: "uwsgi"
diff --git a/ansible/roles/gnocchi/tasks/config.yml b/ansible/roles/gnocchi/tasks/config.yml
index c8e6897aee..b978044e29 100644
--- a/ansible/roles/gnocchi/tasks/config.yml
+++ b/ansible/roles/gnocchi/tasks/config.yml
@@ -30,7 +30,7 @@
gnocchi_policy_file: "{{ gnocchi_policy.results.0.stat.path | basename }}"
gnocchi_policy_file_path: "{{ gnocchi_policy.results.0.stat.path }}"
when:
- - gnocchi_policy.results
+ - gnocchi_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
@@ -67,10 +67,26 @@
dest: "{{ node_config_directory }}/{{ item }}/wsgi-gnocchi.conf"
mode: "0660"
become: true
- when: service | service_enabled_and_mapped_to_host
+ when:
+ - gnocchi_wsgi_provider == "apache"
+ - service | service_enabled_and_mapped_to_host
with_items:
- "gnocchi-api"
+- name: "Configure uWSGI for Gnocchi"
+ include_role:
+ name: service-uwsgi-config
+ vars:
+ project_services: "{{ gnocchi_services }}"
+ service: "{{ gnocchi_services['gnocchi-api'] }}"
+ service_name: "gnocchi-api"
+ service_uwsgi_config_http_port: "{{ gnocchi_api_listen_port }}"
+ service_uwsgi_config_module: "{{ service.wsgi }}"
+ service_uwsgi_config_uid: "gnocchi"
+ when:
+ - gnocchi_wsgi_provider == "uwsgi"
+ - service | service_enabled_and_mapped_to_host
+
- name: Copying over existing policy file
template:
src: "{{ gnocchi_policy_file_path }}"
diff --git a/ansible/roles/gnocchi/templates/gnocchi-api.json.j2 b/ansible/roles/gnocchi/templates/gnocchi-api.json.j2
index de8ed12900..2fc22c6469 100644
--- a/ansible/roles/gnocchi/templates/gnocchi-api.json.j2
+++ b/ansible/roles/gnocchi/templates/gnocchi-api.json.j2
@@ -1,20 +1,27 @@
-{% set gnocchi_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
+{% set apache_binary = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
{% set gnocchi_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
+{% set command = ('/usr/sbin/' + apache_binary + ' -DFOREGROUND') if gnocchi_wsgi_provider == 'apache' else 'uwsgi /etc/gnocchi/gnocchi-api-uwsgi.ini' %}
{
- "command": "{{ gnocchi_cmd }} -DFOREGROUND",
+ "command": "{{ command }}",
"config_files": [
{
"source": "{{ container_config_directory }}/gnocchi.conf",
"dest": "/etc/gnocchi/gnocchi.conf",
"owner": "gnocchi",
"perm": "0600"
- },
+ }{% if gnocchi_wsgi_provider == 'apache' %},
{
"source": "{{ container_config_directory }}/wsgi-gnocchi.conf",
"dest": "/etc/{{ gnocchi_dir }}/wsgi-gnocchi.conf",
"owner": "gnocchi",
"perm": "0600"
- }{% if gnocchi_policy_file is defined %},
+ }{% elif gnocchi_wsgi_provider == 'uwsgi' %},
+ {
+ "source": "{{ container_config_directory }}/gnocchi-api-uwsgi.ini",
+ "dest": "/etc/gnocchi/gnocchi-api-uwsgi.ini",
+ "owner": "gnocchi",
+ "perm": "0600"
+ }{% endif %}{% if gnocchi_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ gnocchi_policy_file }}",
"dest": "/etc/gnocchi/{{ gnocchi_policy_file }}",
diff --git a/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2 b/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2
index e84923d67d..4cc1568849 100644
--- a/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2
+++ b/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2
@@ -1,5 +1,6 @@
+{% set remoted = '/usr/sbin/pacemaker-remoted' if kolla_base_distro in ['centos', 'rocky'] else '/usr/sbin/pacemaker_remoted' %}
{
- "command": "/usr/sbin/pacemaker_remoted -l /var/log/kolla/hacluster/pacemaker-remoted.log{% if openstack_logging_debug | bool %} -VV{% endif %} -p {{ hacluster_pacemaker_remote_port }}",
+ "command": "{{ remoted }} -l /var/log/kolla/hacluster/pacemaker-remoted.log{% if openstack_logging_debug | bool %} -VV{% endif %} -p {{ hacluster_pacemaker_remote_port }}",
"config_files": [
{
"source": "{{ container_config_directory }}/authkey",
diff --git a/ansible/roles/heat/tasks/config.yml b/ansible/roles/heat/tasks/config.yml
index 107e72198e..10271bd8d4 100644
--- a/ansible/roles/heat/tasks/config.yml
+++ b/ansible/roles/heat/tasks/config.yml
@@ -26,7 +26,7 @@
heat_policy_file: "{{ heat_policy.results.0.stat.path | basename }}"
heat_policy_file_path: "{{ heat_policy.results.0.stat.path }}"
when:
- - heat_policy.results
+ - heat_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
diff --git a/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2 b/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2
index 27aea0dd07..b692c2d6fa 100644
--- a/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2
+++ b/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2
@@ -34,9 +34,7 @@ CustomLog "{{ heat_log_dir }}/apache-cfn-access.log" common
WSGIScriptAlias / {{ binary_path }}/heat-wsgi-api-cfn
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
- <IfVersion >= 2.4>
-   ErrorLogFormat "%{cu}t %M"
- </IfVersion>
+ ErrorLogFormat "%{cu}t %M"
ErrorLog "{{ heat_log_dir }}/heat-api-cfn-error.log"
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ heat_log_dir }}/heat-api-cfn-access.log" logformat
diff --git a/ansible/roles/heat/templates/wsgi-heat-api.conf.j2 b/ansible/roles/heat/templates/wsgi-heat-api.conf.j2
index d197f764a5..1f285dc153 100644
--- a/ansible/roles/heat/templates/wsgi-heat-api.conf.j2
+++ b/ansible/roles/heat/templates/wsgi-heat-api.conf.j2
@@ -34,9 +34,7 @@ CustomLog "{{ heat_log_dir }}/apache-access.log" common
WSGIScriptAlias / {{ binary_path }}/heat-wsgi-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
- <IfVersion >= 2.4>
-   ErrorLogFormat "%{cu}t %M"
- </IfVersion>
+ ErrorLogFormat "%{cu}t %M"
ErrorLog "{{ heat_log_dir }}/heat-api-error.log"
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ heat_log_dir }}/heat-api-access.log" logformat
diff --git a/ansible/roles/horizon/defaults/main.yml b/ansible/roles/horizon/defaults/main.yml
index cdda448e16..01a6e3082a 100644
--- a/ansible/roles/horizon/defaults/main.yml
+++ b/ansible/roles/horizon/defaults/main.yml
@@ -20,13 +20,13 @@ horizon_services:
ENABLE_OCTAVIA: "{{ 'yes' if enable_horizon_octavia | bool else 'no' }}"
ENABLE_TACKER: "{{ 'yes' if enable_horizon_tacker | bool else 'no' }}"
ENABLE_TROVE: "{{ 'yes' if enable_horizon_trove | bool else 'no' }}"
- ENABLE_VENUS: "{{ 'yes' if enable_horizon_venus | bool else 'no' }}"
ENABLE_WATCHER: "{{ 'yes' if enable_horizon_watcher | bool else 'no' }}"
ENABLE_ZUN: "{{ 'yes' if enable_horizon_zun | bool else 'no' }}"
FORCE_GENERATE: "{{ 'yes' if horizon_dev_mode | bool else 'no' }}"
volumes: "{{ horizon_default_volumes + horizon_extra_volumes }}"
dimensions: "{{ horizon_dimensions }}"
healthcheck: "{{ horizon_healthcheck }}"
+ wsgi: "openstack_dashboard.wsgi:application"
haproxy:
horizon:
enabled: "{{ enable_horizon }}"
@@ -168,3 +168,8 @@ horizon_use_keystone_public_url: False
# Copy certificates
###################
horizon_copy_certs: "{{ kolla_copy_ca_into_containers | bool or horizon_enable_tls_backend | bool }}"
+
+############
+# WSGI
+############
+horizon_wsgi_provider: "uwsgi"
diff --git a/ansible/roles/horizon/tasks/config.yml b/ansible/roles/horizon/tasks/config.yml
index 37d06c2007..7fece46ebd 100644
--- a/ansible/roles/horizon/tasks/config.yml
+++ b/ansible/roles/horizon/tasks/config.yml
@@ -59,7 +59,26 @@
- "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/horizon.conf"
- "{{ node_custom_config }}/horizon/horizon.conf"
- "horizon.conf.j2"
- when: service | service_enabled_and_mapped_to_host
+ when:
+ - service | service_enabled_and_mapped_to_host
+ - horizon_wsgi_provider == "apache"
+
+- name: "Configure uWSGI for Horizon"
+ include_role:
+ name: service-uwsgi-config
+ vars:
+ project_services: "{{ horizon_services }}"
+ service: "{{ project_services[service_name] }}"
+ service_name: "horizon"
+ service_uwsgi_config_http_port: "{{ horizon_listen_port }}"
+ service_uwsgi_config_module: "{{ service.wsgi }}"
+ service_uwsgi_config_tls_backend: "{{ horizon_enable_tls_backend | bool }}"
+ service_uwsgi_config_tls_cert: "/etc/horizon/certs/horizon-cert.pem"
+ service_uwsgi_config_tls_key: "/etc/horizon/certs/horizon-key.pem"
+ service_uwsgi_config_uid: "{{ 'horizon' if enable_haproxy | bool else 'root' }}"
+ when:
+ - service | service_enabled_and_mapped_to_host
+ - horizon_wsgi_provider == "uwsgi"
- name: Copying over kolla-settings.py
become: true
diff --git a/ansible/roles/horizon/tasks/policy_item.yml b/ansible/roles/horizon/tasks/policy_item.yml
index 708f60e6ef..7b427108a3 100644
--- a/ansible/roles/horizon/tasks/policy_item.yml
+++ b/ansible/roles/horizon/tasks/policy_item.yml
@@ -22,4 +22,4 @@
set_fact:
custom_policy: "{{ custom_policy + [overwritten_files.results.0.stat.path] }}"
when:
- - overwritten_files.results
+ - overwritten_files.results | length > 0
diff --git a/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2 b/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2
index 2abbd6acc2..4860f0a887 100644
--- a/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2
+++ b/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2
@@ -19,6 +19,7 @@ DATABASES = {
{% elif groups['memcached'] | length > 0 and not horizon_backend_database | bool %}
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES['default']['LOCATION'] = [{% for host in groups['memcached'] %}'{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ memcached_port }}'{% if not loop.last %},{% endif %}{% endfor %}]
+CACHES['default']['OPTIONS'] = {'ignore_exc': True}
{% endif %}
{% if kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool %}
diff --git a/ansible/roles/horizon/templates/horizon.json.j2 b/ansible/roles/horizon/templates/horizon.json.j2
index 5e070ff493..edbc335f8a 100644
--- a/ansible/roles/horizon/templates/horizon.json.j2
+++ b/ansible/roles/horizon/templates/horizon.json.j2
@@ -1,14 +1,23 @@
{% set apache_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
{% set apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
{% set apache_file = '000-default.conf' if kolla_base_distro in ['ubuntu', 'debian'] else 'horizon.conf' %}
+{% set uwsgi_cmd = 'uwsgi /etc/horizon/horizon-uwsgi.ini' %}
+{% set command = uwsgi_cmd if horizon_wsgi_provider == 'uwsgi' else ('/usr/sbin/' + apache_cmd + ' -DFOREGROUND') %}
{
- "command": "/usr/sbin/{{ apache_cmd }} -DFOREGROUND",
+ "command": "{{ command }}",
"config_files": [
{
+{% if horizon_wsgi_provider == 'apache' %}
"source": "{{ container_config_directory }}/horizon.conf",
"dest": "/etc/{{ apache_dir }}/{{ apache_file }}",
"owner": "horizon",
"perm": "0600"
+{% elif horizon_wsgi_provider == 'uwsgi' %}
+ "source": "{{ container_config_directory }}/horizon-uwsgi.ini",
+ "dest": "/etc/horizon/horizon-uwsgi.ini",
+ "owner": "horizon",
+ "perm": "0600"
+{% endif %}
},
{% for path in custom_policy %}
{
diff --git a/ansible/roles/ironic/defaults/main.yml b/ansible/roles/ironic/defaults/main.yml
index fb485375a2..9c80ad6fea 100644
--- a/ansible/roles/ironic/defaults/main.yml
+++ b/ansible/roles/ironic/defaults/main.yml
@@ -68,6 +68,17 @@ ironic_services:
image: "{{ ironic_dnsmasq_image_full }}"
volumes: "{{ ironic_dnsmasq_default_volumes + ironic_dnsmasq_extra_volumes }}"
dimensions: "{{ ironic_dnsmasq_dimensions }}"
+ pid_mode: host
+ ironic-pxe-filter:
+ container_name: ironic_pxe_filter
+ group: ironic-dnsmasq
+ enabled: "{{ enable_ironic_pxe_filter }}"
+ image: "{{ ironic_pxe_filter_image_full }}"
+ volumes: "{{ ironic_pxe_filter_default_volumes + ironic_pxe_filter_extra_volumes }}"
+ dimensions: "{{ ironic_pxe_filter_dimensions }}"
+ # TODO: use --pid container:ironic_dnsmasq instead, but that is more complicated
+ # since we would also need to declare the dependency in systemd.
+ pid_mode: host
ironic-prometheus-exporter:
container_name: ironic_prometheus_exporter
group: ironic-conductor
@@ -127,6 +138,10 @@ ironic_dnsmasq_image: "{{ docker_image_url }}dnsmasq"
ironic_dnsmasq_tag: "{{ ironic_tag }}"
ironic_dnsmasq_image_full: "{{ ironic_dnsmasq_image }}:{{ ironic_dnsmasq_tag }}"
+ironic_pxe_filter_image: "{{ docker_image_url }}ironic-pxe-filter"
+ironic_pxe_filter_tag: "{{ ironic_tag }}"
+ironic_pxe_filter_image_full: "{{ ironic_pxe_filter_image }}:{{ ironic_pxe_filter_tag }}"
+
ironic_prometheus_exporter_image: "{{ docker_image_url }}ironic-prometheus-exporter"
ironic_prometheus_exporter_tag: "{{ ironic_tag }}"
ironic_prometheus_exporter_image_full: "{{ ironic_prometheus_exporter_image }}:{{ ironic_prometheus_exporter_tag }}"
@@ -136,6 +151,7 @@ ironic_conductor_dimensions: "{{ default_container_dimensions }}"
ironic_tftp_dimensions: "{{ default_container_dimensions }}"
ironic_http_dimensions: "{{ default_container_dimensions }}"
ironic_dnsmasq_dimensions: "{{ default_container_dimensions }}"
+ironic_pxe_filter_dimensions: "{{ default_container_dimensions }}"
ironic_prometheus_exporter_dimensions: "{{ default_container_dimensions }}"
ironic_api_enable_healthchecks: "{{ enable_container_healthchecks }}"
@@ -212,8 +228,16 @@ ironic_dnsmasq_default_volumes:
- "{{ node_config_directory }}/ironic-dnsmasq/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla"
- "ironic_dhcp_hosts:/etc/dnsmasq/dhcp-hostsdir:ro"
+ - "kolla_logs:/var/log/kolla"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
+ironic_pxe_filter_default_volumes:
+ - "{{ node_config_directory }}/ironic-pxe-filter/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
+ - "kolla_logs:/var/log/kolla"
+ - "ironic_dhcp_hosts:/etc/dnsmasq/dhcp-hostsdir"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
ironic_prometheus_exporter_default_volumes:
- "{{ node_config_directory }}/ironic-prometheus-exporter/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -227,6 +251,7 @@ ironic_conductor_extra_volumes: "{{ ironic_extra_volumes }}"
ironic_tftp_extra_volumes: "{{ ironic_extra_volumes }}"
ironic_http_extra_volumes: "{{ ironic_extra_volumes }}"
ironic_dnsmasq_extra_volumes: "{{ ironic_extra_volumes }}"
+ironic_pxe_filter_extra_volumes: "{{ ironic_extra_volumes }}"
ironic_prometheus_exporter_extra_volumes: "{{ ironic_extra_volumes }}"
####################
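The new ironic-pxe-filter service is gated on enable_ironic_pxe_filter, which is referenced above but defined outside this diff. A minimal globals.yml sketch, assuming the flag follows the usual enable_* convention and is off by default:

# globals.yml -- illustrative operator override, not part of this patch.
# Runs the dnsmasq PXE filter next to ironic_dnsmasq on the hosts in the
# ironic-dnsmasq group; both containers share the host PID namespace.
enable_ironic_pxe_filter: "yes"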
diff --git a/ansible/roles/ironic/handlers/main.yml b/ansible/roles/ironic/handlers/main.yml
index d7989a5736..417fbd46f5 100644
--- a/ansible/roles/ironic/handlers/main.yml
+++ b/ansible/roles/ironic/handlers/main.yml
@@ -69,6 +69,21 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
cap_add: "{{ service.cap_add }}"
+ pid_mode: "{{ service.pid_mode }}"
+
+- name: Restart ironic-pxe-filter container
+ vars:
+ service_name: "ironic-pxe-filter"
+ service: "{{ ironic_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+ pid_mode: "{{ service.pid_mode }}"
- name: Restart ironic-prometheus-exporter container
vars:
diff --git a/ansible/roles/ironic/tasks/config.yml b/ansible/roles/ironic/tasks/config.yml
index 98a3e66e2e..9b7627e189 100644
--- a/ansible/roles/ironic/tasks/config.yml
+++ b/ansible/roles/ironic/tasks/config.yml
@@ -26,7 +26,7 @@
ironic_policy_file: "{{ ironic_policy.results.0.stat.path | basename }}"
ironic_policy_file_path: "{{ ironic_policy.results.0.stat.path }}"
when:
- - ironic_policy.results
+ - ironic_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
@@ -54,7 +54,7 @@
mode: "0660"
become: true
when:
- - item.key in [ "ironic-api", "ironic-conductor", "ironic-prometheus-exporter" ]
+ - item.key in [ "ironic-api", "ironic-conductor", "ironic-prometheus-exporter", "ironic-pxe-filter" ]
with_dict: "{{ ironic_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over dnsmasq.conf
diff --git a/ansible/roles/ironic/tasks/rolling_upgrade.yml b/ansible/roles/ironic/tasks/rolling_upgrade.yml
index 66a86fcf6e..40998142c1 100644
--- a/ansible/roles/ironic/tasks/rolling_upgrade.yml
+++ b/ansible/roles/ironic/tasks/rolling_upgrade.yml
@@ -5,22 +5,20 @@
# This is only needed when performing a slow rolling upgrade process
# where you need to maintain compatibility between different versions
# during the upgrade. For direct version jumps, this section can be skipped.
-- import_tasks: config.yml
- vars:
- pin_release_version: "{{ ironic_pin_release_version }}"
+- name: Pin release version for rolling upgrades
when: ironic_pin_release_version | length > 0
+ block:
+ - import_tasks: config.yml
+ vars:
+ pin_release_version: "{{ ironic_pin_release_version }}"
-- import_tasks: check-containers.yml
-
-- import_tasks: bootstrap_service.yml
+ - import_tasks: check-containers.yml
-# TODO(donghm): Flush_handlers to restart ironic services
-# should be run in serial nodes to decrease downtime. Update when
-# the module ansible strategy for rolling upgrade is finished.
+ - import_tasks: bootstrap_service.yml
-# Restart ironic services with pinned release version
-- name: Flush handlers
- meta: flush_handlers
+ # Restart ironic services with pinned release version
+ - name: Flush handlers
+ meta: flush_handlers
# Unpin version
- import_tasks: config.yml
diff --git a/ansible/roles/ironic/templates/ipa.ipxe.j2 b/ansible/roles/ironic/templates/ipa.ipxe.j2
index 676f885c45..4ae0d25762 100644
--- a/ansible/roles/ironic/templates/ipa.ipxe.j2
+++ b/ansible/roles/ironic/templates/ipa.ipxe.j2
@@ -13,6 +13,6 @@ chain pxelinux.cfg/${mac:hexhyp} || goto ipa
:ipa
:retry_boot
imgfree
-kernel --timeout 30000 {{ ironic_http_url }}/ironic-agent.kernel ipa-inspection-callback-url={{ ironic_internal_endpoint }}/v1/continue systemd.journald.forward_to_console=yes BOOTIF=${mac} initrd=ironic-agent.initramfs {{ ironic_kernel_cmdline_extras | join(' ') }} || goto retry_boot
+kernel --timeout 30000 {{ ironic_http_url }}/ironic-agent.kernel ipa-inspection-callback-url={{ ironic_internal_endpoint }}/v1/continue_inspection systemd.journald.forward_to_console=yes BOOTIF=${mac} initrd=ironic-agent.initramfs {{ ironic_kernel_cmdline_extras | join(' ') }} || goto retry_boot
initrd --timeout 30000 {{ ironic_http_url }}/ironic-agent.initramfs || goto retry_boot
boot
diff --git a/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2 b/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2
index 7e62ac2c87..ecb7a93daa 100644
--- a/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2
+++ b/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2
@@ -37,9 +37,7 @@ LogLevel info
WSGIScriptAlias / {{ wsgi_directory }}/ironic-api-wsgi
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
- <IfVersion >= 2.4>
-   ErrorLogFormat "%{cu}t %M"
- </IfVersion>
+ ErrorLogFormat "%{cu}t %M"
ErrorLog "{{ ironic_log_dir }}/ironic-api-error.log"
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ ironic_log_dir }}/ironic-api-access.log" logformat
diff --git a/ansible/roles/ironic/templates/ironic-pxe-filter.json.j2 b/ansible/roles/ironic/templates/ironic-pxe-filter.json.j2
new file mode 100644
index 0000000000..6bcf7e351b
--- /dev/null
+++ b/ansible/roles/ironic/templates/ironic-pxe-filter.json.j2
@@ -0,0 +1,23 @@
+{
+ "command": "ironic-pxe-filter --config-file /etc/ironic/ironic.conf --log-file /var/log/kolla/ironic/ironic-pxe-filter.log",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/ironic.conf",
+ "dest": "/etc/ironic/ironic.conf",
+ "owner": "ironic",
+ "perm": "0600"
+ }
+ ],
+ "permissions": [
+ {
+ "path": "/var/log/kolla/ironic",
+ "owner": "ironic:ironic",
+ "recurse": true
+ },
+ {
+ "path": "/var/lib/ironic",
+ "owner": "ironic:ironic",
+ "recurse": true
+ }
+ ]
+}
diff --git a/ansible/roles/ironic/templates/ironic.conf.j2 b/ansible/roles/ironic/templates/ironic.conf.j2
index fcf130bf76..66fbdb1f77 100644
--- a/ansible/roles/ironic/templates/ironic.conf.j2
+++ b/ansible/roles/ironic/templates/ironic.conf.j2
@@ -4,7 +4,7 @@ auth_strategy = noauth
{% endif %}
debug = {{ ironic_logging_debug }}
-log_file = /var/log/kolla/ironic/{{ service_name }}
+log_file = /var/log/kolla/ironic/{{ service_name }}.log
transport_url = {{ rpc_transport_url }}
@@ -197,3 +197,6 @@ dhcp_provider = none
[oslo_concurrency]
lock_path = /var/lib/ironic/tmp
+
+[pxe_filter]
+dhcp_hostsdir = /etc/dnsmasq/dhcp-hostsdir
diff --git a/ansible/roles/keystone/defaults/main.yml b/ansible/roles/keystone/defaults/main.yml
index 3aad442a2c..3d236dfef7 100644
--- a/ansible/roles/keystone/defaults/main.yml
+++ b/ansible/roles/keystone/defaults/main.yml
@@ -8,6 +8,7 @@ keystone_services:
volumes: "{{ keystone_default_volumes + keystone_extra_volumes }}"
dimensions: "{{ keystone_dimensions }}"
healthcheck: "{{ keystone_healthcheck }}"
+ wsgi: "keystone.wsgi.api:application"
haproxy:
keystone_internal:
enabled: "{{ enable_keystone }}"
@@ -225,12 +226,15 @@ keystone_remote_id_attribute_oidc: "HTTP_OIDC_ISS"
keystone_container_federation_oidc_metadata_folder: "{{ '/etc/apache2/metadata' if kolla_base_distro in ['debian', 'ubuntu'] else '/etc/httpd/metadata' }}"
keystone_container_federation_oidc_idp_certificate_folder: "{{ '/etc/apache2/cert' if kolla_base_distro in ['debian', 'ubuntu'] else '/etc/httpd/cert' }}"
keystone_container_federation_oidc_attribute_mappings_folder: "{{ container_config_directory }}/federation/oidc/attribute_maps"
-keystone_host_federation_oidc_metadata_folder: "{{ node_config_directory }}/keystone/federation/oidc/metadata"
-keystone_host_federation_oidc_idp_certificate_folder: "{{ node_config_directory }}/keystone/federation/oidc/cert"
-keystone_host_federation_oidc_attribute_mappings_folder: "{{ node_config_directory }}/keystone/federation/oidc/attribute_maps"
+keystone_host_federation_base_folder: "{{ node_config_directory }}/keystone/federation"
+keystone_host_federation_oidc_metadata_folder: "{{ keystone_host_federation_base_folder }}/oidc/metadata"
+keystone_host_federation_oidc_idp_certificate_folder: "{{ keystone_host_federation_base_folder }}/oidc/cert"
+keystone_host_federation_oidc_attribute_mappings_folder: "{{ keystone_host_federation_base_folder }}/oidc/attribute_maps"
keystone_federation_oidc_jwks_uri: ""
keystone_federation_oidc_additional_options: {}
+keystone_federation_oidc_error_page_retry_login_delay_milliseconds: 5000
+
# These variables are used to define multiple trusted Horizon dashboards.
# keystone_trusted_dashboards: ['<https://dashboard1/auth/websso/>', '<https://dashboard2/auth/websso/>', '<https://dashboard3/auth/websso/>']
horizon_trusted_dashboards: "{{ ['%s://%s/auth/websso/' % (public_protocol, kolla_external_fqdn), '%s/auth/websso/' % (horizon_public_endpoint)] if enable_horizon | bool else [] }}"
@@ -257,3 +261,8 @@ keystone_database_enable_tls_internal: "{{ database_enable_tls_internal | bool }
# Copy certificates
###################
keystone_copy_certs: "{{ kolla_copy_ca_into_containers | bool or keystone_enable_tls_backend | bool or keystone_database_enable_tls_internal | bool }}"
+
+############
+# WSGI
+############
+keystone_wsgi_provider: "uwsgi"
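Gnocchi, Horizon and Keystone all gain a *_wsgi_provider variable defaulting to uWSGI. A minimal globals.yml sketch for operators who prefer to stay on the Apache mod_wsgi path (an assumed override, not part of this patch):

# globals.yml -- keep serving these APIs through Apache instead of uWSGI.
gnocchi_wsgi_provider: "apache"
horizon_wsgi_provider: "apache"
keystone_wsgi_provider: "apache"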
diff --git a/ansible/roles/keystone/tasks/config-federation-oidc.yml b/ansible/roles/keystone/tasks/config-federation-oidc.yml
index 81384931d0..7a50e8c40a 100644
--- a/ansible/roles/keystone/tasks/config-federation-oidc.yml
+++ b/ansible/roles/keystone/tasks/config-federation-oidc.yml
@@ -85,3 +85,21 @@
keystone_federation_openid_certificate_key_ids: "{{ certificates_path.files | map(attribute='path') | map('regex_replace', '^.*/(.*)\\.pem$', '\\1#' + keystone_container_federation_oidc_idp_certificate_folder + '/\\1.pem') | list }}" # noqa 204
when:
- inventory_hostname in groups[keystone.group]
+
+- name: Copying modOIDC error page template
+ vars:
+ keystone: "{{ keystone_services.keystone }}"
+ template:
+ src: "{{ item }}"
+ dest: "{{ keystone_host_federation_base_folder }}/modoidc-error-page.html"
+ mode: "0660"
+ become: true
+ when:
+ - inventory_hostname in groups[keystone.group]
+ - keystone.enabled | bool
+ - keystone_enable_federation_openid | bool
+ with_first_found:
+ - files:
+ - "{{ node_custom_config }}/keystone/federation/modoidc-error-page.html"
+ - "modoidc-error-page.html.j2"
+ skip: true
diff --git a/ansible/roles/keystone/tasks/config.yml b/ansible/roles/keystone/tasks/config.yml
index d9c54e88e6..df6617dd41 100644
--- a/ansible/roles/keystone/tasks/config.yml
+++ b/ansible/roles/keystone/tasks/config.yml
@@ -26,7 +26,7 @@
keystone_policy_file: "{{ keystone_policy.results.0.stat.path | basename }}"
keystone_policy_file_path: "{{ keystone_policy.results.0.stat.path }}"
when:
- - keystone_policy.results
+ - keystone_policy.results | length > 0
- name: Check if Keystone domain-specific config is supplied
stat:
@@ -132,12 +132,31 @@
dest: "{{ node_config_directory }}/keystone/wsgi-keystone.conf"
mode: "0660"
become: true
- when: service | service_enabled_and_mapped_to_host
+ when:
+ - service | service_enabled_and_mapped_to_host
+ - keystone_wsgi_provider == "apache"
with_first_found:
- "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/wsgi-keystone.conf"
- "{{ node_custom_config }}/keystone/wsgi-keystone.conf"
- "wsgi-keystone.conf.j2"
+- name: "Configure uWSGI for Keystone"
+ include_role:
+ name: service-uwsgi-config
+ vars:
+ project_services: "{{ keystone_services }}"
+ service: "{{ keystone_services['keystone'] }}"
+ service_name: "keystone"
+ service_uwsgi_config_http_port: "{{ keystone_listen_port }}"
+ service_uwsgi_config_module: "{{ service.wsgi }}"
+ service_uwsgi_config_tls_backend: "{{ keystone_enable_tls_backend | bool }}"
+ service_uwsgi_config_tls_cert: "/etc/keystone/certs/keystone-cert.pem"
+ service_uwsgi_config_tls_key: "/etc/keystone/certs/keystone-key.pem"
+ service_uwsgi_config_uid: "keystone"
+ when:
+ - service | service_enabled_and_mapped_to_host
+ - keystone_wsgi_provider == "uwsgi"
+
- name: Checking whether keystone-paste.ini file exists
vars:
service: "{{ keystone_services['keystone'] }}"
diff --git a/ansible/roles/keystone/templates/keystone-startup.sh.j2 b/ansible/roles/keystone/templates/keystone-startup.sh.j2
index 126ec865df..224e86f5dd 100644
--- a/ansible/roles/keystone/templates/keystone-startup.sh.j2
+++ b/ansible/roles/keystone/templates/keystone-startup.sh.j2
@@ -1,5 +1,7 @@
#!/bin/bash -x
-{% set keystone_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
+{% set apache_cmd = '/usr/sbin/apache2' if kolla_base_distro in ['ubuntu', 'debian'] else '/usr/sbin/httpd' %}
+{% set uwsgi_cmd = 'uwsgi /etc/keystone/keystone-api-uwsgi.ini' %}
+{% set keystone_cmd = uwsgi_cmd if keystone_wsgi_provider == 'uwsgi' else (apache_cmd + ' -DFOREGROUND') %}
set -o errexit
set -o pipefail
@@ -21,4 +23,4 @@ while [ ! -f "${FERNET_KEY_DIR}/0" ]; do
fi
done
-exec /usr/sbin/{{ keystone_cmd }} -DFOREGROUND $@
+exec {{ keystone_cmd }} $@
diff --git a/ansible/roles/keystone/templates/keystone.json.j2 b/ansible/roles/keystone/templates/keystone.json.j2
index c5b567d69f..dda23b82b4 100644
--- a/ansible/roles/keystone/templates/keystone.json.j2
+++ b/ansible/roles/keystone/templates/keystone.json.j2
@@ -34,13 +34,19 @@
"dest": "/etc/keystone/{{ keystone_policy_file }}",
"owner": "keystone",
"perm": "0600"
- }{% endif %},
+ }{% endif %}{% if keystone_wsgi_provider == 'apache' %},
{
"source": "{{ container_config_directory }}/wsgi-keystone.conf",
"dest": "/etc/{{ keystone_dir }}/wsgi-keystone.conf",
"owner": "keystone",
"perm": "0600"
- }{% if keystone_enable_tls_backend | bool %},
+ }{% elif keystone_wsgi_provider == 'uwsgi' %},
+ {
+ "source": "{{ container_config_directory }}/keystone-uwsgi.ini",
+ "dest": "/etc/keystone/keystone-api-uwsgi.ini",
+ "owner": "keystone",
+ "perm": "0600"
+ }{% endif %}{% if keystone_enable_tls_backend | bool %},
{
"source": "{{ container_config_directory }}/keystone-cert.pem",
"dest": "/etc/keystone/certs/keystone-cert.pem",
@@ -52,8 +58,7 @@
"dest": "/etc/keystone/certs/keystone-key.pem",
"owner": "keystone",
"perm": "0600"
- }{% endif %}
- {% if keystone_enable_federation_openid | bool %},
+ }{% endif %}{% if keystone_enable_federation_openid | bool %},
{
"source": "{{ container_config_directory }}/federation/oidc/metadata",
"dest": "{{ keystone_container_federation_oidc_metadata_folder }}",
@@ -67,7 +72,14 @@
"owner": "{{ apache_user }}:{{ apache_user }}",
"perm": "0600",
"merge": true
- }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ },
+ {
+ "source": "{{ container_config_directory }}/federation/modoidc-error-page.html",
+ "dest": "/var/www/html/modoidc-error-page.html",
+ "owner": "{{ apache_user }}:{{ apache_user }}",
+ "perm": "0600"
+ }
+ {% endif %}{% if kolla_copy_ca_into_containers | bool %},
{
"source": "{{ container_config_directory }}/ca-certificates",
"dest": "/var/lib/kolla/share/ca-certificates",
diff --git a/ansible/roles/keystone/templates/modoidc-error-page.html.j2 b/ansible/roles/keystone/templates/modoidc-error-page.html.j2
new file mode 100644
index 0000000000..1d8db9c077
--- /dev/null
+++ b/ansible/roles/keystone/templates/modoidc-error-page.html.j2
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>Login error</title>
+    <script>
+      function retryLogin() {
+        window.location.reload();
+      }
+      setTimeout(retryLogin, {{ keystone_federation_oidc_error_page_retry_login_delay_milliseconds }});
+    </script>
+  </head>
+  <body>
+    <p>
+      It seems that an error happened during the login process.
+      You will be redirected again. Wait a few seconds please.
+    </p>
+    <p>
+      <a href="javascript:retryLogin()">Redirect me now.</a>
+    </p>
+    <hr>
+    <p>
+      Error: %s
+      <br>
+      %s
+    </p>
+  </body>
+</html>
diff --git a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
index 427c36d105..b94e825572 100644
--- a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
+++ b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
@@ -44,9 +44,7 @@ LogLevel info
WSGIScriptAlias / {{ binary_path }}/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
- <IfVersion >= 2.4>
-   ErrorLogFormat "%{cu}t %M"
- </IfVersion>
+ ErrorLogFormat "%{cu}t %M"
ErrorLog "{{ keystone_log_dir }}/keystone-apache-public-error.log"
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ keystone_log_dir }}/keystone-apache-public-access.log" logformat
@@ -69,6 +67,7 @@ LogLevel info
{% if keystone_federation_oidc_jwks_uri | length > 0 %}
OIDCOAuthVerifyJwksUri {{ keystone_federation_oidc_jwks_uri }}
{% endif %}
+ OIDCHTMLErrorTemplate /var/www/html/modoidc-error-page.html
{% if keystone_federation_openid_certificate_key_ids | length > 0 %}
OIDCOAuthVerifyCertFiles {{ keystone_federation_openid_certificate_key_ids | join(" ") }}
{% endif %}
diff --git a/ansible/roles/kuryr/tasks/config.yml b/ansible/roles/kuryr/tasks/config.yml
index d0d436a87e..10bfff47db 100644
--- a/ansible/roles/kuryr/tasks/config.yml
+++ b/ansible/roles/kuryr/tasks/config.yml
@@ -26,7 +26,7 @@
kuryr_policy_file: "{{ kuryr_policy.results.0.stat.path | basename }}"
kuryr_policy_file_path: "{{ kuryr_policy.results.0.stat.path }}"
when:
- - kuryr_policy.results
+ - kuryr_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
diff --git a/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 b/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2
index 7d3492c08a..1cb4d0a21a 100644
--- a/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2
+++ b/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2
@@ -1,4 +1,8 @@
-#!/bin/bash -x
+#!/bin/bash
+
+set -o errexit
+set -o pipefail
+set -o xtrace
{% if kolla_enable_tls_internal | bool or kolla_enable_tls_external | bool %}
{% if kolla_enable_tls_external | bool %}
diff --git a/ansible/roles/magnum/tasks/config.yml b/ansible/roles/magnum/tasks/config.yml
index 6243153244..5921da31e0 100644
--- a/ansible/roles/magnum/tasks/config.yml
+++ b/ansible/roles/magnum/tasks/config.yml
@@ -26,7 +26,7 @@
magnum_policy_file: "{{ magnum_policy.results.0.stat.path | basename }}"
magnum_policy_file_path: "{{ magnum_policy.results.0.stat.path }}"
when:
- - magnum_policy.results
+ - magnum_policy.results | length > 0
- name: Check if kubeconfig file is supplied
stat:
diff --git a/ansible/roles/manila/tasks/config.yml b/ansible/roles/manila/tasks/config.yml
index f334acc9c5..1607316a26 100644
--- a/ansible/roles/manila/tasks/config.yml
+++ b/ansible/roles/manila/tasks/config.yml
@@ -31,7 +31,7 @@
manila_policy_file: "{{ manila_policy.results.0.stat.path | basename }}"
manila_policy_file_path: "{{ manila_policy.results.0.stat.path }}"
when:
- - manila_policy.results
+ - manila_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
diff --git a/ansible/roles/manila/templates/manila-share.conf.j2 b/ansible/roles/manila/templates/manila-share.conf.j2
index ee1deb3413..cade41e823 100644
--- a/ansible/roles/manila/templates/manila-share.conf.j2
+++ b/ansible/roles/manila/templates/manila-share.conf.j2
@@ -74,8 +74,6 @@ memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_addres
share_driver = manila.share.drivers.generic.GenericShareDriver
{% if neutron_plugin_agent == "openvswitch" %}
interface_driver = manila.network.linux.interface.OVSInterfaceDriver
-{% elif neutron_plugin_agent == "linuxbridge" %}
-interface_driver = manila.network.linux.interface.BridgeInterfaceDriver
{% endif %}
driver_handles_share_servers = true
diff --git a/ansible/roles/mariadb/defaults/main.yml b/ansible/roles/mariadb/defaults/main.yml
index 837baaab88..31c5efb2ac 100644
--- a/ansible/roles/mariadb/defaults/main.yml
+++ b/ansible/roles/mariadb/defaults/main.yml
@@ -8,25 +8,7 @@ mariadb_services:
volumes: "{{ mariadb_default_volumes + mariadb_extra_volumes }}"
dimensions: "{{ mariadb_dimensions }}"
healthcheck: "{{ mariadb_healthcheck }}"
- environment:
- MYSQL_USERNAME: "{{ mariadb_monitor_user }}"
- MYSQL_PASSWORD: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}"
- MYSQL_HOST: "{{ api_interface_address }}"
- AVAILABLE_WHEN_DONOR: "1"
haproxy:
- mariadb:
- enabled: "{{ enable_mariadb | bool and not enable_external_mariadb_load_balancer | bool }}"
- mode: "tcp"
- port: "{{ database_port }}"
- listen_port: "{{ mariadb_port }}"
- frontend_tcp_extra:
- - "option clitcpka"
- - "timeout client 3600s"
- backend_tcp_extra:
- - "option srvtcpka"
- - "timeout server 3600s"
- - "{% if enable_mariadb_clustercheck | bool %}option httpchk{% endif %}"
- custom_member_list: "{{ internal_haproxy_members.split(';') }}"
mariadb_external_lb:
enabled: "{{ enable_external_mariadb_load_balancer | bool }}"
mode: "tcp"
@@ -39,18 +21,6 @@ mariadb_services:
- "option srvtcpka"
- "timeout server 3600s"
custom_member_list: "{{ external_haproxy_members.split(';') }}"
- mariadb-clustercheck:
- container_name: mariadb_clustercheck
- group: "{{ mariadb_shard_group }}"
- enabled: "{{ enable_mariadb_clustercheck | bool }}"
- image: "{{ mariadb_clustercheck_image_full }}"
- volumes: "{{ mariadb_clustercheck_default_volumes + mariadb_clustercheck_extra_volumes }}"
- dimensions: "{{ mariadb_clustercheck_dimensions }}"
- environment:
- MYSQL_USERNAME: "{{ mariadb_monitor_user }}"
- MYSQL_PASSWORD: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}"
- MYSQL_HOST: "{{ api_interface_address }}"
- AVAILABLE_WHEN_DONOR: "1"
####################
# Database
@@ -61,8 +31,7 @@ database_max_timeout: 120
####################
# HAProxy
####################
-internal_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ hostvars[host].ansible_facts.hostname }} {{ 'api' | kolla_address(host) }}:{{ mariadb_port }} check port {% if enable_mariadb_clustercheck | bool %}{{ mariadb_clustercheck_port }}{% else %}{{ mariadb_port }}{% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}"
-external_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ host }} {{ host }}:{{ mariadb_port }} check port {% if enable_mariadb_clustercheck | bool %}{{ mariadb_clustercheck_port }}{% else %}{{ mariadb_port }}{% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}"
+external_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ host }} {{ host }}:{{ mariadb_port }} check port {{ mariadb_port }} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}"
####################
# Docker
@@ -71,12 +40,7 @@ mariadb_image: "{{ docker_image_url }}mariadb-server"
mariadb_tag: "{{ openstack_tag }}"
mariadb_image_full: "{{ mariadb_image }}:{{ mariadb_tag }}"
-mariadb_clustercheck_image: "{{ docker_image_url }}mariadb-clustercheck"
-mariadb_clustercheck_tag: "{{ mariadb_tag }}"
-mariadb_clustercheck_image_full: "{{ mariadb_clustercheck_image }}:{{ mariadb_clustercheck_tag }}"
-
mariadb_dimensions: "{{ default_container_dimensions }}"
-mariadb_clustercheck_dimensions: "{{ default_container_dimensions }}"
mariadb_default_volumes:
- "{{ node_config_directory }}/mariadb/:{{ container_config_directory }}/:ro"
@@ -84,20 +48,15 @@ mariadb_default_volumes:
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "{{ mariadb_datadir_volume }}:/var/lib/mysql"
- "kolla_logs:/var/log/kolla/"
-mariadb_clustercheck_default_volumes:
- - "{{ node_config_directory }}/mariadb-clustercheck/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
mariadb_extra_volumes: "{{ default_extra_volumes }}"
-mariadb_clustercheck_extra_volumes: "{{ default_extra_volumes }}"
mariadb_enable_healthchecks: "{{ enable_container_healthchecks }}"
mariadb_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
mariadb_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
mariadb_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-mariadb_healthcheck_test: ["CMD-SHELL", "/usr/bin/clustercheck"]
+mariadb_healthcheck_test: ["CMD-SHELL", "/usr/bin/healthcheck.sh --defaults-file /etc/{{ 'mysql/' if kolla_base_distro in ['ubuntu', 'debian'] else '' }}healthcheck.cnf --connect --galera_online"]
+
mariadb_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
mariadb_healthcheck:
interval: "{{ mariadb_healthcheck_interval }}"
@@ -116,25 +75,21 @@ mariadb_recover_tmp_file_path: "/tmp/kolla_mariadb_recover_inventory_name_{{ mar
# WSREP options
###############
mariadb_wsrep_extra_provider_options: []
+mariadb_wsrep_sst_method: "mariabackup"
####################
# Backups
####################
mariadb_backup_host: "{{ groups[mariadb_shard_group][0] }}"
mariadb_backup_database_schema: "mysql"
-mariadb_backup_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}backup{% else %}{{ mariadb_shard_backup_user_prefix }}{{ mariadb_shard_id | string }}{% endif %}"
+mariadb_backup_database_user: "{{ mariadb_shard_backup_user_prefix }}{{ mariadb_shard_id | string }}"
mariadb_backup_type: "full"
-mariadb_backup_possible: "{{ mariadb_loadbalancer != 'haproxy' or inventory_hostname in mariadb_default_database_shard_hosts }}"
-
-####################
-# Clustercheck
-####################
-enable_mariadb_clustercheck: "{{ 'True' if mariadb_loadbalancer == 'haproxy' else 'False' }}"
+mariadb_backup_possible: "{{ inventory_hostname in mariadb_default_database_shard_hosts }}"
####################
# Sharding
####################
-mariadb_shard_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}{{ database_user }}{% else %}{{ mariadb_shard_root_user_prefix }}{{ mariadb_shard_id | string }}{% endif %}"
+mariadb_shard_database_user: "{{ mariadb_shard_root_user_prefix }}{{ mariadb_shard_id | string }}"
mariadb_database_shard: "{{ mariadb_shards_info }}"
# Database
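The SST method is now exposed as mariadb_wsrep_sst_method (default mariabackup) and consumed by galera.cnf.j2 below. A hedged globals.yml sketch for pinning it explicitly:

# globals.yml -- illustrative only: switch Galera SST back to rsync; rsync
# needs no mariabackup tooling but blocks the donor for the whole transfer.
mariadb_wsrep_sst_method: "rsync"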
diff --git a/ansible/roles/mariadb/handlers/main.yml b/ansible/roles/mariadb/handlers/main.yml
index c5d120615f..5e1b68ae90 100644
--- a/ansible/roles/mariadb/handlers/main.yml
+++ b/ansible/roles/mariadb/handlers/main.yml
@@ -72,19 +72,3 @@
- groups[mariadb_shard_group + '_port_alive_False'] is defined
- inventory_hostname in groups[mariadb_shard_group + '_port_alive_False']
listen: Restart mariadb container
-
-- name: Restart mariadb-clustercheck container
- vars:
- service_name: "mariadb-clustercheck"
- service: "{{ mariadb_services[service_name] }}"
- become: true
- kolla_container:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ service.image }}"
- name: "{{ service.container_name }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- environment: "{{ service.environment }}"
- when:
- - service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/mariadb/tasks/config.yml b/ansible/roles/mariadb/tasks/config.yml
index bf1779be55..405e34ffbd 100644
--- a/ansible/roles/mariadb/tasks/config.yml
+++ b/ansible/roles/mariadb/tasks/config.yml
@@ -70,6 +70,22 @@
become: true
when: service | service_enabled_and_mapped_to_host
+- name: Copying over healthcheck.cnf
+ vars:
+ service_name: "mariadb"
+ service: "{{ mariadb_services[service_name] }}"
+ merge_configs:
+ sources:
+ - "{{ role_path }}/templates/healthcheck.cnf.j2"
+ - "{{ node_custom_config }}/healthcheck.cnf"
+ - "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/healthcheck.cnf"
+ dest: "{{ node_config_directory }}/{{ service_name }}/healthcheck.cnf"
+ mode: "0660"
+ become: true
+ when:
+ - mariadb_enable_healthchecks | bool
+ - service | service_enabled_and_mapped_to_host
+
- include_tasks: copy-certs.yml
when:
- mariadb_copy_certs | bool
diff --git a/ansible/roles/mariadb/tasks/loadbalancer.yml b/ansible/roles/mariadb/tasks/loadbalancer.yml
index 78cac3fb56..bc7439dfd8 100644
--- a/ansible/roles/mariadb/tasks/loadbalancer.yml
+++ b/ansible/roles/mariadb/tasks/loadbalancer.yml
@@ -30,7 +30,7 @@
login_user: "{{ database_user }}"
login_password: "{{ database_password }}"
name: "{{ mariadb_monitor_user }}"
- password: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}"
+ password: "{{ mariadb_monitor_password }}"
host: "%"
priv: "*.*:USAGE,REPLICATION CLIENT"
tags: always
diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml
index 11e4b8333e..165aa3963b 100644
--- a/ansible/roles/mariadb/tasks/recover_cluster.yml
+++ b/ansible/roles/mariadb/tasks/recover_cluster.yml
@@ -111,6 +111,22 @@
- bootstrap_host is defined
- bootstrap_host == inventory_hostname
+- name: Refresh galera.cnf to set first MariaDB container as primary
+ vars:
+ service_name: "mariadb"
+ service: "{{ mariadb_services[service_name] }}"
+ primary_host_on_recovery: "{{ bootstrap_host == inventory_hostname }}"
+ merge_configs:
+ sources:
+ - "{{ role_path }}/templates/galera.cnf.j2"
+ - "{{ node_custom_config }}/galera.cnf"
+ - "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/galera.cnf"
+ dest: "{{ node_config_directory }}/{{ service_name }}/galera.cnf"
+ mode: "0660"
+ become: true
+ when:
+ - bootstrap_host is defined
+
- name: Starting first MariaDB container
become: true
kolla_container:
@@ -144,14 +160,6 @@
- bootstrap_host is defined
- bootstrap_host == inventory_hostname
-- name: Set first MariaDB container as primary
- become: true
- shell: "{{ kolla_container_engine }} exec {{ mariadb_service.container_name }} mariadb -uroot -p{{ database_password }} -e \"SET GLOBAL wsrep_provider_options='pc.bootstrap=yes';\""
- no_log: True
- when:
- - bootstrap_host is defined
- - bootstrap_host == inventory_hostname
-
- name: Wait for MariaDB to become operational
become: true
kolla_toolbox:
@@ -203,6 +211,23 @@
- bootstrap_host is defined
- bootstrap_host != inventory_hostname
+- name: Unset pc.bootstrap for primary MariaDB galera.cnf for next restart
+ vars:
+ service_name: "mariadb"
+ service: "{{ mariadb_services[service_name] }}"
+ primary_host_on_recovery: false
+ merge_configs:
+ sources:
+ - "{{ role_path }}/templates/galera.cnf.j2"
+ - "{{ node_custom_config }}/galera.cnf"
+ - "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/galera.cnf"
+ dest: "{{ node_config_directory }}/{{ service_name }}/galera.cnf"
+ mode: "0660"
+ become: true
+ when:
+ - bootstrap_host is defined
+ - bootstrap_host == inventory_hostname
+
- name: Restart master MariaDB container(s)
become: true
kolla_container:
diff --git a/ansible/roles/mariadb/tasks/register.yml b/ansible/roles/mariadb/tasks/register.yml
index 8c679d9385..1b733afa70 100644
--- a/ansible/roles/mariadb/tasks/register.yml
+++ b/ansible/roles/mariadb/tasks/register.yml
@@ -29,7 +29,7 @@
login_user: "{{ database_user }}"
login_password: "{{ database_password }}"
name: "{{ mariadb_monitor_user }}"
- password: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}"
+ password: "{{ mariadb_monitor_password }}"
host: "%"
priv: "*.*:USAGE,REPLICATION CLIENT"
when:
diff --git a/ansible/roles/mariadb/tasks/upgrade.yml b/ansible/roles/mariadb/tasks/upgrade.yml
index 5b10a7e111..9a54536962 100644
--- a/ansible/roles/mariadb/tasks/upgrade.yml
+++ b/ansible/roles/mariadb/tasks/upgrade.yml
@@ -1,2 +1,33 @@
---
+- name: Set wsrep_sst_method to rsync for upgrade
+ become: true
+ no_log: true
+ shell: >
+ {{ kolla_container_engine }} exec {{ mariadb_service.container_name }}
+ mysql -uroot -p{{ database_password }}
+ -e "SET GLOBAL wsrep_sst_method='rsync';"
+
- import_tasks: deploy.yml
+ vars:
+ mariadb_wsrep_sst_method: "rsync"
+
+- name: Set wsrep_sst_method to mariabackup after upgrade
+ become: true
+ no_log: true
+ shell: >
+ {{ kolla_container_engine }} exec {{ mariadb_service.container_name }}
+ mysql -uroot -p{{ database_password }}
+ -e "SET GLOBAL wsrep_sst_method='mariabackup';"
+
+- import_tasks: deploy.yml
+
+# TODO(seunghun1ee): Remove this task after 2026.1
+- name: "Stop and remove mariadb_clustercheck containers"
+ become: true
+ kolla_container:
+ action: "stop_and_remove_container"
+ common_options: "{{ docker_common_options }}"
+ name: "mariadb_clustercheck"
+ ignore_missing: true
+ when:
+ - "'mariadb_clustercheck' not in skip_stop_containers"
diff --git a/ansible/roles/mariadb/templates/galera.cnf.j2 b/ansible/roles/mariadb/templates/galera.cnf.j2
index c7e5916fd5..805669aed1 100644
--- a/ansible/roles/mariadb/templates/galera.cnf.j2
+++ b/ansible/roles/mariadb/templates/galera.cnf.j2
@@ -1,5 +1,4 @@
{%- set wsrep_driver = '/usr/lib/galera/libgalera_smm.so' if kolla_base_distro in ['debian', 'ubuntu'] else '/usr/lib64/galera/libgalera_smm.so' %}
-{% set sst_method = 'mariabackup' %}
[client]
default-character-set=utf8
@@ -32,7 +31,7 @@ datadir=/var/lib/mysql/
wsrep_cluster_address=gcomm://{% if (groups[mariadb_shard_group] | length) > 1 %}{% for host in groups[mariadb_shard_group] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ mariadb_wsrep_port }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
-wsrep_provider_options=gmcast.listen_addr=tcp://{{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_wsrep_port }};ist.recv_addr={{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_ist_port }};{% for option in mariadb_wsrep_extra_provider_options %}{{ option }}{% if not loop.last %};{% endif %}{% endfor %}
+wsrep_provider_options={% if primary_host_on_recovery is defined and primary_host_on_recovery %}pc.bootstrap=yes;{% endif %}gmcast.listen_addr=tcp://{{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_wsrep_port }};ist.recv_addr={{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_ist_port }};{% for option in mariadb_wsrep_extra_provider_options %}{{ option }}{% if not loop.last %};{% endif %}{% endfor %}
wsrep_node_address={{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_wsrep_port }}
@@ -41,7 +40,7 @@ wsrep_sst_receive_address={{ api_interface_address | put_address_in_context('url
wsrep_provider={{ wsrep_driver }}
wsrep_cluster_name="{{ database_cluster_name }}"
wsrep_node_name={{ ansible_facts.hostname }}
-wsrep_sst_method={{ sst_method }}
+wsrep_sst_method={{ mariadb_wsrep_sst_method }}
wsrep_sst_auth={{ database_user }}:{{ database_password }}
wsrep_slave_threads=4
wsrep_on = ON
@@ -62,7 +61,7 @@ innodb_buffer_pool_size = '8192M'
pid-file=/var/lib/mysql/mariadb.pid
[sst]
-{% if sst_method == 'mariabackup' and api_address_family == 'ipv6' %}
+{% if mariadb_wsrep_sst_method == 'mariabackup' and api_address_family == 'ipv6' %}
# NOTE(yoctozepto): for IPv6 we need to tweak sockopt for socat (mariabackup sst backend)
# see: https://mariadb.com/kb/en/library/xtrabackup-v2-sst-method/#performing-ssts-with-ipv6-addresses
# and: https://jira.mariadb.org/browse/MDEV-18797
diff --git a/ansible/roles/mariadb/templates/healthcheck.cnf.j2 b/ansible/roles/mariadb/templates/healthcheck.cnf.j2
new file mode 100644
index 0000000000..670280dd51
--- /dev/null
+++ b/ansible/roles/mariadb/templates/healthcheck.cnf.j2
@@ -0,0 +1,3 @@
+[mariadb-client]
+user={{ mariadb_monitor_user }}
+password={{ mariadb_monitor_password }}
diff --git a/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2 b/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2
deleted file mode 100644
index aad07bff6a..0000000000
--- a/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "socat_wrapper {% if network_address_family == 'ipv6' %}-6{% endif %} -d -lf/var/log/kolla/mariadb/mariadb-clustercheck.log tcp-l:{{ mariadb_clustercheck_port }},fork,reuseaddr,bind={{ api_interface_address }} EXEC:clustercheck",
- "config_files": [],
- "permissions": [
- {
- "path": "/var/log/kolla/mariadb",
- "owner": "mysql:mysql",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/mariadb/templates/mariadb.json.j2 b/ansible/roles/mariadb/templates/mariadb.json.j2
index 7910d69293..2ecc9fa5ef 100644
--- a/ansible/roles/mariadb/templates/mariadb.json.j2
+++ b/ansible/roles/mariadb/templates/mariadb.json.j2
@@ -8,6 +8,13 @@
"owner": "mysql",
"perm": "0600"
}
+ {% if mariadb_enable_healthchecks | bool %},
+ {
+ "source": "{{ container_config_directory }}/healthcheck.cnf",
+ "dest": "/etc/{{ mysql_dir }}/healthcheck.cnf",
+ "owner": "mysql",
+ "perm": "0600"
+ }{% endif %}
{% if database_enable_tls_backend | bool %},
{
"source": "{{ container_config_directory }}/ca-certificates/root.crt",
diff --git a/ansible/roles/masakari/tasks/config.yml b/ansible/roles/masakari/tasks/config.yml
index 106faf4284..16e14e99f4 100644
--- a/ansible/roles/masakari/tasks/config.yml
+++ b/ansible/roles/masakari/tasks/config.yml
@@ -26,7 +26,7 @@
masakari_policy_file: "{{ masakari_policy.results.0.stat.path | basename }}"
masakari_policy_file_path: "{{ masakari_policy.results.0.stat.path }}"
when:
- - masakari_policy.results
+ - masakari_policy.results | length > 0
- name: Copying over existing policy file
template:
diff --git a/ansible/roles/mistral/tasks/config.yml b/ansible/roles/mistral/tasks/config.yml
index 4cb4fcdebf..d9ea731db3 100644
--- a/ansible/roles/mistral/tasks/config.yml
+++ b/ansible/roles/mistral/tasks/config.yml
@@ -26,7 +26,7 @@
mistral_policy_file: "{{ mistral_policy.results.0.stat.path | basename }}"
mistral_policy_file_path: "{{ mistral_policy.results.0.stat.path }}"
when:
- - mistral_policy.results
+ - mistral_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml
index 47d3e05a26..8d3dcdcfd7 100644
--- a/ansible/roles/neutron/defaults/main.yml
+++ b/ansible/roles/neutron/defaults/main.yml
@@ -33,7 +33,7 @@ neutron_services:
neutron-rpc-server:
container_name: "neutron_rpc_server"
image: "{{ neutron_rpc_server_image_full }}"
- enabled: "{{ neutron_plugin_agent in ['linuxbridge', 'openvswitch'] }}"
+ enabled: true
group: "neutron-rpc-server"
host_in_groups: "{{ inventory_hostname in groups['neutron-rpc-server'] }}"
volumes: "{{ neutron_rpc_server_default_volumes + neutron_rpc_server_extra_volumes }}"
@@ -62,8 +62,6 @@ neutron_services:
image: "{{ neutron_openvswitch_agent_image_full }}"
enabled: "{{ neutron_plugin_agent == 'openvswitch' }}"
privileged: True
- environment:
- KOLLA_LEGACY_IPTABLES: "{{ neutron_legacy_iptables | bool | lower }}"
host_in_groups: >-
{{
(inventory_hostname in groups['compute']
@@ -81,24 +79,6 @@ neutron_services:
volumes: "{{ neutron_openvswitch_agent_default_volumes + neutron_openvswitch_agent_extra_volumes }}"
dimensions: "{{ neutron_openvswitch_agent_dimensions }}"
healthcheck: "{{ neutron_openvswitch_agent_healthcheck }}"
- neutron-linuxbridge-agent:
- container_name: "neutron_linuxbridge_agent"
- image: "{{ neutron_linuxbridge_agent_image_full }}"
- privileged: True
- enabled: "{{ neutron_plugin_agent == 'linuxbridge' }}"
- environment:
- KOLLA_LEGACY_IPTABLES: "{{ neutron_legacy_iptables | bool | lower }}"
- host_in_groups: >-
- {{
- inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- }}
- volumes: "{{ neutron_linuxbridge_agent_default_volumes + neutron_linuxbridge_agent_extra_volumes }}"
- dimensions: "{{ neutron_linuxbridge_agent_dimensions }}"
- healthcheck: "{{ neutron_linuxbridge_agent_healthcheck }}"
neutron-dhcp-agent:
cgroupns_mode: "{{ 'host' if neutron_agents_wrappers | bool else 'private' }}"
container_name: "neutron_dhcp_agent"
@@ -123,7 +103,6 @@ neutron_services:
enabled: "{{ neutron_plugin_agent != 'ovn' }}"
environment:
KOLLA_IMAGE: "{{ neutron_l3_agent_image_full }}"
- KOLLA_LEGACY_IPTABLES: "{{ neutron_legacy_iptables | bool | lower }}"
KOLLA_NAME: "neutron_l3_agent"
KOLLA_NEUTRON_WRAPPERS: "{{ 'true' if neutron_agents_wrappers | bool else 'false' }}"
host_in_groups: >-
@@ -135,7 +114,6 @@ neutron_services:
dimensions: "{{ neutron_l3_agent_dimensions }}"
healthcheck: "{{ neutron_l3_agent_healthcheck }}"
pid_mode: "{{ 'host' if neutron_agents_wrappers | bool else '' }}"
-
neutron-sriov-agent:
container_name: "neutron_sriov_agent"
image: "{{ neutron_sriov_agent_image_full }}"
@@ -174,6 +152,7 @@ neutron_services:
dimensions: "{{ neutron_metadata_agent_dimensions }}"
healthcheck: "{{ neutron_metadata_agent_healthcheck }}"
neutron-ovn-metadata-agent:
+ cgroupns_mode: "{{ 'host' if neutron_agents_wrappers | bool else 'private' }}"
container_name: "neutron_ovn_metadata_agent"
image: "{{ neutron_ovn_metadata_agent_image_full }}"
privileged: True
@@ -182,6 +161,11 @@ neutron_services:
volumes: "{{ neutron_ovn_metadata_agent_default_volumes + neutron_ovn_metadata_agent_extra_volumes }}"
dimensions: "{{ neutron_ovn_metadata_agent_dimensions }}"
healthcheck: "{{ neutron_ovn_metadata_agent_healthcheck }}"
+ pid_mode: "{{ 'host' if neutron_agents_wrappers | bool else '' }}"
+ environment:
+ KOLLA_IMAGE: "{{ neutron_ovn_metadata_agent_image_full }}"
+ KOLLA_NAME: "neutron_ovn_metadata_agent"
+ KOLLA_NEUTRON_WRAPPERS: "{{ 'true' if neutron_agents_wrappers | bool else 'false' }}"
neutron-bgp-dragent:
container_name: "neutron_bgp_dragent"
image: "{{ neutron_bgp_dragent_image_full }}"
@@ -254,8 +238,6 @@ neutron_config_validation:
config: "/etc/neutron/l3_agent.ini"
- generator: "/neutron/etc/oslo-config-generator/dhcp_agent.ini"
config: "/etc/neutron/dhcp_agent.ini"
- - generator: "/neutron/etc/oslo-config-generator/linuxbridge_agent.ini"
- config: "/etc/neutron/plugins/ml2/linuxbridge_agent.ini"
####################
# Database
@@ -307,10 +289,6 @@ neutron_eswitchd_image: "{{ docker_image_url }}neutron-mlnx-agent"
neutron_eswitchd_tag: "{{ neutron_mlnx_agent_tag }}"
neutron_eswitchd_image_full: "{{ neutron_eswitchd_image }}:{{ neutron_eswitchd_tag }}"
-neutron_linuxbridge_agent_image: "{{ docker_image_url }}neutron-linuxbridge-agent"
-neutron_linuxbridge_agent_tag: "{{ neutron_tag }}"
-neutron_linuxbridge_agent_image_full: "{{ neutron_linuxbridge_agent_image }}:{{ neutron_linuxbridge_agent_tag }}"
-
neutron_metadata_agent_image: "{{ docker_image_url }}neutron-metadata-agent"
neutron_metadata_agent_tag: "{{ neutron_tag }}"
neutron_metadata_agent_image_full: "{{ neutron_metadata_agent_image }}:{{ neutron_metadata_agent_tag }}"
@@ -366,7 +344,6 @@ neutron_l3_agent_dimensions: "{{ neutron_agent_dimensions }}"
neutron_sriov_agent_dimensions: "{{ neutron_agent_dimensions }}"
neutron_mlnx_agent_dimensions: "{{ neutron_agent_dimensions }}"
neutron_eswitchd_dimensions: "{{ neutron_agent_dimensions }}"
-neutron_linuxbridge_agent_dimensions: "{{ neutron_agent_dimensions }}"
neutron_metadata_agent_dimensions: "{{ neutron_agent_dimensions }}"
neutron_ovn_metadata_agent_dimensions: "{{ neutron_agent_dimensions }}"
neutron_openvswitch_agent_dimensions: "{{ neutron_agent_dimensions }}"
@@ -406,19 +383,6 @@ neutron_l3_agent_healthcheck:
test: "{% if neutron_l3_agent_enable_healthchecks | bool %}{{ neutron_l3_agent_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ neutron_l3_agent_healthcheck_timeout }}"
-neutron_linuxbridge_agent_enable_healthchecks: "{{ enable_container_healthchecks }}"
-neutron_linuxbridge_agent_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-neutron_linuxbridge_agent_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-neutron_linuxbridge_agent_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-neutron_linuxbridge_agent_healthcheck_test: ["CMD-SHELL", "healthcheck_port neutron-linuxbridge-agent {{ om_rpc_port }}"]
-neutron_linuxbridge_agent_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-neutron_linuxbridge_agent_healthcheck:
- interval: "{{ neutron_linuxbridge_agent_healthcheck_interval }}"
- retries: "{{ neutron_linuxbridge_agent_healthcheck_retries }}"
- start_period: "{{ neutron_linuxbridge_agent_healthcheck_start_period }}"
- test: "{% if neutron_linuxbridge_agent_enable_healthchecks | bool %}{{ neutron_linuxbridge_agent_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ neutron_linuxbridge_agent_healthcheck_timeout }}"
-
neutron_metadata_agent_enable_healthchecks: "no"
neutron_metadata_agent_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
neutron_metadata_agent_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
@@ -571,9 +535,9 @@ neutron_dhcp_agent_default_volumes:
- "kolla_logs:/var/log/kolla/"
- "{{ '/dev/shm:/dev/shm' }}"
- "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
- - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' }}"
- - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' }}"
- - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' }}"
+ - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' else '' }}"
+ - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}"
+ - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}"
neutron_l3_agent_default_volumes:
- "{{ node_config_directory }}/neutron-l3-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -584,9 +548,9 @@ neutron_l3_agent_default_volumes:
- "kolla_logs:/var/log/kolla/"
- "{{ '/dev/shm:/dev/shm' }}"
- "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
- - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' }}"
- - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' }}"
- - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' }}"
+ - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' else '' }}"
+ - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}"
+ - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}"
neutron_sriov_agent_default_volumes:
- "{{ node_config_directory }}/neutron-sriov-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -609,14 +573,6 @@ neutron_eswitchd_default_volumes:
- "kolla_logs:/var/log/kolla/"
- "{{ '/dev/shm:/dev/shm' }}"
- "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
-neutron_linuxbridge_agent_default_volumes:
- - "{{ node_config_directory }}/neutron-linuxbridge-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "/lib/modules:/lib/modules:ro"
- - "kolla_logs:/var/log/kolla/"
- - "{{ '/dev/shm:/dev/shm' }}"
- - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
neutron_metadata_agent_default_volumes:
- "{{ node_config_directory }}/neutron-metadata-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -634,6 +590,9 @@ neutron_ovn_metadata_agent_default_volumes:
- "kolla_logs:/var/log/kolla/"
- "{{ '/dev/shm:/dev/shm' }}"
- "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' else '' }}"
+ - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}"
+ - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}"
neutron_openvswitch_agent_default_volumes:
- "{{ node_config_directory }}/neutron-openvswitch-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -706,7 +665,6 @@ neutron_l3_agent_extra_volumes: "{{ neutron_extra_volumes }}"
neutron_sriov_agent_extra_volumes: "{{ neutron_extra_volumes }}"
neutron_mlnx_agent_extra_volumes: "{{ neutron_extra_volumes }}"
neutron_eswitchd_extra_volumes: "{{ neutron_extra_volumes }}"
-neutron_linuxbridge_agent_extra_volumes: "{{ neutron_extra_volumes }}"
neutron_metadata_agent_extra_volumes: "{{ neutron_extra_volumes }}"
neutron_ovn_metadata_agent_extra_volumes: "{{ neutron_extra_volumes }}"
neutron_openvswitch_agent_extra_volumes: "{{ neutron_extra_volumes }}"
@@ -748,7 +706,7 @@ neutron_l3_agent_host_ipv6_neigh_gc_thresh3: "{{ neutron_l3_agent_host_ipv4_neig
neutron_api_workers: "{{ openstack_service_workers }}"
neutron_metadata_workers: "{{ openstack_service_workers }}"
-neutron_agents_wrappers: "no"
+neutron_agents_wrappers: "yes"
####################
# Subprojects
@@ -769,8 +727,6 @@ neutron_subprojects:
# Mechanism drivers
####################
mechanism_drivers:
- - name: "linuxbridge"
- enabled: "{{ neutron_plugin_agent == 'linuxbridge' }}"
- name: "openvswitch"
enabled: "{{ neutron_plugin_agent == 'openvswitch' }}"
- name: "mlnx_infiniband"
diff --git a/ansible/roles/neutron/handlers/main.yml b/ansible/roles/neutron/handlers/main.yml
index 867880bcbc..454db95d22 100644
--- a/ansible/roles/neutron/handlers/main.yml
+++ b/ansible/roles/neutron/handlers/main.yml
@@ -69,7 +69,7 @@
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
- environment: "{{ service.environment }}"
+ environment: "{{ service.environment | default(omit) }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
@@ -91,22 +91,6 @@
healthcheck: "{{ service.healthcheck | default(omit) }}"
with_sequence: "start=1 end={{ num_nova_fake_per_node }}"
-- name: Restart neutron-linuxbridge-agent container
- vars:
- service_name: "neutron-linuxbridge-agent"
- service: "{{ neutron_services[service_name] }}"
- become: true
- kolla_container:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- environment: "{{ service.environment }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- privileged: "{{ service.privileged | default(False) }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
-
- name: Restart neutron-dhcp-agent container
vars:
service_name: "neutron-dhcp-agent"
@@ -150,7 +134,7 @@
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
- environment: "{{ service.environment }}"
+ environment: "{{ service.environment | default(omit) }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
@@ -180,7 +164,7 @@
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
- environment: "{{ service.environment }}"
+ environment: "{{ service.environment | default(omit) }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
@@ -266,6 +250,9 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
+ pid_mode: "{{ service.pid_mode | default(omit) }}"
+ cgroupns_mode: "{{ service.cgroupns_mode | default(omit) }}"
+ environment: "{{ service.environment | default(omit) }}"
- name: Restart neutron-bgp-dragent container
vars:
diff --git a/ansible/roles/neutron/tasks/config-host.yml b/ansible/roles/neutron/tasks/config-host.yml
index a33919b123..8ad3f3d5a1 100644
--- a/ansible/roles/neutron/tasks/config-host.yml
+++ b/ansible/roles/neutron/tasks/config-host.yml
@@ -8,7 +8,7 @@
neutron_services |
select_services_enabled_and_mapped_to_host |
list |
- intersect(["neutron-l3-agent", "neutron-linuxbridge-agent", "neutron-openvswitch-agent"]) |
+ intersect(["neutron-l3-agent", "neutron-openvswitch-agent"]) |
list |
length > 0
diff --git a/ansible/roles/neutron/tasks/config.yml b/ansible/roles/neutron/tasks/config.yml
index a8c22e0068..18b8f3991d 100644
--- a/ansible/roles/neutron/tasks/config.yml
+++ b/ansible/roles/neutron/tasks/config.yml
@@ -38,7 +38,7 @@
neutron_policy_file: "{{ neutron_policy.results.0.stat.path | basename }}"
neutron_policy_file_path: "{{ neutron_policy.results.0.stat.path }}"
when:
- - neutron_policy.results
+ - neutron_policy.results | length > 0
- name: Copying over existing policy file
template:
@@ -69,7 +69,6 @@
- "neutron-eswitchd"
- "neutron-infoblox-ipam-agent"
- "neutron-l3-agent"
- - "neutron-linuxbridge-agent"
- "neutron-metadata-agent"
- "neutron-metering-agent"
- "neutron-mlnx-agent"
@@ -144,20 +143,6 @@
- item.key in services_need_ml2_conf_ini
with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}"
-- name: Copying over linuxbridge_agent.ini
- become: true
- vars:
- service_name: "neutron-linuxbridge-agent"
- service: "{{ neutron_services[service_name] }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/linuxbridge_agent.ini.j2"
- - "{{ node_custom_config }}/neutron/linuxbridge_agent.ini"
- - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/linuxbridge_agent.ini"
- dest: "{{ node_config_directory }}/{{ service_name }}/linuxbridge_agent.ini"
- mode: "0660"
- when: service | service_enabled_and_mapped_to_host
-
- name: Copying over openvswitch_agent.ini
become: true
vars:
@@ -372,7 +357,6 @@
vars:
service_name: "{{ item.0 }}"
services_need_ml2_conf_ini:
- - "neutron-linuxbridge-agent"
- "neutron-openvswitch-agent"
- "neutron-server"
template:
diff --git a/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2 b/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2
deleted file mode 100644
index 5b0ae990b8..0000000000
--- a/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-[agent]
-{% if neutron_agent_extensions %}
-extensions = {{ neutron_agent_extensions|map(attribute='name')|join(',') }}
-{% endif %}
-
-[linux_bridge]
-{% if inventory_hostname in groups["network"] or (inventory_hostname in groups["compute"] and computes_need_external_bridge | bool ) %}
-{# Format: physnet1:br1,physnet2:br2 #}
-physical_interface_mappings = {{ neutron_physical_networks.split(',') | zip(neutron_external_interface.split(',')) | map('join', ':') | join(',') }}
-{% endif %}
-
-[securitygroup]
-firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
-
-[vxlan]
-l2_population = true
-local_ip = {{ tunnel_interface_address }}
-
-{% if enable_neutron_sriov | bool %}
-[FDB]
-# Allows instances using sriov ports to communicate with instances that do not.
-# See https://docs.openstack.org/neutron/latest/admin/config-sriov.html
-shared_physical_device_mappings = {{ neutron_sriov_physnets }}
-{% endif %}
diff --git a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 b/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2
deleted file mode 100644
index 937abe37c8..0000000000
--- a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-{
- "command": "neutron-linuxbridge-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/linuxbridge_agent.ini",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/neutron.conf",
- "dest": "/etc/neutron/neutron.conf",
- "owner": "neutron",
- "perm": "0600"
- },
- {% if neutron_policy_file is defined %}{
- "source": "{{ container_config_directory }}/{{ neutron_policy_file }}",
- "dest": "/etc/neutron/{{ neutron_policy_file }}",
- "owner": "neutron",
- "perm": "0600"
- },{% endif %}
-{% if check_extra_ml2_plugins is defined and check_extra_ml2_plugins.matched > 0 %}{% for plugin in check_extra_ml2_plugins.files %}
- {
- "source": "{{ container_config_directory }}/{{ plugin.path | basename }}",
- "dest": "/etc/neutron/plugins/ml2/{{ plugin.path | basename }}",
- "owner": "neutron",
- "perm": "0600"
- },
-{% endfor %}{% endif %}
- {
- "source": "{{ container_config_directory }}/linuxbridge_agent.ini",
- "dest": "/etc/neutron/plugins/ml2/linuxbridge_agent.ini",
- "owner": "neutron",
- "perm": "0600"
- }{% if kolla_copy_ca_into_containers | bool %},
- {
- "source": "{{ container_config_directory }}/ca-certificates",
- "dest": "/var/lib/kolla/share/ca-certificates",
- "owner": "root",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/neutron",
- "owner": "neutron:neutron",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/neutron/templates/neutron.conf.j2 b/ansible/roles/neutron/templates/neutron.conf.j2
index 648ea97dfb..0c5323d7ee 100644
--- a/ansible/roles/neutron/templates/neutron.conf.j2
+++ b/ansible/roles/neutron/templates/neutron.conf.j2
@@ -25,8 +25,6 @@ state_path = /var/lib/neutron/kolla
{% if neutron_plugin_agent == "openvswitch" or (neutron_plugin_agent == "ovn" and neutron_ovn_dhcp_agent | bool) %}
interface_driver = openvswitch
-{% elif neutron_plugin_agent == "linuxbridge" %}
-interface_driver = linuxbridge
{% endif %}
{% if enable_nova_fake | bool %}
diff --git a/ansible/roles/nova-cell/tasks/config.yml b/ansible/roles/nova-cell/tasks/config.yml
index 8ff28b3292..7783d5ee09 100644
--- a/ansible/roles/nova-cell/tasks/config.yml
+++ b/ansible/roles/nova-cell/tasks/config.yml
@@ -35,7 +35,7 @@
nova_policy_file: "{{ nova_policy.results.0.stat.path | basename }}"
nova_policy_file_path: "{{ nova_policy.results.0.stat.path }}"
when:
- - nova_policy.results
+ - nova_policy.results | length > 0
- name: Check for vendordata file
stat:
diff --git a/ansible/roles/nova-cell/tasks/create_cells.yml b/ansible/roles/nova-cell/tasks/create_cells.yml
index e4606d88cd..66c7f18759 100644
--- a/ansible/roles/nova-cell/tasks/create_cells.yml
+++ b/ansible/roles/nova-cell/tasks/create_cells.yml
@@ -26,7 +26,7 @@
- '"already exists" not in nova_cell_create.stdout'
when:
- inventory_hostname == groups[nova_conductor.group][0] | default(None)
- - nova_cell_settings | length == 0
+ - not nova_cell_settings | bool
- name: Update cell
vars:
@@ -51,5 +51,5 @@
- nova_cell_updated.rc != 0
when:
- inventory_hostname == groups[nova_conductor.group][0] | default(None)
- - nova_cell_settings | length > 0
+ - nova_cell_settings | bool
- nova_cell_settings.cell_message_queue != nova_cell_rpc_transport_url or nova_cell_settings.cell_database != nova_cell_database_url
diff --git a/ansible/roles/nova-cell/tasks/external_ceph.yml b/ansible/roles/nova-cell/tasks/external_ceph.yml
index de8fc143e6..f2d7ba6967 100644
--- a/ansible/roles/nova-cell/tasks/external_ceph.yml
+++ b/ansible/roles/nova-cell/tasks/external_ceph.yml
@@ -200,6 +200,6 @@
# reload. This may be due to differences in tested versions of libvirt
# (8.0.0 vs 6.0.0). Reload should be low overhead, so do it always.
libvirt_restart_handlers: >-
- {{ ['Restart nova-libvirt container']
- if enable_nova_libvirt_container | bool else
- ['Reload libvirtd'] }}
+ {{ ['Reload libvirtd']
+ if not enable_nova_libvirt_container | bool else
+ [] }}
diff --git a/ansible/roles/nova/tasks/config.yml b/ansible/roles/nova/tasks/config.yml
index 92e55f417b..6666d1a3e3 100644
--- a/ansible/roles/nova/tasks/config.yml
+++ b/ansible/roles/nova/tasks/config.yml
@@ -26,7 +26,7 @@
nova_policy_file: "{{ nova_policy.results.0.stat.path | basename }}"
nova_policy_file_path: "{{ nova_policy.results.0.stat.path }}"
when:
- - nova_policy.results
+ - nova_policy.results | length > 0
- name: Check for vendordata file
stat:
@@ -103,6 +103,21 @@
- service | service_enabled_and_mapped_to_host
- nova_wsgi_provider == "apache"
+- name: Copying over vendordata file for nova services
+ vars:
+ service: "{{ nova_services[item] }}"
+ copy:
+ src: "{{ vendordata_file_path }}"
+ dest: "{{ node_config_directory }}/{{ item }}/vendordata.json"
+ mode: "0660"
+ become: True
+ when:
+ - vendordata_file_path is defined
+ - service | service_enabled_and_mapped_to_host
+ loop:
+ - "nova-metadata"
+ - "nova-api"
+
- name: "Configure uWSGI for Nova"
include_role:
name: service-uwsgi-config
@@ -123,15 +138,3 @@
loop:
- { name: "nova-api", port: "{{ nova_api_listen_port }}" }
- { name: "nova-metadata", port: "{{ nova_metadata_listen_port }}" }
-
-- name: Copying over vendordata file
- vars:
- service: "{{ nova_services['nova-api'] }}"
- copy:
- src: "{{ vendordata_file_path }}"
- dest: "{{ node_config_directory }}/nova-api/vendordata.json"
- mode: "0660"
- become: True
- when:
- - vendordata_file_path is defined
- - service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/nova/tasks/map_cell0.yml b/ansible/roles/nova/tasks/map_cell0.yml
index 1fb6c4314b..429b2fb955 100644
--- a/ansible/roles/nova/tasks/map_cell0.yml
+++ b/ansible/roles/nova/tasks/map_cell0.yml
@@ -59,7 +59,7 @@
failed_when:
- nova_cell0_updated.rc != 0
when:
- - nova_cell_settings | length > 0
+ - nova_cell_settings | bool
- nova_cell_settings.cell_database != nova_cell0_connection
run_once: True
delegate_to: "{{ groups[nova_api.group][0] }}"
diff --git a/ansible/roles/nova/templates/nova-api-wsgi.conf.j2 b/ansible/roles/nova/templates/nova-api-wsgi.conf.j2
index 7acd59eb8c..aeea3e932f 100644
--- a/ansible/roles/nova/templates/nova-api-wsgi.conf.j2
+++ b/ansible/roles/nova/templates/nova-api-wsgi.conf.j2
@@ -37,9 +37,7 @@ LogLevel info
WSGIScriptAlias / {{ wsgi_directory }}/nova-api-wsgi
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
+ ErrorLogFormat "%{cu}t %M"
ErrorLog "{{ nova_log_dir }}/nova-api-error.log"
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ nova_log_dir }}/nova-api-access.log" logformat
diff --git a/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2 b/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2
index 58ab62302f..8519ebf339 100644
--- a/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2
+++ b/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2
@@ -37,9 +37,7 @@ LogLevel info
WSGIScriptAlias / {{ wsgi_directory }}/nova-metadata-wsgi
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
+ ErrorLogFormat "%{cu}t %M"
ErrorLog "{{ nova_log_dir }}/nova-metadata-error.log"
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ nova_log_dir }}/nova-metadata-access.log" logformat
diff --git a/ansible/roles/octavia/defaults/main.yml b/ansible/roles/octavia/defaults/main.yml
index df3d39ae62..e683e67669 100644
--- a/ansible/roles/octavia/defaults/main.yml
+++ b/ansible/roles/octavia/defaults/main.yml
@@ -277,6 +277,15 @@ octavia_ks_users:
password: "{{ octavia_keystone_password }}"
role: "admin"
+####################
+# Notification
+####################
+octavia_notification_topics:
+ - name: notifications
+ enabled: "{{ enable_ceilometer | bool }}"
+
+octavia_enabled_notification_topics: "{{ octavia_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
+
####################
# Kolla
####################
diff --git a/ansible/roles/octavia/tasks/config.yml b/ansible/roles/octavia/tasks/config.yml
index 73990ac6c8..63910990c1 100644
--- a/ansible/roles/octavia/tasks/config.yml
+++ b/ansible/roles/octavia/tasks/config.yml
@@ -29,7 +29,7 @@
octavia_policy_file: "{{ octavia_policy.results.0.stat.path | basename }}"
octavia_policy_file_path: "{{ octavia_policy.results.0.stat.path }}"
when:
- - octavia_policy.results
+ - octavia_policy.results | length > 0
- name: Copying over existing policy file
template:
diff --git a/ansible/roles/octavia/templates/octavia-wsgi.conf.j2 b/ansible/roles/octavia/templates/octavia-wsgi.conf.j2
index e3a3a598c9..f4203c1c8a 100644
--- a/ansible/roles/octavia/templates/octavia-wsgi.conf.j2
+++ b/ansible/roles/octavia/templates/octavia-wsgi.conf.j2
@@ -24,9 +24,7 @@ LogLevel info
WSGIScriptAlias / {{ wsgi_directory }}/octavia-wsgi
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
+ ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/kolla/octavia/octavia-api-error.log
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog /var/log/kolla/octavia/octavia-api-access.log logformat
diff --git a/ansible/roles/octavia/templates/octavia.conf.j2 b/ansible/roles/octavia/templates/octavia.conf.j2
index 552b4cbfec..586957f303 100644
--- a/ansible/roles/octavia/templates/octavia.conf.j2
+++ b/ansible/roles/octavia/templates/octavia.conf.j2
@@ -122,6 +122,12 @@ rpc_thread_pool_size = 2
[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
+{% if octavia_enabled_notification_topics %}
+driver = messagingv2
+topics = {{ octavia_enabled_notification_topics | map(attribute='name') | join(',') }}
+{% else %}
+driver = noop
+{% endif %}
[oslo_messaging_rabbit]
use_queue_manager = true
@@ -152,7 +158,7 @@ ca_certificates_file = {{ openstack_cacert }}
[neutron]
region_name = {{ openstack_region_name }}
-endpoint_type = internal
+valid_interfaces = internal
ca_certificates_file = {{ openstack_cacert }}
[nova]
diff --git a/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2 b/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2
index ca2f04886e..4555094e02 100644
--- a/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2
+++ b/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2
@@ -2,7 +2,7 @@ opensearchDashboards.defaultAppId: "{{ opensearch_dashboards_default_app_id }}"
logging.dest: /var/log/kolla/opensearch-dashboards/opensearch-dashboards.log
server.port: {{ opensearch_dashboards_port }}
server.host: "{{ api_interface_address }}"
-opensearch.hosts: "{{ opensearch_internal_endpoint }}"
+opensearch.hosts: [{% for host in groups['opensearch'] %}"http://{{ 'api' | kolla_address(host) }}:{{ opensearch_port }}"{% if not loop.last %},{% endif %}{% endfor %}]
opensearch.requestTimeout: {{ opensearch_dashboards_opensearch_request_timeout }}
opensearch.shardTimeout: {{ opensearch_dashboards_opensearch_shard_timeout }}
opensearch.ssl.verificationMode: "{{ 'full' if opensearch_dashboards_opensearch_ssl_verify | bool else 'none' }}"
diff --git a/ansible/roles/ovn-db/defaults/main.yml b/ansible/roles/ovn-db/defaults/main.yml
index 18e999e1e6..0bc27949b6 100644
--- a/ansible/roles/ovn-db/defaults/main.yml
+++ b/ansible/roles/ovn-db/defaults/main.yml
@@ -4,6 +4,9 @@ ovn_db_services:
container_name: ovn_northd
group: ovn-northd
enabled: true
+ environment:
+ OVN_NB_DB: "{{ ovn_nb_connection }}"
+ OVN_SB_DB: "{{ ovn_sb_connection_no_relay }}"
image: "{{ ovn_northd_image_full }}"
volumes: "{{ ovn_northd_default_volumes + ovn_northd_extra_volumes }}"
dimensions: "{{ ovn_northd_dimensions }}"
@@ -11,6 +14,8 @@ ovn_db_services:
container_name: ovn_nb_db
group: ovn-nb-db
enabled: true
+ environment:
+ OVN_NB_DB: "{{ ovn_nb_connection }}"
image: "{{ ovn_nb_db_image_full }}"
volumes: "{{ ovn_nb_db_default_volumes + ovn_nb_db_extra_volumes }}"
dimensions: "{{ ovn_nb_db_dimensions }}"
@@ -18,6 +23,8 @@ ovn_db_services:
container_name: ovn_sb_db
group: ovn-sb-db
enabled: true
+ environment:
+ OVN_SB_DB: "{{ ovn_sb_connection_no_relay }}"
image: "{{ ovn_sb_db_image_full }}"
volumes: "{{ ovn_sb_db_default_volumes + ovn_sb_db_extra_volumes }}"
dimensions: "{{ ovn_sb_db_dimensions }}"
diff --git a/ansible/roles/ovn-db/handlers/main.yml b/ansible/roles/ovn-db/handlers/main.yml
index 76c04399f4..128659c7a5 100644
--- a/ansible/roles/ovn-db/handlers/main.yml
+++ b/ansible/roles/ovn-db/handlers/main.yml
@@ -7,6 +7,7 @@
kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
+ environment: "{{ service.environment }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
@@ -20,6 +21,7 @@
kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
+ environment: "{{ service.environment }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
@@ -48,6 +50,7 @@
kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
+ environment: "{{ service.environment }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
diff --git a/ansible/roles/ovn-db/tasks/bootstrap-db.yml b/ansible/roles/ovn-db/tasks/bootstrap-db.yml
index adeec211a0..89282ab98b 100644
--- a/ansible/roles/ovn-db/tasks/bootstrap-db.yml
+++ b/ansible/roles/ovn-db/tasks/bootstrap-db.yml
@@ -11,7 +11,10 @@
- name: Get OVN_Northbound cluster leader
become: true
- command: "{{ kolla_container_engine }} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound"
+ command: >-
+ {{ kolla_container_engine }} exec ovn_nb_db
+ ovs-appctl -t /var/run/ovn/ovnnb_db.ctl
+ cluster/status OVN_Northbound
changed_when: False
register: ovn_nb_cluster_status
@@ -19,12 +22,24 @@
vars:
search_string: "Role: leader"
become: true
- command: "{{ kolla_container_engine }} exec ovn_nb_db ovn-nbctl --inactivity-probe={{ ovn_nb_db_inactivity_probe }} set-connection ptcp:{{ ovn_nb_db_port }}:0.0.0.0"
+ command: >-
+ {{ kolla_container_engine }} exec ovn_nb_db
+ ovn-nbctl
+ --db unix:/var/run/ovn/ovnnb_db.sock
+ --inactivity-probe={{ ovn_nb_db_inactivity_probe }}
+ set-connection ptcp:{{ ovn_nb_db_port }}:0.0.0.0
+ register: ovn_nb_set_connection_result
+ retries: 3
+ delay: 5
+ until: ovn_nb_set_connection_result.rc == 0
when: ovn_nb_cluster_status is search(search_string)
- name: Get OVN_Southbound cluster leader
become: true
- command: "{{ kolla_container_engine }} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound"
+ command: >-
+ {{ kolla_container_engine }} exec ovn_sb_db
+ ovs-appctl -t /var/run/ovn/ovnsb_db.ctl
+ cluster/status OVN_Southbound
changed_when: False
register: ovn_sb_cluster_status
@@ -32,7 +47,16 @@
vars:
search_string: "Role: leader"
become: true
- command: "{{ kolla_container_engine }} exec ovn_sb_db ovn-sbctl --inactivity-probe={{ ovn_sb_db_inactivity_probe }} set-connection ptcp:{{ ovn_sb_db_port }}:0.0.0.0"
+ command: >-
+ {{ kolla_container_engine }} exec ovn_sb_db
+ ovn-sbctl
+ --db unix:/var/run/ovn/ovnsb_db.sock
+ --inactivity-probe={{ ovn_sb_db_inactivity_probe }}
+ set-connection ptcp:{{ ovn_sb_db_port }}:0.0.0.0
+ register: ovn_sb_set_connection_result
+ retries: 3
+ delay: 5
+ until: ovn_sb_set_connection_result.rc == 0
when: ovn_sb_cluster_status is search(search_string)
- name: Wait for ovn-nb-db
@@ -72,4 +96,4 @@
delay: 6
when:
- enable_ovn_sb_db_relay | bool
- loop: "{{ range(1, (ovn_sb_db_relay_count | int) +1) }}"
+ loop: "{{ range(1, (ovn_sb_db_relay_count | int) +1) | list }}"
diff --git a/ansible/roles/ovn-db/tasks/bootstrap-initial.yml b/ansible/roles/ovn-db/tasks/bootstrap-initial.yml
index 693e2c1ddf..fde7295039 100644
--- a/ansible/roles/ovn-db/tasks/bootstrap-initial.yml
+++ b/ansible/roles/ovn-db/tasks/bootstrap-initial.yml
@@ -20,7 +20,7 @@
changed_when: false
register: ovn_nb_db_cluster_status
when: groups['ovn-nb-db_leader'] is defined and inventory_hostname in groups.get('ovn-nb-db_had_volume_False', '')
- delegate_to: "{{ groups['ovn-nb-db_leader'][0] }}"
+ delegate_to: "{{ groups['ovn-nb-db_leader'][0] if groups['ovn-nb-db_leader'] is defined else omit }}"
- name: Check SB cluster status
command: >
@@ -30,7 +30,7 @@
changed_when: false
register: ovn_sb_db_cluster_status
when: groups['ovn-sb-db_leader'] is defined and inventory_hostname in groups.get('ovn-sb-db_had_volume_False', '')
- delegate_to: "{{ groups['ovn-sb-db_leader'][0] }}"
+ delegate_to: "{{ groups['ovn-sb-db_leader'][0] if groups['ovn-sb-db_leader'] is defined else omit }}"
- name: Remove an old node with the same ip address as the new node in NB DB
vars:
@@ -42,7 +42,7 @@
when:
- ovn_nb_db_cluster_status.stdout is defined
- (ovn_nb_db_cluster_status.stdout is search('at tcp:' + api_interface_address)) and inventory_hostname in groups.get('ovn-nb-db_had_volume_False', '')
- delegate_to: "{{ groups['ovn-nb-db_leader'][0] }}"
+ delegate_to: "{{ groups['ovn-nb-db_leader'][0] if groups['ovn-nb-db_leader'] is defined else omit }}"
- name: Remove an old node with the same ip address as the new node in SB DB
vars:
@@ -54,7 +54,7 @@
when:
- ovn_sb_db_cluster_status.stdout is defined
- (ovn_sb_db_cluster_status.stdout is search('at tcp:' + api_interface_address)) and inventory_hostname in groups.get('ovn-sb-db_had_volume_False', '')
- delegate_to: "{{ groups['ovn-sb-db_leader'][0] }}"
+ delegate_to: "{{ groups['ovn-sb-db_leader'][0] if groups['ovn-sb-db_leader'] is defined else omit }}"
- name: Set bootstrap args fact for NB (new member)
set_fact:
diff --git a/ansible/roles/ovn-db/tasks/config-relay.yml b/ansible/roles/ovn-db/tasks/config-relay.yml
index 71c3828768..f26cd2b48f 100644
--- a/ansible/roles/ovn-db/tasks/config-relay.yml
+++ b/ansible/roles/ovn-db/tasks/config-relay.yml
@@ -19,8 +19,6 @@
dest: "{{ node_config_directory }}/ovn-sb-db-relay-{{ item }}/config.json"
mode: "0660"
become: true
- notify:
- - Restart ovn-sb-db-relay container
- name: Generate config files for OVN relay services
vars:
@@ -31,5 +29,3 @@
dest: "{{ node_config_directory }}/ovn-sb-db-relay-{{ item }}/ovsdb-relay.json"
mode: "0660"
become: true
- notify:
- - Restart ovn-sb-db-relay container
diff --git a/ansible/roles/placement/tasks/config.yml b/ansible/roles/placement/tasks/config.yml
index 9093dc4bdc..8926746825 100644
--- a/ansible/roles/placement/tasks/config.yml
+++ b/ansible/roles/placement/tasks/config.yml
@@ -26,7 +26,7 @@
placement_policy_file: "{{ placement_policy.results.0.stat.path | basename }}"
placement_policy_file_path: "{{ placement_policy.results.0.stat.path }}"
when:
- - placement_policy.results
+ - placement_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
diff --git a/ansible/roles/placement/templates/placement-api-wsgi.conf.j2 b/ansible/roles/placement/templates/placement-api-wsgi.conf.j2
index aa313b6809..c1809aed13 100644
--- a/ansible/roles/placement/templates/placement-api-wsgi.conf.j2
+++ b/ansible/roles/placement/templates/placement-api-wsgi.conf.j2
@@ -25,9 +25,7 @@ LogLevel info
WSGIScriptAlias / {{ wsgi_directory }}/placement-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
+ ErrorLogFormat "%{cu}t %M"
ErrorLog "{{ log_dir }}/placement-api-error.log"
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ log_dir }}/placement-api-access.log" logformat
diff --git a/ansible/roles/prechecks/vars/main.yml b/ansible/roles/prechecks/vars/main.yml
index 6b65e8bfcd..1da91bb291 100644
--- a/ansible/roles/prechecks/vars/main.yml
+++ b/ansible/roles/prechecks/vars/main.yml
@@ -1,8 +1,8 @@
---
docker_version_min: '18.09'
docker_py_version_min: '3.4.1'
-ansible_version_min: '2.17'
-ansible_version_max: '2.18'
+ansible_version_min: '2.18'
+ansible_version_max: '2.19'
# Top level keys should match ansible_facts.distribution.
# These map to lists of supported releases (ansible_facts.distribution_release) or
@@ -14,6 +14,6 @@ host_os_distributions:
Debian:
- "bookworm"
Rocky:
- - "9"
+ - "10"
Ubuntu:
- "noble"
diff --git a/ansible/roles/prometheus/defaults/main.yml b/ansible/roles/prometheus/defaults/main.yml
index 883fdce2fe..27affe4472 100644
--- a/ansible/roles/prometheus/defaults/main.yml
+++ b/ansible/roles/prometheus/defaults/main.yml
@@ -286,10 +286,6 @@ prometheus_blackbox_exporter_endpoints_default:
- "trove:os_endpoint:{{ trove_public_base_endpoint }}"
- "{{ ('trove_internal:os_endpoint:' + trove_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
enabled: "{{ enable_trove | bool }}"
- - endpoints:
- - "venus:os_endpoint:{{ venus_public_endpoint }}"
- - "{{ ('venus_internal:os_endpoint:' + venus_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
- enabled: "{{ enable_venus | bool }}"
- endpoints:
- "watcher:os_endpoint:{{ watcher_public_endpoint }}"
- "{{ ('watcher_internal:os_endpoint:' + watcher_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
diff --git a/ansible/roles/prometheus/tasks/bootstrap.yml b/ansible/roles/prometheus/tasks/bootstrap.yml
index 3eda6b1a3e..9cc8e2634c 100644
--- a/ansible/roles/prometheus/tasks/bootstrap.yml
+++ b/ansible/roles/prometheus/tasks/bootstrap.yml
@@ -3,7 +3,7 @@
become: true
vars:
shard_id: "{{ item.key }}"
- shard_root_user: "{% if mariadb_loadbalancer == 'haproxy' %}{{ database_user }}{% else %}{{ mariadb_shard_root_user_prefix }}{{ shard_id | string }}{% endif %}"
+ shard_root_user: "{{ mariadb_shard_root_user_prefix }}{{ shard_id | string }}"
shard_host: "{{ mariadb_shards_info.shards[shard_id].hosts[0] }}"
kolla_toolbox:
container_engine: "{{ kolla_container_engine }}"
diff --git a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2
index 870448243b..a7495b4395 100644
--- a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2
+++ b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2
@@ -27,8 +27,10 @@ wsgi-file = {{ service_uwsgi_config_wsgi_file }}
plugins-dir = {{ '/usr/lib/uwsgi/plugins' if kolla_base_distro in ['ubuntu', 'debian'] else '/usr/lib64/uwsgi' }}
plugins = python3
processes = {{ service_uwsgi_config_workers }}
+socket-timeout = 30
thunder-lock = true
{% if service_uwsgi_config_uid is defined %}
uid = {{ service_uwsgi_config_uid }}
{% endif %}
worker-reload-mercy = {{ service_uwsgi_config_worker_timeout }}
+static-map = /static=/var/lib/kolla/venv/lib/python3/site-packages/static
diff --git a/ansible/roles/skyline/templates/skyline.yaml.j2 b/ansible/roles/skyline/templates/skyline.yaml.j2
index c48dc4eeb9..ad7fe09ae9 100644
--- a/ansible/roles/skyline/templates/skyline.yaml.j2
+++ b/ansible/roles/skyline/templates/skyline.yaml.j2
@@ -2,7 +2,7 @@ default:
access_token_expire: {{ skyline_access_token_expire_seconds }}
access_token_renew: {{ skyline_access_token_renew_seconds }}
cors_allow_origins: {{ skyline_backend_cors_origins }}
- database_url: mysql://{{ skyline_database_user }}:{{ skyline_database_password }}@{{ skyline_database_address }}/{{ skyline_database_name }}{{ '?ssl_ca=' ~ openstack_cacert if skyline_database_enable_tls_internal | bool }}
+ database_url: mysql+pymysql://{{ skyline_database_user }}:{{ skyline_database_password }}@{{ skyline_database_address }}/{{ skyline_database_name }}{{ '?ssl_ca=' ~ openstack_cacert if skyline_database_enable_tls_internal | bool }}
debug: {{ skyline_logging_debug }}
log_dir: {{ log_dir }}
{% if enable_prometheus | bool %}
diff --git a/ansible/roles/tacker/tasks/config.yml b/ansible/roles/tacker/tasks/config.yml
index 95669e5268..f6473b0d31 100644
--- a/ansible/roles/tacker/tasks/config.yml
+++ b/ansible/roles/tacker/tasks/config.yml
@@ -26,7 +26,7 @@
tacker_policy_file: "{{ tacker_policy.results.0.stat.path | basename }}"
tacker_policy_file_path: "{{ tacker_policy.results.0.stat.path }}"
when:
- - tacker_policy.results
+ - tacker_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
diff --git a/ansible/roles/trove/tasks/config.yml b/ansible/roles/trove/tasks/config.yml
index fa2dc44179..8427d0079b 100644
--- a/ansible/roles/trove/tasks/config.yml
+++ b/ansible/roles/trove/tasks/config.yml
@@ -26,7 +26,7 @@
trove_policy_file: "{{ trove_policy.results.0.stat.path | basename }}"
trove_policy_file_path: "{{ trove_policy.results.0.stat.path }}"
when:
- - trove_policy.results
+ - trove_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
diff --git a/ansible/roles/trove/templates/trove-wsgi.conf.j2 b/ansible/roles/trove/templates/trove-wsgi.conf.j2
index 26449a5384..3d79d04e7d 100644
--- a/ansible/roles/trove/templates/trove-wsgi.conf.j2
+++ b/ansible/roles/trove/templates/trove-wsgi.conf.j2
@@ -24,9 +24,7 @@ LogLevel info
WSGIScriptAlias / {{ wsgi_directory }}/trove-wsgi
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
+ ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/kolla/trove/trove-api-error.log
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog /var/log/kolla/trove/trove-api-access.log logformat
diff --git a/ansible/roles/venus/defaults/main.yml b/ansible/roles/venus/defaults/main.yml
deleted file mode 100644
index 2677cb5263..0000000000
--- a/ansible/roles/venus/defaults/main.yml
+++ /dev/null
@@ -1,172 +0,0 @@
----
-venus_services:
- venus-api:
- container_name: venus_api
- group: venus-api
- enabled: true
- image: "{{ venus_api_image_full }}"
- volumes: "{{ venus_api_default_volumes + venus_api_extra_volumes }}"
- dimensions: "{{ venus_api_dimensions }}"
- healthcheck: "{{ venus_api_healthcheck }}"
- haproxy:
- venus_api:
- enabled: "{{ enable_venus }}"
- mode: "http"
- external: false
- port: "{{ venus_api_port }}"
- backend_http_extra:
- - "option httpchk"
- venus_api_external:
- enabled: "{{ enable_venus }}"
- mode: "http"
- external: true
- external_fqdn: "{{ venus_external_fqdn }}"
- port: "{{ venus_api_public_port }}"
- backend_http_extra:
- - "option httpchk"
- venus-manager:
- container_name: venus_manager
- group: venus-manager
- enabled: true
- image: "{{ venus_manager_image_full }}"
- volumes: "{{ venus_manager_default_volumes + venus_manager_extra_volumes }}"
- dimensions: "{{ venus_manager_dimensions }}"
-
-####################
-# Config Validate
-####################
-venus_config_validation:
- - generator: "/venus/tools/config/venus-config-generator.conf"
- config: "/etc/venus/venus.conf"
-
-####################
-# Database
-####################
-venus_database_name: "venus"
-venus_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}venus{% endif %}"
-venus_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
-
-####################
-# Database sharding
-####################
-venus_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ venus_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
-venus_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
-venus_database_shard:
- users:
- - user: "{{ venus_database_user }}"
- password: "{{ venus_database_password }}"
- shard_id: "{{ venus_database_shard_id }}"
- rules:
- - schema: "{{ venus_database_name }}"
- shard_id: "{{ venus_database_shard_id }}"
- - user: "{{ venus_database_user }}"
- shard_id: "{{ venus_database_shard_id }}"
-
-
-####################
-# Docker
-####################
-venus_tag: "{{ openstack_tag }}"
-
-venus_api_image: "{{ docker_image_url }}venus-api"
-venus_api_tag: "{{ venus_tag }}"
-venus_api_image_full: "{{ venus_api_image }}:{{ venus_api_tag }}"
-
-venus_manager_image: "{{ docker_image_url }}venus-manager"
-venus_manager_tag: "{{ venus_tag }}"
-venus_manager_image_full: "{{ venus_manager_image }}:{{ venus_manager_tag }}"
-
-venus_api_dimensions: "{{ default_container_dimensions }}"
-venus_manager_dimensions: "{{ default_container_dimensions }}"
-
-venus_api_enable_healthchecks: "{{ enable_container_healthchecks }}"
-venus_api_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-venus_api_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-venus_api_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-venus_api_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ venus_api_port }}"]
-venus_api_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-venus_api_healthcheck:
- interval: "{{ venus_api_healthcheck_interval }}"
- retries: "{{ venus_api_healthcheck_retries }}"
- start_period: "{{ venus_api_healthcheck_start_period }}"
- test: "{% if venus_api_enable_healthchecks | bool %}{{ venus_api_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ venus_api_healthcheck_timeout }}"
-
-venus_api_default_volumes:
- - "{{ node_config_directory }}/venus-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ '/dev/shm:/dev/shm' }}"
- - "{{ kolla_dev_repos_directory ~ '/venus:/dev-mode/venus' if venus_dev_mode | bool else '' }}"
- - "venus:/var/lib/venus/"
-venus_manager_default_volumes:
- - "{{ node_config_directory }}/venus-manager/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ '/dev/shm:/dev/shm' }}"
- - "{{ kolla_dev_repos_directory ~ '/venus:/dev-mode/venus' if venus_dev_mode | bool else '' }}"
- - "venus:/var/lib/venus/"
-
-venus_extra_volumes: "{{ default_extra_volumes }}"
-venus_api_extra_volumes: "{{ venus_extra_volumes }}"
-venus_manager_extra_volumes: "{{ venus_extra_volumes }}"
-
-####################
-# OpenStack
-####################
-venus_logging_debug: "{{ openstack_logging_debug }}"
-
-venus_keystone_user: "venus"
-
-openstack_venus_auth: "{{ openstack_auth }}"
-
-
-####################
-# Kolla
-####################
-venus_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
-venus_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
-venus_dev_mode: "{{ kolla_dev_mode }}"
-venus_source_version: "{{ kolla_source_version }}"
-
-####################
-# logging
-####################
-openstack_logging_default_format_string: "%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [- req-None - - - - -] %(instance)s%(message)s"
-openstack_logging_context_format_string: "%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s"
-
-####################
-# Notifications
-####################
-venus_notification_topics:
- - name: notifications
- enabled: "{{ enable_ceilometer | bool }}"
-
-venus_enabled_notification_topics: "{{ venus_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
-
-####################
-# Keystone
-####################
-venus_ks_services:
- - name: "venus"
- type: "LMS"
- description: "Log Manager Service"
- endpoints:
- - {'interface': 'internal', 'url': '{{ venus_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ venus_public_endpoint }}'}
-
-venus_ks_users:
- - project: "service"
- user: "{{ venus_keystone_user }}"
- password: "{{ venus_keystone_password }}"
- role: "admin"
-
-# Database
-venus_database_enable_tls_internal: "{{ database_enable_tls_internal | bool }}"
-
-###################
-# Copy certificates
-###################
-venus_copy_certs: "{{ kolla_copy_ca_into_containers | bool or venus_database_enable_tls_internal | bool }}"
diff --git a/ansible/roles/venus/handlers/main.yml b/ansible/roles/venus/handlers/main.yml
deleted file mode 100644
index 1f8b3fdb50..0000000000
--- a/ansible/roles/venus/handlers/main.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: Restart venus-api container
- vars:
- service_name: "venus-api"
- service: "{{ venus_services[service_name] }}"
- become: true
- kolla_container:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
-
-- name: Restart venus-manager container
- vars:
- service_name: "venus-manager"
- service: "{{ venus_services[service_name] }}"
- become: true
- kolla_container:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
diff --git a/ansible/roles/venus/tasks/bootstrap.yml b/ansible/roles/venus/tasks/bootstrap.yml
deleted file mode 100644
index 57938e60f1..0000000000
--- a/ansible/roles/venus/tasks/bootstrap.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- name: Creating venus database
- become: true
- kolla_toolbox:
- container_engine: "{{ kolla_container_engine }}"
- module_name: mysql_db
- module_args:
- ca_cert: "{{ openstack_cacert if database_enable_tls_internal | bool else omit }}"
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ venus_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ venus_database_name }}"
- run_once: True
- delegate_to: "{{ groups['venus-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- name: Creating venus database user and setting permissions
- become: true
- kolla_toolbox:
- container_engine: "{{ kolla_container_engine }}"
- module_name: mysql_user
- module_args:
- ca_cert: "{{ openstack_cacert if database_enable_tls_internal | bool else omit }}"
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ venus_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ venus_database_user }}"
- password: "{{ venus_database_password }}"
- host: "%"
- priv: "{{ venus_database_name }}.*:ALL"
- append_privs: "yes"
- run_once: True
- delegate_to: "{{ groups['venus-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
diff --git a/ansible/roles/venus/tasks/clone.yml b/ansible/roles/venus/tasks/clone.yml
deleted file mode 100644
index 4d85cc0e80..0000000000
--- a/ansible/roles/venus/tasks/clone.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Cloning venus source repository for development
- become: true
- git:
- repo: "{{ venus_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}"
- update: "{{ venus_dev_repos_pull }}"
- version: "{{ venus_source_version }}"
diff --git a/ansible/roles/venus/tasks/config.yml b/ansible/roles/venus/tasks/config.yml
deleted file mode 100644
index 05cfe4de3e..0000000000
--- a/ansible/roles/venus/tasks/config.yml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}"
-
-- name: Check if policies shall be overwritten
- stat:
- path: "{{ item }}"
- run_once: True
- delegate_to: localhost
- register: venus_policy
- with_first_found:
- - files: "{{ supported_policy_format_list }}"
- paths:
- - "{{ node_custom_config }}/venus/"
- skip: true
-
-- name: Set venus policy file
- set_fact:
- venus_policy_file: "{{ venus_policy.results.0.stat.path | basename }}"
- venus_policy_file_path: "{{ venus_policy.results.0.stat.path }}"
- when:
- - venus_policy.results
-
-- include_tasks: copy-certs.yml
- when:
- - venus_copy_certs | bool
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}"
-
-- name: Copying over venus.conf
- vars:
- service_name: "{{ item.key }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/venus.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/venus.conf"
- - "{{ node_custom_config }}/venus/{{ item.key }}.conf"
- - "{{ node_custom_config }}/venus/{{ inventory_hostname }}/venus.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/venus.conf"
- mode: "0660"
- become: true
- with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}"
-
-- name: Copying over existing policy file
- template:
- src: "{{ venus_policy_file_path }}"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ venus_policy_file }}"
- mode: "0660"
- when:
- - venus_policy_file is defined
- with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/venus/tasks/config_validate.yml b/ansible/roles/venus/tasks/config_validate.yml
deleted file mode 100644
index 57ab862017..0000000000
--- a/ansible/roles/venus/tasks/config_validate.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_role:
- name: service-config-validate
- vars:
- service_config_validate_services: "{{ venus_services }}"
- service_name: "{{ project_name }}"
- service_config_validation: "{{ venus_config_validation }}"
diff --git a/ansible/roles/venus/tasks/loadbalancer.yml b/ansible/roles/venus/tasks/loadbalancer.yml
deleted file mode 100644
index b692351e63..0000000000
--- a/ansible/roles/venus/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ venus_services }}"
- tags: always
diff --git a/ansible/roles/venus/tasks/precheck.yml b/ansible/roles/venus/tasks/precheck.yml
deleted file mode 100644
index 10408219d1..0000000000
--- a/ansible/roles/venus/tasks/precheck.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ venus_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- action: get_containers
- container_engine: "{{ kolla_container_engine }}"
- name:
- - venus_api
- check_mode: false
- register: container_facts
-
-- name: Checking free port for Venus API
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ venus_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts.containers['venus_api'] is not defined
- - inventory_hostname in groups['venus-api']
diff --git a/ansible/roles/venus/tasks/register.yml b/ansible/roles/venus/tasks/register.yml
deleted file mode 100644
index d61d9a9b0c..0000000000
--- a/ansible/roles/venus/tasks/register.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ openstack_venus_auth }}"
- service_ks_register_services: "{{ venus_ks_services }}"
- service_ks_register_users: "{{ venus_ks_users }}"
diff --git a/ansible/roles/venus/templates/venus-api.json.j2 b/ansible/roles/venus/templates/venus-api.json.j2
deleted file mode 100644
index 0a825529d8..0000000000
--- a/ansible/roles/venus/templates/venus-api.json.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-{
- "command": "venus_api --config-file /etc/venus/venus.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/venus.conf",
- "dest": "/etc/venus/venus.conf",
- "owner": "venus",
- "perm": "0644"
- }{% if kolla_copy_ca_into_containers | bool %},
- {
- "source": "{{ container_config_directory }}/ca-certificates",
- "dest": "/var/lib/kolla/share/ca-certificates",
- "owner": "root",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path":"/var/log/kolla/venus/venus-api.log",
- "owner": "venus:venus",
- "recurse": true
- }
- ]
-}
-
diff --git a/ansible/roles/venus/templates/venus-manager.json.j2 b/ansible/roles/venus/templates/venus-manager.json.j2
deleted file mode 100644
index 02f7503cb3..0000000000
--- a/ansible/roles/venus/templates/venus-manager.json.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-{
- "command": "venus_manager --config-file /etc/venus/venus.conf task start",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/venus.conf",
- "dest": "/etc/venus/venus.conf",
- "owner": "venus",
- "perm": "0644"
- }{% if kolla_copy_ca_into_containers | bool %},
- {
- "source": "{{ container_config_directory }}/ca-certificates",
- "dest": "/var/lib/kolla/share/ca-certificates",
- "owner": "root",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path":"/var/log/kolla/venus/venus-manager.log",
- "owner": "venus:venus",
- "recurse": true
- }
- ]
-}
-
diff --git a/ansible/roles/venus/templates/venus.conf.j2 b/ansible/roles/venus/templates/venus.conf.j2
deleted file mode 100644
index d4cbd91f40..0000000000
--- a/ansible/roles/venus/templates/venus.conf.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-[DEFAULT]
-my_ip = {{ api_interface_address }}
-periodic_interval = 60
-rootwrap_config = /etc/venus/rootwrap.conf
-api_paste_config = /etc/venus/api-paste.ini
-log_dir = /var/log/kolla/venus/
-debug = {{ venus_logging_debug }}
-auth_strategy = keystone
-os_region_name = {{ openstack_region_name }}
-osapi_venus_listen = {{ api_interface_address }}
-osapi_venus_listen_port = {{ venus_api_port }}
-
-logging_default_format_string = {{ openstack_logging_default_format_string }}
-logging_context_format_string = {{ openstack_logging_context_format_string }}
-
-transport_url = {{ rpc_transport_url }}
-
-[database]
-connection = mysql+pymysql://{{ venus_database_user }}:{{ venus_database_password }}@{{ venus_database_address }}/{{ venus_database_name }}?charset=utf8{{ '&ssl_ca=' ~ openstack_cacert if venus_database_enable_tls_internal | bool }}
-
-[keystone_authtoken]
-cafile = {{ openstack_cacert }}
-project_name = service
-password = {{ venus_keystone_password }}
-username = {{ venus_keystone_user }}
-auth_url = {{ keystone_internal_url }}
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = {{ default_user_domain_id }}
-auth_type = password
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if enable_opensearch | bool %}
-[elasticsearch]
-url = {{ opensearch_internal_endpoint }}
-{% endif %}
-
-[oslo_concurrency]
-lock_path = /var/lib/venus/tmp
diff --git a/ansible/roles/venus/vars/main.yml b/ansible/roles/venus/vars/main.yml
deleted file mode 100644
index 3955d5f95f..0000000000
--- a/ansible/roles/venus/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "venus"
diff --git a/ansible/roles/watcher/tasks/config.yml b/ansible/roles/watcher/tasks/config.yml
index ee1a6c6912..1b21a5202d 100644
--- a/ansible/roles/watcher/tasks/config.yml
+++ b/ansible/roles/watcher/tasks/config.yml
@@ -26,7 +26,7 @@
watcher_policy_file: "{{ watcher_policy.results.0.stat.path | basename }}"
watcher_policy_file_path: "{{ watcher_policy.results.0.stat.path }}"
when:
- - watcher_policy.results
+ - watcher_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
diff --git a/ansible/roles/zun/tasks/config.yml b/ansible/roles/zun/tasks/config.yml
index 7d5100189d..7ef4c7e3b5 100644
--- a/ansible/roles/zun/tasks/config.yml
+++ b/ansible/roles/zun/tasks/config.yml
@@ -31,7 +31,7 @@
zun_policy_file: "{{ zun_policy.results.0.stat.path | basename }}"
zun_policy_file_path: "{{ zun_policy.results.0.stat.path }}"
when:
- - zun_policy.results
+ - zun_policy.results | length > 0
- include_tasks: copy-certs.yml
when:
diff --git a/ansible/site.yml b/ansible/site.yml
index 5588746dfb..33855f1d61 100644
--- a/ansible/site.yml
+++ b/ansible/site.yml
@@ -68,7 +68,6 @@
- enable_tacker_{{ enable_tacker | bool }}
- enable_telegraf_{{ enable_telegraf | bool }}
- enable_trove_{{ enable_trove | bool }}
- - enable_venus_{{ enable_venus | bool }}
- enable_watcher_{{ enable_watcher | bool }}
- enable_zun_{{ enable_zun | bool }}
tags: always
@@ -87,7 +86,6 @@
- name: Apply role common
gather_facts: false
hosts:
- - cron
- kolla-logs
- kolla-toolbox
serial: '{{ kolla_serial|default("0") }}'
@@ -100,6 +98,20 @@
roles:
- role: common
+- name: Apply role cron
+ gather_facts: false
+ hosts:
+ - cron
+ serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ cron_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ tags:
+ - cron
+ roles:
+ - role: cron
+
- name: Apply role fluentd
gather_facts: false
hosts:
@@ -313,11 +325,6 @@
tasks_from: loadbalancer
tags: trove
when: enable_trove | bool
- - include_role:
- name: venus
- tasks_from: loadbalancer
- tags: venus
- when: enable_venus | bool
- include_role:
name: watcher
tasks_from: loadbalancer
@@ -1053,21 +1060,6 @@
- { role: masakari,
tags: masakari }
-- name: Apply role venus
- gather_facts: false
- hosts:
- - venus-api
- - venus-manager
- - '&enable_venus_True'
- serial: '{{ kolla_serial|default("0") }}'
- max_fail_percentage: >-
- {{ venus_max_fail_percentage |
- default(kolla_max_fail_percentage) |
- default(100) }}
- roles:
- - { role: venus,
- tags: venus }
-
- name: Apply role skyline
gather_facts: false
hosts:
diff --git a/doc/source/admin/mariadb-backup-and-restore.rst b/doc/source/admin/mariadb-backup-and-restore.rst
index 6ebb73d5b5..69fd411fd4 100644
--- a/doc/source/admin/mariadb-backup-and-restore.rst
+++ b/doc/source/admin/mariadb-backup-and-restore.rst
@@ -83,7 +83,7 @@ following options on the first database node:
docker run --rm -it --volumes-from mariadb --name dbrestore \
--volume mariadb_backup:/backup \
- quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \
+ quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-10 \
/bin/bash
(dbrestore) $ cd /backup
(dbrestore) $ rm -rf /backup/restore
@@ -105,7 +105,7 @@ place, again on the first node:
docker run --rm -it --volumes-from mariadb --name dbrestore \
--volume mariadb_backup:/backup \
- quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \
+ quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-10 \
/bin/bash
(dbrestore) $ rm -rf /var/lib/mysql/*
(dbrestore) $ rm -rf /var/lib/mysql/\.[^\.]*
@@ -148,7 +148,7 @@ incremental backup,
docker run --rm -it --volumes-from mariadb --name dbrestore \
--volume mariadb_backup:/backup --tmpfs /backup/restore \
- quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \
+ quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-10 \
/bin/bash
(dbrestore) $ cd /backup
(dbrestore) $ rm -rf /backup/restore
diff --git a/doc/source/conf.py b/doc/source/conf.py
index add1790c4a..785cb15e20 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -142,10 +142,10 @@
TESTED_RUNTIMES_GOVERNANCE_URL =\
'https://governance.openstack.org/tc/reference/runtimes/{}.html'.format(KOLLA_OPENSTACK_RELEASE)
-ANSIBLE_CORE_VERSION_MIN = '2.17'
-ANSIBLE_CORE_VERSION_MAX = '2.18'
-ANSIBLE_VERSION_MIN = '10'
-ANSIBLE_VERSION_MAX = '11'
+ANSIBLE_CORE_VERSION_MIN = '2.18'
+ANSIBLE_CORE_VERSION_MAX = '2.19'
+ANSIBLE_VERSION_MIN = '11'
+ANSIBLE_VERSION_MAX = '12'
GLOBAL_VARIABLE_MAP = {
'|ANSIBLE_CORE_VERSION_MIN|': ANSIBLE_CORE_VERSION_MIN,
diff --git a/doc/source/contributor/adding-a-new-service.rst b/doc/source/contributor/adding-a-new-service.rst
index 665439112c..967ca139b8 100644
--- a/doc/source/contributor/adding-a-new-service.rst
+++ b/doc/source/contributor/adding-a-new-service.rst
@@ -42,10 +42,10 @@ which Kolla uses throughout and which should be followed.
* Log rotation
- For OpenStack services there should be a ``cron-logrotate-PROJECT.conf.j2``
- template file in ``ansible/roles/common/templates`` with the following
+ template file in ``ansible/roles/cron/templates`` with the following
content:
- .. path ansible/roles/common/templates/cron-logrotate-PROJECT.conf.j2
+ .. path ansible/roles/cron/templates/cron-logrotate-PROJECT.conf.j2
.. code-block:: console
"/var/log/kolla/PROJECT/*.log"
@@ -53,14 +53,14 @@ which Kolla uses throughout and which should be followed.
}
- For OpenStack services there should be an entry in the ``services`` list
- in the ``cron.json.j2`` template file in ``ansible/roles/common/templates``.
+ in the ``cron.json.j2`` template file in ``ansible/roles/cron/templates``.
* Log delivery
- For OpenStack services the service should add a new ``rewriterule`` in the
``match`` element in the ``01-rewrite.conf.j2`` template file in
- ``ansible/roles/common/templates/conf/filter`` to deliver log messages to
- Elasticsearch.
+ ``ansible/roles/fluentd/templates/conf/filter`` to deliver log messages to
+ OpenSearch.
* Documentation
diff --git a/doc/source/reference/bare-metal/ironic-guide.rst b/doc/source/reference/bare-metal/ironic-guide.rst
index d7a5ee90b7..99e8bbefb2 100644
--- a/doc/source/reference/bare-metal/ironic-guide.rst
+++ b/doc/source/reference/bare-metal/ironic-guide.rst
@@ -107,6 +107,26 @@ You may optionally pass extra kernel parameters to the inspection kernel using:
in ``/etc/kolla/globals.yml``.
+PXE filter (optional)
+~~~~~~~~~~~~~~~~~~~~~
+
+To keep parity with the standalone inspector you can enable the experimental
+PXE filter service:
+
+.. code-block:: yaml
+
+ enable_ironic_pxe_filter: "yes"
+
+The PXE filter container runs alongside ``ironic-dnsmasq`` and cleans up stale
+DHCP entries. It is especially useful when auto discovery is enabled and when
+the dnsmasq DHCP range overlaps with a Neutron-served network. For the upstream
+details see
+https://docs.openstack.org/ironic/latest/admin/inspection/pxe_filter.html.
+
+.. note::
+
+ Upstream still classifies this PXE filter implementation as experimental.
+
Configure conductor's HTTP server port (optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The port used for conductor's HTTP server is controlled via
diff --git a/doc/source/reference/containers/kuryr-guide.rst b/doc/source/reference/containers/kuryr-guide.rst
index fbcbfd4192..f748d16c5a 100644
--- a/doc/source/reference/containers/kuryr-guide.rst
+++ b/doc/source/reference/containers/kuryr-guide.rst
@@ -26,7 +26,8 @@ The IP address is host running the etcd service. ```2375``` is port that
allows Docker daemon to be accessed remotely. ```2379``` is the etcd listening
port.
-By default etcd and kuryr are disabled in the ``group_vars/all.yml``.
+By default etcd and kuryr are disabled in the ``group_vars/all/etcd.yml`` and
+``group_vars/all/kuryr.yml`` files.
In order to enable them, you need to edit the file globals.yml and set the
following variables
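A minimal sketch of those overrides, assuming ``enable_etcd`` and
``enable_kuryr`` are the relevant toggles:

.. code-block:: yaml

   enable_etcd: "yes"
   enable_kuryr: "yes"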
diff --git a/doc/source/reference/databases/mariadb-guide.rst b/doc/source/reference/databases/mariadb-guide.rst
index 71b51f77c4..e34e4bf0a4 100644
--- a/doc/source/reference/databases/mariadb-guide.rst
+++ b/doc/source/reference/databases/mariadb-guide.rst
@@ -52,9 +52,9 @@ inventory file in the way described below:
.. note::
If ``mariadb_shard_id`` is not defined for host in inventory file it will be set automatically
- to ``mariadb_default_database_shard_id`` (default 0) from ``group_vars/all.yml`` and can be
- overwritten in ``/etc/kolla/globals.yml``. Shard which is marked as default is special in case
- of backup or loadbalance, as it is described below.
+ to ``mariadb_default_database_shard_id`` (default 0) from ``group_vars/all/mariadb.yml`` and
+ can be overridden in ``/etc/kolla/globals.yml``. The shard marked as default is
+ special with regard to backup and load balancing, as described below.
Loadbalancer
------------
diff --git a/doc/source/reference/networking/neutron.rst b/doc/source/reference/networking/neutron.rst
index 0dae7dcb6d..3994ed886e 100644
--- a/doc/source/reference/networking/neutron.rst
+++ b/doc/source/reference/networking/neutron.rst
@@ -279,6 +279,13 @@ In order to deploy Neutron OVN Agent you need to set the following:
Currently the agent is only needed for QoS for hardware offloaded ports.
+When you need to run ``ovn-nbctl`` or ``ovn-sbctl`` commands, it is most
+convenient to run them from the ``ovn_northd`` container:
+
+.. code-block:: console
+
+ docker exec ovn_northd ovn-nbctl show
+
Mellanox Infiniband (ml2/mlnx)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -339,9 +346,10 @@ In this example:
Running Neutron agents subprocesses in separate containers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-There is an experimental feature in Kolla-Ansible that allows to overcome
-the issue of breaking data plane connectivity and dhcp services when
-restarting neutron-l3-agent and neutron-dhcp-agent.
+Kolla Ansible provides a feature that avoids breaking data plane
+connectivity, DHCP and metadata services when restarting neutron-l3-agent
+and neutron-dhcp-agent in ml2/ovs, or when restarting the
+neutron-ovn-metadata-agent in ml2/ovn.
To enable it, modify the configuration in ``/etc/kolla/globals.yml``:
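A minimal sketch of that setting, assuming the ``neutron_agents_wrappers``
toggle documented in ``etc/kolla/globals.yml`` is the one in use:

.. code-block:: yaml

   neutron_agents_wrappers: "yes"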
diff --git a/doc/source/reference/networking/octavia.rst b/doc/source/reference/networking/octavia.rst
index af08cf9e04..7a29d67fa8 100644
--- a/doc/source/reference/networking/octavia.rst
+++ b/doc/source/reference/networking/octavia.rst
@@ -340,7 +340,7 @@ Now deploy Octavia:
Amphora image
-------------
-It is necessary to build an Amphora image. On CentOS / Rocky 9:
+It is necessary to build an Amphora image. On CentOS / Rocky 10:
.. code-block:: console
diff --git a/doc/source/reference/orchestration-and-nfv/tacker-guide.rst b/doc/source/reference/orchestration-and-nfv/tacker-guide.rst
index 78fdf4821b..4c99e72e15 100644
--- a/doc/source/reference/orchestration-and-nfv/tacker-guide.rst
+++ b/doc/source/reference/orchestration-and-nfv/tacker-guide.rst
@@ -30,7 +30,7 @@ Preparation and Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~
By default tacker and required services are disabled in
-the ``group_vars/all.yml`` file.
+the ``group_vars/all/tacker.yml`` file.
In order to enable them, you need to edit the file
``/etc/kolla/globals.yml`` and set the following variables:
diff --git a/doc/source/reference/shared-services/keystone-guide.rst b/doc/source/reference/shared-services/keystone-guide.rst
index d0958a3f92..b92230dcb6 100644
--- a/doc/source/reference/shared-services/keystone-guide.rst
+++ b/doc/source/reference/shared-services/keystone-guide.rst
@@ -104,6 +104,20 @@ Example for Keycloak shown below:
keystone_federation_oidc_additional_options:
OIDCTokenBindingPolicy: disabled
+When using OIDC, operators can also use the following variable
+to customize the delay before retrying authentication against the IdP
+when the authentication has timed out:
+
+``keystone_federation_oidc_error_page_retry_login_delay_milliseconds``
+ Default is 5000 milliseconds (5 seconds).
+
+It is also possible to override ``OIDCHTMLErrorTemplate``, the custom
+error template page, by providing the file at:
+
+.. code-block:: yaml
+
+ {{ node_custom_config }}/keystone/federation/modoidc-error-page.html
+
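A minimal sketch of tuning only the retry delay instead, assuming a value of
10000 milliseconds is desired, is to set the variable in
``/etc/kolla/globals.yml``:

.. code-block:: yaml

   keystone_federation_oidc_error_page_retry_login_delay_milliseconds: 10000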
Identity providers configurations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/user/multinode.rst b/doc/source/user/multinode.rst
index 21bf67fa45..5a0db7cf2a 100644
--- a/doc/source/user/multinode.rst
+++ b/doc/source/user/multinode.rst
@@ -136,7 +136,7 @@ host or group variables:
`__
are quite complex, but it is worth becoming familiar with them if using host
and group variables. The playbook group variables in
-``ansible/group_vars/all.yml`` define global defaults, and these take
+``ansible/group_vars/all/`` define global defaults, and these take
precedence over variables defined in an inventory file and inventory
``group_vars/all``, but not over inventory ``group_vars/*``. Variables in
'extra' files (``globals.yml``) have the highest precedence, so any variables
diff --git a/doc/source/user/operating-kolla.rst b/doc/source/user/operating-kolla.rst
index 0b2d78eca8..a48f3357f1 100644
--- a/doc/source/user/operating-kolla.rst
+++ b/doc/source/user/operating-kolla.rst
@@ -198,15 +198,42 @@ After this command is complete, the containers will have been recreated from
the new images and all database schema upgrades and similar actions performed
for you.
+
+CLI Command Completion
+~~~~~~~~~~~~~~~~~~~~~~
+
+Kolla Ansible supports shell command completion to make the CLI easier to use.
+
+To enable Bash completion, generate the completion script:
+
+.. code-block:: console
+
+ kolla-ansible complete --shell bash > ~/.kolla_ansible_completion.sh
+
+Then, add the following line to your ``~/.bashrc`` file:
+
+.. code-block:: console
+
+ source ~/.kolla_ansible_completion.sh
+
+Finally, reload your shell configuration:
+
+.. code-block:: console
+
+ source ~/.bashrc
+
+.. note::
+
+ If you're using a shell other than Bash, replace ``--shell bash`` with your shell type,
+ e.g., ``zsh``, and adapt your shell's configuration file accordingly.
+
+
Tips and Tricks
~~~~~~~~~~~~~~~
Kolla Ansible CLI
-----------------
-When running the ``kolla-ansible`` CLI, additional arguments may be passed to
-``ansible-playbook`` via the ``EXTRA_OPTS`` environment variable.
-
``kolla-ansible deploy -i INVENTORY`` is used to deploy and start all Kolla
containers.
@@ -242,6 +269,10 @@ images on hosts.
files for enabled OpenStack services, without then restarting the containers so
it is not applied right away.
+``kolla-ansible validate-config -i INVENTORY`` is used to validate generated
+configuration files of enabled OpenStack services. By default, the results are
+saved to ``/var/log/kolla/config-validate`` when issues are detected.
+
``kolla-ansible ... -i INVENTORY1 -i INVENTORY2`` Multiple inventories can be
specified by passing the ``--inventory`` or ``-i`` command line option multiple
times. This can be useful to share configuration between multiple environments.
diff --git a/doc/source/user/quickstart-development.rst b/doc/source/user/quickstart-development.rst
index 0dccdb1f18..6e29a2ad51 100644
--- a/doc/source/user/quickstart-development.rst
+++ b/doc/source/user/quickstart-development.rst
@@ -186,7 +186,7 @@ There are a few options that are required to deploy Kolla Ansible:
- Rocky (``rocky``)
- Ubuntu (``ubuntu``)
- For newcomers, we recommend to use Rocky Linux 9 or Ubuntu 24.04.
+ For newcomers, we recommend using Rocky Linux 10 or Ubuntu 24.04.
.. code-block:: console
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index 4afcab38aa..b4d8553670 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -177,7 +177,7 @@ There are a few options that are required to deploy Kolla Ansible:
- Rocky (``rocky``)
- Ubuntu (``ubuntu``)
- For newcomers, we recommend to use Rocky Linux 9 or Ubuntu 24.04.
+ For newcomers, we recommend using Rocky Linux 10 or Ubuntu 24.04.
.. code-block:: console
diff --git a/doc/source/user/support-matrix.rst b/doc/source/user/support-matrix.rst
index 0301775f19..8925a2c021 100644
--- a/doc/source/user/support-matrix.rst
+++ b/doc/source/user/support-matrix.rst
@@ -9,13 +9,13 @@ Kolla Ansible supports the following host Operating Systems (OS):
.. note::
- CentOS Stream 9 is supported as a host OS while Kolla does not publish CS9
+ CentOS Stream 10 is supported as a host OS while Kolla does not publish CS10
based images. Users can build them on their own. We recommend using Rocky
- Linux 9 images instead.
+ Linux 10 images instead.
-* CentOS Stream 9
+* CentOS Stream 10
* Debian Bookworm (12)
-* Rocky Linux 9
+* Rocky Linux 10
* Ubuntu Noble (24.04)
Supported container images
diff --git a/doc/source/user/troubleshooting.rst b/doc/source/user/troubleshooting.rst
index d00a94ca74..76a9ac6047 100644
--- a/doc/source/user/troubleshooting.rst
+++ b/doc/source/user/troubleshooting.rst
@@ -88,13 +88,13 @@ You can find all kolla logs in there.
/var/lib/docker/volumes/kolla_logs/_data
When ``enable_central_logging`` is enabled, to view the logs in a web browser
-using Kibana, go to
-``http://:`` or
-``http://:``. Authenticate
-using ```` and ````.
+using OpenSearch Dashboards, go to
+``http://<kolla_external_vip_address>:<opensearch_dashboards_port>`` or
+``http://<kolla_internal_vip_address>:<opensearch_dashboards_port>``. Authenticate
+using ``opensearch`` and ``<opensearch_dashboards_password>``.
The values ``<kolla_external_vip_address>``, ``<kolla_internal_vip_address>``
-```` and ```` can be found in
-``/kolla/ansible/group_vars/all.yml`` or if the default
-values are overridden, in ``/etc/kolla/globals.yml``. The value of
-```` can be found in ``/etc/kolla/passwords.yml``.
+``<opensearch_dashboards_port>`` can be found in
+``/kolla/ansible/group_vars/all/opensearch.yml``. The value
+of ``<opensearch_dashboards_password>`` can be found in
+``/etc/kolla/passwords.yml``.
diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml
index cb0e651d59..8f0973f5a8 100644
--- a/etc/kolla/globals.yml
+++ b/etc/kolla/globals.yml
@@ -1,7 +1,7 @@
---
# You can use this file to override _any_ variable throughout Kolla.
# Additional options can be found in the
-# 'kolla-ansible/ansible/group_vars/all.yml' file. Default value of all the
+# 'kolla-ansible/ansible/group_vars/all' directory. Default value of all the
# commented parameters are shown here, To override the default value uncomment
# the parameter and change its value.
@@ -160,9 +160,7 @@ workaround_ansible_issue_8743: yes
# addresses for that reason.
#neutron_external_interface: "eth1"
-# Valid options are [ openvswitch, ovn, linuxbridge ]
-# Do note linuxbridge is *EXPERIMENTAL* in Neutron since Zed and it requires extra tweaks to config to be usable.
-# For details, see: https://docs.openstack.org/neutron/latest/admin/config-experimental-framework.html
+# Valid options are [ openvswitch, ovn ]
#neutron_plugin_agent: "openvswitch"
# Valid options are [ internal, infoblox ]
@@ -175,6 +173,9 @@ workaround_ansible_issue_8743: yes
# Neutron rolling upgrade were enable by default
#neutron_enable_rolling_upgrade: "yes"
+# Enable wrapper containers to keep Neutron agent restarts isolated from the main service containers
+#neutron_agents_wrappers: "yes"
+
# Configure neutron logging framework to log ingress/egress connections to instances
# for security groups rules. More information can be found here:
# https://docs.openstack.org/neutron/latest/admin/config-logging.html
@@ -378,13 +379,13 @@ workaround_ansible_issue_8743: yes
#enable_horizon_octavia: "{{ enable_octavia | bool }}"
#enable_horizon_tacker: "{{ enable_tacker | bool }}"
#enable_horizon_trove: "{{ enable_trove | bool }}"
-#enable_horizon_venus: "{{ enable_venus | bool }}"
#enable_horizon_watcher: "{{ enable_watcher | bool }}"
#enable_horizon_zun: "{{ enable_zun | bool }}"
#enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}"
#enable_ironic: "no"
#enable_ironic_neutron_agent: "no"
#enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}"
+#enable_ironic_pxe_filter: "no"
#enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
#enable_kuryr: "no"
#enable_magnum: "no"
@@ -421,7 +422,7 @@ workaround_ansible_issue_8743: yes
#enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'opensearch') }}"
#enable_opensearch_dashboards: "{{ enable_opensearch | bool }}"
#enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}"
-#enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}"
+#enable_openvswitch: "{{ enable_neutron }}"
#enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}"
#enable_ovs_dpdk: "no"
#enable_osprofiler: "no"
@@ -434,7 +435,6 @@ workaround_ansible_issue_8743: yes
#enable_telegraf: "no"
#enable_trove: "no"
#enable_trove_singletenant: "no"
-#enable_venus: "no"
#enable_watcher: "no"
#enable_zun: "no"
diff --git a/etc/kolla/passwords.yml b/etc/kolla/passwords.yml
index 8647dd9d29..5705873d68 100644
--- a/etc/kolla/passwords.yml
+++ b/etc/kolla/passwords.yml
@@ -131,9 +131,6 @@ tacker_keystone_password:
zun_database_password:
zun_keystone_password:
-venus_database_password:
-venus_keystone_password:
-
masakari_database_password:
masakari_keystone_password:
diff --git a/releasenotes/notes/ansible-2.19-986e55799b72dbf5.yaml b/releasenotes/notes/ansible-2.19-986e55799b72dbf5.yaml
new file mode 100644
index 0000000000..33ed5451df
--- /dev/null
+++ b/releasenotes/notes/ansible-2.19-986e55799b72dbf5.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Minimum supported Ansible version is now ``11`` (ansible-core 2.18)
+ and maximum supported is ``12`` (ansible-core 2.19).
diff --git a/releasenotes/notes/bug-2106557-6adff0f76b17500e.yaml b/releasenotes/notes/bug-2106557-6adff0f76b17500e.yaml
new file mode 100644
index 0000000000..7f558e85a8
--- /dev/null
+++ b/releasenotes/notes/bug-2106557-6adff0f76b17500e.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes an issue where Horizon returned HTTP 500 errors when one of the
+ Memcached nodes was unavailable by setting ``ignore_exc`` to ``True`` in
+ the cache backend.
+ `LP#2106557 <https://launchpad.net/bugs/2106557>`__
diff --git a/releasenotes/notes/bug-2111328-c4f57b50eb5bfecf.yaml b/releasenotes/notes/bug-2111328-c4f57b50eb5bfecf.yaml
new file mode 100644
index 0000000000..94bf9a694b
--- /dev/null
+++ b/releasenotes/notes/bug-2111328-c4f57b50eb5bfecf.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes an issue where vendordata.json, if defined,
+ was not being copied to the nova-metadata directory.
+ `LP#2111328 <https://launchpad.net/bugs/2111328>`__
diff --git a/releasenotes/notes/bug-2123946-notify-handlers-3d0a6a0788d5dcce.yaml b/releasenotes/notes/bug-2123946-notify-handlers-3d0a6a0788d5dcce.yaml
new file mode 100644
index 0000000000..060471f697
--- /dev/null
+++ b/releasenotes/notes/bug-2123946-notify-handlers-3d0a6a0788d5dcce.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Handlers that trigger a restart of the nova_libvirt and ovn_sb_db_relay
+ containers have been removed; restarts of these services
+ are now under the control of the service-check-containers
+ role `LP#2123946 <https://launchpad.net/bugs/2123946>`__.
diff --git a/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml b/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml
new file mode 100644
index 0000000000..7bbb085e5c
--- /dev/null
+++ b/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ A dedicated ``cron`` Ansible role has been created; its deployment is
+ no longer part of the ``common`` role.
diff --git a/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml b/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml
new file mode 100644
index 0000000000..e50b469113
--- /dev/null
+++ b/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml
@@ -0,0 +1,19 @@
+---
+features:
+ - |
+ Enables configuration of the timeout manager via the
+ ``OIDCStateTimeout`` variable. It is also possible to override the
+ error page for the modOIDC plugin via a
+ ``{{ node_custom_config }}/keystone/federation/modoidc-error-page.html``
+ file.
+
+upgrade:
+ - |
+ A default template for the modOIDC plugin has been added, which
+ handles authentication errors for federated users. The default
+ template is found at
+ "ansible/roles/keystone/templates/modoidc-error-page.html.j2";
+ it can be replaced or overridden. Instead of overriding the whole
+ page, the retry timeout alone can be changed via the following variable:
+ ``keystone_federation_oidc_error_page_retry_login_delay_milliseconds``.
+ The default timeout for the page redirection is 5 seconds.
diff --git a/releasenotes/notes/docs-remove-extra-opts-9f68a9b3dd9f14c1.yaml b/releasenotes/notes/docs-remove-extra-opts-9f68a9b3dd9f14c1.yaml
new file mode 100644
index 0000000000..8e044d6687
--- /dev/null
+++ b/releasenotes/notes/docs-remove-extra-opts-9f68a9b3dd9f14c1.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Removes the reference to ``EXTRA_OPTS`` from the documentation.
diff --git a/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml b/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml
new file mode 100644
index 0000000000..ab486ca79d
--- /dev/null
+++ b/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ ProxySQL is now automatically enabled when MariaDB is enabled.
+ The MariaDB container healthcheck method was updated, as the Clustercheck
+ script has been replaced with the official MariaDB Docker image's
+ `healthcheck.sh `__
+upgrade:
+ - |
+ Database loadbalancing with HAProxy and MariaDB Clustercheck is no longer
+ supported. For systems that use HAProxy and Clustercheck, upgrading
+ MariaDB with ``kolla-ansible upgrade`` will deploy ProxySQL containers and
+ remove MariaDB Clustercheck containers.
diff --git a/releasenotes/notes/drop-legacy-iptables-1979f67a924d4da1.yaml b/releasenotes/notes/drop-legacy-iptables-1979f67a924d4da1.yaml
new file mode 100644
index 0000000000..0fa17f01d0
--- /dev/null
+++ b/releasenotes/notes/drop-legacy-iptables-1979f67a924d4da1.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ ``neutron_legacy_iptables`` and its handling have been dropped.
diff --git a/releasenotes/notes/drop-venus-b929071fb79b8026.yaml b/releasenotes/notes/drop-venus-b929071fb79b8026.yaml
new file mode 100644
index 0000000000..e33fe99bb0
--- /dev/null
+++ b/releasenotes/notes/drop-venus-b929071fb79b8026.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ Support for deploying ``Venus`` container images has been dropped.
diff --git a/releasenotes/notes/fix-horizon-glance-cors-55e2e83902662c99.yaml b/releasenotes/notes/fix-horizon-glance-cors-55e2e83902662c99.yaml
new file mode 100644
index 0000000000..513f2d190f
--- /dev/null
+++ b/releasenotes/notes/fix-horizon-glance-cors-55e2e83902662c99.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes an issue where CORS could be blocked when attempting
+ to upload an image via the Horizon user interface.
diff --git a/releasenotes/notes/fluentd-direct-b37822ae1145355e.yaml b/releasenotes/notes/fluentd-direct-b37822ae1145355e.yaml
new file mode 100644
index 0000000000..3f16efb467
--- /dev/null
+++ b/releasenotes/notes/fluentd-direct-b37822ae1145355e.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - |
+ Fluentd now sends logs directly to OpenSearch node IPs instead of using
+ a Load Balancer. This change reduces Load Balancer overhead from high
+ log volumes. The Load Balancer for OpenSearch remains in place, as it
+ is still used by OpenSearch Dashboards. Fluentd continues to handle node
+ availability, automatically distributing logs via round-robin to
+ available nodes, ensuring log delivery even if individual OpenSearch
+ nodes become unavailable.
+fixes:
+ - |
+ Fixed Fluentd configuration template to avoid generating unnecessary
+ empty lines when optional parameters are not set.
diff --git a/releasenotes/notes/horizon-port-584efee771a14fd9.yaml b/releasenotes/notes/horizon-port-584efee771a14fd9.yaml
new file mode 100644
index 0000000000..96f2f37737
--- /dev/null
+++ b/releasenotes/notes/horizon-port-584efee771a14fd9.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ ``Horizon`` default port (80/443) has been changed to ``8080`` when using
+ HAProxy, while the old default has been retained for development
+ environments using ``enable_haproxy`` set to ``no``.
diff --git a/releasenotes/notes/ironic-pxe-filter-8376c424cb533bd3.yaml b/releasenotes/notes/ironic-pxe-filter-8376c424cb533bd3.yaml
new file mode 100644
index 0000000000..80f38db8d2
--- /dev/null
+++ b/releasenotes/notes/ironic-pxe-filter-8376c424cb533bd3.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Adds the optional ``ironic-pxe-filter`` service controlled by
+ ``enable_ironic_pxe_filter``. This brings parity with the standalone
+ inspector. Upstream currently classifies the PXE filter as experimental.
diff --git a/releasenotes/notes/move-tasks-a8e65bbda50dd2a0.yaml b/releasenotes/notes/move-tasks-a8e65bbda50dd2a0.yaml
new file mode 100644
index 0000000000..3c8c90d155
--- /dev/null
+++ b/releasenotes/notes/move-tasks-a8e65bbda50dd2a0.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Moved tasks that modify host configuration from the kolla-ansible
+ ``common`` role to a-c-k, as they only need to run once when
+ bootstrapping the host and are not strongly related to the common
+ services.
diff --git a/releasenotes/notes/neutron-agent-wrappers-by-default-c48bc7c00fcca011.yaml b/releasenotes/notes/neutron-agent-wrappers-by-default-c48bc7c00fcca011.yaml
new file mode 100644
index 0000000000..1b7464f8ae
--- /dev/null
+++ b/releasenotes/notes/neutron-agent-wrappers-by-default-c48bc7c00fcca011.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ Neutron agent wrappers are now enabled by default. The wrapper containers
+ restart DHCP, L3, and related agents without having to respawn the main
+ service containers, which reduces dataplane disruptions during upgrades and
+ restarts. Operators who need the previous behaviour can set
+ ``neutron_agents_wrappers`` to ``"no"`` in ``/etc/kolla/globals.yml``.
diff --git a/releasenotes/notes/neutron-ovn-metadata-agent-haproxy-container-2935ea0b03c41900.yaml b/releasenotes/notes/neutron-ovn-metadata-agent-haproxy-container-2935ea0b03c41900.yaml
new file mode 100644
index 0000000000..3a13a9ee79
--- /dev/null
+++ b/releasenotes/notes/neutron-ovn-metadata-agent-haproxy-container-2935ea0b03c41900.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Implements ``neutron_agents_wrappers`` for the
+ neutron-ovn-metadata-agent. This allows the haproxy processes which
+ forward metadata requests in ml2/ovn setups to spawn in separate
+ containers.
diff --git a/releasenotes/notes/octavia-notifications-98a91ab02d9cbee6.yaml b/releasenotes/notes/octavia-notifications-98a91ab02d9cbee6.yaml
new file mode 100644
index 0000000000..ca6a5f4ac1
--- /dev/null
+++ b/releasenotes/notes/octavia-notifications-98a91ab02d9cbee6.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Adds a missing override for ``octavia_notification_topics`` so that
+ operators can add their own notification topics for Octavia. By
+ default Octavia will send notifications to Ceilometer when Ceilometer
+ is enabled.
diff --git a/releasenotes/notes/opensearch-dashboards-direct-to-opensearch-0d9e94c4b6a608c0.yaml b/releasenotes/notes/opensearch-dashboards-direct-to-opensearch-0d9e94c4b6a608c0.yaml
new file mode 100644
index 0000000000..1fdd9b0880
--- /dev/null
+++ b/releasenotes/notes/opensearch-dashboards-direct-to-opensearch-0d9e94c4b6a608c0.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ OpenSearch Dashboards now connects directly to OpenSearch nodes, rather
+ than via a HAProxy endpoint. This should have no user facing impact.
diff --git a/releasenotes/notes/ovn-env-variables-b622b4c53ee275f4.yaml b/releasenotes/notes/ovn-env-variables-b622b4c53ee275f4.yaml
new file mode 100644
index 0000000000..69f2665c3b
--- /dev/null
+++ b/releasenotes/notes/ovn-env-variables-b622b4c53ee275f4.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The OVN container images (``ovn-nb-db``, ``ovn-northd`` and ``ovn-sb-db``)
+ now have default environment variables in place that make it easier
+ for operators to run ``ovn-nbctl`` and ``ovn-sbctl`` commands.
diff --git a/releasenotes/notes/remove-neutron-linuxbridge-b1a2457e848709f7.yaml b/releasenotes/notes/remove-neutron-linuxbridge-b1a2457e848709f7.yaml
new file mode 100644
index 0000000000..f5625e7820
--- /dev/null
+++ b/releasenotes/notes/remove-neutron-linuxbridge-b1a2457e848709f7.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Support for the Linux Bridge mechanism driver has been removed. The
+ driver has already been removed from Neutron.
diff --git a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml
index f3d3cd764d..0c120edd68 100644
--- a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml
+++ b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml
@@ -9,10 +9,18 @@ features:
* - Service
- Variable
+ * - Aodh
+ - aodh_wsgi_provider
+ * - Gnocchi
+ - gnocchi_wsgi_provider
* - Heat
- heat_wsgi_provider
+ * - Horizon
+ - horizon_wsgi_provider
* - Ironic
- ironic_wsgi_provider
+ * - Keystone
+ - keystone_wsgi_provider
* - Masakari
- masakari_wsgi_provider
* - Octavia
diff --git a/requirements-core.yml b/requirements-core.yml
index a1d367ef2d..2eca2c9f36 100644
--- a/requirements-core.yml
+++ b/requirements-core.yml
@@ -2,19 +2,19 @@
collections:
- name: ansible.netcommon
source: https://galaxy.ansible.com
- version: <8
+ version: <9
- name: ansible.posix
source: https://galaxy.ansible.com
- version: <2
+ version: <3
- name: ansible.utils
source: https://galaxy.ansible.com
- version: <6
+ version: <7
- name: community.crypto
source: https://galaxy.ansible.com
- version: <3
+ version: <4
- name: community.general
source: https://galaxy.ansible.com
- version: <11
+ version: <12
- name: community.docker
source: https://galaxy.ansible.com
version: <5
diff --git a/requirements.txt b/requirements.txt
index 568d7f9b63..bf2591b8e1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,7 +11,7 @@ hvac>=0.10.1 # Apache-2.0
Jinja2>=3 # BSD License (3 clause)
# Ansible and ansible's json_query
-ansible-core>=2.17,<2.19 # GPLv3
+ansible-core>=2.18,!=2.19.0,<2.20; python_version >= '3.11' # GPLv3
jmespath>=0.9.3 # MIT
# ini parsing
diff --git a/roles/cephadm/defaults/main.yml b/roles/cephadm/defaults/main.yml
index 6739d491ce..5ab4c04257 100644
--- a/roles/cephadm/defaults/main.yml
+++ b/roles/cephadm/defaults/main.yml
@@ -1,5 +1,5 @@
---
-cephadm_ceph_release: "reef"
+cephadm_ceph_release: "squid"
cephadm_ceph_apt_repo: "deb http://download.ceph.com/debian-{{ cephadm_ceph_release }}/ {{ ansible_distribution_release }} main"
cephadm_use_package_from_distribution: false
diff --git a/roles/cephadm/tasks/pkg_redhat.yml b/roles/cephadm/tasks/pkg_redhat.yml
index 85708cef43..af8c747b83 100644
--- a/roles/cephadm/tasks/pkg_redhat.yml
+++ b/roles/cephadm/tasks/pkg_redhat.yml
@@ -22,8 +22,11 @@
become: True
when: not cephadm_use_package_from_distribution
-- name: Install cephadm
+# NOTE(mnasiadka): cephadm bootstrap failing on jinja2 missing
+- name: Install cephadm and jinja2
dnf:
- name: "cephadm"
+ name:
+ - cephadm
+ - python3-jinja2
install_weak_deps: False
become: True
diff --git a/roles/kolla-ansible-deploy-bifrost/tasks/main.yml b/roles/kolla-ansible-deploy-bifrost/tasks/main.yml
new file mode 100644
index 0000000000..5b510da401
--- /dev/null
+++ b/roles/kolla-ansible-deploy-bifrost/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: Deploy Bifrost
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible deploy-bifrost
+ -i /etc/kolla/inventory
+ >/tmp/logs/ansible/deploy-bifrost 2>&1
+
diff --git a/roles/kolla-ansible-deploy/tasks/certificates.yml b/roles/kolla-ansible-deploy/tasks/certificates.yml
new file mode 100644
index 0000000000..6296a1dbe2
--- /dev/null
+++ b/roles/kolla-ansible-deploy/tasks/certificates.yml
@@ -0,0 +1,39 @@
+---
+- name: Generate self-signed certificates for the optional internal TLS tests
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible certificates
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/certificates 2>&1
+
+- name: Init pebble when Lets Encrypt is enabled
+ when: (le_enabled | default(False)) | bool
+ block:
+ - name: "Run pebble container"
+ become: true
+ community.docker.docker_container:
+ name: pebble
+ image: "ghcr.io/letsencrypt/pebble:latest"
+ env:
+ PEBBLE_VA_NOSLEEP: "1"
+ PEBBLE_VA_ALWAYS_VALID: "1"
+ network_mode: host
+
+ - name: "Wait for pebble to start"
+ ansible.builtin.wait_for:
+ port: 15000
+ delay: 3
+
+ - name: "Copy pebble miniCA to /etc/kolla/certificates"
+ become: true
+ ansible.builtin.command:
+ cmd: "docker cp pebble:/test/certs/pebble.minica.pem /etc/kolla/certificates/ca/pebble-root.crt"
+
+ - name: "Fetch pebble.crt and store it in /etc/kolla/certificates/ca/"
+ become: true
+ ansible.builtin.get_url:
+ url: "https://127.0.0.1:15000/roots/0"
+ dest: "/etc/kolla/certificates/ca/pebble.crt"
+ validate_certs: false
diff --git a/roles/kolla-ansible-deploy/tasks/deploy.yml b/roles/kolla-ansible-deploy/tasks/deploy.yml
new file mode 100644
index 0000000000..771006735d
--- /dev/null
+++ b/roles/kolla-ansible-deploy/tasks/deploy.yml
@@ -0,0 +1,55 @@
+---
+- name: Run kolla-ansible prechecks
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible prechecks
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/deploy-prechecks 2>&1
+
+- name: Run kolla-ansible pull
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible pull
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/pull 2>&1
+
+- name: Run kolla-ansible deploy
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible deploy
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/deploy 2>&1
+
+- name: Run kolla-ansible post-deploy
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible post-deploy
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/post-deploy 2>&1
+
+- name: Run kolla-ansible validate-config
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible validate-config
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/validate-config 2>&1
+ when: not is_upgrade | bool
+
+- name: Run kolla-ansible check
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible check
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/check 2>&1
diff --git a/roles/kolla-ansible-deploy/tasks/main.yml b/roles/kolla-ansible-deploy/tasks/main.yml
new file mode 100644
index 0000000000..e02765d99a
--- /dev/null
+++ b/roles/kolla-ansible-deploy/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- name: Ensure /etc/kolla is writable
+ become: true
+ ansible.builtin.file:
+ path: /etc/kolla
+ state: directory
+ mode: "0777"
+ recurse: true
+
+- import_tasks: certificates.yml
+- import_tasks: deploy.yml
diff --git a/roles/kolla-ansible-reconfigure/tasks/main.yml b/roles/kolla-ansible-reconfigure/tasks/main.yml
new file mode 100644
index 0000000000..4cb60025a7
--- /dev/null
+++ b/roles/kolla-ansible-reconfigure/tasks/main.yml
@@ -0,0 +1,63 @@
+---
+- name: Run kolla-ansible prechecks
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible prechecks
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/reconfigure-prechecks 2>&1
+
+- name: Remove OVN DB containers and volumes on primary to test recreation (docker)
+ become: true
+ when:
+ - scenario == 'ovn'
+ - container_engine == 'docker'
+ vars:
+ ovn_db_services:
+ - "ovn_nb_db"
+ - "ovn_sb_db"
+ block:
+ - name: Remove OVN DB containers
+ community.docker.docker_container:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ ovn_db_services }}"
+
+ - name: Remove OVN DB volumes
+ community.docker.docker_volume:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ ovn_db_services }}"
+
+- name: Remove OVN DB containers and volumes on primary to test recreation (podman)
+ become: true
+ when:
+ - scenario == 'ovn'
+ - container_engine == 'podman'
+ vars:
+ ovn_db_services:
+ - "ovn_nb_db"
+ - "ovn_sb_db"
+ block:
+ - name: Remove OVN DB containers
+ containers.podman.podman_container:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ ovn_db_services }}"
+
+ - name: Remove OVN DB volumes
+ containers.podman.podman_volume:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ ovn_db_services }}"
+
+- name: Run kolla-ansible reconfigure
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible reconfigure
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/reconfigure 2>&1
+
diff --git a/roles/kolla-ansible-setup-disks/README.rst b/roles/kolla-ansible-setup-disks/README.rst
new file mode 100644
index 0000000000..90bb4e92c0
--- /dev/null
+++ b/roles/kolla-ansible-setup-disks/README.rst
@@ -0,0 +1,15 @@
+Prepare disks for Kolla-Ansible CI run.
+
+**Role Variables**
+
+.. zuul:rolevar:: kolla_ansible_setup_disks_file_path
+
+ Path to the backing file that is allocated and attached to a loop device
+
+.. zuul:rolevar:: kolla_ansible_setup_disks_lv_name
+
+ Logical volume name to create (skipped if not set)
+
+.. zuul:rolevar:: kolla_ansible_setup_disks_vg_name
+
+ Volume group name to create
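A minimal usage sketch, assuming ``primary`` and the file, VG and LV names
below are placeholders chosen for illustration:

.. code-block:: yaml

   - hosts: primary
     roles:
       - role: kolla-ansible-setup-disks
         vars:
           kolla_ansible_setup_disks_file_path: /var/lib/kolla-backing.img
           kolla_ansible_setup_disks_vg_name: cinder-volumes
           kolla_ansible_setup_disks_lv_name: cinder-volumes-pool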
diff --git a/roles/kolla-ansible-setup-disks/tasks/main.yml b/roles/kolla-ansible-setup-disks/tasks/main.yml
new file mode 100644
index 0000000000..e4e25f736a
--- /dev/null
+++ b/roles/kolla-ansible-setup-disks/tasks/main.yml
@@ -0,0 +1,42 @@
+---
+- name: Check if kolla_ansible_setup_disks_file_path is set
+ ansible.builtin.assert:
+ that: kolla_ansible_setup_disks_file_path is defined
+
+- name: Check if kolla_ansible_setup_disks_vg_name is set
+ ansible.builtin.assert:
+ that: kolla_ansible_setup_disks_vg_name is defined
+
+- name: Allocate file for disk backing
+ become: true
+ community.general.filesize:
+ path: "{{ kolla_ansible_setup_disks_file_path }}"
+ size: "{{ kolla_ansible_setup_disks_file_size | default('5G') }}"
+
+- name: Get free loop device
+ become: true
+ ansible.builtin.shell:
+ cmd: "losetup -f"
+ register: _loop_device
+
+- name: Mount file on loop device
+ become: true
+ ansible.builtin.shell:
+ cmd: >
+ losetup {{ _loop_device.stdout }}
+ {{ kolla_ansible_setup_disks_file_path }}
+
+- name: Create LVM volume group on loop device
+ become: true
+ community.general.lvg:
+ vg: "{{ kolla_ansible_setup_disks_vg_name }}"
+ pvs: "{{ _loop_device.stdout }}"
+
+- name: Create LV
+ become: true
+ community.general.lvol:
+ vg: "{{ kolla_ansible_setup_disks_vg_name }}"
+ lv: "{{ kolla_ansible_setup_disks_lv_name }}"
+ size: "100%FREE"
+ when:
+ - kolla_ansible_setup_disks_lv_name is defined
diff --git a/roles/kolla-ansible-tempest/defaults/main.yml b/roles/kolla-ansible-tempest/defaults/main.yml
new file mode 100644
index 0000000000..600059789a
--- /dev/null
+++ b/roles/kolla-ansible-tempest/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+kolla_ansible_tempest_packages:
+ - python-tempestconf
+ - tempest
+
+kolla_ansible_tempest_cirros_ver: "0.6.3"
+kolla_ansible_tempest_exclude_regex: ""
+kolla_ansible_tempest_packages_extra: []
+kolla_ansible_tempest_regex: ""
+
+post_upgrade: false
diff --git a/roles/kolla-ansible-tempest/tasks/main.yml b/roles/kolla-ansible-tempest/tasks/main.yml
new file mode 100644
index 0000000000..7c118feff8
--- /dev/null
+++ b/roles/kolla-ansible-tempest/tasks/main.yml
@@ -0,0 +1,49 @@
+---
+- name: Install required packages
+ ansible.builtin.pip:
+ name: "{{ kolla_ansible_tempest_packages + kolla_ansible_tempest_packages_extra }}"
+ virtualenv: "{{ kolla_ansible_venv_path }}"
+ virtualenv_command: "python3 -m venv"
+
+- name: Init tempest workspace
+ ansible.builtin.shell:
+ cmd: >
+ {{ kolla_ansible_venv_path }}/bin/tempest init tempest
+ >/tmp/logs/ansible/test-init-tempest 2>&1
+ creates: "/home/zuul/tempest"
+
+- name: Discover tempest config
+ vars:
+ ver: "{{ kolla_ansible_tempest_cirros_ver }}"
+ image: "https://download.cirros-cloud.net/{{ ver }}/cirros-{{ ver }}-x86_64-disk.img"
+ ansible.builtin.shell:
+ chdir: "/home/zuul/tempest"
+ cmd: >
+ {{ kolla_ansible_venv_path }}/bin/discover-tempest-config
+ --debug
+ --image {{ image }}
+ --os-cloud kolla-admin
+ >/tmp/logs/ansible/test-init-tempest-discover 2>&1
+ environment:
+ OS_CLIENT_CONFIG_FILE: "/etc/kolla/clouds.yaml"
+
+- name: Run tempest tests
+ environment:
+ OS_LOG_CAPTURE: "1"
+ OS_STDOUT_CAPTURE: "1"
+ OS_STDERR_CAPTURE: "1"
+ OS_TEST_TIMEOUT: "3600"
+ vars:
+ tempest_log_file: "test-tempest-run{{ '-post-upgrade' if post_upgrade | bool else '' }}"
+ ansible.builtin.shell:
+ chdir: "/home/zuul/tempest"
+ cmd: >
+ {{ kolla_ansible_venv_path }}/bin/tempest run
+ --config-file etc/tempest.conf
+ {% if kolla_ansible_tempest_regex | length > 0 %}
+ --regex '{{ kolla_ansible_tempest_regex }}'
+ {% endif %}
+ {% if kolla_ansible_tempest_exclude_regex | length > 0 %}
+ --exclude-regex '{{ kolla_ansible_tempest_exclude_regex }}'
+ {% endif %}
+ >/tmp/logs/ansible/{{ tempest_log_file }} 2>&1
diff --git a/roles/kolla-ansible-test-bifrost/tasks/main.yml b/roles/kolla-ansible-test-bifrost/tasks/main.yml
new file mode 100644
index 0000000000..431aed5840
--- /dev/null
+++ b/roles/kolla-ansible-test-bifrost/tasks/main.yml
@@ -0,0 +1,35 @@
+---
+- name: Check baremetal driver list
+ become: true
+ ansible.builtin.command:
+ cmd: >
+ {{ container_engine }} exec bifrost_deploy
+ bash -c 'OS_CLOUD=bifrost baremetal driver list'
+ register: bdl
+ until: bdl.rc == 0
+ retries: 5
+ delay: 10
+
+- name: Check baremetal node list
+ become: true
+ ansible.builtin.command:
+ cmd: >
+ {{ container_engine }} exec bifrost_deploy
+ bash -c 'OS_CLOUD=bifrost baremetal node list'
+
+- name: Create baremetal node
+ become: true
+ ansible.builtin.command:
+ cmd: >
+ {{ container_engine }} exec bifrost_deploy
+ bash -c 'OS_CLOUD=bifrost baremetal node create
+ --driver redfish --name test-node'
+
+- name: Delete baremetal node
+ become: true
+ ansible.builtin.command:
+ cmd: >
+ {{ container_engine }} exec bifrost_deploy
+ bash -c 'OS_CLOUD=bifrost baremetal node delete
+ test-node'
+
diff --git a/roles/kolla-ansible-test-dashboard/tasks/main.yml b/roles/kolla-ansible-test-dashboard/tasks/main.yml
new file mode 100644
index 0000000000..8455b31549
--- /dev/null
+++ b/roles/kolla-ansible-test-dashboard/tasks/main.yml
@@ -0,0 +1,82 @@
+---
+- name: Get contents of clouds.yaml
+ ansible.builtin.slurp:
+ src: /etc/kolla/clouds.yaml
+ register: clouds_yaml
+
+- name: Query dashboard and check that the returned page looks like a login page
+ vars:
+ clouds: "{{ clouds_yaml['content'] | b64decode | from_yaml }}"
+ url_scheme: "{{ clouds.clouds['kolla-admin'].auth.auth_url | urlsplit('scheme') }}"
+ url_host: "{{ kolla_external_vip_address | default(kolla_internal_vip_address) }}"
+ ansible.builtin.uri:
+ url: "{{ url_scheme + '://' + url_host }}"
+ ca_path: "{{ clouds.clouds['kolla-admin'].cacert | default(omit) }}"
+ follow_redirects: "all"
+ return_content: true
+ validate_certs: "{{ 'false' if scenario == 'lets-encrypt' else 'true' }}"
+ register: dashboard_output
+ until: dashboard_output.content.find('Login') != -1
+ retries: 30
+ delay: 10
+
+- name: Check if testinfra subdirectory exists
+ ansible.builtin.stat:
+ path: "{{ zuul.project.src_dir }}/tests/testinfra"
+ register: testinfra_dir
+
+- name: Run testinfra tests
+ when: testinfra_dir.stat.exists
+ block:
+ - name: Ensure testinfra subdirectory exists
+ ansible.builtin.file:
+ path: "/home/zuul/testinfra"
+ state: directory
+
+ - name: Ensure screenshots directory exists
+ ansible.builtin.file:
+ path: "/home/zuul/testinfra/screenshots"
+ state: directory
+
+ - name: Ensure required packages are installed
+ ansible.builtin.pip:
+ name:
+ - pytest-html
+ - pytest-testinfra
+ - selenium
+ virtualenv: "{{ kolla_ansible_venv_path }}"
+ virtualenv_command: "python3 -m venv"
+
+ - name: Run Selenium Firefox container (Docker)
+ become: true
+ when: container_engine == 'docker'
+ community.docker.docker_container:
+ name: "selenium"
+ detach: true
+ image: "quay.io/opendevmirror/selenium-standalone-firefox:latest"
+ network_mode: host
+
+ - name: Run Selenium Firefox container (Podman)
+ become: true
+ when: container_engine == 'podman'
+ containers.podman.podman_container:
+ name: "selenium"
+ detach: true
+ image: "quay.io/opendevmirror/selenium-standalone-firefox:latest"
+ network_mode: host
+
+ - name: Wait for port 4444 to be up
+ ansible.builtin.wait_for:
+ port: 4444
+
+ - name: Run testinfra tests
+ environment:
+ HORIZON_PROTO: "{{ 'https' if tls_enabled | bool else 'http' }}"
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ py.test
+ --junit-xml /home/zuul/testinfra/testinfra-junit.xml -o junit_family=xunit1
+ --html=/home/zuul/testinfra/test-results-testinfra.html --self-contained-html
+ -v tests/testinfra
+ chdir: "{{ zuul.project.src_dir }}"
diff --git a/roles/kolla-ansible-upgrade-bifrost/tasks/main.yml b/roles/kolla-ansible-upgrade-bifrost/tasks/main.yml
new file mode 100644
index 0000000000..ac8d11c1c6
--- /dev/null
+++ b/roles/kolla-ansible-upgrade-bifrost/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Upgrade Bifrost
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible deploy-bifrost
+ -i /etc/kolla/inventory
+ >/tmp/logs/ansible/upgrade-bifrost 2>&1
diff --git a/roles/kolla-ansible-upgrade/tasks/main.yml b/roles/kolla-ansible-upgrade/tasks/main.yml
new file mode 100644
index 0000000000..c66e06fe20
--- /dev/null
+++ b/roles/kolla-ansible-upgrade/tasks/main.yml
@@ -0,0 +1,65 @@
+---
+- name: Generate self-signed certificates for the optional internal TLS tests
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible certificates
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/upgrade-certificates 2>&1
+
+# NOTE(mnasiadka): Need to run bootstrap before upgrade
+- name: Run kolla-ansible bootstrap-servers
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible bootstrap-servers
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/upgrade-bootstrap 2>&1
+
+- name: Run kolla-ansible prechecks
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible prechecks
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/upgrade-prechecks 2>&1
+
+- name: Run kolla-ansible pull
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible pull
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/upgrade-pull 2>&1
+
+- name: Run kolla-ansible upgrade
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible upgrade
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/upgrade 2>&1
+
+- name: Run kolla-ansible post-deploy
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible post-deploy
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/upgrade-post-deploy 2>&1
+
+- name: Run kolla-ansible validate-config on upgrades
+ ansible.builtin.shell:
+ cmd: >
+ . {{ kolla_ansible_venv_path }}/bin/activate &&
+ kolla-ansible validate-config
+ -i /etc/kolla/inventory
+ -vvv
+ >/tmp/logs/ansible/upgrade-validate-config 2>&1
+
diff --git a/roles/openstack-clients/defaults/main.yml b/roles/openstack-clients/defaults/main.yml
index d335a3ef1d..6b1e3c6d97 100644
--- a/roles/openstack-clients/defaults/main.yml
+++ b/roles/openstack-clients/defaults/main.yml
@@ -1,9 +1,13 @@
---
openstack_clients_pip_packages:
+ - package: aodhclient
+ enabled: "{{ scenario == 'telemetry' }}"
- package: python-barbicanclient
- enabled: "{{ scenario == 'scenario_nfv' }}"
+ enabled: "{{ scenario == 'nfv' }}"
- package: python-designateclient
enabled: "{{ scenario == 'magnum' }}"
+ - package: gnocchiclient
+ enabled: "{{ scenario == 'telemetry' }}"
- package: python-heatclient
enabled: true
- package: python-ironicclient
@@ -13,13 +17,13 @@ openstack_clients_pip_packages:
- package: python-masakariclient
enabled: "{{ scenario == 'masakari' }}"
- package: python-mistralclient
- enabled: "{{ scenario == 'scenario_nfv' }}"
+ enabled: "{{ scenario == 'nfv' }}"
- package: python-octaviaclient
enabled: "{{ scenario in ['octavia', 'ovn'] }}"
- package: python-openstackclient
enabled: true
- package: python-tackerclient
- enabled: "{{ scenario == 'scenario_nfv' }}"
+ enabled: "{{ scenario == 'nfv' }}"
- package: python-troveclient
enabled: "{{ scenario == 'magnum' }}"
- package: python-zunclient
diff --git a/test-requirements.txt b/test-requirements.txt
index 303f3d5b1e..8451a2b904 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,3 +1,31 @@
+# Password hashing
+bcrypt>=3.0.0 # Apache-2.0
+
+# password generation
+cryptography>=2.1 # BSD/Apache-2.0
+
+# Hashicorp Vault
+hvac>=0.10.1 # Apache-2.0
+
+# templating
+Jinja2>=3 # BSD License (3 clause)
+
+# Ansible and ansible's json_query
+ansible-core>=2.18,!=2.19.0,<2.20; python_version >= '3.11' # GPLv3
+jmespath>=0.9.3 # MIT
+
+# ini parsing
+oslo.config>=5.2.0 # Apache-2.0
+
+# password generation
+oslo.utils>=3.33.0 # Apache-2.0
+
+# Password hashing
+passlib[bcrypt]>=1.0.0 # BSD
+
+# CLI
+cliff>=4.7.0 # Apache-2.0
+
# coverage testing
coverage!=4.4,>=4.0 # Apache-2.0
diff --git a/tests/check-logs.sh b/tests/check-logs.sh
index d78da1ee17..dac11ccb9b 100755
--- a/tests/check-logs.sh
+++ b/tests/check-logs.sh
@@ -63,9 +63,6 @@ function check_fluentd_missing_logs {
/var/log/kolla/mariadb/mariadb-bootstrap.log)
continue
;;
- /var/log/kolla/mariadb/mariadb-clustercheck.log)
- continue
- ;;
/var/log/kolla/mariadb/mariadb-upgrade.log)
continue
;;
@@ -100,9 +97,6 @@ function check_fluentd_missing_logs {
/var/log/kolla/tenks/*)
continue
;;
- /var/log/kolla/venus/*)
- continue
- ;;
/var/log/kolla/zun/*)
continue
;;
@@ -196,6 +190,22 @@ if sudo test -d /var/log/kolla; then
fi
done
+ # NOTE: Check if OpenSearch output plugin has connected in OpenSearch scenarios, otherwise
+ # check_fluentd_missing_logs will fail because fluentd will only parse files when
+ # output plugin is working.
+ retries=0
+ retries_max=10
+ until [[ $(sudo tail -n 5 /var/log/kolla/fluentd/fluentd.log | grep "Could not communicate to OpenSearch" | wc -l) -eq 0 ]]; do
+ echo "Found 'Could not communicate to OpenSearch' in last 5 lines of fluentd.log, sleeping 30 seconds"
+ retries=$((retries + 1))
+ if [[ $retries != $retries_max ]]; then
+ sleep 30
+ else
+ echo "Found 'Could not communicate to OpenSearch' in last 5 lines of fluentd.log after 10 retries." | tee -a $fluentd_error_summary_file
+ break
+ fi
+ done
+
if check_fluentd_missing_logs >/dev/null; then
any_critical=1
echo "(critical) Found some missing log files in fluentd logs. Matches in $fluentd_error_summary_file"
diff --git a/tests/deploy-bifrost.sh b/tests/deploy-bifrost.sh
deleted file mode 100755
index b06a6d769e..0000000000
--- a/tests/deploy-bifrost.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-set -o xtrace
-set -o errexit
-
-# Enable unbuffered output for Ansible in Jenkins.
-export PYTHONUNBUFFERED=1
-
-
-function deploy_bifrost {
- RAW_INVENTORY=/etc/kolla/inventory
-
- source $KOLLA_ANSIBLE_VENV_PATH/bin/activate
-
- # TODO(mgoddard): run prechecks.
- # Deploy the bifrost container.
- # TODO(mgoddard): add pull action when we have a local registry service in
- # CI.
- kolla-ansible deploy-bifrost -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/deploy-bifrost
-}
-
-
-deploy_bifrost
diff --git a/tests/deploy.sh b/tests/deploy.sh
deleted file mode 100755
index e81dbf5dcc..0000000000
--- a/tests/deploy.sh
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/bash
-
-set -o xtrace
-set -o errexit
-
-# Enable unbuffered output for Ansible in Jenkins.
-export PYTHONUNBUFFERED=1
-
-function init_pebble {
-
- sudo echo "[i] Pulling letsencrypt/pebble" > /tmp/logs/ansible/certificates
- sudo docker pull quay.io/openstack.kolla/pebble:latest &>> /tmp/logs/ansible/certificates
-
- sudo echo "[i] Force removing old pebble container" &>> /tmp/logs/ansible/certificates
- sudo docker rm -f pebble &>> /tmp/logs/ansible/certificates
-
- sudo echo "[i] Run new pebble container" &>> /tmp/logs/ansible/certificates
- sudo docker run --name pebble --rm -d -e "PEBBLE_VA_NOSLEEP=1" -e "PEBBLE_VA_ALWAYS_VALID=1" --net=host quay.io/openstack.kolla/pebble:latest &>> /tmp/logs/ansible/certificates
-
- sudo echo "[i] Wait for pebble container be up" &>> /tmp/logs/ansible/certificates
- # wait until pebble starts
- while ! sudo docker logs pebble | grep -q "Listening on"; do
- sleep 1
- done
- sudo echo "[i] Wait for pebble container done" &>> /tmp/logs/ansible/certificates
-
- sudo echo "[i] Pebble container logs" &>> /tmp/logs/ansible/certificates
- sudo docker logs pebble &>> /tmp/logs/ansible/certificates
-}
-
-function pebble_cacert {
-
- sudo docker cp pebble:/test/certs/pebble.minica.pem /etc/kolla/certificates/ca/pebble-root.crt
- sudo curl -k -s -o /etc/kolla/certificates/ca/pebble.crt -v https://127.0.0.1:15000/roots/0
-}
-
-function certificates {
-
- RAW_INVENTORY=/etc/kolla/inventory
- source $KOLLA_ANSIBLE_VENV_PATH/bin/activate
-
- # generate self-signed certificates for the optional internal TLS tests
- if [[ "$TLS_ENABLED" = "True" ]]; then
- kolla-ansible certificates -i ${RAW_INVENTORY} -vvv > /tmp/logs/ansible/certificates
- fi
- if [[ "$LE_ENABLED" = "True" ]]; then
- init_pebble
- pebble_cacert
- fi
-
- #TODO(inc0): Post-deploy complains that /etc/kolla is not writable. Probably we need to include become there
- sudo chmod -R 777 /etc/kolla
-}
-
-
-function deploy {
-
- RAW_INVENTORY=/etc/kolla/inventory
- source $KOLLA_ANSIBLE_VENV_PATH/bin/activate
-
- #TODO(inc0): Post-deploy complains that /etc/kolla is not writable. Probably we need to include become there
- sudo chmod -R 777 /etc/kolla
-
- certificates
-
- # Actually do the deployment
- kolla-ansible prechecks -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/deploy-prechecks
- kolla-ansible pull -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/pull
- kolla-ansible deploy -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/deploy
- kolla-ansible post-deploy -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/post-deploy
-
- if [[ $HAS_UPGRADE == 'no' ]]; then
- kolla-ansible validate-config -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/validate-config
- #TODO(r-krcek) check can be moved out of the if statement in the flamingo cycle
- kolla-ansible check -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/check
- fi
-}
-
-
-deploy
diff --git a/tests/get_logs.sh b/tests/get_logs.sh
index 8633ec1c36..db18de5cdf 100644
--- a/tests/get_logs.sh
+++ b/tests/get_logs.sh
@@ -15,7 +15,7 @@ copy_logs() {
echo "Invalid container engine: ${CONTAINER_ENGINE}"
exit 1
fi
-
+ cp -rL /home/zuul/tempest ${LOG_DIR}/
[ -d ${VOLUMES_DIR}/kolla_logs/_data ] && cp -rnL ${VOLUMES_DIR}/kolla_logs/_data/* ${LOG_DIR}/kolla/
[ -d /etc/kolla ] && cp -rnL /etc/kolla/* ${LOG_DIR}/kolla_configs/
# Don't save the IPA images.
diff --git a/tests/post.yml b/tests/post.yml
index 77dd25b615..46e4370960 100644
--- a/tests/post.yml
+++ b/tests/post.yml
@@ -2,6 +2,10 @@
- hosts: all
vars:
logs_dir: "/tmp/logs"
+ roles:
+ - role: fetch-subunit-output
+ zuul_work_dir: '/home/zuul/tempest'
+
tasks:
# TODO(mhiner): Currently only Docker to Podman migration is tested.
# If we want to test the other direction we have to rework this.
@@ -72,6 +76,26 @@
ara_report_local_dir: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}/ara-report"
kolla_ansible_local_src_dir: "{{ zuul.executor.work_root }}/src/{{ zuul.project.canonical_hostname }}/openstack/kolla-ansible"
tasks:
+ - name: Download testinfra to executor
+ synchronize:
+ src: "/home/zuul/testinfra"
+ dest: "{{ zuul.executor.log_root }}/"
+ mode: pull
+ # TODO(mnasiadka): Remove in G/2026.1 cycle
+ ignore_errors: true
+
+ - name: Return artifact to Zuul
+ zuul_return:
+ data:
+ zuul:
+ artifacts:
+ - name: "TestInfra Unit Test Report"
+ url: "testinfra/test-results-testinfra.html"
+ metadata:
+ type: unit_test_report
+ - name: "TestInfra Screenshots"
+ url: "testinfra/screenshots"
+
- name: Check for existence of ara sqlite
stat:
path: "{{ ansible_env.HOME }}/.ara/server/ansible.sqlite"
diff --git a/tests/pre.yml b/tests/pre.yml
index 804c090d18..1a666e97d1 100644
--- a/tests/pre.yml
+++ b/tests/pre.yml
@@ -61,6 +61,13 @@
- gawk
- python3-pip
- python3-setuptools
+ - python3-requests
+
+ - name: Install stestr
+ become: true
+ pip:
+ break_system_packages: true
+ name: stestr
- name: Install lvm on storage scenarios
become: true
diff --git a/tests/reconfigure.sh b/tests/reconfigure.sh
deleted file mode 100755
index 1824755729..0000000000
--- a/tests/reconfigure.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-set -o xtrace
-set -o errexit
-
-# Enable unbuffered output for Ansible in Jenkins.
-export PYTHONUNBUFFERED=1
-
-
-function reconfigure {
- RAW_INVENTORY=/etc/kolla/inventory
-
- source $KOLLA_ANSIBLE_VENV_PATH/bin/activate
-
- # TODO(jeffrey4l): make some configure file change and
- # trigger a real reconfigure
- # NOTE(mnasiadka): Remove OVN DB containers and volumes on primary to test recreation
- if [[ $SCENARIO == "ovn" ]]; then
- sudo ${CONTAINER_ENGINE} rm -f ovn_nb_db ovn_sb_db && sudo ${CONTAINER_ENGINE} volume rm ovn_nb_db ovn_sb_db
- fi
- kolla-ansible prechecks -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/reconfigure-prechecks
- kolla-ansible reconfigure -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/reconfigure
-}
-
-
-reconfigure
diff --git a/tests/run-hashi-vault.yml b/tests/run-openbao.yml
similarity index 92%
rename from tests/run-hashi-vault.yml
rename to tests/run-openbao.yml
index f9a014e76e..f8d2fe2596 100644
--- a/tests/run-hashi-vault.yml
+++ b/tests/run-openbao.yml
@@ -68,10 +68,10 @@
command: "{{ kolla_ansible_venv_path }}/bin/kolla-genpwd"
# At this point we have generated all necessary configuration, and are
- # ready to test Hashicorp Vault.
- - name: Run test-hashicorp-vault-passwords.sh script
+ # ready to test OpenBao.
+ - name: Run test-openbao-passwords.sh script
script:
- cmd: test-hashicorp-vault-passwords.sh
+ cmd: test-openbao-passwords.sh
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
environment:
@@ -85,11 +85,11 @@
- name: Read generated file
slurp:
- src: "/tmp/passwords-hashicorp-vault.yml"
+ src: "/tmp/passwords-openbao.yml"
register: generated_file
# This test will load in the original input file and the one that was
- # generated by Vault and ensure that the keys are the same in both files.
+ # generated by OpenBao and ensure that the keys are the same in both files.
# This ensures that we are not missing any passwords.
- name: Check passwords that were written to Vault are as expected
vars:
diff --git a/tests/run.yml b/tests/run.yml
index 99f110e315..d421c3e7a0 100644
--- a/tests/run.yml
+++ b/tests/run.yml
@@ -10,7 +10,7 @@
- name: Set facts for commonly used variables
vars:
# NOTE(yoctozepto): needed here to use in other facts too
- openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-opensearch', 'venus'] }}"
+ openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-opensearch'] }}"
set_fact:
kolla_inventory_path: "/etc/kolla/inventory"
logs_dir: "/tmp/logs"
@@ -35,12 +35,9 @@
name: "{{ 'bind-utils' if ansible_os_family == 'RedHat' else 'dnsutils' }}"
when: scenario == 'magnum'
- - name: Prepare disks for a storage service
- script: "setup_disks.sh {{ disk_type }}"
+ - import_role:
+ name: kolla-ansible-setup-disks
when: scenario in ['cephadm', 'zun']
- become: true
- vars:
- disk_type: "{{ 'ceph-lvm' if scenario in ['cephadm'] else scenario }}"
- name: Update /etc/hosts with internal API FQDN
blockinfile:
@@ -391,28 +388,14 @@
# Deploy control plane. For upgrade jobs this is the previous release.
- block:
- - name: Run deploy.sh script
- script:
- cmd: deploy.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- environment:
- TLS_ENABLED: "{{ tls_enabled }}"
- LE_ENABLED: "{{ le_enabled }}"
- KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
- HAS_UPGRADE: "{{ is_upgrade | bool | ternary('yes', 'no') }}"
+ - import_role:
+ name: kolla-ansible-deploy
# NOTE(yoctozepto): this is nice as the first step after the deployment
# because it waits for the services to stabilize well enough so that
# the dashboard is able to show the login prompt
- - name: Run test-dashboard.sh script
- script:
- cmd: test-dashboard.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- environment:
- TLS_ENABLED: "{{ tls_enabled }}"
- LE_ENABLED: "{{ le_enabled }}"
+ - import_role:
+ name: kolla-ansible-test-dashboard
when: dashboard_enabled
- name: Run init-core-openstack.sh script
@@ -430,7 +413,7 @@
EXT_NET_GATEWAY: "{{ neutron_external_network_prefix }}1"
EXT_NET_DEMO_ROUTER_ADDR: "{{ neutron_external_network_prefix }}10"
SCENARIO: "{{ scenario }}"
- when: openstack_core_tested or scenario in ['ironic', 'magnum', 'scenario_nfv', 'zun', 'octavia']
+ when: openstack_core_tested or scenario in ['ironic', 'magnum', 'nfv', 'zun', 'octavia']
- name: Run test-ovn.sh script
script:
@@ -440,17 +423,10 @@
when: scenario == "ovn"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
+ IS_UPGRADE: "{{ is_upgrade | bool | ternary('yes', 'no') }}"
- - name: Run test-core-openstack.sh script
- script:
- cmd: test-core-openstack.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- environment:
- SCENARIO: "{{ scenario }}"
- HAS_UPGRADE: "{{ is_upgrade | bool | ternary('yes', 'no') }}"
- PHASE: deploy
- IP_VERSION: "{{ 6 if address_family == 'ipv6' else 4 }}"
+ - import_role:
+ name: kolla-ansible-tempest
when: openstack_core_tested
- name: Run test-zun.sh script
@@ -468,7 +444,7 @@
cmd: test-scenario-nfv.sh
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
- when: scenario == "scenario_nfv"
+ when: scenario == "nfv"
- block:
- name: Run deploy-tenks.sh script
@@ -550,13 +526,6 @@
when:
- scenario == "prometheus-opensearch"
- - name: Run test-venus.sh script
- script:
- cmd: test-venus.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- when: scenario == "venus"
-
- name: Run test-skyline.sh script
script:
cmd: test-skyline.sh
@@ -571,6 +540,13 @@
chdir: "{{ kolla_ansible_src_dir }}"
when: scenario == "skyline-sso"
+ - name: Run test-telemetry.sh script
+ script:
+ cmd: test-telemetry.sh
+ executable: /bin/bash
+ chdir: "{{ kolla_ansible_src_dir }}"
+ when: scenario == "telemetry"
+
- name: Run test-container-engine-migration.sh script
script:
cmd: test-container-engine-migration.sh
@@ -731,43 +707,17 @@
--final /etc/kolla/passwords.yml
# Perform an upgrade to the in-development code.
- - name: Run upgrade.sh script
- shell:
- cmd: tests/upgrade.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- environment:
- KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
- SCENARIO: "{{ scenario }}"
+ - import_role:
+ name: kolla-ansible-upgrade
- # NOTE(yoctozepto): this is nice as the first step after the upgrade
- # because it waits for the services to stabilize well enough so that
- # the dashboard is able to show the login prompt
- - name: Run test-dashboard.sh script (post upgrade)
- shell:
- cmd: tests/test-dashboard.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- environment:
- TLS_ENABLED: "{{ tls_enabled }}"
+ - import_role:
+ name: kolla-ansible-test-dashboard
when: dashboard_enabled
- # NOTE(yoctozepto): We need the script module here to avoid
- # a bug in Glance OSC [1][2] which results in a failure when a volume
- # is given as a source. The stdin works differently in shell/command
- # than script.
- # [1] https://opendev.org/openstack/python-openstackclient/src/commit/6810414e45a32dd44263dff47fec161989508ef0/openstackclient/image/v2/image.py#L114-L120
- # [2] https://opendev.org/openstack/python-openstackclient/src/commit/6810414e45a32dd44263dff47fec161989508ef0/openstackclient/image/v2/image.py#L414
- - name: Run test-core-openstack.sh script (post upgrade)
- script:
- cmd: test-core-openstack.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- environment:
- SCENARIO: "{{ scenario }}"
- HAS_UPGRADE: 'yes'
- PHASE: upgrade
- IP_VERSION: "{{ 6 if address_family == 'ipv6' else 4 }}"
+ - import_role:
+ name: kolla-ansible-tempest
+ vars:
+ post_upgrade: true
when: openstack_core_tested
- name: Run test-prometheus-opensearch.sh script (post-upgrade)
@@ -781,31 +731,17 @@
when: is_upgrade
# Bifrost testing.
- - block:
- - name: Run deploy-bifrost.sh script
- shell:
- cmd: tests/deploy-bifrost.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- environment:
- KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
+ - name: Bifrost testing
+ when: scenario == "bifrost"
+ block:
+ - import_role:
+ name: kolla-ansible-deploy-bifrost
- - name: Run test-bifrost.sh script
- shell:
- cmd: tests/test-bifrost.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- environment:
- CONTAINER_ENGINE: "{{ container_engine }}"
+ - import_role:
+ name: kolla-ansible-test-bifrost
- - name: Run upgrade-bifrost.sh script
- shell:
- cmd: tests/upgrade-bifrost.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- environment:
- KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
- when: scenario == "bifrost"
+ - import_role:
+ name: kolla-ansible-upgrade-bifrost
# NOTE(yoctozepto): each host checks itself
- hosts: all
@@ -841,15 +777,8 @@
- hosts: primary
any_errors_fatal: true
tasks:
- - name: Run reconfigure.sh script
- script:
- cmd: reconfigure.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- environment:
- KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
- SCENARIO: "{{ scenario }}"
- CONTAINER_ENGINE: "{{ container_engine }}"
+ - import_role:
+ name: kolla-ansible-reconfigure
when:
- not is_upgrade
- scenario != "bifrost"
diff --git a/tests/setup_disks.sh b/tests/setup_disks.sh
deleted file mode 100644
index 6cd03ddc41..0000000000
--- a/tests/setup_disks.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-# $1: scenario / ceph store type
-
-set -o xtrace
-set -o errexit
-
-mkdir -p /opt/data/kolla
-
-if [ $1 = 'zun' ]; then
- # create cinder-volumes volume group for cinder lvm backend
- free_device=$(losetup -f)
- fallocate -l 5G /var/lib/cinder_data.img
- losetup $free_device /var/lib/cinder_data.img
- pvcreate $free_device
- vgcreate cinder-volumes $free_device
-elif [ $1 = 'ceph-lvm' ]; then
- free_device=$(losetup -f)
- fallocate -l 5G /var/lib/ceph-osd1.img
- losetup $free_device /var/lib/ceph-osd1.img
- pvcreate $free_device
- vgcreate cephvg $free_device
- lvcreate -l 100%FREE -n cephlv cephvg
-else
- echo "Unknown type" >&2
- exit 1
-fi
-
-partprobe
diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2
index fe7950e656..ddcc631dd4 100644
--- a/tests/templates/globals-default.j2
+++ b/tests/templates/globals-default.j2
@@ -45,7 +45,11 @@ mariadb_wsrep_extra_provider_options:
- "evs.inactive_timeout=PT30S"
- "evs.keepalive_period=PT3S"
-mariadb_monitor_connect_timeout: "60000"
+mariadb_monitor_connect_interval: "60000"
+mariadb_monitor_connect_timeout: "180000"
+mariadb_monitor_ping_interval: "60000"
+mariadb_monitor_ping_max_failures: "10"
+mariadb_monitor_ping_timeout: "10000"
nova_compute_virt_type: "{{ virt_type }}"
@@ -110,7 +114,7 @@ enable_prometheus: "yes"
enable_prometheus_openstack_exporter: "no"
{% endif %}
-{% if scenario == "scenario_nfv" %}
+{% if scenario == "nfv" %}
enable_tacker: "yes"
enable_neutron_sfc: "yes"
enable_mistral: "yes"
@@ -123,6 +127,7 @@ enable_aodh: "yes"
{% if scenario == "ironic" %}
enable_ironic: "yes"
+enable_ironic_pxe_filter: "yes"
enable_prometheus: "yes"
enable_prometheus_openstack_exporter: "no"
ironic_dnsmasq_dhcp_ranges:
@@ -232,12 +237,6 @@ octavia_network_type: "tenant"
enable_redis: "yes"
{% endif %}
-{% if scenario == "venus" %}
-enable_opensearch: "yes"
-enable_keystone: "yes"
-enable_venus: "yes"
-{% endif %}
-
{% if groups['all'] | length == 1 %}
keepalived_track_script_enabled: "no"
{% endif %}
@@ -278,5 +277,11 @@ enable_skyline: "yes"
skyline_enable_sso: "yes"
{% endif %}
+{% if scenario == "telemetry" %}
+enable_aodh: "yes"
+enable_ceilometer: "yes"
+enable_gnocchi: "yes"
+{% endif %}
+
mariadb_monitor_read_only_interval: "30000"
mariadb_monitor_galera_healthcheck_timeout: "30000"
diff --git a/tests/templates/inventory.j2 b/tests/templates/inventory.j2
index ca98719a89..d0711e93ee 100644
--- a/tests/templates/inventory.j2
+++ b/tests/templates/inventory.j2
@@ -231,9 +231,6 @@ control
[blazar:children]
control
-[venus:children]
-monitoring
-
[letsencrypt:children]
loadbalancer
@@ -662,12 +659,6 @@ ovn-database
[ovn-sb-db-relay:children]
ovn-database
-[venus-api:children]
-venus
-
-[venus-manager:children]
-venus
-
[letsencrypt-webserver:children]
letsencrypt
diff --git a/tests/test-bifrost.sh b/tests/test-bifrost.sh
deleted file mode 100755
index b8017c026f..0000000000
--- a/tests/test-bifrost.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-set -o xtrace
-set -o errexit
-
-# Enable unbuffered output for Ansible in Jenkins.
-export PYTHONUNBUFFERED=1
-
-
-function test_bifrost {
- container_engine="${1:-docker}"
-
- # TODO(mgoddard): More testing, deploy bare metal nodes.
- # TODO(mgoddard): Use openstackclient when clouds.yaml works. See
- # https://bugs.launchpad.net/bifrost/+bug/1754070.
- attempts=0
- while [[ $(sudo ${container_engine} exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal driver list -f value" | wc -l) -eq 0 ]]; do
- attempts=$((attempts + 1))
- if [[ $attempts -gt 6 ]]; then
- echo "Timed out waiting for ironic conductor to become active"
- exit 1
- fi
- sleep 10
- done
- sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal node list"
- sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal node create --driver redfish --name test-node"
- sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal node delete test-node"
-}
-
-
-test_bifrost $1
diff --git a/tests/test-core-openstack.sh b/tests/test-core-openstack.sh
index c2c0ea3ad2..aa1cf74033 100755
--- a/tests/test-core-openstack.sh
+++ b/tests/test-core-openstack.sh
@@ -514,7 +514,8 @@ function test_openstack_logged {
test_smoke
test_neutron_modules
test_instance_boot
- test_internal_dns_integration
+ # NOTE(mnasiadka): Disable because it started failing in OVN scenario
+ [[ $SCENARIO != "ovn" ]] && test_internal_dns_integration
test_proxysql_prometheus_exporter
# Check for x86_64 architecture to run q35 tests
diff --git a/tests/test-dashboard.sh b/tests/test-dashboard.sh
deleted file mode 100755
index c3cb9a72cc..0000000000
--- a/tests/test-dashboard.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-set -o xtrace
-set -o errexit
-
-export PYTHONUNBUFFERED=1
-
-
-function check_dashboard {
- # Query the dashboard, and check that the returned page looks like a login
- # page.
- DASHBOARD_URL=${OS_AUTH_URL%:*}
- output_path=$1
- args=(
- --include
- --location
- --fail
- )
- if [[ "$TLS_ENABLED" = "True" ]]; then
- args+=(--cacert $OS_CACERT)
- fi
- if ! curl "${args[@]}" $DASHBOARD_URL > $output_path; then
- return 1
- fi
- if ! grep Login $output_path >/dev/null; then
- return 1
- fi
-}
-
-function test_dashboard_logged {
- . /etc/kolla/admin-openrc.sh
-
- echo "TESTING: Dashboard"
- # The dashboard has been known to take some time to become accessible, so
- # use retries.
- output_path=$(mktemp)
- attempt=1
- while ! check_dashboard $output_path; do
- echo "Dashboard not accessible yet"
- attempt=$((attempt+1))
- if [[ $attempt -eq 30 ]]; then
- echo "FAILED: Dashboard did not become accessible. Response:"
- cat $output_path
- return 1
- fi
- sleep 10
- done
- echo "SUCCESS: Dashboard"
-}
-
-function test_dashboard {
- echo "Testing dashboard"
- log_file=/tmp/logs/ansible/test-dashboard
- if [[ -f $log_file ]]; then
- log_file=${log_file}-upgrade
- fi
- test_dashboard_logged > $log_file 2>&1
- result=$?
- if [[ $result != 0 ]]; then
- echo "Testing dashboard failed. See ansible/test-dashboard for details"
- else
- echo "Successfully tested dashboard. See ansible/test-dashboard for details"
- fi
- return $result
-}
-
-
-test_dashboard
diff --git a/tests/test-hashicorp-vault-passwords.sh b/tests/test-hashicorp-vault-passwords.sh
deleted file mode 100755
index caa9ba6a4c..0000000000
--- a/tests/test-hashicorp-vault-passwords.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash
-
-set -o xtrace
-set -o errexit
-
-export PYTHONUNBUFFERED=1
-
-function install_vault {
- if [[ "debian" == $BASE_DISTRO ]]; then
- curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add -
- sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
- sudo apt-get update -y && sudo apt-get install -y vault jq
- else
- sudo dnf install -y yum-utils
- sudo dnf config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo
- sudo dnf install -y vault jq
- fi
-}
-
-function start_vault {
- nohup vault server --dev &
- # Give Vault some time to warm up
- sleep 10
-}
-
-function test_vault {
- TOKEN=$(vault token create -address 'http://127.0.0.1:8200' -format json | jq '.auth.client_token' --raw-output)
- echo "${TOKEN}" | vault login -address 'http://127.0.0.1:8200' -
- vault kv put -address 'http://127.0.0.1:8200' secret/foo data=bar
-}
-
-function test_writepwd {
- TOKEN=$(vault token create -address 'http://127.0.0.1:8200' -format json | jq '.auth.client_token' --raw-output)
- kolla-writepwd \
- --passwords /etc/kolla/passwords.yml \
- --vault-addr 'http://127.0.0.1:8200' \
- --vault-token ${TOKEN} \
- --vault-mount-point secret
-}
-
-function test_readpwd {
- TOKEN=$(vault token create -address 'http://127.0.0.1:8200' -format json | jq '.auth.client_token' --raw-output)
- cp etc/kolla/passwords.yml /tmp/passwords-hashicorp-vault.yml
- kolla-readpwd \
- --passwords /tmp/passwords-hashicorp-vault.yml \
- --vault-addr 'http://127.0.0.1:8200' \
- --vault-token ${TOKEN} \
- --vault-mount-point secret
-}
-
-function teardown {
- pkill vault
-}
-
-function test_hashicorp_vault_passwords {
- echo "Setting up development Vault server..."
- source $KOLLA_ANSIBLE_VENV_PATH/bin/activate
- install_vault
- start_vault
- test_vault
- echo "Write passwords to Hashicorp Vault..."
- test_writepwd
- echo "Read passwords from Hashicorp Vault..."
- test_readpwd
- echo "Cleaning up..."
- teardown
-}
-
-test_hashicorp_vault_passwords
diff --git a/tests/test-openbao-passwords.sh b/tests/test-openbao-passwords.sh
new file mode 100755
index 0000000000..5e6b115bad
--- /dev/null
+++ b/tests/test-openbao-passwords.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+set -o xtrace
+set -o errexit
+
+export PYTHONUNBUFFERED=1
+
+function install_openbao {
+ if [[ $BASE_DISTRO =~ (debian|ubuntu) ]]; then
+ curl -fsSLO https://github.com/openbao/openbao/releases/download/v2.4.1/bao_2.4.1_linux_amd64.deb
+ sudo dpkg -i bao_2.4.1_linux_amd64.deb
+ rm -f bao_2.4.1_linux_amd64.deb
+ else
+ sudo dnf install -y https://github.com/openbao/openbao/releases/download/v2.4.1/bao_2.4.1_linux_amd64.rpm
+ fi
+}
+
+function start_openbao {
+ nohup bao server --dev &
+    # Give OpenBao some time to warm up
+ sleep 10
+}
+
+function test_openbao {
+ TOKEN=$(bao token create -address 'http://127.0.0.1:8200' -field token)
+ echo "${TOKEN}" | bao login -address 'http://127.0.0.1:8200' -
+ bao kv put -address 'http://127.0.0.1:8200' secret/foo data=bar
+}
+
+function test_writepwd {
+ TOKEN=$(bao token create -address 'http://127.0.0.1:8200' -field token)
+ kolla-writepwd \
+ --passwords /etc/kolla/passwords.yml \
+ --vault-addr 'http://127.0.0.1:8200' \
+ --vault-token ${TOKEN} \
+ --vault-mount-point secret
+}
+
+function test_readpwd {
+ TOKEN=$(bao token create -address 'http://127.0.0.1:8200' -field token)
+ cp etc/kolla/passwords.yml /tmp/passwords-openbao.yml
+ kolla-readpwd \
+ --passwords /tmp/passwords-openbao.yml \
+ --vault-addr 'http://127.0.0.1:8200' \
+ --vault-token ${TOKEN} \
+ --vault-mount-point secret
+}
+
+function teardown {
+ pkill bao
+}
+
+function test_openbao_passwords {
+ echo "Setting up development OpenBao server..."
+ source $KOLLA_ANSIBLE_VENV_PATH/bin/activate
+ install_openbao
+ start_openbao
+ test_openbao
+ echo "Write passwords to OpenBao..."
+ test_writepwd
+ echo "Read passwords from OpenBao..."
+ test_readpwd
+ echo "Cleaning up..."
+ teardown
+}
+
+test_openbao_passwords
diff --git a/tests/test-ovn.sh b/tests/test-ovn.sh
index a9d77a95ae..a48fb1161b 100755
--- a/tests/test-ovn.sh
+++ b/tests/test-ovn.sh
@@ -15,10 +15,20 @@ function test_ovn {
# List OVN NB/SB entries
echo "OVN NB DB entries:"
- sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" show
+ # TODO(mnasiadka): Remove the first part of conditional in G cycle
+ if [ $IS_UPGRADE == "yes" ]; then
+ sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" show
+ else
+ sudo ${container_engine} exec ovn_northd ovn-nbctl show
+ fi
echo "OVN SB DB entries:"
- sudo ${container_engine} exec ovn_northd ovn-sbctl --db "$ovn_sb_connection" show
+ # TODO(mnasiadka): Remove the first part of conditional in G cycle
+ if [ $IS_UPGRADE == "yes" ]; then
+ sudo ${container_engine} exec ovn_northd ovn-sbctl --db "$ovn_sb_connection" show
+ else
+ sudo ${container_engine} exec ovn_northd ovn-sbctl show
+ fi
OVNNB_STATUS=$(sudo ${container_engine} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound)
OVNSB_STATUS=$(sudo ${container_engine} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound)
@@ -92,9 +102,20 @@ function test_octavia {
openstack floating ip set $lb_fip --port $lb_port_id
echo "OVN NB entries for LB:"
- sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list load_balancer
+ # TODO(mnasiadka): Remove the first part of conditional in G cycle
+ if [ $IS_UPGRADE == "yes" ]; then
+ sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list load_balancer
+ else
+ sudo ${container_engine} exec ovn_northd ovn-nbctl list load_balancer
+ fi
+
echo "OVN NB entries for NAT:"
- sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list nat
+ # TODO(mnasiadka): Remove the first part of conditional in G cycle
+ if [ $IS_UPGRADE == "yes" ]; then
+ sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list nat
+ else
+ sudo ${container_engine} exec ovn_northd ovn-nbctl list nat
+ fi
echo "Attempt to access the load balanced HTTP server."
attempts=12
diff --git a/tests/test-prometheus-opensearch.sh b/tests/test-prometheus-opensearch.sh
index c1f8272c16..cbb687d072 100755
--- a/tests/test-prometheus-opensearch.sh
+++ b/tests/test-prometheus-opensearch.sh
@@ -170,7 +170,6 @@ function test_prometheus {
function test_prometheus_opensearch_logged {
. /etc/kolla/admin-openrc.sh
-
test_opensearch_dashboards
test_opensearch
test_grafana
diff --git a/tests/test-telemetry.sh b/tests/test-telemetry.sh
new file mode 100755
index 0000000000..91b5f6c323
--- /dev/null
+++ b/tests/test-telemetry.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+set -o xtrace
+set -o errexit
+set -o pipefail
+
+# Enable unbuffered output
+export PYTHONUNBUFFERED=1
+
+function test_aodh {
+ echo "TESTING: Aodh"
+ openstack alarm list
+ echo "SUCCESS: Aodh"
+}
+
+function test_gnocchi {
+ echo "TESTING: Gnocchi"
+ openstack metric list
+ openstack metric resource list
+ echo "SUCCESS: Gnocchi"
+}
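+
+# NOTE: the checks above are read-only smoke tests. A possible extension
+# (hypothetical, not part of this change) would be to exercise metric
+# ingestion end to end, for example:
+#   openstack metric resource create --type generic test-resource
+#   openstack metric resource show test-resource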
+
+function test_telemetry_scenario_logged {
+ . /etc/kolla/admin-openrc.sh
+ . ~/openstackclient-venv/bin/activate
+ test_aodh
+ test_gnocchi
+}
+
+function test_telemetry_scenario {
+ echo "Testing Telemetry"
+ test_telemetry_scenario_logged > /tmp/logs/ansible/test-telemetry-scenario 2>&1
+ result=$?
+ if [[ $result != 0 ]]; then
+ echo "Testing Telemetry scenario failed. See ansible/test-telemetry-scenario for details"
+ else
+ echo "Successfully tested Telemetry scenario. See ansible/test-telemetry-scenario for details"
+ fi
+ return $result
+}
+
+test_telemetry_scenario
diff --git a/tests/test-venus.sh b/tests/test-venus.sh
deleted file mode 100755
index 0039d67749..0000000000
--- a/tests/test-venus.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/bin/bash
-
-set -o xtrace
-set -o errexit
-set -o pipefail
-
-# Enable unbuffered output
-export PYTHONUNBUFFERED=1
-
-# TODO(yoctozepto): Avoid duplicating this from prometheus-opensearch
-function check_opensearch {
- # Verify that we see a healthy index created due to Fluentd forwarding logs
- local opensearch_url=${OS_AUTH_URL%:*}:9200/_cluster/health
- output_path=$1
- args=(
- --include
- --location
- --fail
- )
- if ! curl "${args[@]}" $opensearch_url > $output_path; then
- return 1
- fi
- # NOTE(mgoddard): Status may be yellow because no indices have been
- # created.
- if ! grep -E '"status":"(green|yellow)"' $output_path >/dev/null; then
- return 1
- fi
-}
-
-function check_venus {
- local venus_url=${OS_AUTH_URL%:*}:10010/custom_config
- output_path=$1
- if ! curl --include --fail $venus_url > $output_path; then
- return 1
- fi
- if ! grep -E '"status": "SUPPORTED"' $output_path >/dev/null; then
- return 1
- fi
-}
-
-function test_opensearch {
- echo "TESTING: OpenSearch"
- output_path=$(mktemp)
- attempt=1
- while ! check_opensearch $output_path; do
- echo "OpenSearch not accessible yet"
- attempt=$((attempt+1))
- if [[ $attempt -eq 12 ]]; then
- echo "FAILED: OpenSearch did not become accessible. Response:"
- cat $output_path
- return 1
- fi
- sleep 10
- done
- echo "SUCCESS: OpenSearch"
-}
-
-function test_venus {
- echo "TESTING: Venus"
- output_path=$(mktemp)
- attempt=1
- while ! check_venus $output_path; do
- echo "Venus not accessible yet"
- attempt=$((attempt+1))
- if [[ $attempt -eq 12 ]]; then
- echo "FAILED: Venus did not become accessible. Response:"
- cat $output_path
- return 1
- fi
- sleep 10
- done
- echo "SUCCESS: Venus"
-}
-
-function test_venus_scenario_logged {
- . /etc/kolla/admin-openrc.sh
-
- test_opensearch
- test_venus
-}
-
-function test_venus_scenario {
- echo "Testing Venus and OpenSearch"
- test_venus_scenario_logged > /tmp/logs/ansible/test-venus-scenario 2>&1
- result=$?
- if [[ $result != 0 ]]; then
- echo "Testing Venus scenario failed. See ansible/test-venus-scenario for details"
- else
- echo "Successfully tested Venus scenario. See ansible/test-venus-scenario for details"
- fi
- return $result
-}
-
-test_venus_scenario
diff --git a/tests/test_kolla_container_facts.py b/tests/test_kolla_container_facts.py
index e58b058c5f..b6a3d8000b 100644
--- a/tests/test_kolla_container_facts.py
+++ b/tests/test_kolla_container_facts.py
@@ -109,7 +109,7 @@ def contruct_volume(vol_dict: dict) -> mock.Mock:
return volume
-def get_containers(override=None):
+def get_containers(override=None, all: bool = False):
if override:
cont_dicts = override
else:
@@ -117,9 +117,11 @@ def get_containers(override=None):
containers = []
for c in cont_dicts:
- # Only running containers should be returned by the container APIs
- if c['State']['Status'] == 'running':
- containers.append(construct_container(c))
+        # Without the option "all", only running containers are returned
+        # by the container API
+ if not all and c['State']['Status'] != 'running':
+ continue
+ containers.append(construct_container(c))
return containers
@@ -152,8 +154,9 @@ def test_get_containers_single(self):
self.assertDictEqual(
self.fake_data['containers'][0],
self.dfw.result['containers']['my_container'])
+ self.dfw.client.containers.list.assert_called_once_with(all=False)
- def test_get_container_multi(self):
+ def test_get_containers_multi(self):
self.dfw = get_DockerFactsWorker(
{'name': ['my_container', 'exited_container'],
'action': 'get_containers'})
@@ -165,8 +168,9 @@ def test_get_container_multi(self):
self.assertIn('my_container', self.dfw.result['containers'])
self.assertNotIn('my_container', self.dfw.result)
self.assertNotIn('exited_container', self.dfw.result['containers'])
+ self.dfw.client.containers.list.assert_called_once_with(all=False)
- def test_get_container_all(self):
+ def test_get_containers_all_running(self):
self.dfw = get_DockerFactsWorker({'name': [],
'action': 'get_containers'})
running_containers = get_containers(self.fake_data['containers'])
@@ -177,6 +181,21 @@ def test_get_container_all(self):
self.assertIn('my_container', self.dfw.result['containers'])
self.assertNotIn('my_container', self.dfw.result)
self.assertNotIn('exited_container', self.dfw.result['containers'])
+ self.dfw.client.containers.list.assert_called_once_with(all=False)
+
+ def test_get_containers_all_including_stopped(self):
+ self.dfw = get_DockerFactsWorker({'name': [],
+ 'action': 'get_containers',
+ 'args': {
+ 'get_all_containers': True}})
+ all_containers = get_containers(self.fake_data['containers'], all=True)
+ self.dfw.client.containers.list.return_value = all_containers
+ self.dfw.get_containers()
+
+ self.assertFalse(self.dfw.result['changed'])
+ self.assertIn('my_container', self.dfw.result['containers'])
+ self.assertIn('exited_container', self.dfw.result['containers'])
+ self.dfw.client.containers.list.assert_called_once_with(all=True)
def test_get_containers_env(self):
fake_env = dict(KOLLA_BASE_DISTRO='ubuntu',
diff --git a/tests/test_kolla_toolbox.py b/tests/test_kolla_toolbox.py
index 112bd403c8..9dca821cc4 100644
--- a/tests/test_kolla_toolbox.py
+++ b/tests/test_kolla_toolbox.py
@@ -13,6 +13,7 @@
# limitations under the License.
import builtins
+import contextlib
import json
import os
import sys
@@ -20,6 +21,18 @@
from ansible.module_utils import basic
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes
+try:
+ from ansible.module_utils.testing import patch_module_args
+except ImportError:
+ # TODO(dougszu): Remove this exception handler when Python 3.10 support
+ # is not required. Python 3.10 isn't supported by Ansible Core 2.18 which
+ # provides patch_module_args
+ @contextlib.contextmanager
+ def patch_module_args(args):
+ serialized_args = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args}))
+ with mock.patch.object(basic, '_ANSIBLE_ARGS', serialized_args):
+ yield
+
from importlib.machinery import SourceFileLoader
from oslotest import base
from unittest import mock
@@ -33,13 +46,6 @@
kolla_toolbox_file).load_module()
-def set_module_args(args):
- """Prepare arguments so they will be picked up during module creation."""
-
- args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
- basic._ANSIBLE_ARGS = to_bytes(args)
-
-
class AnsibleExitJson(BaseException):
"""Exception to be raised by module.exit_json and caught by a test case."""
@@ -307,40 +313,40 @@ class TestModuleInteraction(TestKollaToolboxModule):
"""Class focused on testing user input data from playbook."""
def test_create_ansible_module_missing_required_module_name(self):
- set_module_args({
+ ansible_module_args = {
'container_engine': 'docker'
- })
-
- error = self.assertRaises(AnsibleFailJson,
- kolla_toolbox.create_ansible_module)
+ }
+ with patch_module_args(ansible_module_args):
+ error = self.assertRaises(AnsibleFailJson,
+ kolla_toolbox.create_ansible_module)
self.assertIn('missing required arguments: module_name',
error.result['msg'])
def test_create_ansible_module_missing_required_container_engine(self):
- set_module_args({
+ ansible_module_args = {
'module_name': 'url'
- })
-
- error = self.assertRaises(AnsibleFailJson,
- kolla_toolbox.create_ansible_module)
+ }
+ with patch_module_args(ansible_module_args):
+ error = self.assertRaises(AnsibleFailJson,
+ kolla_toolbox.create_ansible_module)
self.assertIn('missing required arguments: container_engine',
error.result['msg'])
def test_create_ansible_module_invalid_container_engine(self):
- set_module_args({
+ ansible_module_args = {
'module_name': 'url',
'container_engine': 'podmano'
- })
-
- error = self.assertRaises(AnsibleFailJson,
- kolla_toolbox.create_ansible_module)
+ }
+ with patch_module_args(ansible_module_args):
+ error = self.assertRaises(AnsibleFailJson,
+ kolla_toolbox.create_ansible_module)
self.assertIn(
'value of container_engine must be one of: podman, docker',
error.result['msg']
)
def test_create_ansible_module_success(self):
- args = {
+ ansible_module_args = {
'container_engine': 'docker',
'module_name': 'file',
'module_args': {
@@ -357,12 +363,10 @@ def test_create_ansible_module_success(self):
'timeout': 180,
'api_version': '1.5'
}
- set_module_args(args)
-
- module = kolla_toolbox.create_ansible_module()
-
+ with patch_module_args(ansible_module_args):
+ module = kolla_toolbox.create_ansible_module()
self.assertIsInstance(module, AnsibleModule)
- self.assertEqual(args, module.params)
+ self.assertEqual(ansible_module_args, module.params)
class TestContainerEngineClientIntraction(TestKollaToolboxModule):
@@ -381,14 +385,14 @@ def mock_import_error(self, name, globals, locals, fromlist, level):
return self.original_import(name, globals, locals, fromlist, level)
def test_podman_client_params(self):
- set_module_args({
+ ansible_module_args = {
'module_name': 'ping',
'container_engine': 'podman',
'api_version': '1.47',
'timeout': 155
- })
-
- module = kolla_toolbox.create_ansible_module()
+ }
+ with patch_module_args(ansible_module_args):
+ module = kolla_toolbox.create_ansible_module()
mock_podman = mock.MagicMock()
mock_podman_errors = mock.MagicMock()
import_dict = {'podman': mock_podman,
@@ -403,14 +407,14 @@ def test_podman_client_params(self):
)
def test_docker_client_params(self):
- set_module_args({
+ ansible_module_args = {
'module_name': 'ping',
'container_engine': 'docker',
'api_version': '1.47',
'timeout': 155
- })
-
- module = kolla_toolbox.create_ansible_module()
+ }
+ with patch_module_args(ansible_module_args):
+ module = kolla_toolbox.create_ansible_module()
mock_docker = mock.MagicMock()
mock_docker_errors = mock.MagicMock()
import_dict = {'docker': mock_docker,
@@ -425,14 +429,14 @@ def test_docker_client_params(self):
)
def test_create_container_client_podman_not_called_with_auto(self):
- set_module_args({
+ ansible_module_args = {
'module_name': 'ping',
'container_engine': 'podman',
'api_version': 'auto',
'timeout': 90
- })
-
- module = kolla_toolbox.create_ansible_module()
+ }
+ with patch_module_args(ansible_module_args):
+ module = kolla_toolbox.create_ansible_module()
mock_podman = mock.MagicMock()
mock_podman_errors = mock.MagicMock()
import_dict = {'podman': mock_podman,
@@ -446,12 +450,13 @@ def test_create_container_client_podman_not_called_with_auto(self):
)
def test_create_container_client_podman_importerror(self):
- set_module_args({
+ ansible_module_args = {
'module_name': 'ping',
'container_engine': 'podman'
- })
+ }
self.module_to_mock_import = 'podman'
- module = kolla_toolbox.create_ansible_module()
+ with patch_module_args(ansible_module_args):
+ module = kolla_toolbox.create_ansible_module()
with mock.patch('builtins.__import__',
side_effect=self.mock_import_error):
@@ -462,13 +467,13 @@ def test_create_container_client_podman_importerror(self):
error.result['msg'])
def test_create_container_client_docker_importerror(self):
- set_module_args({
+ ansible_module_args = {
'module_name': 'ping',
'container_engine': 'docker'
- })
-
+ }
self.module_to_mock_import = 'docker'
- module = kolla_toolbox.create_ansible_module()
+ with patch_module_args(ansible_module_args):
+ module = kolla_toolbox.create_ansible_module()
with mock.patch('builtins.__import__',
side_effect=self.mock_import_error):
diff --git a/tests/testinfra/test_horizon.py b/tests/testinfra/test_horizon.py
new file mode 100644
index 0000000000..abdb787c68
--- /dev/null
+++ b/tests/testinfra/test_horizon.py
@@ -0,0 +1,120 @@
+# Copyright 2018 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+import yaml
+
+from pathlib import Path
+from selenium.common.exceptions import TimeoutException
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+
+home = Path.home()
+subpath = '/testinfra/screenshots/'
+screenshot_path = str(home) + subpath
+
+with open("/etc/kolla/passwords.yml", 'r') as file:
+ passwords = yaml.safe_load(file)
+ admin_password = passwords.get('keystone_admin_password')
+
+
+def test_horizon_screenshot(host):
+
+ firefox_options = webdriver.FirefoxOptions()
+
+ driver = webdriver.Remote(
+ command_executor='http://localhost:4444/wd/hub',
+ options=firefox_options)
+
+ horizon_proto = host.environment().get('HORIZON_PROTO')
+ horizon_url = horizon_proto + "://192.0.2.10"
+
+ try:
+ driver.get(horizon_url)
+ WebDriverWait(driver, 30).until(
+ lambda driver: driver.execute_script(
+ 'return document.readyState') == 'complete')
+
+ time.sleep(5)
+
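+        # Resize the browser window to the full document height so the body
+        # screenshot below captures the entire page; the original window size
+        # is restored afterwards.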
+ original_size = driver.get_window_size()
+ required_width = driver.execute_script(
+ 'return document.body.parentNode.scrollWidth')
+ required_height = driver.execute_script(
+ 'return document.body.parentNode.scrollHeight') + 100
+ driver.set_window_size(required_width, required_height)
+
+ driver.find_element(By.TAG_NAME, 'body').\
+ screenshot(screenshot_path + "horizon-main.png") # nosec B108
+
+ driver.set_window_size(
+ original_size['width'], original_size['height'])
+
+ assert 'Login' in driver.title # nosec B101
+
+ except TimeoutException as e:
+ raise e
+ finally:
+ driver.quit()
+
+
+def test_horizon_login(host):
+
+ firefox_options = webdriver.FirefoxOptions()
+
+ driver = webdriver.Remote(
+ command_executor='http://localhost:4444/wd/hub',
+ options=firefox_options)
+
+ horizon_proto = host.environment().get('HORIZON_PROTO')
+ horizon_url = horizon_proto + "://192.0.2.10"
+ logout_url = '/'.join((
+ horizon_url,
+ 'auth',
+ 'logout'))
+
+ try:
+ driver.get(logout_url)
+ user_field = driver.find_element(By.ID, 'id_username')
+ user_field.send_keys('admin')
+ pass_field = driver.find_element(By.ID, 'id_password')
+ pass_field.send_keys(admin_password)
+ button = driver.find_element(By.CSS_SELECTOR, '.btn-primary')
+ button.click()
+ WebDriverWait(driver, 30).until(
+ lambda driver: driver.execute_script(
+ 'return document.readyState') == 'complete')
+
+ time.sleep(10)
+
+ original_size = driver.get_window_size()
+ required_width = driver.execute_script(
+ 'return document.body.parentNode.scrollWidth')
+ required_height = driver.execute_script(
+ 'return document.body.parentNode.scrollHeight') + 100
+ driver.set_window_size(required_width, required_height)
+
+ driver.find_element(By.TAG_NAME, 'body').\
+ screenshot(screenshot_path + "horizon-logged-in.png") # nosec B108
+
+ driver.set_window_size(
+ original_size['width'], original_size['height'])
+
+ assert 'Overview - OpenStack Dashboard' in driver.title # nosec B101
+
+ except TimeoutException as e:
+ raise e
+ finally:
+ driver.quit()
diff --git a/tests/upgrade-bifrost.sh b/tests/upgrade-bifrost.sh
deleted file mode 100755
index a5d5c36826..0000000000
--- a/tests/upgrade-bifrost.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-set -o xtrace
-set -o errexit
-
-# Enable unbuffered output for Ansible in Jenkins.
-export PYTHONUNBUFFERED=1
-
-
-function upgrade_bifrost {
- RAW_INVENTORY=/etc/kolla/inventory
-
- source $KOLLA_ANSIBLE_VENV_PATH/bin/activate
-
- # TODO(mgoddard): run prechecks.
- # TODO(mgoddard): add pull action when we have a local registry service in
- # CI.
- # TODO(mgoddard): make some configuration file changes and trigger a real
- # upgrade.
- kolla-ansible deploy-bifrost -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-bifrost
-}
-
-
-upgrade_bifrost
diff --git a/tests/upgrade.sh b/tests/upgrade.sh
deleted file mode 100755
index a53a440e5b..0000000000
--- a/tests/upgrade.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-
-set -o xtrace
-set -o errexit
-
-# Enable unbuffered output for Ansible in Jenkins.
-export PYTHONUNBUFFERED=1
-
-
-function upgrade {
- RAW_INVENTORY=/etc/kolla/inventory
-
- source $KOLLA_ANSIBLE_VENV_PATH/bin/activate
-
- kolla-ansible certificates -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/certificates
- # Previous versions had older docker, requests requirements for example
- # Therefore we need to run bootstrap again to ensure libraries are in
- # proper versions (ansible-collection-kolla is different for new version, potentionally
- # also dependencies).
- kolla-ansible bootstrap-servers -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-bootstrap
- # Skip rabbitmq-ha-precheck before the queues are migrated.
- kolla-ansible prechecks -i ${RAW_INVENTORY} --skip-tags rabbitmq-ha-precheck -vvv &> /tmp/logs/ansible/upgrade-prechecks-pre-rabbitmq
-
- # NOTE(SvenKieske): As om_enable_rabbitmq_transient_quorum_queue now also
- # enables quorum_queues for fanout/reply queues in Epoxy, we need
- # to perform a migration to durable queues.
- # TODO(SvenKieske): Remove these steps in F Cycle.
- SERVICE_TAGS="heat,keystone,neutron,nova"
- if [[ $SCENARIO == "zun" ]] || [[ $SCENARIO == "cephadm" ]]; then
- SERVICE_TAGS+=",cinder"
- fi
- if [[ $SCENARIO == "scenario_nfv" ]]; then
- SERVICE_TAGS+=",barbican"
- fi
- if [[ $SCENARIO == "ironic" ]]; then
- SERVICE_TAGS+=",ironic"
- fi
- if [[ $SCENARIO == "masakari" ]]; then
- SERVICE_TAGS+=",masakari"
- fi
- if [[ $SCENARIO == "ovn" ]] || [[ $SCENARIO == "octavia" ]]; then
- SERVICE_TAGS+=",octavia"
- fi
- if [[ $SCENARIO == "magnum" ]]; then
- SERVICE_TAGS+=",magnum,designate"
- fi
- kolla-ansible stop -i ${RAW_INVENTORY} -vvv --tags $SERVICE_TAGS --yes-i-really-really-mean-it --ignore-missing &> /tmp/logs/ansible/stop
- kolla-ansible genconfig -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/genconfig
- kolla-ansible rabbitmq-reset-state -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/rabbitmq-reset-state
- # Include rabbitmq-ha-precheck this time to confirm all queues have migrated.
- kolla-ansible prechecks -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-prechecks
-
- kolla-ansible pull -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/pull-upgrade
- kolla-ansible upgrade -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade
-
- kolla-ansible post-deploy -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-post-deploy
-
- kolla-ansible validate-config -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/validate-config
-}
-
-
-upgrade
diff --git a/tools/init-runonce b/tools/init-runonce
index 0c389642b2..900d979f23 100755
--- a/tools/init-runonce
+++ b/tools/init-runonce
@@ -19,7 +19,7 @@ fi
# to be created.
ARCH=$(uname -m)
-CIRROS_RELEASE=${CIRROS_RELEASE:-0.6.2}
+CIRROS_RELEASE=${CIRROS_RELEASE:-0.6.3}
IMAGE_PATH=/opt/cache/files/
IMAGE_URL=${IMAGE_URL:-"https://github.com/cirros-dev/cirros/releases/download/${CIRROS_RELEASE}/"}
IMAGE=cirros-${CIRROS_RELEASE}-${ARCH}-disk.img
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
index 82daa26e2c..7a9b481cbe 100644
--- a/zuul.d/base.yaml
+++ b/zuul.d/base.yaml
@@ -1,58 +1,5 @@
---
- job:
- name: kolla-ansible-variables
- vars:
- address_family: 'ipv4'
- # Test latest ansible-core version on Ubuntu, minimum supported on others.
- # Use SLURP version (two releases back) on SLURP upgrades.
- ansible_core_version_constraint: >-
- {{ ansible_core_version_slurp if is_slurp else ansible_core_version_min if is_upgrade or ansible_facts.distribution != "Ubuntu" else ansible_core_version_max }}
- ansible_core_version_slurp: "==2.16.*"
- ansible_core_version_max: "==2.18.*"
- ansible_core_version_min: "==2.17.*"
- # NOTE(mgoddard): Test the use of interface names with dashes.
- api_interface_name: "vxlan-0"
- api_network_prefix: "192.0.2."
- api_network_prefix_length: "24"
- configure_swap_size: 0
- container_engine: "docker"
- is_upgrade: false
- is_slurp: false
- kolla_internal_vip_address: "192.0.2.10"
- le_enabled: false
- neutron_external_bridge_name: br-0
- neutron_external_interface_name: "veth-{{ neutron_external_bridge_name }}-ext"
- neutron_external_network_prefix: "198.51.100."
- neutron_external_network_prefix_length: "24"
- neutron_external_vxlan_interface_name: "vxlan-1"
- neutron_tenant_network_dns_server: "8.8.8.8"
- neutron_tenant_network_prefix: "203.0.113."
- neutron_tenant_network_prefix_length: "24"
- previous_release: "2025.1"
- scenario: core
- scenario_images_core:
- - ^cron
- - ^fluentd
- - ^glance
- - ^haproxy
- - ^heat
- - ^horizon
- - ^keepalived
- - ^keystone
- - ^kolla-toolbox
- - ^mariadb
- - ^memcached
- - ^neutron
- - ^nova-
- - ^openvswitch
- - ^placement
- - ^proxysql
- - ^rabbitmq
- tls_enabled: false
- virt_type: qemu
-
-- job:
- parent: kolla-ansible-variables
name: kolla-ansible-base
pre-run: tests/pre.yml
run: tests/run.yml
@@ -64,6 +11,17 @@
- openstack/kolla
- openstack/kolla-ansible
- openstack/requirements
+ files:
+ - ^ansible/group_vars/all/common.yml
+ - ^requirements-core.yml
+ - ^roles/kolla-ansible-test-dashboard/
+ - ^tests/check-(config|failure|logs).sh
+ - ^tests/get_logs.sh
+ - ^tests/init-core-openstack.sh
+ - ^tests/(run|pre|post).yml
+ - ^tests/setup_gate.sh
+ - ^tests/templates/(inventory|globals-default).j2
+ - ^tests/upgrade.sh
irrelevant-files:
- ^.*\.rst$
- ^bindep.txt$
@@ -80,363 +38,60 @@
- ^specs/
- ^kolla_ansible/tests/
- ^tools/validate-.*$
+ - ^zuul.d/
roles:
- zuul: zuul/zuul-jobs
- zuul: openstack/kolla
-
-- job:
- parent: kolla-ansible-base
- name: kolla-ansible-scenario-base
- files:
- - ^ansible/group_vars/all.yml
- - ^ansible/roles/common/
- - ^requirements-core.yml
- - ^tests/check-logs.sh
- - ^tests/get_logs.sh
- - ^tests/(pre|run).yml
- - ^tests/templates/(inventory|globals-default.j2)
-
-- job:
- name: kolla-ansible-kvm-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/(nova-cell)/
- - ^tests/templates/nova-compute-overrides.j2
- vars:
- virt_type: kvm
-
-- job:
- name: kolla-ansible-ipv6-base
- parent: kolla-ansible-base
- voting: false
- vars:
- api_network_prefix: "fd::"
- api_network_prefix_length: "64"
- kolla_internal_vip_address: "fd::ff:0"
- neutron_external_network_prefix: "fd:1::"
- neutron_external_network_prefix_length: "64"
- neutron_tenant_network_prefix: "fd:f0::"
- neutron_tenant_network_prefix_length: "64"
- neutron_tenant_network_dns_server: 2001:4860:4860::8888
- address_family: 'ipv6'
- scenario: ipv6
-
-- job:
- name: kolla-ansible-bifrost-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/bifrost/
- - ^tests/test-bifrost.sh
- vars:
- scenario: bifrost
- scenario_images_core:
- - ^bifrost
-
-- job:
- name: kolla-ansible-ironic-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/(ironic|neutron|nova|nova-cell)/
- - ^tests/deploy-tenks\.sh$
- - ^tests/templates/ironic-overrides\.j2$
- - ^tests/templates/tenks-deploy-config\.yml\.j2$
- - ^tests/test-dashboard\.sh$
- - ^tests/test-ironic\.sh$
- required-projects:
- - openstack/tenks
- vars:
- scenario: ironic
- scenario_images_extra:
- - ^dnsmasq
- - ^ironic
- - ^iscsid
- - ^prometheus
-
-- job:
- name: kolla-ansible-zun-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/(zun|kuryr|etcd|cinder|iscsi)/
- - ^tests/setup_disks.sh
- - ^tests/test-core-openstack.sh
- - ^tests/test-zun.sh
- - ^tests/test-dashboard.sh
- vars:
- scenario: zun
- scenario_images_extra:
- - ^zun
- - ^kuryr
- - ^etcd
- - ^cinder
- - ^iscsid
- - ^tgtd
-
-- job:
- name: kolla-ansible-swift-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/(glance|swift)/
- - ^tests/setup_disks.sh
- - ^tests/init-swift.sh
- - ^tests/test-core-openstack.sh
- - ^tests/test-dashboard.sh
- - ^tests/test-swift.sh
- vars:
- scenario: swift
- scenario_images_extra:
- - ^swift
-
-- job:
- name: kolla-ansible-cephadm-base
- parent: kolla-ansible-base
- voting: false
- vars:
- scenario: cephadm
- scenario_images_extra:
- - ^cinder
- - ^redis
-
-- job:
- name: kolla-ansible-magnum-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/(designate|magnum|trove)/
- - ^tests/test-dashboard.sh
- - ^tests/test-magnum.sh
- vars:
- scenario: magnum
- scenario_images_extra:
- - ^designate
- - ^magnum
- - ^trove
-
-- job:
- name: kolla-ansible-octavia-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/(octavia|octavia-certificates)/
- - ^tests/test-dashboard.sh
- - ^tests/test-octavia.sh
- vars:
- scenario: octavia
- scenario_images_extra:
- - ^redis
- - ^octavia
-
-- job:
- name: kolla-ansible-masakari-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/masakari/
- - ^ansible/roles/hacluster/
- - ^tests/test-masakari.sh
- - ^tests/test-dashboard.sh
- vars:
- scenario: masakari
- scenario_images_extra:
- - ^masakari
- - ^hacluster
-
-- job:
- name: kolla-ansible-mariadb-base
- parent: kolla-ansible-scenario-base
- voting: true
- files: !inherit
- - ^ansible/roles/(loadbalancer|mariadb|proxysql-config)/
- - ^tests/test-mariadb.sh
- vars:
- scenario: mariadb
- scenario_images_core:
- - ^cron
- - ^fluentd
- - ^haproxy
- - ^keepalived
- - ^kolla-toolbox
- - ^mariadb
- - ^proxysql
-
-- job:
- name: kolla-ansible-scenario-nfv-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/(aodh|barbican|heat|mistral|redis|tacker)/
- - ^tests/test-scenario-nfv.sh
- - ^tests/test-dashboard.sh
vars:
- scenario: scenario_nfv
- scenario_images_extra:
- - ^aodh
- - ^tacker
- - ^mistral
- - ^redis
- - ^barbican
-
-- job:
- name: kolla-ansible-cells-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/nova/
- - ^ansible/roles/nova-cell/
- - ^ansible/roles/loadbalancer/
- - ^tests/test-core-openstack.sh
- - ^tests/test-proxysql.sh
- vars:
- scenario: cells
- scenario_images_extra:
- - ^proxysql
-
-- job:
- name: kolla-ansible-ovn-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/(neutron|octavia|openvswitch|ovn-controller|ovn-db)/
- - ^tests/test-ovn.sh
- - ^tests/test-core-openstack.sh
- - ^tests/reconfigure.sh
- vars:
- scenario: ovn
- scenario_images_extra:
- - ^redis
- - ^octavia
- - ^ovn
-
-- job:
- name: kolla-ansible-prometheus-opensearch-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/site.yml
- - ^ansible/roles/(common|opensearch|grafana|prometheus)/
- - ^tests/test-prometheus-opensearch.sh
- vars:
- scenario: prometheus-opensearch
- scenario_images_core:
- - ^cron
- - ^fluentd
- - ^grafana
- - ^haproxy
- - ^keepalived
- - ^kolla-toolbox
- - ^mariadb
- - ^memcached
- - ^opensearch
- - ^prometheus
- - ^proxysql
- - ^rabbitmq
-
-- job:
- name: kolla-ansible-venus-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/(common|opensearch|venus)/
- - ^tests/test-venus.sh
- vars:
- scenario: venus
+ address_family: 'ipv4'
+ # Test latest ansible-core version on Ubuntu, minimum supported on others.
+ # Use SLURP version (two releases back) on SLURP upgrades.
+ ansible_core_version_constraint: >-
+ {{ ansible_core_version_slurp if is_slurp else ansible_core_version_min if is_upgrade
+ or ansible_facts.distribution != "Ubuntu" else ansible_core_version_max }}
+ ansible_core_version_slurp: "==2.17.*"
+ ansible_core_version_max: "==2.19.*"
+ ansible_core_version_min: "==2.18.*"
+ # NOTE(mgoddard): Test the use of interface names with dashes.
+ api_interface_name: "vxlan-0"
+ api_network_prefix: "192.0.2."
+ api_network_prefix_length: "24"
+ base_distro: "{{ zuul.job.split('-').2 }}"
+ configure_swap_size: 0
+ container_engine: "docker"
+ is_upgrade: "{{ 'upgrade' in zuul.job }}"
+ is_slurp: "{{ 'slurp' in zuul.job }}"
+ kolla_ansible_tempest_regex: "\\[.*\\bsmoke\\b.*\\]"
+ kolla_internal_vip_address: "192.0.2.10"
+ le_enabled: false
+ neutron_external_bridge_name: br-0
+ neutron_external_interface_name: >-
+ veth-{{ neutron_external_bridge_name }}-ext
+ neutron_external_network_prefix: "198.51.100."
+ neutron_external_network_prefix_length: "24"
+ neutron_external_vxlan_interface_name: "vxlan-1"
+ neutron_tenant_network_dns_server: "8.8.8.8"
+ neutron_tenant_network_prefix: "203.0.113."
+ neutron_tenant_network_prefix_length: "24"
+ previous_release: "{{ '2025.1' if is_slurp else '2025.1' }}"
+ scenario: core
scenario_images_core:
- ^cron
- - ^opensearch
- ^fluentd
+ - ^glance
- ^haproxy
+ - ^heat
+ - ^horizon
- ^keepalived
- ^keystone
- ^kolla-toolbox
- ^mariadb
- ^memcached
+ - ^neutron
+ - ^nova-
+ - ^openvswitch
+ - ^placement
+ - ^proxysql
- ^rabbitmq
- - ^venus
-
-- job:
- name: kolla-ansible-hashi-vault-base
- parent: kolla-ansible-variables
- run: tests/run-hashi-vault.yml
- required-projects:
- - openstack/kolla-ansible
- - openstack/requirements
- voting: false
- files:
- - ^requirements-core.yml
- - ^tests/templates/(inventory|globals-default.j2)
- - ^tests/(pre|run).yml
- - ^kolla_ansible/
- - ^tests/run-hashi-vault.yml
- - ^tests/test-hashicorp-vault-passwords.sh
-
-- job:
- name: kolla-ansible-haproxy-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/haproxy/
- - ^ansible/roles/loadbalancer/
- - ^kolla_ansible/kolla_url.py
- vars:
- external_api_interface_name: vxlan2
- external_api_network_prefix: "192.0.3."
- external_api_network_prefix_length: "24"
- kolla_external_vip_address: "192.0.3.10"
- scenario: haproxy
-
-- job:
- name: kolla-ansible-lets-encrypt-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/common/templates/conf/input/11-letsencrypt.conf.j2
- - ^ansible/roles/(letsencrypt|loadbalancer)/
- - ^tests/test-core-openstack.sh
- - ^tests/test-dashboard.sh
- - ^tests/deploy.sh
- vars:
- scenario: lets-encrypt
- scenario_images_extra:
- - ^letsencrypt
- - ^haproxy
tls_enabled: true
- le_enabled: true
-
-- job:
- name: kolla-ansible-skyline-base
- parent: kolla-ansible-scenario-base
- voting: false
- files: !inherit
- - ^ansible/roles/skyline/
- - ^tests/test-skyline.sh
- vars:
- scenario: skyline
- scenario_images_extra:
- - ^skyline
-
-- job:
- name: kolla-ansible-skyline-sso-base
- parent: kolla-ansible-scenario-base
- voting: false
- files:
- - ^ansible/roles/skyline/
- - ^tests/test-skyline-sso.sh
- vars:
- scenario: skyline-sso
- scenario_images_extra:
- - ^skyline
-
-- job:
- name: kolla-ansible-container-engine-migration-base
- parent: kolla-ansible-base
- voting: false
- files:
- - ^ansible/migrate-container-engine.yml
- - ^ansible/roles/container-engine-migration/
- - ^tests/test-container-engine-migration.sh
- vars:
- scenario: container-engine-migration
+ virt_type: qemu
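
As a quick aid to reading the new ansible_core_version_constraint expression in the vars block above, here is a minimal sketch (not part of the patch; the function name and sample distributions are illustrative only) of how the chained Jinja2 conditional resolves. The version pins mirror ansible_core_version_slurp, ansible_core_version_min and ansible_core_version_max as set in this change.

def ansible_core_version_constraint(is_slurp, is_upgrade, distribution):
    # Mirrors the chained Jinja2 conditional in the vars block above.
    slurp = "==2.17.*"    # ansible_core_version_slurp
    minimum = "==2.18.*"  # ansible_core_version_min
    maximum = "==2.19.*"  # ansible_core_version_max
    if is_slurp:
        return slurp
    if is_upgrade or distribution != "Ubuntu":
        return minimum
    return maximum

# SLURP upgrade on any distribution -> SLURP (two releases back) pin
assert ansible_core_version_constraint(True, True, "Rocky") == "==2.17.*"
# Non-SLURP upgrade, or any non-Ubuntu job -> minimum supported version
assert ansible_core_version_constraint(False, True, "Ubuntu") == "==2.18.*"
assert ansible_core_version_constraint(False, False, "Debian") == "==2.18.*"
# Fresh deploy on Ubuntu -> latest tested version
assert ansible_core_version_constraint(False, False, "Ubuntu") == "==2.19.*"

In short, per the comment above: SLURP upgrade jobs pin the two-releases-back version, upgrade jobs and non-Ubuntu jobs pin the minimum supported version, and fresh Ubuntu deploys test the latest version.
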
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
deleted file mode 100644
index e416ce8452..0000000000
--- a/zuul.d/jobs.yaml
+++ /dev/null
@@ -1,612 +0,0 @@
----
-- job:
- name: kolla-ansible-centos9s
- parent: kolla-ansible-base
- nodeset: kolla-ansible-centos9s
- voting: false
- vars:
- base_distro: centos
- tls_enabled: true
- kolla_build_images: true
-
-- job:
- name: kolla-ansible-centos10s
- parent: kolla-ansible-base
- nodeset: kolla-ansible-centos10s-8GB
- voting: false
- vars:
- base_distro: centos
- tls_enabled: true
- kolla_build_images: true
-
-- job:
- name: kolla-ansible-centos10s-aarch64
- parent: kolla-ansible-centos10s
- nodeset: kolla-ansible-centos10s-aarch64-8GB
-
-- job:
- name: kolla-ansible-debian-aarch64
- parent: kolla-ansible-debian
- nodeset: kolla-ansible-debian-bookworm-aarch64-8GB
- timeout: 10800
- voting: false
- required-projects:
- - openstack/kolla
-
-- job:
- name: kolla-ansible-debian-aarch64-podman
- parent: kolla-ansible-debian
- nodeset: kolla-ansible-debian-bookworm-aarch64-8GB
- timeout: 10800
- voting: false
- vars:
- container_engine: podman
- required-projects:
- - openstack/kolla
-
-- job:
- name: kolla-ansible-debian
- parent: kolla-ansible-base
- nodeset: kolla-ansible-debian-bookworm-16GB
- vars:
- base_distro: debian
- tls_enabled: true
-
-- job:
- name: kolla-ansible-debian-podman
- parent: kolla-ansible-base
- nodeset: kolla-ansible-debian-bookworm-16GB
- vars:
- base_distro: debian
- tls_enabled: true
- container_engine: podman
-
-- job:
- name: kolla-ansible-rocky9
- parent: kolla-ansible-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
- tls_enabled: true
-
-- job:
- name: kolla-ansible-rocky9-podman
- parent: kolla-ansible-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
- tls_enabled: true
- container_engine: podman
-
-- job:
- name: kolla-ansible-ubuntu
- parent: kolla-ansible-base
- nodeset: kolla-ansible-ubuntu-noble-16GB
- vars:
- base_distro: ubuntu
- tls_enabled: true
-
-- job:
- name: kolla-ansible-ubuntu-podman
- parent: kolla-ansible-base
- nodeset: kolla-ansible-ubuntu-noble-16GB
- vars:
- base_distro: ubuntu
- tls_enabled: true
- container_engine: podman
-
-- job:
- name: kolla-ansible-rocky9-kvm
- parent: kolla-ansible-kvm-base
- nodeset: kolla-ansible-rocky9-nested-virt
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-kvm
- parent: kolla-ansible-kvm-base
- nodeset: kolla-ansible-ubuntu-noble-nested-virt
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-multinode-ipv6
- parent: kolla-ansible-ipv6-base
- nodeset: kolla-ansible-rocky9-multi
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-multinode-ipv6
- parent: kolla-ansible-ipv6-base
- nodeset: kolla-ansible-ubuntu-noble-multi-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-cephadm
- parent: kolla-ansible-cephadm-base
- nodeset: kolla-ansible-rocky9-multi
- timeout: 10800
- vars:
- base_distro: rocky
- cephadm_use_package_from_distribution: true
-
-- job:
- name: kolla-ansible-ubuntu-cephadm
- parent: kolla-ansible-cephadm-base
- nodeset: kolla-ansible-ubuntu-noble-multi-16GB
- timeout: 10800
- vars:
- base_distro: ubuntu
- cephadm_use_package_from_distribution: true
-
-- job:
- name: kolla-ansible-debian-mariadb
- parent: kolla-ansible-mariadb-base
- nodeset: kolla-ansible-debian-bookworm-multi-16GB
- vars:
- base_distro: debian
-
-- job:
- name: kolla-ansible-rocky9-mariadb
- parent: kolla-ansible-mariadb-base
- nodeset: kolla-ansible-rocky9-multi
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-mariadb
- parent: kolla-ansible-mariadb-base
- nodeset: kolla-ansible-ubuntu-noble-multi-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-upgrade
- parent: kolla-ansible-base
- nodeset: kolla-ansible-rocky9
- timeout: 10800
- vars:
- base_distro: rocky
- is_upgrade: yes
- tls_enabled: true
-
-- job:
- name: kolla-ansible-rocky9-slurp-upgrade
- parent: kolla-ansible-base
- nodeset: kolla-ansible-rocky9
- timeout: 9000
- vars:
- base_distro: rocky
- is_upgrade: yes
- is_slurp: yes
- previous_release: "2024.1"
- tls_enabled: true
-
-- job:
- name: kolla-ansible-debian-upgrade
- parent: kolla-ansible-base
- nodeset: kolla-ansible-debian-bookworm-16GB
- timeout: 10800
- vars:
- base_distro: debian
- is_upgrade: yes
- tls_enabled: true
-
-- job:
- name: kolla-ansible-debian-slurp-upgrade
- parent: kolla-ansible-base
- nodeset: kolla-ansible-debian-bookworm-16GB
- timeout: 9000
- vars:
- base_distro: debian
- is_upgrade: yes
- is_slurp: yes
- previous_release: "2024.1"
- tls_enabled: true
-
-- job:
- name: kolla-ansible-debian-upgrade-aarch64
- parent: kolla-ansible-debian-upgrade
- nodeset: kolla-ansible-debian-bookworm-aarch64-8GB
- voting: false
-
-- job:
- name: kolla-ansible-ubuntu-upgrade
- parent: kolla-ansible-base
- nodeset: kolla-ansible-ubuntu-noble-16GB
- timeout: 10800
- vars:
- base_distro: ubuntu
- is_upgrade: yes
- tls_enabled: true
-
-- job:
- name: kolla-ansible-ubuntu-slurp-upgrade
- parent: kolla-ansible-base
- nodeset: kolla-ansible-ubuntu-noble-16GB
- timeout: 9000
- vars:
- base_distro: ubuntu
- is_upgrade: yes
- is_slurp: yes
- previous_release: "2024.1"
- tls_enabled: true
-
-- job:
- name: kolla-ansible-rocky9-upgrade-cephadm
- parent: kolla-ansible-cephadm-base
- nodeset: kolla-ansible-rocky9-multi
- timeout: 10800
- vars:
- base_distro: rocky
- is_upgrade: yes
-
-- job:
- name: kolla-ansible-rocky9-slurp-upgrade-cephadm
- parent: kolla-ansible-cephadm-base
- nodeset: kolla-ansible-rocky9-multi
- timeout: 9000
- vars:
- base_distro: rocky
- is_upgrade: yes
- is_slurp: yes
- previous_release: "2024.1"
-
-- job:
- name: kolla-ansible-ubuntu-upgrade-cephadm
- parent: kolla-ansible-cephadm-base
- nodeset: kolla-ansible-ubuntu-noble-multi-16GB
- timeout: 10800
- vars:
- base_distro: ubuntu
- is_upgrade: yes
- cephadm_use_package_from_distribution: true
-
-- job:
- name: kolla-ansible-ubuntu-slurp-upgrade-cephadm
- parent: kolla-ansible-cephadm-base
- nodeset: kolla-ansible-ubuntu-noble-multi-16GB
- timeout: 9000
- vars:
- base_distro: ubuntu
- is_upgrade: yes
- is_slurp: yes
- previous_release: "2024.1"
- cephadm_use_package_from_distribution: true
-
-- job:
- name: kolla-ansible-rocky9-upgrade-ovn
- parent: kolla-ansible-ovn-base
- nodeset: kolla-ansible-rocky9-multi
- timeout: 10800
- vars:
- base_distro: rocky
- is_upgrade: yes
-
-- job:
- name: kolla-ansible-debian-upgrade-ovn
- parent: kolla-ansible-ovn-base
- nodeset: kolla-ansible-debian-bookworm-multi-16GB
- timeout: 10800
- vars:
- base_distro: debian
- is_upgrade: yes
-
-- job:
- name: kolla-ansible-ubuntu-upgrade-ovn
- parent: kolla-ansible-ovn-base
- nodeset: kolla-ansible-ubuntu-noble-multi-16GB
- timeout: 10800
- vars:
- base_distro: ubuntu
- is_upgrade: yes
-
-- job:
- name: kolla-ansible-rocky9-bifrost
- parent: kolla-ansible-bifrost-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-bifrost
- parent: kolla-ansible-bifrost-base
- nodeset: kolla-ansible-ubuntu-noble-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-zun
- parent: kolla-ansible-zun-base
- nodeset: kolla-ansible-rocky9-multi
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-zun
- parent: kolla-ansible-zun-base
- nodeset: kolla-ansible-ubuntu-noble-multi-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-scenario-nfv
- parent: kolla-ansible-scenario-nfv-base
- nodeset: kolla-ansible-rocky9-multi
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-rocky9-ironic
- parent: kolla-ansible-ironic-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-debian-ironic
- parent: kolla-ansible-ironic-base
- nodeset: kolla-ansible-debian-bookworm-16GB
- vars:
- base_distro: debian
-
-- job:
- name: kolla-ansible-ubuntu-ironic
- parent: kolla-ansible-ironic-base
- nodeset: kolla-ansible-ubuntu-noble-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-ironic-upgrade
- parent: kolla-ansible-ironic-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
- is_upgrade: true
-
-- job:
- name: kolla-ansible-debian-ironic-upgrade
- parent: kolla-ansible-ironic-base
- nodeset: kolla-ansible-debian-bookworm-16GB
- vars:
- base_distro: debian
- is_upgrade: true
-
-- job:
- name: kolla-ansible-ubuntu-ironic-upgrade
- parent: kolla-ansible-ironic-base
- nodeset: kolla-ansible-ubuntu-noble-8GB
- vars:
- base_distro: ubuntu
- is_upgrade: true
-
-- job:
- name: kolla-ansible-rocky9-magnum
- parent: kolla-ansible-magnum-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-magnum
- parent: kolla-ansible-magnum-base
- nodeset: kolla-ansible-ubuntu-noble-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-octavia
- parent: kolla-ansible-octavia-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-octavia
- parent: kolla-ansible-octavia-base
- nodeset: kolla-ansible-ubuntu-noble-16GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-ubuntu-masakari
- parent: kolla-ansible-masakari-base
- nodeset: kolla-ansible-ubuntu-noble-masakari-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-masakari
- parent: kolla-ansible-masakari-base
- nodeset: kolla-ansible-rocky9-masakari
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-cells
- parent: kolla-ansible-cells-base
- nodeset: kolla-ansible-ubuntu-noble-multi-16GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-cells
- parent: kolla-ansible-cells-base
- nodeset: kolla-ansible-rocky9-multi
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-rocky9-ovn
- parent: kolla-ansible-ovn-base
- nodeset: kolla-ansible-rocky9-multi
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-debian-ovn
- parent: kolla-ansible-ovn-base
- nodeset: kolla-ansible-debian-bookworm-multi-16GB
- vars:
- base_distro: debian
-
-- job:
- name: kolla-ansible-ubuntu-ovn
- parent: kolla-ansible-ovn-base
- nodeset: kolla-ansible-ubuntu-noble-multi-16GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-ubuntu-lets-encrypt
- parent: kolla-ansible-lets-encrypt-base
- nodeset: kolla-ansible-ubuntu-noble-multi-16GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-lets-encrypt
- parent: kolla-ansible-lets-encrypt-base
- nodeset: kolla-ansible-rocky9-multi
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-rocky9-prometheus-opensearch
- parent: kolla-ansible-prometheus-opensearch-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-prometheus-opensearch
- parent: kolla-ansible-prometheus-opensearch-base
- nodeset: kolla-ansible-ubuntu-noble-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-prometheus-opensearch-upgrade
- parent: kolla-ansible-prometheus-opensearch-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
- is_upgrade: yes
-
-- job:
- name: kolla-ansible-ubuntu-prometheus-opensearch-upgrade
- parent: kolla-ansible-prometheus-opensearch-base
- nodeset: kolla-ansible-ubuntu-noble-8GB
- vars:
- base_distro: ubuntu
- is_upgrade: yes
-
-- job:
- name: kolla-ansible-rocky9-venus
- parent: kolla-ansible-venus-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-venus
- parent: kolla-ansible-venus-base
- nodeset: kolla-ansible-ubuntu-noble-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-hashi-vault
- parent: kolla-ansible-hashi-vault-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-rocky9-haproxy-fqdn
- parent: kolla-ansible-haproxy-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
- tls_enabled: true
-
-- job:
- name: kolla-ansible-ubuntu-haproxy-fqdn
- parent: kolla-ansible-haproxy-base
- nodeset: kolla-ansible-ubuntu-noble-8GB
- vars:
- base_distro: ubuntu
- tls_enabled: true
-
-- job:
- name: kolla-ansible-ubuntu-skyline
- parent: kolla-ansible-skyline-base
- nodeset: kolla-ansible-ubuntu-noble-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-skyline
- parent: kolla-ansible-skyline-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-skyline-sso
- parent: kolla-ansible-skyline-sso-base
- nodeset: kolla-ansible-ubuntu-noble-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-rocky9-skyline-sso
- parent: kolla-ansible-skyline-sso-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-rocky9-container-engine-migration
- parent: kolla-ansible-container-engine-migration-base
- nodeset: kolla-ansible-rocky9
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-rocky9-container-engine-migration-multinode
- parent: kolla-ansible-container-engine-migration-base
- nodeset: kolla-ansible-rocky9-multi
- vars:
- base_distro: rocky
-
-- job:
- name: kolla-ansible-ubuntu-container-engine-migration
- parent: kolla-ansible-container-engine-migration-base
- nodeset: kolla-ansible-ubuntu-noble-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-ubuntu-container-engine-migration-multinode
- parent: kolla-ansible-container-engine-migration-base
- nodeset: kolla-ansible-ubuntu-noble-multi-8GB
- vars:
- base_distro: ubuntu
-
-- job:
- name: kolla-ansible-debian-container-engine-migration
- parent: kolla-ansible-container-engine-migration-base
- nodeset: kolla-ansible-debian-bookworm-16GB
- vars:
- base_distro: debian
-
-- job:
- name: kolla-ansible-debian-container-engine-migration-multinode
- parent: kolla-ansible-container-engine-migration-base
- nodeset: kolla-ansible-debian-bookworm-multi-16GB
- vars:
- base_distro: debian
diff --git a/zuul.d/nodesets.yaml b/zuul.d/nodesets.yaml
index 68565fba63..295df62e61 100644
--- a/zuul.d/nodesets.yaml
+++ b/zuul.d/nodesets.yaml
@@ -1,12 +1,12 @@
---
- nodeset:
- name: kolla-ansible-centos10s-8GB
+ name: kolla-ansible-centos-10s-8GB
nodes:
- name: primary
label: centos-10-stream-8GB
- nodeset:
- name: kolla-ansible-centos10s-aarch64-8GB
+ name: kolla-ansible-centos-10s-aarch64-8GB
nodes:
- name: primary
label: centos-10-stream-arm64-8GB
@@ -30,16 +30,16 @@
label: debian-bookworm-arm64-8GB
- nodeset:
- name: kolla-ansible-ubuntu-noble-8GB
+ name: kolla-ansible-debian-bookworm-masakari-8GB
nodes:
- name: primary
- label: ubuntu-noble-8GB
-
-- nodeset:
- name: kolla-ansible-ubuntu-noble-16GB
- nodes:
- - name: primary
- label: ubuntu-noble-16GB
+ label: debian-bookworm-8GB
+ - name: secondary
+ label: debian-bookworm-8GB
+ - name: ternary1
+ label: debian-bookworm-8GB
+ - name: ternary2
+ label: debian-bookworm-8GB
- nodeset:
name: kolla-ansible-debian-bookworm-multi-8GB
@@ -62,52 +62,92 @@
label: debian-bookworm-16GB
- nodeset:
- name: kolla-ansible-rocky9
+ name: kolla-ansible-debian-bookworm-nested-virt
nodes:
- name: primary
- label: rockylinux-9
+ label: debian-bookworm-nested-virt-8GB
- nodeset:
- name: kolla-ansible-ubuntu-noble-multi-8GB
+ name: kolla-ansible-rocky-10-8GB
nodes:
- name: primary
- label: ubuntu-noble-8GB
+ label: rockylinux-10-8GB
+
+- nodeset:
+ name: kolla-ansible-rocky-10-16GB
+ nodes:
+ - name: primary
+ label: rockylinux-10-16GB
+
+- nodeset:
+ name: kolla-ansible-rocky-10-masakari-8GB
+ nodes:
+ - name: primary
+ label: rockylinux-10-8GB
+ - name: secondary
+ label: rockylinux-10-8GB
+ - name: ternary1
+ label: rockylinux-10-8GB
+ - name: ternary2
+ label: rockylinux-10-8GB
+
+- nodeset:
+ name: kolla-ansible-rocky-10-multi-8GB
+ nodes:
+ - name: primary
+ label: rockylinux-10-8GB
- name: secondary1
- label: ubuntu-noble-8GB
+ label: rockylinux-10-8GB
- name: secondary2
- label: ubuntu-noble-8GB
+ label: rockylinux-10-8GB
- nodeset:
- name: kolla-ansible-ubuntu-noble-multi-16GB
+ name: kolla-ansible-rocky-10-multi-16GB
nodes:
- name: primary
- label: ubuntu-noble-16GB
+ label: rockylinux-10-16GB
- name: secondary1
- label: ubuntu-noble-16GB
+ label: rockylinux-10-16GB
- name: secondary2
+ label: rockylinux-10-16GB
+
+- nodeset:
+ name: kolla-ansible-ubuntu-noble-8GB
+ nodes:
+ - name: primary
+ label: ubuntu-noble-8GB
+
+- nodeset:
+ name: kolla-ansible-ubuntu-noble-16GB
+ nodes:
+ - name: primary
label: ubuntu-noble-16GB
- nodeset:
- name: kolla-ansible-rocky9-multi
+ name: kolla-ansible-ubuntu-noble-multi-8GB
nodes:
- name: primary
- label: rockylinux-9
+ label: ubuntu-noble-8GB
- name: secondary1
- label: rockylinux-9
+ label: ubuntu-noble-8GB
- name: secondary2
- label: rockylinux-9
+ label: ubuntu-noble-8GB
- nodeset:
- name: kolla-ansible-ubuntu-noble-nested-virt
+ name: kolla-ansible-ubuntu-noble-multi-16GB
nodes:
- name: primary
- label: ubuntu-noble-nested-virt-8GB
+ label: ubuntu-noble-16GB
+ - name: secondary1
+ label: ubuntu-noble-16GB
+ - name: secondary2
+ label: ubuntu-noble-16GB
- nodeset:
- name: kolla-ansible-rocky9-nested-virt
+ name: kolla-ansible-ubuntu-noble-nested-virt
nodes:
- name: primary
- label: nested-virt-centos-9-stream
+ label: ubuntu-noble-nested-virt-8GB
- nodeset:
name: kolla-ansible-ubuntu-noble-masakari-8GB
@@ -120,15 +160,3 @@
label: ubuntu-noble-8GB
- name: ternary2
label: ubuntu-noble-8GB
-
-- nodeset:
- name: kolla-ansible-rocky9-masakari
- nodes:
- - name: primary
- label: rockylinux-9
- - name: secondary
- label: rockylinux-9
- - name: ternary1
- label: rockylinux-9
- - name: ternary2
- label: rockylinux-9
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 7ec21b694f..dffc20c4ed 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -4,66 +4,33 @@
templates:
- ansible-role-jobs
- check-requirements
+ - kolla-ansible-scenario-aio
+ - kolla-ansible-scenario-bifrost
+ - kolla-ansible-scenario-cells
+ - kolla-ansible-scenario-cephadm
+ # NOTE(mnasiadka): Failing since
+ # https://review.opendev.org/c/openstack/kolla-ansible/+/864780
+ # - kolla-ansible-scenario-container-engine-migration
+ - kolla-ansible-scenario-haproxy-fqdn
+ - kolla-ansible-scenario-kayobe
+ - kolla-ansible-scenario-openbao
+ - kolla-ansible-scenario-kvm
+ - kolla-ansible-scenario-lets-encrypt
+ - kolla-ansible-scenario-magnum
+ - kolla-ansible-scenario-mariadb
+ - kolla-ansible-scenario-masakari
+ - kolla-ansible-scenario-nfv
+ - kolla-ansible-scenario-ironic
+ # NOTE(mnasiadka): All runs end up with DISK_FULL
+ # - kolla-ansible-scenario-ipv6
+ - kolla-ansible-scenario-octavia
+ - kolla-ansible-scenario-ovn
+ - kolla-ansible-scenario-prometheus-opensearch
+ # NOTE(mnasiadka): SSO and non-SSO tests are failing
+ # - kolla-ansible-scenario-skyline
+ - kolla-ansible-scenario-telemetry
- openstack-cover-jobs
- - openstack-python3-jobs
+ - openstack-python3-jobs-kolla-ansible
- periodic-stable-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
- check:
- jobs:
- - kolla-ansible-centos10s
- - kolla-ansible-debian
- - kolla-ansible-debian-podman
- - kolla-ansible-ubuntu
- - kolla-ansible-ubuntu-podman
- - kolla-ansible-ubuntu-kvm
- - kolla-ansible-ubuntu-multinode-ipv6
- - kolla-ansible-ubuntu-bifrost
- - kolla-ansible-ubuntu-magnum
- - kolla-ansible-ubuntu-octavia
- - kolla-ansible-ubuntu-masakari
- - kolla-ansible-debian-ironic
- - kolla-ansible-ubuntu-ironic
- - kolla-ansible-debian-ironic-upgrade
- - kolla-ansible-ubuntu-ironic-upgrade
- - kolla-ansible-debian-upgrade
- - kolla-ansible-ubuntu-upgrade
- - kolla-ansible-ubuntu-cells
- - kolla-ansible-debian-mariadb
- - kolla-ansible-ubuntu-mariadb
- - kolla-ansible-debian-ovn
- - kolla-ansible-ubuntu-ovn
- - kolla-ansible-debian-upgrade-ovn
- - kolla-ansible-ubuntu-upgrade-ovn
- - kolla-ansible-debian
- - kolla-ansible-ubuntu-prometheus-opensearch
- - kolla-ansible-ubuntu-prometheus-opensearch-upgrade
- - kolla-ansible-ubuntu-venus
- - kolla-ansible-ubuntu-cephadm
- - kolla-ansible-ubuntu-upgrade-cephadm
- - kolla-ansible-ubuntu-haproxy-fqdn
- - kolla-ansible-ubuntu-lets-encrypt
- - kolla-ansible-ubuntu-skyline
- - kolla-ansible-ubuntu-skyline-sso
- - kolla-ansible-ubuntu-container-engine-migration
- - kolla-ansible-ubuntu-container-engine-migration-multinode
- - kolla-ansible-debian-container-engine-migration
- - kolla-ansible-debian-container-engine-migration-multinode
- check-arm64:
- jobs:
- - kolla-ansible-centos10s-aarch64
- - kolla-ansible-debian-aarch64
- - kolla-ansible-debian-aarch64-podman
- - kolla-ansible-debian-upgrade-aarch64
- gate:
- jobs:
- - kolla-ansible-debian
- - kolla-ansible-debian-mariadb
- - kolla-ansible-debian-upgrade
- - kolla-ansible-debian-podman
- - kolla-ansible-ubuntu
- - kolla-ansible-ubuntu-mariadb
- - kolla-ansible-ubuntu-prometheus-opensearch
- - kolla-ansible-ubuntu-prometheus-opensearch-upgrade
- - kolla-ansible-ubuntu-upgrade
- - kolla-ansible-ubuntu-podman
diff --git a/zuul.d/python3-jobs.yaml b/zuul.d/python3-jobs.yaml
new file mode 100644
index 0000000000..5dc7903ed0
--- /dev/null
+++ b/zuul.d/python3-jobs.yaml
@@ -0,0 +1,23 @@
+- project-template:
+ name: openstack-python3-jobs-kolla-ansible
+ # NOTE(mnasiadka): Local definition to skip py310 jobs on Flamingo
+ description: |
+ Runs unit tests for an OpenStack Python project under the CPython
+ version 3 releases designated for testing the latest release.
+ check:
+ jobs:
+ - openstack-tox-pep8
+ # py3.12 testing is added as mandatory from the 2025.1 release onwards.
+ # From 2026.1, we run it as periodic only (assuming py3.10 and py3.13
+ # will be enough coverage to run on every change)
+ - openstack-tox-py312
+ gate:
+ jobs:
+ - openstack-tox-pep8
+ # py3.12 testing is added as mandatory from the 2025.1 release onwards.
+ # From 2026.1, we run it as periodic only (assuming py3.10 and py3.13
+ # will be enough coverage to run on every change)
+ - openstack-tox-py312
+ post:
+ jobs:
+ - publish-openstack-python-branch-tarball
\ No newline at end of file
diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml
new file mode 100644
index 0000000000..f8e9b06e31
--- /dev/null
+++ b/zuul.d/scenarios/aio.yaml
@@ -0,0 +1,143 @@
+---
+- job:
+ parent: kolla-ansible-base
+ name: kolla-ansible-aio-base
+ files: !inherit
+ - ^ansible/group_vars/all/(common|fluentd|glance|haproxy|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml
+ - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml
+ - ^ansible/(action_plugins|filter_plugins|library|module_utils)/
+ - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq|service-.*)/
+ - ^kolla_ansible/
+ - ^roles/kolla-ansible-(deploy|tempest|test-dashboard)/
+ - ^tests/testinfra/test_horizon.py
+ - ^tools/init-runonce
+
+- job:
+ name: kolla-ansible-centos-10s
+ parent: kolla-ansible-aio-base
+ nodeset: kolla-ansible-centos-10s-8GB
+ voting: false
+ vars:
+ kolla_build_images: true
+
+- job:
+ name: kolla-ansible-centos-10s-aarch64
+ parent: kolla-ansible-centos-10s
+ nodeset: kolla-ansible-centos-10s-aarch64-8GB
+
+- job:
+ name: kolla-ansible-debian-bookworm
+ parent: kolla-ansible-aio-base
+ nodeset: kolla-ansible-debian-bookworm-16GB
+
+- job:
+ name: kolla-ansible-debian-bookworm-aarch64
+ parent: kolla-ansible-debian-bookworm
+ nodeset: kolla-ansible-debian-bookworm-aarch64-8GB
+ timeout: 10800
+ vars:
+ kolla_build_images: true
+ voting: false
+
+- job:
+ name: kolla-ansible-debian-bookworm-aarch64-podman
+ parent: kolla-ansible-debian-bookworm-aarch64
+ nodeset: kolla-ansible-debian-bookworm-aarch64-8GB
+ timeout: 10800
+ vars:
+ container_engine: podman
+ kolla_build_images: true
+ voting: false
+
+- job:
+ name: kolla-ansible-debian-bookworm-podman
+ parent: kolla-ansible-debian-bookworm
+ nodeset: kolla-ansible-debian-bookworm-16GB
+ vars:
+ container_engine: podman
+
+- job:
+ name: kolla-ansible-debian-bookworm-upgrade
+ parent: kolla-ansible-aio-base
+ nodeset: kolla-ansible-debian-bookworm-16GB
+ timeout: 10800
+
+- job:
+ name: kolla-ansible-debian-bookworm-aarch64-upgrade
+ parent: kolla-ansible-debian-bookworm-upgrade
+ nodeset: kolla-ansible-debian-bookworm-aarch64-8GB
+ voting: false
+
+- job:
+ name: kolla-ansible-debian-bookworm-upgrade-slurp
+ parent: kolla-ansible-debian-bookworm-upgrade
+ nodeset: kolla-ansible-debian-bookworm-16GB
+ timeout: 9000
+
+- job:
+ name: kolla-ansible-rocky-10
+ parent: kolla-ansible-aio-base
+ nodeset: kolla-ansible-rocky-10-16GB
+
+- job:
+ name: kolla-ansible-rocky-10-podman
+ parent: kolla-ansible-rocky-10
+ nodeset: kolla-ansible-rocky-10-16GB
+ vars:
+ container_engine: podman
+
+- job:
+ name: kolla-ansible-ubuntu-noble
+ parent: kolla-ansible-aio-base
+ nodeset: kolla-ansible-ubuntu-noble-16GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-podman
+ parent: kolla-ansible-ubuntu-noble
+ nodeset: kolla-ansible-ubuntu-noble-16GB
+ vars:
+ container_engine: podman
+
+- job:
+ name: kolla-ansible-ubuntu-noble-upgrade
+ parent: kolla-ansible-aio-base
+ nodeset: kolla-ansible-ubuntu-noble-16GB
+ timeout: 10800
+
+- job:
+ name: kolla-ansible-ubuntu-noble-upgrade-slurp
+ parent: kolla-ansible-aio-base
+ nodeset: kolla-ansible-ubuntu-noble-16GB
+ timeout: 9000
+
+- project-template:
+ name: kolla-ansible-scenario-aio
+ description: |
+ Runs Kolla-Ansible AIO scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-centos-10s
+ - kolla-ansible-debian-bookworm
+ - kolla-ansible-debian-bookworm-podman
+ - kolla-ansible-debian-bookworm-upgrade
+ - kolla-ansible-rocky-10
+ - kolla-ansible-rocky-10-podman
+ - kolla-ansible-ubuntu-noble
+ - kolla-ansible-ubuntu-noble-podman
+ - kolla-ansible-ubuntu-noble-upgrade
+ check-arm64:
+ jobs:
+ - kolla-ansible-centos-10s-aarch64
+ - kolla-ansible-debian-bookworm-aarch64
+ - kolla-ansible-debian-bookworm-aarch64-podman
+ - kolla-ansible-debian-bookworm-aarch64-upgrade
+ gate:
+ jobs:
+ - kolla-ansible-debian-bookworm
+ - kolla-ansible-debian-bookworm-podman
+ - kolla-ansible-debian-bookworm-upgrade
+ - kolla-ansible-rocky-10
+ - kolla-ansible-rocky-10-podman
+ - kolla-ansible-ubuntu-noble
+ - kolla-ansible-ubuntu-noble-upgrade
+ - kolla-ansible-ubuntu-noble-podman
diff --git a/zuul.d/scenarios/bifrost.yaml b/zuul.d/scenarios/bifrost.yaml
new file mode 100644
index 0000000000..958c5a69c7
--- /dev/null
+++ b/zuul.d/scenarios/bifrost.yaml
@@ -0,0 +1,38 @@
+---
+- job:
+ name: kolla-ansible-bifrost-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/all/bifrost.yml
+ - ^ansible/roles/bifrost/
+ - ^roles/kolla-ansible-(deploy|test|upgrade)-bifrost/
+ vars:
+ scenario: bifrost
+ scenario_images_core:
+ - ^bifrost
+
+- job:
+ name: kolla-ansible-debian-bookworm-bifrost
+ parent: kolla-ansible-bifrost-base
+ nodeset: kolla-ansible-debian-bookworm-8GB
+
+- job:
+ name: kolla-ansible-rocky-10-bifrost
+ parent: kolla-ansible-bifrost-base
+ nodeset: kolla-ansible-rocky-10-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-bifrost
+ parent: kolla-ansible-bifrost-base
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-bifrost
+ description: |
+ Runs Kolla-Ansible Bifrost scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-bifrost
+ - kolla-ansible-rocky-10-bifrost
+ - kolla-ansible-ubuntu-noble-bifrost
diff --git a/zuul.d/scenarios/cells.yaml b/zuul.d/scenarios/cells.yaml
new file mode 100644
index 0000000000..b94bbe93a0
--- /dev/null
+++ b/zuul.d/scenarios/cells.yaml
@@ -0,0 +1,40 @@
+---
+- job:
+ name: kolla-ansible-cells-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/all/(baremetal|common|fluentd|glance|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq|sysctl).yml
+ - ^ansible/roles/nova/
+ - ^ansible/roles/nova-cell/
+ - ^ansible/roles/loadbalancer/
+ - ^tests/test-(core-openstack|proxysql).sh
+ vars:
+ scenario: cells
+ scenario_images_extra:
+ - ^proxysql
+
+- job:
+ name: kolla-ansible-debian-bookworm-cells
+ parent: kolla-ansible-cells-base
+ nodeset: kolla-ansible-debian-bookworm-multi-16GB
+
+- job:
+ name: kolla-ansible-rocky-10-cells
+ parent: kolla-ansible-cells-base
+ nodeset: kolla-ansible-rocky-10-multi-16GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-cells
+ parent: kolla-ansible-cells-base
+ nodeset: kolla-ansible-ubuntu-noble-multi-16GB
+
+- project-template:
+ name: kolla-ansible-scenario-cells
+ description: |
+ Runs Kolla-Ansible Nova Cells scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-cells
+ - kolla-ansible-rocky-10-cells
+ - kolla-ansible-ubuntu-noble-cells
diff --git a/zuul.d/scenarios/cephadm.yaml b/zuul.d/scenarios/cephadm.yaml
new file mode 100644
index 0000000000..28ac2128b9
--- /dev/null
+++ b/zuul.d/scenarios/cephadm.yaml
@@ -0,0 +1,68 @@
+---
+- job:
+ name: kolla-ansible-cephadm-base
+ parent: kolla-ansible-base
+ voting: false
+ files:
+ - ^ansible/group_vars/all/(ceph|ceph-rgw|common|fluentd|glance|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml
+ - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml
+ - ^ansible/roles/(ceph-rgw|common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|openvswitch|placement|proxysql|rabbitmq)/
+ - ^roles/cephadm/
+ vars:
+ kolla_ansible_setup_disks_file_path: "/var/lib/ceph-osd.img"
+ kolla_ansible_setup_disks_vg_name: "cephvg"
+ kolla_ansible_setup_disks_lv_name: "cephlv"
+ kolla_ansible_tempest_exclude_regex: "^tempest.api.object_storage"
+ scenario: cephadm
+ scenario_images_extra:
+ - ^cinder
+ - ^redis
+
+- job:
+ name: kolla-ansible-debian-bookworm-cephadm
+ parent: kolla-ansible-cephadm-base
+ nodeset: kolla-ansible-debian-bookworm-multi-16GB
+ timeout: 10800
+
+- job:
+ name: kolla-ansible-debian-bookworm-cephadm-upgrade
+ parent: kolla-ansible-debian-bookworm-cephadm
+ nodeset: kolla-ansible-debian-bookworm-multi-16GB
+ timeout: 10800
+
+- job:
+ name: kolla-ansible-debian-bookworm-cephadm-upgrade-slurp
+ parent: kolla-ansible-debian-bookworm-cephadm-upgrade
+ nodeset: kolla-ansible-debian-bookworm-multi-16GB
+ timeout: 9000
+
+- job:
+ name: kolla-ansible-ubuntu-noble-cephadm
+ parent: kolla-ansible-cephadm-base
+ nodeset: kolla-ansible-ubuntu-noble-multi-16GB
+ timeout: 10800
+ vars:
+ cephadm_use_package_from_distribution: true
+
+- job:
+ name: kolla-ansible-ubuntu-noble-cephadm-upgrade
+ parent: kolla-ansible-ubuntu-noble-cephadm
+ nodeset: kolla-ansible-ubuntu-noble-multi-16GB
+ timeout: 10800
+
+- job:
+ name: kolla-ansible-ubuntu-noble-cephadm-upgrade-slurp
+ parent: kolla-ansible-ubuntu-noble-cephadm-upgrade
+ nodeset: kolla-ansible-ubuntu-noble-multi-16GB
+ timeout: 9000
+
+- project-template:
+ name: kolla-ansible-scenario-cephadm
+ description: |
+ Runs Kolla-Ansible cephadm scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-cephadm
+ - kolla-ansible-debian-bookworm-cephadm-upgrade
+ - kolla-ansible-ubuntu-noble-cephadm
+ - kolla-ansible-ubuntu-noble-cephadm-upgrade
diff --git a/zuul.d/scenarios/container-engine-migration.yaml b/zuul.d/scenarios/container-engine-migration.yaml
new file mode 100644
index 0000000000..c57aaa23a5
--- /dev/null
+++ b/zuul.d/scenarios/container-engine-migration.yaml
@@ -0,0 +1,42 @@
+---
+- job:
+ name: kolla-ansible-container-engine-migration-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/migrate-container-engine.yml
+ - ^ansible/roles/container-engine-migration/
+ - ^tests/test-container-engine-migration.sh
+ vars:
+ scenario: container-engine-migration
+
+- job:
+ name: kolla-ansible-debian-container-engine-migration
+ parent: kolla-ansible-container-engine-migration-base
+ nodeset: kolla-ansible-debian-bookworm-16GB
+
+- job:
+ name: kolla-ansible-debian-container-engine-migration-multinode
+ parent: kolla-ansible-container-engine-migration-base
+ nodeset: kolla-ansible-debian-bookworm-multi-16GB
+
+- job:
+ name: kolla-ansible-ubuntu-container-engine-migration
+ parent: kolla-ansible-container-engine-migration-base
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-container-engine-migration-multinode
+ parent: kolla-ansible-container-engine-migration-base
+ nodeset: kolla-ansible-ubuntu-noble-multi-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-container-engine-migration
+ description: |
+ Runs Kolla-Ansible container engine migration scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-container-engine-migration
+ - kolla-ansible-debian-container-engine-migration-multinode
+ - kolla-ansible-ubuntu-container-engine-migration
+ - kolla-ansible-ubuntu-container-engine-migration-multinode
diff --git a/zuul.d/scenarios/haproxy-fqdn.yaml b/zuul.d/scenarios/haproxy-fqdn.yaml
new file mode 100644
index 0000000000..51ecfcdc0a
--- /dev/null
+++ b/zuul.d/scenarios/haproxy-fqdn.yaml
@@ -0,0 +1,40 @@
+---
+- job:
+ name: kolla-ansible-haproxy-fqdn-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/haproxy.yml
+ - ^ansible/roles/(haproxy|haproxy-config|loadbalancer|loadbalancer-config)/
+ - ^kolla_ansible/kolla_url.py
+ vars:
+ external_api_interface_name: vxlan2
+ external_api_network_prefix: "192.0.3."
+ external_api_network_prefix_length: "24"
+ kolla_external_vip_address: "192.0.3.10"
+ scenario: haproxy
+
+- job:
+ name: kolla-ansible-debian-bookworm-haproxy-fqdn
+ parent: kolla-ansible-haproxy-fqdn-base
+ nodeset: kolla-ansible-debian-bookworm-8GB
+
+- job:
+ name: kolla-ansible-rocky-10-haproxy-fqdn
+ parent: kolla-ansible-haproxy-fqdn-base
+ nodeset: kolla-ansible-rocky-10-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-haproxy-fqdn
+ parent: kolla-ansible-haproxy-fqdn-base
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-haproxy-fqdn
+ description: |
+ Runs Kolla-Ansible HAProxy FQDN scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-haproxy-fqdn
+ - kolla-ansible-rocky-10-haproxy-fqdn
+ - kolla-ansible-ubuntu-noble-haproxy-fqdn
diff --git a/zuul.d/scenarios/ipv6.yaml b/zuul.d/scenarios/ipv6.yaml
new file mode 100644
index 0000000000..f465b966a5
--- /dev/null
+++ b/zuul.d/scenarios/ipv6.yaml
@@ -0,0 +1,44 @@
+---
+- job:
+ name: kolla-ansible-ipv6-base
+ parent: kolla-ansible-aio-base
+ voting: false
+ vars:
+ address_family: 'ipv6'
+ api_network_prefix: "fd::"
+ api_network_prefix_length: "64"
+ kolla_internal_vip_address: "fd::ff:0"
+ neutron_external_network_prefix: "fd:1::"
+ neutron_external_network_prefix_length: "64"
+ neutron_tenant_network_prefix: "fd:f0::"
+ neutron_tenant_network_prefix_length: "64"
+ neutron_tenant_network_dns_server: 2001:4860:4860::8888
+ scenario: ipv6
+ scenario_images_extra:
+ - ^prometheus
+ tls_enabled: false
+
+- job:
+ name: kolla-ansible-debian-bookworm-ipv6
+ parent: kolla-ansible-ipv6-base
+ nodeset: kolla-ansible-debian-bookworm-multi-8GB
+
+- job:
+ name: kolla-ansible-rocky-10-ipv6
+ parent: kolla-ansible-ipv6-base
+ nodeset: kolla-ansible-rocky-10-multi-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-ipv6
+ parent: kolla-ansible-ipv6-base
+ nodeset: kolla-ansible-ubuntu-noble-multi-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-ipv6
+ description: |
+ Runs Kolla-Ansible ipv6 scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-ipv6
+ - kolla-ansible-rocky-10-ipv6
+ - kolla-ansible-ubuntu-noble-ipv6
diff --git a/zuul.d/scenarios/ironic.yaml b/zuul.d/scenarios/ironic.yaml
new file mode 100644
index 0000000000..b1fa449060
--- /dev/null
+++ b/zuul.d/scenarios/ironic.yaml
@@ -0,0 +1,60 @@
+---
+- job:
+ name: kolla-ansible-ironic-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/all/(nova|ironic).yml
+ - ^ansible/roles/(nova|nova-cell|ironic)/
+ - ^tests/deploy-tenks\.sh$
+ - ^tests/templates/ironic-overrides\.j2$
+ - ^tests/templates/tenks-deploy-config\.yml\.j2$
+ - ^tests/test-dashboard\.sh$
+ - ^tests/test-ironic\.sh$
+ required-projects:
+ - openstack/tenks
+ vars:
+ scenario: ironic
+ scenario_images_extra:
+ - ^dnsmasq
+ - ^ironic
+ - ^iscsid
+ - ^prometheus
+ tls_enabled: false
+
+- job:
+ name: kolla-ansible-debian-bookworm-ironic
+ parent: kolla-ansible-ironic-base
+ nodeset: kolla-ansible-debian-bookworm-16GB
+
+- job:
+ name: kolla-ansible-debian-bookworm-ironic-upgrade
+ parent: kolla-ansible-debian-bookworm-ironic
+ nodeset: kolla-ansible-debian-bookworm-16GB
+
+- job:
+ name: kolla-ansible-rocky-10-ironic
+ parent: kolla-ansible-ironic-base
+ nodeset: kolla-ansible-rocky-10-16GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-ironic
+ parent: kolla-ansible-ironic-base
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-ironic-upgrade
+ parent: kolla-ansible-ubuntu-noble-ironic
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-ironic
+ description: |
+ Runs Kolla-Ansible Ironic scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-ironic
+ - kolla-ansible-debian-bookworm-ironic-upgrade
+ - kolla-ansible-rocky-10-ironic
+ - kolla-ansible-ubuntu-noble-ironic
+ - kolla-ansible-ubuntu-noble-ironic-upgrade
diff --git a/zuul.d/scenarios/kayobe.yaml b/zuul.d/scenarios/kayobe.yaml
new file mode 100644
index 0000000000..abc0d173a8
--- /dev/null
+++ b/zuul.d/scenarios/kayobe.yaml
@@ -0,0 +1,11 @@
+---
+- project-template:
+ name: kolla-ansible-scenario-kayobe
+ description: |
+ Runs a subset of Kayobe jobs in Kolla-Ansible CI to catch regressions.
+ check:
+ jobs:
+ - kayobe-overcloud-rocky10:
+ voting: false
+ - kayobe-overcloud-ubuntu-noble:
+ voting: false
diff --git a/zuul.d/scenarios/kvm.yaml b/zuul.d/scenarios/kvm.yaml
new file mode 100644
index 0000000000..ed7d3219cd
--- /dev/null
+++ b/zuul.d/scenarios/kvm.yaml
@@ -0,0 +1,35 @@
+---
+- job:
+ name: kolla-ansible-kvm-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/roles/nova-cell/
+ - ^tests/templates/nova-compute-overrides.j2
+ vars:
+ virt_type: kvm
+
+- job:
+ name: kolla-ansible-debian-bookworm-kvm
+ parent: kolla-ansible-kvm-base
+ nodeset: kolla-ansible-debian-bookworm-nested-virt
+
+- job:
+ name: kolla-ansible-rocky-10-kvm
+ parent: kolla-ansible-kvm-base
+ nodeset: kolla-ansible-rocky-10-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-kvm
+ parent: kolla-ansible-kvm-base
+ nodeset: kolla-ansible-ubuntu-noble-nested-virt
+
+- project-template:
+ name: kolla-ansible-scenario-kvm
+ description: |
+ Runs Kolla-Ansible KVM scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-kvm
+ - kolla-ansible-rocky-10-kvm
+ - kolla-ansible-ubuntu-noble-kvm
diff --git a/zuul.d/scenarios/lets-encrypt.yaml b/zuul.d/scenarios/lets-encrypt.yaml
new file mode 100644
index 0000000000..fc059a79c0
--- /dev/null
+++ b/zuul.d/scenarios/lets-encrypt.yaml
@@ -0,0 +1,38 @@
+---
+- job:
+ name: kolla-ansible-lets-encrypt-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/all/lets-encrypt.yml
+ - ^ansible/roles/fluentd/templates/conf/input/11-letsencrypt.conf.j2
+ - ^ansible/roles/(haproxy-config|letsencrypt|loadbalancer|loadbalancer-config)/
+ - ^roles/kolla-ansible-deploy/tasks/certificates.yml
+ - ^tests/test-core-openstack.sh
+ - ^tests/test-dashboard.sh
+ vars:
+ scenario: lets-encrypt
+ scenario_images_extra:
+ - ^letsencrypt
+ - ^haproxy
+ tls_enabled: true
+ le_enabled: true
+
+- job:
+ name: kolla-ansible-debian-bookworm-lets-encrypt
+ parent: kolla-ansible-lets-encrypt-base
+ nodeset: kolla-ansible-debian-bookworm-multi-16GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-lets-encrypt
+ parent: kolla-ansible-lets-encrypt-base
+ nodeset: kolla-ansible-ubuntu-noble-multi-16GB
+
+- project-template:
+ name: kolla-ansible-scenario-lets-encrypt
+ description: |
+ Runs Kolla-Ansible Let's Encrypt scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-lets-encrypt
+ - kolla-ansible-ubuntu-noble-lets-encrypt
diff --git a/zuul.d/scenarios/magnum.yaml b/zuul.d/scenarios/magnum.yaml
new file mode 100644
index 0000000000..9a5552150a
--- /dev/null
+++ b/zuul.d/scenarios/magnum.yaml
@@ -0,0 +1,43 @@
+---
+- job:
+ name: kolla-ansible-magnum-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/all/(designate|magnum|trove).yml
+ - ^ansible/roles/(designate|magnum|trove)/
+ - ^tests/test-dashboard.sh
+ - ^tests/test-magnum.sh
+ vars:
+ scenario: magnum
+ scenario_images_extra:
+ - ^designate
+ - ^magnum
+ - ^trove
+ # TODO: Remove after adding TLS support for Trove
+ tls_enabled: false
+
+- job:
+ name: kolla-ansible-debian-bookworm-magnum
+ parent: kolla-ansible-magnum-base
+ nodeset: kolla-ansible-debian-bookworm-8GB
+
+- job:
+ name: kolla-ansible-rocky-10-magnum
+ parent: kolla-ansible-magnum-base
+ nodeset: kolla-ansible-rocky-10-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-magnum
+ parent: kolla-ansible-magnum-base
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-magnum
+ description: |
+ Runs Kolla-Ansible Magnum scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-magnum
+ - kolla-ansible-rocky-10-magnum
+ - kolla-ansible-ubuntu-noble-magnum
diff --git a/zuul.d/scenarios/mariadb.yaml b/zuul.d/scenarios/mariadb.yaml
new file mode 100644
index 0000000000..3e540e93a1
--- /dev/null
+++ b/zuul.d/scenarios/mariadb.yaml
@@ -0,0 +1,67 @@
+---
+- job:
+ name: kolla-ansible-mariadb-base
+ parent: kolla-ansible-base
+ voting: true
+ files: !inherit
+ - ^ansible/group_vars/all/mariadb.yml
+ - ^ansible/roles/(loadbalancer|loadbalancer-config|mariadb|proxysql-config)/
+ - ^tests/test-mariadb.sh
+ vars:
+ scenario: mariadb
+ scenario_images_core:
+ - ^cron
+ - ^fluentd
+ - ^haproxy
+ - ^keepalived
+ - ^kolla-toolbox
+ - ^mariadb
+ - ^proxysql
+
+- job:
+ name: kolla-ansible-debian-bookworm-mariadb
+ parent: kolla-ansible-mariadb-base
+ nodeset: kolla-ansible-debian-bookworm-multi-16GB
+
+- job:
+ name: kolla-ansible-debian-bookworm-mariadb-upgrade
+ parent: kolla-ansible-mariadb-base
+ nodeset: kolla-ansible-debian-bookworm-multi-16GB
+ vars:
+ is_upgrade: true
+
+- job:
+ name: kolla-ansible-rocky-10-mariadb
+ parent: kolla-ansible-mariadb-base
+ nodeset: kolla-ansible-rocky-10-multi-16GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-mariadb
+ parent: kolla-ansible-mariadb-base
+ nodeset: kolla-ansible-ubuntu-noble-multi-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-mariadb-upgrade
+ parent: kolla-ansible-mariadb-base
+ nodeset: kolla-ansible-ubuntu-noble-multi-8GB
+ vars:
+ is_upgrade: true
+
+- project-template:
+ name: kolla-ansible-scenario-mariadb
+ description: |
+ Runs Kolla-Ansible MariaDB scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-mariadb
+ - kolla-ansible-debian-bookworm-mariadb-upgrade
+ - kolla-ansible-rocky-10-mariadb
+ - kolla-ansible-ubuntu-noble-mariadb
+ - kolla-ansible-ubuntu-noble-mariadb-upgrade
+ gate:
+ jobs:
+ - kolla-ansible-debian-bookworm-mariadb
+ - kolla-ansible-debian-bookworm-mariadb-upgrade
+ - kolla-ansible-rocky-10-mariadb
+ - kolla-ansible-ubuntu-noble-mariadb
+ - kolla-ansible-ubuntu-noble-mariadb-upgrade
diff --git a/zuul.d/scenarios/masakari.yaml b/zuul.d/scenarios/masakari.yaml
new file mode 100644
index 0000000000..4593f0a673
--- /dev/null
+++ b/zuul.d/scenarios/masakari.yaml
@@ -0,0 +1,41 @@
+---
+- job:
+ name: kolla-ansible-masakari-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/all/(hacluster|masakari).yml
+ - ^ansible/roles/(hacluster|masakari)/
+ - ^tests/test-masakari.sh
+ vars:
+ scenario: masakari
+ scenario_images_extra:
+ - ^masakari
+ - ^hacluster
+ # TODO: Remove once Masakari has TLS support
+ tls_enabled: false
+
+- job:
+ name: kolla-ansible-debian-bookworm-masakari
+ parent: kolla-ansible-masakari-base
+ nodeset: kolla-ansible-debian-bookworm-masakari-8GB
+
+- job:
+ name: kolla-ansible-rocky-10-masakari
+ parent: kolla-ansible-masakari-base
+ nodeset: kolla-ansible-rocky-10-masakari-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-masakari
+ parent: kolla-ansible-masakari-base
+ nodeset: kolla-ansible-ubuntu-noble-masakari-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-masakari
+ description: |
+ Runs Kolla-Ansible Masakari scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-masakari
+ - kolla-ansible-rocky-10-masakari
+ - kolla-ansible-ubuntu-noble-masakari
diff --git a/zuul.d/scenarios/nfv.yaml b/zuul.d/scenarios/nfv.yaml
new file mode 100644
index 0000000000..60e44f7eea
--- /dev/null
+++ b/zuul.d/scenarios/nfv.yaml
@@ -0,0 +1,37 @@
+---
+- job:
+ name: kolla-ansible-scenario-nfv-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/all/(aodh|barbican|heat|mistral|redis|tacker).yml
+ - ^ansible/roles/(aodh|barbican|heat|mistral|redis|tacker)/
+ - ^tests/test-scenario-nfv.sh
+ vars:
+ scenario: nfv
+ scenario_images_extra:
+ - ^aodh
+ - ^tacker
+ - ^mistral
+ - ^redis
+ - ^barbican
+ tls_enabled: false
+
+- job:
+ name: kolla-ansible-debian-bookworm-nfv
+ parent: kolla-ansible-scenario-nfv-base
+ nodeset: kolla-ansible-debian-bookworm-multi-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-nfv
+ parent: kolla-ansible-scenario-nfv-base
+ nodeset: kolla-ansible-ubuntu-noble-multi-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-nfv
+ description: |
+ Runs Kolla-Ansible NFV scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-nfv
+ - kolla-ansible-ubuntu-noble-nfv
diff --git a/zuul.d/scenarios/octavia.yaml b/zuul.d/scenarios/octavia.yaml
new file mode 100644
index 0000000000..d7fdcb2080
--- /dev/null
+++ b/zuul.d/scenarios/octavia.yaml
@@ -0,0 +1,34 @@
+---
+- job:
+ name: kolla-ansible-octavia-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/all/octavia.yml
+ - ^ansible/roles/(octavia|octavia-certificates)/
+ - ^tests/test-octavia.sh
+ vars:
+ scenario: octavia
+ scenario_images_extra:
+ - ^redis
+ - ^octavia
+ tls_enabled: false
+
+- job:
+ name: kolla-ansible-debian-bookworm-octavia
+ parent: kolla-ansible-octavia-base
+ nodeset: kolla-ansible-debian-bookworm-16GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-octavia
+ parent: kolla-ansible-octavia-base
+ nodeset: kolla-ansible-ubuntu-noble-16GB
+
+- project-template:
+ name: kolla-ansible-scenario-octavia
+ description: |
+ Runs Kolla-Ansible Octavia scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-octavia
+ - kolla-ansible-ubuntu-noble-octavia
diff --git a/zuul.d/scenarios/openbao.yaml b/zuul.d/scenarios/openbao.yaml
new file mode 100644
index 0000000000..1cca53ec3a
--- /dev/null
+++ b/zuul.d/scenarios/openbao.yaml
@@ -0,0 +1,40 @@
+---
+- job:
+ name: kolla-ansible-openbao-base
+ parent: kolla-ansible-base
+ run: tests/run-openbao.yml
+ required-projects:
+ - openstack/kolla-ansible
+ - openstack/requirements
+ voting: false
+ files: !override
+ - ^kolla_ansible/
+ - ^requirements-core.yml
+ - ^tests/(pre|run|run-openbao).yml
+ - ^tests/templates/(inventory|globals-default).j2
+ - ^tests/test-openbao-passwords.sh
+
+- job:
+ name: kolla-ansible-debian-bookworm-openbao
+ parent: kolla-ansible-openbao-base
+ nodeset: kolla-ansible-debian-bookworm-8GB
+
+- job:
+ name: kolla-ansible-rocky-10-openbao
+ parent: kolla-ansible-openbao-base
+ nodeset: kolla-ansible-rocky-10-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-openbao
+ parent: kolla-ansible-openbao-base
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-openbao
+ description: |
+ Runs Kolla-Ansible OpenBao scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-openbao
+ - kolla-ansible-rocky-10-openbao
+ - kolla-ansible-ubuntu-noble-openbao
diff --git a/zuul.d/scenarios/ovn.yaml b/zuul.d/scenarios/ovn.yaml
new file mode 100644
index 0000000000..cbeca20058
--- /dev/null
+++ b/zuul.d/scenarios/ovn.yaml
@@ -0,0 +1,50 @@
+---
+- job:
+ name: kolla-ansible-ovn-base
+ parent: kolla-ansible-base
+ files: !inherit
+ - ^ansible/group_vars/all/(neutron|octavia|openvswitch|ovn).yml
+ - ^ansible/roles/(neutron|octavia|openvswitch|ovn-controller|ovn-db)/
+ - ^tests/test-ovn.sh
+ voting: false
+ vars:
+ scenario: ovn
+ scenario_images_extra:
+ - ^redis
+ - ^octavia
+ - ^ovn
+
+- job:
+ name: kolla-ansible-debian-bookworm-ovn
+ parent: kolla-ansible-ovn-base
+ nodeset: kolla-ansible-debian-bookworm-multi-16GB
+ # NOTE(mnasiadka): CirrOS boot often crashes with "cannot open root device"
+ voting: false
+
+- job:
+ name: kolla-ansible-debian-bookworm-ovn-upgrade
+ parent: kolla-ansible-debian-bookworm-ovn
+ nodeset: kolla-ansible-debian-bookworm-multi-16GB
+ timeout: 10800
+
+- job:
+ name: kolla-ansible-ubuntu-noble-ovn
+ parent: kolla-ansible-ovn-base
+ nodeset: kolla-ansible-ubuntu-noble-multi-16GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-ovn-upgrade
+ parent: kolla-ansible-ubuntu-noble-ovn
+ nodeset: kolla-ansible-ubuntu-noble-multi-16GB
+ timeout: 10800
+
+- project-template:
+ name: kolla-ansible-scenario-ovn
+ description: |
+ Runs Kolla-Ansible OVN scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-ovn
+ - kolla-ansible-debian-bookworm-ovn-upgrade
+ - kolla-ansible-ubuntu-noble-ovn
+ - kolla-ansible-ubuntu-noble-ovn-upgrade
diff --git a/zuul.d/scenarios/prometheus-opensearch.yaml b/zuul.d/scenarios/prometheus-opensearch.yaml
new file mode 100644
index 0000000000..f17d34f7de
--- /dev/null
+++ b/zuul.d/scenarios/prometheus-opensearch.yaml
@@ -0,0 +1,61 @@
+---
+- job:
+ name: kolla-ansible-prometheus-opensearch-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/all/(common|fluentd|grafana|opensearch|prometheus).yml
+ - ^ansible/roles/(common|fluentd|grafana|opensearch|prometheus)/
+ - ^tests/test-prometheus-opensearch.sh
+ vars:
+ scenario: prometheus-opensearch
+ scenario_images_core:
+ - ^cron
+ - ^fluentd
+ - ^grafana
+ - ^haproxy
+ - ^keepalived
+ - ^kolla-toolbox
+ - ^mariadb
+ - ^memcached
+ - ^opensearch
+ - ^prometheus
+ - ^proxysql
+ - ^rabbitmq
+
+- job:
+ name: kolla-ansible-debian-bookworm-prometheus-opensearch
+ parent: kolla-ansible-prometheus-opensearch-base
+ nodeset: kolla-ansible-debian-bookworm-8GB
+
+- job:
+ name: kolla-ansible-debian-bookworm-prometheus-opensearch-upgrade
+ parent: kolla-ansible-debian-bookworm-prometheus-opensearch
+ nodeset: kolla-ansible-debian-bookworm-8GB
+
+- job:
+ name: kolla-ansible-rocky-10-prometheus-opensearch
+ parent: kolla-ansible-prometheus-opensearch-base
+ nodeset: kolla-ansible-rocky-10-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-prometheus-opensearch
+ parent: kolla-ansible-prometheus-opensearch-base
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-prometheus-opensearch-upgrade
+ parent: kolla-ansible-ubuntu-noble-prometheus-opensearch
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-prometheus-opensearch
+ description: |
+ Runs Kolla-Ansible Prometheus OpenSearch scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-prometheus-opensearch
+ - kolla-ansible-debian-bookworm-prometheus-opensearch-upgrade
+ - kolla-ansible-rocky-10-prometheus-opensearch
+ - kolla-ansible-ubuntu-noble-prometheus-opensearch
+ - kolla-ansible-ubuntu-noble-prometheus-opensearch-upgrade
diff --git a/zuul.d/scenarios/skyline.yaml b/zuul.d/scenarios/skyline.yaml
new file mode 100644
index 0000000000..5ff97810be
--- /dev/null
+++ b/zuul.d/scenarios/skyline.yaml
@@ -0,0 +1,64 @@
+---
+- job:
+ name: kolla-ansible-skyline-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/all/skyline.yml
+ - ^ansible/roles/skyline/
+ - ^tests/test-skyline.sh
+ vars:
+ scenario: skyline
+ scenario_images_extra:
+ - ^skyline
+
+- job:
+ name: kolla-ansible-skyline-sso-base
+ parent: kolla-ansible-skyline-base
+ files: !inherit
+ - ^tests/test-skyline-sso.sh
+ vars:
+ scenario: skyline-sso
+
+- job:
+ name: kolla-ansible-debian-bookworm-skyline
+ parent: kolla-ansible-skyline-base
+ nodeset: kolla-ansible-debian-bookworm-8GB
+
+- job:
+ name: kolla-ansible-debian-bookworm-skyline-sso
+ parent: kolla-ansible-skyline-sso-base
+ nodeset: kolla-ansible-debian-bookworm-8GB
+
+- job:
+ name: kolla-ansible-rocky-10-skyline
+ parent: kolla-ansible-skyline-base
+ nodeset: kolla-ansible-rocky-10-8GB
+
+- job:
+ name: kolla-ansible-rocky-10-skyline-sso
+ parent: kolla-ansible-skyline-sso-base
+ nodeset: kolla-ansible-rocky-10-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-skyline
+ parent: kolla-ansible-skyline-base
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-skyline-sso
+ parent: kolla-ansible-skyline-sso-base
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-skyline
+ description: |
+ Runs Kolla-Ansible Skyline scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-skyline
+ - kolla-ansible-debian-bookworm-skyline-sso
+ - kolla-ansible-rocky-10-skyline
+ - kolla-ansible-rocky-10-skyline-sso
+ - kolla-ansible-ubuntu-noble-skyline
+ - kolla-ansible-ubuntu-noble-skyline-sso
diff --git a/zuul.d/scenarios/telemetry.yaml b/zuul.d/scenarios/telemetry.yaml
new file mode 100644
index 0000000000..08d01d890c
--- /dev/null
+++ b/zuul.d/scenarios/telemetry.yaml
@@ -0,0 +1,40 @@
+---
+- job:
+ name: kolla-ansible-telemetry-base
+ parent: kolla-ansible-base
+ voting: false
+ files:
+ - ^ansible/group_vars/all/(aodh|ceilometer|gnocchi).yml
+ - ^ansible/roles/(aodh|ceilometer|gnocchi)/
+ - ^tests/test-telemetry.sh
+ vars:
+ scenario: telemetry
+ scenario_images_extra:
+ - ^aodh
+ - ^ceilometer
+ - ^gnocchi
+
+- job:
+ name: kolla-ansible-debian-bookworm-telemetry
+ parent: kolla-ansible-telemetry-base
+ nodeset: kolla-ansible-debian-bookworm-8GB
+
+- job:
+ name: kolla-ansible-rocky-10-telemetry
+ parent: kolla-ansible-telemetry-base
+ nodeset: kolla-ansible-rocky-10-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-noble-telemetry
+ parent: kolla-ansible-telemetry-base
+ nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-telemetry
+ description: |
+ Runs Kolla-Ansible Telemetry scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-bookworm-telemetry
+ - kolla-ansible-rocky-10-telemetry
+ - kolla-ansible-ubuntu-noble-telemetry
diff --git a/zuul.d/scenarios/zun.yaml b/zuul.d/scenarios/zun.yaml
new file mode 100644
index 0000000000..25397725dc
--- /dev/null
+++ b/zuul.d/scenarios/zun.yaml
@@ -0,0 +1,42 @@
+---
+- job:
+ name: kolla-ansible-zun-base
+ parent: kolla-ansible-base
+ voting: false
+ files: !inherit
+ - ^ansible/group_vars/all/(zun|kuryr|etcd|cinder|iscsi).yml
+ - ^ansible/roles/(zun|kuryr|etcd|cinder|iscsi)/
+ - ^tests/setup_disks.sh
+ - ^tests/test-core-openstack.sh
+ - ^tests/test-zun.sh
+ - ^tests/test-dashboard.sh
+ vars:
+ kolla_ansible_setup_disks_file_path: "/var/lib/cinder_data.img"
+ kolla_ansible_setup_disks_vg_name: "cinder-volumes"
+ scenario: zun
+ scenario_images_extra:
+ - ^zun
+ - ^kuryr
+ - ^etcd
+ - ^cinder
+ - ^iscsid
+ - ^tgtd
+
+- job:
+ name: kolla-ansible-debian-zun
+ parent: kolla-ansible-zun-base
+ nodeset: kolla-ansible-debian-bookworm-multi-8GB
+
+- job:
+ name: kolla-ansible-ubuntu-zun
+ parent: kolla-ansible-zun-base
+ nodeset: kolla-ansible-ubuntu-noble-multi-8GB
+
+- project-template:
+ name: kolla-ansible-scenario-zun
+ description: |
+ Runs Kolla-Ansible Zun scenario jobs.
+ check:
+ jobs:
+ - kolla-ansible-debian-zun
+ - kolla-ansible-ubuntu-zun