From 566409e11ada6600355084715e6a34cf44ce6dd2 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 11 Mar 2024 10:07:06 +0100 Subject: [PATCH 001/165] octavia: Use valid_interfaces instead of endpoint_type Since neutronclient removal in [1] valid_interfaces is supported and endpoint_type is deprecated for removal. [1]: https://review.opendev.org/c/openstack/octavia/+/866327 Change-Id: I145f75e13ab40b62b47469c23e34435590d2f767 --- ansible/roles/octavia/templates/octavia.conf.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/octavia/templates/octavia.conf.j2 b/ansible/roles/octavia/templates/octavia.conf.j2 index ca59acd35f..4df9e977df 100644 --- a/ansible/roles/octavia/templates/octavia.conf.j2 +++ b/ansible/roles/octavia/templates/octavia.conf.j2 @@ -147,7 +147,7 @@ ca_certificates_file = {{ openstack_cacert }} [neutron] region_name = {{ openstack_region_name }} -endpoint_type = internal +valid_interfaces = internal ca_certificates_file = {{ openstack_cacert }} [nova] From 568e186a2a94df318037c0413957f6d844a9a26e Mon Sep 17 00:00:00 2001 From: Bertrand Lanson Date: Fri, 8 Nov 2024 15:03:52 +0100 Subject: [PATCH 002/165] Fix inventory file for cyborg control services Fix a scheduling issue in the multinode and all-in-one inventory files, that would cause cyborg api and conductor service to also be scheduled on compute nodes rather then exclusively staying on the control plane. 
Closes-Bug: #2087552 Change-Id: I69d9a44db037fce42cb5a25b5688313eece15484 --- ansible/inventory/all-in-one | 1 - ansible/inventory/multinode | 1 - ...ix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml | 7 +++++++ 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/fix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml diff --git a/ansible/inventory/all-in-one b/ansible/inventory/all-in-one index 8d5c22344a..e69865a376 100644 --- a/ansible/inventory/all-in-one +++ b/ansible/inventory/all-in-one @@ -132,7 +132,6 @@ control [cyborg:children] control -compute [tacker:children] control diff --git a/ansible/inventory/multinode b/ansible/inventory/multinode index a12c51dc9a..76861e2c9f 100644 --- a/ansible/inventory/multinode +++ b/ansible/inventory/multinode @@ -147,7 +147,6 @@ control [cyborg:children] control -compute [gnocchi:children] control diff --git a/releasenotes/notes/fix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml b/releasenotes/notes/fix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml new file mode 100644 index 0000000000..0b6a77bdcf --- /dev/null +++ b/releasenotes/notes/fix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes a placement problem for cyborg api and conductor services, + that would be also be scheduled on compute nodes, rather than + being exclusively on control plane. + `LP#2087552 `__ From 649abef5a4a9a3a1975a65ff558f681ff3243f26 Mon Sep 17 00:00:00 2001 From: Marek Buch Date: Tue, 29 Apr 2025 08:53:14 +0000 Subject: [PATCH 003/165] Add documentation for Kolla Ansible CLI command completion This change adds instructions to the documentation on how to enable shell command completion for the kolla-ansible CLI. It covers all necessary steps. 
Change-Id: I5cacf29d2fb9c0b473cf858f1c050bc70890cc42 Signed-off-by: Marek Buch --- doc/source/user/operating-kolla.rst | 30 +++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/doc/source/user/operating-kolla.rst b/doc/source/user/operating-kolla.rst index 32973ba4ee..04668a68a5 100644 --- a/doc/source/user/operating-kolla.rst +++ b/doc/source/user/operating-kolla.rst @@ -198,6 +198,36 @@ After this command is complete, the containers will have been recreated from the new images and all database schema upgrades and similar actions performed for you. + +CLI Command Completion +~~~~~~~~~~~~~~~~~~~~~~ + +Kolla Ansible supports shell command completion to make the CLI easier to use. + +To enable Bash completion, generate the completion script: + +.. code-block:: console + + kolla-ansible complete --shell bash > ~/.kolla_ansible_completion.sh + +Then, add the following line to your ``~/.bashrc`` file: + +.. code-block:: console + + source ~/.kolla_ansible_completion.sh + +Finally, reload your shell configuration: + +.. code-block:: console + + source ~/.bashrc + +.. note:: + + If you're using a shell other than Bash, replace ``--shell bash`` with your shell type, + e.g., ``zsh``, and adapt your shell's configuration file accordingly. 
+ + Tips and Tricks ~~~~~~~~~~~~~~~ From 0c60010bd5e11de9dfd398e9f534479601992a7e Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 13 Mar 2025 16:21:47 +0100 Subject: [PATCH 004/165] CI: Add telemetry scenario Removing python-ceilometerclient since it didn't have a release since 2017 Change-Id: I4c20d268c202e1699f44f7d672c159d3ffa895eb Signed-off-by: Michal Nasiadka --- roles/openstack-clients/defaults/main.yml | 4 +++ tests/run.yml | 7 ++++ tests/templates/globals-default.j2 | 6 ++++ tests/test-telemetry.sh | 42 +++++++++++++++++++++++ zuul.d/base.yaml | 14 ++++++++ zuul.d/jobs.yaml | 14 ++++++++ zuul.d/project.yaml | 1 + 7 files changed, 88 insertions(+) create mode 100755 tests/test-telemetry.sh diff --git a/roles/openstack-clients/defaults/main.yml b/roles/openstack-clients/defaults/main.yml index 52a5c88851..4a3c5af5b3 100644 --- a/roles/openstack-clients/defaults/main.yml +++ b/roles/openstack-clients/defaults/main.yml @@ -1,9 +1,13 @@ --- openstack_clients_pip_packages: + - package: aodhclient + enabled: "{{ scenario == 'telemetry' }}" - package: python-barbicanclient enabled: "{{ scenario == 'scenario_nfv' }}" - package: python-designateclient enabled: "{{ scenario == 'magnum' }}" + - package: gnocchiclient + enabled: "{{ scenario == 'telemetry' }}" - package: python-heatclient enabled: true - package: python-ironicclient diff --git a/tests/run.yml b/tests/run.yml index 99f110e315..d03187b956 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -571,6 +571,13 @@ chdir: "{{ kolla_ansible_src_dir }}" when: scenario == "skyline-sso" + - name: Run test-telemetry.sh script + script: + cmd: test-telemetry.sh + executable: /bin/bash + chdir: "{{ kolla_ansible_src_dir }}" + when: scenario == "telemetry" + - name: Run test-container-engine-migration.sh script script: cmd: test-container-engine-migration.sh diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2 index fe7950e656..77b02f4b62 100644 --- 
a/tests/templates/globals-default.j2 +++ b/tests/templates/globals-default.j2 @@ -278,5 +278,11 @@ enable_skyline: "yes" skyline_enable_sso: "yes" {% endif %} +{% if scenario == "telemetry" %} +enable_aodh: "yes" +enable_ceilometer: "yes" +enable_gnocchi: "yes" +{% endif %} + mariadb_monitor_read_only_interval: "30000" mariadb_monitor_galera_healthcheck_timeout: "30000" diff --git a/tests/test-telemetry.sh b/tests/test-telemetry.sh new file mode 100755 index 0000000000..91b5f6c323 --- /dev/null +++ b/tests/test-telemetry.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +set -o xtrace +set -o errexit +set -o pipefail + +# Enable unbuffered output +export PYTHONUNBUFFERED=1 + +function test_aodh { + echo "TESTING: Aodh" + openstack alarm list + echo "SUCCESS: Aodh" +} + +function test_gnocchi { + echo "TESTING: Gnocchi" + openstack metric list + openstack metric resource list + echo "SUCCESS: Gnocchi" +} + +function test_telemetry_scenario_logged { + . /etc/kolla/admin-openrc.sh + . ~/openstackclient-venv/bin/activate + test_aodh + test_gnocchi +} + +function test_telemetry_scenario { + echo "Testing Telemetry" + test_telemetry_scenario_logged > /tmp/logs/ansible/test-telemetry-scenario 2>&1 + result=$? + if [[ $result != 0 ]]; then + echo "Testing Telemetry scenario failed. See ansible/test-telemetry-scenario for details" + else + echo "Successfully tested Telemetry scenario. 
See ansible/test-telemetry-scenario for details" + fi + return $result +} + +test_telemetry_scenario diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 82daa26e2c..8bc43be5b0 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -430,6 +430,20 @@ scenario_images_extra: - ^skyline +- job: + name: kolla-ansible-telemetry-base + parent: kolla-ansible-scenario-base + voting: false + files: + - ^ansible/roles/(aodh|ceilometer|gnocchi)/ + - ^tests/test-telemetry.sh + vars: + scenario: telemetry + scenario_images_extra: + - ^aodh + - ^ceilometer + - ^gnocchi + - job: name: kolla-ansible-container-engine-migration-base parent: kolla-ansible-base diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml index bc59cde853..831b33e542 100644 --- a/zuul.d/jobs.yaml +++ b/zuul.d/jobs.yaml @@ -545,6 +545,20 @@ vars: base_distro: rocky +- job: + name: kolla-ansible-ubuntu-telemetry + parent: kolla-ansible-telemetry-base + nodeset: kolla-ansible-ubuntu-noble-8GB + vars: + base_distro: ubuntu + +- job: + name: kolla-ansible-rocky9-telemetry + parent: kolla-ansible-telemetry-base + nodeset: kolla-ansible-rocky9 + vars: + base_distro: rocky + - job: name: kolla-ansible-rocky9-container-engine-migration parent: kolla-ansible-container-engine-migration-base diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index 609201a2c3..7c3f3364ac 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -44,6 +44,7 @@ - kolla-ansible-ubuntu-lets-encrypt - kolla-ansible-ubuntu-skyline - kolla-ansible-ubuntu-skyline-sso + - kolla-ansible-ubuntu-telemetry - kolla-ansible-ubuntu-container-engine-migration - kolla-ansible-ubuntu-container-engine-migration-multinode - kolla-ansible-debian-container-engine-migration From ebe7e4a7523c0e4c78d7c028375fc9c87e9db181 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 24 Jan 2025 08:09:06 +0100 Subject: [PATCH 005/165] gnocchi: Add support for using uWSGI Depends-On: https://review.opendev.org/c/openstack/kolla/+/960437 Change-Id: 
I64d09767099baa9028b3b06d876c2b30c34ddf79 Signed-off-by: Michal Nasiadka --- ansible/roles/gnocchi/defaults/main.yml | 6 ++++++ ansible/roles/gnocchi/tasks/config.yml | 18 +++++++++++++++++- .../gnocchi/templates/gnocchi-api.json.j2 | 15 +++++++++++---- .../notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml | 2 ++ 4 files changed, 36 insertions(+), 5 deletions(-) diff --git a/ansible/roles/gnocchi/defaults/main.yml b/ansible/roles/gnocchi/defaults/main.yml index e0d3f3c6ad..d185fef648 100644 --- a/ansible/roles/gnocchi/defaults/main.yml +++ b/ansible/roles/gnocchi/defaults/main.yml @@ -8,6 +8,7 @@ gnocchi_services: volumes: "{{ gnocchi_api_default_volumes + gnocchi_api_extra_volumes }}" dimensions: "{{ gnocchi_api_dimensions }}" healthcheck: "{{ gnocchi_api_healthcheck }}" + wsgi: "gnocchi.wsgi.api:application" haproxy: gnocchi_api: enabled: "{{ enable_gnocchi }}" @@ -199,3 +200,8 @@ gnocchi_database_enable_tls_internal: "{{ database_enable_tls_internal | bool }} # Copy certificates ################### gnocchi_copy_certs: "{{ kolla_copy_ca_into_containers | bool or gnocchi_database_enable_tls_internal | bool }}" + +#################### +# WSGI +#################### +gnocchi_wsgi_provider: "uwsgi" diff --git a/ansible/roles/gnocchi/tasks/config.yml b/ansible/roles/gnocchi/tasks/config.yml index c8e6897aee..2c6db03278 100644 --- a/ansible/roles/gnocchi/tasks/config.yml +++ b/ansible/roles/gnocchi/tasks/config.yml @@ -67,10 +67,26 @@ dest: "{{ node_config_directory }}/{{ item }}/wsgi-gnocchi.conf" mode: "0660" become: true - when: service | service_enabled_and_mapped_to_host + when: + - gnocchi_wsgi_provider == "apache" + - service | service_enabled_and_mapped_to_host with_items: - "gnocchi-api" +- name: "Configure uWSGI for Gnocchi" + include_role: + name: service-uwsgi-config + vars: + project_services: "{{ gnocchi_services }}" + service: "{{ gnocchi_services['gnocchi-api'] }}" + service_name: "gnocchi-api" + service_uwsgi_config_http_port: "{{ gnocchi_api_listen_port }}" + 
service_uwsgi_config_module: "{{ service.wsgi }}" + service_uwsgi_config_uid: "gnocchi" + when: + - gnocchi_wsgi_provider == "uwsgi" + - service | service_enabled_and_mapped_to_host + - name: Copying over existing policy file template: src: "{{ gnocchi_policy_file_path }}" diff --git a/ansible/roles/gnocchi/templates/gnocchi-api.json.j2 b/ansible/roles/gnocchi/templates/gnocchi-api.json.j2 index de8ed12900..2fc22c6469 100644 --- a/ansible/roles/gnocchi/templates/gnocchi-api.json.j2 +++ b/ansible/roles/gnocchi/templates/gnocchi-api.json.j2 @@ -1,20 +1,27 @@ -{% set gnocchi_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} +{% set apache_binary = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} {% set gnocchi_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %} +{% set command = ('/usr/sbin/' + apache_binary + ' -DFOREGROUND') if gnocchi_wsgi_provider == 'apache' else 'uwsgi /etc/gnocchi/gnocchi-api-uwsgi.ini' %} { - "command": "{{ gnocchi_cmd }} -DFOREGROUND", + "command": "{{ command }}", "config_files": [ { "source": "{{ container_config_directory }}/gnocchi.conf", "dest": "/etc/gnocchi/gnocchi.conf", "owner": "gnocchi", "perm": "0600" - }, + }{% if gnocchi_wsgi_provider == 'apache' %}, { "source": "{{ container_config_directory }}/wsgi-gnocchi.conf", "dest": "/etc/{{ gnocchi_dir }}/wsgi-gnocchi.conf", "owner": "gnocchi", "perm": "0600" - }{% if gnocchi_policy_file is defined %}, + }{% elif gnocchi_wsgi_provider == 'uwsgi' %}, + { + "source": "{{ container_config_directory }}/gnocchi-api-uwsgi.ini", + "dest": "/etc/gnocchi/gnocchi-api-uwsgi.ini", + "owner": "gnocchi", + "perm": "0600" + }{% endif %}{% if gnocchi_policy_file is defined %}, { "source": "{{ container_config_directory }}/{{ gnocchi_policy_file }}", "dest": "/etc/gnocchi/{{ gnocchi_policy_file }}", diff --git a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml 
b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml index f3d3cd764d..53fe2c51f4 100644 --- a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml +++ b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml @@ -9,6 +9,8 @@ features: * - Service - Variable + * - Gnocchi + - gnocchi_wsgi_provider * - Heat - heat_wsgi_provider * - Ironic From cf737670c8a2a916f8a69f317f9e08b1e7684f6d Mon Sep 17 00:00:00 2001 From: Matt Anson Date: Fri, 12 Sep 2025 20:58:05 +0100 Subject: [PATCH 006/165] Remove reference to EXTRA_OPTS in documentation Passing the contents of EXTRA_OPTS into ansible-playbook is no longer supported, so remove reference to this functionality from the documentation. Closes-Bug: #2123837 Change-Id: Ibaf8af1de66f76772a617b05467ba611d0bbd20d Signed-off-by: Matt Anson --- doc/source/user/operating-kolla.rst | 3 --- .../notes/docs-remove-extra-opts-9f68a9b3dd9f14c1.yaml | 4 ++++ 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/docs-remove-extra-opts-9f68a9b3dd9f14c1.yaml diff --git a/doc/source/user/operating-kolla.rst b/doc/source/user/operating-kolla.rst index 0b2d78eca8..e4eca00c2e 100644 --- a/doc/source/user/operating-kolla.rst +++ b/doc/source/user/operating-kolla.rst @@ -204,9 +204,6 @@ Tips and Tricks Kolla Ansible CLI ----------------- -When running the ``kolla-ansible`` CLI, additional arguments may be passed to -``ansible-playbook`` via the ``EXTRA_OPTS`` environment variable. - ``kolla-ansible deploy -i INVENTORY`` is used to deploy and start all Kolla containers. diff --git a/releasenotes/notes/docs-remove-extra-opts-9f68a9b3dd9f14c1.yaml b/releasenotes/notes/docs-remove-extra-opts-9f68a9b3dd9f14c1.yaml new file mode 100644 index 0000000000..8e044d6687 --- /dev/null +++ b/releasenotes/notes/docs-remove-extra-opts-9f68a9b3dd9f14c1.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Remove reference to EXTRA_OPTS in documentation. 
From 88cbed5416f3349f49ef097efaf6a7fabdd97a19 Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Mon, 15 Sep 2025 17:56:36 +0100 Subject: [PATCH 007/165] Fix IPA callback URL for unmanaged inspection The builtin inspector uses the continue_inspection endpoint[1], whilst the standalone inspector uses continue[2]. The symptom is that you get an 401 status code when posting back the inspection data. This doesn't affect managed inspection since ironic will generate a per node ipxe script. [1] https://docs.openstack.org/ironic/latest/install/configure-pxe.html#configure-unmanaged-inspection [2] https://docs.openstack.org/ironic-inspector/latest/install/index.html#configuration TrivialFix Change-Id: Ie08bdb264a750c8c264b6cfb3b75b1aea674a617 Signed-off-by: Will Szumski --- ansible/roles/ironic/templates/ipa.ipxe.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ironic/templates/ipa.ipxe.j2 b/ansible/roles/ironic/templates/ipa.ipxe.j2 index 676f885c45..4ae0d25762 100644 --- a/ansible/roles/ironic/templates/ipa.ipxe.j2 +++ b/ansible/roles/ironic/templates/ipa.ipxe.j2 @@ -13,6 +13,6 @@ chain pxelinux.cfg/${mac:hexhyp} || goto ipa :ipa :retry_boot imgfree -kernel --timeout 30000 {{ ironic_http_url }}/ironic-agent.kernel ipa-inspection-callback-url={{ ironic_internal_endpoint }}/v1/continue systemd.journald.forward_to_console=yes BOOTIF=${mac} initrd=ironic-agent.initramfs {{ ironic_kernel_cmdline_extras | join(' ') }} || goto retry_boot +kernel --timeout 30000 {{ ironic_http_url }}/ironic-agent.kernel ipa-inspection-callback-url={{ ironic_internal_endpoint }}/v1/continue_inspection systemd.journald.forward_to_console=yes BOOTIF=${mac} initrd=ironic-agent.initramfs {{ ironic_kernel_cmdline_extras | join(' ') }} || goto retry_boot initrd --timeout 30000 {{ ironic_http_url }}/ironic-agent.initramfs || goto retry_boot boot From 1b3cd72b4d66c4b046964a3108d6446b8cd584e4 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 4 Sep 2025 12:53:40 
+0200 Subject: [PATCH 008/165] skyline: Switch to use to pymysql Closes-Bug: #2124203 Change-Id: I31eab7c55a954f9096dae8a1b058eecc0624ea84 Signed-off-by: Michal Nasiadka --- ansible/roles/skyline/templates/skyline.yaml.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/skyline/templates/skyline.yaml.j2 b/ansible/roles/skyline/templates/skyline.yaml.j2 index c48dc4eeb9..ad7fe09ae9 100644 --- a/ansible/roles/skyline/templates/skyline.yaml.j2 +++ b/ansible/roles/skyline/templates/skyline.yaml.j2 @@ -2,7 +2,7 @@ default: access_token_expire: {{ skyline_access_token_expire_seconds }} access_token_renew: {{ skyline_access_token_renew_seconds }} cors_allow_origins: {{ skyline_backend_cors_origins }} - database_url: mysql://{{ skyline_database_user }}:{{ skyline_database_password }}@{{ skyline_database_address }}/{{ skyline_database_name }}{{ '?ssl_ca=' ~ openstack_cacert if skyline_database_enable_tls_internal | bool }} + database_url: mysql+pymysql://{{ skyline_database_user }}:{{ skyline_database_password }}@{{ skyline_database_address }}/{{ skyline_database_name }}{{ '?ssl_ca=' ~ openstack_cacert if skyline_database_enable_tls_internal | bool }} debug: {{ skyline_logging_debug }} log_dir: {{ log_dir }} {% if enable_prometheus | bool %} From ad5a7200ae5c8f0721a23cde199b0d26c4d52ee7 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 17 Sep 2025 15:53:18 +0200 Subject: [PATCH 009/165] ovn: Add retries to set-connection commands Break down long commands to YAML blocks Closes-Bug: #2124397 Change-Id: Ie3b38876933a234dc7e9782377f4b0128395827f Signed-off-by: Michal Nasiadka --- ansible/roles/ovn-db/tasks/bootstrap-db.yml | 28 ++++++++++++++++++--- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/ansible/roles/ovn-db/tasks/bootstrap-db.yml b/ansible/roles/ovn-db/tasks/bootstrap-db.yml index adeec211a0..79bace7b62 100644 --- a/ansible/roles/ovn-db/tasks/bootstrap-db.yml +++ b/ansible/roles/ovn-db/tasks/bootstrap-db.yml @@ 
-11,7 +11,10 @@ - name: Get OVN_Northbound cluster leader become: true - command: "{{ kolla_container_engine }} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound" + command: >- + {{ kolla_container_engine }} exec ovn_nb_db + ovs-appctl -t /var/run/ovn/ovnnb_db.ctl + cluster/status OVN_Northbound changed_when: False register: ovn_nb_cluster_status @@ -19,12 +22,22 @@ vars: search_string: "Role: leader" become: true - command: "{{ kolla_container_engine }} exec ovn_nb_db ovn-nbctl --inactivity-probe={{ ovn_nb_db_inactivity_probe }} set-connection ptcp:{{ ovn_nb_db_port }}:0.0.0.0" + command: >- + {{ kolla_container_engine }} exec ovn_nb_db + ovn-nbctl --inactivity-probe={{ ovn_nb_db_inactivity_probe }} + set-connection ptcp:{{ ovn_nb_db_port }}:0.0.0.0 + register: ovn_nb_set_connection_result + retries: 3 + delay: 5 + until: ovn_nb_set_connection_result.rc == 0 when: ovn_nb_cluster_status is search(search_string) - name: Get OVN_Southbound cluster leader become: true - command: "{{ kolla_container_engine }} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound" + command: >- + {{ kolla_container_engine }} exec ovn_sb_db + ovs-appctl -t /var/run/ovn/ovnsb_db.ctl + cluster/status OVN_Southbound changed_when: False register: ovn_sb_cluster_status @@ -32,7 +45,14 @@ vars: search_string: "Role: leader" become: true - command: "{{ kolla_container_engine }} exec ovn_sb_db ovn-sbctl --inactivity-probe={{ ovn_sb_db_inactivity_probe }} set-connection ptcp:{{ ovn_sb_db_port }}:0.0.0.0" + command: >- + {{ kolla_container_engine }} exec ovn_sb_db + ovn-sbctl --inactivity-probe={{ ovn_sb_db_inactivity_probe }} + set-connection ptcp:{{ ovn_sb_db_port }}:0.0.0.0 + register: ovn_sb_set_connection_result + retries: 3 + delay: 5 + until: ovn_sb_set_connection_result.rc == 0 when: ovn_sb_cluster_status is search(search_string) - name: Wait for ovn-nb-db From 26218659797329ac14a080bd3a4d0e2fca977867 Mon Sep 17 00:00:00 
2001 From: Michal Nasiadka Date: Thu, 11 Sep 2025 15:16:34 +0200 Subject: [PATCH 010/165] aodh: Switch to uWSGI Change-Id: I52ea761bd5375a94339f25ce0e77df6f78f6d9eb Signed-off-by: Michal Nasiadka --- ansible/roles/aodh/defaults/main.yml | 6 ++++++ ansible/roles/aodh/tasks/config.yml | 18 +++++++++++++++++- ansible/roles/aodh/templates/aodh-api.json.j2 | 15 +++++++++++---- ansible/roles/aodh/templates/aodh.conf.j2 | 3 +++ .../notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml | 2 ++ 5 files changed, 39 insertions(+), 5 deletions(-) diff --git a/ansible/roles/aodh/defaults/main.yml b/ansible/roles/aodh/defaults/main.yml index 30796e9c77..530601f7b2 100644 --- a/ansible/roles/aodh/defaults/main.yml +++ b/ansible/roles/aodh/defaults/main.yml @@ -8,6 +8,7 @@ aodh_services: volumes: "{{ aodh_api_default_volumes + aodh_api_extra_volumes }}" dimensions: "{{ aodh_api_dimensions }}" healthcheck: "{{ aodh_api_healthcheck }}" + wsgi: "aodh.wsgi.api:application" haproxy: aodh_api: enabled: "{{ enable_aodh }}" @@ -257,3 +258,8 @@ aodh_database_enable_tls_internal: "{{ database_enable_tls_internal | bool }}" # Copy certificates ################### aodh_copy_certs: "{{ kolla_copy_ca_into_containers | bool or aodh_database_enable_tls_internal | bool }}" + +#################### +# WSGI +#################### +aodh_wsgi_provider: "uwsgi" diff --git a/ansible/roles/aodh/tasks/config.yml b/ansible/roles/aodh/tasks/config.yml index e5701c87ec..cbf5db25ad 100644 --- a/ansible/roles/aodh/tasks/config.yml +++ b/ansible/roles/aodh/tasks/config.yml @@ -65,6 +65,20 @@ become: true with_dict: "{{ aodh_services | select_services_enabled_and_mapped_to_host }}" +- name: "Configure uWSGI for {{ project_name }}" + include_role: + name: service-uwsgi-config + vars: + project_services: "{{ aodh_services }}" + service: "{{ aodh_services[service_name] }}" + service_name: "aodh-api" + service_uwsgi_config_http_port: "{{ aodh_api_listen_port }}" + service_uwsgi_config_module: "{{ service.wsgi }}" + 
service_uwsgi_config_uid: "aodh" + when: + - aodh_wsgi_provider == "uwsgi" + - service | service_enabled_and_mapped_to_host + - name: Copying over wsgi-aodh files for services vars: service: "{{ aodh_services['aodh-api'] }}" @@ -73,4 +87,6 @@ dest: "{{ node_config_directory }}/aodh-api/wsgi-aodh.conf" mode: "0660" become: true - when: service | service_enabled_and_mapped_to_host + when: + - aodh_wsgi_provider == "apache" + - service | service_enabled_and_mapped_to_host diff --git a/ansible/roles/aodh/templates/aodh-api.json.j2 b/ansible/roles/aodh/templates/aodh-api.json.j2 index b7d4feff77..844fe8db93 100644 --- a/ansible/roles/aodh/templates/aodh-api.json.j2 +++ b/ansible/roles/aodh/templates/aodh-api.json.j2 @@ -1,20 +1,27 @@ -{% set aodh_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} +{% set apache_binary = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} {% set aodh_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %} +{% set command = ('/usr/sbin/' + apache_binary + ' -DFOREGROUND') if aodh_wsgi_provider == 'apache' else 'uwsgi /etc/aodh/aodh-api-uwsgi.ini' %} { - "command": "{{ aodh_cmd }} -DFOREGROUND", + "command": "{{ command }}", "config_files": [ { "source": "{{ container_config_directory }}/aodh.conf", "dest": "/etc/aodh/aodh.conf", "owner": "aodh", "perm": "0600" - }, + }{% if aodh_wsgi_provider == "apache" %}, { "source": "{{ container_config_directory }}/wsgi-aodh.conf", "dest": "/etc/{{ aodh_dir }}/wsgi-aodh.conf", "owner": "root", "perm": "0600" - }{% if aodh_policy_file is defined %}, + }{% elif aodh_wsgi_provider == 'uwsgi' %}, + { + "source": "{{ container_config_directory }}/aodh-api-uwsgi.ini", + "dest": "/etc/aodh/aodh-api-uwsgi.ini", + "owner": "aodh", + "perm": "0600" + }{% endif %}{% if aodh_policy_file is defined %}, { "source": "{{ container_config_directory }}/{{ aodh_policy_file }}", "dest": "/etc/aodh/{{ aodh_policy_file }}", diff --git 
a/ansible/roles/aodh/templates/aodh.conf.j2 b/ansible/roles/aodh/templates/aodh.conf.j2 index d172ad72d6..c9c1803850 100644 --- a/ansible/roles/aodh/templates/aodh.conf.j2 +++ b/ansible/roles/aodh/templates/aodh.conf.j2 @@ -2,6 +2,9 @@ [DEFAULT] auth_strategy = keystone log_dir = /var/log/kolla/aodh +{% if service_name == "aodh-api" %} +log_file = aodh-api.log +{% endif %} debug = {{ aodh_logging_debug }} evaluation_interval = {{ aodh_evaluation_interval }} transport_url = {{ rpc_transport_url }} diff --git a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml index 53fe2c51f4..578af12bda 100644 --- a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml +++ b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml @@ -9,6 +9,8 @@ features: * - Service - Variable + * - Aodh + - aodh_wsgi_provider * - Gnocchi - gnocchi_wsgi_provider * - Heat From 46ba219b7cbe1d03fc5ede4fc0a5ab485a978deb Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 14 Jul 2025 07:30:45 +0200 Subject: [PATCH 011/165] CI: Disable failing dns_integration check Change-Id: I9e09db63b027f3c88f9755c728aa2fb477b9d23b Signed-off-by: Michal Nasiadka --- tests/test-core-openstack.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test-core-openstack.sh b/tests/test-core-openstack.sh index c2c0ea3ad2..aa1cf74033 100755 --- a/tests/test-core-openstack.sh +++ b/tests/test-core-openstack.sh @@ -514,7 +514,8 @@ function test_openstack_logged { test_smoke test_neutron_modules test_instance_boot - test_internal_dns_integration + # NOTE(mnasiadka): Disable because it started failing in OVN scenario + [[ $SCENARIO != "ovn" ]] && test_internal_dns_integration test_proxysql_prometheus_exporter # Check for x86_64 architecture to run q35 tests From 444a18c5499d47a179c61f2be4e43463731c7893 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 18 Sep 2025 12:54:03 +0200 Subject: [PATCH 012/165] ironic: Fix log files 
without suffix This is a follow-up after I2b72712479a05a73b82d8e30235333db2c92ebfd which introduced the logfiles lacking suffix. Change-Id: Ibe43380c16a223e5870baff5c74844aa0f5f87d7 Signed-off-by: Michal Nasiadka --- ansible/roles/ironic/templates/ironic.conf.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ironic/templates/ironic.conf.j2 b/ansible/roles/ironic/templates/ironic.conf.j2 index fcf130bf76..1f0c6a4265 100644 --- a/ansible/roles/ironic/templates/ironic.conf.j2 +++ b/ansible/roles/ironic/templates/ironic.conf.j2 @@ -4,7 +4,7 @@ auth_strategy = noauth {% endif %} debug = {{ ironic_logging_debug }} -log_file = /var/log/kolla/ironic/{{ service_name }} +log_file = /var/log/kolla/ironic/{{ service_name }}.log transport_url = {{ rpc_transport_url }} From b27bea52237c5994e5c9ef657f8efc2fe10d124e Mon Sep 17 00:00:00 2001 From: Seunghun Lee Date: Fri, 22 Aug 2025 15:44:32 +0100 Subject: [PATCH 013/165] Adjust recovery method to support MariaDB>=11.2 This change brings modified method of overcloud database recovery. On database recovery, we've been setting primary database dynamically by comparing ``seqno`` of each MariaDB host and setting ``pc.bootstrap=yes`` to the host with largest ``seqno``. From MariaDB 11.2, ``pc.bootstrap`` cannot be dynamically set with MariaDB command anymore [1]. Modified method keeps dynamic primary assignment by setting ``pc.bootstrap=yes`` in ``galera.cnf`` of primary host before restarting MariaDB then unset it after whole recovery process is finished. 
[1] https://jira.mariadb.org/browse/MDEV-32800 Change-Id: I22910575d4d1f8d25740a178fc0529dbf35b768f Signed-off-by: Seunghun Lee --- .../roles/mariadb/tasks/recover_cluster.yml | 41 +++++++++++++++---- ansible/roles/mariadb/templates/galera.cnf.j2 | 2 +- 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml index 11e4b8333e..165aa3963b 100644 --- a/ansible/roles/mariadb/tasks/recover_cluster.yml +++ b/ansible/roles/mariadb/tasks/recover_cluster.yml @@ -111,6 +111,22 @@ - bootstrap_host is defined - bootstrap_host == inventory_hostname +- name: Refresh galera.cnf to set first MariaDB container as primary + vars: + service_name: "mariadb" + service: "{{ mariadb_services[service_name] }}" + primary_host_on_recovery: "{{ bootstrap_host == inventory_hostname }}" + merge_configs: + sources: + - "{{ role_path }}/templates/galera.cnf.j2" + - "{{ node_custom_config }}/galera.cnf" + - "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/galera.cnf" + dest: "{{ node_config_directory }}/{{ service_name }}/galera.cnf" + mode: "0660" + become: true + when: + - bootstrap_host is defined + - name: Starting first MariaDB container become: true kolla_container: @@ -144,14 +160,6 @@ - bootstrap_host is defined - bootstrap_host == inventory_hostname -- name: Set first MariaDB container as primary - become: true - shell: "{{ kolla_container_engine }} exec {{ mariadb_service.container_name }} mariadb -uroot -p{{ database_password }} -e \"SET GLOBAL wsrep_provider_options='pc.bootstrap=yes';\"" - no_log: True - when: - - bootstrap_host is defined - - bootstrap_host == inventory_hostname - - name: Wait for MariaDB to become operational become: true kolla_toolbox: @@ -203,6 +211,23 @@ - bootstrap_host is defined - bootstrap_host != inventory_hostname +- name: Unset pc.bootstrap for primary MariaDB galera.cnf for next restart + vars: + service_name: "mariadb" + service: "{{ 
mariadb_services[service_name] }}" + primary_host_on_recovery: false + merge_configs: + sources: + - "{{ role_path }}/templates/galera.cnf.j2" + - "{{ node_custom_config }}/galera.cnf" + - "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/galera.cnf" + dest: "{{ node_config_directory }}/{{ service_name }}/galera.cnf" + mode: "0660" + become: true + when: + - bootstrap_host is defined + - bootstrap_host == inventory_hostname + - name: Restart master MariaDB container(s) become: true kolla_container: diff --git a/ansible/roles/mariadb/templates/galera.cnf.j2 b/ansible/roles/mariadb/templates/galera.cnf.j2 index c7e5916fd5..e9201ea346 100644 --- a/ansible/roles/mariadb/templates/galera.cnf.j2 +++ b/ansible/roles/mariadb/templates/galera.cnf.j2 @@ -32,7 +32,7 @@ datadir=/var/lib/mysql/ wsrep_cluster_address=gcomm://{% if (groups[mariadb_shard_group] | length) > 1 %}{% for host in groups[mariadb_shard_group] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ mariadb_wsrep_port }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %} -wsrep_provider_options=gmcast.listen_addr=tcp://{{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_wsrep_port }};ist.recv_addr={{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_ist_port }};{% for option in mariadb_wsrep_extra_provider_options %}{{ option }}{% if not loop.last %};{% endif %}{% endfor %} +wsrep_provider_options={% if primary_host_on_recovery is defined and primary_host_on_recovery %}pc.bootstrap=yes;{% endif %}gmcast.listen_addr=tcp://{{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_wsrep_port }};ist.recv_addr={{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_ist_port }};{% for option in mariadb_wsrep_extra_provider_options %}{{ option }}{% if not loop.last %};{% endif %}{% endfor %} wsrep_node_address={{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_wsrep_port }} From 
512488a9592a6eac5a4fad51ecd67de1e416c8eb Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 3 Jan 2025 12:28:05 +0000 Subject: [PATCH 014/165] keystone: Add support for using uWSGI Switching the default to uWSGI due to the changes that are happening in OpenStack - with projects dropping wsgi_file(s) and moving to modules. Depends-On: https://review.opendev.org/c/openstack/kolla/+/938402 Signed-Off-By: Michal Nasiadka Change-Id: I33d9f32dd842a12b893f5062f2e1e278af7b30a1 --- ansible/group_vars/all.yml | 1 + ansible/roles/keystone/defaults/main.yml | 6 ++++++ ansible/roles/keystone/tasks/config.yml | 21 ++++++++++++++++++- .../keystone/templates/keystone-startup.sh.j2 | 6 ++++-- .../roles/keystone/templates/keystone.json.j2 | 13 ++++++++---- .../uwsgi-flamingo-5144740f1a2bb4fb.yaml | 2 ++ 6 files changed, 42 insertions(+), 7 deletions(-) diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index bc40c355f7..435937c58e 100644 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -490,6 +490,7 @@ keystone_public_port: "{{ haproxy_single_external_frontend_public_port if haprox keystone_public_listen_port: "5000" keystone_internal_port: "5000" keystone_internal_listen_port: "{{ keystone_internal_port }}" +keystone_listen_port: "{{ keystone_internal_listen_port }}" keystone_ssh_port: "8023" diff --git a/ansible/roles/keystone/defaults/main.yml b/ansible/roles/keystone/defaults/main.yml index 3aad442a2c..b53ff1b531 100644 --- a/ansible/roles/keystone/defaults/main.yml +++ b/ansible/roles/keystone/defaults/main.yml @@ -8,6 +8,7 @@ keystone_services: volumes: "{{ keystone_default_volumes + keystone_extra_volumes }}" dimensions: "{{ keystone_dimensions }}" healthcheck: "{{ keystone_healthcheck }}" + wsgi: "keystone.wsgi.api:application" haproxy: keystone_internal: enabled: "{{ enable_keystone }}" @@ -257,3 +258,8 @@ keystone_database_enable_tls_internal: "{{ database_enable_tls_internal | bool } # Copy certificates ################### 
keystone_copy_certs: "{{ kolla_copy_ca_into_containers | bool or keystone_enable_tls_backend | bool or keystone_database_enable_tls_internal | bool }}" + +############ +# WSGI +############ +keystone_wsgi_provider: "uwsgi" diff --git a/ansible/roles/keystone/tasks/config.yml b/ansible/roles/keystone/tasks/config.yml index d9c54e88e6..3f1a26fc39 100644 --- a/ansible/roles/keystone/tasks/config.yml +++ b/ansible/roles/keystone/tasks/config.yml @@ -132,12 +132,31 @@ dest: "{{ node_config_directory }}/keystone/wsgi-keystone.conf" mode: "0660" become: true - when: service | service_enabled_and_mapped_to_host + when: + - service | service_enabled_and_mapped_to_host + - keystone_wsgi_provider == "apache" with_first_found: - "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/wsgi-keystone.conf" - "{{ node_custom_config }}/keystone/wsgi-keystone.conf" - "wsgi-keystone.conf.j2" +- name: "Configure uWSGI for Keystone" + include_role: + name: service-uwsgi-config + vars: + project_services: "{{ keystone_services }}" + service: "{{ keystone_services['keystone'] }}" + service_name: "keystone" + service_uwsgi_config_http_port: "{{ keystone_listen_port }}" + service_uwsgi_config_module: "{{ service.wsgi }}" + service_uwsgi_config_tls_backend: "{{ keystone_enable_tls_backend | bool }}" + service_uwsgi_config_tls_cert: "/etc/keystone/certs/keystone-cert.pem" + service_uwsgi_config_tls_key: "/etc/keystone/certs/keystone-key.pem" + service_uwsgi_config_uid: "keystone" + when: + - service | service_enabled_and_mapped_to_host + - keystone_wsgi_provider == "uwsgi" + - name: Checking whether keystone-paste.ini file exists vars: service: "{{ keystone_services['keystone'] }}" diff --git a/ansible/roles/keystone/templates/keystone-startup.sh.j2 b/ansible/roles/keystone/templates/keystone-startup.sh.j2 index 126ec865df..224e86f5dd 100644 --- a/ansible/roles/keystone/templates/keystone-startup.sh.j2 +++ b/ansible/roles/keystone/templates/keystone-startup.sh.j2 @@ -1,5 +1,7 @@ 
#!/bin/bash -x -{% set keystone_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} +{% set apache_cmd = '/usr/sbin/apache2' if kolla_base_distro in ['ubuntu', 'debian'] else '/usr/sbin/httpd' %} +{% set uwsgi_cmd = 'uwsgi /etc/keystone/keystone-api-uwsgi.ini' %} +{% set keystone_cmd = uwsgi_cmd if keystone_wsgi_provider == 'uwsgi' else (apache_cmd + ' -DFOREGROUND') %} set -o errexit set -o pipefail @@ -21,4 +23,4 @@ while [ ! -f "${FERNET_KEY_DIR}/0" ]; do fi done -exec /usr/sbin/{{ keystone_cmd }} -DFOREGROUND $@ +exec {{ keystone_cmd }} $@ diff --git a/ansible/roles/keystone/templates/keystone.json.j2 b/ansible/roles/keystone/templates/keystone.json.j2 index c5b567d69f..1a2327c3b3 100644 --- a/ansible/roles/keystone/templates/keystone.json.j2 +++ b/ansible/roles/keystone/templates/keystone.json.j2 @@ -34,13 +34,19 @@ "dest": "/etc/keystone/{{ keystone_policy_file }}", "owner": "keystone", "perm": "0600" - }{% endif %}, + }{% endif %}{% if keystone_wsgi_provider == 'apache' %}, { "source": "{{ container_config_directory }}/wsgi-keystone.conf", "dest": "/etc/{{ keystone_dir }}/wsgi-keystone.conf", "owner": "keystone", "perm": "0600" - }{% if keystone_enable_tls_backend | bool %}, + }{% elif keystone_wsgi_provider == 'uwsgi' %}, + { + "source": "{{ container_config_directory }}/keystone-uwsgi.ini", + "dest": "/etc/keystone/keystone-api-uwsgi.ini", + "owner": "keystone", + "perm": "0600" + }{% endif %}{% if keystone_enable_tls_backend | bool %}, { "source": "{{ container_config_directory }}/keystone-cert.pem", "dest": "/etc/keystone/certs/keystone-cert.pem", @@ -52,8 +58,7 @@ "dest": "/etc/keystone/certs/keystone-key.pem", "owner": "keystone", "perm": "0600" - }{% endif %} - {% if keystone_enable_federation_openid | bool %}, + }{% endif %}{% if keystone_enable_federation_openid | bool %}, { "source": "{{ container_config_directory }}/federation/oidc/metadata", "dest": "{{ keystone_container_federation_oidc_metadata_folder }}", diff --git 
a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml index 578af12bda..d2dafe705b 100644 --- a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml +++ b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml @@ -17,6 +17,8 @@ features: - heat_wsgi_provider * - Ironic - ironic_wsgi_provider + * - Keystone + - keystone_wsgi_provider * - Masakari - masakari_wsgi_provider * - Octavia From 20c1225b3db816eccee2e3b6c5c8c0c0ff23db59 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sun, 21 Sep 2025 03:33:57 +0900 Subject: [PATCH 015/165] Remove option for apache < 2.4 apache 2.4 was released long time ago and is now available in recent operating systems. Change-Id: Ic6c487ad6b0c94edb89e3b8feecd6cfac39e1f5d Signed-off-by: Takashi Kajinami --- ansible/roles/cinder/templates/cinder-wsgi.conf.j2 | 4 +--- ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2 | 4 +--- ansible/roles/heat/templates/wsgi-heat-api.conf.j2 | 4 +--- ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2 | 4 +--- ansible/roles/keystone/templates/wsgi-keystone.conf.j2 | 4 +--- ansible/roles/nova/templates/nova-api-wsgi.conf.j2 | 4 +--- ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2 | 4 +--- ansible/roles/octavia/templates/octavia-wsgi.conf.j2 | 4 +--- ansible/roles/placement/templates/placement-api-wsgi.conf.j2 | 4 +--- ansible/roles/trove/templates/trove-wsgi.conf.j2 | 4 +--- 10 files changed, 10 insertions(+), 30 deletions(-) diff --git a/ansible/roles/cinder/templates/cinder-wsgi.conf.j2 b/ansible/roles/cinder/templates/cinder-wsgi.conf.j2 index b467bab02a..b663c73b4b 100644 --- a/ansible/roles/cinder/templates/cinder-wsgi.conf.j2 +++ b/ansible/roles/cinder/templates/cinder-wsgi.conf.j2 @@ -23,9 +23,7 @@ LogLevel info WSGIScriptAlias / /var/www/cgi-bin/cinder/cinder-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog 
/var/log/kolla/cinder/cinder-api-error.log LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog /var/log/kolla/cinder/cinder-api-access.log logformat diff --git a/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2 b/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2 index 27aea0dd07..b692c2d6fa 100644 --- a/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2 +++ b/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2 @@ -34,9 +34,7 @@ CustomLog "{{ heat_log_dir }}/apache-cfn-access.log" common WSGIScriptAlias / {{ binary_path }}/heat-wsgi-api-cfn WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ heat_log_dir }}/heat-api-cfn-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ heat_log_dir }}/heat-api-cfn-access.log" logformat diff --git a/ansible/roles/heat/templates/wsgi-heat-api.conf.j2 b/ansible/roles/heat/templates/wsgi-heat-api.conf.j2 index d197f764a5..1f285dc153 100644 --- a/ansible/roles/heat/templates/wsgi-heat-api.conf.j2 +++ b/ansible/roles/heat/templates/wsgi-heat-api.conf.j2 @@ -34,9 +34,7 @@ CustomLog "{{ heat_log_dir }}/apache-access.log" common WSGIScriptAlias / {{ binary_path }}/heat-wsgi-api WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ heat_log_dir }}/heat-api-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ heat_log_dir }}/heat-api-access.log" logformat diff --git a/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2 b/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2 index 7e62ac2c87..ecb7a93daa 100644 --- a/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2 +++ b/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2 @@ -37,9 +37,7 
@@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/ironic-api-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ ironic_log_dir }}/ironic-api-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ ironic_log_dir }}/ironic-api-access.log" logformat diff --git a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 index 427c36d105..d6970673c4 100644 --- a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 +++ b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 @@ -44,9 +44,7 @@ LogLevel info WSGIScriptAlias / {{ binary_path }}/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ keystone_log_dir }}/keystone-apache-public-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ keystone_log_dir }}/keystone-apache-public-access.log" logformat diff --git a/ansible/roles/nova/templates/nova-api-wsgi.conf.j2 b/ansible/roles/nova/templates/nova-api-wsgi.conf.j2 index 7acd59eb8c..aeea3e932f 100644 --- a/ansible/roles/nova/templates/nova-api-wsgi.conf.j2 +++ b/ansible/roles/nova/templates/nova-api-wsgi.conf.j2 @@ -37,9 +37,7 @@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/nova-api-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ nova_log_dir }}/nova-api-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ nova_log_dir }}/nova-api-access.log" logformat diff --git a/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2 b/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2 index 
58ab62302f..8519ebf339 100644 --- a/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2 +++ b/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2 @@ -37,9 +37,7 @@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/nova-metadata-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ nova_log_dir }}/nova-metadata-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ nova_log_dir }}/nova-metadata-access.log" logformat diff --git a/ansible/roles/octavia/templates/octavia-wsgi.conf.j2 b/ansible/roles/octavia/templates/octavia-wsgi.conf.j2 index e3a3a598c9..f4203c1c8a 100644 --- a/ansible/roles/octavia/templates/octavia-wsgi.conf.j2 +++ b/ansible/roles/octavia/templates/octavia-wsgi.conf.j2 @@ -24,9 +24,7 @@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/octavia-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/kolla/octavia/octavia-api-error.log LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog /var/log/kolla/octavia/octavia-api-access.log logformat diff --git a/ansible/roles/placement/templates/placement-api-wsgi.conf.j2 b/ansible/roles/placement/templates/placement-api-wsgi.conf.j2 index aa313b6809..c1809aed13 100644 --- a/ansible/roles/placement/templates/placement-api-wsgi.conf.j2 +++ b/ansible/roles/placement/templates/placement-api-wsgi.conf.j2 @@ -25,9 +25,7 @@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/placement-api WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ log_dir }}/placement-api-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ log_dir 
}}/placement-api-access.log" logformat diff --git a/ansible/roles/trove/templates/trove-wsgi.conf.j2 b/ansible/roles/trove/templates/trove-wsgi.conf.j2 index 26449a5384..3d79d04e7d 100644 --- a/ansible/roles/trove/templates/trove-wsgi.conf.j2 +++ b/ansible/roles/trove/templates/trove-wsgi.conf.j2 @@ -24,9 +24,7 @@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/trove-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/kolla/trove/trove-api-error.log LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog /var/log/kolla/trove/trove-api-access.log logformat From 5c087f54cff9a7c7fd3d5a6fd3ce622ee415f694 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 15 Sep 2025 14:29:39 +0200 Subject: [PATCH 016/165] CI: Reorganise scenarios into project templates This patch moves into using project templates, which is easier to organise. base_distro and upgrade variables are derived from base.yaml and respective job name - so don't need to be specified in each job anymore. All jobs will now also have an OS release specified, so it's easier to add different OS release jobs (e.g. when backporting new release support to stable branch). 
Change-Id: I1f08f9b9064d9f7e95b07944db0e7ebac3120c57 Signed-off-by: Michal Nasiadka --- roles/openstack-clients/defaults/main.yml | 6 +- tests/run.yml | 4 +- tests/templates/globals-default.j2 | 2 +- tests/test-hashicorp-vault-passwords.sh | 4 +- tests/upgrade.sh | 2 +- zuul.d/base.yaml | 374 +---------- zuul.d/jobs.yaml | 626 ------------------ zuul.d/nodesets.yaml | 64 +- zuul.d/project.yaml | 83 +-- zuul.d/scenarios/aio.yaml | 130 ++++ zuul.d/scenarios/bifrost.yaml | 31 + zuul.d/scenarios/cells.yaml | 34 + zuul.d/scenarios/cephadm.yaml | 59 ++ .../scenarios/container-engine-migration.yaml | 42 ++ zuul.d/scenarios/haproxy-fqdn.yaml | 34 + zuul.d/scenarios/hashi-vault.yaml | 35 + zuul.d/scenarios/ipv6.yaml | 38 ++ zuul.d/scenarios/ironic.yaml | 53 ++ zuul.d/scenarios/kvm.yaml | 29 + zuul.d/scenarios/lets-encrypt.yaml | 37 ++ zuul.d/scenarios/magnum.yaml | 36 + zuul.d/scenarios/mariadb.yaml | 41 ++ zuul.d/scenarios/masakari.yaml | 36 + zuul.d/scenarios/nfv.yaml | 37 ++ zuul.d/scenarios/octavia.yaml | 34 + zuul.d/scenarios/ovn.yaml | 49 ++ zuul.d/scenarios/prometheus-opensearch.yaml | 55 ++ zuul.d/scenarios/skyline.yaml | 51 ++ zuul.d/scenarios/telemetry.yaml | 33 + zuul.d/scenarios/venus.yaml | 42 ++ zuul.d/scenarios/zun.yaml | 39 ++ 31 files changed, 1041 insertions(+), 1099 deletions(-) delete mode 100644 zuul.d/jobs.yaml create mode 100644 zuul.d/scenarios/aio.yaml create mode 100644 zuul.d/scenarios/bifrost.yaml create mode 100644 zuul.d/scenarios/cells.yaml create mode 100644 zuul.d/scenarios/cephadm.yaml create mode 100644 zuul.d/scenarios/container-engine-migration.yaml create mode 100644 zuul.d/scenarios/haproxy-fqdn.yaml create mode 100644 zuul.d/scenarios/hashi-vault.yaml create mode 100644 zuul.d/scenarios/ipv6.yaml create mode 100644 zuul.d/scenarios/ironic.yaml create mode 100644 zuul.d/scenarios/kvm.yaml create mode 100644 zuul.d/scenarios/lets-encrypt.yaml create mode 100644 zuul.d/scenarios/magnum.yaml create mode 100644 
zuul.d/scenarios/mariadb.yaml create mode 100644 zuul.d/scenarios/masakari.yaml create mode 100644 zuul.d/scenarios/nfv.yaml create mode 100644 zuul.d/scenarios/octavia.yaml create mode 100644 zuul.d/scenarios/ovn.yaml create mode 100644 zuul.d/scenarios/prometheus-opensearch.yaml create mode 100644 zuul.d/scenarios/skyline.yaml create mode 100644 zuul.d/scenarios/telemetry.yaml create mode 100644 zuul.d/scenarios/venus.yaml create mode 100644 zuul.d/scenarios/zun.yaml diff --git a/roles/openstack-clients/defaults/main.yml b/roles/openstack-clients/defaults/main.yml index 8545a8973a..6b1e3c6d97 100644 --- a/roles/openstack-clients/defaults/main.yml +++ b/roles/openstack-clients/defaults/main.yml @@ -3,7 +3,7 @@ openstack_clients_pip_packages: - package: aodhclient enabled: "{{ scenario == 'telemetry' }}" - package: python-barbicanclient - enabled: "{{ scenario == 'scenario_nfv' }}" + enabled: "{{ scenario == 'nfv' }}" - package: python-designateclient enabled: "{{ scenario == 'magnum' }}" - package: gnocchiclient @@ -17,13 +17,13 @@ openstack_clients_pip_packages: - package: python-masakariclient enabled: "{{ scenario == 'masakari' }}" - package: python-mistralclient - enabled: "{{ scenario == 'scenario_nfv' }}" + enabled: "{{ scenario == 'nfv' }}" - package: python-octaviaclient enabled: "{{ scenario in ['octavia', 'ovn'] }}" - package: python-openstackclient enabled: true - package: python-tackerclient - enabled: "{{ scenario == 'scenario_nfv' }}" + enabled: "{{ scenario == 'nfv' }}" - package: python-troveclient enabled: "{{ scenario == 'magnum' }}" - package: python-zunclient diff --git a/tests/run.yml b/tests/run.yml index d03187b956..eca77e215e 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -430,7 +430,7 @@ EXT_NET_GATEWAY: "{{ neutron_external_network_prefix }}1" EXT_NET_DEMO_ROUTER_ADDR: "{{ neutron_external_network_prefix }}10" SCENARIO: "{{ scenario }}" - when: openstack_core_tested or scenario in ['ironic', 'magnum', 'scenario_nfv', 'zun', 'octavia'] 
+ when: openstack_core_tested or scenario in ['ironic', 'magnum', 'nfv', 'zun', 'octavia'] - name: Run test-ovn.sh script script: @@ -468,7 +468,7 @@ cmd: test-scenario-nfv.sh executable: /bin/bash chdir: "{{ kolla_ansible_src_dir }}" - when: scenario == "scenario_nfv" + when: scenario == "nfv" - block: - name: Run deploy-tenks.sh script diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2 index 77b02f4b62..387e1c9b73 100644 --- a/tests/templates/globals-default.j2 +++ b/tests/templates/globals-default.j2 @@ -110,7 +110,7 @@ enable_prometheus: "yes" enable_prometheus_openstack_exporter: "no" {% endif %} -{% if scenario == "scenario_nfv" %} +{% if scenario == "nfv" %} enable_tacker: "yes" enable_neutron_sfc: "yes" enable_mistral: "yes" diff --git a/tests/test-hashicorp-vault-passwords.sh b/tests/test-hashicorp-vault-passwords.sh index caa9ba6a4c..64648d5001 100755 --- a/tests/test-hashicorp-vault-passwords.sh +++ b/tests/test-hashicorp-vault-passwords.sh @@ -6,9 +6,9 @@ set -o errexit export PYTHONUNBUFFERED=1 function install_vault { - if [[ "debian" == $BASE_DISTRO ]]; then + if [[ $BASE_DISTRO =~ (debian|ubuntu) ]]; then curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - - sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" + sudo apt-add-repository -y "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" sudo apt-get update -y && sudo apt-get install -y vault jq else sudo dnf install -y yum-utils diff --git a/tests/upgrade.sh b/tests/upgrade.sh index a53a440e5b..71b8fe737e 100755 --- a/tests/upgrade.sh +++ b/tests/upgrade.sh @@ -29,7 +29,7 @@ function upgrade { if [[ $SCENARIO == "zun" ]] || [[ $SCENARIO == "cephadm" ]]; then SERVICE_TAGS+=",cinder" fi - if [[ $SCENARIO == "scenario_nfv" ]]; then + if [[ $SCENARIO == "nfv" ]]; then SERVICE_TAGS+=",barbican" fi if [[ $SCENARIO == "ironic" ]]; then diff --git a/zuul.d/base.yaml 
b/zuul.d/base.yaml index 8bc43be5b0..4d9d263dd9 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -6,7 +6,8 @@ # Test latest ansible-core version on Ubuntu, minimum supported on others. # Use SLURP version (two releases back) on SLURP upgrades. ansible_core_version_constraint: >- - {{ ansible_core_version_slurp if is_slurp else ansible_core_version_min if is_upgrade or ansible_facts.distribution != "Ubuntu" else ansible_core_version_max }} + {{ ansible_core_version_slurp if is_slurp else ansible_core_version_min if + (is_upgrade or ansible_facts.distribution != "Ubuntu") else ansible_core_version_max }} ansible_core_version_slurp: "==2.16.*" ansible_core_version_max: "==2.18.*" ansible_core_version_min: "==2.17.*" @@ -14,21 +15,23 @@ api_interface_name: "vxlan-0" api_network_prefix: "192.0.2." api_network_prefix_length: "24" + base_distro: "{{ zuul.job.split('-').2 }}" configure_swap_size: 0 container_engine: "docker" - is_upgrade: false - is_slurp: false + is_upgrade: "{{ 'upgrade' in zuul.job }}" + is_slurp: "{{ 'slurp' in zuul.job }}" kolla_internal_vip_address: "192.0.2.10" le_enabled: false neutron_external_bridge_name: br-0 - neutron_external_interface_name: "veth-{{ neutron_external_bridge_name }}-ext" + neutron_external_interface_name: >- + veth-{{ neutron_external_bridge_name }}-ext neutron_external_network_prefix: "198.51.100." neutron_external_network_prefix_length: "24" neutron_external_vxlan_interface_name: "vxlan-1" neutron_tenant_network_dns_server: "8.8.8.8" neutron_tenant_network_prefix: "203.0.113." 
neutron_tenant_network_prefix_length: "24" - previous_release: "2025.1" + previous_release: "{{ '2025.1' if is_slurp else '2025.1' }}" scenario: core scenario_images_core: - ^cron @@ -48,7 +51,7 @@ - ^placement - ^proxysql - ^rabbitmq - tls_enabled: false + tls_enabled: true virt_type: qemu - job: @@ -95,362 +98,3 @@ - ^tests/get_logs.sh - ^tests/(pre|run).yml - ^tests/templates/(inventory|globals-default.j2) - -- job: - name: kolla-ansible-kvm-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(nova-cell)/ - - ^tests/templates/nova-compute-overrides.j2 - vars: - virt_type: kvm - -- job: - name: kolla-ansible-ipv6-base - parent: kolla-ansible-base - voting: false - vars: - api_network_prefix: "fd::" - api_network_prefix_length: "64" - kolla_internal_vip_address: "fd::ff:0" - neutron_external_network_prefix: "fd:1::" - neutron_external_network_prefix_length: "64" - neutron_tenant_network_prefix: "fd:f0::" - neutron_tenant_network_prefix_length: "64" - neutron_tenant_network_dns_server: 2001:4860:4860::8888 - address_family: 'ipv6' - scenario: ipv6 - -- job: - name: kolla-ansible-bifrost-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/bifrost/ - - ^tests/test-bifrost.sh - vars: - scenario: bifrost - scenario_images_core: - - ^bifrost - -- job: - name: kolla-ansible-ironic-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(ironic|neutron|nova|nova-cell)/ - - ^tests/deploy-tenks\.sh$ - - ^tests/templates/ironic-overrides\.j2$ - - ^tests/templates/tenks-deploy-config\.yml\.j2$ - - ^tests/test-dashboard\.sh$ - - ^tests/test-ironic\.sh$ - required-projects: - - openstack/tenks - vars: - scenario: ironic - scenario_images_extra: - - ^dnsmasq - - ^ironic - - ^iscsid - - ^prometheus - -- job: - name: kolla-ansible-zun-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - 
^ansible/roles/(zun|kuryr|etcd|cinder|iscsi)/ - - ^tests/setup_disks.sh - - ^tests/test-core-openstack.sh - - ^tests/test-zun.sh - - ^tests/test-dashboard.sh - vars: - scenario: zun - scenario_images_extra: - - ^zun - - ^kuryr - - ^etcd - - ^cinder - - ^iscsid - - ^tgtd - -- job: - name: kolla-ansible-swift-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(glance|swift)/ - - ^tests/setup_disks.sh - - ^tests/init-swift.sh - - ^tests/test-core-openstack.sh - - ^tests/test-dashboard.sh - - ^tests/test-swift.sh - vars: - scenario: swift - scenario_images_extra: - - ^swift - -- job: - name: kolla-ansible-cephadm-base - parent: kolla-ansible-base - voting: false - vars: - scenario: cephadm - scenario_images_extra: - - ^cinder - - ^redis - -- job: - name: kolla-ansible-magnum-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(designate|magnum|trove)/ - - ^tests/test-dashboard.sh - - ^tests/test-magnum.sh - vars: - scenario: magnum - scenario_images_extra: - - ^designate - - ^magnum - - ^trove - -- job: - name: kolla-ansible-octavia-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(octavia|octavia-certificates)/ - - ^tests/test-dashboard.sh - - ^tests/test-octavia.sh - vars: - scenario: octavia - scenario_images_extra: - - ^redis - - ^octavia - -- job: - name: kolla-ansible-masakari-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/masakari/ - - ^ansible/roles/hacluster/ - - ^tests/test-masakari.sh - - ^tests/test-dashboard.sh - vars: - scenario: masakari - scenario_images_extra: - - ^masakari - - ^hacluster - -- job: - name: kolla-ansible-mariadb-base - parent: kolla-ansible-scenario-base - voting: true - files: !inherit - - ^ansible/roles/(loadbalancer|mariadb|proxysql-config)/ - - ^tests/test-mariadb.sh - vars: - scenario: mariadb - scenario_images_core: - - ^cron - - ^fluentd - - 
^haproxy - - ^keepalived - - ^kolla-toolbox - - ^mariadb - - ^proxysql - -- job: - name: kolla-ansible-scenario-nfv-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(aodh|barbican|heat|mistral|redis|tacker)/ - - ^tests/test-scenario-nfv.sh - - ^tests/test-dashboard.sh - vars: - scenario: scenario_nfv - scenario_images_extra: - - ^aodh - - ^tacker - - ^mistral - - ^redis - - ^barbican - -- job: - name: kolla-ansible-cells-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/nova/ - - ^ansible/roles/nova-cell/ - - ^ansible/roles/loadbalancer/ - - ^tests/test-core-openstack.sh - - ^tests/test-proxysql.sh - vars: - scenario: cells - scenario_images_extra: - - ^proxysql - -- job: - name: kolla-ansible-ovn-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(neutron|octavia|openvswitch|ovn-controller|ovn-db)/ - - ^tests/test-ovn.sh - - ^tests/test-core-openstack.sh - - ^tests/reconfigure.sh - vars: - scenario: ovn - scenario_images_extra: - - ^redis - - ^octavia - - ^ovn - -- job: - name: kolla-ansible-prometheus-opensearch-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/site.yml - - ^ansible/roles/(common|opensearch|grafana|prometheus)/ - - ^tests/test-prometheus-opensearch.sh - vars: - scenario: prometheus-opensearch - scenario_images_core: - - ^cron - - ^fluentd - - ^grafana - - ^haproxy - - ^keepalived - - ^kolla-toolbox - - ^mariadb - - ^memcached - - ^opensearch - - ^prometheus - - ^proxysql - - ^rabbitmq - -- job: - name: kolla-ansible-venus-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(common|opensearch|venus)/ - - ^tests/test-venus.sh - vars: - scenario: venus - scenario_images_core: - - ^cron - - ^opensearch - - ^fluentd - - ^haproxy - - ^keepalived - - ^keystone - - ^kolla-toolbox - - ^mariadb - - ^memcached - - ^rabbitmq - - ^venus - -- 
job: - name: kolla-ansible-hashi-vault-base - parent: kolla-ansible-variables - run: tests/run-hashi-vault.yml - required-projects: - - openstack/kolla-ansible - - openstack/requirements - voting: false - files: - - ^requirements-core.yml - - ^tests/templates/(inventory|globals-default.j2) - - ^tests/(pre|run).yml - - ^kolla_ansible/ - - ^tests/run-hashi-vault.yml - - ^tests/test-hashicorp-vault-passwords.sh - -- job: - name: kolla-ansible-haproxy-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/haproxy/ - - ^ansible/roles/loadbalancer/ - - ^kolla_ansible/kolla_url.py - vars: - external_api_interface_name: vxlan2 - external_api_network_prefix: "192.0.3." - external_api_network_prefix_length: "24" - kolla_external_vip_address: "192.0.3.10" - scenario: haproxy - -- job: - name: kolla-ansible-lets-encrypt-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/common/templates/conf/input/11-letsencrypt.conf.j2 - - ^ansible/roles/(letsencrypt|loadbalancer)/ - - ^tests/test-core-openstack.sh - - ^tests/test-dashboard.sh - - ^tests/deploy.sh - vars: - scenario: lets-encrypt - scenario_images_extra: - - ^letsencrypt - - ^haproxy - tls_enabled: true - le_enabled: true - -- job: - name: kolla-ansible-skyline-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/skyline/ - - ^tests/test-skyline.sh - vars: - scenario: skyline - scenario_images_extra: - - ^skyline - -- job: - name: kolla-ansible-skyline-sso-base - parent: kolla-ansible-scenario-base - voting: false - files: - - ^ansible/roles/skyline/ - - ^tests/test-skyline-sso.sh - vars: - scenario: skyline-sso - scenario_images_extra: - - ^skyline - -- job: - name: kolla-ansible-telemetry-base - parent: kolla-ansible-scenario-base - voting: false - files: - - ^ansible/roles/(aodh|ceilometer|gnocchi)/ - - ^tests/test-telemetry.sh - vars: - scenario: telemetry - scenario_images_extra: - - ^aodh - - 
^ceilometer - - ^gnocchi - -- job: - name: kolla-ansible-container-engine-migration-base - parent: kolla-ansible-base - voting: false - files: - - ^ansible/migrate-container-engine.yml - - ^ansible/roles/container-engine-migration/ - - ^tests/test-container-engine-migration.sh - vars: - scenario: container-engine-migration diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml deleted file mode 100644 index af453334d3..0000000000 --- a/zuul.d/jobs.yaml +++ /dev/null @@ -1,626 +0,0 @@ ---- -- job: - name: kolla-ansible-centos9s - parent: kolla-ansible-base - nodeset: kolla-ansible-centos9s - voting: false - vars: - base_distro: centos - tls_enabled: true - kolla_build_images: true - -- job: - name: kolla-ansible-centos10s - parent: kolla-ansible-base - nodeset: kolla-ansible-centos10s-8GB - voting: false - vars: - base_distro: centos - tls_enabled: true - kolla_build_images: true - -- job: - name: kolla-ansible-centos10s-aarch64 - parent: kolla-ansible-centos10s - nodeset: kolla-ansible-centos10s-aarch64-8GB - -- job: - name: kolla-ansible-debian-aarch64 - parent: kolla-ansible-debian - nodeset: kolla-ansible-debian-bookworm-aarch64-8GB - timeout: 10800 - voting: false - required-projects: - - openstack/kolla - -- job: - name: kolla-ansible-debian-aarch64-podman - parent: kolla-ansible-debian - nodeset: kolla-ansible-debian-bookworm-aarch64-8GB - timeout: 10800 - voting: false - vars: - container_engine: podman - required-projects: - - openstack/kolla - -- job: - name: kolla-ansible-debian - parent: kolla-ansible-base - nodeset: kolla-ansible-debian-bookworm-16GB - vars: - base_distro: debian - tls_enabled: true - -- job: - name: kolla-ansible-debian-podman - parent: kolla-ansible-base - nodeset: kolla-ansible-debian-bookworm-16GB - vars: - base_distro: debian - tls_enabled: true - container_engine: podman - -- job: - name: kolla-ansible-rocky9 - parent: kolla-ansible-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - tls_enabled: true - -- job: - name: 
kolla-ansible-rocky9-podman - parent: kolla-ansible-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - tls_enabled: true - container_engine: podman - -- job: - name: kolla-ansible-ubuntu - parent: kolla-ansible-base - nodeset: kolla-ansible-ubuntu-noble-16GB - vars: - base_distro: ubuntu - tls_enabled: true - -- job: - name: kolla-ansible-ubuntu-podman - parent: kolla-ansible-base - nodeset: kolla-ansible-ubuntu-noble-16GB - vars: - base_distro: ubuntu - tls_enabled: true - container_engine: podman - -- job: - name: kolla-ansible-rocky9-kvm - parent: kolla-ansible-kvm-base - nodeset: kolla-ansible-rocky9-nested-virt - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-kvm - parent: kolla-ansible-kvm-base - nodeset: kolla-ansible-ubuntu-noble-nested-virt - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-multinode-ipv6 - parent: kolla-ansible-ipv6-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-multinode-ipv6 - parent: kolla-ansible-ipv6-base - nodeset: kolla-ansible-ubuntu-noble-multi-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-rocky9-multi - timeout: 10800 - vars: - base_distro: rocky - cephadm_use_package_from_distribution: true - -- job: - name: kolla-ansible-ubuntu-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - timeout: 10800 - vars: - base_distro: ubuntu - cephadm_use_package_from_distribution: true - -- job: - name: kolla-ansible-debian-mariadb - parent: kolla-ansible-mariadb-base - nodeset: kolla-ansible-debian-bookworm-multi-16GB - vars: - base_distro: debian - -- job: - name: kolla-ansible-rocky9-mariadb - parent: kolla-ansible-mariadb-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-mariadb - parent: kolla-ansible-mariadb-base - nodeset: 
kolla-ansible-ubuntu-noble-multi-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-rocky9 - timeout: 10800 - vars: - base_distro: rocky - is_upgrade: yes - tls_enabled: true - -- job: - name: kolla-ansible-rocky9-slurp-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-rocky9 - timeout: 9000 - vars: - base_distro: rocky - is_upgrade: yes - is_slurp: yes - previous_release: "2024.1" - tls_enabled: true - -- job: - name: kolla-ansible-debian-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-debian-bookworm-16GB - timeout: 10800 - vars: - base_distro: debian - is_upgrade: yes - tls_enabled: true - -- job: - name: kolla-ansible-debian-slurp-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-debian-bookworm-16GB - timeout: 9000 - vars: - base_distro: debian - is_upgrade: yes - is_slurp: yes - previous_release: "2024.1" - tls_enabled: true - -- job: - name: kolla-ansible-debian-upgrade-aarch64 - parent: kolla-ansible-debian-upgrade - nodeset: kolla-ansible-debian-bookworm-aarch64-8GB - voting: false - -- job: - name: kolla-ansible-ubuntu-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-ubuntu-noble-16GB - timeout: 10800 - vars: - base_distro: ubuntu - is_upgrade: yes - tls_enabled: true - -- job: - name: kolla-ansible-ubuntu-slurp-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-ubuntu-noble-16GB - timeout: 9000 - vars: - base_distro: ubuntu - is_upgrade: yes - is_slurp: yes - previous_release: "2024.1" - tls_enabled: true - -- job: - name: kolla-ansible-rocky9-upgrade-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-rocky9-multi - timeout: 10800 - vars: - base_distro: rocky - is_upgrade: yes - -- job: - name: kolla-ansible-rocky9-slurp-upgrade-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-rocky9-multi - timeout: 9000 - vars: - base_distro: rocky - is_upgrade: yes - is_slurp: yes - 
previous_release: "2024.1" - -- job: - name: kolla-ansible-ubuntu-upgrade-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - timeout: 10800 - vars: - base_distro: ubuntu - is_upgrade: yes - cephadm_use_package_from_distribution: true - -- job: - name: kolla-ansible-ubuntu-slurp-upgrade-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - timeout: 9000 - vars: - base_distro: ubuntu - is_upgrade: yes - is_slurp: yes - previous_release: "2024.1" - cephadm_use_package_from_distribution: true - -- job: - name: kolla-ansible-rocky9-upgrade-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-rocky9-multi - timeout: 10800 - vars: - base_distro: rocky - is_upgrade: yes - -- job: - name: kolla-ansible-debian-upgrade-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-debian-bookworm-multi-16GB - timeout: 10800 - vars: - base_distro: debian - is_upgrade: yes - -- job: - name: kolla-ansible-ubuntu-upgrade-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - timeout: 10800 - vars: - base_distro: ubuntu - is_upgrade: yes - -- job: - name: kolla-ansible-rocky9-bifrost - parent: kolla-ansible-bifrost-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-bifrost - parent: kolla-ansible-bifrost-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-zun - parent: kolla-ansible-zun-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-zun - parent: kolla-ansible-zun-base - nodeset: kolla-ansible-ubuntu-noble-multi-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-scenario-nfv - parent: kolla-ansible-scenario-nfv-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-ironic - parent: 
kolla-ansible-ironic-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-debian-ironic - parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-debian-bookworm-16GB - vars: - base_distro: debian - -- job: - name: kolla-ansible-ubuntu-ironic - parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-ironic-upgrade - parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - is_upgrade: true - -- job: - name: kolla-ansible-debian-ironic-upgrade - parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-debian-bookworm-16GB - vars: - base_distro: debian - is_upgrade: true - -- job: - name: kolla-ansible-ubuntu-ironic-upgrade - parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - is_upgrade: true - -- job: - name: kolla-ansible-rocky9-magnum - parent: kolla-ansible-magnum-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-magnum - parent: kolla-ansible-magnum-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-octavia - parent: kolla-ansible-octavia-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-octavia - parent: kolla-ansible-octavia-base - nodeset: kolla-ansible-ubuntu-noble-16GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-ubuntu-masakari - parent: kolla-ansible-masakari-base - nodeset: kolla-ansible-ubuntu-noble-masakari-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-masakari - parent: kolla-ansible-masakari-base - nodeset: kolla-ansible-rocky9-masakari - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-cells - parent: kolla-ansible-cells-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - vars: - 
base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-cells - parent: kolla-ansible-cells-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-debian-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-debian-bookworm-multi-16GB - vars: - base_distro: debian - -- job: - name: kolla-ansible-ubuntu-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-ubuntu-lets-encrypt - parent: kolla-ansible-lets-encrypt-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-lets-encrypt - parent: kolla-ansible-lets-encrypt-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-prometheus-opensearch - parent: kolla-ansible-prometheus-opensearch-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-prometheus-opensearch - parent: kolla-ansible-prometheus-opensearch-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-prometheus-opensearch-upgrade - parent: kolla-ansible-prometheus-opensearch-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - is_upgrade: yes - -- job: - name: kolla-ansible-ubuntu-prometheus-opensearch-upgrade - parent: kolla-ansible-prometheus-opensearch-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - is_upgrade: yes - -- job: - name: kolla-ansible-rocky9-venus - parent: kolla-ansible-venus-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-venus - parent: kolla-ansible-venus-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - 
base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-hashi-vault - parent: kolla-ansible-hashi-vault-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-haproxy-fqdn - parent: kolla-ansible-haproxy-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - tls_enabled: true - -- job: - name: kolla-ansible-ubuntu-haproxy-fqdn - parent: kolla-ansible-haproxy-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - tls_enabled: true - -- job: - name: kolla-ansible-ubuntu-skyline - parent: kolla-ansible-skyline-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-skyline - parent: kolla-ansible-skyline-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-skyline-sso - parent: kolla-ansible-skyline-sso-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-skyline-sso - parent: kolla-ansible-skyline-sso-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-telemetry - parent: kolla-ansible-telemetry-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-telemetry - parent: kolla-ansible-telemetry-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-container-engine-migration - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-container-engine-migration-multinode - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-container-engine-migration - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-ubuntu-noble-8GB - 
vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-ubuntu-container-engine-migration-multinode - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-ubuntu-noble-multi-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-debian-container-engine-migration - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-debian-bookworm-16GB - vars: - base_distro: debian - -- job: - name: kolla-ansible-debian-container-engine-migration-multinode - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-debian-bookworm-multi-16GB - vars: - base_distro: debian diff --git a/zuul.d/nodesets.yaml b/zuul.d/nodesets.yaml index 68565fba63..a28552933d 100644 --- a/zuul.d/nodesets.yaml +++ b/zuul.d/nodesets.yaml @@ -1,12 +1,12 @@ --- - nodeset: - name: kolla-ansible-centos10s-8GB + name: kolla-ansible-centos-10s-8GB nodes: - name: primary label: centos-10-stream-8GB - nodeset: - name: kolla-ansible-centos10s-aarch64-8GB + name: kolla-ansible-centos-10s-aarch64-8GB nodes: - name: primary label: centos-10-stream-arm64-8GB @@ -30,16 +30,16 @@ label: debian-bookworm-arm64-8GB - nodeset: - name: kolla-ansible-ubuntu-noble-8GB + name: kolla-ansible-debian-bookworm-masakari-8GB nodes: - name: primary - label: ubuntu-noble-8GB - -- nodeset: - name: kolla-ansible-ubuntu-noble-16GB - nodes: - - name: primary - label: ubuntu-noble-16GB + label: debian-bookworm-8GB + - name: secondary + label: debian-bookworm-8GB + - name: ternary1 + label: debian-bookworm-8GB + - name: ternary2 + label: debian-bookworm-8GB - nodeset: name: kolla-ansible-debian-bookworm-multi-8GB @@ -62,10 +62,22 @@ label: debian-bookworm-16GB - nodeset: - name: kolla-ansible-rocky9 + name: kolla-ansible-debian-bookworm-nested-virt + nodes: + - name: primary + label: debian-bookworm-nested-virt-8GB + +- nodeset: + name: kolla-ansible-ubuntu-noble-8GB + nodes: + - name: primary + label: ubuntu-noble-8GB + +- nodeset: + name: 
kolla-ansible-ubuntu-noble-16GB nodes: - name: primary - label: rockylinux-9 + label: ubuntu-noble-16GB - nodeset: name: kolla-ansible-ubuntu-noble-multi-8GB @@ -87,28 +99,12 @@ - name: secondary2 label: ubuntu-noble-16GB -- nodeset: - name: kolla-ansible-rocky9-multi - nodes: - - name: primary - label: rockylinux-9 - - name: secondary1 - label: rockylinux-9 - - name: secondary2 - label: rockylinux-9 - - nodeset: name: kolla-ansible-ubuntu-noble-nested-virt nodes: - name: primary label: ubuntu-noble-nested-virt-8GB -- nodeset: - name: kolla-ansible-rocky9-nested-virt - nodes: - - name: primary - label: nested-virt-centos-9-stream - - nodeset: name: kolla-ansible-ubuntu-noble-masakari-8GB nodes: @@ -120,15 +116,3 @@ label: ubuntu-noble-8GB - name: ternary2 label: ubuntu-noble-8GB - -- nodeset: - name: kolla-ansible-rocky9-masakari - nodes: - - name: primary - label: rockylinux-9 - - name: secondary - label: rockylinux-9 - - name: ternary1 - label: rockylinux-9 - - name: ternary2 - label: rockylinux-9 diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index 82c4562b17..eccfdafd00 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -4,67 +4,32 @@ templates: - ansible-role-jobs - check-requirements + - kolla-ansible-scenario-aio + - kolla-ansible-scenario-bifrost + - kolla-ansible-scenario-cells + - kolla-ansible-scenario-cephadm + # NOTE(mnasiadka): Failing since + # https://review.opendev.org/c/openstack/kolla-ansible/+/864780 + # - kolla-ansible-scenario-container-engine-migration + - kolla-ansible-scenario-haproxy-fqdn + - kolla-ansible-scenario-kvm + - kolla-ansible-scenario-lets-encrypt + - kolla-ansible-scenario-magnum + - kolla-ansible-scenario-mariadb + - kolla-ansible-scenario-masakari + - kolla-ansible-scenario-nfv + - kolla-ansible-scenario-ironic + # NOTE(mnasiadka): All runs end up with DISK_FULL + #- kolla-ansible-scenario-ipv6 + - kolla-ansible-scenario-octavia + - kolla-ansible-scenario-ovn + - kolla-ansible-scenario-prometheus-opensearch 
+ # NOTE(mnasiadka): SSO and non-SSO tests are failing + #- kolla-ansible-scenario-skyline + - kolla-ansible-scenario-telemetry + - kolla-ansible-scenario-venus - openstack-cover-jobs - openstack-python3-jobs - periodic-stable-jobs - publish-openstack-docs-pti - release-notes-jobs-python3 - check: - jobs: - - kolla-ansible-centos10s - - kolla-ansible-debian - - kolla-ansible-debian-podman - - kolla-ansible-ubuntu - - kolla-ansible-ubuntu-podman - - kolla-ansible-ubuntu-kvm - - kolla-ansible-ubuntu-multinode-ipv6 - - kolla-ansible-ubuntu-bifrost - - kolla-ansible-ubuntu-magnum - - kolla-ansible-ubuntu-octavia - - kolla-ansible-ubuntu-masakari - - kolla-ansible-debian-ironic - - kolla-ansible-ubuntu-ironic - - kolla-ansible-debian-ironic-upgrade - - kolla-ansible-ubuntu-ironic-upgrade - - kolla-ansible-debian-upgrade - - kolla-ansible-ubuntu-upgrade - - kolla-ansible-ubuntu-cells - - kolla-ansible-debian-mariadb - - kolla-ansible-ubuntu-mariadb - - kolla-ansible-debian-ovn - - kolla-ansible-ubuntu-ovn - - kolla-ansible-debian-upgrade-ovn - - kolla-ansible-ubuntu-upgrade-ovn - - kolla-ansible-debian - - kolla-ansible-ubuntu-prometheus-opensearch - - kolla-ansible-ubuntu-prometheus-opensearch-upgrade - - kolla-ansible-ubuntu-venus - - kolla-ansible-ubuntu-cephadm - - kolla-ansible-ubuntu-upgrade-cephadm - - kolla-ansible-ubuntu-haproxy-fqdn - - kolla-ansible-ubuntu-lets-encrypt - - kolla-ansible-ubuntu-skyline - - kolla-ansible-ubuntu-skyline-sso - - kolla-ansible-ubuntu-telemetry - - kolla-ansible-ubuntu-container-engine-migration - - kolla-ansible-ubuntu-container-engine-migration-multinode - - kolla-ansible-debian-container-engine-migration - - kolla-ansible-debian-container-engine-migration-multinode - check-arm64: - jobs: - - kolla-ansible-centos10s-aarch64 - - kolla-ansible-debian-aarch64 - - kolla-ansible-debian-aarch64-podman - - kolla-ansible-debian-upgrade-aarch64 - gate: - jobs: - - kolla-ansible-debian - - kolla-ansible-debian-mariadb - - 
kolla-ansible-debian-upgrade - - kolla-ansible-debian-podman - - kolla-ansible-ubuntu - - kolla-ansible-ubuntu-mariadb - - kolla-ansible-ubuntu-prometheus-opensearch - - kolla-ansible-ubuntu-prometheus-opensearch-upgrade - - kolla-ansible-ubuntu-upgrade - - kolla-ansible-ubuntu-podman diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml new file mode 100644 index 0000000000..aa17cf172e --- /dev/null +++ b/zuul.d/scenarios/aio.yaml @@ -0,0 +1,130 @@ +--- +# NOTE(mnasiadka): To be removed after update on kolla side +- job: + name: kolla-ansible-centos10s + parent: kolla-ansible-base + nodeset: kolla-ansible-centos-10s-8GB + voting: false + vars: + base_distro: centos + kolla_build_images: true + + +- job: + name: kolla-ansible-centos-10s + parent: kolla-ansible-base + nodeset: kolla-ansible-centos-10s-8GB + voting: false + vars: + kolla_build_images: true + +- job: + name: kolla-ansible-centos10s-aarch64 + parent: kolla-ansible-centos10s + nodeset: kolla-ansible-centos-10s-aarch64-8GB + +- job: + name: kolla-ansible-centos-10s-aarch64 + parent: kolla-ansible-centos-10s + nodeset: kolla-ansible-centos-10s-aarch64-8GB + +- job: + name: kolla-ansible-debian-bookworm + parent: kolla-ansible-base + nodeset: kolla-ansible-debian-bookworm-16GB + +- job: + name: kolla-ansible-debian-bookworm-aarch64 + parent: kolla-ansible-debian-bookworm + nodeset: kolla-ansible-debian-bookworm-aarch64-8GB + timeout: 10800 + vars: + kolla_build_images: true + voting: false + +- job: + name: kolla-ansible-debian-bookworm-aarch64-podman + parent: kolla-ansible-debian-bookworm-aarch64 + nodeset: kolla-ansible-debian-bookworm-aarch64-8GB + timeout: 10800 + vars: + container_engine: podman + kolla_build_images: true + voting: false + +- job: + name: kolla-ansible-debian-bookworm-podman + parent: kolla-ansible-debian-bookworm + nodeset: kolla-ansible-debian-bookworm-16GB + vars: + container_engine: podman + +- job: + name: kolla-ansible-debian-bookworm-upgrade + parent: 
kolla-ansible-base + nodeset: kolla-ansible-debian-bookworm-16GB + timeout: 10800 + +- job: + name: kolla-ansible-debian-bookworm-aarch64-upgrade + parent: kolla-ansible-debian-bookworm-upgrade + nodeset: kolla-ansible-debian-bookworm-aarch64-8GB + voting: false + +- job: + name: kolla-ansible-debian-bookworm-upgrade-slurp + parent: kolla-ansible-debian-bookworm-upgrade + nodeset: kolla-ansible-debian-bookworm-16GB + timeout: 9000 + +- job: + name: kolla-ansible-ubuntu-noble + parent: kolla-ansible-base + nodeset: kolla-ansible-ubuntu-noble-16GB + +- job: + name: kolla-ansible-ubuntu-noble-podman + parent: kolla-ansible-ubuntu-noble + nodeset: kolla-ansible-ubuntu-noble-16GB + vars: + container_engine: podman + +- job: + name: kolla-ansible-ubuntu-noble-upgrade + parent: kolla-ansible-base + nodeset: kolla-ansible-ubuntu-noble-16GB + timeout: 10800 + +- job: + name: kolla-ansible-ubuntu-noble-upgrade-slurp + parent: kolla-ansible-base + nodeset: kolla-ansible-ubuntu-noble-16GB + timeout: 9000 + +- project-template: + name: kolla-ansible-scenario-aio + description: | + Runs Kolla-Ansible AIO scenario jobs. 
+ check: + jobs: + - kolla-ansible-centos-10s + - kolla-ansible-debian-bookworm + - kolla-ansible-debian-bookworm-podman + - kolla-ansible-debian-bookworm-upgrade + - kolla-ansible-ubuntu-noble + - kolla-ansible-ubuntu-noble-podman + - kolla-ansible-ubuntu-noble-upgrade + check-arm64: + jobs: + - kolla-ansible-centos-10s-aarch64 + - kolla-ansible-debian-bookworm-aarch64 + - kolla-ansible-debian-bookworm-aarch64-podman + - kolla-ansible-debian-bookworm-aarch64-upgrade + gate: + jobs: + - kolla-ansible-debian-bookworm + - kolla-ansible-debian-bookworm-podman + - kolla-ansible-debian-bookworm-upgrade + - kolla-ansible-ubuntu-noble + - kolla-ansible-ubuntu-noble-upgrade + - kolla-ansible-ubuntu-noble-podman diff --git a/zuul.d/scenarios/bifrost.yaml b/zuul.d/scenarios/bifrost.yaml new file mode 100644 index 0000000000..9c6690052a --- /dev/null +++ b/zuul.d/scenarios/bifrost.yaml @@ -0,0 +1,31 @@ +--- +- job: + name: kolla-ansible-bifrost-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/bifrost/ + - ^tests/test-bifrost.sh + vars: + scenario: bifrost + scenario_images_core: + - ^bifrost + +- job: + name: kolla-ansible-debian-bookworm-bifrost + parent: kolla-ansible-bifrost-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-ubuntu-noble-bifrost + parent: kolla-ansible-bifrost-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-bifrost + description: | + Runs Kolla-Ansible Bifrost scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-bifrost + - kolla-ansible-ubuntu-noble-bifrost diff --git a/zuul.d/scenarios/cells.yaml b/zuul.d/scenarios/cells.yaml new file mode 100644 index 0000000000..ce061af6b6 --- /dev/null +++ b/zuul.d/scenarios/cells.yaml @@ -0,0 +1,34 @@ +--- +- job: + name: kolla-ansible-cells-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/nova/ + - ^ansible/roles/nova-cell/ + - ^ansible/roles/loadbalancer/ + - ^tests/test-core-openstack.sh + - ^tests/test-proxysql.sh + vars: + scenario: cells + scenario_images_extra: + - ^proxysql + +- job: + name: kolla-ansible-debian-bookworm-cells + parent: kolla-ansible-cells-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + +- job: + name: kolla-ansible-ubuntu-noble-cells + parent: kolla-ansible-cells-base + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + +- project-template: + name: kolla-ansible-scenario-cells + description: | + Runs Kolla-Ansible Nova Cells scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-cells + - kolla-ansible-ubuntu-noble-cells diff --git a/zuul.d/scenarios/cephadm.yaml b/zuul.d/scenarios/cephadm.yaml new file mode 100644 index 0000000000..7e5528e79c --- /dev/null +++ b/zuul.d/scenarios/cephadm.yaml @@ -0,0 +1,59 @@ +--- +- job: + name: kolla-ansible-cephadm-base + parent: kolla-ansible-base + voting: false + vars: + scenario: cephadm + scenario_images_extra: + - ^cinder + - ^redis + +- job: + name: kolla-ansible-debian-bookworm-cephadm + parent: kolla-ansible-cephadm-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + timeout: 10800 + +- job: + name: kolla-ansible-debian-bookworm-cephadm-upgrade + parent: kolla-ansible-debian-bookworm-cephadm + nodeset: kolla-ansible-debian-bookworm-multi-16GB + timeout: 10800 + +- job: + name: kolla-ansible-debian-bookworm-cephadm-upgrade-slurp + parent: kolla-ansible-debian-bookworm-cephadm-upgrade + nodeset: kolla-ansible-debian-bookworm-multi-16GB + timeout: 9000 + +- job: + name: kolla-ansible-ubuntu-noble-cephadm + parent: kolla-ansible-cephadm-base + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + timeout: 10800 + vars: + cephadm_use_package_from_distribution: true + +- job: + name: kolla-ansible-ubuntu-noble-cephadm-upgrade + parent: kolla-ansible-ubuntu-noble-cephadm + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + timeout: 10800 + +- job: + name: kolla-ansible-ubuntu-noble-cephadm-upgrade-slurp + parent: kolla-ansible-ubuntu-noble-cephadm-upgrade + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + timeout: 9000 + +- project-template: + name: kolla-ansible-scenario-cephadm + description: | + Runs Kolla-Ansible CephAdm scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-cephadm + - kolla-ansible-debian-bookworm-cephadm-upgrade + - kolla-ansible-ubuntu-noble-cephadm + - kolla-ansible-ubuntu-noble-cephadm-upgrade diff --git a/zuul.d/scenarios/container-engine-migration.yaml b/zuul.d/scenarios/container-engine-migration.yaml new file mode 100644 index 0000000000..f64cc72960 --- /dev/null +++ b/zuul.d/scenarios/container-engine-migration.yaml @@ -0,0 +1,42 @@ +--- +- job: + name: kolla-ansible-container-engine-migration-base + parent: kolla-ansible-base + voting: false + files: + - ^ansible/migrate-container-engine.yml + - ^ansible/roles/container-engine-migration/ + - ^tests/test-container-engine-migration.sh + vars: + scenario: container-engine-migration + +- job: + name: kolla-ansible-debian-container-engine-migration + parent: kolla-ansible-container-engine-migration-base + nodeset: kolla-ansible-debian-bookworm-16GB + +- job: + name: kolla-ansible-debian-container-engine-migration-multinode + parent: kolla-ansible-container-engine-migration-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + +- job: + name: kolla-ansible-ubuntu-container-engine-migration + parent: kolla-ansible-container-engine-migration-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- job: + name: kolla-ansible-ubuntu-container-engine-migration-multinode + parent: kolla-ansible-container-engine-migration-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + +- project-template: + name: kolla-ansible-scenario-container-engine-migration + description: | + Runs Kolla-Ansible container engine migration scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-container-engine-migration + - kolla-ansible-debian-container-engine-migration-multinode + - kolla-ansible-ubuntu-container-engine-migration + - kolla-ansible-ubuntu-container-engine-migration-multinode diff --git a/zuul.d/scenarios/haproxy-fqdn.yaml b/zuul.d/scenarios/haproxy-fqdn.yaml new file mode 100644 index 0000000000..672d7a8b5b --- /dev/null +++ b/zuul.d/scenarios/haproxy-fqdn.yaml @@ -0,0 +1,34 @@ +--- +- job: + name: kolla-ansible-haproxy-fqdn-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/haproxy/ + - ^ansible/roles/loadbalancer/ + - ^kolla_ansible/kolla_url.py + vars: + external_api_interface_name: vxlan2 + external_api_network_prefix: "192.0.3." + external_api_network_prefix_length: "24" + kolla_external_vip_address: "192.0.3.10" + scenario: haproxy + +- job: + name: kolla-ansible-debian-bookworm-haproxy-fqdn + parent: kolla-ansible-haproxy-fqdn-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-ubuntu-noble-haproxy-fqdn + parent: kolla-ansible-haproxy-fqdn-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-haproxy-fqdn + description: | + Runs Kolla-Ansible HAProxy FQDN scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-haproxy-fqdn + - kolla-ansible-ubuntu-noble-haproxy-fqdn diff --git a/zuul.d/scenarios/hashi-vault.yaml b/zuul.d/scenarios/hashi-vault.yaml new file mode 100644 index 0000000000..74c92c4454 --- /dev/null +++ b/zuul.d/scenarios/hashi-vault.yaml @@ -0,0 +1,35 @@ +--- +- job: + name: kolla-ansible-hashi-vault-base + parent: kolla-ansible-variables + run: tests/run-hashi-vault.yml + required-projects: + - openstack/kolla-ansible + - openstack/requirements + voting: false + files: + - ^requirements-core.yml + - ^tests/templates/(inventory|globals-default.j2) + - ^tests/(pre|run).yml + - ^kolla_ansible/ + - ^tests/run-hashi-vault.yml + - ^tests/test-hashicorp-vault-passwords.sh + +- job: + name: kolla-ansible-debian-bookworm-hashi-vault + parent: kolla-ansible-hashi-vault-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-ubuntu-noble-hashi-vault + parent: kolla-ansible-hashi-vault-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-hashi-vault + description: | + Runs Kolla-Ansible Hashicorp Vault scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-hashi-vault + - kolla-ansible-ubuntu-noble-hashi-vault diff --git a/zuul.d/scenarios/ipv6.yaml b/zuul.d/scenarios/ipv6.yaml new file mode 100644 index 0000000000..e45bf0cd6b --- /dev/null +++ b/zuul.d/scenarios/ipv6.yaml @@ -0,0 +1,38 @@ +--- +- job: + name: kolla-ansible-ipv6-base + parent: kolla-ansible-base + voting: false + vars: + address_family: 'ipv6' + api_network_prefix: "fd::" + api_network_prefix_length: "64" + kolla_internal_vip_address: "fd::ff:0" + neutron_external_network_prefix: "fd:1::" + neutron_external_network_prefix_length: "64" + neutron_tenant_network_prefix: "fd:f0::" + neutron_tenant_network_prefix_length: "64" + neutron_tenant_network_dns_server: 2001:4860:4860::8888 + scenario: ipv6 + scenario_images_extra: + - ^prometheus + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-ipv6 + parent: kolla-ansible-ipv6-base + nodeset: kolla-ansible-debian-bookworm-multi-8GB + +- job: + name: kolla-ansible-ubuntu-noble-ipv6 + parent: kolla-ansible-ipv6-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + +- project-template: + name: kolla-ansible-scenario-ipv6 + description: | + Runs Kolla-Ansible ipv6 scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-ipv6 + - kolla-ansible-ubuntu-noble-ipv6 diff --git a/zuul.d/scenarios/ironic.yaml b/zuul.d/scenarios/ironic.yaml new file mode 100644 index 0000000000..fbe177ff32 --- /dev/null +++ b/zuul.d/scenarios/ironic.yaml @@ -0,0 +1,53 @@ +--- +- job: + name: kolla-ansible-ironic-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/(ironic|neutron|nova|nova-cell)/ + - ^tests/deploy-tenks\.sh$ + - ^tests/templates/ironic-overrides\.j2$ + - ^tests/templates/tenks-deploy-config\.yml\.j2$ + - ^tests/test-dashboard\.sh$ + - ^tests/test-ironic\.sh$ + required-projects: + - openstack/tenks + vars: + scenario: ironic + scenario_images_extra: + - ^dnsmasq + - ^ironic + - ^iscsid + - ^prometheus + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-ironic + parent: kolla-ansible-ironic-base + nodeset: kolla-ansible-debian-bookworm-16GB + +- job: + name: kolla-ansible-debian-bookworm-ironic-upgrade + parent: kolla-ansible-debian-bookworm-ironic + nodeset: kolla-ansible-debian-bookworm-16GB + +- job: + name: kolla-ansible-ubuntu-noble-ironic + parent: kolla-ansible-ironic-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- job: + name: kolla-ansible-ubuntu-noble-ironic-upgrade + parent: kolla-ansible-ubuntu-noble-ironic + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-ironic + description: | + Runs Kolla-Ansible Ironic scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-ironic + - kolla-ansible-debian-bookworm-ironic-upgrade + - kolla-ansible-ubuntu-noble-ironic + - kolla-ansible-ubuntu-noble-ironic-upgrade diff --git a/zuul.d/scenarios/kvm.yaml b/zuul.d/scenarios/kvm.yaml new file mode 100644 index 0000000000..829021ad3b --- /dev/null +++ b/zuul.d/scenarios/kvm.yaml @@ -0,0 +1,29 @@ +--- +- job: + name: kolla-ansible-kvm-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/(nova-cell)/ + - ^tests/templates/nova-compute-overrides.j2 + vars: + virt_type: kvm + +- job: + name: kolla-ansible-debian-bookworm-kvm + parent: kolla-ansible-kvm-base + nodeset: kolla-ansible-debian-bookworm-nested-virt + +- job: + name: kolla-ansible-ubuntu-noble-kvm + parent: kolla-ansible-kvm-base + nodeset: kolla-ansible-ubuntu-noble-nested-virt + +- project-template: + name: kolla-ansible-scenario-kvm + description: | + Runs Kolla-Ansible KVM scenario jobs. + check: + jobs: + - kolla-ansible-debian-bookworm-kvm + - kolla-ansible-ubuntu-noble-kvm diff --git a/zuul.d/scenarios/lets-encrypt.yaml b/zuul.d/scenarios/lets-encrypt.yaml new file mode 100644 index 0000000000..54d054d456 --- /dev/null +++ b/zuul.d/scenarios/lets-encrypt.yaml @@ -0,0 +1,37 @@ +--- +- job: + name: kolla-ansible-lets-encrypt-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/common/templates/conf/input/11-letsencrypt.conf.j2 + - ^ansible/roles/(letsencrypt|loadbalancer)/ + - ^tests/test-core-openstack.sh + - ^tests/test-dashboard.sh + - ^tests/deploy.sh + vars: + scenario: lets-encrypt + scenario_images_extra: + - ^letsencrypt + - ^haproxy + tls_enabled: true + le_enabled: true + +- job: + name: kolla-ansible-debian-bookworm-lets-encrypt + parent: kolla-ansible-lets-encrypt-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + +- job: + name: kolla-ansible-ubuntu-noble-lets-encrypt + parent: kolla-ansible-lets-encrypt-base + nodeset: 
kolla-ansible-ubuntu-noble-multi-16GB + +- project-template: + name: kolla-ansible-scenario-lets-encrypt + description: | + Runs Kolla-Ansible Let's Encrypt scenario jobs. + check: + jobs: + - kolla-ansible-debian-bookworm-lets-encrypt + - kolla-ansible-ubuntu-noble-lets-encrypt diff --git a/zuul.d/scenarios/magnum.yaml b/zuul.d/scenarios/magnum.yaml new file mode 100644 index 0000000000..cd19d79a05 --- /dev/null +++ b/zuul.d/scenarios/magnum.yaml @@ -0,0 +1,36 @@ +--- +- job: + name: kolla-ansible-magnum-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/(designate|magnum|trove)/ + - ^tests/test-dashboard.sh + - ^tests/test-magnum.sh + vars: + scenario: magnum + scenario_images_extra: + - ^designate + - ^magnum + - ^trove + # TODO: Remove after adding TLS support for Trove + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-magnum + parent: kolla-ansible-magnum-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-ubuntu-noble-magnum + parent: kolla-ansible-magnum-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-magnum + description: | + Runs Kolla-Ansible Magnum scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-magnum + - kolla-ansible-ubuntu-noble-magnum diff --git a/zuul.d/scenarios/mariadb.yaml b/zuul.d/scenarios/mariadb.yaml new file mode 100644 index 0000000000..776a9999cc --- /dev/null +++ b/zuul.d/scenarios/mariadb.yaml @@ -0,0 +1,41 @@ +--- +- job: + name: kolla-ansible-mariadb-base + parent: kolla-ansible-scenario-base + voting: true + files: !inherit + - ^ansible/roles/(loadbalancer|mariadb|proxysql-config)/ + - ^tests/test-mariadb.sh + vars: + scenario: mariadb + scenario_images_core: + - ^cron + - ^fluentd + - ^haproxy + - ^keepalived + - ^kolla-toolbox + - ^mariadb + - ^proxysql + +- job: + name: kolla-ansible-debian-bookworm-mariadb + parent: kolla-ansible-mariadb-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + +- job: + name: kolla-ansible-ubuntu-noble-mariadb + parent: kolla-ansible-mariadb-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + +- project-template: + name: kolla-ansible-scenario-mariadb + description: | + Runs Kolla-Ansible MariaDB scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-mariadb + - kolla-ansible-ubuntu-noble-mariadb + gate: + jobs: + - kolla-ansible-debian-bookworm-mariadb + - kolla-ansible-ubuntu-noble-mariadb diff --git a/zuul.d/scenarios/masakari.yaml b/zuul.d/scenarios/masakari.yaml new file mode 100644 index 0000000000..ed6182b8b2 --- /dev/null +++ b/zuul.d/scenarios/masakari.yaml @@ -0,0 +1,36 @@ +--- +- job: + name: kolla-ansible-masakari-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/masakari/ + - ^ansible/roles/hacluster/ + - ^tests/test-masakari.sh + - ^tests/test-dashboard.sh + vars: + scenario: masakari + scenario_images_extra: + - ^masakari + - ^hacluster + # TODO: Remove once Masakari has TLS support + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-masakari + parent: kolla-ansible-masakari-base + nodeset: kolla-ansible-debian-bookworm-masakari-8GB + +- job: + name: kolla-ansible-ubuntu-noble-masakari + parent: kolla-ansible-masakari-base + nodeset: kolla-ansible-ubuntu-noble-masakari-8GB + +- project-template: + name: kolla-ansible-scenario-masakari + description: | + Runs Kolla-Ansible Masakari scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-masakari + - kolla-ansible-ubuntu-noble-masakari diff --git a/zuul.d/scenarios/nfv.yaml b/zuul.d/scenarios/nfv.yaml new file mode 100644 index 0000000000..0d8145cc7b --- /dev/null +++ b/zuul.d/scenarios/nfv.yaml @@ -0,0 +1,37 @@ +--- +- job: + name: kolla-ansible-scenario-nfv-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/(aodh|barbican|heat|mistral|redis|tacker)/ + - ^tests/test-scenario-nfv.sh + - ^tests/test-dashboard.sh + vars: + scenario: nfv + scenario_images_extra: + - ^aodh + - ^tacker + - ^mistral + - ^redis + - ^barbican + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-nfv + parent: kolla-ansible-scenario-nfv-base + nodeset: kolla-ansible-debian-bookworm-multi-8GB + +- job: + name: kolla-ansible-ubuntu-noble-nfv + parent: kolla-ansible-scenario-nfv-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + +- project-template: + name: kolla-ansible-scenario-nfv + description: | + Runs Kolla-Ansible NFV scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-nfv + - kolla-ansible-ubuntu-noble-nfv diff --git a/zuul.d/scenarios/octavia.yaml b/zuul.d/scenarios/octavia.yaml new file mode 100644 index 0000000000..8b85f132de --- /dev/null +++ b/zuul.d/scenarios/octavia.yaml @@ -0,0 +1,34 @@ +--- +- job: + name: kolla-ansible-octavia-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/(octavia|octavia-certificates)/ + - ^tests/test-dashboard.sh + - ^tests/test-octavia.sh + vars: + scenario: octavia + scenario_images_extra: + - ^redis + - ^octavia + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-octavia + parent: kolla-ansible-octavia-base + nodeset: kolla-ansible-debian-bookworm-16GB + +- job: + name: kolla-ansible-ubuntu-noble-octavia + parent: kolla-ansible-octavia-base + nodeset: kolla-ansible-ubuntu-noble-16GB + +- project-template: + name: kolla-ansible-scenario-octavia + description: | + Runs Kolla-Ansible Octavia scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-octavia + - kolla-ansible-ubuntu-noble-octavia diff --git a/zuul.d/scenarios/ovn.yaml b/zuul.d/scenarios/ovn.yaml new file mode 100644 index 0000000000..9c68d3504c --- /dev/null +++ b/zuul.d/scenarios/ovn.yaml @@ -0,0 +1,49 @@ +--- +- job: + name: kolla-ansible-ovn-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/(neutron|octavia|openvswitch|ovn-controller|ovn-db)/ + - ^tests/test-ovn.sh + - ^tests/test-core-openstack.sh + - ^tests/reconfigure.sh + vars: + scenario: ovn + scenario_images_extra: + - ^redis + - ^octavia + - ^ovn + +- job: + name: kolla-ansible-debian-bookworm-ovn + parent: kolla-ansible-ovn-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + +- job: + name: kolla-ansible-debian-bookworm-ovn-upgrade + parent: kolla-ansible-debian-bookworm-ovn + nodeset: kolla-ansible-debian-bookworm-multi-16GB + timeout: 10800 + +- job: + name: kolla-ansible-ubuntu-noble-ovn + parent: kolla-ansible-ovn-base + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + +- job: + name: kolla-ansible-ubuntu-noble-ovn-upgrade + parent: kolla-ansible-ubuntu-noble-ovn + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + timeout: 10800 + +- project-template: + name: kolla-ansible-scenario-ovn + description: | + Runs Kolla-Ansible OVN scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-ovn + - kolla-ansible-debian-bookworm-ovn-upgrade + - kolla-ansible-ubuntu-noble-ovn + - kolla-ansible-ubuntu-noble-ovn-upgrade diff --git a/zuul.d/scenarios/prometheus-opensearch.yaml b/zuul.d/scenarios/prometheus-opensearch.yaml new file mode 100644 index 0000000000..9cd9938496 --- /dev/null +++ b/zuul.d/scenarios/prometheus-opensearch.yaml @@ -0,0 +1,55 @@ +--- +- job: + name: kolla-ansible-prometheus-opensearch-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/site.yml + - ^ansible/roles/(common|opensearch|grafana|prometheus)/ + - ^tests/test-prometheus-opensearch.sh + vars: + scenario: prometheus-opensearch + scenario_images_core: + - ^cron + - ^fluentd + - ^grafana + - ^haproxy + - ^keepalived + - ^kolla-toolbox + - ^mariadb + - ^memcached + - ^opensearch + - ^prometheus + - ^proxysql + - ^rabbitmq + +- job: + name: kolla-ansible-debian-bookworm-prometheus-opensearch + parent: kolla-ansible-prometheus-opensearch-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-debian-bookworm-prometheus-opensearch-upgrade + parent: kolla-ansible-debian-bookworm-prometheus-opensearch + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-ubuntu-noble-prometheus-opensearch + parent: kolla-ansible-prometheus-opensearch-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- job: + name: kolla-ansible-ubuntu-noble-prometheus-opensearch-upgrade + parent: kolla-ansible-ubuntu-noble-prometheus-opensearch + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-prometheus-opensearch + description: | + Runs Kolla-Ansible Prometheus OpenSearch scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-prometheus-opensearch + - kolla-ansible-debian-bookworm-prometheus-opensearch-upgrade + - kolla-ansible-ubuntu-noble-prometheus-opensearch + - kolla-ansible-ubuntu-noble-prometheus-opensearch-upgrade diff --git a/zuul.d/scenarios/skyline.yaml b/zuul.d/scenarios/skyline.yaml new file mode 100644 index 0000000000..c25bbdd96d --- /dev/null +++ b/zuul.d/scenarios/skyline.yaml @@ -0,0 +1,51 @@ +--- +- job: + name: kolla-ansible-skyline-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/skyline/ + - ^tests/test-skyline.sh + vars: + scenario: skyline + scenario_images_extra: + - ^skyline + +- job: + name: kolla-ansible-skyline-sso-base + parent: kolla-ansible-skyline-base + files: !inherit + - ^tests/test-skyline-sso.sh + vars: + scenario: skyline-sso + +- job: + name: kolla-ansible-debian-bookworm-skyline + parent: kolla-ansible-skyline-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-debian-bookworm-skyline-sso + parent: kolla-ansible-skyline-sso-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-ubuntu-noble-skyline + parent: kolla-ansible-skyline-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- job: + name: kolla-ansible-ubuntu-noble-skyline-sso + parent: kolla-ansible-skyline-sso-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-skyline + description: | + Runs Kolla-Ansible Skyline scenario jobs. 
+    check:
+      jobs:
+        - kolla-ansible-debian-bookworm-skyline
+        - kolla-ansible-debian-bookworm-skyline-sso
+        - kolla-ansible-ubuntu-noble-skyline
+        - kolla-ansible-ubuntu-noble-skyline-sso
diff --git a/zuul.d/scenarios/telemetry.yaml b/zuul.d/scenarios/telemetry.yaml
new file mode 100644
index 0000000000..ec20cd708f
--- /dev/null
+++ b/zuul.d/scenarios/telemetry.yaml
@@ -0,0 +1,33 @@
+---
+- job:
+    name: kolla-ansible-telemetry-base
+    parent: kolla-ansible-scenario-base
+    voting: false
+    files: !inherit
+      - ^ansible/roles/(aodh|ceilometer|gnocchi)/
+      - ^tests/test-telemetry.sh
+    vars:
+      scenario: telemetry
+      scenario_images_extra:
+        - ^aodh
+        - ^ceilometer
+        - ^gnocchi
+
+- job:
+    name: kolla-ansible-debian-bookworm-telemetry
+    parent: kolla-ansible-telemetry-base
+    nodeset: kolla-ansible-debian-bookworm-8GB
+
+- job:
+    name: kolla-ansible-ubuntu-noble-telemetry
+    parent: kolla-ansible-telemetry-base
+    nodeset: kolla-ansible-ubuntu-noble-8GB
+
+- project-template:
+    name: kolla-ansible-scenario-telemetry
+    description: |
+      Runs Kolla-Ansible Telemetry scenario jobs.
+ check: + jobs: + - kolla-ansible-debian-bookworm-telemetry + - kolla-ansible-ubuntu-noble-telemetry diff --git a/zuul.d/scenarios/venus.yaml b/zuul.d/scenarios/venus.yaml new file mode 100644 index 0000000000..579aaca6f9 --- /dev/null +++ b/zuul.d/scenarios/venus.yaml @@ -0,0 +1,42 @@ +--- +- job: + name: kolla-ansible-venus-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/(common|opensearch|venus)/ + - ^tests/test-venus.sh + vars: + scenario: venus + scenario_images_core: + - ^cron + - ^opensearch + - ^fluentd + - ^haproxy + - ^keepalived + - ^keystone + - ^kolla-toolbox + - ^mariadb + - ^memcached + - ^rabbitmq + - ^venus + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-venus + parent: kolla-ansible-venus-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-ubuntu-noble-venus + parent: kolla-ansible-venus-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-venus + description: | + Runs Kolla-Ansible Venus scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-venus + - kolla-ansible-ubuntu-noble-venus diff --git a/zuul.d/scenarios/zun.yaml b/zuul.d/scenarios/zun.yaml new file mode 100644 index 0000000000..8367103ad8 --- /dev/null +++ b/zuul.d/scenarios/zun.yaml @@ -0,0 +1,39 @@ +--- +- job: + name: kolla-ansible-zun-base + parent: kolla-ansible-scenario-base + voting: false + files: !inherit + - ^ansible/roles/(zun|kuryr|etcd|cinder|iscsi)/ + - ^tests/setup_disks.sh + - ^tests/test-core-openstack.sh + - ^tests/test-zun.sh + - ^tests/test-dashboard.sh + vars: + scenario: zun + scenario_images_extra: + - ^zun + - ^kuryr + - ^etcd + - ^cinder + - ^iscsid + - ^tgtd + +- job: + name: kolla-ansible-debian-zun + parent: kolla-ansible-zun-base + nodeset: kolla-ansible-debian-bookworm-multi-8GB + +- job: + name: kolla-ansible-ubuntu-zun + parent: kolla-ansible-zun-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + +- project-template: + name: kolla-ansible-scenario-zun + description: | + Runs Kolla-Ansible Zun scenario jobs. + check: + jobs: + - kolla-ansible-debian-zun + - kolla-ansible-ubuntu-zun From a7c8d80db679816e9dbc86b4e944684034f9022b Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 23 Sep 2025 10:32:33 +0200 Subject: [PATCH 017/165] neutron: Always run neutron-rpc-server There's a million combinations of agents and mechanism drivers that require neutron-rpc-server - let's run it always instead of breaking deployments on current enabled: clause. 
Change-Id: I66dae0f64e0a80e5f35ff6176956a44341411200 Signed-off-by: Michal Nasiadka --- ansible/roles/neutron/defaults/main.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml index 47d3e05a26..2a9401772a 100644 --- a/ansible/roles/neutron/defaults/main.yml +++ b/ansible/roles/neutron/defaults/main.yml @@ -33,7 +33,7 @@ neutron_services: neutron-rpc-server: container_name: "neutron_rpc_server" image: "{{ neutron_rpc_server_image_full }}" - enabled: "{{ neutron_plugin_agent in ['linuxbridge', 'openvswitch'] }}" + enabled: true group: "neutron-rpc-server" host_in_groups: "{{ inventory_hostname in groups['neutron-rpc-server'] }}" volumes: "{{ neutron_rpc_server_default_volumes + neutron_rpc_server_extra_volumes }}" @@ -135,7 +135,6 @@ neutron_services: dimensions: "{{ neutron_l3_agent_dimensions }}" healthcheck: "{{ neutron_l3_agent_healthcheck }}" pid_mode: "{{ 'host' if neutron_agents_wrappers | bool else '' }}" - neutron-sriov-agent: container_name: "neutron_sriov_agent" image: "{{ neutron_sriov_agent_image_full }}" From 7575f32b04cfedf0ebbdfcef280fd534d5c4918c Mon Sep 17 00:00:00 2001 From: Bartosz Bezak Date: Tue, 23 Sep 2025 15:34:34 +0200 Subject: [PATCH 018/165] CI: run kolla-ansible check in upgrade jobs too Change-Id: Ia0647484ea9e868f7ff41f152cffb1c72c941284 Signed-off-by: Bartosz Bezak --- tests/deploy.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/deploy.sh b/tests/deploy.sh index e81dbf5dcc..419c4778af 100755 --- a/tests/deploy.sh +++ b/tests/deploy.sh @@ -71,9 +71,9 @@ function deploy { if [[ $HAS_UPGRADE == 'no' ]]; then kolla-ansible validate-config -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/validate-config - #TODO(r-krcek) check can be moved out of the if statement in the flamingo cycle - kolla-ansible check -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/check fi + + kolla-ansible check -i ${RAW_INVENTORY} -vvv &> 
/tmp/logs/ansible/check } From 571d3a4ed47d89b1ed7ad0d9828eafb1deb3d4f0 Mon Sep 17 00:00:00 2001 From: Bartosz Bezak Date: Tue, 23 Sep 2025 15:44:45 +0200 Subject: [PATCH 019/165] CI: drop rabbitmq migration to durable queues in upgrade job Change-Id: I6086059cac3da03336998870632c18ddd4eb7442 Signed-off-by: Bartosz Bezak --- tests/upgrade.sh | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/tests/upgrade.sh b/tests/upgrade.sh index 71b8fe737e..eeea9d8188 100755 --- a/tests/upgrade.sh +++ b/tests/upgrade.sh @@ -18,36 +18,6 @@ function upgrade { # proper versions (ansible-collection-kolla is different for new version, potentionally # also dependencies). kolla-ansible bootstrap-servers -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-bootstrap - # Skip rabbitmq-ha-precheck before the queues are migrated. - kolla-ansible prechecks -i ${RAW_INVENTORY} --skip-tags rabbitmq-ha-precheck -vvv &> /tmp/logs/ansible/upgrade-prechecks-pre-rabbitmq - - # NOTE(SvenKieske): As om_enable_rabbitmq_transient_quorum_queue now also - # enables quorum_queues for fanout/reply queues in Epoxy, we need - # to perform a migration to durable queues. - # TODO(SvenKieske): Remove these steps in F Cycle. 
- SERVICE_TAGS="heat,keystone,neutron,nova" - if [[ $SCENARIO == "zun" ]] || [[ $SCENARIO == "cephadm" ]]; then - SERVICE_TAGS+=",cinder" - fi - if [[ $SCENARIO == "nfv" ]]; then - SERVICE_TAGS+=",barbican" - fi - if [[ $SCENARIO == "ironic" ]]; then - SERVICE_TAGS+=",ironic" - fi - if [[ $SCENARIO == "masakari" ]]; then - SERVICE_TAGS+=",masakari" - fi - if [[ $SCENARIO == "ovn" ]] || [[ $SCENARIO == "octavia" ]]; then - SERVICE_TAGS+=",octavia" - fi - if [[ $SCENARIO == "magnum" ]]; then - SERVICE_TAGS+=",magnum,designate" - fi - kolla-ansible stop -i ${RAW_INVENTORY} -vvv --tags $SERVICE_TAGS --yes-i-really-really-mean-it --ignore-missing &> /tmp/logs/ansible/stop - kolla-ansible genconfig -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/genconfig - kolla-ansible rabbitmq-reset-state -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/rabbitmq-reset-state - # Include rabbitmq-ha-precheck this time to confirm all queues have migrated. kolla-ansible prechecks -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-prechecks kolla-ansible pull -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/pull-upgrade From 41b416667cd920303fee357d3a4e4630b6c1ce24 Mon Sep 17 00:00:00 2001 From: Antony Messerli Date: Tue, 12 Nov 2024 13:05:51 -0600 Subject: [PATCH 020/165] Enables override of Octavia notification topics Octavia was missing an override for enabling notification topics, this aligns the overrides with how all the other project roles handle the topic. 
Closes-Bug: #2087997 Change-Id: Icbe25c0f4107128db899c0111af3acf3c513008f Signed-off-by: Antony Messerli --- ansible/roles/octavia/defaults/main.yml | 9 +++++++++ ansible/roles/octavia/templates/octavia.conf.j2 | 6 ++++++ .../notes/octavia-notifications-98a91ab02d9cbee6.yaml | 7 +++++++ 3 files changed, 22 insertions(+) create mode 100644 releasenotes/notes/octavia-notifications-98a91ab02d9cbee6.yaml diff --git a/ansible/roles/octavia/defaults/main.yml b/ansible/roles/octavia/defaults/main.yml index df3d39ae62..e683e67669 100644 --- a/ansible/roles/octavia/defaults/main.yml +++ b/ansible/roles/octavia/defaults/main.yml @@ -277,6 +277,15 @@ octavia_ks_users: password: "{{ octavia_keystone_password }}" role: "admin" +#################### +# Notification +#################### +octavia_notification_topics: + - name: notifications + enabled: "{{ enable_ceilometer | bool }}" + +octavia_enabled_notification_topics: "{{ octavia_notification_topics | selectattr('enabled', 'equalto', true) | list }}" + #################### # Kolla #################### diff --git a/ansible/roles/octavia/templates/octavia.conf.j2 b/ansible/roles/octavia/templates/octavia.conf.j2 index 552b4cbfec..e5ca5bfd48 100644 --- a/ansible/roles/octavia/templates/octavia.conf.j2 +++ b/ansible/roles/octavia/templates/octavia.conf.j2 @@ -122,6 +122,12 @@ rpc_thread_pool_size = 2 [oslo_messaging_notifications] transport_url = {{ notify_transport_url }} +{% if octavia_enabled_notification_topics %} +driver = messagingv2 +topics = {{ octavia_enabled_notification_topics | map(attribute='name') | join(',') }} +{% else %} +driver = noop +{% endif %} [oslo_messaging_rabbit] use_queue_manager = true diff --git a/releasenotes/notes/octavia-notifications-98a91ab02d9cbee6.yaml b/releasenotes/notes/octavia-notifications-98a91ab02d9cbee6.yaml new file mode 100644 index 0000000000..ca6a5f4ac1 --- /dev/null +++ b/releasenotes/notes/octavia-notifications-98a91ab02d9cbee6.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Adds a 
missing override for ``octavia_notification_topics`` so that + operators can add their own notification topics for Octavia. By + default it will send notifications to ceilometer when ceilometer + is enabled. From 47c7810e3e93f440c08a490b5a00705404a01573 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 17 Sep 2025 11:28:08 +0200 Subject: [PATCH 021/165] CI: check-logs.sh add wait for OpenSearch connection Change-Id: Ib8a93336e41f7fa326d3a4be53f757fee3923643 Signed-off-by: Michal Nasiadka --- tests/check-logs.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/check-logs.sh b/tests/check-logs.sh index d78da1ee17..cb8b763964 100755 --- a/tests/check-logs.sh +++ b/tests/check-logs.sh @@ -196,6 +196,22 @@ if sudo test -d /var/log/kolla; then fi done + # NOTE: Check if OpenSearch output plugin has connected in OpenSearch scenarios, otherwise + # check_fluentd_missing_logs will fail because fluentd will only parse files when + # output plugin is working. + retries=0 + retries_max=10 + until [[ $(sudo tail -n 5 /var/log/kolla/fluentd/fluentd.log | grep "Could not communicate to OpenSearch" | wc -l) -eq 0 ]]; do + echo "Found 'Could not communicate to OpenSearch' in last 5 lines of fluentd.log, sleeping 30 seconds" + retries=$((retries + 1)) + if [[ $retries != $retries_max ]]; then + sleep 30 + else + echo "Found 'Could not communicate to OpenSearch' in last 5 lines of fluentd.log after 10 retries." | tee -a $fluentd_error_summary_file + break + fi + done + if check_fluentd_missing_logs >/dev/null; then any_critical=1 echo "(critical) Found some missing log files in fluentd logs. 
Matches in $fluentd_error_summary_file" From 48ec5b8bc6bb6353cf9385a8b59f6902c265453d Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 25 Sep 2025 10:09:20 +0200 Subject: [PATCH 022/165] CI: Remove legacy centos10 job after Kolla CI switch Change-Id: Iaa2d8654367edb08d0b35d8fe6ffc27602264931 Signed-off-by: Michal Nasiadka --- zuul.d/scenarios/aio.yaml | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index aa17cf172e..5b43461b27 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -1,15 +1,4 @@ --- -# NOTE(mnasiadka): To be removed after update on kolla side -- job: - name: kolla-ansible-centos10s - parent: kolla-ansible-base - nodeset: kolla-ansible-centos-10s-8GB - voting: false - vars: - base_distro: centos - kolla_build_images: true - - - job: name: kolla-ansible-centos-10s parent: kolla-ansible-base @@ -18,11 +7,6 @@ vars: kolla_build_images: true -- job: - name: kolla-ansible-centos10s-aarch64 - parent: kolla-ansible-centos10s - nodeset: kolla-ansible-centos-10s-aarch64-8GB - - job: name: kolla-ansible-centos-10s-aarch64 parent: kolla-ansible-centos-10s From d0505893de692f5b260ebdb4c9a73c1087842e38 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 24 Sep 2025 18:21:03 +0200 Subject: [PATCH 023/165] group_vars: Reorganize huge all.yml into smaller files This will help us to trigger scenario jobs on changes in given group_vars files. Changes to Zuul config to trigger given scenarios will be posted as a follow-up. 
Change-Id: I3f17adf655d2d0c1fc2969153dddb0c72b11397a Signed-off-by: Michal Nasiadka --- ansible/group_vars/all.yml | 1413 ----------------- ansible/group_vars/all/aodh.yml | 11 + ansible/group_vars/all/barbican.yml | 17 + ansible/group_vars/all/bifrost.yml | 3 + ansible/group_vars/all/blazar.yml | 11 + ansible/group_vars/all/ceilometer.yml | 4 + ansible/group_vars/all/ceph-rgw.yml | 10 + ansible/group_vars/all/ceph.yml | 21 + ansible/group_vars/all/cinder.yml | 40 + ansible/group_vars/all/cloudkitty.yml | 20 + ansible/group_vars/all/collectd.yml | 4 + ansible/group_vars/all/common.yml | 366 +++++ ansible/group_vars/all/cyborg.yml | 8 + ansible/group_vars/all/database.yml | 11 + ansible/group_vars/all/designate.yml | 33 + ansible/group_vars/all/etcd.yml | 7 + ansible/group_vars/all/fluentd.yml | 6 + ansible/group_vars/all/glance.yml | 28 + ansible/group_vars/all/gnocchi.yml | 21 + ansible/group_vars/all/grafana.yml | 11 + ansible/group_vars/all/hacluster.yml | 9 + ansible/group_vars/all/haproxy.yml | 48 + ansible/group_vars/all/heat.yml | 17 + ansible/group_vars/all/horizon.yml | 49 + ansible/group_vars/all/influxdb.yml | 12 + ansible/group_vars/all/ironic.yml | 33 + ansible/group_vars/all/iscsi.yml | 4 + ansible/group_vars/all/keepalived.yml | 8 + ansible/group_vars/all/keystone.yml | 86 + ansible/group_vars/all/kuryr.yml | 4 + ansible/group_vars/all/letsencrypt.yml | 13 + ansible/group_vars/all/loadbalancer.yml | 2 + ansible/group_vars/all/magnum.yml | 10 + ansible/group_vars/all/manila.yml | 18 + ansible/group_vars/all/mariadb.yml | 37 + ansible/group_vars/all/masakari.yml | 13 + ansible/group_vars/all/memcached.yml | 10 + ansible/group_vars/all/mistral.yml | 10 + ansible/group_vars/all/multipathd.yml | 2 + ansible/group_vars/all/neutron.yml | 75 + ansible/group_vars/all/nova.yml | 69 + ansible/group_vars/all/octavia.yml | 33 + ansible/group_vars/all/opensearch.yml | 33 + ansible/group_vars/all/openvswitch.yml | 14 + ansible/group_vars/all/ovn.yml | 16 + 
ansible/group_vars/all/placement.yml | 13 + ansible/group_vars/all/prometheus.yml | 78 + ansible/group_vars/all/proxysql.yml | 4 + ansible/group_vars/all/rabbitmq.yml | 19 + ansible/group_vars/all/redis.yml | 11 + ansible/group_vars/all/s3.yml | 7 + ansible/group_vars/all/skyline.yml | 18 + ansible/group_vars/all/tacker.yml | 10 + ansible/group_vars/all/telegraf.yml | 9 + ansible/group_vars/all/trove.yml | 11 + ansible/group_vars/all/venus.yml | 10 + ansible/group_vars/all/watcher.yml | 10 + ansible/group_vars/all/zun.yml | 31 + .../ansible-python-interpreter.yml} | 0 ansible/module_utils/kolla_podman_worker.py | 2 +- .../reference/containers/kuryr-guide.rst | 3 +- .../reference/databases/mariadb-guide.rst | 6 +- .../orchestration-and-nfv/tacker-guide.rst | 2 +- doc/source/user/multinode.rst | 2 +- doc/source/user/troubleshooting.rst | 16 +- etc/kolla/globals.yml | 2 +- zuul.d/base.yaml | 2 +- 67 files changed, 1506 insertions(+), 1430 deletions(-) delete mode 100644 ansible/group_vars/all.yml create mode 100644 ansible/group_vars/all/aodh.yml create mode 100644 ansible/group_vars/all/barbican.yml create mode 100644 ansible/group_vars/all/bifrost.yml create mode 100644 ansible/group_vars/all/blazar.yml create mode 100644 ansible/group_vars/all/ceilometer.yml create mode 100644 ansible/group_vars/all/ceph-rgw.yml create mode 100644 ansible/group_vars/all/ceph.yml create mode 100644 ansible/group_vars/all/cinder.yml create mode 100644 ansible/group_vars/all/cloudkitty.yml create mode 100644 ansible/group_vars/all/collectd.yml create mode 100644 ansible/group_vars/all/common.yml create mode 100644 ansible/group_vars/all/cyborg.yml create mode 100644 ansible/group_vars/all/database.yml create mode 100644 ansible/group_vars/all/designate.yml create mode 100644 ansible/group_vars/all/etcd.yml create mode 100644 ansible/group_vars/all/fluentd.yml create mode 100644 ansible/group_vars/all/glance.yml create mode 100644 ansible/group_vars/all/gnocchi.yml create mode 100644 
ansible/group_vars/all/grafana.yml create mode 100644 ansible/group_vars/all/hacluster.yml create mode 100644 ansible/group_vars/all/haproxy.yml create mode 100644 ansible/group_vars/all/heat.yml create mode 100644 ansible/group_vars/all/horizon.yml create mode 100644 ansible/group_vars/all/influxdb.yml create mode 100644 ansible/group_vars/all/ironic.yml create mode 100644 ansible/group_vars/all/iscsi.yml create mode 100644 ansible/group_vars/all/keepalived.yml create mode 100644 ansible/group_vars/all/keystone.yml create mode 100644 ansible/group_vars/all/kuryr.yml create mode 100644 ansible/group_vars/all/letsencrypt.yml create mode 100644 ansible/group_vars/all/loadbalancer.yml create mode 100644 ansible/group_vars/all/magnum.yml create mode 100644 ansible/group_vars/all/manila.yml create mode 100644 ansible/group_vars/all/mariadb.yml create mode 100644 ansible/group_vars/all/masakari.yml create mode 100644 ansible/group_vars/all/memcached.yml create mode 100644 ansible/group_vars/all/mistral.yml create mode 100644 ansible/group_vars/all/multipathd.yml create mode 100644 ansible/group_vars/all/neutron.yml create mode 100644 ansible/group_vars/all/nova.yml create mode 100644 ansible/group_vars/all/octavia.yml create mode 100644 ansible/group_vars/all/opensearch.yml create mode 100644 ansible/group_vars/all/openvswitch.yml create mode 100644 ansible/group_vars/all/ovn.yml create mode 100644 ansible/group_vars/all/placement.yml create mode 100644 ansible/group_vars/all/prometheus.yml create mode 100644 ansible/group_vars/all/proxysql.yml create mode 100644 ansible/group_vars/all/rabbitmq.yml create mode 100644 ansible/group_vars/all/redis.yml create mode 100644 ansible/group_vars/all/s3.yml create mode 100644 ansible/group_vars/all/skyline.yml create mode 100644 ansible/group_vars/all/tacker.yml create mode 100644 ansible/group_vars/all/telegraf.yml create mode 100644 ansible/group_vars/all/trove.yml create mode 100644 ansible/group_vars/all/venus.yml create mode 
100644 ansible/group_vars/all/watcher.yml create mode 100644 ansible/group_vars/all/zun.yml rename ansible/group_vars/{baremetal.yml => baremetal/ansible-python-interpreter.yml} (100%) diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml deleted file mode 100644 index 435937c58e..0000000000 --- a/ansible/group_vars/all.yml +++ /dev/null @@ -1,1413 +0,0 @@ ---- -# The options in this file can be overridden in 'globals.yml' - -# The "temp" files that are created before merge need to stay persistent due -# to the fact that ansible will register a "change" if it has to create them -# again. Persistent files allow for idempotency -container_config_directory: "/var/lib/kolla/config_files" - -# The directory on the deploy host containing globals.yml. -node_config: "{{ CONFIG_DIR | default('/etc/kolla') }}" - -# The directory to merge custom config files the kolla's config files -node_custom_config: "{{ node_config }}/config" - -# The directory to store the config files on the destination node -node_config_directory: "/etc/kolla" - -# The group which own node_config_directory, you can use a non-root -# user to deploy kolla -config_owner_user: "root" -config_owner_group: "root" - -################### -# Ansible options -################### - -# This variable is used as the "filter" argument for the setup module. For -# instance, if one wants to remove/ignore all Neutron interface facts: -# kolla_ansible_setup_filter: "ansible_[!qt]*" -# By default, we do not provide a filter. -kolla_ansible_setup_filter: "{{ omit }}" - -# This variable is used as the "gather_subset" argument for the setup module. -# For instance, if one wants to avoid collecting facts via facter: -# kolla_ansible_setup_gather_subset: "all,!facter" -# By default, we do not provide a gather subset. -kolla_ansible_setup_gather_subset: "{{ omit }}" - -# This variable determines which hosts require facts when using --limit. 
Facts -# will be gathered using delegation for hosts in this list that are not -# included in the limit. -# By default, this list includes all hosts. -kolla_ansible_delegate_facts_hosts: "{{ groups['all'] }}" - -################### -# Kolla options -################### -# Valid options are [ COPY_ONCE, COPY_ALWAYS ] -config_strategy: "COPY_ALWAYS" - -# Valid options are ['centos', 'debian', 'rocky', 'ubuntu'] -kolla_base_distro: "rocky" - -kolla_internal_vip_address: "{{ kolla_internal_address | default('') }}" -kolla_internal_fqdn: "{{ kolla_internal_vip_address }}" -kolla_external_vip_address: "{{ kolla_internal_vip_address }}" -kolla_same_external_internal_vip: "{{ kolla_external_vip_address | ansible.utils.ipaddr('address') == kolla_internal_vip_address | ansible.utils.ipaddr('address') }}" -kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_same_external_internal_vip | bool else kolla_external_vip_address }}" - -kolla_dev_repos_directory: "/opt/stack/" -kolla_dev_repos_git: "https://opendev.org/openstack" -kolla_dev_repos_pull: "no" -kolla_dev_mode: "no" -kolla_source_version: "{% if openstack_release == 'master' %}master{% else %}stable/{{ openstack_release }}{% endif %}" - -# Proxy settings for containers such as magnum that need internet access -container_http_proxy: "" -container_https_proxy: "" -container_no_proxy: "localhost,127.0.0.1" - -container_proxy_no_proxy_entries: - - "{{ container_no_proxy }}" - - "{{ api_interface_address }}" - - "{{ kolla_internal_vip_address | default('') }}" - -container_proxy: - http_proxy: "{{ container_http_proxy }}" - https_proxy: "{{ container_https_proxy }}" - no_proxy: "{{ container_proxy_no_proxy_entries | select | join(',') }}" - -# By default, Kolla API services bind to the network address assigned -# to the api_interface. Allow the bind address to be an override. 
-api_interface_address: "{{ 'api' | kolla_address }}" - - -#################### -# Database options -#################### -database_address: "{{ kolla_internal_fqdn }}" -database_user: "root" -database_port: "3306" -database_connection_recycle_time: 10 -database_max_pool_size: 1 -database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool ) and ( enable_proxysql | bool)) else 'no' }}" -database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool ) and ( enable_proxysql | bool)) else 'no' }}" - -#################### -# Container engine options -#################### -kolla_container_engine: "docker" - -#################### -# Docker options -#################### -docker_registry_email: -docker_registry: "quay.io" -docker_namespace: "openstack.kolla" -docker_image_name_prefix: "" -docker_image_url: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}" -docker_registry_username: -# Please read the docs carefully before applying docker_registry_insecure. -docker_registry_insecure: "no" -docker_runtime_directory: "" -# Docker client timeout in seconds. 
-docker_client_timeout: 120 - -# Docker networking options -docker_disable_default_iptables_rules: "yes" -docker_disable_default_network: "{{ docker_disable_default_iptables_rules }}" -docker_disable_ip_forward: "{{ docker_disable_default_iptables_rules }}" - -# Retention settings for Docker logs -docker_log_max_file: "5" -docker_log_max_size: "50m" - -# Valid options are [ no, on-failure, always, unless-stopped ] -docker_restart_policy: "unless-stopped" - -# '0' means unlimited retries (applies only to 'on-failure' policy) -docker_restart_policy_retry: "10" - -# Extra docker options for Zun -docker_configure_for_zun: "no" -docker_zun_options: -H tcp://{{ api_interface_address | put_address_in_context('url') }}:2375 -docker_zun_config: {} - -# Extra containerd options for Zun -containerd_configure_for_zun: "no" - -# Enable Ceph backed Cinder Volumes for zun -zun_configure_for_cinder_ceph: "no" - -# 42463 is the static group id of the zun user in the Zun image. -# If users customize this value on building the Zun images, -# they need to change this config accordingly. -containerd_grpc_gid: 42463 - -# Timeout after Docker sends SIGTERM before sending SIGKILL. 
-docker_graceful_timeout: 60 - -# Common options used throughout Docker -docker_common_options: - auth_email: "{{ docker_registry_email }}" - auth_password: "{{ docker_registry_password }}" - auth_registry: "{{ docker_registry }}" - auth_username: "{{ docker_registry_username }}" - environment: - KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" - restart_policy: "{{ docker_restart_policy }}" - restart_retries: "{{ docker_restart_policy_retry }}" - graceful_timeout: "{{ docker_graceful_timeout }}" - client_timeout: "{{ docker_client_timeout }}" - container_engine: "{{ kolla_container_engine }}" - -# Container engine specific volume paths -docker_volumes_path: "{{ docker_runtime_directory or '/var/lib/docker' }}/volumes" -podman_volumes_path: "{{ docker_runtime_directory or '/var/lib/containers' }}/storage/volumes" -container_engine_volumes_path: "{{ docker_volumes_path if kolla_container_engine == 'docker' else podman_volumes_path }}" - -##################### -# Volumes under /run -##################### -# Podman has problem with mounting whole /run directory -# described here: https://github.com/containers/podman/issues/16305 -run_default_volumes_podman: - - '/run/netns:/run/netns:shared' - - '/run/lock/nova:/run/lock/nova:shared' - - "/run/libvirt:/run/libvirt:shared" - - "/run/nova:/run/nova:shared" - - "/run/openvswitch:/run/openvswitch:shared" - -run_default_volumes_docker: [] - -run_default_subdirectories: - - '/run/netns' - - '/run/lock/nova' - - "/run/libvirt" - - "/run/nova" - - "/run/openvswitch" - -#################### -# Dimensions options -#################### -# Dimension options for Docker Containers -# NOTE(mnasiadka): Lower 1073741816 nofile limit on EL9 (RHEL9/CentOS Stream 9/Rocky Linux 9) -# fixes at least rabbitmq and mariadb -default_container_dimensions: "{{ default_container_dimensions_el9 if ansible_facts.os_family == 'RedHat' else '{}' }}" -default_container_dimensions_el9: "{{ default_docker_dimensions_el9 if kolla_container_engine == 
'docker' else default_podman_dimensions_el9 }}" -default_docker_dimensions_el9: - ulimits: - nofile: - soft: 1048576 - hard: 1048576 -default_podman_dimensions_el9: - ulimits: - RLIMIT_NOFILE: - soft: 1048576 - hard: 1048576 - RLIMIT_NPROC: - soft: 1048576 - hard: 1048576 - -##################### -# Healthcheck options -##################### -enable_container_healthchecks: "yes" -# Healthcheck options for Docker containers -# interval/timeout/start_period are in seconds -default_container_healthcheck_interval: 30 -default_container_healthcheck_timeout: 30 -default_container_healthcheck_retries: 3 -default_container_healthcheck_start_period: 5 - -####################### -# Extra volumes options -####################### -# Extra volumes for Docker Containers -default_extra_volumes: [] - -#################### -# keepalived options -#################### -# Arbitrary unique number from 0..255 -keepalived_virtual_router_id: "51" - - -####################### -## Opensearch Options -######################## -opensearch_datadir_volume: "opensearch" - -opensearch_internal_endpoint: "{{ opensearch_address | kolla_url(internal_protocol, opensearch_port) }}" -opensearch_dashboards_internal_fqdn: "{{ kolla_internal_fqdn }}" -opensearch_dashboards_external_fqdn: "{{ kolla_external_fqdn }}" -opensearch_dashboards_internal_endpoint: "{{ opensearch_dashboards_internal_fqdn | kolla_url(internal_protocol, opensearch_dashboards_port) }}" -opensearch_dashboards_external_endpoint: "{{ opensearch_dashboards_external_fqdn | kolla_url(public_protocol, opensearch_dashboards_port_external) }}" -opensearch_dashboards_user: "opensearch" -opensearch_log_index_prefix: "{{ kibana_log_prefix if kibana_log_prefix is defined else 'flog' }}" - -################### -# Messaging options -################### -# oslo.messaging rpc transport valid options are [ rabbit, amqp ] -om_rpc_transport: "rabbit" -om_rpc_user: "{{ rabbitmq_user }}" -om_rpc_password: "{{ rabbitmq_password }}" -om_rpc_port: "{{ 
rabbitmq_port }}" -om_rpc_group: "rabbitmq" -om_rpc_vhost: "/" - -rpc_transport_url: "{{ om_rpc_transport }}://{% for host in groups[om_rpc_group] %}{{ om_rpc_user }}:{{ om_rpc_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_rpc_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_rpc_vhost }}" - -# oslo.messaging notify transport valid options are [ rabbit ] -om_notify_transport: "rabbit" -om_notify_user: "{{ rabbitmq_user }}" -om_notify_password: "{{ rabbitmq_password }}" -om_notify_port: "{{ rabbitmq_port }}" -om_notify_group: "rabbitmq" -om_notify_vhost: "/" - -notify_transport_url: "{{ om_notify_transport }}://{% for host in groups[om_notify_group] %}{{ om_notify_user }}:{{ om_notify_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_notify_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_notify_vhost }}" - -# Whether to enable TLS for oslo.messaging communication with RabbitMQ. -om_enable_rabbitmq_tls: "{{ rabbitmq_enable_tls | bool }}" -# CA certificate bundle in containers using oslo.messaging with RabbitMQ TLS. -om_rabbitmq_cacert: "{{ rabbitmq_cacert }}" -om_rabbitmq_qos_prefetch_count: "1" - -om_enable_rabbitmq_stream_fanout: true - -#################### -# Networking options -#################### -network_interface: "eth0" -neutron_external_interface: "eth1" -kolla_external_vip_interface: "{{ network_interface }}" -api_interface: "{{ network_interface }}" -migration_interface: "{{ api_interface }}" -tunnel_interface: "{{ network_interface }}" -octavia_network_interface: "{{ 'o-hm0' if octavia_network_type == 'tenant' else api_interface }}" -bifrost_network_interface: "{{ network_interface }}" -dns_interface: "{{ network_interface }}" -dpdk_tunnel_interface: "{{ neutron_external_interface }}" -ironic_http_interface: "{{ api_interface }}" -ironic_tftp_interface: "{{ api_interface }}" - -# Configure the address family (AF) per network. 
-# Valid options are [ ipv4, ipv6 ] -network_address_family: "ipv4" -api_address_family: "{{ network_address_family }}" -storage_address_family: "{{ network_address_family }}" -migration_address_family: "{{ api_address_family }}" -tunnel_address_family: "{{ network_address_family }}" -octavia_network_address_family: "{{ api_address_family }}" -bifrost_network_address_family: "{{ network_address_family }}" -dns_address_family: "{{ network_address_family }}" -dpdk_tunnel_address_family: "{{ network_address_family }}" -ironic_http_address_family: "{{ api_address_family }}" -ironic_tftp_address_family: "{{ api_address_family }}" - -migration_interface_address: "{{ 'migration' | kolla_address }}" -tunnel_interface_address: "{{ 'tunnel' | kolla_address }}" -octavia_network_interface_address: "{{ 'octavia_network' | kolla_address }}" -dpdk_tunnel_interface_address: "{{ 'dpdk_tunnel' | kolla_address }}" -ironic_http_interface_address: "{{ 'ironic_http' | kolla_address }}" -ironic_tftp_interface_address: "{{ 'ironic_tftp' | kolla_address }}" - -# Valid options are [ openvswitch, ovn, linuxbridge ] -# Do note linuxbridge is *EXPERIMENTAL* in Neutron since Zed and it requires extra tweaks to config to be usable. -# For details, see: https://docs.openstack.org/neutron/latest/admin/config-experimental-framework.html -neutron_plugin_agent: "openvswitch" - -# Valid options are [ internal, infoblox ] -neutron_ipam_driver: "internal" - -# The default ports used by each service. 
-# The list should be in alphabetical order -aodh_internal_fqdn: "{{ kolla_internal_fqdn }}" -aodh_external_fqdn: "{{ kolla_external_fqdn }}" -aodh_internal_endpoint: "{{ aodh_internal_fqdn | kolla_url(internal_protocol, aodh_api_port) }}" -aodh_public_endpoint: "{{ aodh_external_fqdn | kolla_url(public_protocol, aodh_api_public_port) }}" -aodh_api_port: "8042" -aodh_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else aodh_api_port }}" -aodh_api_listen_port: "{{ aodh_api_port }}" - -barbican_internal_fqdn: "{{ kolla_internal_fqdn }}" -barbican_external_fqdn: "{{ kolla_external_fqdn }}" -barbican_internal_endpoint: "{{ barbican_internal_fqdn | kolla_url(internal_protocol, barbican_api_port) }}" -barbican_public_endpoint: "{{ barbican_external_fqdn | kolla_url(public_protocol, barbican_api_public_port) }}" -barbican_api_port: "9311" -barbican_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else barbican_api_port }}" -barbican_api_listen_port: "{{ barbican_api_port }}" - -blazar_internal_fqdn: "{{ kolla_internal_fqdn }}" -blazar_external_fqdn: "{{ kolla_external_fqdn }}" -blazar_internal_base_endpoint: "{{ blazar_internal_fqdn | kolla_url(internal_protocol, blazar_api_port) }}" -blazar_public_base_endpoint: "{{ blazar_external_fqdn | kolla_url(public_protocol, blazar_api_public_port) }}" -blazar_api_port: "1234" -blazar_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else blazar_api_port }}" -blazar_api_listen_port: "{{ blazar_api_port }}" - -ceph_rgw_internal_fqdn: "{{ kolla_internal_fqdn }}" -ceph_rgw_external_fqdn: "{{ kolla_external_fqdn }}" -ceph_rgw_internal_base_endpoint: "{{ ceph_rgw_internal_fqdn | kolla_url(internal_protocol, ceph_rgw_port) }}" -ceph_rgw_public_base_endpoint: "{{ ceph_rgw_external_fqdn | kolla_url(public_protocol, ceph_rgw_public_port) }}" -ceph_rgw_port: 
"6780" -ceph_rgw_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ceph_rgw_port }}" - -cinder_internal_fqdn: "{{ kolla_internal_fqdn }}" -cinder_external_fqdn: "{{ kolla_external_fqdn }}" -cinder_internal_base_endpoint: "{{ cinder_internal_fqdn | kolla_url(internal_protocol, cinder_api_port) }}" -cinder_public_base_endpoint: "{{ cinder_external_fqdn | kolla_url(public_protocol, cinder_api_public_port) }}" -cinder_api_port: "8776" -cinder_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cinder_api_port }}" -cinder_api_listen_port: "{{ cinder_api_port }}" - -cloudkitty_internal_fqdn: "{{ kolla_internal_fqdn }}" -cloudkitty_external_fqdn: "{{ kolla_external_fqdn }}" -cloudkitty_internal_endpoint: "{{ cloudkitty_internal_fqdn | kolla_url(internal_protocol, cloudkitty_api_port) }}" -cloudkitty_public_endpoint: "{{ cloudkitty_external_fqdn | kolla_url(public_protocol, cloudkitty_api_public_port) }}" -cloudkitty_api_port: "8889" -cloudkitty_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cloudkitty_api_port }}" -cloudkitty_api_listen_port: "{{ cloudkitty_api_port }}" - -collectd_udp_port: "25826" - -cyborg_internal_fqdn: "{{ kolla_internal_fqdn }}" -cyborg_external_fqdn: "{{ kolla_external_fqdn }}" -cyborg_api_port: "6666" -cyborg_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cyborg_api_port }}" -cyborg_api_listen_port: "{{ cyborg_api_port }}" - -designate_internal_fqdn: "{{ kolla_internal_fqdn }}" -designate_external_fqdn: "{{ kolla_external_fqdn }}" -designate_internal_endpoint: "{{ designate_internal_fqdn | kolla_url(internal_protocol, designate_api_port) }}" -designate_public_endpoint: "{{ designate_external_fqdn | kolla_url(public_protocol, designate_api_public_port) }}" -designate_api_port: "9001" 
-designate_api_listen_port: "{{ designate_api_port }}" -designate_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else designate_api_port }}" -designate_bind_port: "53" -designate_mdns_port: "{{ '53' if designate_backend == 'infoblox' else '5354' }}" -designate_rndc_port: "953" - -etcd_client_port: "2379" -etcd_peer_port: "2380" -etcd_enable_tls: "{{ kolla_enable_tls_backend }}" -etcd_protocol: "{{ 'https' if etcd_enable_tls | bool else 'http' }}" - -fluentd_syslog_port: "5140" - -glance_internal_fqdn: "{{ kolla_internal_fqdn }}" -glance_external_fqdn: "{{ kolla_external_fqdn }}" -glance_internal_endpoint: "{{ glance_internal_fqdn | kolla_url(internal_protocol, glance_api_port) }}" -glance_public_endpoint: "{{ glance_external_fqdn | kolla_url(public_protocol, glance_api_public_port) }}" -glance_api_port: "9292" -glance_api_listen_port: "{{ glance_api_port }}" -glance_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else glance_api_port }}" -glance_tls_proxy_stats_port: "9293" - -gnocchi_internal_fqdn: "{{ kolla_internal_fqdn }}" -gnocchi_external_fqdn: "{{ kolla_external_fqdn }}" -gnocchi_internal_endpoint: "{{ gnocchi_internal_fqdn | kolla_url(internal_protocol, gnocchi_api_port) }}" -gnocchi_public_endpoint: "{{ gnocchi_external_fqdn | kolla_url(public_protocol, gnocchi_api_public_port) }}" -gnocchi_api_port: "8041" -gnocchi_api_listen_port: "{{ gnocchi_api_port }}" -gnocchi_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else gnocchi_api_port }}" - -grafana_internal_fqdn: "{{ kolla_internal_fqdn }}" -grafana_external_fqdn: "{{ kolla_external_fqdn }}" -grafana_internal_endpoint: "{{ grafana_internal_fqdn | kolla_url(internal_protocol, grafana_server_port) }}" -grafana_public_endpoint: "{{ grafana_external_fqdn | kolla_url(public_protocol, grafana_server_public_port) }}" 
-grafana_server_port: "3000" -grafana_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else grafana_server_port }}" -grafana_server_listen_port: "{{ grafana_server_port }}" - -haproxy_stats_port: "1984" -haproxy_monitor_port: "61313" -haproxy_ssh_port: "2985" -# configure SSL/TLS settings for haproxy config, one of [modern, intermediate, legacy]: -kolla_haproxy_ssl_settings: "modern" - -haproxy_ssl_settings: "{{ ssl_legacy_settings if kolla_haproxy_ssl_settings == 'legacy' else ssl_intermediate_settings if kolla_haproxy_ssl_settings == 'intermediate' else ssl_modern_settings | default(ssl_modern_settings) }}" - -ssl_legacy_settings: | - ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES - ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11 - -ssl_intermediate_settings: | - ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 - ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets - ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 - ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets - -ssl_modern_settings: | - ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 
no-tlsv12 no-tls-tickets - ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets - -heat_internal_fqdn: "{{ kolla_internal_fqdn }}" -heat_external_fqdn: "{{ kolla_external_fqdn }}" -heat_internal_base_endpoint: "{{ heat_internal_fqdn | kolla_url(internal_protocol, heat_api_port) }}" -heat_public_base_endpoint: "{{ heat_external_fqdn | kolla_url(public_protocol, heat_api_public_port) }}" -heat_api_port: "8004" -heat_api_listen_port: "{{ heat_api_port }}" -heat_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_port }}" -heat_cfn_internal_fqdn: "{{ kolla_internal_fqdn }}" -heat_cfn_external_fqdn: "{{ kolla_external_fqdn }}" -heat_cfn_internal_base_endpoint: "{{ heat_cfn_internal_fqdn | kolla_url(internal_protocol, heat_api_cfn_port) }}" -heat_cfn_public_base_endpoint: "{{ heat_cfn_external_fqdn | kolla_url(public_protocol, heat_api_cfn_public_port) }}" -heat_api_cfn_port: "8000" -heat_api_cfn_listen_port: "{{ heat_api_cfn_port }}" -heat_api_cfn_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_cfn_port }}" - -horizon_internal_fqdn: "{{ kolla_internal_fqdn }}" -horizon_external_fqdn: "{{ kolla_external_fqdn }}" -horizon_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port) }}" -horizon_public_endpoint: "{{ kolla_external_fqdn | kolla_url(public_protocol, horizon_tls_port if kolla_enable_tls_external | bool else horizon_port) }}" -horizon_port: "80" -horizon_tls_port: "443" -horizon_listen_port: "{{ horizon_tls_port if horizon_enable_tls_backend | bool else horizon_port }}" - -influxdb_http_port: "8086" - -ironic_internal_fqdn: "{{ kolla_internal_fqdn }}" -ironic_external_fqdn: "{{ kolla_external_fqdn }}" 
-ironic_internal_endpoint: "{{ ironic_internal_fqdn | kolla_url(internal_protocol, ironic_api_port) }}" -ironic_public_endpoint: "{{ ironic_external_fqdn | kolla_url(public_protocol, ironic_api_public_port) }}" -ironic_api_port: "6385" -ironic_api_listen_port: "{{ ironic_api_port }}" -ironic_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ironic_api_port }}" -ironic_http_port: "8089" -ironic_prometheus_exporter_port: "9608" - -iscsi_port: "3260" - -keystone_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else keystone_public_listen_port }}" -keystone_public_listen_port: "5000" -keystone_internal_port: "5000" -keystone_internal_listen_port: "{{ keystone_internal_port }}" -keystone_listen_port: "{{ keystone_internal_listen_port }}" - -keystone_ssh_port: "8023" - -kuryr_port: "23750" - -letsencrypt_webserver_port: "8081" -letsencrypt_managed_certs: "{{ '' if not enable_letsencrypt | bool else ('internal' if letsencrypt_internal_cert_server != '' and kolla_same_external_internal_vip | bool else ('internal,external' if letsencrypt_internal_cert_server != '' and letsencrypt_external_cert_server != '' else ('internal' if letsencrypt_internal_cert_server != '' else ('external' if letsencrypt_external_cert_server != '' and not kolla_same_external_internal_vip | bool else '')))) }}" -letsencrypt_external_cert_server: "https://acme-v02.api.letsencrypt.org/directory" -letsencrypt_internal_cert_server: "" - -magnum_internal_fqdn: "{{ kolla_internal_fqdn }}" -magnum_external_fqdn: "{{ kolla_external_fqdn }}" -magnum_internal_base_endpoint: "{{ magnum_internal_fqdn | kolla_url(internal_protocol, magnum_api_port) }}" -magnum_public_base_endpoint: "{{ magnum_external_fqdn | kolla_url(public_protocol, magnum_api_public_port) }}" -magnum_api_port: "9511" -magnum_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend 
| bool else magnum_api_port }}" -magnum_api_listen_port: "{{ magnum_api_port }}" - -manila_internal_fqdn: "{{ kolla_internal_fqdn }}" -manila_external_fqdn: "{{ kolla_external_fqdn }}" -manila_internal_base_endpoint: "{{ manila_internal_fqdn | kolla_url(internal_protocol, manila_api_port) }}" -manila_public_base_endpoint: "{{ manila_external_fqdn | kolla_url(public_protocol, manila_api_public_port) }}" -manila_api_port: "8786" -manila_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else manila_api_port }}" -manila_api_listen_port: "{{ manila_api_port }}" - -mariadb_port: "{{ database_port }}" -mariadb_wsrep_port: "4567" -mariadb_ist_port: "4568" -mariadb_sst_port: "4444" -mariadb_clustercheck_port: "4569" -mariadb_enable_tls_backend: "{{ database_enable_tls_backend }}" - -mariadb_monitor_user: "{{ 'monitor' if enable_proxysql | bool else 'haproxy' }}" - -mariadb_datadir_volume: "mariadb" - -mariadb_default_database_shard_id: 0 -mariadb_default_database_shard_hosts: "{% set default_shard = [] %}{% for host in groups['mariadb'] %}{% if hostvars[host]['mariadb_shard_id'] is not defined or hostvars[host]['mariadb_shard_id'] == mariadb_default_database_shard_id %}{{ default_shard.append(host) }}{% endif %}{% endfor %}{{ default_shard }}" -mariadb_shard_id: "{{ mariadb_default_database_shard_id }}" -mariadb_shard_name: "shard_{{ mariadb_shard_id }}" -mariadb_shard_group: "mariadb_{{ mariadb_shard_name }}" -mariadb_loadbalancer: "{{ 'proxysql' if enable_proxysql | bool else 'haproxy' }}" -mariadb_backup_target: "{{ 'active' if mariadb_loadbalancer == 'haproxy' else 'replica' }}" -mariadb_shard_root_user_prefix: "root_shard_" -mariadb_shard_backup_user_prefix: "backup_shard_" -mariadb_shards_info: "{{ groups['mariadb'] | database_shards_info() }}" - -masakari_internal_fqdn: "{{ kolla_internal_fqdn }}" -masakari_external_fqdn: "{{ kolla_external_fqdn }}" -masakari_internal_endpoint: "{{ masakari_internal_fqdn | 
kolla_url(internal_protocol, masakari_api_port) }}" -masakari_public_endpoint: "{{ masakari_external_fqdn | kolla_url(public_protocol, masakari_api_public_port) }}" -masakari_api_port: "15868" -masakari_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else masakari_api_port }}" -masakari_api_listen_port: "{{ masakari_api_port }}" -masakari_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" - -memcached_port: "11211" -memcache_security_strategy: "ENCRYPT" - -mistral_internal_fqdn: "{{ kolla_internal_fqdn }}" -mistral_external_fqdn: "{{ kolla_external_fqdn }}" -mistral_internal_base_endpoint: "{{ mistral_internal_fqdn | kolla_url(internal_protocol, mistral_api_port) }}" -mistral_public_base_endpoint: "{{ mistral_external_fqdn | kolla_url(public_protocol, mistral_api_public_port) }}" -mistral_api_port: "8989" -mistral_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else mistral_api_port }}" -mistral_api_listen_port: "{{ mistral_api_port }}" - -neutron_internal_fqdn: "{{ kolla_internal_fqdn }}" -neutron_external_fqdn: "{{ kolla_external_fqdn }}" -neutron_internal_endpoint: "{{ neutron_internal_fqdn | kolla_url(internal_protocol, neutron_server_port) }}" -neutron_public_endpoint: "{{ neutron_external_fqdn | kolla_url(public_protocol, neutron_server_public_port) }}" -neutron_server_port: "9696" -neutron_server_listen_port: "{{ neutron_server_port }}" -neutron_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else neutron_server_port }}" - -nova_internal_fqdn: "{{ kolla_internal_fqdn }}" -nova_external_fqdn: "{{ kolla_external_fqdn }}" -nova_internal_base_endpoint: "{{ nova_internal_fqdn | kolla_url(internal_protocol, nova_api_port) }}" -nova_public_base_endpoint: "{{ nova_external_fqdn | kolla_url(public_protocol, nova_api_public_port) }}" 
-nova_api_port: "8774" -nova_api_listen_port: "{{ nova_api_port }}" -nova_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_api_port }}" -nova_metadata_internal_fqdn: "{{ kolla_internal_fqdn }}" -nova_metadata_external_fqdn: "{{ kolla_external_fqdn }}" -nova_metadata_port: "8775" -nova_metadata_listen_port: "{{ nova_metadata_port }}" -nova_novncproxy_fqdn: "{{ kolla_external_fqdn }}" -nova_novncproxy_port: "6080" -nova_novncproxy_listen_port: "{{ nova_novncproxy_port }}" -nova_novncproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_novncproxy_port }}" -nova_spicehtml5proxy_fqdn: "{{ kolla_external_fqdn }}" -nova_spicehtml5proxy_port: "6082" -nova_spicehtml5proxy_listen_port: "{{ nova_spicehtml5proxy_port }}" -nova_spicehtml5proxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_spicehtml5proxy_port }}" -nova_serialproxy_fqdn: "{{ kolla_external_fqdn }}" -nova_serialproxy_port: "6083" -nova_serialproxy_listen_port: "{{ nova_serialproxy_port }}" -nova_serialproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_serialproxy_port }}" -nova_serialproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}" - -octavia_internal_fqdn: "{{ kolla_internal_fqdn }}" -octavia_external_fqdn: "{{ kolla_external_fqdn }}" -octavia_internal_endpoint: "{{ octavia_internal_fqdn | kolla_url(internal_protocol, octavia_api_port) }}" -octavia_public_endpoint: "{{ octavia_external_fqdn | kolla_url(public_protocol, octavia_api_public_port) }}" -octavia_api_port: "9876" -octavia_api_listen_port: "{{ octavia_api_port }}" -octavia_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else octavia_api_port }}" -octavia_health_manager_port: "5555" - -# NOTE: If 
an external ElasticSearch cluster port is specified, -# we default to using that port in services with ElasticSearch -# endpoints. This is for backwards compatibility. -opensearch_port: "{{ elasticsearch_port | default('9200') }}" -opensearch_dashboards_port: "5601" -opensearch_dashboards_port_external: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else opensearch_dashboards_port }}" -opensearch_dashboards_listen_port: "{{ opensearch_dashboards_port }}" - -ovn_nb_db_port: "6641" -ovn_sb_db_port: "6642" -# OVN SB Relay related variables -ovn_sb_db_relay_count: "{{ ((groups['ovn-controller'] | length) / ovn_sb_db_relay_compute_per_relay | int) | round(0, 'ceil') | int }}" -ovn_sb_db_relay_compute_per_relay: "50" -ovn_sb_db_relay_port_prefix: "1664" -ovn_sb_db_relay_port: "{{ ovn_sb_db_relay_port_prefix ~ ovn_sb_db_relay_client_group_id }}" -ovn_sb_db_relay_client_group_id: "{{ range(1, ovn_sb_db_relay_count | int + 1) | random(seed=inventory_hostname) }}" -ovn_nb_connection: "{% for host in groups['ovn-nb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_nb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}" -ovn_sb_connection: "{{ ovn_sb_connection_relay if enable_ovn_sb_db_relay | bool else ovn_sb_connection_no_relay }}" -ovn_sb_connection_no_relay: "{% for host in groups['ovn-sb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_sb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}" -ovn_sb_connection_relay: "{% for host in groups['ovn-sb-db-relay'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_sb_db_relay_port }}{% if not loop.last %},{% endif %}{% endfor %}" - -ovsdb_port: "6640" - -placement_internal_fqdn: "{{ kolla_internal_fqdn }}" -placement_external_fqdn: "{{ kolla_external_fqdn }}" -placement_internal_endpoint: "{{ placement_internal_fqdn | kolla_url(internal_protocol, placement_api_port) }}" 
-placement_public_endpoint: "{{ placement_external_fqdn | kolla_url(public_protocol, placement_api_public_port) }}" -# Default Placement API port of 8778 already in use -placement_api_port: "8780" -placement_api_listen_port: "{{ placement_api_port }}" -placement_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else placement_api_port }}" - -prometheus_internal_fqdn: "{{ kolla_internal_fqdn }}" -prometheus_external_fqdn: "{{ kolla_external_fqdn }}" -prometheus_internal_endpoint: "{{ prometheus_internal_fqdn | kolla_url(internal_protocol, prometheus_port) }}" -prometheus_public_endpoint: "{{ prometheus_external_fqdn | kolla_url(public_protocol, prometheus_public_port) }}" -prometheus_port: "9091" -prometheus_listen_port: "{{ prometheus_port }}" -prometheus_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_port }}" -prometheus_node_exporter_port: "9100" -prometheus_mysqld_exporter_port: "9104" -prometheus_haproxy_exporter_port: "9101" -prometheus_memcached_exporter_port: "9150" -prometheus_rabbitmq_exporter_port: "{{ rabbitmq_prometheus_port }}" -# Default cadvisor port of 8080 already in use -prometheus_cadvisor_port: "18080" -prometheus_fluentd_integration_port: "24231" -prometheus_libvirt_exporter_port: "9177" -prometheus_etcd_integration_port: "{{ etcd_client_port }}" -proxysql_prometheus_exporter_port: "6070" - -# Prometheus alertmanager ports -prometheus_alertmanager_internal_fqdn: "{{ kolla_internal_fqdn }}" -prometheus_alertmanager_external_fqdn: "{{ kolla_external_fqdn }}" -prometheus_alertmanager_internal_endpoint: "{{ prometheus_alertmanager_internal_fqdn | kolla_url(internal_protocol, prometheus_alertmanager_port) }}" -prometheus_alertmanager_public_endpoint: "{{ prometheus_alertmanager_external_fqdn | kolla_url(public_protocol, prometheus_alertmanager_public_port) }}" -prometheus_alertmanager_port: "9093" 
-prometheus_alertmanager_cluster_port: "9094" -prometheus_alertmanager_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_alertmanager_port }}" -prometheus_alertmanager_listen_port: "{{ prometheus_alertmanager_port }}" - -# Prometheus openstack-exporter ports -prometheus_openstack_exporter_port: "9198" -prometheus_elasticsearch_exporter_port: "9108" - -# Prometheus blackbox-exporter ports -prometheus_blackbox_exporter_port: "9115" - -# Prometheus instance label to use for metrics -prometheus_instance_label: - -proxysql_admin_port: "6032" - -rabbitmq_port: "{{ '5671' if rabbitmq_enable_tls | bool else '5672' }}" -rabbitmq_management_port: "15672" -rabbitmq_cluster_port: "25672" -rabbitmq_epmd_port: "4369" -rabbitmq_prometheus_port: "15692" - -redis_port: "6379" -redis_sentinel_port: "26379" - -skyline_apiserver_internal_fqdn: "{{ kolla_internal_fqdn }}" -skyline_apiserver_external_fqdn: "{{ kolla_external_fqdn }}" -skyline_apiserver_internal_endpoint: "{{ skyline_apiserver_internal_fqdn | kolla_url(internal_protocol, skyline_apiserver_port) }}" -skyline_apiserver_public_endpoint: "{{ skyline_apiserver_external_fqdn | kolla_url(public_protocol, skyline_apiserver_public_port) }}" -skyline_console_internal_fqdn: "{{ kolla_internal_fqdn }}" -skyline_console_external_fqdn: "{{ kolla_external_fqdn }}" -skyline_console_internal_endpoint: "{{ skyline_console_internal_fqdn | kolla_url(internal_protocol, skyline_console_port) }}" -skyline_console_public_endpoint: "{{ skyline_console_external_fqdn | kolla_url(public_protocol, skyline_console_public_port) }}" -skyline_apiserver_port: "9998" -skyline_apiserver_listen_port: "{{ skyline_apiserver_port }}" -skyline_apiserver_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_apiserver_port }}" -skyline_console_port: "9999" -skyline_console_listen_port: "{{ skyline_console_port }}" 
-skyline_console_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_console_port }}" -skyline_enable_sso: "{{ enable_keystone_federation | bool and keystone_identity_providers | selectattr('protocol', 'equalto', 'openid') | list | count > 0 }}" - -syslog_udp_port: "{{ fluentd_syslog_port }}" - -tacker_internal_fqdn: "{{ kolla_internal_fqdn }}" -tacker_external_fqdn: "{{ kolla_external_fqdn }}" -tacker_internal_endpoint: "{{ tacker_internal_fqdn | kolla_url(internal_protocol, tacker_server_port) }}" -tacker_public_endpoint: "{{ tacker_external_fqdn | kolla_url(public_protocol, tacker_server_public_port) }}" -tacker_server_port: "9890" -tacker_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else tacker_server_port }}" -tacker_server_listen_port: "{{ tacker_server_port }}" - -trove_internal_fqdn: "{{ kolla_internal_fqdn }}" -trove_external_fqdn: "{{ kolla_external_fqdn }}" -trove_internal_base_endpoint: "{{ trove_internal_fqdn | kolla_url(internal_protocol, trove_api_port) }}" -trove_public_base_endpoint: "{{ trove_external_fqdn | kolla_url(public_protocol, trove_api_public_port) }}" -trove_api_port: "8779" -trove_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else trove_api_port }}" -trove_api_listen_port: "{{ trove_api_port }}" - -venus_internal_fqdn: "{{ kolla_internal_fqdn }}" -venus_external_fqdn: "{{ kolla_external_fqdn }}" -venus_internal_endpoint: "{{ venus_internal_fqdn | kolla_url(internal_protocol, venus_api_port) }}" -venus_public_endpoint: "{{ venus_external_fqdn | kolla_url(public_protocol, venus_api_public_port) }}" -venus_api_port: "10010" -venus_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else venus_api_port }}" -venus_api_listen_port: "{{ venus_api_port }}" - -watcher_internal_fqdn: "{{ 
kolla_internal_fqdn }}" -watcher_external_fqdn: "{{ kolla_external_fqdn }}" -watcher_internal_endpoint: "{{ watcher_internal_fqdn | kolla_url(internal_protocol, watcher_api_port) }}" -watcher_public_endpoint: "{{ watcher_external_fqdn | kolla_url(public_protocol, watcher_api_public_port) }}" -watcher_api_port: "9322" -watcher_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else watcher_api_port }}" -watcher_api_listen_port: "{{ watcher_api_port }}" - -zun_api_port: "9517" -zun_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else zun_api_port }}" -zun_api_listen_port: "{{ zun_api_port }}" -zun_wsproxy_internal_fqdn: "{{ kolla_internal_fqdn }}" -zun_wsproxy_external_fqdn: "{{ kolla_external_fqdn }}" -zun_wsproxy_port: "6784" -zun_wsproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}" -zun_cni_daemon_port: "9036" -zun_internal_fqdn: "{{ kolla_internal_fqdn }}" -zun_external_fqdn: "{{ kolla_external_fqdn }}" -zun_internal_base_endpoint: "{{ zun_internal_fqdn | kolla_url(internal_protocol, zun_api_port) }}" -zun_public_base_endpoint: "{{ zun_external_fqdn | kolla_url(public_protocol, zun_api_public_port) }}" - -public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}" -internal_protocol: "{{ 'https' if kolla_enable_tls_internal | bool else 'http' }}" - -################## -# Firewall options -################## -enable_external_api_firewalld: "false" -external_api_firewalld_zone: "public" - -#################### -# OpenStack options -#################### -openstack_release: "master" -# Docker image tag used by default. 
-openstack_tag: "{{ openstack_release }}-{{ kolla_base_distro }}-{{ kolla_base_distro_version }}{{ openstack_tag_suffix }}" -openstack_tag_suffix: "" -openstack_logging_debug: "False" - -openstack_region_name: "RegionOne" - -# A list of policy file formats that are supported by Oslo.policy -supported_policy_format_list: - - policy.yaml - - policy.json - -# In the context of multi-regions, list here the name of all your regions. -multiple_regions_names: - - "{{ openstack_region_name }}" - -openstack_service_workers: "{{ [ansible_facts.processor_vcpus, 5] | min }}" -openstack_service_rpc_workers: "{{ [ansible_facts.processor_vcpus, 3] | min }}" - -# Optionally allow Kolla to set sysctl values -set_sysctl: "yes" - -# Optionally change the path to sysctl.conf modified by Kolla Ansible plays. -kolla_sysctl_conf_path: /etc/sysctl.conf - -# Endpoint type used to connect with OpenStack services with ansible modules. -# Valid options are [ public, internal ] -openstack_interface: "internal" - -# Openstack CA certificate bundle file -# CA bundle file must be added to both the Horizon and Kolla Toolbox containers -openstack_cacert: "" - -# Enable core OpenStack services. This includes: -# glance, keystone, neutron, nova, heat, and horizon. -enable_openstack_core: "yes" - -# These roles are required for Kolla to be operation, however a savvy deployer -# could disable some of these required roles and run their own services. 
-enable_glance: "{{ enable_openstack_core | bool }}" -enable_haproxy: "yes" -enable_keepalived: "{{ enable_haproxy | bool }}" -enable_loadbalancer: "{{ enable_haproxy | bool or enable_keepalived | bool or enable_proxysql | bool }}" -enable_keystone: "{{ enable_openstack_core | bool }}" -enable_keystone_federation: "{{ (keystone_identity_providers | length > 0) and (keystone_identity_mappings | length > 0) }}" -enable_mariadb: "yes" -enable_memcached: "yes" -enable_neutron: "{{ enable_openstack_core | bool }}" -enable_nova: "{{ enable_openstack_core | bool }}" -enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'no' }}" - -# NOTE: Most memcached clients handle load-balancing via client side -# hashing (consistent or not) logic, so going under the covers and messing -# with things that the clients are not aware of is generally wrong -enable_haproxy_memcached: "no" - -# Additional optional OpenStack features and services are specified here -enable_aodh: "no" -enable_barbican: "no" -enable_blazar: "no" -enable_ceilometer: "no" -enable_ceilometer_ipmi: "no" -enable_ceilometer_prometheus_pushgateway: "no" -enable_cells: "no" -enable_central_logging: "no" -enable_ceph_rgw: "no" -enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}" -enable_cinder: "no" -enable_cinder_backup: "yes" -enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool }}" -enable_cinder_backend_lvm: "no" -enable_cinder_backend_nfs: "no" -enable_cinder_backend_quobyte: "no" -enable_cinder_backend_pure_iscsi: "no" -enable_cinder_backend_pure_fc: "no" -enable_cinder_backend_pure_roce: "no" -enable_cinder_backend_pure_nvme_tcp: "no" -enable_cinder_backend_lightbits: "no" -enable_cloudkitty: "no" -enable_collectd: "no" -enable_cyborg: "no" -enable_designate: "no" -enable_etcd: "no" -enable_fluentd: "yes" -enable_fluentd_systemd: "{{ (enable_fluentd | bool) and (enable_central_logging | bool) }}" -enable_gnocchi: "no" -enable_gnocchi_statsd: 
"no" -enable_grafana: "no" -enable_grafana_external: "{{ enable_grafana | bool }}" -enable_hacluster: "{{ enable_masakari_hostmonitor | bool }}" -enable_heat: "{{ enable_openstack_core | bool }}" -enable_horizon: "{{ enable_openstack_core | bool }}" -enable_horizon_blazar: "{{ enable_blazar | bool }}" -enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}" -enable_horizon_designate: "{{ enable_designate | bool }}" -enable_horizon_fwaas: "{{ enable_neutron_fwaas | bool }}" -enable_horizon_heat: "{{ enable_heat | bool }}" -enable_horizon_ironic: "{{ enable_ironic | bool }}" -enable_horizon_magnum: "{{ enable_magnum | bool }}" -enable_horizon_manila: "{{ enable_manila | bool }}" -enable_horizon_masakari: "{{ enable_masakari | bool }}" -enable_horizon_mistral: "{{ enable_mistral | bool }}" -enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}" -enable_horizon_octavia: "{{ enable_octavia | bool }}" -enable_horizon_tacker: "{{ enable_tacker | bool }}" -enable_horizon_trove: "{{ enable_trove | bool }}" -enable_horizon_venus: "{{ enable_venus | bool }}" -enable_horizon_watcher: "{{ enable_watcher | bool }}" -enable_horizon_zun: "{{ enable_zun | bool }}" -enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}" -enable_ironic: "no" -enable_ironic_dnsmasq: "{{ enable_ironic | bool }}" -enable_ironic_neutron_agent: "no" -enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}" -enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}" -enable_kuryr: "no" -enable_letsencrypt: "no" -enable_magnum: "no" -enable_manila: "no" -enable_manila_backend_generic: "no" -enable_manila_backend_hnas: "no" -enable_manila_backend_cephfs_native: "no" -enable_manila_backend_cephfs_nfs: "no" -enable_manila_backend_glusterfs_nfs: "no" -enable_manila_backend_flashblade: "no" -enable_mariabackup: "no" -enable_masakari: "no" -enable_masakari_instancemonitor: "{{ enable_masakari | 
bool }}" -enable_masakari_hostmonitor: "{{ enable_masakari | bool }}" -enable_mistral: "no" -enable_multipathd: "no" -enable_neutron_vpnaas: "no" -enable_neutron_sriov: "no" -enable_neutron_mlnx: "no" -enable_neutron_dvr: "no" -enable_neutron_fwaas: "no" -enable_neutron_qos: "no" -enable_neutron_agent_ha: "no" -enable_neutron_bgp_dragent: "no" -enable_neutron_provider_networks: "no" -enable_neutron_segments: "no" -enable_neutron_packet_logging: "no" -enable_neutron_sfc: "no" -enable_neutron_taas: "no" -enable_neutron_trunk: "no" -enable_neutron_metering: "no" -enable_neutron_infoblox_ipam_agent: "no" -enable_neutron_port_forwarding: "no" -enable_nova_libvirt_container: "{{ nova_compute_virt_type in ['kvm', 'qemu'] }}" -enable_nova_serialconsole_proxy: "no" -enable_nova_ssh: "yes" -enable_octavia: "no" -enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}" -enable_octavia_jobboard: "{{ enable_octavia | bool and 'amphora' in octavia_provider_drivers }}" -enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}" -enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}" -enable_ovn_sb_db_relay: "{{ enable_ovn | bool }}" -enable_ovs_dpdk: "no" -enable_osprofiler: "no" -enable_placement: "{{ enable_nova | bool or enable_zun | bool }}" -enable_prometheus: "no" -enable_proxysql: "yes" -enable_redis: "no" -enable_skyline: "no" -enable_tacker: "no" -enable_telegraf: "no" -enable_trove: "no" -enable_trove_singletenant: "no" -enable_venus: "no" -enable_watcher: "no" -enable_zun: "no" - -ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}" -designate_keystone_user: "designate" -ironic_keystone_user: "ironic" -neutron_keystone_user: "neutron" -nova_keystone_user: "nova" -placement_keystone_user: "placement" -cinder_keystone_user: "cinder" -glance_keystone_user: "glance" - -# Nova fake driver and the number of fake driver per compute node -enable_nova_fake: "no" 
-num_nova_fake_per_node: 5 - -# Clean images options are specified here -enable_destroy_images: "no" - -#################### -# Global Options -#################### -# List of containers to skip during stop command in YAML list format -# skip_stop_containers: -# - container1 -# - container2 -skip_stop_containers: [] - -#################### -# Logging options -#################### - -# NOTE: If an external ElasticSearch cluster address is configured, all -# services with ElasticSearch endpoints should be configured to log -# to the external cluster by default. This is for backwards compatibility. -opensearch_address: "{{ elasticsearch_address if elasticsearch_address is defined else kolla_internal_fqdn }}" -enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'opensearch') }}" -enable_opensearch_dashboards: "{{ enable_opensearch | bool }}" -enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}" - -#################### -# Redis options -#################### -redis_connection_string: "redis://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}default:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}?sentinel=kolla{% else %}&sentinel_fallback={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}{{ redis_connection_string_extras }}" -redis_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes" - -#################### -# Osprofiler options -#################### -# valid values: ["elasticsearch", "redis"] -osprofiler_backend: "elasticsearch" -opensearch_connection_string: "elasticsearch://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}" -osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else 
opensearch_connection_string }}" - -#################### -# RabbitMQ options -#################### -rabbitmq_user: "openstack" -rabbitmq_monitoring_user: "" -# Whether to enable TLS encryption for RabbitMQ client-server communication. -rabbitmq_enable_tls: "no" -# CA certificate bundle in RabbitMQ container. -rabbitmq_cacert: "/etc/ssl/certs/{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.crt' }}" -rabbitmq_datadir_volume: "rabbitmq" - -#################### -# HAProxy options -#################### -haproxy_user: "openstack" -haproxy_enable_external_vip: "{{ 'no' if kolla_same_external_internal_vip | bool else 'yes' }}" -haproxy_enable_http2: "yes" -haproxy_http2_protocol: "alpn h2,http/1.1" -kolla_enable_tls_internal: "no" -kolla_enable_tls_external: "{{ kolla_enable_tls_internal if kolla_same_external_internal_vip | bool else 'no' }}" -kolla_certificates_dir: "{{ node_config }}/certificates" -kolla_external_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy.pem" -kolla_internal_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy-internal.pem" -kolla_admin_openrc_cacert: "" -kolla_copy_ca_into_containers: "no" -haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.crt' }}" -haproxy_backend_cacert_dir: "/etc/ssl/certs" -haproxy_single_external_frontend: false -haproxy_single_external_frontend_public_port: "{{ '443' if kolla_enable_tls_external | bool else '80' }}" - -################## -# Backend options -################## -kolla_httpd_keep_alive: "60" -kolla_httpd_timeout: "60" - -###################### -# Backend TLS options -###################### -kolla_enable_tls_backend: "no" -kolla_verify_tls_backend: "yes" -kolla_tls_backend_cert: "{{ kolla_certificates_dir }}/backend-cert.pem" -kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem" - -##################### -# ACME client options -##################### -acme_client_lego: "server lego {{ 
api_interface_address }}:{{ letsencrypt_webserver_port }}" -acme_client_servers: "{% set arr = [] %}{% if enable_letsencrypt | bool %}{{ arr.append(acme_client_lego) }}{% endif %}{{ arr }}" - -#################### -# Keystone options -#################### -keystone_internal_fqdn: "{{ kolla_internal_fqdn }}" -keystone_external_fqdn: "{{ kolla_external_fqdn }}" - -keystone_internal_url: "{{ keystone_internal_fqdn | kolla_url(internal_protocol, keystone_internal_port) }}" -keystone_public_url: "{{ keystone_external_fqdn | kolla_url(public_protocol, keystone_public_port) }}" - -keystone_admin_user: "admin" -keystone_admin_project: "admin" - -# Whether or not to apply changes to service user passwords when services are -# reconfigured -update_keystone_service_user_passwords: true - -default_project_domain_name: "Default" -default_project_domain_id: "default" - -default_user_domain_name: "Default" -default_user_domain_id: "default" - -# Keystone fernet token expiry in seconds. Default is 1 day. -fernet_token_expiry: 86400 -# Keystone window to allow expired fernet tokens. Default is 2 days. -fernet_token_allow_expired_window: 172800 -# Keystone fernet key rotation interval in seconds. Default is sum of token -# expiry and allow expired window, 3 days. This ensures the minimum number -# of keys are active. If this interval is lower than the sum of the token -# expiry and allow expired window, multiple active keys will be necessary. -fernet_key_rotation_interval: "{{ fernet_token_expiry + fernet_token_allow_expired_window }}" - -keystone_default_user_role: "member" - -# OpenStack authentication string. You should only need to override these if you -# are changing the admin tenant/project or user. 
-openstack_auth: - auth_url: "{{ keystone_internal_url }}" - username: "{{ keystone_admin_user }}" - password: "{{ keystone_admin_password }}" - project_name: "{{ keystone_admin_project }}" - domain_name: "default" - user_domain_name: "default" - -####################### -# Glance options -####################### -glance_backend_file: "{{ not (glance_backend_ceph | bool or glance_backend_s3 | bool) }}" -glance_backend_ceph: "no" -glance_backend_s3: "no" -enable_glance_image_cache: "no" -glance_file_datadir_volume: "glance" -glance_enable_rolling_upgrade: "no" -glance_enable_property_protection: "no" -glance_enable_interoperable_image_import: "no" -glance_api_hosts: "{{ [groups['glance-api'] | first] if glance_backend_file | bool and glance_file_datadir_volume == 'glance' else groups['glance-api'] }}" -# NOTE(mnasiadka): For use in common role -glance_enable_tls_backend: "{{ kolla_enable_tls_backend }}" - -####################### -# Barbican options -####################### -# Valid options are [ simple_crypto, p11_crypto ] -barbican_crypto_plugin: "simple_crypto" -barbican_library_path: "/usr/lib/libCryptoki2_64.so" - -################# -# Gnocchi options -################# -# Valid options are [ file, ceph ] -gnocchi_backend_storage: "file" - -# Valid options are [redis, ''] -gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}" -gnocchi_metric_datadir_volume: "gnocchi" - -################################# -# Cinder options -################################# -cinder_backend_ceph: "no" -cinder_backend_huawei: "no" -cinder_backend_huawei_xml_files: [] -cinder_volume_group: "cinder-volumes" -cinder_target_helper: "{{ 'lioadm' if ansible_facts.os_family == 'RedHat' else 'tgtadm' }}" -# Valid options are [ '', redis, etcd ] -cinder_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" - -# Valid options are [ nfs, ceph, s3 ] -cinder_backup_driver: "ceph" -cinder_backup_share: "" 
-cinder_backup_mount_options_nfs: "" - -####################### -# Cloudkitty options -####################### -# Valid options are 'sqlalchemy' or 'influxdb'. The default value is -# 'influxdb', which matches the default in Cloudkitty since the Stein release. -# When the backend is "influxdb", we also enable Influxdb. -# Also, when using 'influxdb' as the backend, we trigger the configuration/use -# of Cloudkitty storage backend version 2. -cloudkitty_storage_backend: "influxdb" - -####################### -# Designate options -####################### -# Valid options are [ bind9, infoblox ] -designate_backend: "bind9" -designate_ns_record: - - "ns1.example.org" -designate_backend_external: "no" -designate_backend_external_bind9_nameservers: "" -# Valid options are [ '', redis ] -designate_coordination_backend: "{{ 'redis' if enable_redis | bool else '' }}" - -designate_enable_notifications_sink: "no" -designate_notifications_topic_name: "notifications_designate" - -####################### -# Neutron options -####################### -neutron_bgp_router_id: "1.1.1.1" -neutron_bridge_name: "{{ 'br_dpdk' if enable_ovs_dpdk | bool else 'br-ex' }}" -neutron_physical_networks: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index }}{% if not loop.last %},{% endif %}{% endfor %}" -# Comma-separated type of enabled ml2 type drivers -neutron_type_drivers: "flat,vlan,vxlan{% if neutron_plugin_agent == 'ovn' %},geneve{% endif %}" -# Comma-separated types of tenant networks (should be listed in 'neutron_type_drivers') -# NOTE: for ironic this list should also contain 'flat' -neutron_tenant_network_types: "{% if neutron_plugin_agent == 'ovn' %}geneve{% else %}vxlan{% endif %}" - -# valid values: ["dvr", "dvr_no_external"] -neutron_compute_dvr_mode: "dvr" -computes_need_external_bridge: "{{ (enable_neutron_dvr | bool and neutron_compute_dvr_mode == 'dvr') or enable_neutron_provider_networks | bool or neutron_ovn_distributed_fip | bool }}" - -# Default DNS 
resolvers for virtual networks -neutron_dnsmasq_dns_servers: "1.1.1.1,8.8.8.8,8.8.4.4" - -# Set legacy iptables to allow kernels not supporting iptables-nft -neutron_legacy_iptables: "no" - -# Enable distributed floating ip for OVN deployments -neutron_ovn_distributed_fip: "no" - -# SRIOV physnet:interface mappings when SRIOV is enabled -# "sriovnet1" and tunnel_interface used here as placeholders -neutron_sriov_physnet_mappings: - sriovnet1: "{{ tunnel_interface }}" -neutron_enable_tls_backend: "{{ kolla_enable_tls_backend }}" - -# Set OVN network availability zones -neutron_ovn_availability_zones: [] - -# Enable OVN agent -neutron_enable_ovn_agent: "no" - -####################### -# Nova options -####################### -nova_backend_ceph: "no" -nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}" -# Valid options are [ kvm, qemu ] -nova_compute_virt_type: "kvm" -nova_instance_datadir_volume: "{{ 'nova_compute' if enable_nova_libvirt_container | bool else '/var/lib/nova' }}" -nova_safety_upgrade: "no" -# Valid options are [ none, novnc, spice ] -nova_console: "novnc" - -####################### -# Nova Database -####################### -nova_database_shard_id: "{{ mariadb_default_database_shard_id | int }}" -nova_cell0_database_shard_id: "{{ nova_database_shard_id | int }}" - -# These are kept for backwards compatibility, as cell0 references them. 
-nova_database_name: "nova" -nova_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}nova{% endif %}" -nova_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}" - -nova_cell0_database_name: "{{ nova_database_name }}_cell0" -nova_cell0_database_user: "{{ nova_database_user }}" -nova_cell0_database_address: "{{ nova_database_address }}" -nova_cell0_database_password: "{{ nova_database_password }}" - -####################### -# Horizon options -####################### -horizon_backend_database: false -horizon_keystone_multidomain: False - -# Enable deploying custom horizon policy files for services that don't have a -# horizon plugin but have a policy file. Override these when you have services -# not deployed by kolla-ansible but want custom policy files deployed for them -# in horizon. -enable_ceilometer_horizon_policy_file: "{{ enable_ceilometer }}" -enable_cinder_horizon_policy_file: "{{ enable_cinder }}" -enable_glance_horizon_policy_file: "{{ enable_glance }}" -enable_heat_horizon_policy_file: "{{ enable_heat }}" -enable_keystone_horizon_policy_file: "{{ enable_keystone }}" -enable_neutron_horizon_policy_file: "{{ enable_neutron }}" -enable_nova_horizon_policy_file: "{{ enable_nova }}" - -horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}" - -################### -# External Ceph options -################### -# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes) -external_ceph_cephx_enabled: "yes" - -ceph_cluster: "ceph" - -# External Ceph pool names -ceph_cinder_pool_name: "volumes" -ceph_cinder_backup_pool_name: "backups" -ceph_glance_pool_name: "images" -ceph_gnocchi_pool_name: "gnocchi" -ceph_nova_pool_name: "vms" - -ceph_cinder_backup_user: "cinder-backup" -ceph_cinder_user: "cinder" -ceph_glance_user: "glance" -ceph_gnocchi_user: "gnocchi" -ceph_manila_user: "manila" -ceph_nova_user: "{{ ceph_cinder_user 
}}" - -############################################# -# MariaDB component-specific database details -############################################# -# Whether to configure haproxy to load balance -# the external MariaDB server(s) -enable_external_mariadb_load_balancer: "no" -# Whether to use pre-configured databases / users -use_preconfigured_databases: "no" -# whether to use a common, preconfigured user -# for all component databases -use_common_mariadb_user: "no" - -############ -# Prometheus -############ -enable_prometheus_server: "{{ enable_prometheus | bool }}" -enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}" -enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}" -enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}" -enable_prometheus_memcached_exporter: "{{ enable_memcached | bool }}" -enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}" -enable_prometheus_fluentd_integration: "{{ enable_prometheus | bool and enable_fluentd | bool }}" -enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}" -enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bool }}" -enable_prometheus_ceph_mgr_exporter: "no" -enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}" -enable_prometheus_openstack_exporter_external: "no" -enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_opensearch | bool }}" -enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}" -enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}" -enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}" -enable_prometheus_etcd_integration: "{{ enable_prometheus | bool and enable_etcd | bool }}" -enable_prometheus_proxysql_exporter: "{{ enable_prometheus | bool and enable_proxysql | bool }}" - -prometheus_alertmanager_user: "admin" 
-prometheus_ceph_exporter_interval: "{{ prometheus_scrape_interval }}" -prometheus_grafana_user: "grafana" -prometheus_haproxy_user: "haproxy" -prometheus_skyline_user: "skyline" -prometheus_scrape_interval: "60s" -prometheus_openstack_exporter_interval: "{{ prometheus_scrape_interval }}" -prometheus_openstack_exporter_timeout: "45s" -prometheus_elasticsearch_exporter_interval: "{{ prometheus_scrape_interval }}" -prometheus_cmdline_extras: -prometheus_ceph_mgr_exporter_endpoints: [] -prometheus_openstack_exporter_endpoint_type: "internal" -prometheus_openstack_exporter_compute_api_version: "latest" -prometheus_libvirt_exporter_interval: "60s" - - -#################### -# InfluxDB options -#################### -influxdb_address: "{{ kolla_internal_fqdn }}" -influxdb_datadir_volume: "influxdb" - -influxdb_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, influxdb_http_port) }}" - -######################### -# Internal Image options -######################### -kolla_base_distro_version_default_map: { - "centos": "stream9", - "debian": "bookworm", - "rocky": "9", - "ubuntu": "noble", -} - -distro_python_version: "3" - -kolla_base_distro_version: "{{ kolla_base_distro_version_default_map[kolla_base_distro] }}" - -############# -# S3 options -############# -# Common options for S3 Cinder Backup and Glance S3 backend. -s3_url: -s3_bucket: -s3_access_key: -s3_secret_key: - -########## -# Telegraf -########## -# Configure telegraf to use the docker daemon itself as an input for -# telemetry data. -telegraf_enable_docker_input: "no" - -# Valid options are [ '', redis, etcd ] -ironic_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" - -########## -# Octavia -########## -# Whether to run Kolla-Ansible's automatic configuration for Octavia. -# NOTE: if you upgrade from Ussuri, you must set `octavia_auto_configure` to `no` -# and keep your other Octavia config like before. 
-octavia_auto_configure: "{{ 'amphora' in octavia_provider_drivers }}" - -# Octavia network type options are [ tenant, provider ] -# * tenant indicates that we will create a tenant network and a network -# interface on the Octavia worker nodes for communication with amphorae. -# * provider indicates that we will create a flat or vlan provider network. -# In this case octavia_network_interface should be set to a network interface -# on the Octavia worker nodes on the same provider network. -octavia_network_type: "provider" - -################################### -# Identity federation configuration -################################### -# Here we configure all of the IdPs meta information that will be required to implement identity federation with OpenStack Keystone. -# We require the administrator to enter the following metadata: -# * name (internal name of the IdP in Keystone); -# * openstack_domain (the domain in Keystone that the IdP belongs to) -# * protocol (the federated protocol used by the IdP; e.g. openid or saml); -# * identifier (the IdP identifier; e.g. https://accounts.google.com); -# * public_name (the public name that will be shown for users in Horizon); -# * attribute_mapping (the attribute mapping to be used for this IdP. This mapping is configured in the "keystone_identity_mappings" configuration); -# * metadata_folder (folder containing all the identity provider metadata as jsons named as the identifier without the protocol -# and with '/' escaped as %2F followed with '.provider' or '.client' or '.conf'; e.g. accounts.google.com.provider; PS, all .conf, -# .provider and .client jsons must be in the folder, even if you dont override any conf in the .conf json, you must leave it as an empty json '{}'); -# * certificate_file (the path to the Identity Provider certificate file, the file must be named as 'certificate-key-id.pem'; -# e.g. 
LRVweuT51StjMdsna59jKfB3xw0r8Iz1d1J1HeAbmlw.pem; You can find the key-id in the Identity provider '.well-known/openid-configuration' jwks_uri as kid); -# -# The IdPs meta information are to be presented to Kolla-Ansible as the following example: -# keystone_identity_providers: -# - name: "myidp1" -# openstack_domain: "my-domain" -# protocol: "openid" -# identifier: "https://accounts.google.com" -# public_name: "Authenticate via myidp1" -# attribute_mapping: "mappingId1" -# metadata_folder: "path/to/metadata/folder" -# certificate_file: "path/to/certificate/file.pem" -# -# We also need to configure the attribute mapping that is used by IdPs. -# The configuration of attribute mappings is a list of objects, where each -# object must have a 'name' (that mapps to the 'attribute_mapping' to the IdP -# object in the IdPs set), and the 'file' with a full qualified path to a mapping file. -# keystone_identity_mappings: -# - name: "mappingId1" -# file: "/full/qualified/path/to/mapping/json/file/to/mappingId1" -# - name: "mappingId2" -# file: "/full/qualified/path/to/mapping/json/file/to/mappingId2" -# - name: "mappingId3" -# file: "/full/qualified/path/to/mapping/json/file/to/mappingId3" -keystone_identity_providers: [] -keystone_identity_mappings: [] - -#################### -# Corosync options -#################### - -# this is UDP port -hacluster_corosync_port: 5405 diff --git a/ansible/group_vars/all/aodh.yml b/ansible/group_vars/all/aodh.yml new file mode 100644 index 0000000000..2e431e11eb --- /dev/null +++ b/ansible/group_vars/all/aodh.yml @@ -0,0 +1,11 @@ +--- +enable_aodh: "no" + +# Ports +aodh_internal_fqdn: "{{ kolla_internal_fqdn }}" +aodh_external_fqdn: "{{ kolla_external_fqdn }}" +aodh_internal_endpoint: "{{ aodh_internal_fqdn | kolla_url(internal_protocol, aodh_api_port) }}" +aodh_public_endpoint: "{{ aodh_external_fqdn | kolla_url(public_protocol, aodh_api_public_port) }}" +aodh_api_port: "8042" +aodh_api_public_port: "{{ 
haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else aodh_api_port }}" +aodh_api_listen_port: "{{ aodh_api_port }}" diff --git a/ansible/group_vars/all/barbican.yml b/ansible/group_vars/all/barbican.yml new file mode 100644 index 0000000000..36e256a018 --- /dev/null +++ b/ansible/group_vars/all/barbican.yml @@ -0,0 +1,17 @@ +--- +enable_barbican: "no" + +####################### +# Barbican options +####################### +# Valid options are [ simple_crypto, p11_crypto ] +barbican_crypto_plugin: "simple_crypto" +barbican_library_path: "/usr/lib/libCryptoki2_64.so" + +barbican_internal_fqdn: "{{ kolla_internal_fqdn }}" +barbican_external_fqdn: "{{ kolla_external_fqdn }}" +barbican_internal_endpoint: "{{ barbican_internal_fqdn | kolla_url(internal_protocol, barbican_api_port) }}" +barbican_public_endpoint: "{{ barbican_external_fqdn | kolla_url(public_protocol, barbican_api_public_port) }}" +barbican_api_port: "9311" +barbican_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else barbican_api_port }}" +barbican_api_listen_port: "{{ barbican_api_port }}" diff --git a/ansible/group_vars/all/bifrost.yml b/ansible/group_vars/all/bifrost.yml new file mode 100644 index 0000000000..a42e6c9ea1 --- /dev/null +++ b/ansible/group_vars/all/bifrost.yml @@ -0,0 +1,3 @@ +--- +bifrost_network_interface: "{{ network_interface }}" +bifrost_network_address_family: "{{ network_address_family }}" diff --git a/ansible/group_vars/all/blazar.yml b/ansible/group_vars/all/blazar.yml new file mode 100644 index 0000000000..10122ffdc4 --- /dev/null +++ b/ansible/group_vars/all/blazar.yml @@ -0,0 +1,11 @@ +--- +enable_blazar: "no" + +# Ports +blazar_internal_fqdn: "{{ kolla_internal_fqdn }}" +blazar_external_fqdn: "{{ kolla_external_fqdn }}" +blazar_internal_base_endpoint: "{{ blazar_internal_fqdn | kolla_url(internal_protocol, blazar_api_port) }}" +blazar_public_base_endpoint: "{{ 
blazar_external_fqdn | kolla_url(public_protocol, blazar_api_public_port) }}" +blazar_api_port: "1234" +blazar_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else blazar_api_port }}" +blazar_api_listen_port: "{{ blazar_api_port }}" diff --git a/ansible/group_vars/all/ceilometer.yml b/ansible/group_vars/all/ceilometer.yml new file mode 100644 index 0000000000..19e99eee03 --- /dev/null +++ b/ansible/group_vars/all/ceilometer.yml @@ -0,0 +1,4 @@ +--- +enable_ceilometer: "no" +enable_ceilometer_ipmi: "no" +enable_ceilometer_prometheus_pushgateway: "no" diff --git a/ansible/group_vars/all/ceph-rgw.yml b/ansible/group_vars/all/ceph-rgw.yml new file mode 100644 index 0000000000..3d3d4802b7 --- /dev/null +++ b/ansible/group_vars/all/ceph-rgw.yml @@ -0,0 +1,10 @@ +--- +enable_ceph_rgw: "no" +enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}" + +ceph_rgw_internal_fqdn: "{{ kolla_internal_fqdn }}" +ceph_rgw_external_fqdn: "{{ kolla_external_fqdn }}" +ceph_rgw_internal_base_endpoint: "{{ ceph_rgw_internal_fqdn | kolla_url(internal_protocol, ceph_rgw_port) }}" +ceph_rgw_public_base_endpoint: "{{ ceph_rgw_external_fqdn | kolla_url(public_protocol, ceph_rgw_public_port) }}" +ceph_rgw_port: "6780" +ceph_rgw_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ceph_rgw_port }}" diff --git a/ansible/group_vars/all/ceph.yml b/ansible/group_vars/all/ceph.yml new file mode 100644 index 0000000000..987717a65e --- /dev/null +++ b/ansible/group_vars/all/ceph.yml @@ -0,0 +1,21 @@ +--- +################### +# External Ceph options +################### +# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes) +external_ceph_cephx_enabled: "yes" + +ceph_cluster: "ceph" + +# External Ceph pool names +ceph_cinder_pool_name: "volumes" +ceph_cinder_backup_pool_name: "backups" +ceph_glance_pool_name: "images" +ceph_gnocchi_pool_name: 
"gnocchi" +ceph_nova_pool_name: "vms" + +ceph_cinder_backup_user: "cinder-backup" +ceph_cinder_user: "cinder" +ceph_glance_user: "glance" +ceph_gnocchi_user: "gnocchi" +ceph_nova_user: "{{ ceph_cinder_user }}" diff --git a/ansible/group_vars/all/cinder.yml b/ansible/group_vars/all/cinder.yml new file mode 100644 index 0000000000..e4659674ea --- /dev/null +++ b/ansible/group_vars/all/cinder.yml @@ -0,0 +1,40 @@ +--- +enable_cinder: "no" +enable_cinder_backup: "yes" +enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool }}" +enable_cinder_backend_lvm: "no" +enable_cinder_backend_nfs: "no" +enable_cinder_backend_quobyte: "no" +enable_cinder_backend_pure_iscsi: "no" +enable_cinder_backend_pure_fc: "no" +enable_cinder_backend_pure_roce: "no" +enable_cinder_backend_pure_nvme_tcp: "no" +enable_cinder_backend_lightbits: "no" + +################################# +# Cinder options +################################# +cinder_backend_ceph: "no" +cinder_backend_huawei: "no" +cinder_backend_huawei_xml_files: [] +cinder_volume_group: "cinder-volumes" +cinder_target_helper: "{{ 'lioadm' if ansible_facts.os_family == 'RedHat' else 'tgtadm' }}" +# Valid options are [ '', redis, etcd ] +cinder_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" + +# Valid options are [ nfs, ceph, s3 ] +cinder_backup_driver: "ceph" +cinder_backup_share: "" +cinder_backup_mount_options_nfs: "" + +storage_address_family: "{{ network_address_family }}" + +cinder_keystone_user: "cinder" + +cinder_internal_fqdn: "{{ kolla_internal_fqdn }}" +cinder_external_fqdn: "{{ kolla_external_fqdn }}" +cinder_internal_base_endpoint: "{{ cinder_internal_fqdn | kolla_url(internal_protocol, cinder_api_port) }}" +cinder_public_base_endpoint: "{{ cinder_external_fqdn | kolla_url(public_protocol, cinder_api_public_port) }}" +cinder_api_port: "8776" +cinder_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool 
else cinder_api_port }}" +cinder_api_listen_port: "{{ cinder_api_port }}" diff --git a/ansible/group_vars/all/cloudkitty.yml b/ansible/group_vars/all/cloudkitty.yml new file mode 100644 index 0000000000..a657c3bd0b --- /dev/null +++ b/ansible/group_vars/all/cloudkitty.yml @@ -0,0 +1,20 @@ +--- +enable_cloudkitty: "no" + +####################### +# Cloudkitty options +####################### +# Valid options are 'sqlalchemy' or 'influxdb'. The default value is +# 'influxdb', which matches the default in Cloudkitty since the Stein release. +# When the backend is "influxdb", we also enable Influxdb. +# Also, when using 'influxdb' as the backend, we trigger the configuration/use +# of Cloudkitty storage backend version 2. +cloudkitty_storage_backend: "influxdb" + +cloudkitty_internal_fqdn: "{{ kolla_internal_fqdn }}" +cloudkitty_external_fqdn: "{{ kolla_external_fqdn }}" +cloudkitty_internal_endpoint: "{{ cloudkitty_internal_fqdn | kolla_url(internal_protocol, cloudkitty_api_port) }}" +cloudkitty_public_endpoint: "{{ cloudkitty_external_fqdn | kolla_url(public_protocol, cloudkitty_api_public_port) }}" +cloudkitty_api_port: "8889" +cloudkitty_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cloudkitty_api_port }}" +cloudkitty_api_listen_port: "{{ cloudkitty_api_port }}" diff --git a/ansible/group_vars/all/collectd.yml b/ansible/group_vars/all/collectd.yml new file mode 100644 index 0000000000..8e62327160 --- /dev/null +++ b/ansible/group_vars/all/collectd.yml @@ -0,0 +1,4 @@ +--- +enable_collectd: "no" + +collectd_udp_port: "25826" diff --git a/ansible/group_vars/all/common.yml b/ansible/group_vars/all/common.yml new file mode 100644 index 0000000000..a08a3ce3c1 --- /dev/null +++ b/ansible/group_vars/all/common.yml @@ -0,0 +1,366 @@ +--- +################### +# Ansible options +################### + +# This variable is used as the "filter" argument for the setup module. 
For +# instance, if one wants to remove/ignore all Neutron interface facts: +# kolla_ansible_setup_filter: "ansible_[!qt]*" +# By default, we do not provide a filter. +kolla_ansible_setup_filter: "{{ omit }}" + +# This variable is used as the "gather_subset" argument for the setup module. +# For instance, if one wants to avoid collecting facts via facter: +# kolla_ansible_setup_gather_subset: "all,!facter" +# By default, we do not provide a gather subset. +kolla_ansible_setup_gather_subset: "{{ omit }}" + +# This variable determines which hosts require facts when using --limit. Facts +# will be gathered using delegation for hosts in this list that are not +# included in the limit. +# By default, this list includes all hosts. +kolla_ansible_delegate_facts_hosts: "{{ groups['all'] }}" + +#################### +# Docker options +#################### +docker_registry_email: +docker_registry: "quay.io" +docker_namespace: "openstack.kolla" +docker_image_name_prefix: "" +docker_image_url: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}" +docker_registry_username: +# Please read the docs carefully before applying docker_registry_insecure. +docker_registry_insecure: "no" +docker_runtime_directory: "" +# Docker client timeout in seconds. +docker_client_timeout: 120 + +# Docker networking options +docker_disable_default_iptables_rules: "yes" +docker_disable_default_network: "{{ docker_disable_default_iptables_rules }}" +docker_disable_ip_forward: "{{ docker_disable_default_iptables_rules }}" + +# Retention settings for Docker logs +docker_log_max_file: "5" +docker_log_max_size: "50m" + +# Valid options are [ no, on-failure, always, unless-stopped ] +docker_restart_policy: "unless-stopped" + +# '0' means unlimited retries (applies only to 'on-failure' policy) +docker_restart_policy_retry: "10" + +# Timeout after Docker sends SIGTERM before sending SIGKILL. 
+docker_graceful_timeout: 60 + +# Common options used throughout Docker +docker_common_options: + auth_email: "{{ docker_registry_email }}" + auth_password: "{{ docker_registry_password }}" + auth_registry: "{{ docker_registry }}" + auth_username: "{{ docker_registry_username }}" + environment: + KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" + restart_policy: "{{ docker_restart_policy }}" + restart_retries: "{{ docker_restart_policy_retry }}" + graceful_timeout: "{{ docker_graceful_timeout }}" + client_timeout: "{{ docker_client_timeout }}" + container_engine: "{{ kolla_container_engine }}" + +# Container engine specific volume paths +docker_volumes_path: "{{ docker_runtime_directory or '/var/lib/docker' }}/volumes" +podman_volumes_path: "{{ docker_runtime_directory or '/var/lib/containers' }}/storage/volumes" +container_engine_volumes_path: "{{ docker_volumes_path if kolla_container_engine == 'docker' else podman_volumes_path }}" + +##################### +# Volumes under /run +##################### +# Podman has problem with mounting whole /run directory +# described here: https://github.com/containers/podman/issues/16305 +run_default_volumes_podman: + - '/run/netns:/run/netns:shared' + - '/run/lock/nova:/run/lock/nova:shared' + - "/run/libvirt:/run/libvirt:shared" + - "/run/nova:/run/nova:shared" + - "/run/openvswitch:/run/openvswitch:shared" + +run_default_volumes_docker: [] + +run_default_subdirectories: + - '/run/netns' + - '/run/lock/nova' + - "/run/libvirt" + - "/run/nova" + - "/run/openvswitch" + +#################### +# Dimensions options +#################### +# Dimension options for Docker Containers +# NOTE(mnasiadka): Lower 1073741816 nofile limit on EL9 (RHEL9/CentOS Stream 9/Rocky Linux 9) +# fixes at least rabbitmq and mariadb +default_container_dimensions: "{{ default_container_dimensions_el9 if ansible_facts.os_family == 'RedHat' else '{}' }}" +default_container_dimensions_el9: "{{ default_docker_dimensions_el9 if kolla_container_engine == 
'docker' else default_podman_dimensions_el9 }}" +default_docker_dimensions_el9: + ulimits: + nofile: + soft: 1048576 + hard: 1048576 +default_podman_dimensions_el9: + ulimits: + RLIMIT_NOFILE: + soft: 1048576 + hard: 1048576 + RLIMIT_NPROC: + soft: 1048576 + hard: 1048576 + +##################### +# Healthcheck options +##################### +enable_container_healthchecks: "yes" +# Healthcheck options for Docker containers +# interval/timeout/start_period are in seconds +default_container_healthcheck_interval: 30 +default_container_healthcheck_timeout: 30 +default_container_healthcheck_retries: 3 +default_container_healthcheck_start_period: 5 + +####################### +# Extra volumes options +####################### +# Extra volumes for Docker Containers +default_extra_volumes: [] + +################## +# Firewall options +################## +enable_external_api_firewalld: "false" +external_api_firewalld_zone: "public" + +################## +# Backend options +################## +kolla_httpd_keep_alive: "60" +kolla_httpd_timeout: "60" + +# The "temp" files that are created before merge need to stay persistent due +# to the fact that ansible will register a "change" if it has to create them +# again. Persistent files allow for idempotency +container_config_directory: "/var/lib/kolla/config_files" + +# The directory on the deploy host containing globals.yml. 
+node_config: "{{ CONFIG_DIR | default('/etc/kolla') }}" + +# The directory to merge custom config files with kolla's config files +node_custom_config: "{{ node_config }}/config" + +# The directory to store the config files on the destination node +node_config_directory: "/etc/kolla" + +# The group which owns node_config_directory, you can use a non-root +# user to deploy kolla +config_owner_user: "root" +config_owner_group: "root" + +################### +# Kolla options +################### +# Valid options are [ COPY_ONCE, COPY_ALWAYS ] +config_strategy: "COPY_ALWAYS" + +# Valid options are ['centos', 'debian', 'rocky', 'ubuntu'] +kolla_base_distro: "rocky" + +kolla_internal_vip_address: "{{ kolla_internal_address | default('') }}" +kolla_internal_fqdn: "{{ kolla_internal_vip_address }}" +kolla_external_vip_address: "{{ kolla_internal_vip_address }}" +kolla_same_external_internal_vip: "{{ kolla_external_vip_address | ansible.utils.ipaddr('address') == kolla_internal_vip_address | ansible.utils.ipaddr('address') }}" +kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_same_external_internal_vip | bool else kolla_external_vip_address }}" + +kolla_dev_repos_directory: "/opt/stack/" +kolla_dev_repos_git: "https://opendev.org/openstack" +kolla_dev_repos_pull: "no" +kolla_dev_mode: "no" +kolla_source_version: "{% if openstack_release == 'master' %}master{% else %}stable/{{ openstack_release }}{% endif %}" + +# Proxy settings for containers such as magnum that need internet access +container_http_proxy: "" +container_https_proxy: "" +container_no_proxy: "localhost,127.0.0.1" + +container_proxy_no_proxy_entries: + - "{{ container_no_proxy }}" + - "{{ api_interface_address }}" + - "{{ kolla_internal_vip_address | default('') }}" + +container_proxy: + http_proxy: "{{ container_http_proxy }}" + https_proxy: "{{ container_https_proxy }}" + no_proxy: "{{ container_proxy_no_proxy_entries | select | join(',') }}" + +# By default, Kolla API services bind to the network address 
assigned +# to the api_interface. Allow the bind address to be an override. +api_interface_address: "{{ 'api' | kolla_address }}" + +#################### +# Container engine options +#################### +kolla_container_engine: "docker" + + +######################### +# Internal Image options +######################### +kolla_base_distro_version_default_map: { + "centos": "stream9", + "debian": "bookworm", + "rocky": "9", + "ubuntu": "noble", +} + +distro_python_version: "3" + +kolla_base_distro_version: "{{ kolla_base_distro_version_default_map[kolla_base_distro] }}" + +#################### +# Networking options +#################### +network_interface: "eth0" +kolla_external_vip_interface: "{{ network_interface }}" +api_interface: "{{ network_interface }}" + +# Configure the address family (AF) per network. +# Valid options are [ ipv4, ipv6 ] +network_address_family: "ipv4" +api_address_family: "{{ network_address_family }}" + +public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}" +internal_protocol: "{{ 'https' if kolla_enable_tls_internal | bool else 'http' }}" + +# Additional optional OpenStack features and services are specified here +enable_central_logging: "no" + +# Clean images options are specified here +enable_destroy_images: "no" + +#################### +# Global Options +#################### +# List of containers to skip during stop command in YAML list format +# skip_stop_containers: +# - container1 +# - container2 +skip_stop_containers: [] + +################### +# Messaging options +################### +# oslo.messaging rpc transport valid options are [ rabbit, amqp ] +om_rpc_transport: "rabbit" +om_rpc_user: "{{ rabbitmq_user }}" +om_rpc_password: "{{ rabbitmq_password }}" +om_rpc_port: "{{ rabbitmq_port }}" +om_rpc_group: "rabbitmq" +om_rpc_vhost: "/" + +rpc_transport_url: "{{ om_rpc_transport }}://{% for host in groups[om_rpc_group] %}{{ om_rpc_user }}:{{ om_rpc_password }}@{{ 'api' | kolla_address(host) | 
put_address_in_context('url') }}:{{ om_rpc_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_rpc_vhost }}" + +# oslo.messaging notify transport valid options are [ rabbit ] +om_notify_transport: "rabbit" +om_notify_user: "{{ rabbitmq_user }}" +om_notify_password: "{{ rabbitmq_password }}" +om_notify_port: "{{ rabbitmq_port }}" +om_notify_group: "rabbitmq" +om_notify_vhost: "/" + +notify_transport_url: "{{ om_notify_transport }}://{% for host in groups[om_notify_group] %}{{ om_notify_user }}:{{ om_notify_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_notify_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_notify_vhost }}" + +# Whether to enable TLS for oslo.messaging communication with RabbitMQ. +om_enable_rabbitmq_tls: "{{ rabbitmq_enable_tls | bool }}" +# CA certificate bundle in containers using oslo.messaging with RabbitMQ TLS. +om_rabbitmq_cacert: "{{ rabbitmq_cacert }}" +om_rabbitmq_qos_prefetch_count: "1" + +om_enable_rabbitmq_stream_fanout: true + +# OpenStack authentication string. You should only need to override these if you +# are changing the admin tenant/project or user. +openstack_auth: + auth_url: "{{ keystone_internal_url }}" + username: "{{ keystone_admin_user }}" + password: "{{ keystone_admin_password }}" + project_name: "{{ keystone_admin_project }}" + domain_name: "default" + user_domain_name: "default" + +#################### +# OpenStack options +#################### +openstack_release: "master" +# Docker image tag used by default. +openstack_tag: "{{ openstack_release }}-{{ kolla_base_distro }}-{{ kolla_base_distro_version }}{{ openstack_tag_suffix }}" +openstack_tag_suffix: "" +openstack_logging_debug: "False" + +openstack_region_name: "RegionOne" + +# A list of policy file formats that are supported by Oslo.policy +supported_policy_format_list: + - policy.yaml + - policy.json + +# In the context of multi-regions, list here the name of all your regions. 
+multiple_regions_names: + - "{{ openstack_region_name }}" + +openstack_service_workers: "{{ [ansible_facts.processor_vcpus, 5] | min }}" +openstack_service_rpc_workers: "{{ [ansible_facts.processor_vcpus, 3] | min }}" + +# Endpoint type used to connect with OpenStack services with ansible modules. +# Valid options are [ public, internal ] +openstack_interface: "internal" + +# Openstack CA certificate bundle file +# CA bundle file must be added to both the Horizon and Kolla Toolbox containers +openstack_cacert: "" + +# Enable core OpenStack services. This includes: +# glance, keystone, neutron, nova, heat, and horizon. +enable_openstack_core: "yes" + +enable_osprofiler: "no" + +#################### +# Osprofiler options +#################### +# valid values: ["elasticsearch", "redis"] +osprofiler_backend: "elasticsearch" +opensearch_connection_string: "elasticsearch://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}" +osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else opensearch_connection_string }}" + +###################### +# Backend TLS options +###################### +kolla_enable_tls_backend: "no" +kolla_verify_tls_backend: "yes" +kolla_tls_backend_cert: "{{ kolla_certificates_dir }}/backend-cert.pem" +kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem" + +#################### +# Database options +#################### +database_address: "{{ kolla_internal_fqdn }}" +database_user: "root" +database_port: "3306" +database_connection_recycle_time: 10 +database_max_pool_size: 1 +database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool ) and ( enable_proxysql | bool)) else 'no' }}" +database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool ) and ( enable_proxysql | bool)) else 'no' }}" + +# Optionally allow Kolla to set sysctl values +set_sysctl: "yes" + +# Optionally change the path to sysctl.conf modified by Kolla Ansible 
plays. +kolla_sysctl_conf_path: /etc/sysctl.conf diff --git a/ansible/group_vars/all/cyborg.yml b/ansible/group_vars/all/cyborg.yml new file mode 100644 index 0000000000..f8346b5ef2 --- /dev/null +++ b/ansible/group_vars/all/cyborg.yml @@ -0,0 +1,8 @@ +--- +enable_cyborg: "no" + +cyborg_internal_fqdn: "{{ kolla_internal_fqdn }}" +cyborg_external_fqdn: "{{ kolla_external_fqdn }}" +cyborg_api_port: "6666" +cyborg_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cyborg_api_port }}" +cyborg_api_listen_port: "{{ cyborg_api_port }}" diff --git a/ansible/group_vars/all/database.yml b/ansible/group_vars/all/database.yml new file mode 100644 index 0000000000..7e0e1bd033 --- /dev/null +++ b/ansible/group_vars/all/database.yml @@ -0,0 +1,11 @@ +--- +#################### +# Database options +#################### +database_address: "{{ kolla_internal_fqdn }}" +database_user: "root" +database_port: "3306" +database_connection_recycle_time: 10 +database_max_pool_size: 1 +database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool ) and ( enable_proxysql | bool)) else 'no' }}" +database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool ) and ( enable_proxysql | bool)) else 'no' }}" diff --git a/ansible/group_vars/all/designate.yml b/ansible/group_vars/all/designate.yml new file mode 100644 index 0000000000..e9916c5420 --- /dev/null +++ b/ansible/group_vars/all/designate.yml @@ -0,0 +1,33 @@ +--- +enable_designate: "no" + +designate_keystone_user: "designate" + +####################### +# Designate options +####################### +# Valid options are [ bind9, infoblox ] +designate_backend: "bind9" +designate_ns_record: + - "ns1.example.org" +designate_backend_external: "no" +designate_backend_external_bind9_nameservers: "" +# Valid options are [ '', redis ] +designate_coordination_backend: "{{ 'redis' if enable_redis | bool else '' }}" + +designate_enable_notifications_sink: "no" 
+designate_notifications_topic_name: "notifications_designate" + +dns_interface: "{{ network_interface }}" +dns_address_family: "{{ network_address_family }}" + +designate_internal_fqdn: "{{ kolla_internal_fqdn }}" +designate_external_fqdn: "{{ kolla_external_fqdn }}" +designate_internal_endpoint: "{{ designate_internal_fqdn | kolla_url(internal_protocol, designate_api_port) }}" +designate_public_endpoint: "{{ designate_external_fqdn | kolla_url(public_protocol, designate_api_public_port) }}" +designate_api_port: "9001" +designate_api_listen_port: "{{ designate_api_port }}" +designate_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else designate_api_port }}" +designate_bind_port: "53" +designate_mdns_port: "{{ '53' if designate_backend == 'infoblox' else '5354' }}" +designate_rndc_port: "953" diff --git a/ansible/group_vars/all/etcd.yml b/ansible/group_vars/all/etcd.yml new file mode 100644 index 0000000000..a6b1601196 --- /dev/null +++ b/ansible/group_vars/all/etcd.yml @@ -0,0 +1,7 @@ +--- +enable_etcd: "no" + +etcd_client_port: "2379" +etcd_peer_port: "2380" +etcd_enable_tls: "{{ kolla_enable_tls_backend }}" +etcd_protocol: "{{ 'https' if etcd_enable_tls | bool else 'http' }}" diff --git a/ansible/group_vars/all/fluentd.yml b/ansible/group_vars/all/fluentd.yml new file mode 100644 index 0000000000..13f41522d0 --- /dev/null +++ b/ansible/group_vars/all/fluentd.yml @@ -0,0 +1,6 @@ +--- +enable_fluentd: "yes" +enable_fluentd_systemd: "{{ (enable_fluentd | bool) and (enable_central_logging | bool) }}" + +fluentd_syslog_port: "5140" +syslog_udp_port: "{{ fluentd_syslog_port }}" diff --git a/ansible/group_vars/all/glance.yml b/ansible/group_vars/all/glance.yml new file mode 100644 index 0000000000..baf705736b --- /dev/null +++ b/ansible/group_vars/all/glance.yml @@ -0,0 +1,28 @@ +--- +enable_glance: "{{ enable_openstack_core | bool }}" + +glance_keystone_user: "glance" + +####################### +# Glance 
options +####################### +glance_backend_file: "{{ not (glance_backend_ceph | bool or glance_backend_s3 | bool) }}" +glance_backend_ceph: "no" +glance_backend_s3: "no" +enable_glance_image_cache: "no" +glance_file_datadir_volume: "glance" +glance_enable_rolling_upgrade: "no" +glance_enable_property_protection: "no" +glance_enable_interoperable_image_import: "no" +glance_api_hosts: "{{ [groups['glance-api'] | first] if glance_backend_file | bool and glance_file_datadir_volume == 'glance' else groups['glance-api'] }}" +# NOTE(mnasiadka): For use in common role +glance_enable_tls_backend: "{{ kolla_enable_tls_backend }}" + +glance_internal_fqdn: "{{ kolla_internal_fqdn }}" +glance_external_fqdn: "{{ kolla_external_fqdn }}" +glance_internal_endpoint: "{{ glance_internal_fqdn | kolla_url(internal_protocol, glance_api_port) }}" +glance_public_endpoint: "{{ glance_external_fqdn | kolla_url(public_protocol, glance_api_public_port) }}" +glance_api_port: "9292" +glance_api_listen_port: "{{ glance_api_port }}" +glance_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else glance_api_port }}" +glance_tls_proxy_stats_port: "9293" diff --git a/ansible/group_vars/all/gnocchi.yml b/ansible/group_vars/all/gnocchi.yml new file mode 100644 index 0000000000..f103736b43 --- /dev/null +++ b/ansible/group_vars/all/gnocchi.yml @@ -0,0 +1,21 @@ +--- +enable_gnocchi: "no" +enable_gnocchi_statsd: "no" + +################# +# Gnocchi options +################# +# Valid options are [ file, ceph ] +gnocchi_backend_storage: "file" + +# Valid options are [redis, ''] +gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}" +gnocchi_metric_datadir_volume: "gnocchi" + +gnocchi_internal_fqdn: "{{ kolla_internal_fqdn }}" +gnocchi_external_fqdn: "{{ kolla_external_fqdn }}" +gnocchi_internal_endpoint: "{{ gnocchi_internal_fqdn | kolla_url(internal_protocol, gnocchi_api_port) }}" +gnocchi_public_endpoint: "{{ 
gnocchi_external_fqdn | kolla_url(public_protocol, gnocchi_api_public_port) }}" +gnocchi_api_port: "8041" +gnocchi_api_listen_port: "{{ gnocchi_api_port }}" +gnocchi_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else gnocchi_api_port }}" diff --git a/ansible/group_vars/all/grafana.yml b/ansible/group_vars/all/grafana.yml new file mode 100644 index 0000000000..97c7d12945 --- /dev/null +++ b/ansible/group_vars/all/grafana.yml @@ -0,0 +1,11 @@ +--- +enable_grafana: "no" +enable_grafana_external: "{{ enable_grafana | bool }}" + +grafana_internal_fqdn: "{{ kolla_internal_fqdn }}" +grafana_external_fqdn: "{{ kolla_external_fqdn }}" +grafana_internal_endpoint: "{{ grafana_internal_fqdn | kolla_url(internal_protocol, grafana_server_port) }}" +grafana_public_endpoint: "{{ grafana_external_fqdn | kolla_url(public_protocol, grafana_server_public_port) }}" +grafana_server_port: "3000" +grafana_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else grafana_server_port }}" +grafana_server_listen_port: "{{ grafana_server_port }}" diff --git a/ansible/group_vars/all/hacluster.yml b/ansible/group_vars/all/hacluster.yml new file mode 100644 index 0000000000..15a92447cc --- /dev/null +++ b/ansible/group_vars/all/hacluster.yml @@ -0,0 +1,9 @@ +--- +enable_hacluster: "{{ enable_masakari_hostmonitor | bool }}" + +#################### +# Corosync options +#################### + +# this is UDP port +hacluster_corosync_port: 5405 diff --git a/ansible/group_vars/all/haproxy.yml b/ansible/group_vars/all/haproxy.yml new file mode 100644 index 0000000000..d2e5a14a24 --- /dev/null +++ b/ansible/group_vars/all/haproxy.yml @@ -0,0 +1,48 @@ +--- +enable_haproxy: "yes" + +#################### +# HAProxy options +#################### +haproxy_user: "openstack" +haproxy_enable_external_vip: "{{ 'no' if kolla_same_external_internal_vip | bool else 'yes' }}" 
+haproxy_enable_http2: "yes" +haproxy_http2_protocol: "alpn h2,http/1.1" +kolla_enable_tls_internal: "no" +kolla_enable_tls_external: "{{ kolla_enable_tls_internal if kolla_same_external_internal_vip | bool else 'no' }}" +kolla_certificates_dir: "{{ node_config }}/certificates" +kolla_external_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy.pem" +kolla_internal_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy-internal.pem" +kolla_admin_openrc_cacert: "" +kolla_copy_ca_into_containers: "no" +haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.crt' }}" +haproxy_backend_cacert_dir: "/etc/ssl/certs" +haproxy_single_external_frontend: false +haproxy_single_external_frontend_public_port: "{{ '443' if kolla_enable_tls_external | bool else '80' }}" + +# configure SSL/TLS settings for haproxy config, one of [modern, intermediate, legacy]: +kolla_haproxy_ssl_settings: "modern" + +haproxy_ssl_settings: "{{ ssl_legacy_settings if kolla_haproxy_ssl_settings == 'legacy' else ssl_intermediate_settings if kolla_haproxy_ssl_settings == 'intermediate' else ssl_modern_settings | default(ssl_modern_settings) }}" + +ssl_legacy_settings: | + ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES + ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11 + +ssl_intermediate_settings: | + ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 + ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets + ssl-default-server-ciphers 
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 + ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets + +ssl_modern_settings: | + ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets + ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets + +haproxy_stats_port: "1984" +haproxy_monitor_port: "61313" +haproxy_ssh_port: "2985" diff --git a/ansible/group_vars/all/heat.yml b/ansible/group_vars/all/heat.yml new file mode 100644 index 0000000000..8a87682269 --- /dev/null +++ b/ansible/group_vars/all/heat.yml @@ -0,0 +1,17 @@ +--- +enable_heat: "{{ enable_openstack_core | bool }}" + +heat_internal_fqdn: "{{ kolla_internal_fqdn }}" +heat_external_fqdn: "{{ kolla_external_fqdn }}" +heat_internal_base_endpoint: "{{ heat_internal_fqdn | kolla_url(internal_protocol, heat_api_port) }}" +heat_public_base_endpoint: "{{ heat_external_fqdn | kolla_url(public_protocol, heat_api_public_port) }}" +heat_api_port: "8004" +heat_api_listen_port: "{{ heat_api_port }}" +heat_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_port }}" +heat_cfn_internal_fqdn: "{{ kolla_internal_fqdn }}" +heat_cfn_external_fqdn: "{{ kolla_external_fqdn }}" +heat_cfn_internal_base_endpoint: "{{ heat_cfn_internal_fqdn | kolla_url(internal_protocol, heat_api_cfn_port) }}" +heat_cfn_public_base_endpoint: "{{ heat_cfn_external_fqdn | 
kolla_url(public_protocol, heat_api_cfn_public_port) }}" +heat_api_cfn_port: "8000" +heat_api_cfn_listen_port: "{{ heat_api_cfn_port }}" +heat_api_cfn_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_cfn_port }}" diff --git a/ansible/group_vars/all/horizon.yml b/ansible/group_vars/all/horizon.yml new file mode 100644 index 0000000000..f010828271 --- /dev/null +++ b/ansible/group_vars/all/horizon.yml @@ -0,0 +1,49 @@ +--- +enable_horizon: "{{ enable_openstack_core | bool }}" +enable_horizon_blazar: "{{ enable_blazar | bool }}" +enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}" +enable_horizon_designate: "{{ enable_designate | bool }}" +enable_horizon_fwaas: "{{ enable_neutron_fwaas | bool }}" +enable_horizon_heat: "{{ enable_heat | bool }}" +enable_horizon_ironic: "{{ enable_ironic | bool }}" +enable_horizon_magnum: "{{ enable_magnum | bool }}" +enable_horizon_manila: "{{ enable_manila | bool }}" +enable_horizon_masakari: "{{ enable_masakari | bool }}" +enable_horizon_mistral: "{{ enable_mistral | bool }}" +enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}" +enable_horizon_octavia: "{{ enable_octavia | bool }}" +enable_horizon_tacker: "{{ enable_tacker | bool }}" +enable_horizon_trove: "{{ enable_trove | bool }}" +enable_horizon_venus: "{{ enable_venus | bool }}" +enable_horizon_watcher: "{{ enable_watcher | bool }}" +enable_horizon_zun: "{{ enable_zun | bool }}" + +####################### +# Horizon options +####################### +horizon_backend_database: false +horizon_keystone_multidomain: false + +# Enable deploying custom horizon policy files for services that don't have a +# horizon plugin but have a policy file. Override these when you have services +# not deployed by kolla-ansible but want custom policy files deployed for them +# in horizon. 
+enable_ceilometer_horizon_policy_file: "{{ enable_ceilometer }}" +enable_cinder_horizon_policy_file: "{{ enable_cinder }}" +enable_glance_horizon_policy_file: "{{ enable_glance }}" +enable_heat_horizon_policy_file: "{{ enable_heat }}" +enable_keystone_horizon_policy_file: "{{ enable_keystone }}" +enable_neutron_horizon_policy_file: "{{ enable_neutron }}" +enable_nova_horizon_policy_file: "{{ enable_nova }}" + +# TLS +horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}" + +# Ports +horizon_internal_fqdn: "{{ kolla_internal_fqdn }}" +horizon_external_fqdn: "{{ kolla_external_fqdn }}" +horizon_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port) }}" +horizon_public_endpoint: "{{ kolla_external_fqdn | kolla_url(public_protocol, horizon_tls_port if kolla_enable_tls_external | bool else horizon_port) }}" +horizon_port: "80" +horizon_tls_port: "443" +horizon_listen_port: "{{ horizon_tls_port if horizon_enable_tls_backend | bool else horizon_port }}" diff --git a/ansible/group_vars/all/influxdb.yml b/ansible/group_vars/all/influxdb.yml new file mode 100644 index 0000000000..c6ddb3c99d --- /dev/null +++ b/ansible/group_vars/all/influxdb.yml @@ -0,0 +1,12 @@ +--- +enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}" + +#################### +# InfluxDB options +#################### +influxdb_address: "{{ kolla_internal_fqdn }}" +influxdb_datadir_volume: "influxdb" + +influxdb_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, influxdb_http_port) }}" + +influxdb_http_port: "8086" diff --git a/ansible/group_vars/all/ironic.yml b/ansible/group_vars/all/ironic.yml new file mode 100644 index 0000000000..48e017975d --- /dev/null +++ b/ansible/group_vars/all/ironic.yml @@ -0,0 +1,33 @@ +--- +enable_ironic: "no" +enable_ironic_dnsmasq: "{{ enable_ironic | bool }}" +enable_ironic_neutron_agent: "no" 
+enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}" + +# Keystone user +ironic_keystone_user: "ironic" + +# Coordination backend +ironic_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" + +# Network interfaces +ironic_http_interface: "{{ api_interface }}" +ironic_tftp_interface: "{{ api_interface }}" + +# Address family +ironic_http_address_family: "{{ api_address_family }}" +ironic_tftp_address_family: "{{ api_address_family }}" + +# Addresses +ironic_http_interface_address: "{{ 'ironic_http' | kolla_address }}" +ironic_tftp_interface_address: "{{ 'ironic_tftp' | kolla_address }}" + +ironic_internal_fqdn: "{{ kolla_internal_fqdn }}" +ironic_external_fqdn: "{{ kolla_external_fqdn }}" +ironic_internal_endpoint: "{{ ironic_internal_fqdn | kolla_url(internal_protocol, ironic_api_port) }}" +ironic_public_endpoint: "{{ ironic_external_fqdn | kolla_url(public_protocol, ironic_api_public_port) }}" +ironic_api_port: "6385" +ironic_api_listen_port: "{{ ironic_api_port }}" +ironic_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ironic_api_port }}" +ironic_http_port: "8089" +ironic_prometheus_exporter_port: "9608" diff --git a/ansible/group_vars/all/iscsi.yml b/ansible/group_vars/all/iscsi.yml new file mode 100644 index 0000000000..20773bf867 --- /dev/null +++ b/ansible/group_vars/all/iscsi.yml @@ -0,0 +1,4 @@ +--- +enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}" + +iscsi_port: "3260" diff --git a/ansible/group_vars/all/keepalived.yml b/ansible/group_vars/all/keepalived.yml new file mode 100644 index 0000000000..b32dcdd4f3 --- /dev/null +++ b/ansible/group_vars/all/keepalived.yml @@ -0,0 +1,8 @@ +--- +enable_keepalived: "{{ enable_haproxy | bool }}" + +#################### +# keepalived options +#################### +# Arbitrary unique number from 0..255 
+keepalived_virtual_router_id: "51" diff --git a/ansible/group_vars/all/keystone.yml b/ansible/group_vars/all/keystone.yml new file mode 100644 index 0000000000..8a644bd82f --- /dev/null +++ b/ansible/group_vars/all/keystone.yml @@ -0,0 +1,86 @@ +--- +enable_keystone: "{{ enable_openstack_core | bool }}" +enable_keystone_federation: "{{ (keystone_identity_providers | length > 0) and (keystone_identity_mappings | length > 0) }}" + +#################### +# Keystone options +#################### +keystone_internal_fqdn: "{{ kolla_internal_fqdn }}" +keystone_external_fqdn: "{{ kolla_external_fqdn }}" + +keystone_internal_url: "{{ keystone_internal_fqdn | kolla_url(internal_protocol, keystone_internal_port) }}" +keystone_public_url: "{{ keystone_external_fqdn | kolla_url(public_protocol, keystone_public_port) }}" + +keystone_admin_user: "admin" +keystone_admin_project: "admin" + +# Whether or not to apply changes to service user passwords when services are +# reconfigured +update_keystone_service_user_passwords: true + +default_project_domain_name: "Default" +default_project_domain_id: "default" + +default_user_domain_name: "Default" +default_user_domain_id: "default" + +# Keystone fernet token expiry in seconds. Default is 1 day. +fernet_token_expiry: 86400 +# Keystone window to allow expired fernet tokens. Default is 2 days. +fernet_token_allow_expired_window: 172800 +# Keystone fernet key rotation interval in seconds. Default is sum of token +# expiry and allow expired window, 3 days. This ensures the minimum number +# of keys are active. If this interval is lower than the sum of the token +# expiry and allow expired window, multiple active keys will be necessary. 
+fernet_key_rotation_interval: "{{ fernet_token_expiry + fernet_token_allow_expired_window }}" + +keystone_default_user_role: "member" + +################################### +# Identity federation configuration +################################### +# Here we configure all of the IdPs meta information that will be required to implement identity federation with OpenStack Keystone. +# We require the administrator to enter the following metadata: +# * name (internal name of the IdP in Keystone); +# * openstack_domain (the domain in Keystone that the IdP belongs to) +# * protocol (the federated protocol used by the IdP; e.g. openid or saml); +# * identifier (the IdP identifier; e.g. https://accounts.google.com); +# * public_name (the public name that will be shown for users in Horizon); +# * attribute_mapping (the attribute mapping to be used for this IdP. This mapping is configured in the "keystone_identity_mappings" configuration); +# * metadata_folder (folder containing all the identity provider metadata as jsons named as the identifier without the protocol +# and with '/' escaped as %2F followed with '.provider' or '.client' or '.conf'; e.g. accounts.google.com.provider; PS, all .conf, +# .provider and .client jsons must be in the folder, even if you dont override any conf in the .conf json, you must leave it as an empty json '{}'); +# * certificate_file (the path to the Identity Provider certificate file, the file must be named as 'certificate-key-id.pem'; +# e.g. 
LRVweuT51StjMdsna59jKfB3xw0r8Iz1d1J1HeAbmlw.pem; You can find the key-id in the Identity provider '.well-known/openid-configuration' jwks_uri as kid); +# +# The IdPs meta information are to be presented to Kolla-Ansible as the following example: +# keystone_identity_providers: +# - name: "myidp1" +# openstack_domain: "my-domain" +# protocol: "openid" +# identifier: "https://accounts.google.com" +# public_name: "Authenticate via myidp1" +# attribute_mapping: "mappingId1" +# metadata_folder: "path/to/metadata/folder" +# certificate_file: "path/to/certificate/file.pem" +# +# We also need to configure the attribute mapping that is used by IdPs. +# The configuration of attribute mappings is a list of objects, where each +# object must have a 'name' (that mapps to the 'attribute_mapping' to the IdP +# object in the IdPs set), and the 'file' with a full qualified path to a mapping file. +# keystone_identity_mappings: +# - name: "mappingId1" +# file: "/full/qualified/path/to/mapping/json/file/to/mappingId1" +# - name: "mappingId2" +# file: "/full/qualified/path/to/mapping/json/file/to/mappingId2" +# - name: "mappingId3" +# file: "/full/qualified/path/to/mapping/json/file/to/mappingId3" +keystone_identity_providers: [] +keystone_identity_mappings: [] + +keystone_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else keystone_public_listen_port }}" +keystone_public_listen_port: "5000" +keystone_internal_port: "5000" +keystone_internal_listen_port: "{{ keystone_internal_port }}" +keystone_listen_port: "{{ keystone_internal_listen_port }}" +keystone_ssh_port: "8023" diff --git a/ansible/group_vars/all/kuryr.yml b/ansible/group_vars/all/kuryr.yml new file mode 100644 index 0000000000..69464a0319 --- /dev/null +++ b/ansible/group_vars/all/kuryr.yml @@ -0,0 +1,4 @@ +--- +enable_kuryr: "no" + +kuryr_port: "23750" diff --git a/ansible/group_vars/all/letsencrypt.yml b/ansible/group_vars/all/letsencrypt.yml new file mode 100644 
index 0000000000..50007e8859 --- /dev/null +++ b/ansible/group_vars/all/letsencrypt.yml @@ -0,0 +1,13 @@ +--- +enable_letsencrypt: "no" + +##################### +# ACME client options +##################### +acme_client_lego: "server lego {{ api_interface_address }}:{{ letsencrypt_webserver_port }}" +acme_client_servers: "{% set arr = [] %}{% if enable_letsencrypt | bool %}{{ arr.append(acme_client_lego) }}{% endif %}{{ arr }}" + +letsencrypt_webserver_port: "8081" +letsencrypt_managed_certs: "{{ '' if not enable_letsencrypt | bool else ('internal' if letsencrypt_internal_cert_server != '' and kolla_same_external_internal_vip | bool else ('internal,external' if letsencrypt_internal_cert_server != '' and letsencrypt_external_cert_server != '' else ('internal' if letsencrypt_internal_cert_server != '' else ('external' if letsencrypt_external_cert_server != '' and not kolla_same_external_internal_vip | bool else '')))) }}" +letsencrypt_external_cert_server: "https://acme-v02.api.letsencrypt.org/directory" +letsencrypt_internal_cert_server: "" diff --git a/ansible/group_vars/all/loadbalancer.yml b/ansible/group_vars/all/loadbalancer.yml new file mode 100644 index 0000000000..ef47f25553 --- /dev/null +++ b/ansible/group_vars/all/loadbalancer.yml @@ -0,0 +1,2 @@ +--- +enable_loadbalancer: "{{ enable_haproxy | bool or enable_keepalived | bool or enable_proxysql | bool }}" diff --git a/ansible/group_vars/all/magnum.yml b/ansible/group_vars/all/magnum.yml new file mode 100644 index 0000000000..279e17d986 --- /dev/null +++ b/ansible/group_vars/all/magnum.yml @@ -0,0 +1,10 @@ +--- +enable_magnum: "no" + +magnum_internal_fqdn: "{{ kolla_internal_fqdn }}" +magnum_external_fqdn: "{{ kolla_external_fqdn }}" +magnum_internal_base_endpoint: "{{ magnum_internal_fqdn | kolla_url(internal_protocol, magnum_api_port) }}" +magnum_public_base_endpoint: "{{ magnum_external_fqdn | kolla_url(public_protocol, magnum_api_public_port) }}" +magnum_api_port: "9511" +magnum_api_public_port: "{{ 
haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else magnum_api_port }}" +magnum_api_listen_port: "{{ magnum_api_port }}" diff --git a/ansible/group_vars/all/manila.yml b/ansible/group_vars/all/manila.yml new file mode 100644 index 0000000000..526707331b --- /dev/null +++ b/ansible/group_vars/all/manila.yml @@ -0,0 +1,18 @@ +--- +enable_manila: "no" +enable_manila_backend_generic: "no" +enable_manila_backend_hnas: "no" +enable_manila_backend_cephfs_native: "no" +enable_manila_backend_cephfs_nfs: "no" +enable_manila_backend_glusterfs_nfs: "no" +enable_manila_backend_flashblade: "no" + +ceph_manila_user: "manila" + +manila_internal_fqdn: "{{ kolla_internal_fqdn }}" +manila_external_fqdn: "{{ kolla_external_fqdn }}" +manila_internal_base_endpoint: "{{ manila_internal_fqdn | kolla_url(internal_protocol, manila_api_port) }}" +manila_public_base_endpoint: "{{ manila_external_fqdn | kolla_url(public_protocol, manila_api_public_port) }}" +manila_api_port: "8786" +manila_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else manila_api_port }}" +manila_api_listen_port: "{{ manila_api_port }}" diff --git a/ansible/group_vars/all/mariadb.yml b/ansible/group_vars/all/mariadb.yml new file mode 100644 index 0000000000..ea6c9fda9c --- /dev/null +++ b/ansible/group_vars/all/mariadb.yml @@ -0,0 +1,37 @@ +--- +enable_mariadb: "yes" +enable_mariabackup: "no" + +############################################# +# MariaDB component-specific database details +############################################# +# Whether to configure haproxy to load balance +# the external MariaDB server(s) +enable_external_mariadb_load_balancer: "no" +# Whether to use pre-configured databases / users +use_preconfigured_databases: "no" +# whether to use a common, preconfigured user +# for all component databases +use_common_mariadb_user: "no" + +mariadb_port: "{{ database_port }}" +mariadb_wsrep_port: "4567" 
+mariadb_ist_port: "4568" +mariadb_sst_port: "4444" +mariadb_clustercheck_port: "4569" +mariadb_enable_tls_backend: "{{ database_enable_tls_backend }}" + +mariadb_monitor_user: "{{ 'monitor' if enable_proxysql | bool else 'haproxy' }}" + +mariadb_datadir_volume: "mariadb" + +mariadb_default_database_shard_id: 0 +mariadb_default_database_shard_hosts: "{% set default_shard = [] %}{% for host in groups['mariadb'] %}{% if hostvars[host]['mariadb_shard_id'] is not defined or hostvars[host]['mariadb_shard_id'] == mariadb_default_database_shard_id %}{{ default_shard.append(host) }}{% endif %}{% endfor %}{{ default_shard }}" +mariadb_shard_id: "{{ mariadb_default_database_shard_id }}" +mariadb_shard_name: "shard_{{ mariadb_shard_id }}" +mariadb_shard_group: "mariadb_{{ mariadb_shard_name }}" +mariadb_loadbalancer: "{{ 'proxysql' if enable_proxysql | bool else 'haproxy' }}" +mariadb_backup_target: "{{ 'active' if mariadb_loadbalancer == 'haproxy' else 'replica' }}" +mariadb_shard_root_user_prefix: "root_shard_" +mariadb_shard_backup_user_prefix: "backup_shard_" +mariadb_shards_info: "{{ groups['mariadb'] | database_shards_info() }}" diff --git a/ansible/group_vars/all/masakari.yml b/ansible/group_vars/all/masakari.yml new file mode 100644 index 0000000000..bef390e719 --- /dev/null +++ b/ansible/group_vars/all/masakari.yml @@ -0,0 +1,13 @@ +--- +enable_masakari: "no" +enable_masakari_instancemonitor: "{{ enable_masakari | bool }}" +enable_masakari_hostmonitor: "{{ enable_masakari | bool }}" + +masakari_internal_fqdn: "{{ kolla_internal_fqdn }}" +masakari_external_fqdn: "{{ kolla_external_fqdn }}" +masakari_internal_endpoint: "{{ masakari_internal_fqdn | kolla_url(internal_protocol, masakari_api_port) }}" +masakari_public_endpoint: "{{ masakari_external_fqdn | kolla_url(public_protocol, masakari_api_public_port) }}" +masakari_api_port: "15868" +masakari_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else 
masakari_api_port }}" +masakari_api_listen_port: "{{ masakari_api_port }}" +masakari_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" diff --git a/ansible/group_vars/all/memcached.yml b/ansible/group_vars/all/memcached.yml new file mode 100644 index 0000000000..282138b418 --- /dev/null +++ b/ansible/group_vars/all/memcached.yml @@ -0,0 +1,10 @@ +--- +enable_memcached: "yes" + +# NOTE: Most memcached clients handle load-balancing via client side +# hashing (consistent or not) logic, so going under the covers and messing +# with things that the clients are not aware of is generally wrong +enable_haproxy_memcached: "no" + +memcached_port: "11211" +memcache_security_strategy: "ENCRYPT" diff --git a/ansible/group_vars/all/mistral.yml b/ansible/group_vars/all/mistral.yml new file mode 100644 index 0000000000..2e72f3cc60 --- /dev/null +++ b/ansible/group_vars/all/mistral.yml @@ -0,0 +1,10 @@ +--- +enable_mistral: "no" + +mistral_internal_fqdn: "{{ kolla_internal_fqdn }}" +mistral_external_fqdn: "{{ kolla_external_fqdn }}" +mistral_internal_base_endpoint: "{{ mistral_internal_fqdn | kolla_url(internal_protocol, mistral_api_port) }}" +mistral_public_base_endpoint: "{{ mistral_external_fqdn | kolla_url(public_protocol, mistral_api_public_port) }}" +mistral_api_port: "8989" +mistral_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else mistral_api_port }}" +mistral_api_listen_port: "{{ mistral_api_port }}" diff --git a/ansible/group_vars/all/multipathd.yml b/ansible/group_vars/all/multipathd.yml new file mode 100644 index 0000000000..bae55e51ca --- /dev/null +++ b/ansible/group_vars/all/multipathd.yml @@ -0,0 +1,2 @@ +--- +enable_multipathd: "no" diff --git a/ansible/group_vars/all/neutron.yml b/ansible/group_vars/all/neutron.yml new file mode 100644 index 0000000000..0ef47e2edb --- /dev/null +++ b/ansible/group_vars/all/neutron.yml @@ -0,0 +1,75 @@ +--- 
+enable_neutron: "{{ enable_openstack_core | bool }}" + +enable_neutron_vpnaas: "no" +enable_neutron_sriov: "no" +enable_neutron_mlnx: "no" +enable_neutron_dvr: "no" +enable_neutron_fwaas: "no" +enable_neutron_qos: "no" +enable_neutron_agent_ha: "no" +enable_neutron_bgp_dragent: "no" +enable_neutron_provider_networks: "no" +enable_neutron_segments: "no" +enable_neutron_packet_logging: "no" +enable_neutron_sfc: "no" +enable_neutron_taas: "no" +enable_neutron_trunk: "no" +enable_neutron_metering: "no" +enable_neutron_infoblox_ipam_agent: "no" +enable_neutron_port_forwarding: "no" +neutron_enable_ovn_agent: "no" + +neutron_keystone_user: "neutron" + +# Valid options are [ openvswitch, ovn, linuxbridge ] +# Do note linuxbridge is *EXPERIMENTAL* in Neutron since Zed and it requires extra tweaks to config to be usable. +# For details, see: https://docs.openstack.org/neutron/latest/admin/config-experimental-framework.html +neutron_plugin_agent: "openvswitch" + +# Valid options are [ internal, infoblox ] +neutron_ipam_driver: "internal" + +neutron_external_interface: "eth1" + +####################### +# Neutron options +####################### +neutron_bgp_router_id: "1.1.1.1" +neutron_bridge_name: "{{ 'br_dpdk' if enable_ovs_dpdk | bool else 'br-ex' }}" +neutron_physical_networks: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index }}{% if not loop.last %},{% endif %}{% endfor %}" +# Comma-separated type of enabled ml2 type drivers +neutron_type_drivers: "flat,vlan,vxlan{% if neutron_plugin_agent == 'ovn' %},geneve{% endif %}" +# Comma-separated types of tenant networks (should be listed in 'neutron_type_drivers') +# NOTE: for ironic this list should also contain 'flat' +neutron_tenant_network_types: "{% if neutron_plugin_agent == 'ovn' %}geneve{% else %}vxlan{% endif %}" + +# valid values: ["dvr", "dvr_no_external"] +neutron_compute_dvr_mode: "dvr" +computes_need_external_bridge: "{{ (enable_neutron_dvr | bool and neutron_compute_dvr_mode == 'dvr') or 
enable_neutron_provider_networks | bool or neutron_ovn_distributed_fip | bool }}" + +# Default DNS resolvers for virtual networks +neutron_dnsmasq_dns_servers: "1.1.1.1,8.8.8.8,8.8.4.4" + +# Set legacy iptables to allow kernels not supporting iptables-nft +neutron_legacy_iptables: "no" + +# Enable distributed floating ip for OVN deployments +neutron_ovn_distributed_fip: "no" + +# SRIOV physnet:interface mappings when SRIOV is enabled +# "sriovnet1" and tunnel_interface used here as placeholders +neutron_sriov_physnet_mappings: + sriovnet1: "{{ tunnel_interface }}" +neutron_enable_tls_backend: "{{ kolla_enable_tls_backend }}" + +# Set OVN network availability zones +neutron_ovn_availability_zones: [] + +neutron_internal_fqdn: "{{ kolla_internal_fqdn }}" +neutron_external_fqdn: "{{ kolla_external_fqdn }}" +neutron_internal_endpoint: "{{ neutron_internal_fqdn | kolla_url(internal_protocol, neutron_server_port) }}" +neutron_public_endpoint: "{{ neutron_external_fqdn | kolla_url(public_protocol, neutron_server_public_port) }}" +neutron_server_port: "9696" +neutron_server_listen_port: "{{ neutron_server_port }}" +neutron_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else neutron_server_port }}" diff --git a/ansible/group_vars/all/nova.yml b/ansible/group_vars/all/nova.yml new file mode 100644 index 0000000000..48811be629 --- /dev/null +++ b/ansible/group_vars/all/nova.yml @@ -0,0 +1,69 @@ +--- +enable_cells: "no" +enable_nova: "{{ enable_openstack_core | bool }}" +enable_nova_libvirt_container: "{{ nova_compute_virt_type in ['kvm', 'qemu'] }}" +enable_nova_serialconsole_proxy: "no" +enable_nova_ssh: "yes" + +####################### +# Nova options +####################### +nova_backend_ceph: "no" +nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}" +# Valid options are [ kvm, qemu ] +nova_compute_virt_type: "kvm" +nova_instance_datadir_volume: "{{ 'nova_compute' if 
enable_nova_libvirt_container | bool else '/var/lib/nova' }}" +nova_safety_upgrade: "no" +# Valid options are [ none, novnc, spice ] +nova_console: "novnc" + +####################### +# Nova Database +####################### +nova_database_shard_id: "{{ mariadb_default_database_shard_id | int }}" +nova_cell0_database_shard_id: "{{ nova_database_shard_id | int }}" + +# These are kept for backwards compatibility, as cell0 references them. +nova_database_name: "nova" +nova_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}nova{% endif %}" +nova_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}" + +nova_cell0_database_name: "{{ nova_database_name }}_cell0" +nova_cell0_database_user: "{{ nova_database_user }}" +nova_cell0_database_address: "{{ nova_database_address }}" +nova_cell0_database_password: "{{ nova_database_password }}" + +# Nova fake driver and the number of fake driver per compute node +enable_nova_fake: "no" +num_nova_fake_per_node: 5 + +migration_interface: "{{ api_interface }}" +migration_interface_address: "{{ 'migration' | kolla_address }}" +migration_address_family: "{{ api_address_family }}" + +nova_keystone_user: "nova" + +nova_internal_fqdn: "{{ kolla_internal_fqdn }}" +nova_external_fqdn: "{{ kolla_external_fqdn }}" +nova_internal_base_endpoint: "{{ nova_internal_fqdn | kolla_url(internal_protocol, nova_api_port) }}" +nova_public_base_endpoint: "{{ nova_external_fqdn | kolla_url(public_protocol, nova_api_public_port) }}" +nova_api_port: "8774" +nova_api_listen_port: "{{ nova_api_port }}" +nova_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_api_port }}" +nova_metadata_internal_fqdn: "{{ kolla_internal_fqdn }}" +nova_metadata_external_fqdn: "{{ kolla_external_fqdn }}" +nova_metadata_port: "8775" +nova_metadata_listen_port: "{{ nova_metadata_port }}" 
+nova_novncproxy_fqdn: "{{ kolla_external_fqdn }}" +nova_novncproxy_port: "6080" +nova_novncproxy_listen_port: "{{ nova_novncproxy_port }}" +nova_novncproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_novncproxy_port }}" +nova_spicehtml5proxy_fqdn: "{{ kolla_external_fqdn }}" +nova_spicehtml5proxy_port: "6082" +nova_spicehtml5proxy_listen_port: "{{ nova_spicehtml5proxy_port }}" +nova_spicehtml5proxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_spicehtml5proxy_port }}" +nova_serialproxy_fqdn: "{{ kolla_external_fqdn }}" +nova_serialproxy_port: "6083" +nova_serialproxy_listen_port: "{{ nova_serialproxy_port }}" +nova_serialproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_serialproxy_port }}" +nova_serialproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}" diff --git a/ansible/group_vars/all/octavia.yml b/ansible/group_vars/all/octavia.yml new file mode 100644 index 0000000000..c0a152cf4c --- /dev/null +++ b/ansible/group_vars/all/octavia.yml @@ -0,0 +1,33 @@ +--- +enable_octavia: "no" +enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}" +enable_octavia_jobboard: "{{ enable_octavia | bool and 'amphora' in octavia_provider_drivers }}" + +########## +# Octavia +########## +# Whether to run Kolla-Ansible's automatic configuration for Octavia. +# NOTE: if you upgrade from Ussuri, you must set `octavia_auto_configure` to `no` +# and keep your other Octavia config like before. +octavia_auto_configure: "{{ 'amphora' in octavia_provider_drivers }}" + +# Octavia network type options are [ tenant, provider ] +# * tenant indicates that we will create a tenant network and a network +# interface on the Octavia worker nodes for communication with amphorae. 
+# * provider indicates that we will create a flat or vlan provider network. +# In this case octavia_network_interface should be set to a network interface +# on the Octavia worker nodes on the same provider network. +octavia_network_type: "provider" + +octavia_network_interface: "{{ 'o-hm0' if octavia_network_type == 'tenant' else api_interface }}" +octavia_network_address_family: "{{ api_address_family }}" +octavia_network_interface_address: "{{ 'octavia_network' | kolla_address }}" + +octavia_internal_fqdn: "{{ kolla_internal_fqdn }}" +octavia_external_fqdn: "{{ kolla_external_fqdn }}" +octavia_internal_endpoint: "{{ octavia_internal_fqdn | kolla_url(internal_protocol, octavia_api_port) }}" +octavia_public_endpoint: "{{ octavia_external_fqdn | kolla_url(public_protocol, octavia_api_public_port) }}" +octavia_api_port: "9876" +octavia_api_listen_port: "{{ octavia_api_port }}" +octavia_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else octavia_api_port }}" +octavia_health_manager_port: "5555" diff --git a/ansible/group_vars/all/opensearch.yml b/ansible/group_vars/all/opensearch.yml new file mode 100644 index 0000000000..21c208c67e --- /dev/null +++ b/ansible/group_vars/all/opensearch.yml @@ -0,0 +1,33 @@ +--- +#################### +# Logging options +#################### + +# NOTE: If an external ElasticSearch cluster address is configured, all +# services with ElasticSearch endpoints should be configured to log +# to the external cluster by default. This is for backwards compatibility. 
+opensearch_address: "{{ elasticsearch_address if elasticsearch_address is defined else kolla_internal_fqdn }}" +enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'opensearch') }}" +enable_opensearch_dashboards: "{{ enable_opensearch | bool }}" +enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}" + +####################### +## Opensearch Options +######################## +opensearch_datadir_volume: "opensearch" + +opensearch_internal_endpoint: "{{ opensearch_address | kolla_url(internal_protocol, opensearch_port) }}" +opensearch_dashboards_internal_fqdn: "{{ kolla_internal_fqdn }}" +opensearch_dashboards_external_fqdn: "{{ kolla_external_fqdn }}" +opensearch_dashboards_internal_endpoint: "{{ opensearch_dashboards_internal_fqdn | kolla_url(internal_protocol, opensearch_dashboards_port) }}" +opensearch_dashboards_external_endpoint: "{{ opensearch_dashboards_external_fqdn | kolla_url(public_protocol, opensearch_dashboards_port_external) }}" +opensearch_dashboards_user: "opensearch" +opensearch_log_index_prefix: "{{ kibana_log_prefix if kibana_log_prefix is defined else 'flog' }}" + +# NOTE: If an external ElasticSearch cluster port is specified, +# we default to using that port in services with ElasticSearch +# endpoints. This is for backwards compatibility. 
+opensearch_port: "{{ elasticsearch_port | default('9200') }}" +opensearch_dashboards_port: "5601" +opensearch_dashboards_port_external: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else opensearch_dashboards_port }}" +opensearch_dashboards_listen_port: "{{ opensearch_dashboards_port }}" diff --git a/ansible/group_vars/all/openvswitch.yml b/ansible/group_vars/all/openvswitch.yml new file mode 100644 index 0000000000..68a813b43b --- /dev/null +++ b/ansible/group_vars/all/openvswitch.yml @@ -0,0 +1,14 @@ +--- +enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}" +enable_ovs_dpdk: "no" + +ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}" + +tunnel_address_family: "{{ network_address_family }}" +dpdk_tunnel_address_family: "{{ network_address_family }}" +tunnel_interface: "{{ network_interface }}" +dpdk_tunnel_interface: "{{ neutron_external_interface }}" +tunnel_interface_address: "{{ 'tunnel' | kolla_address }}" +dpdk_tunnel_interface_address: "{{ 'dpdk_tunnel' | kolla_address }}" + +ovsdb_port: "6640" diff --git a/ansible/group_vars/all/ovn.yml b/ansible/group_vars/all/ovn.yml new file mode 100644 index 0000000000..dc9b4b6e8d --- /dev/null +++ b/ansible/group_vars/all/ovn.yml @@ -0,0 +1,16 @@ +--- +enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}" +enable_ovn_sb_db_relay: "{{ enable_ovn | bool }}" + +ovn_nb_db_port: "6641" +ovn_sb_db_port: "6642" +# OVN SB Relay related variables +ovn_sb_db_relay_count: "{{ ((groups['ovn-controller'] | length) / ovn_sb_db_relay_compute_per_relay | int) | round(0, 'ceil') | int }}" +ovn_sb_db_relay_compute_per_relay: "50" +ovn_sb_db_relay_port_prefix: "1664" +ovn_sb_db_relay_port: "{{ ovn_sb_db_relay_port_prefix ~ ovn_sb_db_relay_client_group_id }}" +ovn_sb_db_relay_client_group_id: "{{ range(1, ovn_sb_db_relay_count | int + 1) | random(seed=inventory_hostname) }}" +ovn_nb_connection: "{% for host in 
groups['ovn-nb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_nb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}" +ovn_sb_connection: "{{ ovn_sb_connection_relay if enable_ovn_sb_db_relay | bool else ovn_sb_connection_no_relay }}" +ovn_sb_connection_no_relay: "{% for host in groups['ovn-sb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_sb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}" +ovn_sb_connection_relay: "{% for host in groups['ovn-sb-db-relay'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_sb_db_relay_port }}{% if not loop.last %},{% endif %}{% endfor %}" diff --git a/ansible/group_vars/all/placement.yml b/ansible/group_vars/all/placement.yml new file mode 100644 index 0000000000..4949393eaa --- /dev/null +++ b/ansible/group_vars/all/placement.yml @@ -0,0 +1,13 @@ +--- +enable_placement: "{{ enable_nova | bool or enable_zun | bool }}" + +placement_keystone_user: "placement" + +placement_internal_fqdn: "{{ kolla_internal_fqdn }}" +placement_external_fqdn: "{{ kolla_external_fqdn }}" +placement_internal_endpoint: "{{ placement_internal_fqdn | kolla_url(internal_protocol, placement_api_port) }}" +placement_public_endpoint: "{{ placement_external_fqdn | kolla_url(public_protocol, placement_api_public_port) }}" +# Default Placement API port of 8778 already in use +placement_api_port: "8780" +placement_api_listen_port: "{{ placement_api_port }}" +placement_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else placement_api_port }}" diff --git a/ansible/group_vars/all/prometheus.yml b/ansible/group_vars/all/prometheus.yml new file mode 100644 index 0000000000..e372bf6429 --- /dev/null +++ b/ansible/group_vars/all/prometheus.yml @@ -0,0 +1,78 @@ +--- +enable_prometheus: "no" + +############ +# Prometheus +############ +enable_prometheus_server: "{{ enable_prometheus | bool }}" 
+enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}" +enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}" +enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}" +enable_prometheus_memcached_exporter: "{{ enable_memcached | bool }}" +enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}" +enable_prometheus_fluentd_integration: "{{ enable_prometheus | bool and enable_fluentd | bool }}" +enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}" +enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bool }}" +enable_prometheus_ceph_mgr_exporter: "no" +enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}" +enable_prometheus_openstack_exporter_external: "no" +enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_opensearch | bool }}" +enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}" +enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}" +enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}" +enable_prometheus_etcd_integration: "{{ enable_prometheus | bool and enable_etcd | bool }}" +enable_prometheus_proxysql_exporter: "{{ enable_prometheus | bool and enable_proxysql | bool }}" + +prometheus_alertmanager_user: "admin" +prometheus_ceph_exporter_interval: "{{ prometheus_scrape_interval }}" +prometheus_grafana_user: "grafana" +prometheus_haproxy_user: "haproxy" +prometheus_skyline_user: "skyline" +prometheus_scrape_interval: "60s" +prometheus_openstack_exporter_interval: "{{ prometheus_scrape_interval }}" +prometheus_openstack_exporter_timeout: "45s" +prometheus_elasticsearch_exporter_interval: "{{ prometheus_scrape_interval }}" +prometheus_cmdline_extras: +prometheus_ceph_mgr_exporter_endpoints: [] +prometheus_openstack_exporter_endpoint_type: "internal" +prometheus_openstack_exporter_compute_api_version: 
"latest" +prometheus_libvirt_exporter_interval: "60s" + +prometheus_internal_fqdn: "{{ kolla_internal_fqdn }}" +prometheus_external_fqdn: "{{ kolla_external_fqdn }}" +prometheus_internal_endpoint: "{{ prometheus_internal_fqdn | kolla_url(internal_protocol, prometheus_port) }}" +prometheus_public_endpoint: "{{ prometheus_external_fqdn | kolla_url(public_protocol, prometheus_public_port) }}" +prometheus_port: "9091" +prometheus_listen_port: "{{ prometheus_port }}" +prometheus_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_port }}" +prometheus_node_exporter_port: "9100" +prometheus_mysqld_exporter_port: "9104" +prometheus_haproxy_exporter_port: "9101" +prometheus_memcached_exporter_port: "9150" +prometheus_rabbitmq_exporter_port: "{{ rabbitmq_prometheus_port }}" +# Default cadvisor port of 8080 already in use +prometheus_cadvisor_port: "18080" +prometheus_fluentd_integration_port: "24231" +prometheus_libvirt_exporter_port: "9177" +prometheus_etcd_integration_port: "{{ etcd_client_port }}" +proxysql_prometheus_exporter_port: "6070" + +# Prometheus alertmanager ports +prometheus_alertmanager_internal_fqdn: "{{ kolla_internal_fqdn }}" +prometheus_alertmanager_external_fqdn: "{{ kolla_external_fqdn }}" +prometheus_alertmanager_internal_endpoint: "{{ prometheus_alertmanager_internal_fqdn | kolla_url(internal_protocol, prometheus_alertmanager_port) }}" +prometheus_alertmanager_public_endpoint: "{{ prometheus_alertmanager_external_fqdn | kolla_url(public_protocol, prometheus_alertmanager_public_port) }}" +prometheus_alertmanager_port: "9093" +prometheus_alertmanager_cluster_port: "9094" +prometheus_alertmanager_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_alertmanager_port }}" +prometheus_alertmanager_listen_port: "{{ prometheus_alertmanager_port }}" + +# Prometheus openstack-exporter ports +prometheus_openstack_exporter_port: 
"9198" +prometheus_elasticsearch_exporter_port: "9108" + +# Prometheus blackbox-exporter ports +prometheus_blackbox_exporter_port: "9115" + +# Prometheus instance label to use for metrics +prometheus_instance_label: diff --git a/ansible/group_vars/all/proxysql.yml b/ansible/group_vars/all/proxysql.yml new file mode 100644 index 0000000000..822483d094 --- /dev/null +++ b/ansible/group_vars/all/proxysql.yml @@ -0,0 +1,4 @@ +--- +enable_proxysql: "yes" + +proxysql_admin_port: "6032" diff --git a/ansible/group_vars/all/rabbitmq.yml b/ansible/group_vars/all/rabbitmq.yml new file mode 100644 index 0000000000..4773c14445 --- /dev/null +++ b/ansible/group_vars/all/rabbitmq.yml @@ -0,0 +1,19 @@ +--- +enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'no' }}" + +#################### +# RabbitMQ options +#################### +rabbitmq_user: "openstack" +rabbitmq_monitoring_user: "" +# Whether to enable TLS encryption for RabbitMQ client-server communication. +rabbitmq_enable_tls: "no" +# CA certificate bundle in RabbitMQ container. 
+rabbitmq_cacert: "/etc/ssl/certs/{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.crt' }}" +rabbitmq_datadir_volume: "rabbitmq" + +rabbitmq_port: "{{ '5671' if rabbitmq_enable_tls | bool else '5672' }}" +rabbitmq_management_port: "15672" +rabbitmq_cluster_port: "25672" +rabbitmq_epmd_port: "4369" +rabbitmq_prometheus_port: "15692" diff --git a/ansible/group_vars/all/redis.yml b/ansible/group_vars/all/redis.yml new file mode 100644 index 0000000000..bea98be6e2 --- /dev/null +++ b/ansible/group_vars/all/redis.yml @@ -0,0 +1,11 @@ +--- +enable_redis: "no" + +#################### +# Redis options +#################### +redis_connection_string: "redis://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}default:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}?sentinel=kolla{% else %}&sentinel_fallback={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}{{ redis_connection_string_extras }}" +redis_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes" + +redis_port: "6379" +redis_sentinel_port: "26379" diff --git a/ansible/group_vars/all/s3.yml b/ansible/group_vars/all/s3.yml new file mode 100644 index 0000000000..c42e45931b --- /dev/null +++ b/ansible/group_vars/all/s3.yml @@ -0,0 +1,7 @@ +--- +############# +# Common options for S3 Cinder Backup and Glance S3 backend. 
+s3_url: +s3_bucket: +s3_access_key: +s3_secret_key: diff --git a/ansible/group_vars/all/skyline.yml b/ansible/group_vars/all/skyline.yml new file mode 100644 index 0000000000..56225268a0 --- /dev/null +++ b/ansible/group_vars/all/skyline.yml @@ -0,0 +1,18 @@ +--- +enable_skyline: "no" + +skyline_apiserver_internal_fqdn: "{{ kolla_internal_fqdn }}" +skyline_apiserver_external_fqdn: "{{ kolla_external_fqdn }}" +skyline_apiserver_internal_endpoint: "{{ skyline_apiserver_internal_fqdn | kolla_url(internal_protocol, skyline_apiserver_port) }}" +skyline_apiserver_public_endpoint: "{{ skyline_apiserver_external_fqdn | kolla_url(public_protocol, skyline_apiserver_public_port) }}" +skyline_console_internal_fqdn: "{{ kolla_internal_fqdn }}" +skyline_console_external_fqdn: "{{ kolla_external_fqdn }}" +skyline_console_internal_endpoint: "{{ skyline_console_internal_fqdn | kolla_url(internal_protocol, skyline_console_port) }}" +skyline_console_public_endpoint: "{{ skyline_console_external_fqdn | kolla_url(public_protocol, skyline_console_public_port) }}" +skyline_apiserver_port: "9998" +skyline_apiserver_listen_port: "{{ skyline_apiserver_port }}" +skyline_apiserver_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_apiserver_port }}" +skyline_console_port: "9999" +skyline_console_listen_port: "{{ skyline_console_port }}" +skyline_console_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_console_port }}" +skyline_enable_sso: "{{ enable_keystone_federation | bool and keystone_identity_providers | selectattr('protocol', 'equalto', 'openid') | list | count > 0 }}" diff --git a/ansible/group_vars/all/tacker.yml b/ansible/group_vars/all/tacker.yml new file mode 100644 index 0000000000..936f5fbeab --- /dev/null +++ b/ansible/group_vars/all/tacker.yml @@ -0,0 +1,10 @@ +--- +enable_tacker: "no" + +tacker_internal_fqdn: "{{ kolla_internal_fqdn }}" 
+tacker_external_fqdn: "{{ kolla_external_fqdn }}" +tacker_internal_endpoint: "{{ tacker_internal_fqdn | kolla_url(internal_protocol, tacker_server_port) }}" +tacker_public_endpoint: "{{ tacker_external_fqdn | kolla_url(public_protocol, tacker_server_public_port) }}" +tacker_server_port: "9890" +tacker_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else tacker_server_port }}" +tacker_server_listen_port: "{{ tacker_server_port }}" diff --git a/ansible/group_vars/all/telegraf.yml b/ansible/group_vars/all/telegraf.yml new file mode 100644 index 0000000000..1ad350fd98 --- /dev/null +++ b/ansible/group_vars/all/telegraf.yml @@ -0,0 +1,9 @@ +--- +enable_telegraf: "no" + +########## +# Telegraf +########## +# Configure telegraf to use the docker daemon itself as an input for +# telemetry data. +telegraf_enable_docker_input: "no" diff --git a/ansible/group_vars/all/trove.yml b/ansible/group_vars/all/trove.yml new file mode 100644 index 0000000000..420efad5e5 --- /dev/null +++ b/ansible/group_vars/all/trove.yml @@ -0,0 +1,11 @@ +--- +enable_trove: "no" +enable_trove_singletenant: "no" + +trove_internal_fqdn: "{{ kolla_internal_fqdn }}" +trove_external_fqdn: "{{ kolla_external_fqdn }}" +trove_internal_base_endpoint: "{{ trove_internal_fqdn | kolla_url(internal_protocol, trove_api_port) }}" +trove_public_base_endpoint: "{{ trove_external_fqdn | kolla_url(public_protocol, trove_api_public_port) }}" +trove_api_port: "8779" +trove_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else trove_api_port }}" +trove_api_listen_port: "{{ trove_api_port }}" diff --git a/ansible/group_vars/all/venus.yml b/ansible/group_vars/all/venus.yml new file mode 100644 index 0000000000..85908647b1 --- /dev/null +++ b/ansible/group_vars/all/venus.yml @@ -0,0 +1,10 @@ +--- +enable_venus: "no" + +venus_internal_fqdn: "{{ kolla_internal_fqdn }}" +venus_external_fqdn: "{{ 
kolla_external_fqdn }}" +venus_internal_endpoint: "{{ venus_internal_fqdn | kolla_url(internal_protocol, venus_api_port) }}" +venus_public_endpoint: "{{ venus_external_fqdn | kolla_url(public_protocol, venus_api_public_port) }}" +venus_api_port: "10010" +venus_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else venus_api_port }}" +venus_api_listen_port: "{{ venus_api_port }}" diff --git a/ansible/group_vars/all/watcher.yml b/ansible/group_vars/all/watcher.yml new file mode 100644 index 0000000000..87d919c8f6 --- /dev/null +++ b/ansible/group_vars/all/watcher.yml @@ -0,0 +1,10 @@ +--- +enable_watcher: "no" + +watcher_internal_fqdn: "{{ kolla_internal_fqdn }}" +watcher_external_fqdn: "{{ kolla_external_fqdn }}" +watcher_internal_endpoint: "{{ watcher_internal_fqdn | kolla_url(internal_protocol, watcher_api_port) }}" +watcher_public_endpoint: "{{ watcher_external_fqdn | kolla_url(public_protocol, watcher_api_public_port) }}" +watcher_api_port: "9322" +watcher_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else watcher_api_port }}" +watcher_api_listen_port: "{{ watcher_api_port }}" diff --git a/ansible/group_vars/all/zun.yml b/ansible/group_vars/all/zun.yml new file mode 100644 index 0000000000..ed10ea5462 --- /dev/null +++ b/ansible/group_vars/all/zun.yml @@ -0,0 +1,31 @@ +--- +enable_zun: "no" + +# Extra docker options for Zun +docker_configure_for_zun: "no" +docker_zun_options: -H tcp://{{ api_interface_address | put_address_in_context('url') }}:2375 +docker_zun_config: {} + +# Extra containerd options for Zun +containerd_configure_for_zun: "no" + +# Enable Ceph backed Cinder Volumes for zun +zun_configure_for_cinder_ceph: "no" + +# 42463 is the static group id of the zun user in the Zun image. +# If users customize this value on building the Zun images, +# they need to change this config accordingly. 
+containerd_grpc_gid: 42463 + +zun_api_port: "9517" +zun_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else zun_api_port }}" +zun_api_listen_port: "{{ zun_api_port }}" +zun_wsproxy_internal_fqdn: "{{ kolla_internal_fqdn }}" +zun_wsproxy_external_fqdn: "{{ kolla_external_fqdn }}" +zun_wsproxy_port: "6784" +zun_wsproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}" +zun_cni_daemon_port: "9036" +zun_internal_fqdn: "{{ kolla_internal_fqdn }}" +zun_external_fqdn: "{{ kolla_external_fqdn }}" +zun_internal_base_endpoint: "{{ zun_internal_fqdn | kolla_url(internal_protocol, zun_api_port) }}" +zun_public_base_endpoint: "{{ zun_external_fqdn | kolla_url(public_protocol, zun_api_public_port) }}" diff --git a/ansible/group_vars/baremetal.yml b/ansible/group_vars/baremetal/ansible-python-interpreter.yml similarity index 100% rename from ansible/group_vars/baremetal.yml rename to ansible/group_vars/baremetal/ansible-python-interpreter.yml diff --git a/ansible/module_utils/kolla_podman_worker.py b/ansible/module_utils/kolla_podman_worker.py index 2f323eff24..f26846dedc 100644 --- a/ansible/module_utils/kolla_podman_worker.py +++ b/ansible/module_utils/kolla_podman_worker.py @@ -220,7 +220,7 @@ def parse_dimensions(self, dimensions): # NOTE(m.hiner): default ulimits have to be filtered out because # Podman would treat them as new ulimits and break the container # as a result. Names are a copy of - # default_podman_dimensions_el9 in /ansible/group_vars/all.yml + # default_podman_dimensions_el9 in group_vars for name in ['RLIMIT_NOFILE', 'RLIMIT_NPROC']: ulimits.pop(name, None) diff --git a/doc/source/reference/containers/kuryr-guide.rst b/doc/source/reference/containers/kuryr-guide.rst index fbcbfd4192..f748d16c5a 100644 --- a/doc/source/reference/containers/kuryr-guide.rst +++ b/doc/source/reference/containers/kuryr-guide.rst @@ -26,7 +26,8 @@ The IP address is host running the etcd service. 
```2375``` is port that allows Docker daemon to be accessed remotely. ```2379``` is the etcd listening port. -By default etcd and kuryr are disabled in the ``group_vars/all.yml``. +By default etcd and kuryr are disabled in the ``group_vars/all/etcd.yml`` and +``group_vars/all/kuryr.yml`` files. In order to enable them, you need to edit the file globals.yml and set the following variables diff --git a/doc/source/reference/databases/mariadb-guide.rst b/doc/source/reference/databases/mariadb-guide.rst index 71b51f77c4..e34e4bf0a4 100644 --- a/doc/source/reference/databases/mariadb-guide.rst +++ b/doc/source/reference/databases/mariadb-guide.rst @@ -52,9 +52,9 @@ inventory file in the way described below: .. note:: If ``mariadb_shard_id`` is not defined for host in inventory file it will be set automatically - to ``mariadb_default_database_shard_id`` (default 0) from ``group_vars/all.yml`` and can be - overwritten in ``/etc/kolla/globals.yml``. Shard which is marked as default is special in case - of backup or loadbalance, as it is described below. + to ``mariadb_default_database_shard_id`` (default 0) from ``group_vars/all/mariadb.yml`` and + can be overwritten in ``/etc/kolla/globals.yml``. Shard which is marked as default is + special in case of backup or loadbalance, as it is described below. Loadbalancer ------------ diff --git a/doc/source/reference/orchestration-and-nfv/tacker-guide.rst b/doc/source/reference/orchestration-and-nfv/tacker-guide.rst index 78fdf4821b..4c99e72e15 100644 --- a/doc/source/reference/orchestration-and-nfv/tacker-guide.rst +++ b/doc/source/reference/orchestration-and-nfv/tacker-guide.rst @@ -30,7 +30,7 @@ Preparation and Deployment ~~~~~~~~~~~~~~~~~~~~~~~~~~ By default tacker and required services are disabled in -the ``group_vars/all.yml`` file. +the ``group_vars/all/tacker.yml`` file. 
In order to enable them, you need to edit the file ``/etc/kolla/globals.yml`` and set the following variables: diff --git a/doc/source/user/multinode.rst b/doc/source/user/multinode.rst index 21bf67fa45..5a0db7cf2a 100644 --- a/doc/source/user/multinode.rst +++ b/doc/source/user/multinode.rst @@ -136,7 +136,7 @@ host or group variables: `__ are quite complex, but it is worth becoming familiar with them if using host and group variables. The playbook group variables in -``ansible/group_vars/all.yml`` define global defaults, and these take +``ansible/group_vars/all/`` define global defaults, and these take precedence over variables defined in an inventory file and inventory ``group_vars/all``, but not over inventory ``group_vars/*``. Variables in 'extra' files (``globals.yml``) have the highest precedence, so any variables diff --git a/doc/source/user/troubleshooting.rst b/doc/source/user/troubleshooting.rst index d00a94ca74..76a9ac6047 100644 --- a/doc/source/user/troubleshooting.rst +++ b/doc/source/user/troubleshooting.rst @@ -88,13 +88,13 @@ You can find all kolla logs in there. /var/lib/docker/volumes/kolla_logs/_data When ``enable_central_logging`` is enabled, to view the logs in a web browser -using Kibana, go to -``http://:`` or -``http://:``. Authenticate -using ```` and ````. +using OpenSearch Dashboards, go to +``http://:`` or +``http://:``. Authenticate +using ``opensearch`` and ````. The values ````, ```` -```` and ```` can be found in -``/kolla/ansible/group_vars/all.yml`` or if the default -values are overridden, in ``/etc/kolla/globals.yml``. The value of -```` can be found in ``/etc/kolla/passwords.yml``. +```` can be found in +``/kolla/ansible/group_vars/all/opensearch.yml``. The value +of ```` can be found in +``/etc/kolla/passwords.yml``. 
diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml index cb0e651d59..9d4f00a0a4 100644 --- a/etc/kolla/globals.yml +++ b/etc/kolla/globals.yml @@ -1,7 +1,7 @@ --- # You can use this file to override _any_ variable throughout Kolla. # Additional options can be found in the -# 'kolla-ansible/ansible/group_vars/all.yml' file. Default value of all the +# 'kolla-ansible/ansible/group_vars/all' directory. Default value of all the # commented parameters are shown here, To override the default value uncomment # the parameter and change its value. diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 4d9d263dd9..13db98ca61 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -91,7 +91,7 @@ parent: kolla-ansible-base name: kolla-ansible-scenario-base files: - - ^ansible/group_vars/all.yml + - ^ansible/group_vars/all/(baremetal|common|fluentd|glance|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml - ^ansible/roles/common/ - ^requirements-core.yml - ^tests/check-logs.sh From cb06df40e212648b7e27e66c04da24a285891305 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 25 Sep 2025 13:54:20 +0200 Subject: [PATCH 024/165] service-uwsgi-config: Bump socket-timeout to 30 Default is 4 - adapting to the same value as in Devstack Change-Id: I40aaa939f092387eaf5ef65332747224314636c2 Signed-off-by: Michal Nasiadka --- ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 index 870448243b..ce42d352d5 100644 --- a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 +++ b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 @@ -27,6 +27,7 @@ wsgi-file = {{ service_uwsgi_config_wsgi_file }} plugins-dir = {{ '/usr/lib/uwsgi/plugins' if kolla_base_distro in ['ubuntu', 'debian'] else '/usr/lib64/uwsgi' }} plugins = python3 processes = {{ 
service_uwsgi_config_workers }} +socket-timeout = 30 thunder-lock = true {% if service_uwsgi_config_uid is defined %} uid = {{ service_uwsgi_config_uid }} From 2b591fb097caa24e577714f6bd8c6b168fa652f1 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 18 Sep 2025 12:05:05 +0200 Subject: [PATCH 025/165] horizon: Add support for using uWSGI Switching the default to uWSGI due to the changes that are happening in OpenStack - with projects dropping wsgi_file(s) and moving to modules. Separate Apache/mod_wsgi container that will serve for OIDC/SAML cases will be posted as a follow-up. Depends-On: https://review.opendev.org/c/openstack/kolla/+/961676 Change-Id: Ia6b719847033f7861e814b4e1d8da810dafcc2aa Signed-off-by: Michal Nasiadka --- ansible/group_vars/all/horizon.yml | 2 +- ansible/roles/horizon/defaults/main.yml | 6 ++++++ ansible/roles/horizon/tasks/config.yml | 21 ++++++++++++++++++- .../roles/horizon/templates/horizon.json.j2 | 11 +++++++++- .../notes/horizon-port-584efee771a14fd9.yaml | 6 ++++++ .../uwsgi-flamingo-5144740f1a2bb4fb.yaml | 2 ++ 6 files changed, 45 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/horizon-port-584efee771a14fd9.yaml diff --git a/ansible/group_vars/all/horizon.yml b/ansible/group_vars/all/horizon.yml index f010828271..fac7a27a46 100644 --- a/ansible/group_vars/all/horizon.yml +++ b/ansible/group_vars/all/horizon.yml @@ -46,4 +46,4 @@ horizon_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol horizon_public_endpoint: "{{ kolla_external_fqdn | kolla_url(public_protocol, horizon_tls_port if kolla_enable_tls_external | bool else horizon_port) }}" horizon_port: "80" horizon_tls_port: "443" -horizon_listen_port: "{{ horizon_tls_port if horizon_enable_tls_backend | bool else horizon_port }}" +horizon_listen_port: "{{ '8080' if enable_haproxy | bool else horizon_tls_port if horizon_enable_tls_backend | bool else horizon_port }}" diff --git a/ansible/roles/horizon/defaults/main.yml 
b/ansible/roles/horizon/defaults/main.yml index cdda448e16..d1dcae651f 100644 --- a/ansible/roles/horizon/defaults/main.yml +++ b/ansible/roles/horizon/defaults/main.yml @@ -27,6 +27,7 @@ horizon_services: volumes: "{{ horizon_default_volumes + horizon_extra_volumes }}" dimensions: "{{ horizon_dimensions }}" healthcheck: "{{ horizon_healthcheck }}" + wsgi: "openstack_dashboard.wsgi:application" haproxy: horizon: enabled: "{{ enable_horizon }}" @@ -168,3 +169,8 @@ horizon_use_keystone_public_url: False # Copy certificates ################### horizon_copy_certs: "{{ kolla_copy_ca_into_containers | bool or horizon_enable_tls_backend | bool }}" + +############ +# WSGI +############ +horizon_wsgi_provider: "uwsgi" diff --git a/ansible/roles/horizon/tasks/config.yml b/ansible/roles/horizon/tasks/config.yml index 37d06c2007..7fece46ebd 100644 --- a/ansible/roles/horizon/tasks/config.yml +++ b/ansible/roles/horizon/tasks/config.yml @@ -59,7 +59,26 @@ - "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/horizon.conf" - "{{ node_custom_config }}/horizon/horizon.conf" - "horizon.conf.j2" - when: service | service_enabled_and_mapped_to_host + when: + - service | service_enabled_and_mapped_to_host + - horizon_wsgi_provider == "apache" + +- name: "Configure uWSGI for Horizon" + include_role: + name: service-uwsgi-config + vars: + project_services: "{{ horizon_services }}" + service: "{{ project_services[service_name] }}" + service_name: "horizon" + service_uwsgi_config_http_port: "{{ horizon_listen_port }}" + service_uwsgi_config_module: "{{ service.wsgi }}" + service_uwsgi_config_tls_backend: "{{ horizon_enable_tls_backend | bool }}" + service_uwsgi_config_tls_cert: "/etc/horizon/certs/horizon-cert.pem" + service_uwsgi_config_tls_key: "/etc/horizon/certs/horizon-key.pem" + service_uwsgi_config_uid: "{{ 'horizon' if enable_haproxy | bool else 'root' }}" + when: + - service | service_enabled_and_mapped_to_host + - horizon_wsgi_provider == "uwsgi" - name: Copying over 
kolla-settings.py become: true diff --git a/ansible/roles/horizon/templates/horizon.json.j2 b/ansible/roles/horizon/templates/horizon.json.j2 index 5e070ff493..edbc335f8a 100644 --- a/ansible/roles/horizon/templates/horizon.json.j2 +++ b/ansible/roles/horizon/templates/horizon.json.j2 @@ -1,14 +1,23 @@ {% set apache_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} {% set apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %} {% set apache_file = '000-default.conf' if kolla_base_distro in ['ubuntu', 'debian'] else 'horizon.conf' %} +{% set uwsgi_cmd = 'uwsgi /etc/horizon/horizon-uwsgi.ini' %} +{% set command = uwsgi_cmd if horizon_wsgi_provider == 'uwsgi' else ('/usr/sbin/' + apache_cmd + ' -DFOREGROUND') %} { - "command": "/usr/sbin/{{ apache_cmd }} -DFOREGROUND", + "command": "{{ command }}", "config_files": [ { +{% if horizon_wsgi_provider == 'apache' %} "source": "{{ container_config_directory }}/horizon.conf", "dest": "/etc/{{ apache_dir }}/{{ apache_file }}", "owner": "horizon", "perm": "0600" +{% elif horizon_wsgi_provider == 'uwsgi' %} + "source": "{{ container_config_directory }}/horizon-uwsgi.ini", + "dest": "/etc/horizon/horizon-uwsgi.ini", + "owner": "horizon", + "perm": "0600" +{% endif %} }, {% for path in custom_policy %} { diff --git a/releasenotes/notes/horizon-port-584efee771a14fd9.yaml b/releasenotes/notes/horizon-port-584efee771a14fd9.yaml new file mode 100644 index 0000000000..96f2f37737 --- /dev/null +++ b/releasenotes/notes/horizon-port-584efee771a14fd9.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + ``Horizon`` default port (80/443) has been changed to ``8080`` when using + HAProxy, while the old default has been retained for development + environments using ``enable_haproxy`` set to ``no``. 
diff --git a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml index d2dafe705b..0c120edd68 100644 --- a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml +++ b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml @@ -15,6 +15,8 @@ features: - gnocchi_wsgi_provider * - Heat - heat_wsgi_provider + * - Horizon + - horizon_wsgi_provider * - Ironic - ironic_wsgi_provider * - Keystone From 10f77140d4934510184b2cc19dd119483def5815 Mon Sep 17 00:00:00 2001 From: Piotr Milewski Date: Tue, 20 May 2025 14:36:42 +0200 Subject: [PATCH 026/165] Fix missing vendordata.json file for nova-metadata Closes-Bug: #2111328 Change-Id: I3b3b2c434b393dd268e129d2b2d90cca0d0d67d2 Signed-off-by: Piotr Milewski --- ansible/roles/nova/tasks/config.yml | 27 ++++++++++--------- .../notes/bug-2111328-c4f57b50eb5bfecf.yaml | 6 +++++ 2 files changed, 21 insertions(+), 12 deletions(-) create mode 100644 releasenotes/notes/bug-2111328-c4f57b50eb5bfecf.yaml diff --git a/ansible/roles/nova/tasks/config.yml b/ansible/roles/nova/tasks/config.yml index abbcf939d0..8a38aa02ad 100644 --- a/ansible/roles/nova/tasks/config.yml +++ b/ansible/roles/nova/tasks/config.yml @@ -103,6 +103,21 @@ - service | service_enabled_and_mapped_to_host - nova_wsgi_provider == "apache" +- name: Copying over vendordata file for nova services + vars: + service: "{{ nova_services[item] }}" + copy: + src: "{{ vendordata_file_path }}" + dest: "{{ node_config_directory }}/{{ item }}/vendordata.json" + mode: "0660" + become: True + when: + - vendordata_file_path is defined + - service | service_enabled_and_mapped_to_host + loop: + - "nova-metadata" + - "nova-api" + - name: "Configure uWSGI for Nova" include_role: name: service-uwsgi-config @@ -123,15 +138,3 @@ loop: - { name: "nova-api", port: "{{ nova_api_listen_port }}" } - { name: "nova-metadata", port: "{{ nova_metadata_listen_port }}" } - -- name: Copying over vendordata file - vars: - service: "{{ 
nova_services['nova-api'] }}" - copy: - src: "{{ vendordata_file_path }}" - dest: "{{ node_config_directory }}/nova-api/vendordata.json" - mode: "0660" - become: True - when: - - vendordata_file_path is defined - - service | service_enabled_and_mapped_to_host diff --git a/releasenotes/notes/bug-2111328-c4f57b50eb5bfecf.yaml b/releasenotes/notes/bug-2111328-c4f57b50eb5bfecf.yaml new file mode 100644 index 0000000000..94bf9a694b --- /dev/null +++ b/releasenotes/notes/bug-2111328-c4f57b50eb5bfecf.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue where vendordata.json, if defined, + was not being copied to the nova-metadata directory. + `LP#2111328 `__ From 2370ea60c8fb434cee87a05c5679ac49ff476d28 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 29 Sep 2025 16:58:27 +0200 Subject: [PATCH 027/165] haproxy: Fail when a step in haproxy_run.sh fails Change-Id: Iab4dbffc796ebf5bdf32b6eda33f3204a70d1392 Signed-off-by: Michal Nasiadka --- .../roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 b/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 index 7d3492c08a..1cb4d0a21a 100644 --- a/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 +++ b/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 @@ -1,4 +1,8 @@ -#!/bin/bash -x +#!/bin/bash + +set -o errexit +set -o pipefail +set -o xtrace {% if kolla_enable_tls_internal | bool or kolla_enable_tls_external | bool %} {% if kolla_enable_tls_external | bool %} From 8d5cb518d66115515ef185e28562fd5e9a60de2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20Kr=C4=8Dek?= Date: Fri, 14 Feb 2025 14:12:44 +0000 Subject: [PATCH 028/165] Add ability to get stopped container's facts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit kolla_container_facts could only get running containers, this patch adds the ability to 
specify, that user wants even stopped containers. Also updates module's docs and corrects few typos. Change-Id: I9ace3c1d802bc00ce692b3ca755384c6b62bd72d Signed-off-by: Roman Krček --- ansible/library/kolla_container_facts.py | 59 ++++++++++++++++++++---- tests/test_kolla_container_facts.py | 31 ++++++++++--- 2 files changed, 74 insertions(+), 16 deletions(-) diff --git a/ansible/library/kolla_container_facts.py b/ansible/library/kolla_container_facts.py index 053b2ef5e1..9886d5252b 100644 --- a/ansible/library/kolla_container_facts.py +++ b/ansible/library/kolla_container_facts.py @@ -20,10 +20,10 @@ DOCUMENTATION = ''' --- module: kolla_container_facts -short_description: Module for collecting Docker container facts +short_description: Module for collecting container facts description: - - A module targeting at collecting Docker container facts. It is used for - detecting whether the container is running on host in Kolla. + - A module targeted at collecting container facts. It is used for + retrieving data about containers like their environment or state. 
options: container_engine: description: @@ -32,7 +32,7 @@ type: str api_version: description: - - The version of the api for docker-py to use when contacting docker + - The version of the API for container SDK to use required: False type: str default: auto @@ -44,18 +44,45 @@ action: description: - The action to perform + - The action "get_containers" only returns running containers, unless + argument get_all_containers is True required: True type: str -author: Jeffrey Zhang, Michal Nasiadka, Ivan Halomi + choices: + - get_containers + - get_container_env + - get_container_state + args: + description: + - Additional arguments for actions + required: False + type: dict + elements: dict + suboptions: + get_all_containers: + description: + - Get all containers, even stopped containers when + performing action "get_containers" + type: bool + required: False + default: False +author: Jeffrey Zhang, Michal Nasiadka, Roman Krček, Ivan Halomi ''' EXAMPLES = ''' - hosts: all tasks: - - name: Gather docker facts + - name: Gather docker facts for running containers + kolla_container_facts: + container_engine: docker + action: get_containers + + - name: Gather docker facts for all containers kolla_container_facts: container_engine: docker - action: get_containers + action: get_containers + args: + get_all_containers: true - name: Gather glance container facts kolla_container_facts: @@ -136,9 +163,11 @@ def get_containers_names(self): def get_containers(self): """Handle when module is called with action get_containers""" names = self.params.get('name') + args = self.params.get('args', {}) + get_all_containers = args.get('get_all_containers', False) self.result['containers'] = dict() - containers = self.client.containers.list() + containers = self.client.containers.list(all=get_all_containers) for container in containers: container.reload() container_name = container.name @@ -226,9 +255,19 @@ def main(): action=dict(required=True, type='str', choices=['get_containers', 
'get_containers_env', - 'get_containers_state', + 'get_volumes', 'get_containers_names', - 'get_volumes']), + 'get_containers_state']), + args=dict( + type='dict', + required=False, + default={}, + options=dict( + get_all_containers=dict(required=False, + type='bool', + default=False) + ) + ) ) required_if = [ diff --git a/tests/test_kolla_container_facts.py b/tests/test_kolla_container_facts.py index e58b058c5f..b6a3d8000b 100644 --- a/tests/test_kolla_container_facts.py +++ b/tests/test_kolla_container_facts.py @@ -109,7 +109,7 @@ def contruct_volume(vol_dict: dict) -> mock.Mock: return volume -def get_containers(override=None): +def get_containers(override=None, all: bool = False): if override: cont_dicts = override else: @@ -117,9 +117,11 @@ def get_containers(override=None): containers = [] for c in cont_dicts: - # Only running containers should be returned by the container APIs - if c['State']['Status'] == 'running': - containers.append(construct_container(c)) + # With the option "all", only running containers are returned + # by the container API + if not all and c['State']['Status'] != 'running': + continue + containers.append(construct_container(c)) return containers @@ -152,8 +154,9 @@ def test_get_containers_single(self): self.assertDictEqual( self.fake_data['containers'][0], self.dfw.result['containers']['my_container']) + self.dfw.client.containers.list.assert_called_once_with(all=False) - def test_get_container_multi(self): + def test_get_containers_multi(self): self.dfw = get_DockerFactsWorker( {'name': ['my_container', 'exited_container'], 'action': 'get_containers'}) @@ -165,8 +168,9 @@ def test_get_container_multi(self): self.assertIn('my_container', self.dfw.result['containers']) self.assertNotIn('my_container', self.dfw.result) self.assertNotIn('exited_container', self.dfw.result['containers']) + self.dfw.client.containers.list.assert_called_once_with(all=False) - def test_get_container_all(self): + def test_get_containers_all_running(self): 
self.dfw = get_DockerFactsWorker({'name': [], 'action': 'get_containers'}) running_containers = get_containers(self.fake_data['containers']) @@ -177,6 +181,21 @@ def test_get_container_all(self): self.assertIn('my_container', self.dfw.result['containers']) self.assertNotIn('my_container', self.dfw.result) self.assertNotIn('exited_container', self.dfw.result['containers']) + self.dfw.client.containers.list.assert_called_once_with(all=False) + + def test_get_containers_all_including_stopped(self): + self.dfw = get_DockerFactsWorker({'name': [], + 'action': 'get_containers', + 'args': { + 'get_all_containers': True}}) + all_containers = get_containers(self.fake_data['containers'], all=True) + self.dfw.client.containers.list.return_value = all_containers + self.dfw.get_containers() + + self.assertFalse(self.dfw.result['changed']) + self.assertIn('my_container', self.dfw.result['containers']) + self.assertIn('exited_container', self.dfw.result['containers']) + self.dfw.client.containers.list.assert_called_once_with(all=True) def test_get_containers_env(self): fake_env = dict(KOLLA_BASE_DISTRO='ubuntu', From bb15619e80414a2913792a945fdcd112b5fe1671 Mon Sep 17 00:00:00 2001 From: Matt Anson Date: Mon, 29 Sep 2025 17:21:00 +0100 Subject: [PATCH 029/165] Only notify handlers in service-check-containers Notification of handlers on service config change is now mostly delegated to the service-check-containers role, but handlers that restart nova_libvirt and ovn_sb_db_relays still remain. Remove these remaining handlers to ensure that all service containers are restarted by service-check-containers. 
Partial-Bug: #2123946 Change-Id: I57a51fb46cb7c58074044bac76b1160620086cb8 Signed-off-by: Matt Anson --- ansible/roles/nova-cell/tasks/external_ceph.yml | 6 +++--- ansible/roles/ovn-db/tasks/config-relay.yml | 4 ---- .../bug-2123946-notify-handlers-3d0a6a0788d5dcce.yaml | 7 +++++++ 3 files changed, 10 insertions(+), 7 deletions(-) create mode 100644 releasenotes/notes/bug-2123946-notify-handlers-3d0a6a0788d5dcce.yaml diff --git a/ansible/roles/nova-cell/tasks/external_ceph.yml b/ansible/roles/nova-cell/tasks/external_ceph.yml index de8fc143e6..f2d7ba6967 100644 --- a/ansible/roles/nova-cell/tasks/external_ceph.yml +++ b/ansible/roles/nova-cell/tasks/external_ceph.yml @@ -200,6 +200,6 @@ # reload. This may be due to differences in tested versions of libvirt # (8.0.0 vs 6.0.0). Reload should be low overhead, so do it always. libvirt_restart_handlers: >- - {{ ['Restart nova-libvirt container'] - if enable_nova_libvirt_container | bool else - ['Reload libvirtd'] }} + {{ ['Reload libvirtd'] + if not enable_nova_libvirt_container | bool else + [] }} diff --git a/ansible/roles/ovn-db/tasks/config-relay.yml b/ansible/roles/ovn-db/tasks/config-relay.yml index 71c3828768..f26cd2b48f 100644 --- a/ansible/roles/ovn-db/tasks/config-relay.yml +++ b/ansible/roles/ovn-db/tasks/config-relay.yml @@ -19,8 +19,6 @@ dest: "{{ node_config_directory }}/ovn-sb-db-relay-{{ item }}/config.json" mode: "0660" become: true - notify: - - Restart ovn-sb-db-relay container - name: Generate config files for OVN relay services vars: @@ -31,5 +29,3 @@ dest: "{{ node_config_directory }}/ovn-sb-db-relay-{{ item }}/ovsdb-relay.json" mode: "0660" become: true - notify: - - Restart ovn-sb-db-relay container diff --git a/releasenotes/notes/bug-2123946-notify-handlers-3d0a6a0788d5dcce.yaml b/releasenotes/notes/bug-2123946-notify-handlers-3d0a6a0788d5dcce.yaml new file mode 100644 index 0000000000..060471f697 --- /dev/null +++ b/releasenotes/notes/bug-2123946-notify-handlers-3d0a6a0788d5dcce.yaml @@ -0,0 
+1,7 @@ +--- +fixes: + - | + Handlers to trigger a restart nova_libvirt and ovn_sb_db_relay + containers have been removed and restarts of these services + are now under the control of the service-check-containers + role `LP#2123946 `__. From 47214ff28d1fc44673a4430e4edd73f8e43975ca Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 25 Sep 2025 11:52:48 +0200 Subject: [PATCH 030/165] Update Zuul triggers after group_vars split The basic approach is: - we test majority in aio and cephadm scenarios - additional scenario jobs are only triggered on roles that are not covered by aio/cephadm (and the ones that are relevant for given scenario) Change-Id: I56b4e1218092ffcca074fef3ceac8e9d99054b21 Signed-off-by: Michal Nasiadka --- zuul.d/base.yaml | 88 +++++++++---------- zuul.d/project.yaml | 1 + zuul.d/scenarios/aio.yaml | 20 +++-- zuul.d/scenarios/bifrost.yaml | 3 +- zuul.d/scenarios/cells.yaml | 5 +- zuul.d/scenarios/cephadm.yaml | 5 ++ .../scenarios/container-engine-migration.yaml | 2 +- zuul.d/scenarios/haproxy-fqdn.yaml | 6 +- zuul.d/scenarios/hashi-vault.yaml | 11 ++- zuul.d/scenarios/ipv6.yaml | 2 +- zuul.d/scenarios/ironic.yaml | 5 +- zuul.d/scenarios/kvm.yaml | 4 +- zuul.d/scenarios/lets-encrypt.yaml | 7 +- zuul.d/scenarios/magnum.yaml | 3 +- zuul.d/scenarios/mariadb.yaml | 5 +- zuul.d/scenarios/masakari.yaml | 7 +- zuul.d/scenarios/nfv.yaml | 4 +- zuul.d/scenarios/octavia.yaml | 4 +- zuul.d/scenarios/ovn.yaml | 5 +- zuul.d/scenarios/prometheus-opensearch.yaml | 4 +- zuul.d/scenarios/skyline.yaml | 3 +- zuul.d/scenarios/telemetry.yaml | 3 +- zuul.d/scenarios/venus.yaml | 5 +- zuul.d/scenarios/zun.yaml | 3 +- 24 files changed, 110 insertions(+), 95 deletions(-) diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 13db98ca61..4b241ead92 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -1,6 +1,47 @@ --- - job: - name: kolla-ansible-variables + name: kolla-ansible-base + pre-run: tests/pre.yml + run: tests/run.yml + post-run: tests/post.yml + 
timeout: 9000 + post-timeout: 1800 + required-projects: + - openstack/ansible-collection-kolla + - openstack/kolla + - openstack/kolla-ansible + - openstack/requirements + files: + - ^ansible/group_vars/all/common.yml + - ^requirements-core.yml + - ^tests/check-(config|failure|logs).sh + - ^tests/get_logs.sh + - ^tests/init-core-openstack.sh + - ^tests/(run|pre|post).yml + - ^tests/setup_gate.sh + - ^tests/templates/(inventory|globals-default).j2 + - ^tests/test-(core-openstack|dashboard|proxysql).sh + - ^tests/upgrade.sh + irrelevant-files: + - ^.*\.rst$ + - ^bindep.txt$ + - ^doc/.* + - ^releasenotes/.*$ + - ^deploy-guide/.*$ + - ^test-requirements.txt$ + - ^lint-requirements.txt$ + - ^etc/kolla/globals.yml$ + - ^tox.ini$ + - ^\..+ + - ^LICENSE$ + - ^contrib/ + - ^specs/ + - ^kolla_ansible/tests/ + - ^tools/validate-.*$ + - ^zuul.d/ + roles: + - zuul: zuul/zuul-jobs + - zuul: openstack/kolla vars: address_family: 'ipv4' # Test latest ansible-core version on Ubuntu, minimum supported on others. 
@@ -53,48 +94,3 @@ - ^rabbitmq tls_enabled: true virt_type: qemu - -- job: - parent: kolla-ansible-variables - name: kolla-ansible-base - pre-run: tests/pre.yml - run: tests/run.yml - post-run: tests/post.yml - timeout: 9000 - post-timeout: 1800 - required-projects: - - openstack/ansible-collection-kolla - - openstack/kolla - - openstack/kolla-ansible - - openstack/requirements - irrelevant-files: - - ^.*\.rst$ - - ^bindep.txt$ - - ^doc/.* - - ^releasenotes/.*$ - - ^deploy-guide/.*$ - - ^test-requirements.txt$ - - ^lint-requirements.txt$ - - ^etc/kolla/globals.yml$ - - ^tox.ini$ - - ^\..+ - - ^LICENSE$ - - ^contrib/ - - ^specs/ - - ^kolla_ansible/tests/ - - ^tools/validate-.*$ - roles: - - zuul: zuul/zuul-jobs - - zuul: openstack/kolla - -- job: - parent: kolla-ansible-base - name: kolla-ansible-scenario-base - files: - - ^ansible/group_vars/all/(baremetal|common|fluentd|glance|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml - - ^ansible/roles/common/ - - ^requirements-core.yml - - ^tests/check-logs.sh - - ^tests/get_logs.sh - - ^tests/(pre|run).yml - - ^tests/templates/(inventory|globals-default.j2) diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index eccfdafd00..77e714e1a8 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -12,6 +12,7 @@ # https://review.opendev.org/c/openstack/kolla-ansible/+/864780 # - kolla-ansible-scenario-container-engine-migration - kolla-ansible-scenario-haproxy-fqdn + - kolla-ansible-scenario-hashi-vault - kolla-ansible-scenario-kvm - kolla-ansible-scenario-lets-encrypt - kolla-ansible-scenario-magnum diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index 5b43461b27..360737ab54 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -1,7 +1,15 @@ --- - job: - name: kolla-ansible-centos-10s parent: kolla-ansible-base + name: kolla-ansible-aio-base + files: !inherit + - 
^ansible/group_vars/all/(common|fluentd|glance|haproxy|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml + - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml + - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|openvswitch|placement|proxysql|rabbitmq)/ + +- job: + name: kolla-ansible-centos-10s + parent: kolla-ansible-aio-base nodeset: kolla-ansible-centos-10s-8GB voting: false vars: @@ -14,7 +22,7 @@ - job: name: kolla-ansible-debian-bookworm - parent: kolla-ansible-base + parent: kolla-ansible-aio-base nodeset: kolla-ansible-debian-bookworm-16GB - job: @@ -45,7 +53,7 @@ - job: name: kolla-ansible-debian-bookworm-upgrade - parent: kolla-ansible-base + parent: kolla-ansible-aio-base nodeset: kolla-ansible-debian-bookworm-16GB timeout: 10800 @@ -63,7 +71,7 @@ - job: name: kolla-ansible-ubuntu-noble - parent: kolla-ansible-base + parent: kolla-ansible-aio-base nodeset: kolla-ansible-ubuntu-noble-16GB - job: @@ -75,13 +83,13 @@ - job: name: kolla-ansible-ubuntu-noble-upgrade - parent: kolla-ansible-base + parent: kolla-ansible-aio-base nodeset: kolla-ansible-ubuntu-noble-16GB timeout: 10800 - job: name: kolla-ansible-ubuntu-noble-upgrade-slurp - parent: kolla-ansible-base + parent: kolla-ansible-aio-base nodeset: kolla-ansible-ubuntu-noble-16GB timeout: 9000 diff --git a/zuul.d/scenarios/bifrost.yaml b/zuul.d/scenarios/bifrost.yaml index 9c6690052a..fd24a79016 100644 --- a/zuul.d/scenarios/bifrost.yaml +++ b/zuul.d/scenarios/bifrost.yaml @@ -1,9 +1,10 @@ --- - job: name: kolla-ansible-bifrost-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit + - ^ansible/group_vars/all/bifrost.yml - ^ansible/roles/bifrost/ - ^tests/test-bifrost.sh vars: diff --git a/zuul.d/scenarios/cells.yaml b/zuul.d/scenarios/cells.yaml index ce061af6b6..bec5a297ce 100644 --- a/zuul.d/scenarios/cells.yaml +++ 
b/zuul.d/scenarios/cells.yaml @@ -1,14 +1,13 @@ --- - job: name: kolla-ansible-cells-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit + - ^ansible/group_vars/all/(baremetal|common|fluentd|glance|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq|sysctl).yml - ^ansible/roles/nova/ - ^ansible/roles/nova-cell/ - ^ansible/roles/loadbalancer/ - - ^tests/test-core-openstack.sh - - ^tests/test-proxysql.sh vars: scenario: cells scenario_images_extra: diff --git a/zuul.d/scenarios/cephadm.yaml b/zuul.d/scenarios/cephadm.yaml index 7e5528e79c..9d3bf69f22 100644 --- a/zuul.d/scenarios/cephadm.yaml +++ b/zuul.d/scenarios/cephadm.yaml @@ -3,6 +3,11 @@ name: kolla-ansible-cephadm-base parent: kolla-ansible-base voting: false + files: + - ^ansible/group_vars/all/(ceph|ceph-rgw|common|fluentd|glance|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml + - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml + - ^ansible/roles/(ceph-rgw|common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|openvswitch|placement|proxysql|rabbitmq)/ + - ^roles/cephadm/ vars: scenario: cephadm scenario_images_extra: diff --git a/zuul.d/scenarios/container-engine-migration.yaml b/zuul.d/scenarios/container-engine-migration.yaml index f64cc72960..c57aaa23a5 100644 --- a/zuul.d/scenarios/container-engine-migration.yaml +++ b/zuul.d/scenarios/container-engine-migration.yaml @@ -3,7 +3,7 @@ name: kolla-ansible-container-engine-migration-base parent: kolla-ansible-base voting: false - files: + files: !inherit - ^ansible/migrate-container-engine.yml - ^ansible/roles/container-engine-migration/ - ^tests/test-container-engine-migration.sh diff --git a/zuul.d/scenarios/haproxy-fqdn.yaml b/zuul.d/scenarios/haproxy-fqdn.yaml index 672d7a8b5b..dac1687c4e 100644 --- a/zuul.d/scenarios/haproxy-fqdn.yaml +++ 
b/zuul.d/scenarios/haproxy-fqdn.yaml @@ -1,11 +1,11 @@ --- - job: name: kolla-ansible-haproxy-fqdn-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit - - ^ansible/roles/haproxy/ - - ^ansible/roles/loadbalancer/ + - ^ansible/group_vars/haproxy.yml + - ^ansible/roles/(haproxy|haproxy-config|loadbalancer|loadbalancer-config)/ - ^kolla_ansible/kolla_url.py vars: external_api_interface_name: vxlan2 diff --git a/zuul.d/scenarios/hashi-vault.yaml b/zuul.d/scenarios/hashi-vault.yaml index 74c92c4454..468751bade 100644 --- a/zuul.d/scenarios/hashi-vault.yaml +++ b/zuul.d/scenarios/hashi-vault.yaml @@ -1,18 +1,17 @@ --- - job: name: kolla-ansible-hashi-vault-base - parent: kolla-ansible-variables + parent: kolla-ansible-base run: tests/run-hashi-vault.yml required-projects: - openstack/kolla-ansible - openstack/requirements voting: false - files: - - ^requirements-core.yml - - ^tests/templates/(inventory|globals-default.j2) - - ^tests/(pre|run).yml + files: !override - ^kolla_ansible/ - - ^tests/run-hashi-vault.yml + - ^requirements-core.yml + - ^tests/(pre|run|run-hashi-vault).yml + - ^tests/templates/(inventory|globals-default).j2 - ^tests/test-hashicorp-vault-passwords.sh - job: diff --git a/zuul.d/scenarios/ipv6.yaml b/zuul.d/scenarios/ipv6.yaml index e45bf0cd6b..c5a9d3e392 100644 --- a/zuul.d/scenarios/ipv6.yaml +++ b/zuul.d/scenarios/ipv6.yaml @@ -1,7 +1,7 @@ --- - job: name: kolla-ansible-ipv6-base - parent: kolla-ansible-base + parent: kolla-ansible-aio-base voting: false vars: address_family: 'ipv6' diff --git a/zuul.d/scenarios/ironic.yaml b/zuul.d/scenarios/ironic.yaml index fbe177ff32..63b642e7f6 100644 --- a/zuul.d/scenarios/ironic.yaml +++ b/zuul.d/scenarios/ironic.yaml @@ -1,10 +1,11 @@ --- - job: name: kolla-ansible-ironic-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit - - ^ansible/roles/(ironic|neutron|nova|nova-cell)/ + - 
^ansible/group_vars/all/(nova|ironic).yml + - ^ansible/roles/(nova|nova-cell|ironic)/ - ^tests/deploy-tenks\.sh$ - ^tests/templates/ironic-overrides\.j2$ - ^tests/templates/tenks-deploy-config\.yml\.j2$ diff --git a/zuul.d/scenarios/kvm.yaml b/zuul.d/scenarios/kvm.yaml index 829021ad3b..1f4a034eb8 100644 --- a/zuul.d/scenarios/kvm.yaml +++ b/zuul.d/scenarios/kvm.yaml @@ -1,10 +1,10 @@ --- - job: name: kolla-ansible-kvm-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit - - ^ansible/roles/(nova-cell)/ + - ^ansible/roles/nova-cell/ - ^tests/templates/nova-compute-overrides.j2 vars: virt_type: kvm diff --git a/zuul.d/scenarios/lets-encrypt.yaml b/zuul.d/scenarios/lets-encrypt.yaml index 54d054d456..a985cae648 100644 --- a/zuul.d/scenarios/lets-encrypt.yaml +++ b/zuul.d/scenarios/lets-encrypt.yaml @@ -1,11 +1,12 @@ --- - job: name: kolla-ansible-lets-encrypt-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit - - ^ansible/roles/common/templates/conf/input/11-letsencrypt.conf.j2 - - ^ansible/roles/(letsencrypt|loadbalancer)/ + - ^ansible/group_vars/all/lets-encrypt.yml + - ^ansible/roles/fluentd/templates/conf/input/11-letsencrypt.conf.j2 + - ^ansible/roles/(haproxy-config|letsencrypt|loadbalancer|loadbalancer-config)/ - ^tests/test-core-openstack.sh - ^tests/test-dashboard.sh - ^tests/deploy.sh diff --git a/zuul.d/scenarios/magnum.yaml b/zuul.d/scenarios/magnum.yaml index cd19d79a05..18df9700ab 100644 --- a/zuul.d/scenarios/magnum.yaml +++ b/zuul.d/scenarios/magnum.yaml @@ -1,9 +1,10 @@ --- - job: name: kolla-ansible-magnum-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit + - ^ansible/group_vars/all/(designate|magnum|trove).yml - ^ansible/roles/(designate|magnum|trove)/ - ^tests/test-dashboard.sh - ^tests/test-magnum.sh diff --git a/zuul.d/scenarios/mariadb.yaml b/zuul.d/scenarios/mariadb.yaml index 
776a9999cc..23cad5491b 100644 --- a/zuul.d/scenarios/mariadb.yaml +++ b/zuul.d/scenarios/mariadb.yaml @@ -1,10 +1,11 @@ --- - job: name: kolla-ansible-mariadb-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: true files: !inherit - - ^ansible/roles/(loadbalancer|mariadb|proxysql-config)/ + - ^ansible/group_vars/all/mariadb.yml + - ^ansible/roles/(loadbalancer|loadbalancer-config|mariadb|proxysql-config)/ - ^tests/test-mariadb.sh vars: scenario: mariadb diff --git a/zuul.d/scenarios/masakari.yaml b/zuul.d/scenarios/masakari.yaml index ed6182b8b2..4aab449dea 100644 --- a/zuul.d/scenarios/masakari.yaml +++ b/zuul.d/scenarios/masakari.yaml @@ -1,13 +1,12 @@ --- - job: name: kolla-ansible-masakari-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit - - ^ansible/roles/masakari/ - - ^ansible/roles/hacluster/ + - ^ansible/group_vars/all/(hacluster|masakari).yml + - ^ansible/roles/(hacluster|masakari)/ - ^tests/test-masakari.sh - - ^tests/test-dashboard.sh vars: scenario: masakari scenario_images_extra: diff --git a/zuul.d/scenarios/nfv.yaml b/zuul.d/scenarios/nfv.yaml index 0d8145cc7b..60e44f7eea 100644 --- a/zuul.d/scenarios/nfv.yaml +++ b/zuul.d/scenarios/nfv.yaml @@ -1,12 +1,12 @@ --- - job: name: kolla-ansible-scenario-nfv-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit + - ^ansible/group_vars/all/(aodh|barbican|heat|mistral|redis|tacker).yml - ^ansible/roles/(aodh|barbican|heat|mistral|redis|tacker)/ - ^tests/test-scenario-nfv.sh - - ^tests/test-dashboard.sh vars: scenario: nfv scenario_images_extra: diff --git a/zuul.d/scenarios/octavia.yaml b/zuul.d/scenarios/octavia.yaml index 8b85f132de..d7fdcb2080 100644 --- a/zuul.d/scenarios/octavia.yaml +++ b/zuul.d/scenarios/octavia.yaml @@ -1,11 +1,11 @@ --- - job: name: kolla-ansible-octavia-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit 
+ - ^ansible/group_vars/all/octavia.yml - ^ansible/roles/(octavia|octavia-certificates)/ - - ^tests/test-dashboard.sh - ^tests/test-octavia.sh vars: scenario: octavia diff --git a/zuul.d/scenarios/ovn.yaml b/zuul.d/scenarios/ovn.yaml index 9c68d3504c..17a46c569b 100644 --- a/zuul.d/scenarios/ovn.yaml +++ b/zuul.d/scenarios/ovn.yaml @@ -1,13 +1,12 @@ --- - job: name: kolla-ansible-ovn-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit + - ^ansible/group_vars/all/(neutron|octavia|openvswitch|ovn).yml - ^ansible/roles/(neutron|octavia|openvswitch|ovn-controller|ovn-db)/ - ^tests/test-ovn.sh - - ^tests/test-core-openstack.sh - - ^tests/reconfigure.sh vars: scenario: ovn scenario_images_extra: diff --git a/zuul.d/scenarios/prometheus-opensearch.yaml b/zuul.d/scenarios/prometheus-opensearch.yaml index 9cd9938496..481e663f37 100644 --- a/zuul.d/scenarios/prometheus-opensearch.yaml +++ b/zuul.d/scenarios/prometheus-opensearch.yaml @@ -1,10 +1,10 @@ --- - job: name: kolla-ansible-prometheus-opensearch-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit - - ^ansible/site.yml + - ^ansible/group_vars/all/(common|grafana|opensearch|prometheus).yml - ^ansible/roles/(common|opensearch|grafana|prometheus)/ - ^tests/test-prometheus-opensearch.sh vars: diff --git a/zuul.d/scenarios/skyline.yaml b/zuul.d/scenarios/skyline.yaml index c25bbdd96d..6bacdf2b91 100644 --- a/zuul.d/scenarios/skyline.yaml +++ b/zuul.d/scenarios/skyline.yaml @@ -1,9 +1,10 @@ --- - job: name: kolla-ansible-skyline-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit + - ^ansible/group_vars/all/skyline.yml - ^ansible/roles/skyline/ - ^tests/test-skyline.sh vars: diff --git a/zuul.d/scenarios/telemetry.yaml b/zuul.d/scenarios/telemetry.yaml index ec20cd708f..05c1d4fb7f 100644 --- a/zuul.d/scenarios/telemetry.yaml +++ b/zuul.d/scenarios/telemetry.yaml @@ -1,9 +1,10 
@@ --- - job: name: kolla-ansible-telemetry-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: + - ^ansible/group_vars/all/(aodh|ceilometer|gnocchi).yml - ^ansible/roles/(aodh|ceilometer|gnocchi)/ - ^tests/test-telemetry.sh vars: diff --git a/zuul.d/scenarios/venus.yaml b/zuul.d/scenarios/venus.yaml index 579aaca6f9..f933304c7a 100644 --- a/zuul.d/scenarios/venus.yaml +++ b/zuul.d/scenarios/venus.yaml @@ -1,10 +1,11 @@ --- - job: name: kolla-ansible-venus-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit - - ^ansible/roles/(common|opensearch|venus)/ + - ^ansible/group_vars/all/(fluentd|opensearch|venus).yml + - ^ansible/roles/(fluentd|opensearch|venus)/ - ^tests/test-venus.sh vars: scenario: venus diff --git a/zuul.d/scenarios/zun.yaml b/zuul.d/scenarios/zun.yaml index 8367103ad8..6ef8a9c602 100644 --- a/zuul.d/scenarios/zun.yaml +++ b/zuul.d/scenarios/zun.yaml @@ -1,9 +1,10 @@ --- - job: name: kolla-ansible-zun-base - parent: kolla-ansible-scenario-base + parent: kolla-ansible-base voting: false files: !inherit + - ^ansible/group_vars/all/(zun|kuryr|etcd|cinder|iscsi).yml - ^ansible/roles/(zun|kuryr|etcd|cinder|iscsi)/ - ^tests/setup_disks.sh - ^tests/test-core-openstack.sh From afbbacd00472478dad8ab5c667dd6171a4a3808d Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 14 Jul 2025 14:46:28 +0200 Subject: [PATCH 031/165] Split out cron to a separate role We've split out fluentd to move it earlier in the deployment workflow - cron and kolla-toolbox are currently the only components in common role. Splitting it out will help with limiting kolla-ansible to reconfigure only services managed by cron role. 
Change-Id: Ia74c7d1e78e9d756a83c8adc7d9d8d9adb371751 Signed-off-by: Michal Nasiadka --- ansible/roles/common/defaults/main.yml | 26 ----- ansible/roles/common/handlers/main.yml | 14 --- ansible/roles/common/tasks/config.yml | 71 +----------- ansible/roles/cron/defaults/main.yml | 38 +++++++ ansible/roles/cron/handlers/main.yml | 14 +++ ansible/roles/cron/tasks/bootstrap.yml | 1 + ansible/roles/cron/tasks/check-containers.yml | 3 + ansible/roles/cron/tasks/check.yml | 4 + ansible/roles/cron/tasks/config.yml | 101 ++++++++++++++++++ ansible/roles/cron/tasks/config_validate.yml | 1 + ansible/roles/cron/tasks/copy-certs.yml | 6 ++ .../roles/cron/tasks/deploy-containers.yml | 2 + ansible/roles/cron/tasks/deploy.yml | 9 ++ ansible/roles/cron/tasks/main.yml | 2 + ansible/roles/cron/tasks/precheck.yml | 6 ++ ansible/roles/cron/tasks/pull.yml | 3 + ansible/roles/cron/tasks/reconfigure.yml | 2 + ansible/roles/cron/tasks/stop.yml | 6 ++ ansible/roles/cron/tasks/upgrade.yml | 7 ++ .../templates/cron-logrotate-ansible.conf.j2 | 0 .../templates/cron-logrotate-aodh.conf.j2 | 0 .../templates/cron-logrotate-barbican.conf.j2 | 0 .../templates/cron-logrotate-blazar.conf.j2 | 0 .../cron-logrotate-ceilometer.conf.j2 | 0 .../templates/cron-logrotate-cinder.conf.j2 | 0 .../cron-logrotate-cloudkitty.conf.j2 | 0 .../templates/cron-logrotate-collectd.conf.j2 | 0 .../templates/cron-logrotate-cyborg.conf.j2 | 0 .../cron-logrotate-designate.conf.j2 | 0 .../templates/cron-logrotate-etcd.conf.j2 | 0 .../templates/cron-logrotate-fluentd.conf.j2 | 0 .../cron-logrotate-glance-tls-proxy.conf.j2 | 0 .../templates/cron-logrotate-glance.conf.j2 | 0 .../templates/cron-logrotate-global.conf.j2 | 0 .../templates/cron-logrotate-gnocchi.conf.j2 | 0 .../templates/cron-logrotate-grafana.conf.j2 | 0 .../cron-logrotate-hacluster.conf.j2 | 0 .../templates/cron-logrotate-haproxy.conf.j2 | 0 .../templates/cron-logrotate-heat.conf.j2 | 0 .../templates/cron-logrotate-horizon.conf.j2 | 0 
.../templates/cron-logrotate-influxdb.conf.j2 | 0 .../templates/cron-logrotate-ironic.conf.j2 | 0 .../templates/cron-logrotate-keystone.conf.j2 | 0 .../templates/cron-logrotate-kuryr.conf.j2 | 0 .../cron-logrotate-letsencrypt.conf.j2 | 0 .../templates/cron-logrotate-magnum.conf.j2 | 0 .../templates/cron-logrotate-manila.conf.j2 | 0 .../templates/cron-logrotate-mariadb.conf.j2 | 0 .../templates/cron-logrotate-masakari.conf.j2 | 0 .../templates/cron-logrotate-mistral.conf.j2 | 0 .../templates/cron-logrotate-neutron.conf.j2 | 0 .../cron-logrotate-nova-libvirt.conf.j2 | 0 .../templates/cron-logrotate-nova.conf.j2 | 0 .../templates/cron-logrotate-octavia.conf.j2 | 0 .../cron-logrotate-opensearch.conf.j2 | 0 .../cron-logrotate-openvswitch.conf.j2 | 0 .../cron-logrotate-placement.conf.j2 | 0 .../cron-logrotate-prometheus.conf.j2 | 0 .../templates/cron-logrotate-proxysql.conf.j2 | 0 .../templates/cron-logrotate-rabbitmq.conf.j2 | 0 .../templates/cron-logrotate-redis.conf.j2 | 0 .../templates/cron-logrotate-skyline.conf.j2 | 0 .../templates/cron-logrotate-swift.conf.j2 | 0 .../templates/cron-logrotate-tacker.conf.j2 | 0 .../templates/cron-logrotate-trove.conf.j2 | 0 .../templates/cron-logrotate-venus.conf.j2 | 0 .../templates/cron-logrotate-watcher.conf.j2 | 0 .../templates/cron-logrotate-zun.conf.j2 | 0 .../{common => cron}/templates/cron.json.j2 | 0 ansible/roles/cron/vars/main.yml | 2 + ansible/site.yml | 15 ++- .../contributor/adding-a-new-service.rst | 6 +- .../cron-break-out-role-fa72289cc100ef53.yaml | 5 + 73 files changed, 230 insertions(+), 114 deletions(-) create mode 100644 ansible/roles/cron/defaults/main.yml create mode 100644 ansible/roles/cron/handlers/main.yml create mode 100644 ansible/roles/cron/tasks/bootstrap.yml create mode 100644 ansible/roles/cron/tasks/check-containers.yml create mode 100644 ansible/roles/cron/tasks/check.yml create mode 100644 ansible/roles/cron/tasks/config.yml create mode 100644 ansible/roles/cron/tasks/config_validate.yml create 
mode 100644 ansible/roles/cron/tasks/copy-certs.yml create mode 100644 ansible/roles/cron/tasks/deploy-containers.yml create mode 100644 ansible/roles/cron/tasks/deploy.yml create mode 100644 ansible/roles/cron/tasks/main.yml create mode 100644 ansible/roles/cron/tasks/precheck.yml create mode 100644 ansible/roles/cron/tasks/pull.yml create mode 100644 ansible/roles/cron/tasks/reconfigure.yml create mode 100644 ansible/roles/cron/tasks/stop.yml create mode 100644 ansible/roles/cron/tasks/upgrade.yml rename ansible/roles/{common => cron}/templates/cron-logrotate-ansible.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-aodh.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-barbican.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-blazar.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-ceilometer.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-cinder.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-cloudkitty.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-collectd.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-cyborg.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-designate.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-etcd.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-fluentd.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-glance-tls-proxy.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-glance.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-global.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-gnocchi.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-grafana.conf.j2 (100%) rename 
ansible/roles/{common => cron}/templates/cron-logrotate-hacluster.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-haproxy.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-heat.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-horizon.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-influxdb.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-ironic.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-keystone.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-kuryr.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-letsencrypt.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-magnum.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-manila.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-mariadb.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-masakari.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-mistral.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-neutron.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-nova-libvirt.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-nova.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-octavia.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-opensearch.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-openvswitch.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-placement.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-prometheus.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-proxysql.conf.j2 (100%) rename 
ansible/roles/{common => cron}/templates/cron-logrotate-rabbitmq.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-redis.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-skyline.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-swift.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-tacker.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-trove.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-venus.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-watcher.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron-logrotate-zun.conf.j2 (100%) rename ansible/roles/{common => cron}/templates/cron.json.j2 (100%) create mode 100644 ansible/roles/cron/vars/main.yml create mode 100644 releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml index 4ca1b704b7..715b335897 100644 --- a/ansible/roles/common/defaults/main.yml +++ b/ansible/roles/common/defaults/main.yml @@ -12,32 +12,18 @@ common_services: privileged: True volumes: "{{ kolla_toolbox_default_volumes + kolla_toolbox_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ kolla_toolbox_dimensions }}" - cron: - container_name: cron - group: cron - enabled: True - image: "{{ cron_image_full }}" - environment: - KOLLA_LOGROTATE_SCHEDULE: "{{ cron_logrotate_schedule }}" - volumes: "{{ cron_default_volumes + cron_extra_volumes }}" - dimensions: "{{ cron_dimensions }}" #################### # Docker #################### common_tag: "{{ openstack_tag }}" -cron_dimensions: "{{ default_container_dimensions }}" kolla_toolbox_dimensions: "{{ default_container_dimensions }}" kolla_toolbox_image: "{{ docker_image_url }}kolla-toolbox" kolla_toolbox_tag: "{{ common_tag }}" 
kolla_toolbox_image_full: "{{ kolla_toolbox_image }}:{{ kolla_toolbox_tag }}" -cron_image: "{{ docker_image_url }}cron" -cron_tag: "{{ common_tag }}" -cron_image_full: "{{ cron_image }}:{{ cron_tag }}" - kolla_toolbox_default_volumes: - "{{ node_config_directory }}/kolla-toolbox/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" @@ -45,19 +31,7 @@ kolla_toolbox_default_volumes: - "/dev/:/dev/" - "/run/:/run/{{ ':shared' if kolla_container_engine == 'docker' else '' }}" # see: https://github.com/containers/podman/issues/16305 - "kolla_logs:/var/log/kolla/" -cron_default_volumes: - - "{{ node_config_directory }}/cron/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "kolla_logs:/var/log/kolla/" kolla_toolbox_extra_volumes: "{{ default_extra_volumes }}" -cron_extra_volumes: "{{ default_extra_volumes }}" - -cron_logrotate_log_maxsize: "100M" -cron_logrotate_log_minsize: "30M" -cron_logrotate_rotation_interval: "weekly" -cron_logrotate_rotation_count: 6 -cron_logrotate_schedule: "daily" ################### # Copy certificates diff --git a/ansible/roles/common/handlers/main.yml b/ansible/roles/common/handlers/main.yml index c963de693b..137b4f9cd7 100644 --- a/ansible/roles/common/handlers/main.yml +++ b/ansible/roles/common/handlers/main.yml @@ -20,17 +20,3 @@ become: true command: "{{ kolla_container_engine }} exec -t {{ common_services['kolla-toolbox']['container_name'] }} ansible --version" changed_when: false - -- name: Restart cron container - vars: - service_name: "cron" - service: "{{ common_services[service_name] }}" - become: true - kolla_container: - action: "recreate_or_restart_container" - common_options: "{{ docker_common_options }}" - name: "{{ service.container_name }}" - image: "{{ service.image }}" - volumes: "{{ service.volumes }}" - environment: "{{ service.environment }}" - dimensions: "{{ 
service.dimensions }}" diff --git a/ansible/roles/common/tasks/config.yml b/ansible/roles/common/tasks/config.yml index 2562c31e5c..f236544336 100644 --- a/ansible/roles/common/tasks/config.yml +++ b/ansible/roles/common/tasks/config.yml @@ -11,10 +11,7 @@ mode: "0770" become: true with_subelements: - - - service_name: "cron" - paths: - - "cron" - - service_name: "kolla-toolbox" + - - service_name: "kolla-toolbox" paths: - "kolla-toolbox" - paths @@ -50,72 +47,6 @@ become: true with_dict: "{{ common_services | select_services_enabled_and_mapped_to_host }}" -- name: Copying over cron logrotate config file - vars: - cron_logrotate_enabled_services: >- - {{ cron_logrotate_services | - selectattr('enabled') | - map(attribute='name') | - list }} - cron_logrotate_services: - - { name: "ansible", enabled: "yes" } - - { name: "aodh", enabled: "{{ enable_aodh | bool }}" } - - { name: "barbican", enabled: "{{ enable_barbican | bool }}" } - - { name: "blazar", enabled: "{{ enable_blazar | bool }}" } - - { name: "ceilometer", enabled: "{{ enable_ceilometer | bool }}" } - - { name: "cinder", enabled: "{{ enable_cinder | bool }}" } - - { name: "cloudkitty", enabled: "{{ enable_cloudkitty | bool }}" } - - { name: "collectd", enabled: "{{ enable_collectd | bool }}" } - - { name: "cyborg", enabled: "{{ enable_cyborg | bool }}" } - - { name: "designate", enabled: "{{ enable_designate | bool }}" } - - { name: "etcd", enabled: "{{ enable_etcd | bool }}" } - - { name: "fluentd", enabled: "{{ enable_fluentd | bool }}" } - - { name: "glance", enabled: "{{ enable_glance | bool }}" } - - { name: "glance-tls-proxy", enabled: "{{ glance_enable_tls_backend | bool }}" } - - { name: "gnocchi", enabled: "{{ enable_gnocchi | bool }}" } - - { name: "grafana", enabled: "{{ enable_grafana | bool }}" } - - { name: "hacluster", enabled: "{{ enable_hacluster | bool }}" } - - { name: "haproxy", enabled: "{{ enable_haproxy | bool }}" } - - { name: "heat", enabled: "{{ enable_heat | bool }}" } - - { name: 
"horizon", enabled: "{{ enable_horizon | bool }}" } - - { name: "influxdb", enabled: "{{ enable_influxdb | bool }}" } - - { name: "ironic", enabled: "{{ enable_ironic | bool }}" } - - { name: "keystone", enabled: "{{ enable_keystone | bool }}" } - - { name: "kuryr", enabled: "{{ enable_kuryr | bool }}" } - - { name: "magnum", enabled: "{{ enable_magnum | bool }}" } - - { name: "manila", enabled: "{{ enable_manila | bool }}" } - - { name: "mariadb", enabled: "{{ enable_mariadb | bool }}" } - - { name: "masakari", enabled: "{{ enable_masakari | bool }}" } - - { name: "mistral", enabled: "{{ enable_mistral | bool }}" } - - { name: "neutron", enabled: "{{ enable_neutron | bool }}" } - - { name: "nova", enabled: "{{ enable_nova | bool }}" } - - { name: "nova-libvirt", enabled: "{{ enable_nova | bool and enable_nova_libvirt_container | bool }}" } - - { name: "octavia", enabled: "{{ enable_octavia | bool }}" } - - { name: "opensearch", enabled: "{{ enable_opensearch | bool or enable_opensearch_dashboards | bool }}" } - - { name: "openvswitch", enabled: "{{ enable_openvswitch | bool }}" } - - { name: "placement", enabled: "{{ enable_placement | bool }}" } - - { name: "prometheus", enabled: "{{ enable_prometheus | bool }}" } - - { name: "proxysql", enabled: "{{ enable_proxysql | bool }}" } - - { name: "rabbitmq", enabled: "{{ enable_rabbitmq | bool }}" } - - { name: "redis", enabled: "{{ enable_redis | bool }}" } - - { name: "skyline", enabled: "{{ enable_skyline | bool }}" } - - { name: "tacker", enabled: "{{ enable_tacker | bool }}" } - - { name: "trove", enabled: "{{ enable_trove | bool }}" } - - { name: "venus", enabled: "{{ enable_venus | bool }}" } - - { name: "watcher", enabled: "{{ enable_watcher | bool }}" } - - { name: "zun", enabled: "{{ enable_zun | bool }}" } - template: - src: "{{ item }}" - dest: "{{ node_config_directory }}/cron/logrotate.conf" - mode: "0660" - become: true - when: - - common_services.cron | service_enabled_and_mapped_to_host - 
with_first_found: - - "{{ node_custom_config }}/cron/{{ inventory_hostname }}/cron-logrotate-global.conf" - - "{{ node_custom_config }}/cron/cron-logrotate-global.conf" - - "cron-logrotate-global.conf.j2" - - name: Ensure RabbitMQ Erlang cookie exists become: true template: diff --git a/ansible/roles/cron/defaults/main.yml b/ansible/roles/cron/defaults/main.yml new file mode 100644 index 0000000000..909285ae18 --- /dev/null +++ b/ansible/roles/cron/defaults/main.yml @@ -0,0 +1,38 @@ +--- +cron_services: + cron: + container_name: cron + group: cron + enabled: True + image: "{{ cron_image_full }}" + environment: + KOLLA_LOGROTATE_SCHEDULE: "{{ cron_logrotate_schedule }}" + volumes: "{{ cron_default_volumes + cron_extra_volumes }}" + dimensions: "{{ cron_dimensions }}" + +#################### +# Docker +#################### +cron_dimensions: "{{ default_container_dimensions }}" + +cron_image: "{{ docker_image_url }}cron" +cron_tag: "{{ openstack_tag }}" +cron_image_full: "{{ cron_image }}:{{ cron_tag }}" + +cron_default_volumes: + - "{{ node_config_directory }}/cron/:{{ container_config_directory }}/:ro" + - "/etc/localtime:/etc/localtime:ro" + - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" + - "kolla_logs:/var/log/kolla/" +cron_extra_volumes: "{{ default_extra_volumes }}" + +cron_logrotate_log_maxsize: "100M" +cron_logrotate_log_minsize: "30M" +cron_logrotate_rotation_interval: "weekly" +cron_logrotate_rotation_count: 6 +cron_logrotate_schedule: "daily" + +################### +# Copy certificates +################### +cron_copy_certs: "{{ kolla_copy_ca_into_containers | bool }}" diff --git a/ansible/roles/cron/handlers/main.yml b/ansible/roles/cron/handlers/main.yml new file mode 100644 index 0000000000..a9581eba1e --- /dev/null +++ b/ansible/roles/cron/handlers/main.yml @@ -0,0 +1,14 @@ +--- +- name: Restart cron container + vars: + service_name: "cron" + service: "{{ cron_services[service_name] }}" + become: true + 
kolla_container: + action: "recreate_or_restart_container" + common_options: "{{ docker_common_options }}" + name: "{{ service.container_name }}" + image: "{{ service.image }}" + volumes: "{{ service.volumes }}" + environment: "{{ service.environment }}" + dimensions: "{{ service.dimensions }}" diff --git a/ansible/roles/cron/tasks/bootstrap.yml b/ansible/roles/cron/tasks/bootstrap.yml new file mode 100644 index 0000000000..ed97d539c0 --- /dev/null +++ b/ansible/roles/cron/tasks/bootstrap.yml @@ -0,0 +1 @@ +--- diff --git a/ansible/roles/cron/tasks/check-containers.yml b/ansible/roles/cron/tasks/check-containers.yml new file mode 100644 index 0000000000..b7e2f7c29f --- /dev/null +++ b/ansible/roles/cron/tasks/check-containers.yml @@ -0,0 +1,3 @@ +--- +- import_role: + name: service-check-containers diff --git a/ansible/roles/cron/tasks/check.yml b/ansible/roles/cron/tasks/check.yml new file mode 100644 index 0000000000..c6eeb2833f --- /dev/null +++ b/ansible/roles/cron/tasks/check.yml @@ -0,0 +1,4 @@ +--- +- name: Checking Cron containers + import_role: + role: service-check diff --git a/ansible/roles/cron/tasks/config.yml b/ansible/roles/cron/tasks/config.yml new file mode 100644 index 0000000000..adb5bfe5ef --- /dev/null +++ b/ansible/roles/cron/tasks/config.yml @@ -0,0 +1,101 @@ +--- +- name: Ensuring config directories exist + vars: + service_name: "{{ 'cron' }}" + service: "{{ cron_services[service_name] }}" + file: + path: "{{ node_config_directory }}/{{ service_name }}" + state: "directory" + owner: "{{ config_owner_user }}" + group: "{{ config_owner_group }}" + mode: "0770" + become: true + when: service | service_enabled_and_mapped_to_host + +- include_tasks: copy-certs.yml + when: + - cron_copy_certs | bool + +- name: Copying over config.json files for services + template: + src: "{{ item.key }}.json.j2" + dest: "{{ node_config_directory }}/{{ item.key }}/config.json" + mode: "0660" + become: true + with_dict: "{{ cron_services | 
select_services_enabled_and_mapped_to_host }}" + +- name: Copying over cron logrotate config file + vars: + cron_logrotate_enabled_services: >- + {{ cron_logrotate_services | + selectattr('enabled') | + map(attribute='name') | + list }} + cron_logrotate_services: + - { name: "ansible", enabled: "yes" } + - { name: "aodh", enabled: "{{ enable_aodh | bool }}" } + - { name: "barbican", enabled: "{{ enable_barbican | bool }}" } + - { name: "blazar", enabled: "{{ enable_blazar | bool }}" } + - { name: "ceilometer", enabled: "{{ enable_ceilometer | bool }}" } + - { name: "cinder", enabled: "{{ enable_cinder | bool }}" } + - { name: "cloudkitty", enabled: "{{ enable_cloudkitty | bool }}" } + - { name: "collectd", enabled: "{{ enable_collectd | bool }}" } + - { name: "cyborg", enabled: "{{ enable_cyborg | bool }}" } + - { name: "designate", enabled: "{{ enable_designate | bool }}" } + - { name: "etcd", enabled: "{{ enable_etcd | bool }}" } + - { name: "fluentd", enabled: "{{ enable_fluentd | bool }}" } + - { name: "glance", enabled: "{{ enable_glance | bool }}" } + - { name: "glance-tls-proxy", enabled: "{{ glance_enable_tls_backend | bool }}" } + - { name: "gnocchi", enabled: "{{ enable_gnocchi | bool }}" } + - { name: "grafana", enabled: "{{ enable_grafana | bool }}" } + - { name: "hacluster", enabled: "{{ enable_hacluster | bool }}" } + - { name: "haproxy", enabled: "{{ enable_haproxy | bool }}" } + - { name: "heat", enabled: "{{ enable_heat | bool }}" } + - { name: "horizon", enabled: "{{ enable_horizon | bool }}" } + - { name: "influxdb", enabled: "{{ enable_influxdb | bool }}" } + - { name: "ironic", enabled: "{{ enable_ironic | bool }}" } + - { name: "keystone", enabled: "{{ enable_keystone | bool }}" } + - { name: "kuryr", enabled: "{{ enable_kuryr | bool }}" } + - { name: "magnum", enabled: "{{ enable_magnum | bool }}" } + - { name: "manila", enabled: "{{ enable_manila | bool }}" } + - { name: "mariadb", enabled: "{{ enable_mariadb | bool }}" } + - { name: 
"masakari", enabled: "{{ enable_masakari | bool }}" } + - { name: "mistral", enabled: "{{ enable_mistral | bool }}" } + - { name: "neutron", enabled: "{{ enable_neutron | bool }}" } + - { name: "nova", enabled: "{{ enable_nova | bool }}" } + - { name: "nova-libvirt", enabled: "{{ enable_nova | bool and enable_nova_libvirt_container | bool }}" } + - { name: "octavia", enabled: "{{ enable_octavia | bool }}" } + - { name: "opensearch", enabled: "{{ enable_opensearch | bool or enable_opensearch_dashboards | bool }}" } + - { name: "openvswitch", enabled: "{{ enable_openvswitch | bool }}" } + - { name: "placement", enabled: "{{ enable_placement | bool }}" } + - { name: "prometheus", enabled: "{{ enable_prometheus | bool }}" } + - { name: "proxysql", enabled: "{{ enable_proxysql | bool }}" } + - { name: "rabbitmq", enabled: "{{ enable_rabbitmq | bool }}" } + - { name: "redis", enabled: "{{ enable_redis | bool }}" } + - { name: "skyline", enabled: "{{ enable_skyline | bool }}" } + - { name: "tacker", enabled: "{{ enable_tacker | bool }}" } + - { name: "trove", enabled: "{{ enable_trove | bool }}" } + - { name: "venus", enabled: "{{ enable_venus | bool }}" } + - { name: "watcher", enabled: "{{ enable_watcher | bool }}" } + - { name: "zun", enabled: "{{ enable_zun | bool }}" } + template: + src: "{{ item }}" + dest: "{{ node_config_directory }}/cron/logrotate.conf" + mode: "0660" + become: true + when: + - cron_services.cron | service_enabled_and_mapped_to_host + with_first_found: + - "{{ node_custom_config }}/cron/{{ inventory_hostname }}/cron-logrotate-global.conf" + - "{{ node_custom_config }}/cron/cron-logrotate-global.conf" + - "cron-logrotate-global.conf.j2" + +- name: Ensuring config directories have correct owner and permission + become: true + file: + path: "{{ node_config_directory }}/{{ item.key }}" + owner: "{{ config_owner_user }}" + group: "{{ config_owner_group }}" + mode: "0770" + ignore_errors: "{{ ansible_check_mode }}" + with_dict: "{{ cron_services | 
select_services_enabled_and_mapped_to_host }}" diff --git a/ansible/roles/cron/tasks/config_validate.yml b/ansible/roles/cron/tasks/config_validate.yml new file mode 100644 index 0000000000..ed97d539c0 --- /dev/null +++ b/ansible/roles/cron/tasks/config_validate.yml @@ -0,0 +1 @@ +--- diff --git a/ansible/roles/cron/tasks/copy-certs.yml b/ansible/roles/cron/tasks/copy-certs.yml new file mode 100644 index 0000000000..3e43bc11b0 --- /dev/null +++ b/ansible/roles/cron/tasks/copy-certs.yml @@ -0,0 +1,6 @@ +--- +- name: "Copy certificates and keys for {{ project_name }}" + import_role: + role: service-cert-copy + vars: + project_services: "{{ cron_services }}" diff --git a/ansible/roles/cron/tasks/deploy-containers.yml b/ansible/roles/cron/tasks/deploy-containers.yml new file mode 100644 index 0000000000..eb24ab5c7a --- /dev/null +++ b/ansible/roles/cron/tasks/deploy-containers.yml @@ -0,0 +1,2 @@ +--- +- import_tasks: check-containers.yml diff --git a/ansible/roles/cron/tasks/deploy.yml b/ansible/roles/cron/tasks/deploy.yml new file mode 100644 index 0000000000..d0b36cb78b --- /dev/null +++ b/ansible/roles/cron/tasks/deploy.yml @@ -0,0 +1,9 @@ +--- +- import_tasks: config.yml + +- import_tasks: check-containers.yml + +- import_tasks: bootstrap.yml + +- name: Flush handlers + meta: flush_handlers diff --git a/ansible/roles/cron/tasks/main.yml b/ansible/roles/cron/tasks/main.yml new file mode 100644 index 0000000000..bc5d1e6257 --- /dev/null +++ b/ansible/roles/cron/tasks/main.yml @@ -0,0 +1,2 @@ +--- +- include_tasks: "{{ kolla_action }}.yml" diff --git a/ansible/roles/cron/tasks/precheck.yml b/ansible/roles/cron/tasks/precheck.yml new file mode 100644 index 0000000000..9a65561141 --- /dev/null +++ b/ansible/roles/cron/tasks/precheck.yml @@ -0,0 +1,6 @@ +--- +- import_role: + name: service-precheck + vars: + service_precheck_services: "{{ cron_services }}" + service_name: "{{ project_name }}" diff --git a/ansible/roles/cron/tasks/pull.yml 
b/ansible/roles/cron/tasks/pull.yml new file mode 100644 index 0000000000..53f9c5fda1 --- /dev/null +++ b/ansible/roles/cron/tasks/pull.yml @@ -0,0 +1,3 @@ +--- +- import_role: + role: service-images-pull diff --git a/ansible/roles/cron/tasks/reconfigure.yml b/ansible/roles/cron/tasks/reconfigure.yml new file mode 100644 index 0000000000..5b10a7e111 --- /dev/null +++ b/ansible/roles/cron/tasks/reconfigure.yml @@ -0,0 +1,2 @@ +--- +- import_tasks: deploy.yml diff --git a/ansible/roles/cron/tasks/stop.yml b/ansible/roles/cron/tasks/stop.yml new file mode 100644 index 0000000000..ce903cd50f --- /dev/null +++ b/ansible/roles/cron/tasks/stop.yml @@ -0,0 +1,6 @@ +--- +- import_role: + name: service-stop + vars: + project_services: "{{ cron_services }}" + service_name: "{{ project_name }}" diff --git a/ansible/roles/cron/tasks/upgrade.yml b/ansible/roles/cron/tasks/upgrade.yml new file mode 100644 index 0000000000..49edff81e3 --- /dev/null +++ b/ansible/roles/cron/tasks/upgrade.yml @@ -0,0 +1,7 @@ +--- +- import_tasks: config.yml + +- import_tasks: check-containers.yml + +- name: Flush handlers + meta: flush_handlers diff --git a/ansible/roles/common/templates/cron-logrotate-ansible.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-ansible.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-ansible.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-ansible.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-aodh.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-aodh.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-aodh.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-aodh.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-barbican.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-barbican.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-barbican.conf.j2 rename to 
ansible/roles/cron/templates/cron-logrotate-barbican.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-blazar.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-blazar.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-blazar.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-blazar.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-ceilometer.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-ceilometer.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-ceilometer.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-ceilometer.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-cinder.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-cinder.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-cinder.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-cinder.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-cloudkitty.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-cloudkitty.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-cloudkitty.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-cloudkitty.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-collectd.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-collectd.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-collectd.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-collectd.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-cyborg.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-cyborg.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-cyborg.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-cyborg.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-designate.conf.j2 
b/ansible/roles/cron/templates/cron-logrotate-designate.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-designate.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-designate.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-etcd.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-etcd.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-etcd.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-etcd.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-fluentd.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-fluentd.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-fluentd.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-fluentd.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-glance-tls-proxy.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-glance-tls-proxy.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-glance-tls-proxy.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-glance-tls-proxy.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-glance.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-glance.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-glance.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-glance.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-global.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-global.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-global.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-global.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-gnocchi.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-gnocchi.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-gnocchi.conf.j2 
rename to ansible/roles/cron/templates/cron-logrotate-gnocchi.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-grafana.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-grafana.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-grafana.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-grafana.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-hacluster.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-hacluster.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-hacluster.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-hacluster.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-haproxy.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-haproxy.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-haproxy.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-haproxy.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-heat.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-heat.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-heat.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-heat.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-horizon.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-horizon.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-horizon.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-horizon.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-influxdb.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-influxdb.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-influxdb.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-influxdb.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-ironic.conf.j2 
b/ansible/roles/cron/templates/cron-logrotate-ironic.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-ironic.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-ironic.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-keystone.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-keystone.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-keystone.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-keystone.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-kuryr.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-kuryr.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-kuryr.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-kuryr.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-letsencrypt.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-letsencrypt.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-letsencrypt.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-letsencrypt.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-magnum.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-magnum.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-magnum.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-magnum.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-manila.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-manila.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-manila.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-manila.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-mariadb.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-mariadb.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-mariadb.conf.j2 rename to 
ansible/roles/cron/templates/cron-logrotate-mariadb.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-masakari.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-masakari.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-masakari.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-masakari.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-mistral.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-mistral.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-mistral.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-mistral.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-neutron.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-neutron.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-neutron.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-neutron.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-nova-libvirt.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-nova-libvirt.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-nova-libvirt.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-nova-libvirt.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-nova.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-nova.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-nova.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-nova.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-octavia.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-octavia.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-octavia.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-octavia.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-opensearch.conf.j2 
b/ansible/roles/cron/templates/cron-logrotate-opensearch.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-opensearch.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-opensearch.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-openvswitch.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-openvswitch.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-openvswitch.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-openvswitch.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-placement.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-placement.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-placement.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-placement.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-prometheus.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-prometheus.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-prometheus.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-prometheus.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-proxysql.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-proxysql.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-proxysql.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-proxysql.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-rabbitmq.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-rabbitmq.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-rabbitmq.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-rabbitmq.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-redis.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-redis.conf.j2 similarity index 100% rename from 
ansible/roles/common/templates/cron-logrotate-redis.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-redis.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-skyline.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-skyline.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-skyline.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-skyline.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-swift.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-swift.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-swift.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-swift.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-tacker.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-tacker.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-tacker.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-tacker.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-trove.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-trove.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-trove.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-trove.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-venus.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-venus.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-venus.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-venus.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-watcher.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-watcher.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-watcher.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-watcher.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-zun.conf.j2 
b/ansible/roles/cron/templates/cron-logrotate-zun.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-zun.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-zun.conf.j2 diff --git a/ansible/roles/common/templates/cron.json.j2 b/ansible/roles/cron/templates/cron.json.j2 similarity index 100% rename from ansible/roles/common/templates/cron.json.j2 rename to ansible/roles/cron/templates/cron.json.j2 diff --git a/ansible/roles/cron/vars/main.yml b/ansible/roles/cron/vars/main.yml new file mode 100644 index 0000000000..3c0c0f60d8 --- /dev/null +++ b/ansible/roles/cron/vars/main.yml @@ -0,0 +1,2 @@ +--- +project_name: "cron" diff --git a/ansible/site.yml b/ansible/site.yml index 8dbaa39bc3..3410f9b50c 100644 --- a/ansible/site.yml +++ b/ansible/site.yml @@ -87,7 +87,6 @@ - name: Apply role common gather_facts: false hosts: - - cron - kolla-logs - kolla-toolbox serial: '{{ kolla_serial|default("0") }}' @@ -100,6 +99,20 @@ roles: - role: common +- name: Apply role cron + gather_facts: false + hosts: + - cron + serial: '{{ kolla_serial|default("0") }}' + max_fail_percentage: >- + {{ cron_max_fail_percentage | + default(kolla_max_fail_percentage) | + default(100) }} + tags: + - cron + roles: + - role: cron + - name: Apply role fluentd gather_facts: false hosts: diff --git a/doc/source/contributor/adding-a-new-service.rst b/doc/source/contributor/adding-a-new-service.rst index 665439112c..af05d174d3 100644 --- a/doc/source/contributor/adding-a-new-service.rst +++ b/doc/source/contributor/adding-a-new-service.rst @@ -42,10 +42,10 @@ which Kolla uses throughout and which should be followed. * Log rotation - For OpenStack services there should be a ``cron-logrotate-PROJECT.conf.j2`` - template file in ``ansible/roles/common/templates`` with the following + template file in ``ansible/roles/cron/templates`` with the following content: - .. path ansible/roles/common/templates/cron-logrotate-PROJECT.conf.j2 + .. 
path ansible/roles/cron/templates/cron-logrotate-PROJECT.conf.j2 .. code-block:: console "/var/log/kolla/PROJECT/*.log" @@ -53,7 +53,7 @@ which Kolla uses throughout and which should be followed. } - For OpenStack services there should be an entry in the ``services`` list - in the ``cron.json.j2`` template file in ``ansible/roles/common/templates``. + in the ``cron.json.j2`` template file in ``ansible/roles/cron/templates``. * Log delivery diff --git a/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml b/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml new file mode 100644 index 0000000000..7bbb085e5c --- /dev/null +++ b/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + A ``cron`` Ansible role has been created and its deployment is not part + of the ``common`` role anymore. From 3f85823fa6381a4e65126f4cb46a1bb7d6cee325 Mon Sep 17 00:00:00 2001 From: Bartosz Bezak Date: Thu, 2 Oct 2025 09:50:36 +0200 Subject: [PATCH 032/165] docs: change docs after fluentd move to its own role follow up to [1] [1] https://review.opendev.org/c/openstack/kolla-ansible/+/954619 Change-Id: I7d23b9b57deb6d421a8953fe78774bd0155db625 Signed-off-by: Bartosz Bezak --- doc/source/contributor/adding-a-new-service.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/contributor/adding-a-new-service.rst b/doc/source/contributor/adding-a-new-service.rst index 665439112c..7d332ac07f 100644 --- a/doc/source/contributor/adding-a-new-service.rst +++ b/doc/source/contributor/adding-a-new-service.rst @@ -59,8 +59,8 @@ which Kolla uses throughout and which should be followed. - For OpenStack services the service should add a new ``rewriterule`` in the ``match`` element in the ``01-rewrite.conf.j2`` template file in - ``ansible/roles/common/templates/conf/filter`` to deliver log messages to - Elasticsearch. + ``ansible/roles/fluentd/templates/conf/filter`` to deliver log messages to + Opensearch. 
* Documentation From 87e6d76e731be888fc8872ac8423569ba07f56bc Mon Sep 17 00:00:00 2001 From: Doug Szumski Date: Thu, 2 Oct 2025 09:26:45 +0100 Subject: [PATCH 033/165] Trigger CI scenario when Fluentd is updated The Fluentd service is tested by the prometheus-opensearch scenario. This change will ensure the scenario runs when the Fluentd role or group vars are updated. Change-Id: Ia6b8b879082d746f9ed1aec142f9fcbcb9d688bc Signed-off-by: Doug Szumski --- zuul.d/scenarios/prometheus-opensearch.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zuul.d/scenarios/prometheus-opensearch.yaml b/zuul.d/scenarios/prometheus-opensearch.yaml index 481e663f37..4f18bd20d4 100644 --- a/zuul.d/scenarios/prometheus-opensearch.yaml +++ b/zuul.d/scenarios/prometheus-opensearch.yaml @@ -4,8 +4,8 @@ parent: kolla-ansible-base voting: false files: !inherit - - ^ansible/group_vars/all/(common|grafana|opensearch|prometheus).yml - - ^ansible/roles/(common|opensearch|grafana|prometheus)/ + - ^ansible/group_vars/all/(common|fluentd|grafana|opensearch|prometheus).yml + - ^ansible/roles/(common|fluentd|grafana|opensearch|prometheus)/ - ^tests/test-prometheus-opensearch.sh vars: scenario: prometheus-opensearch From ed5a70f8a36817a6b3ac0ed52acacb44acb3d951 Mon Sep 17 00:00:00 2001 From: Piotr Milewski Date: Thu, 26 Jun 2025 16:21:10 +0200 Subject: [PATCH 034/165] fluentd: Switch to direct log shipping to OpenSearch nodes Fluentd now sends logs directly to OpenSearch node IPs instead of using a Load Balancer. This change reduces Load Balancer overhead from high log volumes. The Load Balancer for OpenSearch remains in place, as it is still used by OpenSearch Dashboards. Fluentd continues to handle node availability, automatically distributing logs via round-robin, ensuring log delivery even if individual OpenSearch nodes become unavailable. Fixed Fluentd configuration template to avoid generating unnecessary empty lines when optional parameters are not set. 
Change-Id: I972268857f0389b3f313070491352789bcb1e409 Signed-off-by: Piotr Milewski --- ansible/roles/fluentd/defaults/main.yml | 5 +--- .../templates/conf/output/00-local.conf.j2 | 2 +- .../conf/output/03-opensearch.conf.j2 | 24 ++++++------------- .../fluentd-direct-b37822ae1145355e.yaml | 14 +++++++++++ tests/test-prometheus-opensearch.sh | 1 - 5 files changed, 23 insertions(+), 23 deletions(-) create mode 100644 releasenotes/notes/fluentd-direct-b37822ae1145355e.yaml diff --git a/ansible/roles/fluentd/defaults/main.yml b/ansible/roles/fluentd/defaults/main.yml index f749a5bea5..dfbe2d635d 100644 --- a/ansible/roles/fluentd/defaults/main.yml +++ b/ansible/roles/fluentd/defaults/main.yml @@ -24,12 +24,9 @@ fluentd_elasticsearch_cacert: "{{ openstack_cacert }}" fluentd_elasticsearch_request_timeout: "60s" fluentd_opensearch_path: "" -fluentd_opensearch_scheme: "{{ internal_protocol }}" +fluentd_opensearch_scheme: "http" fluentd_opensearch_user: "" fluentd_opensearch_password: "" -fluentd_opensearch_ssl_version: "TLSv1_2" -fluentd_opensearch_ssl_verify: "true" -fluentd_opensearch_cacert: "{{ openstack_cacert }}" fluentd_opensearch_request_timeout: "60s" #################### diff --git a/ansible/roles/fluentd/templates/conf/output/00-local.conf.j2 b/ansible/roles/fluentd/templates/conf/output/00-local.conf.j2 index 2c82f539fc..15fc270c54 100644 --- a/ansible/roles/fluentd/templates/conf/output/00-local.conf.j2 +++ b/ansible/roles/fluentd/templates/conf/output/00-local.conf.j2 @@ -51,7 +51,7 @@ {% elif log_direct_to_opensearch %} @type opensearch - host {{ opensearch_address }} + hosts {% for host in groups['opensearch'] %}{{ 'api' | kolla_address(host) }}{% if not loop.last %},{% endif %}{% endfor %} port {{ opensearch_port }} scheme {{ fluentd_opensearch_scheme }} {% if fluentd_opensearch_path != '' %} diff --git a/ansible/roles/fluentd/templates/conf/output/03-opensearch.conf.j2 b/ansible/roles/fluentd/templates/conf/output/03-opensearch.conf.j2 index 
dd60f1e1cc..02d39f037d 100644 --- a/ansible/roles/fluentd/templates/conf/output/03-opensearch.conf.j2 +++ b/ansible/roles/fluentd/templates/conf/output/03-opensearch.conf.j2 @@ -3,39 +3,29 @@ @type copy @type opensearch - host {{ opensearch_address }} + hosts {% for host in groups['opensearch'] %}{{ 'api' | kolla_address(host) }}{% if not loop.last %},{% endif %}{% endfor %} port {{ opensearch_port }} scheme {{ fluentd_opensearch_scheme }} -{% if fluentd_opensearch_path != '' %} - path {{ fluentd_opensearch_path }} -{% endif %} -{% if fluentd_opensearch_scheme == 'https' %} - ssl_version {{ fluentd_opensearch_ssl_version }} - ssl_verify {{ fluentd_opensearch_ssl_verify }} -{% if fluentd_opensearch_cacert | length > 0 %} - ca_file {{ fluentd_opensearch_cacert }} -{% endif %} -{% endif %} -{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != ''%} +{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != '' -%} user {{ fluentd_opensearch_user }} password {{ fluentd_opensearch_password }} -{% endif %} +{%- endif %} logstash_format true logstash_prefix {{ opensearch_log_index_prefix }} reconnect_on_error true -{% if match_pattern != 'retry_os' %} +{% if match_pattern != 'retry_os' -%} retry_tag retry_os -{% endif %} +{%- endif %} request_timeout {{ fluentd_opensearch_request_timeout }} suppress_type_name true bulk_message_request_threshold {{ fluentd_bulk_message_request_threshold }} @type file -{% if match_pattern == 'retry_os' %} +{% if match_pattern == 'retry_os' -%} path /var/lib/fluentd/data/opensearch.buffer/openstack_retry.* {% else %} path /var/lib/fluentd/data/opensearch.buffer/openstack.* -{% endif %} +{%- endif %} flush_interval 15s chunk_limit_size {{ fluentd_buffer_chunk_limit_size }} diff --git a/releasenotes/notes/fluentd-direct-b37822ae1145355e.yaml b/releasenotes/notes/fluentd-direct-b37822ae1145355e.yaml new file mode 100644 index 0000000000..3f16efb467 --- /dev/null +++ 
b/releasenotes/notes/fluentd-direct-b37822ae1145355e.yaml @@ -0,0 +1,14 @@ +--- +features: + - | + Fluentd now sends logs directly to OpenSearch node IPs instead of using + a Load Balancer. This change reduces Load Balancer overhead from high + log volumes. The Load Balancer for OpenSearch remains in place, as it + is still used by OpenSearch Dashboards. Fluentd continues to handle node + availability, automatically distributing logs via round-robin to + available nodes, ensuring log delivery even if individual OpenSearch + nodes become unavailable. +fixes: + - | + Fixed Fluentd configuration template to avoid generating unnecessary + empty lines when optional parameters are not set. diff --git a/tests/test-prometheus-opensearch.sh b/tests/test-prometheus-opensearch.sh index c1f8272c16..cbb687d072 100755 --- a/tests/test-prometheus-opensearch.sh +++ b/tests/test-prometheus-opensearch.sh @@ -170,7 +170,6 @@ function test_prometheus { function test_prometheus_opensearch_logged { . /etc/kolla/admin-openrc.sh - test_opensearch_dashboards test_opensearch test_grafana From 5587c1b7869a09fff5d9753b64b928bb847b46f1 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 5 Aug 2025 14:56:10 +0200 Subject: [PATCH 035/165] Move to for Rocky Linux 10 CephAdm, Octavia, OVN and NFV jobs are not added here, because these rely on Valkey/Redis available. Ironic jobs are not added, because ipxe.efi is not packaged in EL10 and needs a followup patch. Let's Encrypt jobs are also not added, due to ssl certificate validation errors. 
Depends-On: https://review.opendev.org/c/openstack/kolla/+/956554 Depends-On: https://review.opendev.org/c/openstack/ansible-collection-kolla/+/961237 Change-Id: I51823808c4e4ac08ae8f080aacf6a3759589571d Signed-off-by: Michal Nasiadka --- ansible/group_vars/all/common.yml | 4 +- ansible/roles/fluentd/tasks/config.yml | 10 +++++ .../hacluster-pacemaker-remote.json.j2 | 3 +- ansible/roles/prechecks/vars/main.yml | 2 +- .../admin/mariadb-backup-and-restore.rst | 6 +-- doc/source/reference/networking/octavia.rst | 2 +- doc/source/user/quickstart-development.rst | 2 +- doc/source/user/quickstart.rst | 2 +- doc/source/user/support-matrix.rst | 8 ++-- roles/cephadm/defaults/main.yml | 2 +- roles/cephadm/tasks/pkg_redhat.yml | 7 ++- zuul.d/nodesets.yaml | 44 +++++++++++++++++++ zuul.d/scenarios/aio.yaml | 16 +++++++ zuul.d/scenarios/bifrost.yaml | 6 +++ zuul.d/scenarios/cells.yaml | 6 +++ zuul.d/scenarios/haproxy-fqdn.yaml | 6 +++ zuul.d/scenarios/hashi-vault.yaml | 6 +++ zuul.d/scenarios/ipv6.yaml | 6 +++ zuul.d/scenarios/kvm.yaml | 6 +++ zuul.d/scenarios/magnum.yaml | 6 +++ zuul.d/scenarios/mariadb.yaml | 6 +++ zuul.d/scenarios/masakari.yaml | 6 +++ zuul.d/scenarios/prometheus-opensearch.yaml | 6 +++ zuul.d/scenarios/skyline.yaml | 12 +++++ zuul.d/scenarios/telemetry.yaml | 6 +++ 25 files changed, 169 insertions(+), 17 deletions(-) diff --git a/ansible/group_vars/all/common.yml b/ansible/group_vars/all/common.yml index a08a3ce3c1..82d670f48b 100644 --- a/ansible/group_vars/all/common.yml +++ b/ansible/group_vars/all/common.yml @@ -214,9 +214,9 @@ kolla_container_engine: "docker" # Internal Image options ######################### kolla_base_distro_version_default_map: { - "centos": "stream9", + "centos": "stream10", "debian": "bookworm", - "rocky": "9", + "rocky": "10", "ubuntu": "noble", } diff --git a/ansible/roles/fluentd/tasks/config.yml b/ansible/roles/fluentd/tasks/config.yml index d1fc3ac338..83a5504f3f 100644 --- a/ansible/roles/fluentd/tasks/config.yml +++ 
b/ansible/roles/fluentd/tasks/config.yml @@ -16,6 +16,16 @@ when: - kolla_copy_ca_into_containers | bool +- name: Ensure /var/log/journal exists on EL systems + become: true + file: + path: /var/log/journal + state: directory + owner: root + group: systemd-journal + mode: "2755" + when: kolla_base_distro in ['centos', 'rocky'] + - name: Copying over config.json files for services template: src: "{{ item.key }}.json.j2" diff --git a/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2 b/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2 index e84923d67d..4cc1568849 100644 --- a/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2 +++ b/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2 @@ -1,5 +1,6 @@ +{% set remoted = '/usr/sbin/pacemaker-remoted' if kolla_base_distro in ['centos', 'rocky'] else '/usr/sbin/pacemaker_remoted' %} { - "command": "/usr/sbin/pacemaker_remoted -l /var/log/kolla/hacluster/pacemaker-remoted.log{% if openstack_logging_debug | bool %} -VV{% endif %} -p {{ hacluster_pacemaker_remote_port }}", + "command": "{{ remoted }} -l /var/log/kolla/hacluster/pacemaker-remoted.log{% if openstack_logging_debug | bool %} -VV{% endif %} -p {{ hacluster_pacemaker_remote_port }}", "config_files": [ { "source": "{{ container_config_directory }}/authkey", diff --git a/ansible/roles/prechecks/vars/main.yml b/ansible/roles/prechecks/vars/main.yml index 6b65e8bfcd..7cb1fe28e1 100644 --- a/ansible/roles/prechecks/vars/main.yml +++ b/ansible/roles/prechecks/vars/main.yml @@ -14,6 +14,6 @@ host_os_distributions: Debian: - "bookworm" Rocky: - - "9" + - "10" Ubuntu: - "noble" diff --git a/doc/source/admin/mariadb-backup-and-restore.rst b/doc/source/admin/mariadb-backup-and-restore.rst index 6ebb73d5b5..69fd411fd4 100644 --- a/doc/source/admin/mariadb-backup-and-restore.rst +++ b/doc/source/admin/mariadb-backup-and-restore.rst @@ -83,7 +83,7 @@ following options on the first database node: docker run 
--rm -it --volumes-from mariadb --name dbrestore \ --volume mariadb_backup:/backup \ - quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \ + quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-10 \ /bin/bash (dbrestore) $ cd /backup (dbrestore) $ rm -rf /backup/restore @@ -105,7 +105,7 @@ place, again on the first node: docker run --rm -it --volumes-from mariadb --name dbrestore \ --volume mariadb_backup:/backup \ - quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \ + quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-10 \ /bin/bash (dbrestore) $ rm -rf /var/lib/mysql/* (dbrestore) $ rm -rf /var/lib/mysql/\.[^\.]* @@ -148,7 +148,7 @@ incremental backup, docker run --rm -it --volumes-from mariadb --name dbrestore \ --volume mariadb_backup:/backup --tmpfs /backup/restore \ - quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \ + quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-10 \ /bin/bash (dbrestore) $ cd /backup (dbrestore) $ rm -rf /backup/restore diff --git a/doc/source/reference/networking/octavia.rst b/doc/source/reference/networking/octavia.rst index af08cf9e04..7a29d67fa8 100644 --- a/doc/source/reference/networking/octavia.rst +++ b/doc/source/reference/networking/octavia.rst @@ -340,7 +340,7 @@ Now deploy Octavia: Amphora image ------------- -It is necessary to build an Amphora image. On CentOS / Rocky 9: +It is necessary to build an Amphora image. On CentOS / Rocky 10: .. code-block:: console diff --git a/doc/source/user/quickstart-development.rst b/doc/source/user/quickstart-development.rst index 0dccdb1f18..6e29a2ad51 100644 --- a/doc/source/user/quickstart-development.rst +++ b/doc/source/user/quickstart-development.rst @@ -186,7 +186,7 @@ There are a few options that are required to deploy Kolla Ansible: - Rocky (``rocky``) - Ubuntu (``ubuntu``) - For newcomers, we recommend to use Rocky Linux 9 or Ubuntu 24.04. 
+ For newcomers, we recommend to use Rocky Linux 10 or Ubuntu 24.04. .. code-block:: console diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 4afcab38aa..b4d8553670 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -177,7 +177,7 @@ There are a few options that are required to deploy Kolla Ansible: - Rocky (``rocky``) - Ubuntu (``ubuntu``) - For newcomers, we recommend to use Rocky Linux 9 or Ubuntu 24.04. + For newcomers, we recommend to use Rocky Linux 10 or Ubuntu 24.04. .. code-block:: console diff --git a/doc/source/user/support-matrix.rst b/doc/source/user/support-matrix.rst index 0301775f19..8925a2c021 100644 --- a/doc/source/user/support-matrix.rst +++ b/doc/source/user/support-matrix.rst @@ -9,13 +9,13 @@ Kolla Ansible supports the following host Operating Systems (OS): .. note:: - CentOS Stream 9 is supported as a host OS while Kolla does not publish CS9 + CentOS Stream 10 is supported as a host OS while Kolla does not publish CS10 based images. Users can build them on their own. We recommend using Rocky - Linux 9 images instead. + Linux 10 images instead. 
-* CentOS Stream 9 +* CentOS Stream 10 * Debian Bookworm (12) -* Rocky Linux 9 +* Rocky Linux 10 * Ubuntu Noble (24.04) Supported container images diff --git a/roles/cephadm/defaults/main.yml b/roles/cephadm/defaults/main.yml index 6739d491ce..5ab4c04257 100644 --- a/roles/cephadm/defaults/main.yml +++ b/roles/cephadm/defaults/main.yml @@ -1,5 +1,5 @@ --- -cephadm_ceph_release: "reef" +cephadm_ceph_release: "squid" cephadm_ceph_apt_repo: "deb http://download.ceph.com/debian-{{ cephadm_ceph_release }}/ {{ ansible_distribution_release }} main" cephadm_use_package_from_distribution: false diff --git a/roles/cephadm/tasks/pkg_redhat.yml b/roles/cephadm/tasks/pkg_redhat.yml index 85708cef43..af8c747b83 100644 --- a/roles/cephadm/tasks/pkg_redhat.yml +++ b/roles/cephadm/tasks/pkg_redhat.yml @@ -22,8 +22,11 @@ become: True when: not cephadm_use_package_from_distribution -- name: Install cephadm +# NOTE(mnasiadka): cephadm bootstrap failing on jinja2 missing +- name: Install cephadm and jinja2 dnf: - name: "cephadm" + name: + - cephadm + - python3-jinja2 install_weak_deps: False become: True diff --git a/zuul.d/nodesets.yaml b/zuul.d/nodesets.yaml index a28552933d..295df62e61 100644 --- a/zuul.d/nodesets.yaml +++ b/zuul.d/nodesets.yaml @@ -67,6 +67,50 @@ - name: primary label: debian-bookworm-nested-virt-8GB +- nodeset: + name: kolla-ansible-rocky-10-8GB + nodes: + - name: primary + label: rockylinux-10-8GB + +- nodeset: + name: kolla-ansible-rocky-10-16GB + nodes: + - name: primary + label: rockylinux-10-16GB + +- nodeset: + name: kolla-ansible-rocky-10-masakari-8GB + nodes: + - name: primary + label: rockylinux-10-8GB + - name: secondary + label: rockylinux-10-8GB + - name: ternary1 + label: rockylinux-10-8GB + - name: ternary2 + label: rockylinux-10-8GB + +- nodeset: + name: kolla-ansible-rocky-10-multi-8GB + nodes: + - name: primary + label: rockylinux-10-8GB + - name: secondary1 + label: rockylinux-10-8GB + - name: secondary2 + label: rockylinux-10-8GB + +- nodeset: + 
name: kolla-ansible-rocky-10-multi-16GB + nodes: + - name: primary + label: rockylinux-10-16GB + - name: secondary1 + label: rockylinux-10-16GB + - name: secondary2 + label: rockylinux-10-16GB + - nodeset: name: kolla-ansible-ubuntu-noble-8GB nodes: diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index 5b43461b27..7a624359a1 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -61,6 +61,18 @@ nodeset: kolla-ansible-debian-bookworm-16GB timeout: 9000 +- job: + name: kolla-ansible-rocky-10 + parent: kolla-ansible-base + nodeset: kolla-ansible-rocky-10-16GB + +- job: + name: kolla-ansible-rocky-10-podman + parent: kolla-ansible-rocky-10 + nodeset: kolla-ansible-rocky-10-16GB + vars: + container_engine: podman + - job: name: kolla-ansible-ubuntu-noble parent: kolla-ansible-base @@ -95,6 +107,8 @@ - kolla-ansible-debian-bookworm - kolla-ansible-debian-bookworm-podman - kolla-ansible-debian-bookworm-upgrade + - kolla-ansible-rocky-10 + - kolla-ansible-rocky-10-podman - kolla-ansible-ubuntu-noble - kolla-ansible-ubuntu-noble-podman - kolla-ansible-ubuntu-noble-upgrade @@ -109,6 +123,8 @@ - kolla-ansible-debian-bookworm - kolla-ansible-debian-bookworm-podman - kolla-ansible-debian-bookworm-upgrade + - kolla-ansible-rocky-10 + - kolla-ansible-rocky-10-podman - kolla-ansible-ubuntu-noble - kolla-ansible-ubuntu-noble-upgrade - kolla-ansible-ubuntu-noble-podman diff --git a/zuul.d/scenarios/bifrost.yaml b/zuul.d/scenarios/bifrost.yaml index 9c6690052a..d4fa31204d 100644 --- a/zuul.d/scenarios/bifrost.yaml +++ b/zuul.d/scenarios/bifrost.yaml @@ -16,6 +16,11 @@ parent: kolla-ansible-bifrost-base nodeset: kolla-ansible-debian-bookworm-8GB +- job: + name: kolla-ansible-rocky-10-bifrost + parent: kolla-ansible-bifrost-base + nodeset: kolla-ansible-rocky-10-8GB + - job: name: kolla-ansible-ubuntu-noble-bifrost parent: kolla-ansible-bifrost-base @@ -28,4 +33,5 @@ check: jobs: - kolla-ansible-debian-bookworm-bifrost + - 
kolla-ansible-rocky-10-bifrost - kolla-ansible-ubuntu-noble-bifrost diff --git a/zuul.d/scenarios/cells.yaml b/zuul.d/scenarios/cells.yaml index ce061af6b6..5b546bc8e4 100644 --- a/zuul.d/scenarios/cells.yaml +++ b/zuul.d/scenarios/cells.yaml @@ -19,6 +19,11 @@ parent: kolla-ansible-cells-base nodeset: kolla-ansible-debian-bookworm-multi-16GB +- job: + name: kolla-ansible-rocky-10-cells + parent: kolla-ansible-cells-base + nodeset: kolla-ansible-rocky-10-multi-16GB + - job: name: kolla-ansible-ubuntu-noble-cells parent: kolla-ansible-cells-base @@ -31,4 +36,5 @@ check: jobs: - kolla-ansible-debian-bookworm-cells + - kolla-ansible-rocky-10-cells - kolla-ansible-ubuntu-noble-cells diff --git a/zuul.d/scenarios/haproxy-fqdn.yaml b/zuul.d/scenarios/haproxy-fqdn.yaml index 672d7a8b5b..a668696a14 100644 --- a/zuul.d/scenarios/haproxy-fqdn.yaml +++ b/zuul.d/scenarios/haproxy-fqdn.yaml @@ -19,6 +19,11 @@ parent: kolla-ansible-haproxy-fqdn-base nodeset: kolla-ansible-debian-bookworm-8GB +- job: + name: kolla-ansible-rocky-10-haproxy-fqdn + parent: kolla-ansible-haproxy-fqdn-base + nodeset: kolla-ansible-rocky-10-8GB + - job: name: kolla-ansible-ubuntu-noble-haproxy-fqdn parent: kolla-ansible-haproxy-fqdn-base @@ -31,4 +36,5 @@ check: jobs: - kolla-ansible-debian-bookworm-haproxy-fqdn + - kolla-ansible-rocky-10-haproxy-fqdn - kolla-ansible-ubuntu-noble-haproxy-fqdn diff --git a/zuul.d/scenarios/hashi-vault.yaml b/zuul.d/scenarios/hashi-vault.yaml index 74c92c4454..429285ffd8 100644 --- a/zuul.d/scenarios/hashi-vault.yaml +++ b/zuul.d/scenarios/hashi-vault.yaml @@ -20,6 +20,11 @@ parent: kolla-ansible-hashi-vault-base nodeset: kolla-ansible-debian-bookworm-8GB +- job: + name: kolla-ansible-rocky-10-hashi-vault + parent: kolla-ansible-hashi-vault-base + nodeset: kolla-ansible-rocky-10-8GB + - job: name: kolla-ansible-ubuntu-noble-hashi-vault parent: kolla-ansible-hashi-vault-base @@ -32,4 +37,5 @@ check: jobs: - kolla-ansible-debian-bookworm-hashi-vault + - 
kolla-ansible-rocky-10-hashi-vault - kolla-ansible-ubuntu-noble-hashi-vault diff --git a/zuul.d/scenarios/ipv6.yaml b/zuul.d/scenarios/ipv6.yaml index e45bf0cd6b..186b4cc85b 100644 --- a/zuul.d/scenarios/ipv6.yaml +++ b/zuul.d/scenarios/ipv6.yaml @@ -23,6 +23,11 @@ parent: kolla-ansible-ipv6-base nodeset: kolla-ansible-debian-bookworm-multi-8GB +- job: + name: kolla-ansible-rocky-10-ipv6 + parent: kolla-ansible-ipv6-base + nodeset: kolla-ansible-rocky-10-multi-8GB + - job: name: kolla-ansible-ubuntu-noble-ipv6 parent: kolla-ansible-ipv6-base @@ -35,4 +40,5 @@ check: jobs: - kolla-ansible-debian-bookworm-ipv6 + - kolla-ansible-rocky-10-ipv6 - kolla-ansible-ubuntu-noble-ipv6 diff --git a/zuul.d/scenarios/kvm.yaml b/zuul.d/scenarios/kvm.yaml index 829021ad3b..a78b927d40 100644 --- a/zuul.d/scenarios/kvm.yaml +++ b/zuul.d/scenarios/kvm.yaml @@ -14,6 +14,11 @@ parent: kolla-ansible-kvm-base nodeset: kolla-ansible-debian-bookworm-nested-virt +- job: + name: kolla-ansible-rocky-10-kvm + parent: kolla-ansible-kvm-base + nodeset: kolla-ansible-rocky-10-8GB + - job: name: kolla-ansible-ubuntu-noble-kvm parent: kolla-ansible-kvm-base @@ -26,4 +31,5 @@ check: jobs: - kolla-ansible-debian-bookworm-kvm + - kolla-ansible-rocky-10-kvm - kolla-ansible-ubuntu-noble-kvm diff --git a/zuul.d/scenarios/magnum.yaml b/zuul.d/scenarios/magnum.yaml index cd19d79a05..fb0e08b2ff 100644 --- a/zuul.d/scenarios/magnum.yaml +++ b/zuul.d/scenarios/magnum.yaml @@ -21,6 +21,11 @@ parent: kolla-ansible-magnum-base nodeset: kolla-ansible-debian-bookworm-8GB +- job: + name: kolla-ansible-rocky-10-magnum + parent: kolla-ansible-magnum-base + nodeset: kolla-ansible-rocky-10-8GB + - job: name: kolla-ansible-ubuntu-noble-magnum parent: kolla-ansible-magnum-base @@ -33,4 +38,5 @@ check: jobs: - kolla-ansible-debian-bookworm-magnum + - kolla-ansible-rocky-10-magnum - kolla-ansible-ubuntu-noble-magnum diff --git a/zuul.d/scenarios/mariadb.yaml b/zuul.d/scenarios/mariadb.yaml index 776a9999cc..59be953964 
100644 --- a/zuul.d/scenarios/mariadb.yaml +++ b/zuul.d/scenarios/mariadb.yaml @@ -22,6 +22,11 @@ parent: kolla-ansible-mariadb-base nodeset: kolla-ansible-debian-bookworm-multi-16GB +- job: + name: kolla-ansible-rocky-10-mariadb + parent: kolla-ansible-mariadb-base + nodeset: kolla-ansible-rocky-10-multi-16GB + - job: name: kolla-ansible-ubuntu-noble-mariadb parent: kolla-ansible-mariadb-base @@ -34,6 +39,7 @@ check: jobs: - kolla-ansible-debian-bookworm-mariadb + - kolla-ansible-rocky-10-mariadb - kolla-ansible-ubuntu-noble-mariadb gate: jobs: diff --git a/zuul.d/scenarios/masakari.yaml b/zuul.d/scenarios/masakari.yaml index ed6182b8b2..11a5955c22 100644 --- a/zuul.d/scenarios/masakari.yaml +++ b/zuul.d/scenarios/masakari.yaml @@ -21,6 +21,11 @@ parent: kolla-ansible-masakari-base nodeset: kolla-ansible-debian-bookworm-masakari-8GB +- job: + name: kolla-ansible-rocky-10-masakari + parent: kolla-ansible-masakari-base + nodeset: kolla-ansible-rocky-10-masakari-8GB + - job: name: kolla-ansible-ubuntu-noble-masakari parent: kolla-ansible-masakari-base @@ -33,4 +38,5 @@ check: jobs: - kolla-ansible-debian-bookworm-masakari + - kolla-ansible-rocky-10-masakari - kolla-ansible-ubuntu-noble-masakari diff --git a/zuul.d/scenarios/prometheus-opensearch.yaml b/zuul.d/scenarios/prometheus-opensearch.yaml index 9cd9938496..463e54d6df 100644 --- a/zuul.d/scenarios/prometheus-opensearch.yaml +++ b/zuul.d/scenarios/prometheus-opensearch.yaml @@ -33,6 +33,11 @@ parent: kolla-ansible-debian-bookworm-prometheus-opensearch nodeset: kolla-ansible-debian-bookworm-8GB +- job: + name: kolla-ansible-rocky-10-prometheus-opensearch + parent: kolla-ansible-prometheus-opensearch-base + nodeset: kolla-ansible-rocky-10-8GB + - job: name: kolla-ansible-ubuntu-noble-prometheus-opensearch parent: kolla-ansible-prometheus-opensearch-base @@ -51,5 +56,6 @@ jobs: - kolla-ansible-debian-bookworm-prometheus-opensearch - kolla-ansible-debian-bookworm-prometheus-opensearch-upgrade + - 
kolla-ansible-rocky-10-prometheus-opensearch - kolla-ansible-ubuntu-noble-prometheus-opensearch - kolla-ansible-ubuntu-noble-prometheus-opensearch-upgrade diff --git a/zuul.d/scenarios/skyline.yaml b/zuul.d/scenarios/skyline.yaml index c25bbdd96d..6386ad7a62 100644 --- a/zuul.d/scenarios/skyline.yaml +++ b/zuul.d/scenarios/skyline.yaml @@ -29,6 +29,16 @@ parent: kolla-ansible-skyline-sso-base nodeset: kolla-ansible-debian-bookworm-8GB +- job: + name: kolla-ansible-rocky-10-skyline + parent: kolla-ansible-skyline-base + nodeset: kolla-ansible-rocky-10-8GB + +- job: + name: kolla-ansible-rocky-10-skyline-sso + parent: kolla-ansible-skyline-sso-base + nodeset: kolla-ansible-rocky-10-8GB + - job: name: kolla-ansible-ubuntu-noble-skyline parent: kolla-ansible-skyline-base @@ -47,5 +57,7 @@ jobs: - kolla-ansible-debian-bookworm-skyline - kolla-ansible-debian-bookworm-skyline-sso + - kolla-ansible-rocky-10-skyline + - kolla-ansible-rocky-10-skyline-sso - kolla-ansible-ubuntu-noble-skyline - kolla-ansible-ubuntu-noble-skyline-sso diff --git a/zuul.d/scenarios/telemetry.yaml b/zuul.d/scenarios/telemetry.yaml index ec20cd708f..e95a41a913 100644 --- a/zuul.d/scenarios/telemetry.yaml +++ b/zuul.d/scenarios/telemetry.yaml @@ -18,6 +18,11 @@ parent: kolla-ansible-telemetry-base nodeset: kolla-ansible-debian-bookworm-8GB +- job: + name: kolla-ansible-rocky-10-telemetry + parent: kolla-ansible-telemetry-base + nodeset: kolla-ansible-rocky-10-8GB + - job: name: kolla-ansible-ubuntu-noble-telemetry parent: kolla-ansible-telemetry-base @@ -30,4 +35,5 @@ check: jobs: - kolla-ansible-debian-bookworm-telemetry + - kolla-ansible-rocky-10-telemetry - kolla-ansible-ubuntu-noble-telemetry From d946423d2dc7a1ab381ac36e6354760e638393a1 Mon Sep 17 00:00:00 2001 From: Doug Szumski Date: Thu, 2 Oct 2025 17:15:25 +0100 Subject: [PATCH 036/165] Connect OpenSearch Dashboards directly OpenSearch Dashboards accepts a list of OpenSearch hosts [1]. 
This patch switches to using that mechanism for load balancing, rather than HAProxy. One benefit is that if HAProxy goes down, OpenSearch Dashboards will remain functional. Connection to the OpenSearch frontend remains unchanged. [1] https://github.com/opensearch-project/OpenSearch-Dashboards/blob/main/config/opensearch_dashboards.yml#L26 Signed-off-by: Doug Szumski Change-Id: If1a1bd0adbd1d0a41d9432d4474e8e74ca8b6873 --- .../roles/opensearch/templates/opensearch_dashboards.yml.j2 | 2 +- ...rch-dashboards-direct-to-opensearch-0d9e94c4b6a608c0.yaml | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/opensearch-dashboards-direct-to-opensearch-0d9e94c4b6a608c0.yaml diff --git a/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2 b/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2 index ca2f04886e..4555094e02 100644 --- a/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2 +++ b/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2 @@ -2,7 +2,7 @@ opensearchDashboards.defaultAppId: "{{ opensearch_dashboards_default_app_id }}" logging.dest: /var/log/kolla/opensearch-dashboards/opensearch-dashboards.log server.port: {{ opensearch_dashboards_port }} server.host: "{{ api_interface_address }}" -opensearch.hosts: "{{ opensearch_internal_endpoint }}" +opensearch.hosts: [{% for host in groups['opensearch'] %}"http://{{ 'api' | kolla_address(host) }}:{{ opensearch_port }}"{% if not loop.last %},{% endif %}{% endfor %}] opensearch.requestTimeout: {{ opensearch_dashboards_opensearch_request_timeout }} opensearch.shardTimeout: {{ opensearch_dashboards_opensearch_shard_timeout }} opensearch.ssl.verificationMode: "{{ 'full' if opensearch_dashboards_opensearch_ssl_verify | bool else 'none' }}" diff --git a/releasenotes/notes/opensearch-dashboards-direct-to-opensearch-0d9e94c4b6a608c0.yaml b/releasenotes/notes/opensearch-dashboards-direct-to-opensearch-0d9e94c4b6a608c0.yaml new file mode 100644 
index 0000000000..1fdd9b0880 --- /dev/null +++ b/releasenotes/notes/opensearch-dashboards-direct-to-opensearch-0d9e94c4b6a608c0.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + OpenSearch Dashboards now connects directly to OpenSearch nodes, rather + than via a HAProxy endpoint. This should have no user facing impact. From 0ea03e3155c05b4cae05562d7f93949c3863a112 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 3 Oct 2025 11:50:56 +0200 Subject: [PATCH 037/165] CI: Add nova-cell role to aio triggers Change-Id: I4641683420e813e2a8fb5dd2d26c1e07aa486403 Signed-off-by: Michal Nasiadka --- zuul.d/scenarios/aio.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index 8d844901cf..8963996553 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -5,7 +5,7 @@ files: !inherit - ^ansible/group_vars/all/(common|fluentd|glance|haproxy|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml - - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|openvswitch|placement|proxysql|rabbitmq)/ + - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq)/ - job: name: kolla-ansible-centos-10s From 3d9a0549ce039861b8aa735aa6bd61bb72690609 Mon Sep 17 00:00:00 2001 From: Norman Ziegner Date: Wed, 26 Mar 2025 14:14:06 +0100 Subject: [PATCH 038/165] docs: add validate-config to cli tips & tricks Change-Id: Iac15261a0916d14094afdd9b3cb5cba88f505d0f Signed-off-by: Norman Ziegner --- doc/source/user/operating-kolla.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/user/operating-kolla.rst b/doc/source/user/operating-kolla.rst index 412b8dbd51..a48f3357f1 100644 --- 
a/doc/source/user/operating-kolla.rst +++ b/doc/source/user/operating-kolla.rst @@ -269,6 +269,10 @@ images on hosts. files for enabled OpenStack services, without then restarting the containers so it is not applied right away. +``kolla-ansible validate-config -i INVENTORY`` is used to validate generated +configuration files of enabled OpenStack services. By default, the results are +saved to ``/var/log/kolla/config-validate`` when issues are detected. + ``kolla-ansible ... -i INVENTORY1 -i INVENTORY2`` Multiple inventories can be specified by passing the ``--inventory`` or ``-i`` command line option multiple times. This can be useful to share configuration between multiple environments. From 2b699d0b0a0c102de8db11a103f78b41c4f611d7 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 3 Oct 2025 16:45:16 +0200 Subject: [PATCH 039/165] CI: Fine tune proxysql connect and ping timeouts Change-Id: Ide74d1a4ca4d0a9a65f8ce4fff2563c4f08708a7 Signed-off-by: Michal Nasiadka --- tests/templates/globals-default.j2 | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2 index 387e1c9b73..32a28773fb 100644 --- a/tests/templates/globals-default.j2 +++ b/tests/templates/globals-default.j2 @@ -45,7 +45,11 @@ mariadb_wsrep_extra_provider_options: - "evs.inactive_timeout=PT30S" - "evs.keepalive_period=PT3S" -mariadb_monitor_connect_timeout: "60000" +mariadb_monitor_connect_interval: "60000" +mariadb_monitor_connect_timeout: "180000" +mariadb_monitor_ping_interval: "60000" +mariadb_monitor_ping_max_failures: "10" +mariadb_monitor_ping_timeout: "10000" nova_compute_virt_type: "{{ virt_type }}" From e47d73502fbcab579956b7d61f63670615796e45 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 3 Oct 2025 11:52:16 +0200 Subject: [PATCH 040/165] CI: Switch ovn jobs to voting and add to gate Since Debian is often failing (cirros boot crashes) - let's mark it as non voting. 
Change-Id: Icb8fc54d658937cc806e0441b452b6f4320b292a Signed-off-by: Michal Nasiadka --- zuul.d/scenarios/ovn.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/zuul.d/scenarios/ovn.yaml b/zuul.d/scenarios/ovn.yaml index 17a46c569b..6e94728457 100644 --- a/zuul.d/scenarios/ovn.yaml +++ b/zuul.d/scenarios/ovn.yaml @@ -2,7 +2,6 @@ - job: name: kolla-ansible-ovn-base parent: kolla-ansible-base - voting: false files: !inherit - ^ansible/group_vars/all/(neutron|octavia|openvswitch|ovn).yml - ^ansible/roles/(neutron|octavia|openvswitch|ovn-controller|ovn-db)/ @@ -18,6 +17,8 @@ name: kolla-ansible-debian-bookworm-ovn parent: kolla-ansible-ovn-base nodeset: kolla-ansible-debian-bookworm-multi-16GB + # NOTE(mnasiadka): Often cirros boot is crashing on cannot open root device + voting: false - job: name: kolla-ansible-debian-bookworm-ovn-upgrade @@ -46,3 +47,7 @@ - kolla-ansible-debian-bookworm-ovn-upgrade - kolla-ansible-ubuntu-noble-ovn - kolla-ansible-ubuntu-noble-ovn-upgrade + gate: + jobs: + - kolla-ansible-ubuntu-noble-ovn + - kolla-ansible-ubuntu-noble-ovn-upgrade From 5bafdd206fb9cae34825bd6ed8226f8021040674 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 6 Oct 2025 10:43:44 +0200 Subject: [PATCH 041/165] CI: Fix Rocky10 jobs parent Change-Id: I43d0a65ae796b080dd977f5c227f7360746e7c9b Signed-off-by: Michal Nasiadka --- zuul.d/scenarios/aio.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index 8963996553..11f5ee8131 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -71,7 +71,7 @@ - job: name: kolla-ansible-rocky-10 - parent: kolla-ansible-base + parent: kolla-ansible-aio-base nodeset: kolla-ansible-rocky-10-16GB - job: From aa39fb88b6bc46e0ca305c97d6badea0ff7eedad Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 6 Oct 2025 09:09:45 +0200 Subject: [PATCH 042/165] CI: Bump cirros version to 0.6.3 Change-Id: 
Id979cc7e4f88994a051a4037224c2c1af479dcfd Signed-off-by: Michal Nasiadka --- tools/init-runonce | 2 +- zuul.d/scenarios/aio.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/init-runonce b/tools/init-runonce index 0c389642b2..900d979f23 100755 --- a/tools/init-runonce +++ b/tools/init-runonce @@ -19,7 +19,7 @@ fi # to be created. ARCH=$(uname -m) -CIRROS_RELEASE=${CIRROS_RELEASE:-0.6.2} +CIRROS_RELEASE=${CIRROS_RELEASE:-0.6.3} IMAGE_PATH=/opt/cache/files/ IMAGE_URL=${IMAGE_URL:-"https://github.com/cirros-dev/cirros/releases/download/${CIRROS_RELEASE}/"} IMAGE=cirros-${CIRROS_RELEASE}-${ARCH}-disk.img diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index 11f5ee8131..9c740d706b 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -6,6 +6,7 @@ - ^ansible/group_vars/all/(common|fluentd|glance|haproxy|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq)/ + - ^tools/init-runonce - job: name: kolla-ansible-centos-10s From 701cd54b3c390f22294084ddec7c93ba1c48e342 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20Kr=C4=8Dek?= Date: Wed, 30 Apr 2025 07:34:19 +0000 Subject: [PATCH 043/165] Move tasks from k-a role common to a-c-k MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move tasks that modified host configuration from kolla-ansible role common to a-c-k as they need to be run only once on the bootstrap of the host and are not strongly related to the common services. 
Depends-On: https://review.opendev.org/c/openstack/ansible-collection-kolla/+/948525 Change-Id: I21c36df43425e2390b62f2d0257e99940e098734 Signed-off-by: Roman Krček --- ansible/group_vars/all/common.yml | 7 ------- ansible/roles/common/tasks/config.yml | 18 ------------------ .../common/templates/kolla-directories.conf.j2 | 3 --- ansible/roles/common/templates/kolla.target.j2 | 5 ----- .../notes/move-tasks-a8e65bbda50dd2a0.yaml | 7 +++++++ 5 files changed, 7 insertions(+), 33 deletions(-) delete mode 100644 ansible/roles/common/templates/kolla-directories.conf.j2 delete mode 100644 ansible/roles/common/templates/kolla.target.j2 create mode 100644 releasenotes/notes/move-tasks-a8e65bbda50dd2a0.yaml diff --git a/ansible/group_vars/all/common.yml b/ansible/group_vars/all/common.yml index 82d670f48b..7b8f83332b 100644 --- a/ansible/group_vars/all/common.yml +++ b/ansible/group_vars/all/common.yml @@ -87,13 +87,6 @@ run_default_volumes_podman: run_default_volumes_docker: [] -run_default_subdirectories: - - '/run/netns' - - '/run/lock/nova' - - "/run/libvirt" - - "/run/nova" - - "/run/openvswitch" - #################### # Dimensions options #################### diff --git a/ansible/roles/common/tasks/config.yml b/ansible/roles/common/tasks/config.yml index f236544336..3d8f0bb36b 100644 --- a/ansible/roles/common/tasks/config.yml +++ b/ansible/roles/common/tasks/config.yml @@ -21,24 +21,6 @@ when: - common_copy_certs | bool -- name: Copying over /run subdirectories conf - become: true - template: - src: kolla-directories.conf.j2 - dest: /etc/tmpfiles.d/kolla.conf - when: kolla_container_engine == 'podman' - -- name: Restart systemd-tmpfiles - become: true - command: systemd-tmpfiles --create - when: kolla_container_engine == 'podman' - -- name: Copying over kolla.target - become: true - template: - src: kolla.target.j2 - dest: /etc/systemd/system/kolla.target - - name: Copying over config.json files for services template: src: "{{ item.key }}.json.j2" diff --git 
a/ansible/roles/common/templates/kolla-directories.conf.j2 b/ansible/roles/common/templates/kolla-directories.conf.j2 deleted file mode 100644 index 3831b21065..0000000000 --- a/ansible/roles/common/templates/kolla-directories.conf.j2 +++ /dev/null @@ -1,3 +0,0 @@ -{% for path in run_default_subdirectories %} -d {{ path }} 0755 root root - - -{% endfor %} diff --git a/ansible/roles/common/templates/kolla.target.j2 b/ansible/roles/common/templates/kolla.target.j2 deleted file mode 100644 index 1eb3693e55..0000000000 --- a/ansible/roles/common/templates/kolla.target.j2 +++ /dev/null @@ -1,5 +0,0 @@ -[Unit] -Description=Kolla target allowing to start/stop all kolla*@.service instances at once - -[Install] -WantedBy=multi-user.target diff --git a/releasenotes/notes/move-tasks-a8e65bbda50dd2a0.yaml b/releasenotes/notes/move-tasks-a8e65bbda50dd2a0.yaml new file mode 100644 index 0000000000..3c8c90d155 --- /dev/null +++ b/releasenotes/notes/move-tasks-a8e65bbda50dd2a0.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Move tasks that modified host configuration from kolla-ansible + role common to a-c-k as they need to be run only once at the + bootstrap of the host and are not strongly related to the common + services. 
From 820a9ea0cf75028764daac2aeace998fbd3b843e Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 6 Oct 2025 16:21:06 +0200 Subject: [PATCH 044/165] CI: Add rocky 10 ironic jobs Depends-On: https://review.opendev.org/c/openstack/kolla/+/963192 Change-Id: Ic9ddafb9ad5dcd27bb5efec1e94e64db886ee954 Signed-off-by: Michal Nasiadka --- zuul.d/scenarios/ironic.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/zuul.d/scenarios/ironic.yaml b/zuul.d/scenarios/ironic.yaml index 63b642e7f6..b1fa449060 100644 --- a/zuul.d/scenarios/ironic.yaml +++ b/zuul.d/scenarios/ironic.yaml @@ -32,6 +32,11 @@ parent: kolla-ansible-debian-bookworm-ironic nodeset: kolla-ansible-debian-bookworm-16GB +- job: + name: kolla-ansible-rocky-10-ironic + parent: kolla-ansible-ironic-base + nodeset: kolla-ansible-rocky-10-16GB + - job: name: kolla-ansible-ubuntu-noble-ironic parent: kolla-ansible-ironic-base @@ -50,5 +55,6 @@ jobs: - kolla-ansible-debian-bookworm-ironic - kolla-ansible-debian-bookworm-ironic-upgrade + - kolla-ansible-rocky-10-ironic - kolla-ansible-ubuntu-noble-ironic - kolla-ansible-ubuntu-noble-ironic-upgrade From 49e64cd324e5cc7cda31f0c0632d30358b808f67 Mon Sep 17 00:00:00 2001 From: Michal Arbet Date: Tue, 7 Oct 2025 14:00:01 +0200 Subject: [PATCH 045/165] Fix Horizon crash on Memcached node failure Horizon uses Django's PyMemcacheCache backend to store session data. When one of the Memcached nodes becomes unavailable, Django raises an exception during session access, causing Horizon to return HTTP 500 errors. This patch adds 'ignore_exc': True to the cache OPTIONS to make Django treat such errors as cache misses instead of crashing. This improves Horizon stability in high-availability setups with multiple Memcached nodes. 
Closes-Bug: #2106557 Change-Id: If8873eaec35c4d2e5b13cdb8bcefb96230c59b51 Signed-off-by: Michal Arbet --- ansible/roles/horizon/templates/_9998-kolla-settings.py.j2 | 1 + releasenotes/notes/bug-2106557-6adff0f76b17500e.yaml | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 releasenotes/notes/bug-2106557-6adff0f76b17500e.yaml diff --git a/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2 b/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2 index 2abbd6acc2..4860f0a887 100644 --- a/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2 +++ b/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2 @@ -19,6 +19,7 @@ DATABASES = { {% elif groups['memcached'] | length > 0 and not horizon_backend_database | bool %} SESSION_ENGINE = 'django.contrib.sessions.backends.cache' CACHES['default']['LOCATION'] = [{% for host in groups['memcached'] %}'{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ memcached_port }}'{% if not loop.last %},{% endif %}{% endfor %}] +CACHES['default']['OPTIONS'] = {'ignore_exc': True} {% endif %} {% if kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool %} diff --git a/releasenotes/notes/bug-2106557-6adff0f76b17500e.yaml b/releasenotes/notes/bug-2106557-6adff0f76b17500e.yaml new file mode 100644 index 0000000000..7f558e85a8 --- /dev/null +++ b/releasenotes/notes/bug-2106557-6adff0f76b17500e.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes an issue where Horizon returned HTTP 500 errors when one of the + Memcached nodes was unavailable by setting ``ignore_exc`` to ``True`` in + the cache backend. + `LP#2106557 `__ From 2ed65f099c2502558fdb819ed6834d4c6eb577d9 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 7 Oct 2025 13:47:50 +0200 Subject: [PATCH 046/165] CI: Add more file triggers to aio and cells Move test-core-openstack.sh and test-dashboard.sh from base - also test-proxysql is only being run in cells scenario. 
Change-Id: I2be9d112e1a4580fd1864d34b8d2c71f47d087c3 Signed-off-by: Michal Nasiadka --- zuul.d/base.yaml | 1 - zuul.d/scenarios/aio.yaml | 3 +++ zuul.d/scenarios/cells.yaml | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 4b241ead92..0a53f6df12 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -20,7 +20,6 @@ - ^tests/(run|pre|post).yml - ^tests/setup_gate.sh - ^tests/templates/(inventory|globals-default).j2 - - ^tests/test-(core-openstack|dashboard|proxysql).sh - ^tests/upgrade.sh irrelevant-files: - ^.*\.rst$ diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index 9c740d706b..763351fbda 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -5,7 +5,10 @@ files: !inherit - ^ansible/group_vars/all/(common|fluentd|glance|haproxy|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml + - ^ansible/(action_plugins|filter_plugins|library|module_utils)/ - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq)/ + - ^kolla_ansible/ + - ^tests/test-(core-openstack|dashboard).sh - ^tools/init-runonce - job: diff --git a/zuul.d/scenarios/cells.yaml b/zuul.d/scenarios/cells.yaml index 2474e1446d..b94bbe93a0 100644 --- a/zuul.d/scenarios/cells.yaml +++ b/zuul.d/scenarios/cells.yaml @@ -8,6 +8,7 @@ - ^ansible/roles/nova/ - ^ansible/roles/nova-cell/ - ^ansible/roles/loadbalancer/ + - ^tests/test-(core-openstack|proxysql).sh vars: scenario: cells scenario_images_extra: From bdd98eca87890666f66b8b3d920f7ce0948ace44 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 10 Jul 2025 11:14:36 +0200 Subject: [PATCH 047/165] ovn-db: Add OVN_NB_DB and OVN_SB_DB env variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit It will make running ovn-nbctl and ovn-sbctl working on all nodes irrespective of their cluster role. Excerpt from ovn-nbctl: --db database The OVSDB database remote to contact. If the OVN_NB_DB en‐ vironment variable is set, its value is used as the de‐ fault. Otherwise, the default is unix:/ovnnb_db.sock, but this default is unlikely to be useful outside of single-ma‐ chine OVN test environments. Closes-Bug: #2116313 Change-Id: I85f7bfd0aaa6f2d426ddb28950acbbca9d385aac Signed-off-by: Michal Nasiadka --- ansible/roles/ovn-db/defaults/main.yml | 7 +++++ ansible/roles/ovn-db/handlers/main.yml | 3 ++ ansible/roles/ovn-db/tasks/bootstrap-db.yml | 8 +++-- doc/source/reference/networking/neutron.rst | 7 +++++ .../ovn-env-variables-b622b4c53ee275f4.yaml | 6 ++++ tests/run.yml | 1 + tests/test-ovn.sh | 29 ++++++++++++++++--- 7 files changed, 55 insertions(+), 6 deletions(-) create mode 100644 releasenotes/notes/ovn-env-variables-b622b4c53ee275f4.yaml diff --git a/ansible/roles/ovn-db/defaults/main.yml b/ansible/roles/ovn-db/defaults/main.yml index 18e999e1e6..0bc27949b6 100644 --- a/ansible/roles/ovn-db/defaults/main.yml +++ b/ansible/roles/ovn-db/defaults/main.yml @@ -4,6 +4,9 @@ ovn_db_services: container_name: ovn_northd group: ovn-northd enabled: true + environment: + OVN_NB_DB: "{{ ovn_nb_connection }}" + OVN_SB_DB: "{{ ovn_sb_connection_no_relay }}" image: "{{ ovn_northd_image_full }}" volumes: "{{ ovn_northd_default_volumes + ovn_northd_extra_volumes }}" dimensions: "{{ ovn_northd_dimensions }}" @@ -11,6 +14,8 @@ ovn_db_services: container_name: ovn_nb_db group: ovn-nb-db enabled: true + environment: + OVN_NB_DB: "{{ ovn_nb_connection }}" image: "{{ ovn_nb_db_image_full }}" volumes: "{{ ovn_nb_db_default_volumes + ovn_nb_db_extra_volumes }}" dimensions: "{{ ovn_nb_db_dimensions }}" @@ -18,6 +23,8 @@ ovn_db_services: container_name: ovn_sb_db group: ovn-sb-db enabled: true + environment: + OVN_SB_DB: "{{ ovn_sb_connection_no_relay }}" image: "{{ 
ovn_sb_db_image_full }}" volumes: "{{ ovn_sb_db_default_volumes + ovn_sb_db_extra_volumes }}" dimensions: "{{ ovn_sb_db_dimensions }}" diff --git a/ansible/roles/ovn-db/handlers/main.yml b/ansible/roles/ovn-db/handlers/main.yml index 76c04399f4..128659c7a5 100644 --- a/ansible/roles/ovn-db/handlers/main.yml +++ b/ansible/roles/ovn-db/handlers/main.yml @@ -7,6 +7,7 @@ kolla_container: action: "recreate_or_restart_container" common_options: "{{ docker_common_options }}" + environment: "{{ service.environment }}" name: "{{ service.container_name }}" image: "{{ service.image }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" @@ -20,6 +21,7 @@ kolla_container: action: "recreate_or_restart_container" common_options: "{{ docker_common_options }}" + environment: "{{ service.environment }}" name: "{{ service.container_name }}" image: "{{ service.image }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" @@ -48,6 +50,7 @@ kolla_container: action: "recreate_or_restart_container" common_options: "{{ docker_common_options }}" + environment: "{{ service.environment }}" name: "{{ service.container_name }}" image: "{{ service.image }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" diff --git a/ansible/roles/ovn-db/tasks/bootstrap-db.yml b/ansible/roles/ovn-db/tasks/bootstrap-db.yml index 79bace7b62..d325ca9ac3 100644 --- a/ansible/roles/ovn-db/tasks/bootstrap-db.yml +++ b/ansible/roles/ovn-db/tasks/bootstrap-db.yml @@ -24,7 +24,9 @@ become: true command: >- {{ kolla_container_engine }} exec ovn_nb_db - ovn-nbctl --inactivity-probe={{ ovn_nb_db_inactivity_probe }} + ovn-nbctl + --db unix:/var/run/ovn/ovnnb_db.sock + --inactivity-probe={{ ovn_nb_db_inactivity_probe }} set-connection ptcp:{{ ovn_nb_db_port }}:0.0.0.0 register: ovn_nb_set_connection_result retries: 3 @@ -47,7 +49,9 @@ become: true command: >- {{ kolla_container_engine }} exec ovn_sb_db - ovn-sbctl --inactivity-probe={{ ovn_sb_db_inactivity_probe }} + ovn-sbctl + --db 
unix:/var/run/ovn/ovnsb_db.sock + --inactivity-probe={{ ovn_sb_db_inactivity_probe }} set-connection ptcp:{{ ovn_sb_db_port }}:0.0.0.0 register: ovn_sb_set_connection_result retries: 3 diff --git a/doc/source/reference/networking/neutron.rst b/doc/source/reference/networking/neutron.rst index 0dae7dcb6d..01ce340442 100644 --- a/doc/source/reference/networking/neutron.rst +++ b/doc/source/reference/networking/neutron.rst @@ -279,6 +279,13 @@ In order to deploy Neutron OVN Agent you need to set the following: Currently the agent is only needed for QoS for hardware offloaded ports. +When in need of running `ovn-nbctl` or `ovn-sbctl` commands it's most +convenient to run them from ``ovn_northd`` container: + +.. code-block:: console + + docker exec ovn_northd ovn-nbctl show + Mellanox Infiniband (ml2/mlnx) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/releasenotes/notes/ovn-env-variables-b622b4c53ee275f4.yaml b/releasenotes/notes/ovn-env-variables-b622b4c53ee275f4.yaml new file mode 100644 index 0000000000..69f2665c3b --- /dev/null +++ b/releasenotes/notes/ovn-env-variables-b622b4c53ee275f4.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + The OVN container images (``ovn-nb-db``, ``ovn-northd`` and ``ovn-sb-db``) + have now default environment variables in place that ease running of + ``ovn-nbctl`` and ``ovn-sbctl`` commands for operators. 
diff --git a/tests/run.yml b/tests/run.yml index eca77e215e..b0736c807a 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -440,6 +440,7 @@ when: scenario == "ovn" environment: CONTAINER_ENGINE: "{{ container_engine }}" + IS_UPGRADE: "{{ is_upgrade | bool | ternary('yes', 'no') }}" - name: Run test-core-openstack.sh script script: diff --git a/tests/test-ovn.sh b/tests/test-ovn.sh index a9d77a95ae..a48fb1161b 100755 --- a/tests/test-ovn.sh +++ b/tests/test-ovn.sh @@ -15,10 +15,20 @@ function test_ovn { # List OVN NB/SB entries echo "OVN NB DB entries:" - sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" show + # TODO(mnasiadka): Remove the first part of conditional in G cycle + if [ $IS_UPGRADE == "yes" ]; then + sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" show + else + sudo ${container_engine} exec ovn_northd ovn-nbctl show + fi echo "OVN SB DB entries:" - sudo ${container_engine} exec ovn_northd ovn-sbctl --db "$ovn_sb_connection" show + # TODO(mnasiadka): Remove the first part of conditional in G cycle + if [ $IS_UPGRADE == "yes" ]; then + sudo ${container_engine} exec ovn_northd ovn-sbctl --db "$ovn_sb_connection" show + else + sudo ${container_engine} exec ovn_northd ovn-sbctl show + fi OVNNB_STATUS=$(sudo ${container_engine} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound) OVNSB_STATUS=$(sudo ${container_engine} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound) @@ -92,9 +102,20 @@ function test_octavia { openstack floating ip set $lb_fip --port $lb_port_id echo "OVN NB entries for LB:" - sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list load_balancer + # TODO(mnasiadka): Remove the first part of conditional in G cycle + if [ $IS_UPGRADE == "yes" ]; then + sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list load_balancer + else + sudo ${container_engine} exec ovn_northd 
ovn-nbctl list load_balancer + fi + echo "OVN NB entries for NAT:" - sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list nat + # TODO(mnasiadka): Remove the first part of conditional in G cycle + if [ $IS_UPGRADE == "yes" ]; then + sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list nat + else + sudo ${container_engine} exec ovn_northd ovn-nbctl list nat + fi echo "Attempt to access the load balanced HTTP server." attempts=12 From 00d33a38a6a41f226ae53d05d3ee4c5660ff85fc Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 8 Oct 2025 16:25:06 +0200 Subject: [PATCH 048/165] CI: Rework Hashicorp Vault jobs into OpenBao Change-Id: I1a8ebc171da8990b21f03e6d5c3da296eef92e28 Signed-off-by: Michal Nasiadka --- .../{run-hashi-vault.yml => run-openbao.yml} | 10 +-- tests/test-hashicorp-vault-passwords.sh | 69 ------------------- tests/test-openbao-passwords.sh | 67 ++++++++++++++++++ zuul.d/project.yaml | 2 +- zuul.d/scenarios/hashi-vault.yaml | 40 ----------- zuul.d/scenarios/openbao.yaml | 40 +++++++++++ 6 files changed, 113 insertions(+), 115 deletions(-) rename tests/{run-hashi-vault.yml => run-openbao.yml} (92%) delete mode 100755 tests/test-hashicorp-vault-passwords.sh create mode 100755 tests/test-openbao-passwords.sh delete mode 100644 zuul.d/scenarios/hashi-vault.yaml create mode 100644 zuul.d/scenarios/openbao.yaml diff --git a/tests/run-hashi-vault.yml b/tests/run-openbao.yml similarity index 92% rename from tests/run-hashi-vault.yml rename to tests/run-openbao.yml index f9a014e76e..f8d2fe2596 100644 --- a/tests/run-hashi-vault.yml +++ b/tests/run-openbao.yml @@ -68,10 +68,10 @@ command: "{{ kolla_ansible_venv_path }}/bin/kolla-genpwd" # At this point we have generated all necessary configuration, and are - # ready to test Hashicorp Vault. - - name: Run test-hashicorp-vault-passwords.sh script + # ready to test OpenBao. 
+ - name: Run test-openbao-passwords.sh script script: - cmd: test-hashicorp-vault-passwords.sh + cmd: test-openbao-passwords.sh executable: /bin/bash chdir: "{{ kolla_ansible_src_dir }}" environment: @@ -85,11 +85,11 @@ - name: Read generated file slurp: - src: "/tmp/passwords-hashicorp-vault.yml" + src: "/tmp/passwords-openbao.yml" register: generated_file # This test will load in the original input file and the one that was - # generated by Vault and ensure that the keys are the same in both files. + # generated by OpenBao and ensure that the keys are the same in both files. # This ensures that we are not missing any passwords. - name: Check passwords that were written to Vault are as expected vars: diff --git a/tests/test-hashicorp-vault-passwords.sh b/tests/test-hashicorp-vault-passwords.sh deleted file mode 100755 index 64648d5001..0000000000 --- a/tests/test-hashicorp-vault-passwords.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -export PYTHONUNBUFFERED=1 - -function install_vault { - if [[ $BASE_DISTRO =~ (debian|ubuntu) ]]; then - curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - - sudo apt-add-repository -y "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" - sudo apt-get update -y && sudo apt-get install -y vault jq - else - sudo dnf install -y yum-utils - sudo dnf config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo - sudo dnf install -y vault jq - fi -} - -function start_vault { - nohup vault server --dev & - # Give Vault some time to warm up - sleep 10 -} - -function test_vault { - TOKEN=$(vault token create -address 'http://127.0.0.1:8200' -format json | jq '.auth.client_token' --raw-output) - echo "${TOKEN}" | vault login -address 'http://127.0.0.1:8200' - - vault kv put -address 'http://127.0.0.1:8200' secret/foo data=bar -} - -function test_writepwd { - TOKEN=$(vault token create -address 'http://127.0.0.1:8200' -format json | jq 
'.auth.client_token' --raw-output) - kolla-writepwd \ - --passwords /etc/kolla/passwords.yml \ - --vault-addr 'http://127.0.0.1:8200' \ - --vault-token ${TOKEN} \ - --vault-mount-point secret -} - -function test_readpwd { - TOKEN=$(vault token create -address 'http://127.0.0.1:8200' -format json | jq '.auth.client_token' --raw-output) - cp etc/kolla/passwords.yml /tmp/passwords-hashicorp-vault.yml - kolla-readpwd \ - --passwords /tmp/passwords-hashicorp-vault.yml \ - --vault-addr 'http://127.0.0.1:8200' \ - --vault-token ${TOKEN} \ - --vault-mount-point secret -} - -function teardown { - pkill vault -} - -function test_hashicorp_vault_passwords { - echo "Setting up development Vault server..." - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - install_vault - start_vault - test_vault - echo "Write passwords to Hashicorp Vault..." - test_writepwd - echo "Read passwords from Hashicorp Vault..." - test_readpwd - echo "Cleaning up..." - teardown -} - -test_hashicorp_vault_passwords diff --git a/tests/test-openbao-passwords.sh b/tests/test-openbao-passwords.sh new file mode 100755 index 0000000000..5e6b115bad --- /dev/null +++ b/tests/test-openbao-passwords.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +set -o xtrace +set -o errexit + +export PYTHONUNBUFFERED=1 + +function install_openbao { + if [[ $BASE_DISTRO =~ (debian|ubuntu) ]]; then + curl -fsSLO https://github.com/openbao/openbao/releases/download/v2.4.1/bao_2.4.1_linux_amd64.deb + sudo dpkg -i bao_2.4.1_linux_amd64.deb + rm -f bao_2.4.1_linux_amd64.deb + else + sudo dnf install -y https://github.com/openbao/openbao/releases/download/v2.4.1/bao_2.4.1_linux_amd64.rpm + fi +} + +function start_openbao { + nohup bao server --dev & + # Give Vault some time to warm up + sleep 10 +} + +function test_openbao { + TOKEN=$(bao token create -address 'http://127.0.0.1:8200' -field token) + echo "${TOKEN}" | bao login -address 'http://127.0.0.1:8200' - + bao kv put -address 'http://127.0.0.1:8200' secret/foo data=bar +} + +function 
test_writepwd { + TOKEN=$(bao token create -address 'http://127.0.0.1:8200' -field token) + kolla-writepwd \ + --passwords /etc/kolla/passwords.yml \ + --vault-addr 'http://127.0.0.1:8200' \ + --vault-token ${TOKEN} \ + --vault-mount-point secret +} + +function test_readpwd { + TOKEN=$(bao token create -address 'http://127.0.0.1:8200' -field token) + cp etc/kolla/passwords.yml /tmp/passwords-openbao.yml + kolla-readpwd \ + --passwords /tmp/passwords-openbao.yml \ + --vault-addr 'http://127.0.0.1:8200' \ + --vault-token ${TOKEN} \ + --vault-mount-point secret +} + +function teardown { + pkill bao +} + +function test_openbao_passwords { + echo "Setting up development OpenBao server..." + source $KOLLA_ANSIBLE_VENV_PATH/bin/activate + install_openbao + start_openbao + test_openbao + echo "Write passwords to OpenBao..." + test_writepwd + echo "Read passwords from OpenBao..." + test_readpwd + echo "Cleaning up..." + teardown +} + +test_openbao_passwords diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index 77e714e1a8..ed9c7b718f 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -12,7 +12,7 @@ # https://review.opendev.org/c/openstack/kolla-ansible/+/864780 # - kolla-ansible-scenario-container-engine-migration - kolla-ansible-scenario-haproxy-fqdn - - kolla-ansible-scenario-hashi-vault + - kolla-ansible-scenario-openbao - kolla-ansible-scenario-kvm - kolla-ansible-scenario-lets-encrypt - kolla-ansible-scenario-magnum diff --git a/zuul.d/scenarios/hashi-vault.yaml b/zuul.d/scenarios/hashi-vault.yaml deleted file mode 100644 index a267aa4554..0000000000 --- a/zuul.d/scenarios/hashi-vault.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- -- job: - name: kolla-ansible-hashi-vault-base - parent: kolla-ansible-base - run: tests/run-hashi-vault.yml - required-projects: - - openstack/kolla-ansible - - openstack/requirements - voting: false - files: !override - - ^kolla_ansible/ - - ^requirements-core.yml - - ^tests/(pre|run|run-hashi-vault).yml - - 
^tests/templates/(inventory|globals-default).j2 - - ^tests/test-hashicorp-vault-passwords.sh - -- job: - name: kolla-ansible-debian-bookworm-hashi-vault - parent: kolla-ansible-hashi-vault-base - nodeset: kolla-ansible-debian-bookworm-8GB - -- job: - name: kolla-ansible-rocky-10-hashi-vault - parent: kolla-ansible-hashi-vault-base - nodeset: kolla-ansible-rocky-10-8GB - -- job: - name: kolla-ansible-ubuntu-noble-hashi-vault - parent: kolla-ansible-hashi-vault-base - nodeset: kolla-ansible-ubuntu-noble-8GB - -- project-template: - name: kolla-ansible-scenario-hashi-vault - description: | - Runs Kolla-Ansible Hashicorp Vault scenario jobs. - check: - jobs: - - kolla-ansible-debian-bookworm-hashi-vault - - kolla-ansible-rocky-10-hashi-vault - - kolla-ansible-ubuntu-noble-hashi-vault diff --git a/zuul.d/scenarios/openbao.yaml b/zuul.d/scenarios/openbao.yaml new file mode 100644 index 0000000000..1cca53ec3a --- /dev/null +++ b/zuul.d/scenarios/openbao.yaml @@ -0,0 +1,40 @@ +--- +- job: + name: kolla-ansible-openbao-base + parent: kolla-ansible-base + run: tests/run-openbao.yml + required-projects: + - openstack/kolla-ansible + - openstack/requirements + voting: false + files: !override + - ^kolla_ansible/ + - ^requirements-core.yml + - ^tests/(pre|run|run-openbao).yml + - ^tests/templates/(inventory|globals-default).j2 + - ^tests/test-openbao-passwords.sh + +- job: + name: kolla-ansible-debian-bookworm-openbao + parent: kolla-ansible-openbao-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-rocky-10-openbao + parent: kolla-ansible-openbao-base + nodeset: kolla-ansible-rocky-10-8GB + +- job: + name: kolla-ansible-ubuntu-noble-openbao + parent: kolla-ansible-openbao-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-openbao + description: | + Runs Kolla-Ansible OpenBao scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-openbao + - kolla-ansible-rocky-10-openbao + - kolla-ansible-ubuntu-noble-openbao From 10e719f9d8b4b8d8efbc00e9a69ae6984b4a0a10 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 7 Oct 2025 19:57:40 +0200 Subject: [PATCH 049/165] CI: Rework deploy.sh to an Ansible role Change-Id: Iecb212c38732d38f704f1a7d43036f8bbade496a Signed-off-by: Michal Nasiadka --- .../tasks/certificates.yml | 39 +++++++++ roles/kolla-ansible-deploy/tasks/deploy.yml | 55 +++++++++++++ roles/kolla-ansible-deploy/tasks/main.yml | 11 +++ tests/deploy.sh | 80 ------------------- tests/pre.yml | 1 + tests/run.yml | 12 +-- zuul.d/scenarios/aio.yaml | 1 + zuul.d/scenarios/lets-encrypt.yaml | 2 +- 8 files changed, 110 insertions(+), 91 deletions(-) create mode 100644 roles/kolla-ansible-deploy/tasks/certificates.yml create mode 100644 roles/kolla-ansible-deploy/tasks/deploy.yml create mode 100644 roles/kolla-ansible-deploy/tasks/main.yml delete mode 100755 tests/deploy.sh diff --git a/roles/kolla-ansible-deploy/tasks/certificates.yml b/roles/kolla-ansible-deploy/tasks/certificates.yml new file mode 100644 index 0000000000..6296a1dbe2 --- /dev/null +++ b/roles/kolla-ansible-deploy/tasks/certificates.yml @@ -0,0 +1,39 @@ +--- +- name: Generate self-signed certificates for the optional internal TLS tests + ansible.builtin.shell: + cmd: > + . 
{{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible certificates + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/certificates 2>&1 + +- name: Init pebble when Lets Encrypt is enabled + when: (le_enabled | default(False)) | bool + block: + - name: "Run pebble container" + become: true + community.docker.docker_container: + name: pebble + image: "ghcr.io/letsencrypt/pebble:latest" + env: + PEBBLE_VA_NOSLEEP: "1" + PEBBLE_VA_ALWAYS_VALID: "1" + network_mode: host + + - name: "Wait for pebble to start" + ansible.builtin.wait_for: + port: 15000 + delay: 3 + + - name: "Copy pebble miniCA to /etc/kolla/certificates" + become: true + ansible.builtin.command: + cmd: "docker cp pebble:/test/certs/pebble.minica.pem /etc/kolla/certificates/ca/pebble-root.crt" + + - name: "Fetch pebble.crt and store it in /etc/kolla/certificates/ca/" + become: true + ansible.builtin.get_url: + url: "https://127.0.0.1:15000/roots/0" + dest: "/etc/kolla/certificates/ca/pebble.crt" + validate_certs: false diff --git a/roles/kolla-ansible-deploy/tasks/deploy.yml b/roles/kolla-ansible-deploy/tasks/deploy.yml new file mode 100644 index 0000000000..5b36d7b971 --- /dev/null +++ b/roles/kolla-ansible-deploy/tasks/deploy.yml @@ -0,0 +1,55 @@ +--- +- name: Run kolla-ansible prechecks + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible prechecks + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/deploy-prechecks 2>&1 + +- name: Run kolla-ansible pull + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible pull + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/pull 2>&1 + +- name: Run kolla-ansible deploy + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible deploy + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/deploy 2>&1 + +- name: Run kolla-ansible post-deploy + ansible.builtin.shell: + cmd: > + . 
{{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible post-deploy + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/post-deploy 2>&1 + +- name: Run kolla-ansible validate-config on upgrades + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible validate-config + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/validate-config 2>&1 + when: not is_upgrade | bool + +- name: Run kolla-ansible check + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible check + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/check 2>&1 diff --git a/roles/kolla-ansible-deploy/tasks/main.yml b/roles/kolla-ansible-deploy/tasks/main.yml new file mode 100644 index 0000000000..e02765d99a --- /dev/null +++ b/roles/kolla-ansible-deploy/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Ensure /etc/kolla is writable + become: true + ansible.builtin.file: + path: /etc/kolla + state: directory + mode: "0777" + recurse: true + +- import_tasks: certificates.yml +- import_tasks: deploy.yml diff --git a/tests/deploy.sh b/tests/deploy.sh deleted file mode 100755 index 419c4778af..0000000000 --- a/tests/deploy.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. 
-export PYTHONUNBUFFERED=1 - -function init_pebble { - - sudo echo "[i] Pulling letsencrypt/pebble" > /tmp/logs/ansible/certificates - sudo docker pull quay.io/openstack.kolla/pebble:latest &>> /tmp/logs/ansible/certificates - - sudo echo "[i] Force removing old pebble container" &>> /tmp/logs/ansible/certificates - sudo docker rm -f pebble &>> /tmp/logs/ansible/certificates - - sudo echo "[i] Run new pebble container" &>> /tmp/logs/ansible/certificates - sudo docker run --name pebble --rm -d -e "PEBBLE_VA_NOSLEEP=1" -e "PEBBLE_VA_ALWAYS_VALID=1" --net=host quay.io/openstack.kolla/pebble:latest &>> /tmp/logs/ansible/certificates - - sudo echo "[i] Wait for pebble container be up" &>> /tmp/logs/ansible/certificates - # wait until pebble starts - while ! sudo docker logs pebble | grep -q "Listening on"; do - sleep 1 - done - sudo echo "[i] Wait for pebble container done" &>> /tmp/logs/ansible/certificates - - sudo echo "[i] Pebble container logs" &>> /tmp/logs/ansible/certificates - sudo docker logs pebble &>> /tmp/logs/ansible/certificates -} - -function pebble_cacert { - - sudo docker cp pebble:/test/certs/pebble.minica.pem /etc/kolla/certificates/ca/pebble-root.crt - sudo curl -k -s -o /etc/kolla/certificates/ca/pebble.crt -v https://127.0.0.1:15000/roots/0 -} - -function certificates { - - RAW_INVENTORY=/etc/kolla/inventory - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - # generate self-signed certificates for the optional internal TLS tests - if [[ "$TLS_ENABLED" = "True" ]]; then - kolla-ansible certificates -i ${RAW_INVENTORY} -vvv > /tmp/logs/ansible/certificates - fi - if [[ "$LE_ENABLED" = "True" ]]; then - init_pebble - pebble_cacert - fi - - #TODO(inc0): Post-deploy complains that /etc/kolla is not writable. 
Probably we need to include become there - sudo chmod -R 777 /etc/kolla -} - - -function deploy { - - RAW_INVENTORY=/etc/kolla/inventory - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - #TODO(inc0): Post-deploy complains that /etc/kolla is not writable. Probably we need to include become there - sudo chmod -R 777 /etc/kolla - - certificates - - # Actually do the deployment - kolla-ansible prechecks -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/deploy-prechecks - kolla-ansible pull -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/pull - kolla-ansible deploy -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/deploy - kolla-ansible post-deploy -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/post-deploy - - if [[ $HAS_UPGRADE == 'no' ]]; then - kolla-ansible validate-config -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/validate-config - fi - - kolla-ansible check -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/check -} - - -deploy diff --git a/tests/pre.yml b/tests/pre.yml index 804c090d18..4f8d4556d0 100644 --- a/tests/pre.yml +++ b/tests/pre.yml @@ -61,6 +61,7 @@ - gawk - python3-pip - python3-setuptools + - python3-requests - name: Install lvm on storage scenarios become: true diff --git a/tests/run.yml b/tests/run.yml index eca77e215e..0b04afa24c 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -391,16 +391,8 @@ # Deploy control plane. For upgrade jobs this is the previous release. 
- block: - - name: Run deploy.sh script - script: - cmd: deploy.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - TLS_ENABLED: "{{ tls_enabled }}" - LE_ENABLED: "{{ le_enabled }}" - KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" - HAS_UPGRADE: "{{ is_upgrade | bool | ternary('yes', 'no') }}" + - import_role: + name: kolla-ansible-deploy # NOTE(yoctozepto): this is nice as the first step after the deployment # because it waits for the services to stabilize well enough so that diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index 763351fbda..39bbe5f05d 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -8,6 +8,7 @@ - ^ansible/(action_plugins|filter_plugins|library|module_utils)/ - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq)/ - ^kolla_ansible/ + - ^roles/kolla-ansible-deploy/ - ^tests/test-(core-openstack|dashboard).sh - ^tools/init-runonce diff --git a/zuul.d/scenarios/lets-encrypt.yaml b/zuul.d/scenarios/lets-encrypt.yaml index a985cae648..fc059a79c0 100644 --- a/zuul.d/scenarios/lets-encrypt.yaml +++ b/zuul.d/scenarios/lets-encrypt.yaml @@ -7,9 +7,9 @@ - ^ansible/group_vars/all/lets-encrypt.yml - ^ansible/roles/fluentd/templates/conf/input/11-letsencrypt.conf.j2 - ^ansible/roles/(haproxy-config|letsencrypt|loadbalancer|loadbalancer-config)/ + - ^roles/kolla-ansible-deploy/tasks/certificates.yml - ^tests/test-core-openstack.sh - ^tests/test-dashboard.sh - - ^tests/deploy.sh vars: scenario: lets-encrypt scenario_images_extra: From a87bd3350d6ca989aece8b8764042bf3759623b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20Weing=C3=A4rtner?= Date: Wed, 26 Jun 2024 10:50:37 -0300 Subject: [PATCH 050/165] Customize the authentication error timeout page in modOIDC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a 
situation that happens when a user triggers an authentication in
OpenStack that uses Identity federation, which leads users to a blank
page with an error message that might not be that clear to users.

If the user takes too long to enter his/her credentials in the login
page of the IdP, the user sees a page saying that the authentication
took too long and that the user must try it again. However, if the user
tries again, she/he is already logged in to the IdP, and then the user
gets direct access to Horizon.

This whole process can be a bit confusing for users, as they see an
error page after the login, which leads them to think that there was an
error in the authentication, but then when he/she tries again,
everything just works.

Therefore, to handle such cases we propose Kolla-Ansible to use a
custom error page that has an automatic redirect if the error happens.

The timeout is managed by the "OIDCStateTimeout" variable. The default
(which is what we have in place now) is 300 seconds (5 minutes):
https://github.com/zmartzone/mod_auth_openidc/blob/d13ee0e4bd2cef94c4d2b55829310175288e0bdf/src/mod_auth_openidc.c#L651).
I would not recommend increasing this timeout though. I would rather
decrease this value; 5 minutes for such an operation is already too
much. This variable can be overridden via the wsgi-keystone.conf file.

To customize the error page we have the following variable
"OIDCHTMLErrorTemplate". The template is processed in the following
line:
https://github.com/zmartzone/mod_auth_openidc/blob/f2b8315cd48faba04320e9a06fc52b57f0989dcb/src/util.c#L1619.
As we can see, there is no default page. The default HTML is hardcoded
in C, and the template requires two "%s" placeholders that will receive
the title of the error message and the full description of the error.
Change-Id: Iebda3ca7a5f9cef7e5e0930fc4f70764774d6ed5 Signed-off-by: Rafael Weingärtner --- ansible/roles/keystone/defaults/main.yml | 9 ++++-- .../keystone/tasks/config-federation-oidc.yml | 18 ++++++++++++ .../roles/keystone/templates/keystone.json.j2 | 9 +++++- .../templates/modoidc-error-page.html.j2 | 29 +++++++++++++++++++ .../keystone/templates/wsgi-keystone.conf.j2 | 1 + .../shared-services/keystone-guide.rst | 14 +++++++++ ...m_modOIDC_error_page-0fe3dd7414310536.yaml | 19 ++++++++++++ 7 files changed, 95 insertions(+), 4 deletions(-) create mode 100644 ansible/roles/keystone/templates/modoidc-error-page.html.j2 create mode 100644 releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml diff --git a/ansible/roles/keystone/defaults/main.yml b/ansible/roles/keystone/defaults/main.yml index 3aad442a2c..cdc52a3ed1 100644 --- a/ansible/roles/keystone/defaults/main.yml +++ b/ansible/roles/keystone/defaults/main.yml @@ -225,12 +225,15 @@ keystone_remote_id_attribute_oidc: "HTTP_OIDC_ISS" keystone_container_federation_oidc_metadata_folder: "{{ '/etc/apache2/metadata' if kolla_base_distro in ['debian', 'ubuntu'] else '/etc/httpd/metadata' }}" keystone_container_federation_oidc_idp_certificate_folder: "{{ '/etc/apache2/cert' if kolla_base_distro in ['debian', 'ubuntu'] else '/etc/httpd/cert' }}" keystone_container_federation_oidc_attribute_mappings_folder: "{{ container_config_directory }}/federation/oidc/attribute_maps" -keystone_host_federation_oidc_metadata_folder: "{{ node_config_directory }}/keystone/federation/oidc/metadata" -keystone_host_federation_oidc_idp_certificate_folder: "{{ node_config_directory }}/keystone/federation/oidc/cert" -keystone_host_federation_oidc_attribute_mappings_folder: "{{ node_config_directory }}/keystone/federation/oidc/attribute_maps" +keystone_host_federation_base_folder: "{{ node_config_directory }}/keystone/federation" +keystone_host_federation_oidc_metadata_folder: "{{ keystone_host_federation_base_folder 
}}/oidc/metadata" +keystone_host_federation_oidc_idp_certificate_folder: "{{ keystone_host_federation_base_folder }}/oidc/cert" +keystone_host_federation_oidc_attribute_mappings_folder: "{{ keystone_host_federation_base_folder }}/oidc/attribute_maps" keystone_federation_oidc_jwks_uri: "" keystone_federation_oidc_additional_options: {} +keystone_federation_oidc_error_page_retry_login_delay_milliseconds: 5000 + # These variables are used to define multiple trusted Horizon dashboards. # keystone_trusted_dashboards: ['', '', ''] horizon_trusted_dashboards: "{{ ['%s://%s/auth/websso/' % (public_protocol, kolla_external_fqdn), '%s/auth/websso/' % (horizon_public_endpoint)] if enable_horizon | bool else [] }}" diff --git a/ansible/roles/keystone/tasks/config-federation-oidc.yml b/ansible/roles/keystone/tasks/config-federation-oidc.yml index 81384931d0..7a50e8c40a 100644 --- a/ansible/roles/keystone/tasks/config-federation-oidc.yml +++ b/ansible/roles/keystone/tasks/config-federation-oidc.yml @@ -85,3 +85,21 @@ keystone_federation_openid_certificate_key_ids: "{{ certificates_path.files | map(attribute='path') | map('regex_replace', '^.*/(.*)\\.pem$', '\\1#' + keystone_container_federation_oidc_idp_certificate_folder + '/\\1.pem') | list }}" # noqa 204 when: - inventory_hostname in groups[keystone.group] + +- name: Copying modOIDC error page template + vars: + keystone: "{{ keystone_services.keystone }}" + template: + src: "{{ item }}" + dest: "{{ keystone_host_federation_base_folder }}/modoidc-error-page.html" + mode: "0660" + become: true + when: + - inventory_hostname in groups[keystone.group] + - keystone.enabled | bool + - keystone_enable_federation_openid | bool + with_first_found: + - files: + - "{{ node_custom_config }}/keystone/federation/modoidc-error-page.html" + - "modoidc-error-page.html.j2" + skip: true diff --git a/ansible/roles/keystone/templates/keystone.json.j2 b/ansible/roles/keystone/templates/keystone.json.j2 index c5b567d69f..fbdfa60115 100644 --- 
a/ansible/roles/keystone/templates/keystone.json.j2 +++ b/ansible/roles/keystone/templates/keystone.json.j2 @@ -67,7 +67,14 @@ "owner": "{{ apache_user }}:{{ apache_user }}", "perm": "0600", "merge": true - }{% endif %}{% if kolla_copy_ca_into_containers | bool %}, + }, + { + "source": "{{ container_config_directory }}/federation/modoidc-error-page.html", + "dest": "/var/www/html/modoidc-error-page.html", + "owner": "{{ apache_user }}:{{ apache_user }}", + "perm": "0600" + } + {% endif %}{% if kolla_copy_ca_into_containers | bool %}, { "source": "{{ container_config_directory }}/ca-certificates", "dest": "/var/lib/kolla/share/ca-certificates", diff --git a/ansible/roles/keystone/templates/modoidc-error-page.html.j2 b/ansible/roles/keystone/templates/modoidc-error-page.html.j2 new file mode 100644 index 0000000000..1d8db9c077 --- /dev/null +++ b/ansible/roles/keystone/templates/modoidc-error-page.html.j2 @@ -0,0 +1,29 @@ + + + + + +
+

It seems that an error happened during the login process.

+

You will be redirected again. Wait a few seconds please.

+
+ Redirect me now. +
+
+
+

Error: %s

+

%s

+ + diff --git a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 index 8275b8b917..80b08a9ad0 100644 --- a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 +++ b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 @@ -67,6 +67,7 @@ LogLevel info {% if keystone_federation_oidc_jwks_uri | length > 0 %} OIDCOAuthVerifyJwksUri {{ keystone_federation_oidc_jwks_uri }} {% endif %} + OIDCHTMLErrorTemplate /var/www/html/modoidc-error-page.html {% if keystone_federation_openid_certificate_key_ids | length > 0 %} OIDCOAuthVerifyCertFiles {{ keystone_federation_openid_certificate_key_ids | join(" ") }} {% endif %} diff --git a/doc/source/reference/shared-services/keystone-guide.rst b/doc/source/reference/shared-services/keystone-guide.rst index d0958a3f92..b92230dcb6 100644 --- a/doc/source/reference/shared-services/keystone-guide.rst +++ b/doc/source/reference/shared-services/keystone-guide.rst @@ -104,6 +104,20 @@ Example for Keycloak shown below: keystone_federation_oidc_additional_options: OIDCTokenBindingPolicy: disabled +When using OIDC, operators can also use the following variable +to customize the delay to retry authenticating in the IdP if the +authentication has timeout: + +``keystone_federation_oidc_error_page_retry_login_delay_milliseconds`` + Default is 5000 milliseconds (5 seconds). + +It is also possible to override the ``OIDCHTMLErrorTemplate``, +the custom error template page via: + +.. 
code-block:: yaml + + {{ node_custom_config }}/keystone/federation/modoidc-error-page.html + Identity providers configurations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml b/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml new file mode 100644 index 0000000000..e50b469113 --- /dev/null +++ b/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml @@ -0,0 +1,19 @@ +--- +features: + - | + Enable the configuration of the timeout manager by + ``OIDCStateTimeout`` variable. We also provide means to + override the error page for the modOIDC plugin via + ``{{ node_custom_config }}/keystone/federation/modoidc-error-page.html`` + file. + +upgrade: + - | + It was added a default template for the modOIDC plugin, + which will handle authentication errors for federated users. + The default template is found at + "ansible/roles/keystone/templates/modoidc-error-page.html.j2"; + it can also be replaced/overwritten. One can also overwrite, + the timeout, instead of the whole page via the following variable: + ``keystone_federation_oidc_error_page_retry_login_delay_milliseconds``. + The default timeout for the page redirection is 5 seconds. 
From 45e7b9ec4435d334c5757a1e5a6352b9f342567e Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 8 Oct 2025 17:02:50 +0200 Subject: [PATCH 051/165] CI: Rework test/deploy/upgrade-bifrost.sh into Ansible roles Change-Id: I55ea22002ea81085a43dc053f619bb61cd497c69 Signed-off-by: Michal Nasiadka --- .../tasks/main.yml | 9 +++++ .../kolla-ansible-test-bifrost/tasks/main.yml | 35 +++++++++++++++++++ .../tasks/main.yml | 8 +++++ tests/deploy-bifrost.sh | 23 ------------ tests/run.yml | 32 +++++------------ tests/test-bifrost.sh | 31 ---------------- tests/upgrade-bifrost.sh | 24 ------------- zuul.d/scenarios/bifrost.yaml | 2 +- 8 files changed, 62 insertions(+), 102 deletions(-) create mode 100644 roles/kolla-ansible-deploy-bifrost/tasks/main.yml create mode 100644 roles/kolla-ansible-test-bifrost/tasks/main.yml create mode 100644 roles/kolla-ansible-upgrade-bifrost/tasks/main.yml delete mode 100755 tests/deploy-bifrost.sh delete mode 100755 tests/test-bifrost.sh delete mode 100755 tests/upgrade-bifrost.sh diff --git a/roles/kolla-ansible-deploy-bifrost/tasks/main.yml b/roles/kolla-ansible-deploy-bifrost/tasks/main.yml new file mode 100644 index 0000000000..5b510da401 --- /dev/null +++ b/roles/kolla-ansible-deploy-bifrost/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: Deploy Bifrost + ansible.builtin.shell: + cmd: > + . 
{{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible deploy-bifrost + -i /etc/kolla/inventory + >/tmp/logs/ansible/deploy-bifrost 2>&1 + diff --git a/roles/kolla-ansible-test-bifrost/tasks/main.yml b/roles/kolla-ansible-test-bifrost/tasks/main.yml new file mode 100644 index 0000000000..431aed5840 --- /dev/null +++ b/roles/kolla-ansible-test-bifrost/tasks/main.yml @@ -0,0 +1,35 @@ +--- +- name: Check baremetal driver list + become: true + ansible.builtin.command: + cmd: > + {{ container_engine }} exec bifrost_deploy + bash -c 'OS_CLOUD=bifrost baremetal driver list' + register: bdl + until: bdl.rc == 0 + retries: 5 + delay: 10 + +- name: Check baremetal node list + become: true + ansible.builtin.command: + cmd: > + {{ container_engine }} exec bifrost_deploy + bash -c 'OS_CLOUD=bifrost baremetal node list' + +- name: Create baremetal node + become: true + ansible.builtin.command: + cmd: > + {{ container_engine }} exec bifrost_deploy + bash -c 'OS_CLOUD=bifrost baremetal node create + --driver redfish --name test-node' + +- name: Delete baremetal node + become: true + ansible.builtin.command: + cmd: > + {{ container_engine }} exec bifrost_deploy + bash -c 'OS_CLOUD=bifrost baremetal node delete + test-node' + diff --git a/roles/kolla-ansible-upgrade-bifrost/tasks/main.yml b/roles/kolla-ansible-upgrade-bifrost/tasks/main.yml new file mode 100644 index 0000000000..ac8d11c1c6 --- /dev/null +++ b/roles/kolla-ansible-upgrade-bifrost/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: Upgrade Bifrost + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible deploy-bifrost + -i /etc/kolla/inventory + >/tmp/logs/ansible/upgrade-bifrost 2>&1 diff --git a/tests/deploy-bifrost.sh b/tests/deploy-bifrost.sh deleted file mode 100755 index b06a6d769e..0000000000 --- a/tests/deploy-bifrost.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. 
-export PYTHONUNBUFFERED=1 - - -function deploy_bifrost { - RAW_INVENTORY=/etc/kolla/inventory - - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - # TODO(mgoddard): run prechecks. - # Deploy the bifrost container. - # TODO(mgoddard): add pull action when we have a local registry service in - # CI. - kolla-ansible deploy-bifrost -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/deploy-bifrost -} - - -deploy_bifrost diff --git a/tests/run.yml b/tests/run.yml index 0b04afa24c..9178300bec 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -780,31 +780,17 @@ when: is_upgrade # Bifrost testing. - - block: - - name: Run deploy-bifrost.sh script - shell: - cmd: tests/deploy-bifrost.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" + - name: Bifrost testing + when: scenario == "bifrost" + block: + - import_role: + name: kolla-ansible-deploy-bifrost - - name: Run test-bifrost.sh script - shell: - cmd: tests/test-bifrost.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - CONTAINER_ENGINE: "{{ container_engine }}" + - import_role: + name: kolla-ansible-test-bifrost - - name: Run upgrade-bifrost.sh script - shell: - cmd: tests/upgrade-bifrost.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" - when: scenario == "bifrost" + - import_role: + name: kolla-ansible-upgrade-bifrost # NOTE(yoctozepto): each host checks itself - hosts: all diff --git a/tests/test-bifrost.sh b/tests/test-bifrost.sh deleted file mode 100755 index b8017c026f..0000000000 --- a/tests/test-bifrost.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. -export PYTHONUNBUFFERED=1 - - -function test_bifrost { - container_engine="${1:-docker}" - - # TODO(mgoddard): More testing, deploy bare metal nodes. 
- # TODO(mgoddard): Use openstackclient when clouds.yaml works. See - # https://bugs.launchpad.net/bifrost/+bug/1754070. - attempts=0 - while [[ $(sudo ${container_engine} exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal driver list -f value" | wc -l) -eq 0 ]]; do - attempts=$((attempts + 1)) - if [[ $attempts -gt 6 ]]; then - echo "Timed out waiting for ironic conductor to become active" - exit 1 - fi - sleep 10 - done - sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal node list" - sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal node create --driver redfish --name test-node" - sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal node delete test-node" -} - - -test_bifrost $1 diff --git a/tests/upgrade-bifrost.sh b/tests/upgrade-bifrost.sh deleted file mode 100755 index a5d5c36826..0000000000 --- a/tests/upgrade-bifrost.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. -export PYTHONUNBUFFERED=1 - - -function upgrade_bifrost { - RAW_INVENTORY=/etc/kolla/inventory - - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - # TODO(mgoddard): run prechecks. - # TODO(mgoddard): add pull action when we have a local registry service in - # CI. - # TODO(mgoddard): make some configuration file changes and trigger a real - # upgrade. 
- kolla-ansible deploy-bifrost -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-bifrost -} - - -upgrade_bifrost diff --git a/zuul.d/scenarios/bifrost.yaml b/zuul.d/scenarios/bifrost.yaml index fb7abbb2c7..958c5a69c7 100644 --- a/zuul.d/scenarios/bifrost.yaml +++ b/zuul.d/scenarios/bifrost.yaml @@ -6,7 +6,7 @@ files: !inherit - ^ansible/group_vars/all/bifrost.yml - ^ansible/roles/bifrost/ - - ^tests/test-bifrost.sh + - ^roles/kolla-ansible-(deploy|test|upgrade)-bifrost/ vars: scenario: bifrost scenario_images_core: From ff5cbf0538ad29eea63c8d27fcd0fd895ccebbba Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 8 Oct 2025 17:12:35 +0200 Subject: [PATCH 052/165] CI: Rework test-dashboard.sh to Ansible role Change-Id: Iaeb6786625c8c0c71c7b4e0ab9450c67bc13ed1c Signed-off-by: Michal Nasiadka --- .../tasks/main.yml | 21 ++++++ tests/run.yml | 22 ++---- tests/test-dashboard.sh | 68 ------------------- 3 files changed, 25 insertions(+), 86 deletions(-) create mode 100644 roles/kolla-ansible-test-dashboard/tasks/main.yml delete mode 100755 tests/test-dashboard.sh diff --git a/roles/kolla-ansible-test-dashboard/tasks/main.yml b/roles/kolla-ansible-test-dashboard/tasks/main.yml new file mode 100644 index 0000000000..561867aedd --- /dev/null +++ b/roles/kolla-ansible-test-dashboard/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: Get contents of clouds.yaml + ansible.builtin.slurp: + src: /etc/kolla/clouds.yaml + register: clouds_yaml + +- name: Query dashboard and check that the returned page looks like a login page + vars: + clouds: "{{ clouds_yaml['content'] | b64decode | from_yaml }}" + url_scheme: "{{ clouds.clouds['kolla-admin'].auth.auth_url | urlsplit('scheme') }}" + url_host: "{{ kolla_external_vip_address | default(kolla_internal_vip_address) }}" + ansible.builtin.uri: + url: "{{ url_scheme + '://' + url_host }}" + ca_path: "{{ clouds.clouds['kolla-admin'].cacert | default(omit) }}" + follow_redirects: "all" + return_content: true + validate_certs: "{{ 
'false' if scenario == 'lets-encrypt' else 'true' }}" + register: dashboard_output + until: dashboard_output.content.find('Login') != -1 + retries: 30 + delay: 10 diff --git a/tests/run.yml b/tests/run.yml index 9178300bec..46aae8cead 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -397,14 +397,8 @@ # NOTE(yoctozepto): this is nice as the first step after the deployment # because it waits for the services to stabilize well enough so that # the dashboard is able to show the login prompt - - name: Run test-dashboard.sh script - script: - cmd: test-dashboard.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - TLS_ENABLED: "{{ tls_enabled }}" - LE_ENABLED: "{{ le_enabled }}" + - import_role: + name: kolla-ansible-test-dashboard when: dashboard_enabled - name: Run init-core-openstack.sh script @@ -739,16 +733,8 @@ KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" SCENARIO: "{{ scenario }}" - # NOTE(yoctozepto): this is nice as the first step after the upgrade - # because it waits for the services to stabilize well enough so that - # the dashboard is able to show the login prompt - - name: Run test-dashboard.sh script (post upgrade) - shell: - cmd: tests/test-dashboard.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - TLS_ENABLED: "{{ tls_enabled }}" + - import_role: + name: kolla-ansible-test-dashboard when: dashboard_enabled # NOTE(yoctozepto): We need the script module here to avoid diff --git a/tests/test-dashboard.sh b/tests/test-dashboard.sh deleted file mode 100755 index c3cb9a72cc..0000000000 --- a/tests/test-dashboard.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -export PYTHONUNBUFFERED=1 - - -function check_dashboard { - # Query the dashboard, and check that the returned page looks like a login - # page. 
- DASHBOARD_URL=${OS_AUTH_URL%:*} - output_path=$1 - args=( - --include - --location - --fail - ) - if [[ "$TLS_ENABLED" = "True" ]]; then - args+=(--cacert $OS_CACERT) - fi - if ! curl "${args[@]}" $DASHBOARD_URL > $output_path; then - return 1 - fi - if ! grep Login $output_path >/dev/null; then - return 1 - fi -} - -function test_dashboard_logged { - . /etc/kolla/admin-openrc.sh - - echo "TESTING: Dashboard" - # The dashboard has been known to take some time to become accessible, so - # use retries. - output_path=$(mktemp) - attempt=1 - while ! check_dashboard $output_path; do - echo "Dashboard not accessible yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 30 ]]; then - echo "FAILED: Dashboard did not become accessible. Response:" - cat $output_path - return 1 - fi - sleep 10 - done - echo "SUCCESS: Dashboard" -} - -function test_dashboard { - echo "Testing dashboard" - log_file=/tmp/logs/ansible/test-dashboard - if [[ -f $log_file ]]; then - log_file=${log_file}-upgrade - fi - test_dashboard_logged > $log_file 2>&1 - result=$? - if [[ $result != 0 ]]; then - echo "Testing dashboard failed. See ansible/test-dashboard for details" - else - echo "Successfully tested dashboard. See ansible/test-dashboard for details" - fi - return $result -} - - -test_dashboard From 7beb0c8d80a7018c24aa964045008610931094b8 Mon Sep 17 00:00:00 2001 From: Seunghun Lee Date: Mon, 29 Sep 2025 11:01:52 +0100 Subject: [PATCH 053/165] Drop support for DB Loadbalancing with HAProxy + clustercheck MariaDB Clustercheck has not been updated for a long time and ProxySQL has been working well as the default. Dropped DB Loadbalancing with HAProxy and MariaDB clustercheck support and ProxySQL now gets enabled automatically when MariaDB is enabled. Replaced MariaDB container healthcheck method with healthcheck.sh from official MariaDB docker images. Performing service upgrade with this patch will remove remaining mariadb_clustercheck container. 
Depends-on: https://review.opendev.org/c/openstack/kolla/+/962657 Change-Id: Ib655eae79ba73f4582b29fef82c2d882a474329b Signed-off-by: Seunghun Lee --- ansible/group_vars/all/mariadb.yml | 7 +-- ansible/group_vars/all/proxysql.yml | 2 +- ansible/roles/mariadb/defaults/main.yml | 58 ++----------------- ansible/roles/mariadb/handlers/main.yml | 16 ----- ansible/roles/mariadb/tasks/config.yml | 16 +++++ ansible/roles/mariadb/tasks/loadbalancer.yml | 2 +- ansible/roles/mariadb/tasks/register.yml | 2 +- ansible/roles/mariadb/tasks/upgrade.yml | 11 ++++ .../mariadb/templates/healthcheck.cnf.j2 | 3 + .../templates/mariadb-clustercheck.json.j2 | 11 ---- .../roles/mariadb/templates/mariadb.json.j2 | 7 +++ ansible/roles/prometheus/tasks/bootstrap.yml | 2 +- ...-support-for-mariadb-4cbd7c8590a34981.yaml | 13 +++++ tests/check-logs.sh | 3 - 14 files changed, 63 insertions(+), 90 deletions(-) create mode 100644 ansible/roles/mariadb/templates/healthcheck.cnf.j2 delete mode 100644 ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2 create mode 100644 releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml diff --git a/ansible/group_vars/all/mariadb.yml b/ansible/group_vars/all/mariadb.yml index ea6c9fda9c..5a5439fea8 100644 --- a/ansible/group_vars/all/mariadb.yml +++ b/ansible/group_vars/all/mariadb.yml @@ -18,10 +18,9 @@ mariadb_port: "{{ database_port }}" mariadb_wsrep_port: "4567" mariadb_ist_port: "4568" mariadb_sst_port: "4444" -mariadb_clustercheck_port: "4569" mariadb_enable_tls_backend: "{{ database_enable_tls_backend }}" -mariadb_monitor_user: "{{ 'monitor' if enable_proxysql | bool else 'haproxy' }}" +mariadb_monitor_user: "monitor" mariadb_datadir_volume: "mariadb" @@ -30,8 +29,8 @@ mariadb_default_database_shard_hosts: "{% set default_shard = [] %}{% for host i mariadb_shard_id: "{{ mariadb_default_database_shard_id }}" mariadb_shard_name: "shard_{{ mariadb_shard_id }}" mariadb_shard_group: "mariadb_{{ mariadb_shard_name 
}}" -mariadb_loadbalancer: "{{ 'proxysql' if enable_proxysql | bool else 'haproxy' }}" -mariadb_backup_target: "{{ 'active' if mariadb_loadbalancer == 'haproxy' else 'replica' }}" +mariadb_loadbalancer: proxysql +mariadb_backup_target: replica mariadb_shard_root_user_prefix: "root_shard_" mariadb_shard_backup_user_prefix: "backup_shard_" mariadb_shards_info: "{{ groups['mariadb'] | database_shards_info() }}" diff --git a/ansible/group_vars/all/proxysql.yml b/ansible/group_vars/all/proxysql.yml index 822483d094..386a9be564 100644 --- a/ansible/group_vars/all/proxysql.yml +++ b/ansible/group_vars/all/proxysql.yml @@ -1,4 +1,4 @@ --- -enable_proxysql: "yes" +enable_proxysql: "{{ enable_mariadb }}" proxysql_admin_port: "6032" diff --git a/ansible/roles/mariadb/defaults/main.yml b/ansible/roles/mariadb/defaults/main.yml index 837baaab88..16165e901a 100644 --- a/ansible/roles/mariadb/defaults/main.yml +++ b/ansible/roles/mariadb/defaults/main.yml @@ -8,25 +8,7 @@ mariadb_services: volumes: "{{ mariadb_default_volumes + mariadb_extra_volumes }}" dimensions: "{{ mariadb_dimensions }}" healthcheck: "{{ mariadb_healthcheck }}" - environment: - MYSQL_USERNAME: "{{ mariadb_monitor_user }}" - MYSQL_PASSWORD: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}" - MYSQL_HOST: "{{ api_interface_address }}" - AVAILABLE_WHEN_DONOR: "1" haproxy: - mariadb: - enabled: "{{ enable_mariadb | bool and not enable_external_mariadb_load_balancer | bool }}" - mode: "tcp" - port: "{{ database_port }}" - listen_port: "{{ mariadb_port }}" - frontend_tcp_extra: - - "option clitcpka" - - "timeout client 3600s" - backend_tcp_extra: - - "option srvtcpka" - - "timeout server 3600s" - - "{% if enable_mariadb_clustercheck | bool %}option httpchk{% endif %}" - custom_member_list: "{{ internal_haproxy_members.split(';') }}" mariadb_external_lb: enabled: "{{ enable_external_mariadb_load_balancer | bool }}" mode: "tcp" @@ -39,18 +21,6 @@ mariadb_services: - "option srvtcpka" - 
"timeout server 3600s" custom_member_list: "{{ external_haproxy_members.split(';') }}" - mariadb-clustercheck: - container_name: mariadb_clustercheck - group: "{{ mariadb_shard_group }}" - enabled: "{{ enable_mariadb_clustercheck | bool }}" - image: "{{ mariadb_clustercheck_image_full }}" - volumes: "{{ mariadb_clustercheck_default_volumes + mariadb_clustercheck_extra_volumes }}" - dimensions: "{{ mariadb_clustercheck_dimensions }}" - environment: - MYSQL_USERNAME: "{{ mariadb_monitor_user }}" - MYSQL_PASSWORD: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}" - MYSQL_HOST: "{{ api_interface_address }}" - AVAILABLE_WHEN_DONOR: "1" #################### # Database @@ -61,8 +31,7 @@ database_max_timeout: 120 #################### # HAProxy #################### -internal_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ hostvars[host].ansible_facts.hostname }} {{ 'api' | kolla_address(host) }}:{{ mariadb_port }} check port {% if enable_mariadb_clustercheck | bool %}{{ mariadb_clustercheck_port }}{% else %}{{ mariadb_port }}{% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}" -external_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ host }} {{ host }}:{{ mariadb_port }} check port {% if enable_mariadb_clustercheck | bool %}{{ mariadb_clustercheck_port }}{% else %}{{ mariadb_port }}{% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}" +external_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ host }} {{ host }}:{{ mariadb_port }} check port {{ mariadb_port }} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}" #################### # Docker @@ -71,12 +40,7 @@ mariadb_image: "{{ docker_image_url }}mariadb-server" mariadb_tag: "{{ openstack_tag }}" mariadb_image_full: "{{ mariadb_image }}:{{ mariadb_tag }}" -mariadb_clustercheck_image: "{{ 
docker_image_url }}mariadb-clustercheck" -mariadb_clustercheck_tag: "{{ mariadb_tag }}" -mariadb_clustercheck_image_full: "{{ mariadb_clustercheck_image }}:{{ mariadb_clustercheck_tag }}" - mariadb_dimensions: "{{ default_container_dimensions }}" -mariadb_clustercheck_dimensions: "{{ default_container_dimensions }}" mariadb_default_volumes: - "{{ node_config_directory }}/mariadb/:{{ container_config_directory }}/:ro" @@ -84,20 +48,15 @@ mariadb_default_volumes: - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - "{{ mariadb_datadir_volume }}:/var/lib/mysql" - "kolla_logs:/var/log/kolla/" -mariadb_clustercheck_default_volumes: - - "{{ node_config_directory }}/mariadb-clustercheck/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "kolla_logs:/var/log/kolla/" mariadb_extra_volumes: "{{ default_extra_volumes }}" -mariadb_clustercheck_extra_volumes: "{{ default_extra_volumes }}" mariadb_enable_healthchecks: "{{ enable_container_healthchecks }}" mariadb_healthcheck_interval: "{{ default_container_healthcheck_interval }}" mariadb_healthcheck_retries: "{{ default_container_healthcheck_retries }}" mariadb_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" -mariadb_healthcheck_test: ["CMD-SHELL", "/usr/bin/clustercheck"] +mariadb_healthcheck_test: ["CMD-SHELL", "/usr/bin/healthcheck.sh --defaults-file /etc/{{ 'mysql/' if kolla_base_distro in ['ubuntu', 'debian'] else '' }}healthcheck.cnf --connect --galera_online"] + mariadb_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" mariadb_healthcheck: interval: "{{ mariadb_healthcheck_interval }}" @@ -122,19 +81,14 @@ mariadb_wsrep_extra_provider_options: [] #################### mariadb_backup_host: "{{ groups[mariadb_shard_group][0] }}" mariadb_backup_database_schema: "mysql" -mariadb_backup_database_user: "{% if 
mariadb_loadbalancer == 'haproxy' %}backup{% else %}{{ mariadb_shard_backup_user_prefix }}{{ mariadb_shard_id | string }}{% endif %}" +mariadb_backup_database_user: "{{ mariadb_shard_backup_user_prefix }}{{ mariadb_shard_id | string }}" mariadb_backup_type: "full" -mariadb_backup_possible: "{{ mariadb_loadbalancer != 'haproxy' or inventory_hostname in mariadb_default_database_shard_hosts }}" - -#################### -# Clustercheck -#################### -enable_mariadb_clustercheck: "{{ 'True' if mariadb_loadbalancer == 'haproxy' else 'False' }}" +mariadb_backup_possible: "{{ inventory_hostname in mariadb_default_database_shard_hosts }}" #################### # Sharding #################### -mariadb_shard_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}{{ database_user }}{% else %}{{ mariadb_shard_root_user_prefix }}{{ mariadb_shard_id | string }}{% endif %}" +mariadb_shard_database_user: "{{ mariadb_shard_root_user_prefix }}{{ mariadb_shard_id | string }}" mariadb_database_shard: "{{ mariadb_shards_info }}" # Database diff --git a/ansible/roles/mariadb/handlers/main.yml b/ansible/roles/mariadb/handlers/main.yml index c5d120615f..5e1b68ae90 100644 --- a/ansible/roles/mariadb/handlers/main.yml +++ b/ansible/roles/mariadb/handlers/main.yml @@ -72,19 +72,3 @@ - groups[mariadb_shard_group + '_port_alive_False'] is defined - inventory_hostname in groups[mariadb_shard_group + '_port_alive_False'] listen: Restart mariadb container - -- name: Restart mariadb-clustercheck container - vars: - service_name: "mariadb-clustercheck" - service: "{{ mariadb_services[service_name] }}" - become: true - kolla_container: - action: "recreate_or_restart_container" - common_options: "{{ docker_common_options }}" - image: "{{ service.image }}" - name: "{{ service.container_name }}" - volumes: "{{ service.volumes }}" - dimensions: "{{ service.dimensions }}" - environment: "{{ service.environment }}" - when: - - service | service_enabled_and_mapped_to_host diff --git 
a/ansible/roles/mariadb/tasks/config.yml b/ansible/roles/mariadb/tasks/config.yml index bf1779be55..405e34ffbd 100644 --- a/ansible/roles/mariadb/tasks/config.yml +++ b/ansible/roles/mariadb/tasks/config.yml @@ -70,6 +70,22 @@ become: true when: service | service_enabled_and_mapped_to_host +- name: Copying over healthcheck.cnf + vars: + service_name: "mariadb" + service: "{{ mariadb_services[service_name] }}" + merge_configs: + sources: + - "{{ role_path }}/templates/healthcheck.cnf.j2" + - "{{ node_custom_config }}/healthcheck.cnf" + - "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/healthcheck.cnf" + dest: "{{ node_config_directory }}/{{ service_name }}/healthcheck.cnf" + mode: "0660" + become: true + when: + - mariadb_enable_healthchecks | bool + - service | service_enabled_and_mapped_to_host + - include_tasks: copy-certs.yml when: - mariadb_copy_certs | bool diff --git a/ansible/roles/mariadb/tasks/loadbalancer.yml b/ansible/roles/mariadb/tasks/loadbalancer.yml index 78cac3fb56..bc7439dfd8 100644 --- a/ansible/roles/mariadb/tasks/loadbalancer.yml +++ b/ansible/roles/mariadb/tasks/loadbalancer.yml @@ -30,7 +30,7 @@ login_user: "{{ database_user }}" login_password: "{{ database_password }}" name: "{{ mariadb_monitor_user }}" - password: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}" + password: "{{ mariadb_monitor_password }}" host: "%" priv: "*.*:USAGE,REPLICATION CLIENT" tags: always diff --git a/ansible/roles/mariadb/tasks/register.yml b/ansible/roles/mariadb/tasks/register.yml index 8c679d9385..1b733afa70 100644 --- a/ansible/roles/mariadb/tasks/register.yml +++ b/ansible/roles/mariadb/tasks/register.yml @@ -29,7 +29,7 @@ login_user: "{{ database_user }}" login_password: "{{ database_password }}" name: "{{ mariadb_monitor_user }}" - password: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}" + password: "{{ mariadb_monitor_password }}" host: "%" priv: "*.*:USAGE,REPLICATION CLIENT" when: diff 
--git a/ansible/roles/mariadb/tasks/upgrade.yml b/ansible/roles/mariadb/tasks/upgrade.yml index 5b10a7e111..d72cc5b3d6 100644 --- a/ansible/roles/mariadb/tasks/upgrade.yml +++ b/ansible/roles/mariadb/tasks/upgrade.yml @@ -1,2 +1,13 @@ --- - import_tasks: deploy.yml + +# TODO(seunghun1ee): Remove this task after 2026.1 +- name: "Stop and remove mariadb_clustercheck containers" + become: true + kolla_container: + action: "stop_and_remove_container" + common_options: "{{ docker_common_options }}" + name: "mariadb_clustercheck" + ignore_missing: true + when: + - "'mariadb_clustercheck' not in skip_stop_containers" diff --git a/ansible/roles/mariadb/templates/healthcheck.cnf.j2 b/ansible/roles/mariadb/templates/healthcheck.cnf.j2 new file mode 100644 index 0000000000..670280dd51 --- /dev/null +++ b/ansible/roles/mariadb/templates/healthcheck.cnf.j2 @@ -0,0 +1,3 @@ +[mariadb-client] +user={{ mariadb_monitor_user }} +password={{ mariadb_monitor_password }} diff --git a/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2 b/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2 deleted file mode 100644 index aad07bff6a..0000000000 --- a/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2 +++ /dev/null @@ -1,11 +0,0 @@ -{ - "command": "socat_wrapper {% if network_address_family == 'ipv6' %}-6{% endif %} -d -lf/var/log/kolla/mariadb/mariadb-clustercheck.log tcp-l:{{ mariadb_clustercheck_port }},fork,reuseaddr,bind={{ api_interface_address }} EXEC:clustercheck", - "config_files": [], - "permissions": [ - { - "path": "/var/log/kolla/mariadb", - "owner": "mysql:mysql", - "recurse": true - } - ] -} diff --git a/ansible/roles/mariadb/templates/mariadb.json.j2 b/ansible/roles/mariadb/templates/mariadb.json.j2 index 7910d69293..2ecc9fa5ef 100644 --- a/ansible/roles/mariadb/templates/mariadb.json.j2 +++ b/ansible/roles/mariadb/templates/mariadb.json.j2 @@ -8,6 +8,13 @@ "owner": "mysql", "perm": "0600" } + {% if mariadb_enable_healthchecks | bool %}, + { + 
"source": "{{ container_config_directory }}/healthcheck.cnf", + "dest": "/etc/{{ mysql_dir }}/healthcheck.cnf", + "owner": "mysql", + "perm": "0600" + }{% endif %} {% if database_enable_tls_backend | bool %}, { "source": "{{ container_config_directory }}/ca-certificates/root.crt", diff --git a/ansible/roles/prometheus/tasks/bootstrap.yml b/ansible/roles/prometheus/tasks/bootstrap.yml index 3eda6b1a3e..9cc8e2634c 100644 --- a/ansible/roles/prometheus/tasks/bootstrap.yml +++ b/ansible/roles/prometheus/tasks/bootstrap.yml @@ -3,7 +3,7 @@ become: true vars: shard_id: "{{ item.key }}" - shard_root_user: "{% if mariadb_loadbalancer == 'haproxy' %}{{ database_user }}{% else %}{{ mariadb_shard_root_user_prefix }}{{ shard_id | string }}{% endif %}" + shard_root_user: "{{ mariadb_shard_root_user_prefix }}{{ shard_id | string }}" shard_host: "{{ mariadb_shards_info.shards[shard_id].hosts[0] }}" kolla_toolbox: container_engine: "{{ kolla_container_engine }}" diff --git a/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml b/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml new file mode 100644 index 0000000000..ab486ca79d --- /dev/null +++ b/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + ProxySQL is now automatically enabled when MariaDB is enabled. + MariaDB container healthcheck method was updated as healthcheck script was + replaced from Clustercheck to official MariaDB docker image's + `healthcheck.sh `__ +upgrade: + - | + Database loadbalancing with HAProxy and MariaDB Clustercheck is no longer + supported. For the system that uses HAProxy and Clustercheck, upgrading + MariaDB with ``kolla-ansible upgrade`` will deploy ProxySQL containers and + remove MariaDB Clustercheck containers. 
diff --git a/tests/check-logs.sh b/tests/check-logs.sh index cb8b763964..a9f7d464e3 100755 --- a/tests/check-logs.sh +++ b/tests/check-logs.sh @@ -63,9 +63,6 @@ function check_fluentd_missing_logs { /var/log/kolla/mariadb/mariadb-bootstrap.log) continue ;; - /var/log/kolla/mariadb/mariadb-clustercheck.log) - continue - ;; /var/log/kolla/mariadb/mariadb-upgrade.log) continue ;; From 0a3e2210016b138ef4f3fadc1e3b4babbee78bd7 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 10 Oct 2025 11:45:37 +0200 Subject: [PATCH 054/165] CI: rework upgrade.sh to Ansible role Change-Id: I73ca974aa6eef071e53dcbb9b66af2713ac655a8 Signed-off-by: Michal Nasiadka --- roles/kolla-ansible-deploy/tasks/deploy.yml | 2 +- roles/kolla-ansible-upgrade/tasks/main.yml | 65 +++++++++++++++++++++ tests/run.yml | 10 +--- tests/upgrade.sh | 32 ---------- 4 files changed, 68 insertions(+), 41 deletions(-) create mode 100644 roles/kolla-ansible-upgrade/tasks/main.yml delete mode 100755 tests/upgrade.sh diff --git a/roles/kolla-ansible-deploy/tasks/deploy.yml b/roles/kolla-ansible-deploy/tasks/deploy.yml index 5b36d7b971..771006735d 100644 --- a/roles/kolla-ansible-deploy/tasks/deploy.yml +++ b/roles/kolla-ansible-deploy/tasks/deploy.yml @@ -35,7 +35,7 @@ -vvv >/tmp/logs/ansible/post-deploy 2>&1 -- name: Run kolla-ansible validate-config on upgrades +- name: Run kolla-ansible validate-config ansible.builtin.shell: cmd: > . {{ kolla_ansible_venv_path }}/bin/activate && diff --git a/roles/kolla-ansible-upgrade/tasks/main.yml b/roles/kolla-ansible-upgrade/tasks/main.yml new file mode 100644 index 0000000000..a464f1cf3b --- /dev/null +++ b/roles/kolla-ansible-upgrade/tasks/main.yml @@ -0,0 +1,65 @@ +--- +- name: Generate self-signed certificates for the optional internal TLS tests + ansible.builtin.shell: + cmd: > + . 
{{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible certificates + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-certificates 2>&1 + +# NOTE(mnasiadka): Need to run bootstrap before upgrade +- name: Run kolla-ansible bootstrap-servers + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible bootstrap-servers + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-bootstrap 2>&1 + +- name: Run kolla-ansible prechecks + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible prechecks + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-prechecks 2>&1 + +- name: Run kolla-ansible pull + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible pull + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-pull 2>&1 + +- name: Run kolla-ansible deploy + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible upgrade + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade 2>&1 + +- name: Run kolla-ansible post-deploy + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible post-deploy + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-post-deploy 2>&1 + +- name: Run kolla-ansible validate-config on upgrades + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible validate-config + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-validate-config 2>&1 + diff --git a/tests/run.yml b/tests/run.yml index 1108ae6a4d..aa1ca4dbfd 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -725,14 +725,8 @@ --final /etc/kolla/passwords.yml # Perform an upgrade to the in-development code. 
- - name: Run upgrade.sh script - shell: - cmd: tests/upgrade.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" - SCENARIO: "{{ scenario }}" + - import_role: + name: kolla-ansible-upgrade - import_role: name: kolla-ansible-test-dashboard diff --git a/tests/upgrade.sh b/tests/upgrade.sh deleted file mode 100755 index eeea9d8188..0000000000 --- a/tests/upgrade.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. -export PYTHONUNBUFFERED=1 - - -function upgrade { - RAW_INVENTORY=/etc/kolla/inventory - - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - kolla-ansible certificates -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/certificates - # Previous versions had older docker, requests requirements for example - # Therefore we need to run bootstrap again to ensure libraries are in - # proper versions (ansible-collection-kolla is different for new version, potentionally - # also dependencies). 
- kolla-ansible bootstrap-servers -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-bootstrap - kolla-ansible prechecks -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-prechecks - - kolla-ansible pull -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/pull-upgrade - kolla-ansible upgrade -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade - - kolla-ansible post-deploy -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-post-deploy - - kolla-ansible validate-config -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/validate-config -} - - -upgrade From 102dc3cd0c8137df3856dd6e7885b4fa7ad5e448 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 14 Oct 2025 08:51:28 +0200 Subject: [PATCH 055/165] CI: Rework reconfigure.sh to Ansible role Change-Id: Ibe21c342ae4c4d79c16d4058f90f5763e94d231c Signed-off-by: Michal Nasiadka --- .../kolla-ansible-reconfigure/tasks/main.yml | 63 +++++++++++++++++++ tests/reconfigure.sh | 26 -------- tests/run.yml | 11 +--- 3 files changed, 65 insertions(+), 35 deletions(-) create mode 100644 roles/kolla-ansible-reconfigure/tasks/main.yml delete mode 100755 tests/reconfigure.sh diff --git a/roles/kolla-ansible-reconfigure/tasks/main.yml b/roles/kolla-ansible-reconfigure/tasks/main.yml new file mode 100644 index 0000000000..4cb60025a7 --- /dev/null +++ b/roles/kolla-ansible-reconfigure/tasks/main.yml @@ -0,0 +1,63 @@ +--- +- name: Run kolla-ansible prechecks + ansible.builtin.shell: + cmd: > + . 
{{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible prechecks + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/reconfigure-prechecks 2>&1 + +- name: Remove OVN DB containers and volumes on primary to test recreation (docker) + become: true + when: + - scenario == 'ovn' + - container_engine == 'docker' + vars: + ovn_db_services: + - "ovn_nb_db" + - "ovn_sb_db" + block: + - name: Remove OVN DB containers + community.docker.docker_container: + name: "{{ item }}" + state: absent + loop: "{{ ovn_db_services }}" + + - name: Remove OVN DB volumes + community.docker.docker_volume: + name: "{{ item }}" + state: absent + loop: "{{ ovn_db_services }}" + +- name: Remove OVN DB containers and volumes on primary to test recreation (podman) + become: true + when: + - scenario == 'ovn' + - container_engine == 'podman' + vars: + ovn_db_services: + - "ovn_nb_db" + - "ovn_sb_db" + block: + - name: Remove OVN DB containers + containers.podman.podman_container: + name: "{{ item }}" + state: absent + loop: "{{ ovn_db_services }}" + + - name: Remove OVN DB volumes + containers.podman.podman_volume: + name: "{{ item }}" + state: absent + loop: "{{ ovn_db_services }}" + +- name: Run kolla-ansible reconfigure + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible reconfigure + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/reconfigure 2>&1 + diff --git a/tests/reconfigure.sh b/tests/reconfigure.sh deleted file mode 100755 index 1824755729..0000000000 --- a/tests/reconfigure.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. 
-export PYTHONUNBUFFERED=1 - - -function reconfigure { - RAW_INVENTORY=/etc/kolla/inventory - - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - # TODO(jeffrey4l): make some configure file change and - # trigger a real reconfigure - # NOTE(mnasiadka): Remove OVN DB containers and volumes on primary to test recreation - if [[ $SCENARIO == "ovn" ]]; then - sudo ${CONTAINER_ENGINE} rm -f ovn_nb_db ovn_sb_db && sudo ${CONTAINER_ENGINE} volume rm ovn_nb_db ovn_sb_db - fi - kolla-ansible prechecks -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/reconfigure-prechecks - kolla-ansible reconfigure -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/reconfigure -} - - -reconfigure diff --git a/tests/run.yml b/tests/run.yml index aa1ca4dbfd..e837470cfe 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -807,15 +807,8 @@ - hosts: primary any_errors_fatal: true tasks: - - name: Run reconfigure.sh script - script: - cmd: reconfigure.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" - SCENARIO: "{{ scenario }}" - CONTAINER_ENGINE: "{{ container_engine }}" + - import_role: + name: kolla-ansible-reconfigure when: - not is_upgrade - scenario != "bifrost" From 13c40f141be8b456f887f13f6adbc05eba9c6497 Mon Sep 17 00:00:00 2001 From: Jack Hodgkiss Date: Tue, 14 Oct 2025 21:29:40 +0100 Subject: [PATCH 056/165] fix: use `FQDN` for `horizon` endpoints The `public` and `internal` endpoints for `Horizon` have been set explicitly to `kolla_external_fqdn` and `kolla_internal_fqdn` this setup does not work if using `HAProxy` single frontend configuration and can lead to `CORS` blocked when attempting to upload images via the `Horizon` dashboard. This change brings `Horizon` configuration inline with other services. 
Closes-Bug: 2111125 Change-Id: I3a8f40a06d64ac5ebd321c4da7fe577c1c4c380a Signed-off-by: Jack Hodgkiss --- ansible/group_vars/all/horizon.yml | 4 ++-- .../notes/fix-horizon-glance-cors-55e2e83902662c99.yaml | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/fix-horizon-glance-cors-55e2e83902662c99.yaml diff --git a/ansible/group_vars/all/horizon.yml b/ansible/group_vars/all/horizon.yml index fac7a27a46..fcab934b25 100644 --- a/ansible/group_vars/all/horizon.yml +++ b/ansible/group_vars/all/horizon.yml @@ -42,8 +42,8 @@ horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}" # Ports horizon_internal_fqdn: "{{ kolla_internal_fqdn }}" horizon_external_fqdn: "{{ kolla_external_fqdn }}" -horizon_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port) }}" -horizon_public_endpoint: "{{ kolla_external_fqdn | kolla_url(public_protocol, horizon_tls_port if kolla_enable_tls_external | bool else horizon_port) }}" +horizon_internal_endpoint: "{{ horizon_internal_fqdn | kolla_url(internal_protocol, horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port) }}" +horizon_public_endpoint: "{{ horizon_external_fqdn | kolla_url(public_protocol, horizon_tls_port if kolla_enable_tls_external | bool else horizon_port) }}" horizon_port: "80" horizon_tls_port: "443" horizon_listen_port: "{{ '8080' if enable_haproxy | bool else horizon_tls_port if horizon_enable_tls_backend | bool else horizon_port }}" diff --git a/releasenotes/notes/fix-horizon-glance-cors-55e2e83902662c99.yaml b/releasenotes/notes/fix-horizon-glance-cors-55e2e83902662c99.yaml new file mode 100644 index 0000000000..513f2d190f --- /dev/null +++ b/releasenotes/notes/fix-horizon-glance-cors-55e2e83902662c99.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an issue where CORS can be blocked when attempting + to upload an image via the Horizon user interface. 
From 2e5c62c31c89d0e992c640576f3b184903c6bf65 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 14 Oct 2025 09:19:17 +0200 Subject: [PATCH 057/165] CI: Rework setup_disks.sh into Ansible role Change-Id: Icd8648399a7d0a74419c80d573792397f971ff3c Signed-off-by: Michal Nasiadka --- roles/kolla-ansible-setup-disks/README.rst | 15 +++++++ .../kolla-ansible-setup-disks/tasks/main.yml | 42 +++++++++++++++++++ tests/run.yml | 7 +--- zuul.d/scenarios/cephadm.yaml | 3 ++ zuul.d/scenarios/zun.yaml | 2 + 5 files changed, 64 insertions(+), 5 deletions(-) create mode 100644 roles/kolla-ansible-setup-disks/README.rst create mode 100644 roles/kolla-ansible-setup-disks/tasks/main.yml diff --git a/roles/kolla-ansible-setup-disks/README.rst b/roles/kolla-ansible-setup-disks/README.rst new file mode 100644 index 0000000000..90bb4e92c0 --- /dev/null +++ b/roles/kolla-ansible-setup-disks/README.rst @@ -0,0 +1,15 @@ +Prepare disks for Kolla-Ansible CI run. + +**Role Variables** + +.. zuul:rolevar:: kolla_ansible_setup_disks_filepath + + Path to allocated file passed to loopmount + +.. zuul:rolevar:: kolla_ansible_setup_disks_lv_name + + Logical volume name to create (skipped if not set) + +.. 
zuul:rolevar:: kolla_ansible_setup_disks_vg_name + + Volume group name to create diff --git a/roles/kolla-ansible-setup-disks/tasks/main.yml b/roles/kolla-ansible-setup-disks/tasks/main.yml new file mode 100644 index 0000000000..e4e25f736a --- /dev/null +++ b/roles/kolla-ansible-setup-disks/tasks/main.yml @@ -0,0 +1,42 @@ +--- +- name: Check if kolla_ansible_setup_disks_file_path is set + ansible.builtin.assert: + that: kolla_ansible_setup_disks_file_path is defined + +- name: Check if kolla_ansible_setup_disks_vg_name is set + ansible.builtin.assert: + that: kolla_ansible_setup_disks_vg_name is defined + +- name: Allocate file for disk backing + become: true + community.general.filesize: + path: "{{ kolla_ansible_setup_disks_file_path }}" + size: "{{ kolla_ansible_setup_disks_file_size | default('5G') }}" + +- name: Get free loop device + become: true + ansible.builtin.shell: + cmd: "losetup -f" + register: _loop_device + +- name: Mount file on loop device + become: true + ansible.builtin.shell: + cmd: > + losetup {{ _loop_device.stdout }} + {{ kolla_ansible_setup_disks_file_path }} + +- name: Create LVM extents on loop device + become: true + community.general.lvg: + vg: "{{ kolla_ansible_setup_disks_vg_name }}" + pvs: "{{ _loop_device.stdout }}" + +- name: Create LV + become: true + community.general.lvol: + vg: "{{ kolla_ansible_setup_disks_vg_name }}" + lv: "{{ kolla_ansible_setup_disks_lv_name }}" + size: "100%FREE" + when: + - kolla_ansible_setup_disks_lv_name is defined diff --git a/tests/run.yml b/tests/run.yml index e837470cfe..08c7e4f40f 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -35,12 +35,9 @@ name: "{{ 'bind-utils' if ansible_os_family == 'RedHat' else 'dnsutils' }}" when: scenario == 'magnum' - - name: Prepare disks for a storage service - script: "setup_disks.sh {{ disk_type }}" + - import_role: + name: kolla-ansible-setup-disks when: scenario in ['cephadm', 'zun'] - become: true - vars: - disk_type: "{{ 'ceph-lvm' if scenario in ['cephadm'] 
else scenario }}" - name: Update /etc/hosts with internal API FQDN blockinfile: diff --git a/zuul.d/scenarios/cephadm.yaml b/zuul.d/scenarios/cephadm.yaml index 9d3bf69f22..a9b1ac7ea4 100644 --- a/zuul.d/scenarios/cephadm.yaml +++ b/zuul.d/scenarios/cephadm.yaml @@ -9,6 +9,9 @@ - ^ansible/roles/(ceph-rgw|common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|openvswitch|placement|proxysql|rabbitmq)/ - ^roles/cephadm/ vars: + kolla_ansible_setup_disks_file_path: "/var/lib/ceph-osd.img" + kolla_ansible_setup_disks_vg_name: "cephvg" + kolla_ansible_setup_disks_lv_name: "cephlv" scenario: cephadm scenario_images_extra: - ^cinder diff --git a/zuul.d/scenarios/zun.yaml b/zuul.d/scenarios/zun.yaml index 6ef8a9c602..25397725dc 100644 --- a/zuul.d/scenarios/zun.yaml +++ b/zuul.d/scenarios/zun.yaml @@ -11,6 +11,8 @@ - ^tests/test-zun.sh - ^tests/test-dashboard.sh vars: + kolla_ansible_setup_disks_file_path: "/var/lib/cinder_data.img" + kolla_ansible_setup_disks_vg_name: "cinder-volumes" scenario: zun scenario_images_extra: - ^zun From 5d51037d2a703e936a8345e2ab5897bde36a03b8 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 10 Oct 2025 17:27:24 +0200 Subject: [PATCH 058/165] CI: Switch test-core-openstack.sh to Tempest run Unfortunately that will lead to dropping DNS integration tests for internal DNS, because neutron-tempest-plugin does not have such tests. 
Change-Id: Ic1e0a6643e67467a22e91d277a9947c027935f09 Signed-off-by: Michal Nasiadka --- roles/kolla-ansible-tempest/defaults/main.yml | 11 +++++ roles/kolla-ansible-tempest/tasks/main.yml | 49 +++++++++++++++++++ tests/get_logs.sh | 2 +- tests/post.yml | 4 ++ tests/pre.yml | 6 +++ tests/run.yml | 32 +++--------- tests/setup_disks.sh | 29 ----------- zuul.d/base.yaml | 1 + zuul.d/scenarios/cephadm.yaml | 1 + 9 files changed, 79 insertions(+), 56 deletions(-) create mode 100644 roles/kolla-ansible-tempest/defaults/main.yml create mode 100644 roles/kolla-ansible-tempest/tasks/main.yml delete mode 100644 tests/setup_disks.sh diff --git a/roles/kolla-ansible-tempest/defaults/main.yml b/roles/kolla-ansible-tempest/defaults/main.yml new file mode 100644 index 0000000000..600059789a --- /dev/null +++ b/roles/kolla-ansible-tempest/defaults/main.yml @@ -0,0 +1,11 @@ +--- +kolla_ansible_tempest_packages: + - python-tempestconf + - tempest + +kolla_ansible_tempest_cirros_ver: "0.6.3" +kolla_ansible_tempest_exclude_regex: "" +kolla_ansible_tempest_packages_extra: [] +kolla_ansible_tempest_regex: "" + +post_upgrade: false diff --git a/roles/kolla-ansible-tempest/tasks/main.yml b/roles/kolla-ansible-tempest/tasks/main.yml new file mode 100644 index 0000000000..4991340f90 --- /dev/null +++ b/roles/kolla-ansible-tempest/tasks/main.yml @@ -0,0 +1,49 @@ +--- +- name: Install required packages + ansible.builtin.pip: + name: "{{ kolla_ansible_tempest_packages + kolla_ansible_tempest_packages_extra }}" + virtualenv: "{{ kolla_ansible_venv_path }}" + virtualenv_command: "python3 -m venv" + +- name: Init tempest workspace + ansible.builtin.shell: + cmd: > + {{ kolla_ansible_venv_path }}/bin/tempest init tempest + >/tmp/logs/ansible/test-init-tempest 2>&1 + creates: "/home/zuul/tempest" + +- name: Discover tempest config + vars: + ver: "{{ kolla_ansible_tempest_cirros_ver }}" + image: "https://download.cirros-cloud.net/{{ ver }}/cirros-{{ ver }}-x86_64-disk.img" + ansible.builtin.shell: + 
chdir: "/home/zuul/tempest" + cmd: > + {{ kolla_ansible_venv_path }}/bin/discover-tempest-config + --debug + --image {{ image }} + --os-cloud kolla-admin + >/tmp/logs/ansible/test-init-tempest-discover 2>&1 + environment: + OS_CLIENT_CONFIG_FILE: "/etc/kolla/clouds.yaml" + +- name: Run tempest tests + environment: + OS_LOG_CAPTURE: "1" + OS_STDOUT_CAPTURE: "1" + OS_STDERR_CAPTURE: "1" + OS_TEST_TIMEOUT: "1200" + vars: + tempest_log_file: "test-tempest-run{{ '-post-upgrade' if post_upgrade | bool else '' }}" + ansible.builtin.shell: + chdir: "/home/zuul/tempest" + cmd: > + {{ kolla_ansible_venv_path }}/bin/tempest run + --config-file etc/tempest.conf + {% if kolla_ansible_tempest_regex | length > 0 %} + --regex '{{ kolla_ansible_tempest_regex }}' + {% endif %} + {% if kolla_ansible_tempest_exclude_regex | length > 0 %} + --exclude-regex '{{ kolla_ansible_tempest_exclude_regex }}' + {% endif %} + >/tmp/logs/ansible/{{ tempest_log_file }} 2>&1 diff --git a/tests/get_logs.sh b/tests/get_logs.sh index 8633ec1c36..db18de5cdf 100644 --- a/tests/get_logs.sh +++ b/tests/get_logs.sh @@ -15,7 +15,7 @@ copy_logs() { echo "Invalid container engine: ${CONTAINER_ENGINE}" exit 1 fi - + cp -rL /home/zuul/tempest ${LOG_DIR}/ [ -d ${VOLUMES_DIR}/kolla_logs/_data ] && cp -rnL ${VOLUMES_DIR}/kolla_logs/_data/* ${LOG_DIR}/kolla/ [ -d /etc/kolla ] && cp -rnL /etc/kolla/* ${LOG_DIR}/kolla_configs/ # Don't save the IPA images. diff --git a/tests/post.yml b/tests/post.yml index 77dd25b615..4d99737506 100644 --- a/tests/post.yml +++ b/tests/post.yml @@ -2,6 +2,10 @@ - hosts: all vars: logs_dir: "/tmp/logs" + roles: + - role: fetch-subunit-output + zuul_work_dir: '/home/zuul/tempest' + tasks: # TODO(mhiner): Currently only Docker to Podman migration is tested. # If we want to test the other direction we have to rework this. 
diff --git a/tests/pre.yml b/tests/pre.yml index 4f8d4556d0..1a666e97d1 100644 --- a/tests/pre.yml +++ b/tests/pre.yml @@ -63,6 +63,12 @@ - python3-setuptools - python3-requests + - name: Install stestr + become: true + pip: + break_system_packages: true + name: stestr + - name: Install lvm on storage scenarios become: true package: diff --git a/tests/run.yml b/tests/run.yml index 08c7e4f40f..7e9d71a775 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -425,16 +425,8 @@ CONTAINER_ENGINE: "{{ container_engine }}" IS_UPGRADE: "{{ is_upgrade | bool | ternary('yes', 'no') }}" - - name: Run test-core-openstack.sh script - script: - cmd: test-core-openstack.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - SCENARIO: "{{ scenario }}" - HAS_UPGRADE: "{{ is_upgrade | bool | ternary('yes', 'no') }}" - PHASE: deploy - IP_VERSION: "{{ 6 if address_family == 'ipv6' else 4 }}" + - import_role: + name: kolla-ansible-tempest when: openstack_core_tested - name: Run test-zun.sh script @@ -729,22 +721,10 @@ name: kolla-ansible-test-dashboard when: dashboard_enabled - # NOTE(yoctozepto): We need the script module here to avoid - # a bug in Glance OSC [1][2] which results in a failure when a volume - # is given as a source. The stdin works differently in shell/command - # than script. 
- # [1] https://opendev.org/openstack/python-openstackclient/src/commit/6810414e45a32dd44263dff47fec161989508ef0/openstackclient/image/v2/image.py#L114-L120 - # [2] https://opendev.org/openstack/python-openstackclient/src/commit/6810414e45a32dd44263dff47fec161989508ef0/openstackclient/image/v2/image.py#L414 - - name: Run test-core-openstack.sh script (post upgrade) - script: - cmd: test-core-openstack.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - SCENARIO: "{{ scenario }}" - HAS_UPGRADE: 'yes' - PHASE: upgrade - IP_VERSION: "{{ 6 if address_family == 'ipv6' else 4 }}" + - import_role: + name: kolla-ansible-tempest + vars: + post_upgrade: true when: openstack_core_tested - name: Run test-prometheus-opensearch.sh script (post-upgrade) diff --git a/tests/setup_disks.sh b/tests/setup_disks.sh deleted file mode 100644 index 6cd03ddc41..0000000000 --- a/tests/setup_disks.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# $1: scenario / ceph store type - -set -o xtrace -set -o errexit - -mkdir -p /opt/data/kolla - -if [ $1 = 'zun' ]; then - # create cinder-volumes volume group for cinder lvm backend - free_device=$(losetup -f) - fallocate -l 5G /var/lib/cinder_data.img - losetup $free_device /var/lib/cinder_data.img - pvcreate $free_device - vgcreate cinder-volumes $free_device -elif [ $1 = 'ceph-lvm' ]; then - free_device=$(losetup -f) - fallocate -l 5G /var/lib/ceph-osd1.img - losetup $free_device /var/lib/ceph-osd1.img - pvcreate $free_device - vgcreate cephvg $free_device - lvcreate -l 100%FREE -n cephlv cephvg -else - echo "Unknown type" >&2 - exit 1 -fi - -partprobe diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 0a53f6df12..bae3aab5b1 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -60,6 +60,7 @@ container_engine: "docker" is_upgrade: "{{ 'upgrade' in zuul.job }}" is_slurp: "{{ 'slurp' in zuul.job }}" + kolla_ansible_tempest_regex: "\\[.*\\bsmoke\\b.*\\]" kolla_internal_vip_address: "192.0.2.10" le_enabled: false 
neutron_external_bridge_name: br-0 diff --git a/zuul.d/scenarios/cephadm.yaml b/zuul.d/scenarios/cephadm.yaml index a9b1ac7ea4..28ac2128b9 100644 --- a/zuul.d/scenarios/cephadm.yaml +++ b/zuul.d/scenarios/cephadm.yaml @@ -12,6 +12,7 @@ kolla_ansible_setup_disks_file_path: "/var/lib/ceph-osd.img" kolla_ansible_setup_disks_vg_name: "cephvg" kolla_ansible_setup_disks_lv_name: "cephlv" + kolla_ansible_tempest_exclude_regex: "^tempest.api.object_storage" scenario: cephadm scenario_images_extra: - ^cinder From d40adf5f0e166930ccd5373682e7c5c1044791a3 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Sun, 8 Sep 2024 10:08:37 +0200 Subject: [PATCH 059/165] mariadb: Switch to rsync sst_method on upgrade Due to [1]. [1]: https://jira.mariadb.org/browse/MDEV-27437 Change-Id: If43052aaead032734ab570af754f60563b3f7cd5 Signed-off-by: Seunghun Lee --- ansible/roles/mariadb/defaults/main.yml | 1 + ansible/roles/mariadb/tasks/upgrade.yml | 20 +++++++++++++++++++ ansible/roles/mariadb/templates/galera.cnf.j2 | 5 ++--- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/ansible/roles/mariadb/defaults/main.yml b/ansible/roles/mariadb/defaults/main.yml index 16165e901a..31c5efb2ac 100644 --- a/ansible/roles/mariadb/defaults/main.yml +++ b/ansible/roles/mariadb/defaults/main.yml @@ -75,6 +75,7 @@ mariadb_recover_tmp_file_path: "/tmp/kolla_mariadb_recover_inventory_name_{{ mar # WSREP options ############### mariadb_wsrep_extra_provider_options: [] +mariadb_wsrep_sst_method: "mariabackup" #################### # Backups diff --git a/ansible/roles/mariadb/tasks/upgrade.yml b/ansible/roles/mariadb/tasks/upgrade.yml index d72cc5b3d6..0b26bc8178 100644 --- a/ansible/roles/mariadb/tasks/upgrade.yml +++ b/ansible/roles/mariadb/tasks/upgrade.yml @@ -1,4 +1,24 @@ --- +- name: Set wsrep_sst_method to rsync for upgrade + become: true + shell: > + {{ kolla_container_engine }} exec {{ mariadb_service.container_name }} + mysql -uroot -p{{ database_password }} + -e \"SET GLOBAL 
wsrep_sst_method='rsync';\" + no_log: true + +- import_tasks: deploy.yml + vars: + mariadb_wsrep_sst_method: "rsync" + +- name: Set wsrep_sst_method to mariabackup after upgrade + become: true + shell: > + {{ kolla_container_engine }} exec {{ mariadb_service.container_name }} + mysql -uroot -p{{ database_password }} + -e \"SET GLOBAL wsrep_sst_method='mariabackup';\" + no_log: true + - import_tasks: deploy.yml # TODO(seunghun1ee): Remove this task after 2026.1 diff --git a/ansible/roles/mariadb/templates/galera.cnf.j2 b/ansible/roles/mariadb/templates/galera.cnf.j2 index c7e5916fd5..2e97361e30 100644 --- a/ansible/roles/mariadb/templates/galera.cnf.j2 +++ b/ansible/roles/mariadb/templates/galera.cnf.j2 @@ -1,5 +1,4 @@ {%- set wsrep_driver = '/usr/lib/galera/libgalera_smm.so' if kolla_base_distro in ['debian', 'ubuntu'] else '/usr/lib64/galera/libgalera_smm.so' %} -{% set sst_method = 'mariabackup' %} [client] default-character-set=utf8 @@ -41,7 +40,7 @@ wsrep_sst_receive_address={{ api_interface_address | put_address_in_context('url wsrep_provider={{ wsrep_driver }} wsrep_cluster_name="{{ database_cluster_name }}" wsrep_node_name={{ ansible_facts.hostname }} -wsrep_sst_method={{ sst_method }} +wsrep_sst_method={{ mariadb_wsrep_sst_method }} wsrep_sst_auth={{ database_user }}:{{ database_password }} wsrep_slave_threads=4 wsrep_on = ON @@ -62,7 +61,7 @@ innodb_buffer_pool_size = '8192M' pid-file=/var/lib/mysql/mariadb.pid [sst] -{% if sst_method == 'mariabackup' and api_address_family == 'ipv6' %} +{% if mariadb_wsrep_sst_method == 'mariabackup' and api_address_family == 'ipv6' %} # NOTE(yoctozepto): for IPv6 we need to tweak sockopt for socat (mariabackup sst backend) # see: https://mariadb.com/kb/en/library/xtrabackup-v2-sst-method/#performing-ssts-with-ipv6-addresses # and: https://jira.mariadb.org/browse/MDEV-18797 From a8093c638471b8df6b3193a15ee993aec6855d46 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 22 Oct 2025 17:29:46 +0200 Subject: [PATCH 
060/165] CI: Add rocky to mariadb scenario gate Change-Id: Iccc51c4dc57d0309bbca7aef655286bf8e6e49c5 Signed-off-by: Michal Nasiadka --- zuul.d/scenarios/mariadb.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/zuul.d/scenarios/mariadb.yaml b/zuul.d/scenarios/mariadb.yaml index 01811c54a0..0da06d60c7 100644 --- a/zuul.d/scenarios/mariadb.yaml +++ b/zuul.d/scenarios/mariadb.yaml @@ -45,4 +45,5 @@ gate: jobs: - kolla-ansible-debian-bookworm-mariadb + - kolla-ansible-rocky-10-mariadb - kolla-ansible-ubuntu-noble-mariadb From ea5675f49941b33eb0dd465e0a7f7b9092917b93 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 24 Oct 2025 09:20:12 +0200 Subject: [PATCH 061/165] mariadb: Fix wsrep sst setting on upgrade It seems we merged a nice multiline failure Adding upgrade jobs to catch similar errors Change-Id: I38853def2efdeaa196628fd4caed8f0b287b344e Signed-off-by: Michal Nasiadka --- ansible/roles/mariadb/tasks/upgrade.yml | 8 ++++---- zuul.d/scenarios/mariadb.yaml | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/ansible/roles/mariadb/tasks/upgrade.yml b/ansible/roles/mariadb/tasks/upgrade.yml index 0b26bc8178..9a54536962 100644 --- a/ansible/roles/mariadb/tasks/upgrade.yml +++ b/ansible/roles/mariadb/tasks/upgrade.yml @@ -1,11 +1,11 @@ --- - name: Set wsrep_sst_method to rsync for upgrade become: true + no_log: true shell: > {{ kolla_container_engine }} exec {{ mariadb_service.container_name }} mysql -uroot -p{{ database_password }} - -e \"SET GLOBAL wsrep_sst_method='rsync';\" - no_log: true + -e "SET GLOBAL wsrep_sst_method='rsync';" - import_tasks: deploy.yml vars: @@ -13,11 +13,11 @@ - name: Set wsrep_sst_method to mariabackup after upgrade become: true + no_log: true shell: > {{ kolla_container_engine }} exec {{ mariadb_service.container_name }} mysql -uroot -p{{ database_password }} - -e \"SET GLOBAL wsrep_sst_method='mariabackup';\" - no_log: true + -e "SET GLOBAL wsrep_sst_method='mariabackup';" - import_tasks: 
deploy.yml diff --git a/zuul.d/scenarios/mariadb.yaml b/zuul.d/scenarios/mariadb.yaml index 0da06d60c7..3e540e93a1 100644 --- a/zuul.d/scenarios/mariadb.yaml +++ b/zuul.d/scenarios/mariadb.yaml @@ -23,6 +23,13 @@ parent: kolla-ansible-mariadb-base nodeset: kolla-ansible-debian-bookworm-multi-16GB +- job: + name: kolla-ansible-debian-bookworm-mariadb-upgrade + parent: kolla-ansible-mariadb-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + vars: + is_upgrade: true + - job: name: kolla-ansible-rocky-10-mariadb parent: kolla-ansible-mariadb-base @@ -33,6 +40,13 @@ parent: kolla-ansible-mariadb-base nodeset: kolla-ansible-ubuntu-noble-multi-8GB +- job: + name: kolla-ansible-ubuntu-noble-mariadb-upgrade + parent: kolla-ansible-mariadb-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + vars: + is_upgrade: true + - project-template: name: kolla-ansible-scenario-mariadb description: | @@ -40,10 +54,14 @@ check: jobs: - kolla-ansible-debian-bookworm-mariadb + - kolla-ansible-debian-bookworm-mariadb-upgrade - kolla-ansible-rocky-10-mariadb - kolla-ansible-ubuntu-noble-mariadb + - kolla-ansible-ubuntu-noble-mariadb-upgrade gate: jobs: - kolla-ansible-debian-bookworm-mariadb + - kolla-ansible-debian-bookworm-mariadb-upgrade - kolla-ansible-rocky-10-mariadb - kolla-ansible-ubuntu-noble-mariadb + - kolla-ansible-ubuntu-noble-mariadb-upgrade From 0b5ba14956c2e5fd5289ce7565182c3753922a3b Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 24 Oct 2025 12:00:10 +0200 Subject: [PATCH 062/165] neutron: Drop neutron_legacy_iptables Change-Id: Ic2b9c8cbb431435c7622b0ec03e21a2d246f066c Signed-off-by: Michal Nasiadka --- ansible/group_vars/all/neutron.yml | 3 --- ansible/roles/neutron/defaults/main.yml | 5 ----- ansible/roles/neutron/handlers/main.yml | 8 ++++---- .../notes/drop-legacy-iptables-1979f67a924d4da1.yaml | 4 ++++ 4 files changed, 8 insertions(+), 12 deletions(-) create mode 100644 releasenotes/notes/drop-legacy-iptables-1979f67a924d4da1.yaml diff --git 
a/ansible/group_vars/all/neutron.yml b/ansible/group_vars/all/neutron.yml index 0ef47e2edb..938b1836ed 100644 --- a/ansible/group_vars/all/neutron.yml +++ b/ansible/group_vars/all/neutron.yml @@ -51,9 +51,6 @@ computes_need_external_bridge: "{{ (enable_neutron_dvr | bool and neutron_comput # Default DNS resolvers for virtual networks neutron_dnsmasq_dns_servers: "1.1.1.1,8.8.8.8,8.8.4.4" -# Set legacy iptables to allow kernels not supporting iptables-nft -neutron_legacy_iptables: "no" - # Enable distributed floating ip for OVN deployments neutron_ovn_distributed_fip: "no" diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml index 2a9401772a..d36dd4edb5 100644 --- a/ansible/roles/neutron/defaults/main.yml +++ b/ansible/roles/neutron/defaults/main.yml @@ -62,8 +62,6 @@ neutron_services: image: "{{ neutron_openvswitch_agent_image_full }}" enabled: "{{ neutron_plugin_agent == 'openvswitch' }}" privileged: True - environment: - KOLLA_LEGACY_IPTABLES: "{{ neutron_legacy_iptables | bool | lower }}" host_in_groups: >- {{ (inventory_hostname in groups['compute'] @@ -86,8 +84,6 @@ neutron_services: image: "{{ neutron_linuxbridge_agent_image_full }}" privileged: True enabled: "{{ neutron_plugin_agent == 'linuxbridge' }}" - environment: - KOLLA_LEGACY_IPTABLES: "{{ neutron_legacy_iptables | bool | lower }}" host_in_groups: >- {{ inventory_hostname in groups['compute'] @@ -123,7 +119,6 @@ neutron_services: enabled: "{{ neutron_plugin_agent != 'ovn' }}" environment: KOLLA_IMAGE: "{{ neutron_l3_agent_image_full }}" - KOLLA_LEGACY_IPTABLES: "{{ neutron_legacy_iptables | bool | lower }}" KOLLA_NAME: "neutron_l3_agent" KOLLA_NEUTRON_WRAPPERS: "{{ 'true' if neutron_agents_wrappers | bool else 'false' }}" host_in_groups: >- diff --git a/ansible/roles/neutron/handlers/main.yml b/ansible/roles/neutron/handlers/main.yml index 867880bcbc..068b831526 100644 --- a/ansible/roles/neutron/handlers/main.yml +++ b/ansible/roles/neutron/handlers/main.yml 
@@ -69,7 +69,7 @@ common_options: "{{ docker_common_options }}" name: "{{ service.container_name }}" image: "{{ service.image }}" - environment: "{{ service.environment }}" + environment: "{{ service.environment | default(omit) }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" privileged: "{{ service.privileged | default(False) }}" @@ -101,7 +101,7 @@ common_options: "{{ docker_common_options }}" name: "{{ service.container_name }}" image: "{{ service.image }}" - environment: "{{ service.environment }}" + environment: "{{ service.environment | default(omit) }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" privileged: "{{ service.privileged | default(False) }}" @@ -150,7 +150,7 @@ common_options: "{{ docker_common_options }}" name: "{{ service.container_name }}" image: "{{ service.image }}" - environment: "{{ service.environment }}" + environment: "{{ service.environment | default(omit) }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" privileged: "{{ service.privileged | default(False) }}" @@ -180,7 +180,7 @@ common_options: "{{ docker_common_options }}" name: "{{ service.container_name }}" image: "{{ service.image }}" - environment: "{{ service.environment }}" + environment: "{{ service.environment | default(omit) }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" privileged: "{{ service.privileged | default(False) }}" diff --git a/releasenotes/notes/drop-legacy-iptables-1979f67a924d4da1.yaml b/releasenotes/notes/drop-legacy-iptables-1979f67a924d4da1.yaml new file mode 100644 index 0000000000..0fa17f01d0 --- /dev/null +++ b/releasenotes/notes/drop-legacy-iptables-1979f67a924d4da1.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + ``neutron_legacy_iptables`` and its handling has been dropped. 
From 0f0e9bbb085b310fc1ed13fd283df91f7d82db10 Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Tue, 14 Jan 2025 12:04:11 +0000 Subject: [PATCH 063/165] Support ironic-pxe-filter Closes-Bug: #2094790 Depends-On: https://review.opendev.org/c/openstack/kolla/+/939256 Change-Id: I1b5329d814432604640990b0ecc28906845e29d6 Signed-off-by: Michal Nasiadka Signed-off-by: Will Szumski --- ansible/group_vars/all/ironic.yml | 1 + ansible/roles/ironic/defaults/main.yml | 27 ++++++++++++++++++- ansible/roles/ironic/handlers/main.yml | 15 +++++++++++ ansible/roles/ironic/tasks/config.yml | 2 +- .../roles/ironic/tasks/rolling_upgrade.yml | 22 +++++++-------- .../templates/ironic-pxe-filter.json.j2 | 23 ++++++++++++++++ ansible/roles/ironic/templates/ironic.conf.j2 | 3 +++ .../reference/bare-metal/ironic-guide.rst | 20 ++++++++++++++ etc/kolla/globals.yml | 1 + .../ironic-pxe-filter-8376c424cb533bd3.yaml | 6 +++++ tests/templates/globals-default.j2 | 1 + 11 files changed, 107 insertions(+), 14 deletions(-) create mode 100644 ansible/roles/ironic/templates/ironic-pxe-filter.json.j2 create mode 100644 releasenotes/notes/ironic-pxe-filter-8376c424cb533bd3.yaml diff --git a/ansible/group_vars/all/ironic.yml b/ansible/group_vars/all/ironic.yml index 48e017975d..1acb6e7ab1 100644 --- a/ansible/group_vars/all/ironic.yml +++ b/ansible/group_vars/all/ironic.yml @@ -3,6 +3,7 @@ enable_ironic: "no" enable_ironic_dnsmasq: "{{ enable_ironic | bool }}" enable_ironic_neutron_agent: "no" enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}" +enable_ironic_pxe_filter: "no" # Keystone user ironic_keystone_user: "ironic" diff --git a/ansible/roles/ironic/defaults/main.yml b/ansible/roles/ironic/defaults/main.yml index fb485375a2..9c80ad6fea 100644 --- a/ansible/roles/ironic/defaults/main.yml +++ b/ansible/roles/ironic/defaults/main.yml @@ -68,6 +68,17 @@ ironic_services: image: "{{ ironic_dnsmasq_image_full }}" volumes: "{{ ironic_dnsmasq_default_volumes + 
ironic_dnsmasq_extra_volumes }}" dimensions: "{{ ironic_dnsmasq_dimensions }}" + pid_mode: host + ironic-pxe-filter: + container_name: ironic_pxe_filter + group: ironic-dnsmasq + enabled: "{{ enable_ironic_pxe_filter }}" + image: "{{ ironic_pxe_filter_image_full }}" + volumes: "{{ ironic_pxe_filter_default_volumes + ironic_pxe_filter_extra_volumes }}" + dimensions: "{{ ironic_pxe_filter_dimensions }}" + # TODO: --pid container:ironic_dnsmasq but this is more complicated since we need to + # declare dependency in systemd too. + pid_mode: host ironic-prometheus-exporter: container_name: ironic_prometheus_exporter group: ironic-conductor @@ -127,6 +138,10 @@ ironic_dnsmasq_image: "{{ docker_image_url }}dnsmasq" ironic_dnsmasq_tag: "{{ ironic_tag }}" ironic_dnsmasq_image_full: "{{ ironic_dnsmasq_image }}:{{ ironic_dnsmasq_tag }}" +ironic_pxe_filter_image: "{{ docker_image_url }}ironic-pxe-filter" +ironic_pxe_filter_tag: "{{ ironic_tag }}" +ironic_pxe_filter_image_full: "{{ ironic_pxe_filter_image }}:{{ ironic_pxe_filter_tag }}" + ironic_prometheus_exporter_image: "{{ docker_image_url }}ironic-prometheus-exporter" ironic_prometheus_exporter_tag: "{{ ironic_tag }}" ironic_prometheus_exporter_image_full: "{{ ironic_prometheus_exporter_image }}:{{ ironic_prometheus_exporter_tag }}" @@ -136,6 +151,7 @@ ironic_conductor_dimensions: "{{ default_container_dimensions }}" ironic_tftp_dimensions: "{{ default_container_dimensions }}" ironic_http_dimensions: "{{ default_container_dimensions }}" ironic_dnsmasq_dimensions: "{{ default_container_dimensions }}" +ironic_pxe_filter_dimensions: "{{ default_container_dimensions }}" ironic_prometheus_exporter_dimensions: "{{ default_container_dimensions }}" ironic_api_enable_healthchecks: "{{ enable_container_healthchecks }}" @@ -212,8 +228,16 @@ ironic_dnsmasq_default_volumes: - "{{ node_config_directory }}/ironic-dnsmasq/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" - "{{ '/etc/timezone:/etc/timezone:ro' if 
ansible_facts.os_family == 'Debian' else '' }}" - - "kolla_logs:/var/log/kolla" - "ironic_dhcp_hosts:/etc/dnsmasq/dhcp-hostsdir:ro" + - "kolla_logs:/var/log/kolla" + - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}" +ironic_pxe_filter_default_volumes: + - "{{ node_config_directory }}/ironic-pxe-filter/:{{ container_config_directory }}/:ro" + - "/etc/localtime:/etc/localtime:ro" + - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" + - "kolla_logs:/var/log/kolla" + - "ironic_dhcp_hosts:/etc/dnsmasq/dhcp-hostsdir" + - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}" ironic_prometheus_exporter_default_volumes: - "{{ node_config_directory }}/ironic-prometheus-exporter/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" @@ -227,6 +251,7 @@ ironic_conductor_extra_volumes: "{{ ironic_extra_volumes }}" ironic_tftp_extra_volumes: "{{ ironic_extra_volumes }}" ironic_http_extra_volumes: "{{ ironic_extra_volumes }}" ironic_dnsmasq_extra_volumes: "{{ ironic_extra_volumes }}" +ironic_pxe_filter_extra_volumes: "{{ ironic_extra_volumes }}" ironic_prometheus_exporter_extra_volumes: "{{ ironic_extra_volumes }}" #################### diff --git a/ansible/roles/ironic/handlers/main.yml b/ansible/roles/ironic/handlers/main.yml index d7989a5736..417fbd46f5 100644 --- a/ansible/roles/ironic/handlers/main.yml +++ b/ansible/roles/ironic/handlers/main.yml @@ -69,6 +69,21 @@ volumes: "{{ service.volumes }}" dimensions: "{{ service.dimensions }}" cap_add: "{{ service.cap_add }}" + pid_mode: "{{ service.pid_mode }}" + +- name: Restart ironic-pxe-filter container + vars: + service_name: "ironic-pxe-filter" + service: "{{ ironic_services[service_name] }}" + become: true + kolla_container: + action: "recreate_or_restart_container" + common_options: "{{ docker_common_options }}" + name: "{{ service.container_name }}" + image: "{{ service.image }}" + volumes: "{{ service.volumes }}" + 
dimensions: "{{ service.dimensions }}" + pid_mode: "{{ service.pid_mode }}" - name: Restart ironic-prometheus-exporter container vars: diff --git a/ansible/roles/ironic/tasks/config.yml b/ansible/roles/ironic/tasks/config.yml index 98a3e66e2e..1919094860 100644 --- a/ansible/roles/ironic/tasks/config.yml +++ b/ansible/roles/ironic/tasks/config.yml @@ -54,7 +54,7 @@ mode: "0660" become: true when: - - item.key in [ "ironic-api", "ironic-conductor", "ironic-prometheus-exporter" ] + - item.key in [ "ironic-api", "ironic-conductor", "ironic-prometheus-exporter", "ironic-pxe-filter" ] with_dict: "{{ ironic_services | select_services_enabled_and_mapped_to_host }}" - name: Copying over dnsmasq.conf diff --git a/ansible/roles/ironic/tasks/rolling_upgrade.yml b/ansible/roles/ironic/tasks/rolling_upgrade.yml index 66a86fcf6e..40998142c1 100644 --- a/ansible/roles/ironic/tasks/rolling_upgrade.yml +++ b/ansible/roles/ironic/tasks/rolling_upgrade.yml @@ -5,22 +5,20 @@ # This is only needed when performing a slow rolling upgrade process # where you need to maintain compatibility between different versions # during the upgrade. For direct version jumps, this section can be skipped. -- import_tasks: config.yml - vars: - pin_release_version: "{{ ironic_pin_release_version }}" +- name: Pin release version for rolling upgrades when: ironic_pin_release_version | length > 0 + block: + - import_tasks: config.yml + vars: + pin_release_version: "{{ ironic_pin_release_version }}" -- import_tasks: check-containers.yml - -- import_tasks: bootstrap_service.yml + - import_tasks: check-containers.yml -# TODO(donghm): Flush_handlers to restart ironic services -# should be run in serial nodes to decrease downtime. Update when -# the module ansible strategy for rolling upgrade is finished. 
+ - import_tasks: bootstrap_service.yml -# Restart ironic services with pinned release version -- name: Flush handlers - meta: flush_handlers + # Restart ironic services with pinned release version + - name: Flush handlers + meta: flush_handlers # Unpin version - import_tasks: config.yml diff --git a/ansible/roles/ironic/templates/ironic-pxe-filter.json.j2 b/ansible/roles/ironic/templates/ironic-pxe-filter.json.j2 new file mode 100644 index 0000000000..6bcf7e351b --- /dev/null +++ b/ansible/roles/ironic/templates/ironic-pxe-filter.json.j2 @@ -0,0 +1,23 @@ +{ + "command": "ironic-pxe-filter --config-file /etc/ironic/ironic.conf --log-file /var/log/kolla/ironic/ironic-pxe-filter.log", + "config_files": [ + { + "source": "{{ container_config_directory }}/ironic.conf", + "dest": "/etc/ironic/ironic.conf", + "owner": "ironic", + "perm": "0600" + } + ], + "permissions": [ + { + "path": "/var/log/kolla/ironic", + "owner": "ironic:ironic", + "recurse": true + }, + { + "path": "/var/lib/ironic", + "owner": "ironic:ironic", + "recurse": true + } + ] +} diff --git a/ansible/roles/ironic/templates/ironic.conf.j2 b/ansible/roles/ironic/templates/ironic.conf.j2 index 1f0c6a4265..66fbdb1f77 100644 --- a/ansible/roles/ironic/templates/ironic.conf.j2 +++ b/ansible/roles/ironic/templates/ironic.conf.j2 @@ -197,3 +197,6 @@ dhcp_provider = none [oslo_concurrency] lock_path = /var/lib/ironic/tmp + +[pxe_filter] +dhcp_hostsdir = /etc/dnsmasq/dhcp-hostsdir diff --git a/doc/source/reference/bare-metal/ironic-guide.rst b/doc/source/reference/bare-metal/ironic-guide.rst index d7a5ee90b7..99e8bbefb2 100644 --- a/doc/source/reference/bare-metal/ironic-guide.rst +++ b/doc/source/reference/bare-metal/ironic-guide.rst @@ -107,6 +107,26 @@ You may optionally pass extra kernel parameters to the inspection kernel using: in ``/etc/kolla/globals.yml``. 
+PXE filter (optional) +~~~~~~~~~~~~~~~~~~~~~ + +To keep parity with the standalone inspector you can enable the experimental +PXE filter service: + +.. code-block:: yaml + + enable_ironic_pxe_filter: "yes" + +The PXE filter container runs alongside ``ironic-dnsmasq`` and cleans up stale +DHCP entries. It is especially useful when auto discovery is enabled and when +the dnsmasq DHCP range overlaps with a Neutron-served network. For the upstream +details see +https://docs.openstack.org/ironic/latest/admin/inspection/pxe_filter.html. + +.. note:: + + Upstream still classifies this PXE filter implementation as experimental. + Configure conductor's HTTP server port (optional) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The port used for conductor's HTTP server is controlled via diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml index 9d4f00a0a4..3c05da982d 100644 --- a/etc/kolla/globals.yml +++ b/etc/kolla/globals.yml @@ -385,6 +385,7 @@ workaround_ansible_issue_8743: yes #enable_ironic: "no" #enable_ironic_neutron_agent: "no" #enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}" +#enable_ironic_pxe_filter: "no" #enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}" #enable_kuryr: "no" #enable_magnum: "no" diff --git a/releasenotes/notes/ironic-pxe-filter-8376c424cb533bd3.yaml b/releasenotes/notes/ironic-pxe-filter-8376c424cb533bd3.yaml new file mode 100644 index 0000000000..80f38db8d2 --- /dev/null +++ b/releasenotes/notes/ironic-pxe-filter-8376c424cb533bd3.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Adds the optional ``ironic-pxe-filter`` service controlled by + ``enable_ironic_pxe_filter``. This brings parity with the standalone + inspector. Upstream currently classifies the PXE filter as experimental. 
diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2 index 32a28773fb..3444d87e3b 100644 --- a/tests/templates/globals-default.j2 +++ b/tests/templates/globals-default.j2 @@ -127,6 +127,7 @@ enable_aodh: "yes" {% if scenario == "ironic" %} enable_ironic: "yes" +enable_ironic_pxe_filter: "yes" enable_prometheus: "yes" enable_prometheus_openstack_exporter: "no" ironic_dnsmasq_dhcp_ranges: From fd6d740106965b6518fd2b6f3d1ab9e84221966d Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Thu, 25 Sep 2025 22:26:01 +0900 Subject: [PATCH 064/165] neutron: Remove support for linux bridge mechanism driver ... because it was removed from neutron during 2025.1 cycle[1]. [1] https://review.opendev.org/c/openstack/neutron/+/927216 Depends-On: https://review.opendev.org/c/openstack/kolla/+/962279 Change-Id: I79d896d7dea299641bc5a7f0f2a0735c9719a16d Signed-off-by: Takashi Kajinami --- ansible/group_vars/all/neutron.yml | 4 +- ansible/group_vars/all/openvswitch.yml | 2 +- .../manila/templates/manila-share.conf.j2 | 2 - ansible/roles/neutron/defaults/main.yml | 47 ------------------- ansible/roles/neutron/handlers/main.yml | 16 ------- ansible/roles/neutron/tasks/config-host.yml | 2 +- ansible/roles/neutron/tasks/config.yml | 16 ------- .../templates/linuxbridge_agent.ini.j2 | 24 ---------- .../neutron-linuxbridge-agent.json.j2 | 44 ----------------- .../roles/neutron/templates/neutron.conf.j2 | 2 - etc/kolla/globals.yml | 6 +-- ...-neutron-linuxbridge-b1a2457e848709f7.yaml | 5 ++ 12 files changed, 10 insertions(+), 160 deletions(-) delete mode 100644 ansible/roles/neutron/templates/linuxbridge_agent.ini.j2 delete mode 100644 ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 create mode 100644 releasenotes/notes/remove-neutron-linuxbridge-b1a2457e848709f7.yaml diff --git a/ansible/group_vars/all/neutron.yml b/ansible/group_vars/all/neutron.yml index 938b1836ed..2c3b9ca948 100644 --- a/ansible/group_vars/all/neutron.yml +++ 
b/ansible/group_vars/all/neutron.yml @@ -22,9 +22,7 @@ neutron_enable_ovn_agent: "no" neutron_keystone_user: "neutron" -# Valid options are [ openvswitch, ovn, linuxbridge ] -# Do note linuxbridge is *EXPERIMENTAL* in Neutron since Zed and it requires extra tweaks to config to be usable. -# For details, see: https://docs.openstack.org/neutron/latest/admin/config-experimental-framework.html +# Valid options are [ openvswitch, ovn ] neutron_plugin_agent: "openvswitch" # Valid options are [ internal, infoblox ] diff --git a/ansible/group_vars/all/openvswitch.yml b/ansible/group_vars/all/openvswitch.yml index 68a813b43b..731f99d00b 100644 --- a/ansible/group_vars/all/openvswitch.yml +++ b/ansible/group_vars/all/openvswitch.yml @@ -1,5 +1,5 @@ --- -enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}" +enable_openvswitch: "{{ enable_neutron | bool }}" enable_ovs_dpdk: "no" ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}" diff --git a/ansible/roles/manila/templates/manila-share.conf.j2 b/ansible/roles/manila/templates/manila-share.conf.j2 index ee1deb3413..cade41e823 100644 --- a/ansible/roles/manila/templates/manila-share.conf.j2 +++ b/ansible/roles/manila/templates/manila-share.conf.j2 @@ -74,8 +74,6 @@ memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_addres share_driver = manila.share.drivers.generic.GenericShareDriver {% if neutron_plugin_agent == "openvswitch" %} interface_driver = manila.network.linux.interface.OVSInterfaceDriver -{% elif neutron_plugin_agent == "linuxbridge" %} -interface_driver = manila.network.linux.interface.BridgeInterfaceDriver {% endif %} driver_handles_share_servers = true diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml index d36dd4edb5..731672997f 100644 --- a/ansible/roles/neutron/defaults/main.yml +++ b/ansible/roles/neutron/defaults/main.yml @@ -79,22 +79,6 @@ neutron_services: volumes: "{{ 
neutron_openvswitch_agent_default_volumes + neutron_openvswitch_agent_extra_volumes }}" dimensions: "{{ neutron_openvswitch_agent_dimensions }}" healthcheck: "{{ neutron_openvswitch_agent_healthcheck }}" - neutron-linuxbridge-agent: - container_name: "neutron_linuxbridge_agent" - image: "{{ neutron_linuxbridge_agent_image_full }}" - privileged: True - enabled: "{{ neutron_plugin_agent == 'linuxbridge' }}" - host_in_groups: >- - {{ - inventory_hostname in groups['compute'] - or (enable_manila | bool and inventory_hostname in groups['manila-share']) - or inventory_hostname in groups['neutron-dhcp-agent'] - or inventory_hostname in groups['neutron-l3-agent'] - or inventory_hostname in groups['neutron-metadata-agent'] - }} - volumes: "{{ neutron_linuxbridge_agent_default_volumes + neutron_linuxbridge_agent_extra_volumes }}" - dimensions: "{{ neutron_linuxbridge_agent_dimensions }}" - healthcheck: "{{ neutron_linuxbridge_agent_healthcheck }}" neutron-dhcp-agent: cgroupns_mode: "{{ 'host' if neutron_agents_wrappers | bool else 'private' }}" container_name: "neutron_dhcp_agent" @@ -248,8 +232,6 @@ neutron_config_validation: config: "/etc/neutron/l3_agent.ini" - generator: "/neutron/etc/oslo-config-generator/dhcp_agent.ini" config: "/etc/neutron/dhcp_agent.ini" - - generator: "/neutron/etc/oslo-config-generator/linuxbridge_agent.ini" - config: "/etc/neutron/plugins/ml2/linuxbridge_agent.ini" #################### # Database @@ -301,10 +283,6 @@ neutron_eswitchd_image: "{{ docker_image_url }}neutron-mlnx-agent" neutron_eswitchd_tag: "{{ neutron_mlnx_agent_tag }}" neutron_eswitchd_image_full: "{{ neutron_eswitchd_image }}:{{ neutron_eswitchd_tag }}" -neutron_linuxbridge_agent_image: "{{ docker_image_url }}neutron-linuxbridge-agent" -neutron_linuxbridge_agent_tag: "{{ neutron_tag }}" -neutron_linuxbridge_agent_image_full: "{{ neutron_linuxbridge_agent_image }}:{{ neutron_linuxbridge_agent_tag }}" - neutron_metadata_agent_image: "{{ docker_image_url }}neutron-metadata-agent" 
neutron_metadata_agent_tag: "{{ neutron_tag }}" neutron_metadata_agent_image_full: "{{ neutron_metadata_agent_image }}:{{ neutron_metadata_agent_tag }}" @@ -360,7 +338,6 @@ neutron_l3_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_sriov_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_mlnx_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_eswitchd_dimensions: "{{ neutron_agent_dimensions }}" -neutron_linuxbridge_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_metadata_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_ovn_metadata_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_openvswitch_agent_dimensions: "{{ neutron_agent_dimensions }}" @@ -400,19 +377,6 @@ neutron_l3_agent_healthcheck: test: "{% if neutron_l3_agent_enable_healthchecks | bool %}{{ neutron_l3_agent_healthcheck_test }}{% else %}NONE{% endif %}" timeout: "{{ neutron_l3_agent_healthcheck_timeout }}" -neutron_linuxbridge_agent_enable_healthchecks: "{{ enable_container_healthchecks }}" -neutron_linuxbridge_agent_healthcheck_interval: "{{ default_container_healthcheck_interval }}" -neutron_linuxbridge_agent_healthcheck_retries: "{{ default_container_healthcheck_retries }}" -neutron_linuxbridge_agent_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" -neutron_linuxbridge_agent_healthcheck_test: ["CMD-SHELL", "healthcheck_port neutron-linuxbridge-agent {{ om_rpc_port }}"] -neutron_linuxbridge_agent_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" -neutron_linuxbridge_agent_healthcheck: - interval: "{{ neutron_linuxbridge_agent_healthcheck_interval }}" - retries: "{{ neutron_linuxbridge_agent_healthcheck_retries }}" - start_period: "{{ neutron_linuxbridge_agent_healthcheck_start_period }}" - test: "{% if neutron_linuxbridge_agent_enable_healthchecks | bool %}{{ neutron_linuxbridge_agent_healthcheck_test }}{% else %}NONE{% endif %}" - timeout: "{{ neutron_linuxbridge_agent_healthcheck_timeout }}" - 
neutron_metadata_agent_enable_healthchecks: "no" neutron_metadata_agent_healthcheck_interval: "{{ default_container_healthcheck_interval }}" neutron_metadata_agent_healthcheck_retries: "{{ default_container_healthcheck_retries }}" @@ -603,14 +567,6 @@ neutron_eswitchd_default_volumes: - "kolla_logs:/var/log/kolla/" - "{{ '/dev/shm:/dev/shm' }}" - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}" -neutron_linuxbridge_agent_default_volumes: - - "{{ node_config_directory }}/neutron-linuxbridge-agent/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "/lib/modules:/lib/modules:ro" - - "kolla_logs:/var/log/kolla/" - - "{{ '/dev/shm:/dev/shm' }}" - - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}" neutron_metadata_agent_default_volumes: - "{{ node_config_directory }}/neutron-metadata-agent/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" @@ -700,7 +656,6 @@ neutron_l3_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_sriov_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_mlnx_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_eswitchd_extra_volumes: "{{ neutron_extra_volumes }}" -neutron_linuxbridge_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_metadata_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_ovn_metadata_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_openvswitch_agent_extra_volumes: "{{ neutron_extra_volumes }}" @@ -763,8 +718,6 @@ neutron_subprojects: # Mechanism drivers #################### mechanism_drivers: - - name: "linuxbridge" - enabled: "{{ neutron_plugin_agent == 'linuxbridge' }}" - name: "openvswitch" enabled: "{{ neutron_plugin_agent == 'openvswitch' }}" - name: "mlnx_infiniband" diff --git a/ansible/roles/neutron/handlers/main.yml 
b/ansible/roles/neutron/handlers/main.yml index 068b831526..386e3a97d1 100644 --- a/ansible/roles/neutron/handlers/main.yml +++ b/ansible/roles/neutron/handlers/main.yml @@ -91,22 +91,6 @@ healthcheck: "{{ service.healthcheck | default(omit) }}" with_sequence: "start=1 end={{ num_nova_fake_per_node }}" -- name: Restart neutron-linuxbridge-agent container - vars: - service_name: "neutron-linuxbridge-agent" - service: "{{ neutron_services[service_name] }}" - become: true - kolla_container: - action: "recreate_or_restart_container" - common_options: "{{ docker_common_options }}" - name: "{{ service.container_name }}" - image: "{{ service.image }}" - environment: "{{ service.environment | default(omit) }}" - volumes: "{{ service.volumes | reject('equalto', '') | list }}" - dimensions: "{{ service.dimensions }}" - privileged: "{{ service.privileged | default(False) }}" - healthcheck: "{{ service.healthcheck | default(omit) }}" - - name: Restart neutron-dhcp-agent container vars: service_name: "neutron-dhcp-agent" diff --git a/ansible/roles/neutron/tasks/config-host.yml b/ansible/roles/neutron/tasks/config-host.yml index a33919b123..8ad3f3d5a1 100644 --- a/ansible/roles/neutron/tasks/config-host.yml +++ b/ansible/roles/neutron/tasks/config-host.yml @@ -8,7 +8,7 @@ neutron_services | select_services_enabled_and_mapped_to_host | list | - intersect(["neutron-l3-agent", "neutron-linuxbridge-agent", "neutron-openvswitch-agent"]) | + intersect(["neutron-l3-agent", "neutron-openvswitch-agent"]) | list | length > 0 diff --git a/ansible/roles/neutron/tasks/config.yml b/ansible/roles/neutron/tasks/config.yml index a8c22e0068..1de5ca3585 100644 --- a/ansible/roles/neutron/tasks/config.yml +++ b/ansible/roles/neutron/tasks/config.yml @@ -69,7 +69,6 @@ - "neutron-eswitchd" - "neutron-infoblox-ipam-agent" - "neutron-l3-agent" - - "neutron-linuxbridge-agent" - "neutron-metadata-agent" - "neutron-metering-agent" - "neutron-mlnx-agent" @@ -144,20 +143,6 @@ - item.key in 
services_need_ml2_conf_ini with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}" -- name: Copying over linuxbridge_agent.ini - become: true - vars: - service_name: "neutron-linuxbridge-agent" - service: "{{ neutron_services[service_name] }}" - merge_configs: - sources: - - "{{ role_path }}/templates/linuxbridge_agent.ini.j2" - - "{{ node_custom_config }}/neutron/linuxbridge_agent.ini" - - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/linuxbridge_agent.ini" - dest: "{{ node_config_directory }}/{{ service_name }}/linuxbridge_agent.ini" - mode: "0660" - when: service | service_enabled_and_mapped_to_host - - name: Copying over openvswitch_agent.ini become: true vars: @@ -372,7 +357,6 @@ vars: service_name: "{{ item.0 }}" services_need_ml2_conf_ini: - - "neutron-linuxbridge-agent" - "neutron-openvswitch-agent" - "neutron-server" template: diff --git a/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2 b/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2 deleted file mode 100644 index 5b0ae990b8..0000000000 --- a/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2 +++ /dev/null @@ -1,24 +0,0 @@ -[agent] -{% if neutron_agent_extensions %} -extensions = {{ neutron_agent_extensions|map(attribute='name')|join(',') }} -{% endif %} - -[linux_bridge] -{% if inventory_hostname in groups["network"] or (inventory_hostname in groups["compute"] and computes_need_external_bridge | bool ) %} -{# Format: physnet1:br1,physnet2:br2 #} -physical_interface_mappings = {{ neutron_physical_networks.split(',') | zip(neutron_external_interface.split(',')) | map('join', ':') | join(',') }} -{% endif %} - -[securitygroup] -firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver - -[vxlan] -l2_population = true -local_ip = {{ tunnel_interface_address }} - -{% if enable_neutron_sriov | bool %} -[FDB] -# Allows instances using sriov ports to communicate with instances that do not. 
-# See https://docs.openstack.org/neutron/latest/admin/config-sriov.html -shared_physical_device_mappings = {{ neutron_sriov_physnets }} -{% endif %} diff --git a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 b/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 deleted file mode 100644 index 937abe37c8..0000000000 --- a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 +++ /dev/null @@ -1,44 +0,0 @@ -{ - "command": "neutron-linuxbridge-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/linuxbridge_agent.ini", - "config_files": [ - { - "source": "{{ container_config_directory }}/neutron.conf", - "dest": "/etc/neutron/neutron.conf", - "owner": "neutron", - "perm": "0600" - }, - {% if neutron_policy_file is defined %}{ - "source": "{{ container_config_directory }}/{{ neutron_policy_file }}", - "dest": "/etc/neutron/{{ neutron_policy_file }}", - "owner": "neutron", - "perm": "0600" - },{% endif %} -{% if check_extra_ml2_plugins is defined and check_extra_ml2_plugins.matched > 0 %}{% for plugin in check_extra_ml2_plugins.files %} - { - "source": "{{ container_config_directory }}/{{ plugin.path | basename }}", - "dest": "/etc/neutron/plugins/ml2/{{ plugin.path | basename }}", - "owner": "neutron", - "perm": "0600" - }, -{% endfor %}{% endif %} - { - "source": "{{ container_config_directory }}/linuxbridge_agent.ini", - "dest": "/etc/neutron/plugins/ml2/linuxbridge_agent.ini", - "owner": "neutron", - "perm": "0600" - }{% if kolla_copy_ca_into_containers | bool %}, - { - "source": "{{ container_config_directory }}/ca-certificates", - "dest": "/var/lib/kolla/share/ca-certificates", - "owner": "root", - "perm": "0600" - }{% endif %} - ], - "permissions": [ - { - "path": "/var/log/kolla/neutron", - "owner": "neutron:neutron", - "recurse": true - } - ] -} diff --git a/ansible/roles/neutron/templates/neutron.conf.j2 b/ansible/roles/neutron/templates/neutron.conf.j2 index 648ea97dfb..0c5323d7ee 
100644 --- a/ansible/roles/neutron/templates/neutron.conf.j2 +++ b/ansible/roles/neutron/templates/neutron.conf.j2 @@ -25,8 +25,6 @@ state_path = /var/lib/neutron/kolla {% if neutron_plugin_agent == "openvswitch" or (neutron_plugin_agent == "ovn" and neutron_ovn_dhcp_agent | bool) %} interface_driver = openvswitch -{% elif neutron_plugin_agent == "linuxbridge" %} -interface_driver = linuxbridge {% endif %} {% if enable_nova_fake | bool %} diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml index 9d4f00a0a4..38aab5c60a 100644 --- a/etc/kolla/globals.yml +++ b/etc/kolla/globals.yml @@ -160,9 +160,7 @@ workaround_ansible_issue_8743: yes # addresses for that reason. #neutron_external_interface: "eth1" -# Valid options are [ openvswitch, ovn, linuxbridge ] -# Do note linuxbridge is *EXPERIMENTAL* in Neutron since Zed and it requires extra tweaks to config to be usable. -# For details, see: https://docs.openstack.org/neutron/latest/admin/config-experimental-framework.html +# Valid options are [ openvswitch, ovn ] #neutron_plugin_agent: "openvswitch" # Valid options are [ internal, infoblox ] @@ -421,7 +419,7 @@ workaround_ansible_issue_8743: yes #enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'opensearch') }}" #enable_opensearch_dashboards: "{{ enable_opensearch | bool }}" #enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}" -#enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}" +#enable_openvswitch: "{{ enable_neutron }}" #enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}" #enable_ovs_dpdk: "no" #enable_osprofiler: "no" diff --git a/releasenotes/notes/remove-neutron-linuxbridge-b1a2457e848709f7.yaml b/releasenotes/notes/remove-neutron-linuxbridge-b1a2457e848709f7.yaml new file mode 100644 index 0000000000..f5625e7820 --- /dev/null +++ 
b/releasenotes/notes/remove-neutron-linuxbridge-b1a2457e848709f7.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Support for Linux Bridge mechanism driver has been removed. The driver was + already removed from neutron. From 77f7009654a02f38365220656a5d5e7f16836019 Mon Sep 17 00:00:00 2001 From: Pierre Riteau Date: Mon, 27 Oct 2025 14:38:53 +0100 Subject: [PATCH 065/165] CI: Add non-voting Kayobe jobs This adds Kayobe CI jobs to catch any regression triggered by kolla-ansible changes. Change-Id: I4007be957f0f5ed7d0eab839517a54e5e1f8f5ef Signed-off-by: Pierre Riteau --- zuul.d/project.yaml | 1 + zuul.d/scenarios/kayobe.yaml | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 zuul.d/scenarios/kayobe.yaml diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index ed9c7b718f..3fae656762 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -12,6 +12,7 @@ # https://review.opendev.org/c/openstack/kolla-ansible/+/864780 # - kolla-ansible-scenario-container-engine-migration - kolla-ansible-scenario-haproxy-fqdn + - kolla-ansible-scenario-kayobe - kolla-ansible-scenario-openbao - kolla-ansible-scenario-kvm - kolla-ansible-scenario-lets-encrypt diff --git a/zuul.d/scenarios/kayobe.yaml b/zuul.d/scenarios/kayobe.yaml new file mode 100644 index 0000000000..abc0d173a8 --- /dev/null +++ b/zuul.d/scenarios/kayobe.yaml @@ -0,0 +1,11 @@ +--- +- project-template: + name: kolla-ansible-scenario-kayobe + description: | + Runs a subset of Kayobe jobs in Kolla Ansible CI to catch regressions. 
+ check: + jobs: + - kayobe-overcloud-rocky10: + voting: false + - kayobe-overcloud-ubuntu-noble: + voting: false From e2a4e6c294e9c23f69445585bbdb32ccd261ff21 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 21 Oct 2025 15:52:34 +0200 Subject: [PATCH 066/165] CI: Use selenium in Horizon tests Change-Id: I814413fdb1a74675846a4ee2d5fbada9592fceef Signed-off-by: Michal Nasiadka --- .../tasks/main.yml | 59 +++++++++ tests/post.yml | 20 +++ tests/testinfra/test_horizon.py | 118 ++++++++++++++++++ zuul.d/scenarios/aio.yaml | 4 +- 4 files changed, 199 insertions(+), 2 deletions(-) create mode 100644 tests/testinfra/test_horizon.py diff --git a/roles/kolla-ansible-test-dashboard/tasks/main.yml b/roles/kolla-ansible-test-dashboard/tasks/main.yml index 561867aedd..e09d62a334 100644 --- a/roles/kolla-ansible-test-dashboard/tasks/main.yml +++ b/roles/kolla-ansible-test-dashboard/tasks/main.yml @@ -19,3 +19,62 @@ until: dashboard_output.content.find('Login') != -1 retries: 30 delay: 10 + +- name: Check if testinfra subdirectory exists + ansible.builtin.stat: + path: "{{ zuul.project.src_dir }}/tests/testinfra" + register: testinfra_dir + +- name: Run testinfra tests + when: testinfra_dir.stat.exists + block: + - name: Ensure testinfra subdirectory exists + ansible.builtin.file: + path: "/home/zuul/testinfra" + state: directory + + - name: Ensure screenshots directory exists + ansible.builtin.file: + path: "/home/zuul/testinfra/screenshots" + state: directory + + - name: Ensure required packages are installed + ansible.builtin.pip: + name: + - pytest-html + - pytest-testinfra + - selenium + virtualenv: "{{ kolla_ansible_venv_path }}" + virtualenv_command: "python3 -m venv" + + - name: Run Selenium Firefox container (Docker) + become: true + when: container_engine == 'docker' + community.docker.docker_container: + name: "selenium" + detach: true + image: "quay.io/opendevmirror/selenium-standalone-firefox:latest" + network_mode: host + + - name: Run Selenium Firefox 
container (Podman) + become: true + when: container_engine == 'podman' + containers.podman.podman_container: + name: "selenium" + detach: true + image: "quay.io/opendevmirror/selenium-standalone-firefox:latest" + network_mode: host + + - name: Wait for port 4444 to be up + ansible.builtin.wait_for: + port: 4444 + + - name: Run testinfra tests + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + py.test + --junit-xml /home/zuul/testinfra/testinfra-junit.xml -o junit_family=xunit1 + --html=/home/zuul/testinfra/test-results-testinfra.html --self-contained-html + -v tests/testinfra + chdir: "{{ zuul.project.src_dir }}" diff --git a/tests/post.yml b/tests/post.yml index 4d99737506..83b34638f5 100644 --- a/tests/post.yml +++ b/tests/post.yml @@ -7,6 +7,18 @@ zuul_work_dir: '/home/zuul/tempest' tasks: + - name: Return artifact to Zuul + zuul_return: + data: + zuul: + artifacts: + - name: "TestInfra Unit Test Report" + url: "testinfra/test-results-testinfra.html" + metadata: + type: unit_test_report + - name: "TestInfra Screenshots" + url: "testinfra/screenshots" + # TODO(mhiner): Currently only Docker to Podman migration is tested. # If we want to test the other direction we have to rework this. 
- name: Change container engine after the migration @@ -76,6 +88,14 @@ ara_report_local_dir: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}/ara-report" kolla_ansible_local_src_dir: "{{ zuul.executor.work_root }}/src/{{ zuul.project.canonical_hostname }}/openstack/kolla-ansible" tasks: + - name: Download testinfra to executor + synchronize: + src: "/home/zuul/testinfra" + dest: "{{ zuul.executor.log_root }}/" + mode: pull + # TODO(mnasiadka): Remove in G/2026.1 cycle + ignore_errors: true + - name: Check for existence of ara sqlite stat: path: "{{ ansible_env.HOME }}/.ara/server/ansible.sqlite" diff --git a/tests/testinfra/test_horizon.py b/tests/testinfra/test_horizon.py new file mode 100644 index 0000000000..84b101e22d --- /dev/null +++ b/tests/testinfra/test_horizon.py @@ -0,0 +1,118 @@ +# Copyright 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import time +import yaml + +from pathlib import Path +from selenium.common.exceptions import TimeoutException +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import WebDriverWait + +home = Path.home() +subpath = '/testinfra/screenshots/' +screenshot_path = str(home) + subpath + +with open("/etc/kolla/passwords.yml", 'r') as file: + passwords = yaml.safe_load(file) + admin_password = passwords.get('keystone_admin_password') + + +def test_horizon_screenshot(host): + + firefox_options = webdriver.FirefoxOptions() + + driver = webdriver.Remote( + command_executor='http://localhost:4444/wd/hub', + options=firefox_options) + + horizon_url = "https://192.0.2.10" + + try: + driver.get(horizon_url) + WebDriverWait(driver, 30).until( + lambda driver: driver.execute_script( + 'return document.readyState') == 'complete') + + time.sleep(5) + + original_size = driver.get_window_size() + required_width = driver.execute_script( + 'return document.body.parentNode.scrollWidth') + required_height = driver.execute_script( + 'return document.body.parentNode.scrollHeight') + 100 + driver.set_window_size(required_width, required_height) + + driver.find_element(By.TAG_NAME, 'body').\ + screenshot(screenshot_path + "horizon-main.png") # nosec B108 + + driver.set_window_size( + original_size['width'], original_size['height']) + + assert 'Login' in driver.title # nosec B101 + + except TimeoutException as e: + raise e + finally: + driver.quit() + + +def test_horizon_login(host): + + firefox_options = webdriver.FirefoxOptions() + + driver = webdriver.Remote( + command_executor='http://localhost:4444/wd/hub', + options=firefox_options) + + horizon_url = "https://192.0.2.10" + logout_url = '/'.join(( + horizon_url, + 'auth', + 'logout')) + + try: + driver.get(logout_url) + user_field = driver.find_element(By.ID, 'id_username') + user_field.send_keys('admin') + pass_field = driver.find_element(By.ID, 'id_password') + 
pass_field.send_keys(admin_password) + button = driver.find_element(By.CSS_SELECTOR, '.btn-primary') + button.click() + WebDriverWait(driver, 30).until( + lambda driver: driver.execute_script( + 'return document.readyState') == 'complete') + + time.sleep(10) + + original_size = driver.get_window_size() + required_width = driver.execute_script( + 'return document.body.parentNode.scrollWidth') + required_height = driver.execute_script( + 'return document.body.parentNode.scrollHeight') + 100 + driver.set_window_size(required_width, required_height) + + driver.find_element(By.TAG_NAME, 'body').\ + screenshot(screenshot_path + "horizon-logged-in.png") # nosec B108 + + driver.set_window_size( + original_size['width'], original_size['height']) + + assert 'Overview - OpenStack Dashboard' in driver.title # nosec B101 + + except TimeoutException as e: + raise e + finally: + driver.quit() diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index 39bbe5f05d..2e8252112c 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -8,8 +8,8 @@ - ^ansible/(action_plugins|filter_plugins|library|module_utils)/ - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq)/ - ^kolla_ansible/ - - ^roles/kolla-ansible-deploy/ - - ^tests/test-(core-openstack|dashboard).sh + - ^roles/kolla-ansible-(deploy|test-dashboard)/ + - ^tests/testinfra/test_horizon.py - ^tools/init-runonce - job: From 011034a141b385ed4c86fd832425e800484efdb6 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 28 Oct 2025 11:25:36 +0100 Subject: [PATCH 067/165] ovn: Mark as non-voting due to db related failures Change-Id: I5c27614fb46b62e2d9fc3ecc07379b1e899ed89b Signed-off-by: Michal Nasiadka --- zuul.d/scenarios/ovn.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/zuul.d/scenarios/ovn.yaml b/zuul.d/scenarios/ovn.yaml index 
6e94728457..cbeca20058 100644 --- a/zuul.d/scenarios/ovn.yaml +++ b/zuul.d/scenarios/ovn.yaml @@ -6,6 +6,7 @@ - ^ansible/group_vars/all/(neutron|octavia|openvswitch|ovn).yml - ^ansible/roles/(neutron|octavia|openvswitch|ovn-controller|ovn-db)/ - ^tests/test-ovn.sh + voting: false vars: scenario: ovn scenario_images_extra: @@ -47,7 +48,3 @@ - kolla-ansible-debian-bookworm-ovn-upgrade - kolla-ansible-ubuntu-noble-ovn - kolla-ansible-ubuntu-noble-ovn-upgrade - gate: - jobs: - - kolla-ansible-ubuntu-noble-ovn - - kolla-ansible-ubuntu-noble-ovn-upgrade From 3380770aeef5eebc4fd0dd1f7b6c36cde0c881fa Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 10 Sep 2025 07:44:04 +0200 Subject: [PATCH 068/165] Bump ansible-core to 2.18/2.19 Bumped collections in requirements-core.yml to Ansible 12 major versions, see [1]. [1]: https://github.com/ansible-community/ansible-build-data/blob/main/12/ansible-12.1.0.yaml Depends-On: https://review.opendev.org/c/openstack/kolla/+/960307 Co-authored-by: Doug Szumski doug@stackhpc.com Change-Id: If8a29052d8a43ffc0fef11514adabb5e4ab9d977 Signed-off-by: Michal Nasiadka --- ansible/action_plugins/merge_configs.py | 13 ++- ansible/action_plugins/merge_yaml.py | 12 ++- ansible/group_vars/all/common.yml | 2 +- .../module_utils/kolla_container_worker.py | 2 +- ansible/module_utils/kolla_docker_worker.py | 7 +- ansible/module_utils/kolla_podman_worker.py | 14 ++- ansible/roles/aodh/tasks/config.yml | 2 +- ansible/roles/barbican/tasks/config.yml | 2 +- ansible/roles/blazar/tasks/config.yml | 2 +- ansible/roles/ceilometer/tasks/config.yml | 2 +- ansible/roles/cinder/tasks/config.yml | 2 +- ansible/roles/cloudkitty/tasks/config.yml | 2 +- ansible/roles/cyborg/tasks/config.yml | 2 +- ansible/roles/designate/tasks/config.yml | 2 +- ansible/roles/glance/tasks/config.yml | 2 +- ansible/roles/gnocchi/tasks/config.yml | 2 +- ansible/roles/heat/tasks/config.yml | 2 +- ansible/roles/horizon/tasks/policy_item.yml | 2 +- 
ansible/roles/ironic/tasks/config.yml | 2 +- ansible/roles/keystone/tasks/config.yml | 2 +- ansible/roles/kuryr/tasks/config.yml | 2 +- ansible/roles/magnum/tasks/config.yml | 2 +- ansible/roles/manila/tasks/config.yml | 2 +- ansible/roles/masakari/tasks/config.yml | 2 +- ansible/roles/mistral/tasks/config.yml | 2 +- ansible/roles/neutron/defaults/main.yml | 12 +-- ansible/roles/neutron/tasks/config.yml | 2 +- ansible/roles/nova-cell/tasks/config.yml | 2 +- .../roles/nova-cell/tasks/create_cells.yml | 4 +- ansible/roles/nova/tasks/config.yml | 2 +- ansible/roles/nova/tasks/map_cell0.yml | 2 +- ansible/roles/octavia/tasks/config.yml | 2 +- ansible/roles/ovn-db/tasks/bootstrap-db.yml | 2 +- .../roles/ovn-db/tasks/bootstrap-initial.yml | 8 +- ansible/roles/placement/tasks/config.yml | 2 +- ansible/roles/prechecks/vars/main.yml | 4 +- ansible/roles/tacker/tasks/config.yml | 2 +- ansible/roles/trove/tasks/config.yml | 2 +- ansible/roles/venus/tasks/config.yml | 2 +- ansible/roles/watcher/tasks/config.yml | 2 +- ansible/roles/zun/tasks/config.yml | 2 +- doc/source/conf.py | 8 +- .../notes/ansible-2.19-986e55799b72dbf5.yaml | 5 + requirements-core.yml | 10 +- requirements.txt | 2 +- test-requirements.txt | 28 ++++++ tests/test_kolla_toolbox.py | 99 ++++++++++--------- zuul.d/base.yaml | 10 +- zuul.d/project.yaml | 2 +- zuul.d/python3-jobs.yaml | 23 +++++ 50 files changed, 208 insertions(+), 119 deletions(-) create mode 100644 releasenotes/notes/ansible-2.19-986e55799b72dbf5.yaml create mode 100644 zuul.d/python3-jobs.yaml diff --git a/ansible/action_plugins/merge_configs.py b/ansible/action_plugins/merge_configs.py index a825835506..67a7da1cd5 100644 --- a/ansible/action_plugins/merge_configs.py +++ b/ansible/action_plugins/merge_configs.py @@ -20,10 +20,21 @@ from ansible import constants from ansible.plugins import action +# TODO(dougszu): From Ansible 12 onwards we must explicitly trust templates. 
+# Since this feature is not supported in previous releases, we define a +# noop method here for backwards compatibility. This can be removed in the +# G cycle. +try: + from ansible.template import trust_as_template +except ImportError: + def trust_as_template(template): + return template + from io import StringIO from oslo_config import iniparser + _ORPHAN_SECTION = 'TEMPORARY_ORPHAN_VARIABLE_SECTION' DOCUMENTATION = ''' @@ -150,7 +161,7 @@ def read_config(self, source, config): # Only use config if present if os.access(source, os.R_OK): with open(source, 'r') as f: - template_data = f.read() + template_data = trust_as_template(f.read()) # set search path to mimic 'template' module behavior searchpath = [ diff --git a/ansible/action_plugins/merge_yaml.py b/ansible/action_plugins/merge_yaml.py index ea7350bf73..d2c15eb702 100644 --- a/ansible/action_plugins/merge_yaml.py +++ b/ansible/action_plugins/merge_yaml.py @@ -23,6 +23,16 @@ from ansible import errors as ansible_errors from ansible.plugins import action +# TODO(dougszu): From Ansible 12 onwards we must explicitly trust templates. +# Since this feature is not supported in previous releases, we define a +# noop method here for backwards compatibility. This can be removed in the +# G cycle. 
+try: + from ansible.template import trust_as_template +except ImportError: + def trust_as_template(template): + return template + DOCUMENTATION = ''' --- module: merge_yaml @@ -91,7 +101,7 @@ def read_config(self, source): # Only use config if present if source and os.access(source, os.R_OK): with open(source, 'r') as f: - template_data = f.read() + template_data = trust_as_template(f.read()) # set search path to mimic 'template' module behavior searchpath = [ diff --git a/ansible/group_vars/all/common.yml b/ansible/group_vars/all/common.yml index 7b8f83332b..4a82c29ea3 100644 --- a/ansible/group_vars/all/common.yml +++ b/ansible/group_vars/all/common.yml @@ -93,7 +93,7 @@ run_default_volumes_docker: [] # Dimension options for Docker Containers # NOTE(mnasiadka): Lower 1073741816 nofile limit on EL9 (RHEL9/CentOS Stream 9/Rocky Linux 9) # fixes at least rabbitmq and mariadb -default_container_dimensions: "{{ default_container_dimensions_el9 if ansible_facts.os_family == 'RedHat' else '{}' }}" +default_container_dimensions: "{{ default_container_dimensions_el9 if ansible_facts.os_family == 'RedHat' else {} }}" default_container_dimensions_el9: "{{ default_docker_dimensions_el9 if kolla_container_engine == 'docker' else default_podman_dimensions_el9 }}" default_docker_dimensions_el9: ulimits: diff --git a/ansible/module_utils/kolla_container_worker.py b/ansible/module_utils/kolla_container_worker.py index 3e16ac2c9b..d9d2bf7142 100644 --- a/ansible/module_utils/kolla_container_worker.py +++ b/ansible/module_utils/kolla_container_worker.py @@ -410,7 +410,7 @@ def generate_volumes(self, binds=None): vol_dict = dict() for vol in volumes: - if len(vol) == 0: + if not vol: continue if ':' not in vol: diff --git a/ansible/module_utils/kolla_docker_worker.py b/ansible/module_utils/kolla_docker_worker.py index f0799f0645..267cc27a86 100644 --- a/ansible/module_utils/kolla_docker_worker.py +++ b/ansible/module_utils/kolla_docker_worker.py @@ -469,8 +469,11 @@ def 
create_volume(self, name=None): labels={'kolla_managed': 'true'}) def create_container_volumes(self): - volumes = self.params.get("volumes", []) - + volumes = self.params.get('volumes') + if not volumes: + return + # Filter out null / empty string volumes + volumes = [v for v in volumes if v] for volume in volumes: volume_name = volume.split(":")[0] if "/" in volume_name: diff --git a/ansible/module_utils/kolla_podman_worker.py b/ansible/module_utils/kolla_podman_worker.py index f26846dedc..d7b09a1354 100644 --- a/ansible/module_utils/kolla_podman_worker.py +++ b/ansible/module_utils/kolla_podman_worker.py @@ -85,7 +85,7 @@ def prepare_container_args(self): # functionality is broken mounts = [] filtered_volumes = {} - volumes = self.params.get('volumes', []) + volumes = self.params.get('volumes') if volumes: self.parse_volumes(volumes, mounts, filtered_volumes) # we can delete original volumes so it won't raise error later @@ -149,10 +149,10 @@ def prepare_container_args(self): # Therefore, we must parse them and set the permissions ourselves # and send them to API separately. 
def parse_volumes(self, volumes, mounts, filtered_volumes): - # we can ignore empty strings - volumes = [item for item in volumes if item.strip()] - for item in volumes: + if not item or not item.strip(): + # we can ignore empty strings or null volumes + continue # if it starts with / it is bind not volume if item[0] == '/': mode = None @@ -642,7 +642,11 @@ def create_volume(self, name=None): self.result = vol.attrs def create_container_volumes(self): - volumes = self.params.get("volumes", []) or [] + volumes = self.params.get('volumes') + if not volumes: + return + # Filter out null / empty string volumes + volumes = [v for v in volumes if v] for volume in volumes: volume_name = volume.split(":")[0] diff --git a/ansible/roles/aodh/tasks/config.yml b/ansible/roles/aodh/tasks/config.yml index cbf5db25ad..2542d061b3 100644 --- a/ansible/roles/aodh/tasks/config.yml +++ b/ansible/roles/aodh/tasks/config.yml @@ -26,7 +26,7 @@ aodh_policy_file: "{{ aodh_policy.results.0.stat.path | basename }}" aodh_policy_file_path: "{{ aodh_policy.results.0.stat.path }}" when: - - aodh_policy.results + - aodh_policy.results | length > 0 - name: Copying over existing policy file template: diff --git a/ansible/roles/barbican/tasks/config.yml b/ansible/roles/barbican/tasks/config.yml index 60a06c64dd..2870735d90 100644 --- a/ansible/roles/barbican/tasks/config.yml +++ b/ansible/roles/barbican/tasks/config.yml @@ -40,7 +40,7 @@ barbican_policy_file: "{{ barbican_policy.results.0.stat.path | basename }}" barbican_policy_file_path: "{{ barbican_policy.results.0.stat.path }}" when: - - barbican_policy.results + - barbican_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/blazar/tasks/config.yml b/ansible/roles/blazar/tasks/config.yml index 942107fd26..d411eecf85 100644 --- a/ansible/roles/blazar/tasks/config.yml +++ b/ansible/roles/blazar/tasks/config.yml @@ -26,7 +26,7 @@ blazar_policy_file: "{{ blazar_policy.results.0.stat.path | basename }}" 
blazar_policy_file_path: "{{ blazar_policy.results.0.stat.path }}" when: - - blazar_policy.results + - blazar_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/ceilometer/tasks/config.yml b/ansible/roles/ceilometer/tasks/config.yml index eda99e6e71..faa40fa9db 100644 --- a/ansible/roles/ceilometer/tasks/config.yml +++ b/ansible/roles/ceilometer/tasks/config.yml @@ -155,7 +155,7 @@ ceilometer_policy_file: "{{ ceilometer_policy.results.0.stat.path | basename }}" ceilometer_policy_file_path: "{{ ceilometer_policy.results.0.stat.path }}" when: - - ceilometer_policy.results + - ceilometer_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/cinder/tasks/config.yml b/ansible/roles/cinder/tasks/config.yml index bc4c2c838e..2b7a05bee5 100644 --- a/ansible/roles/cinder/tasks/config.yml +++ b/ansible/roles/cinder/tasks/config.yml @@ -38,7 +38,7 @@ cinder_policy_file: "{{ cinder_policy.results.0.stat.path | basename }}" cinder_policy_file_path: "{{ cinder_policy.results.0.stat.path }}" when: - - cinder_policy.results + - cinder_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/cloudkitty/tasks/config.yml b/ansible/roles/cloudkitty/tasks/config.yml index fae192d301..2b1d357d8f 100644 --- a/ansible/roles/cloudkitty/tasks/config.yml +++ b/ansible/roles/cloudkitty/tasks/config.yml @@ -26,7 +26,7 @@ cloudkitty_policy_file: "{{ cloudkitty_policy.results.0.stat.path | basename }}" cloudkitty_policy_file_path: "{{ cloudkitty_policy.results.0.stat.path }}" when: - - cloudkitty_policy.results + - cloudkitty_policy.results | length > 0 - name: Check if custom {{ cloudkitty_custom_metrics_yaml_file }} exists stat: diff --git a/ansible/roles/cyborg/tasks/config.yml b/ansible/roles/cyborg/tasks/config.yml index c1fb81c0c2..463b1a127b 100644 --- a/ansible/roles/cyborg/tasks/config.yml +++ b/ansible/roles/cyborg/tasks/config.yml @@ -26,7 +26,7 @@ cyborg_policy_file: 
"{{ cyborg_policy.results.0.stat.path | basename }}" cyborg_policy_file_path: "{{ cyborg_policy.results.0.stat.path }}" when: - - cyborg_policy.results + - cyborg_policy.results | length > 0 - name: Copying over existing policy file template: diff --git a/ansible/roles/designate/tasks/config.yml b/ansible/roles/designate/tasks/config.yml index d61d2958dd..2b05382488 100644 --- a/ansible/roles/designate/tasks/config.yml +++ b/ansible/roles/designate/tasks/config.yml @@ -26,7 +26,7 @@ designate_policy_file: "{{ designate_policy.results.0.stat.path | basename }}" designate_policy_file_path: "{{ designate_policy.results.0.stat.path }}" when: - - designate_policy.results + - designate_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/glance/tasks/config.yml b/ansible/roles/glance/tasks/config.yml index d268e92d32..52ee34a973 100644 --- a/ansible/roles/glance/tasks/config.yml +++ b/ansible/roles/glance/tasks/config.yml @@ -30,7 +30,7 @@ glance_policy_file: "{{ glance_policy.results.0.stat.path | basename }}" glance_policy_file_path: "{{ glance_policy.results.0.stat.path }}" when: - - glance_policy.results + - glance_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/gnocchi/tasks/config.yml b/ansible/roles/gnocchi/tasks/config.yml index 2c6db03278..b978044e29 100644 --- a/ansible/roles/gnocchi/tasks/config.yml +++ b/ansible/roles/gnocchi/tasks/config.yml @@ -30,7 +30,7 @@ gnocchi_policy_file: "{{ gnocchi_policy.results.0.stat.path | basename }}" gnocchi_policy_file_path: "{{ gnocchi_policy.results.0.stat.path }}" when: - - gnocchi_policy.results + - gnocchi_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/heat/tasks/config.yml b/ansible/roles/heat/tasks/config.yml index 107e72198e..10271bd8d4 100644 --- a/ansible/roles/heat/tasks/config.yml +++ b/ansible/roles/heat/tasks/config.yml @@ -26,7 +26,7 @@ heat_policy_file: "{{ 
heat_policy.results.0.stat.path | basename }}" heat_policy_file_path: "{{ heat_policy.results.0.stat.path }}" when: - - heat_policy.results + - heat_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/horizon/tasks/policy_item.yml b/ansible/roles/horizon/tasks/policy_item.yml index 708f60e6ef..7b427108a3 100644 --- a/ansible/roles/horizon/tasks/policy_item.yml +++ b/ansible/roles/horizon/tasks/policy_item.yml @@ -22,4 +22,4 @@ set_fact: custom_policy: "{{ custom_policy + [overwritten_files.results.0.stat.path] }}" when: - - overwritten_files.results + - overwritten_files.results | length > 0 diff --git a/ansible/roles/ironic/tasks/config.yml b/ansible/roles/ironic/tasks/config.yml index 98a3e66e2e..4f7caed0c0 100644 --- a/ansible/roles/ironic/tasks/config.yml +++ b/ansible/roles/ironic/tasks/config.yml @@ -26,7 +26,7 @@ ironic_policy_file: "{{ ironic_policy.results.0.stat.path | basename }}" ironic_policy_file_path: "{{ ironic_policy.results.0.stat.path }}" when: - - ironic_policy.results + - ironic_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/keystone/tasks/config.yml b/ansible/roles/keystone/tasks/config.yml index 3f1a26fc39..df6617dd41 100644 --- a/ansible/roles/keystone/tasks/config.yml +++ b/ansible/roles/keystone/tasks/config.yml @@ -26,7 +26,7 @@ keystone_policy_file: "{{ keystone_policy.results.0.stat.path | basename }}" keystone_policy_file_path: "{{ keystone_policy.results.0.stat.path }}" when: - - keystone_policy.results + - keystone_policy.results | length > 0 - name: Check if Keystone domain-specific config is supplied stat: diff --git a/ansible/roles/kuryr/tasks/config.yml b/ansible/roles/kuryr/tasks/config.yml index d0d436a87e..10bfff47db 100644 --- a/ansible/roles/kuryr/tasks/config.yml +++ b/ansible/roles/kuryr/tasks/config.yml @@ -26,7 +26,7 @@ kuryr_policy_file: "{{ kuryr_policy.results.0.stat.path | basename }}" kuryr_policy_file_path: "{{ 
kuryr_policy.results.0.stat.path }}" when: - - kuryr_policy.results + - kuryr_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/magnum/tasks/config.yml b/ansible/roles/magnum/tasks/config.yml index 6243153244..5921da31e0 100644 --- a/ansible/roles/magnum/tasks/config.yml +++ b/ansible/roles/magnum/tasks/config.yml @@ -26,7 +26,7 @@ magnum_policy_file: "{{ magnum_policy.results.0.stat.path | basename }}" magnum_policy_file_path: "{{ magnum_policy.results.0.stat.path }}" when: - - magnum_policy.results + - magnum_policy.results | length > 0 - name: Check if kubeconfig file is supplied stat: diff --git a/ansible/roles/manila/tasks/config.yml b/ansible/roles/manila/tasks/config.yml index f334acc9c5..1607316a26 100644 --- a/ansible/roles/manila/tasks/config.yml +++ b/ansible/roles/manila/tasks/config.yml @@ -31,7 +31,7 @@ manila_policy_file: "{{ manila_policy.results.0.stat.path | basename }}" manila_policy_file_path: "{{ manila_policy.results.0.stat.path }}" when: - - manila_policy.results + - manila_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/masakari/tasks/config.yml b/ansible/roles/masakari/tasks/config.yml index 106faf4284..16e14e99f4 100644 --- a/ansible/roles/masakari/tasks/config.yml +++ b/ansible/roles/masakari/tasks/config.yml @@ -26,7 +26,7 @@ masakari_policy_file: "{{ masakari_policy.results.0.stat.path | basename }}" masakari_policy_file_path: "{{ masakari_policy.results.0.stat.path }}" when: - - masakari_policy.results + - masakari_policy.results | length > 0 - name: Copying over existing policy file template: diff --git a/ansible/roles/mistral/tasks/config.yml b/ansible/roles/mistral/tasks/config.yml index 4cb4fcdebf..d9ea731db3 100644 --- a/ansible/roles/mistral/tasks/config.yml +++ b/ansible/roles/mistral/tasks/config.yml @@ -26,7 +26,7 @@ mistral_policy_file: "{{ mistral_policy.results.0.stat.path | basename }}" mistral_policy_file_path: "{{ 
mistral_policy.results.0.stat.path }}" when: - - mistral_policy.results + - mistral_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml index 731672997f..fcac69adc6 100644 --- a/ansible/roles/neutron/defaults/main.yml +++ b/ansible/roles/neutron/defaults/main.yml @@ -529,9 +529,9 @@ neutron_dhcp_agent_default_volumes: - "kolla_logs:/var/log/kolla/" - "{{ '/dev/shm:/dev/shm' }}" - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}" - - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' }}" - - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' }}" - - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' }}" + - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' else '' }}" + - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" + - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" neutron_l3_agent_default_volumes: - "{{ node_config_directory }}/neutron-l3-agent/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" @@ -542,9 +542,9 @@ neutron_l3_agent_default_volumes: - "kolla_logs:/var/log/kolla/" - "{{ '/dev/shm:/dev/shm' }}" - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}" - - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' }}" - - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | 
bool and kolla_container_engine == 'podman' }}" - - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' }}" + - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' else '' }}" + - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" + - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" neutron_sriov_agent_default_volumes: - "{{ node_config_directory }}/neutron-sriov-agent/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" diff --git a/ansible/roles/neutron/tasks/config.yml b/ansible/roles/neutron/tasks/config.yml index 1de5ca3585..18b8f3991d 100644 --- a/ansible/roles/neutron/tasks/config.yml +++ b/ansible/roles/neutron/tasks/config.yml @@ -38,7 +38,7 @@ neutron_policy_file: "{{ neutron_policy.results.0.stat.path | basename }}" neutron_policy_file_path: "{{ neutron_policy.results.0.stat.path }}" when: - - neutron_policy.results + - neutron_policy.results | length > 0 - name: Copying over existing policy file template: diff --git a/ansible/roles/nova-cell/tasks/config.yml b/ansible/roles/nova-cell/tasks/config.yml index 8ff28b3292..7783d5ee09 100644 --- a/ansible/roles/nova-cell/tasks/config.yml +++ b/ansible/roles/nova-cell/tasks/config.yml @@ -35,7 +35,7 @@ nova_policy_file: "{{ nova_policy.results.0.stat.path | basename }}" nova_policy_file_path: "{{ nova_policy.results.0.stat.path }}" when: - - nova_policy.results + - nova_policy.results | length > 0 - name: Check for vendordata file stat: diff --git a/ansible/roles/nova-cell/tasks/create_cells.yml b/ansible/roles/nova-cell/tasks/create_cells.yml index e4606d88cd..66c7f18759 100644 --- a/ansible/roles/nova-cell/tasks/create_cells.yml +++ b/ansible/roles/nova-cell/tasks/create_cells.yml 
@@ -26,7 +26,7 @@ - '"already exists" not in nova_cell_create.stdout' when: - inventory_hostname == groups[nova_conductor.group][0] | default(None) - - nova_cell_settings | length == 0 + - not nova_cell_settings | bool - name: Update cell vars: @@ -51,5 +51,5 @@ - nova_cell_updated.rc != 0 when: - inventory_hostname == groups[nova_conductor.group][0] | default(None) - - nova_cell_settings | length > 0 + - nova_cell_settings | bool - nova_cell_settings.cell_message_queue != nova_cell_rpc_transport_url or nova_cell_settings.cell_database != nova_cell_database_url diff --git a/ansible/roles/nova/tasks/config.yml b/ansible/roles/nova/tasks/config.yml index 92e55f417b..729cf802a3 100644 --- a/ansible/roles/nova/tasks/config.yml +++ b/ansible/roles/nova/tasks/config.yml @@ -26,7 +26,7 @@ nova_policy_file: "{{ nova_policy.results.0.stat.path | basename }}" nova_policy_file_path: "{{ nova_policy.results.0.stat.path }}" when: - - nova_policy.results + - nova_policy.results | length > 0 - name: Check for vendordata file stat: diff --git a/ansible/roles/nova/tasks/map_cell0.yml b/ansible/roles/nova/tasks/map_cell0.yml index 1fb6c4314b..429b2fb955 100644 --- a/ansible/roles/nova/tasks/map_cell0.yml +++ b/ansible/roles/nova/tasks/map_cell0.yml @@ -59,7 +59,7 @@ failed_when: - nova_cell0_updated.rc != 0 when: - - nova_cell_settings | length > 0 + - nova_cell_settings | bool - nova_cell_settings.cell_database != nova_cell0_connection run_once: True delegate_to: "{{ groups[nova_api.group][0] }}" diff --git a/ansible/roles/octavia/tasks/config.yml b/ansible/roles/octavia/tasks/config.yml index 73990ac6c8..63910990c1 100644 --- a/ansible/roles/octavia/tasks/config.yml +++ b/ansible/roles/octavia/tasks/config.yml @@ -29,7 +29,7 @@ octavia_policy_file: "{{ octavia_policy.results.0.stat.path | basename }}" octavia_policy_file_path: "{{ octavia_policy.results.0.stat.path }}" when: - - octavia_policy.results + - octavia_policy.results | length > 0 - name: Copying over existing policy 
file template: diff --git a/ansible/roles/ovn-db/tasks/bootstrap-db.yml b/ansible/roles/ovn-db/tasks/bootstrap-db.yml index d325ca9ac3..89282ab98b 100644 --- a/ansible/roles/ovn-db/tasks/bootstrap-db.yml +++ b/ansible/roles/ovn-db/tasks/bootstrap-db.yml @@ -96,4 +96,4 @@ delay: 6 when: - enable_ovn_sb_db_relay | bool - loop: "{{ range(1, (ovn_sb_db_relay_count | int) +1) }}" + loop: "{{ range(1, (ovn_sb_db_relay_count | int) +1) | list }}" diff --git a/ansible/roles/ovn-db/tasks/bootstrap-initial.yml b/ansible/roles/ovn-db/tasks/bootstrap-initial.yml index 693e2c1ddf..fde7295039 100644 --- a/ansible/roles/ovn-db/tasks/bootstrap-initial.yml +++ b/ansible/roles/ovn-db/tasks/bootstrap-initial.yml @@ -20,7 +20,7 @@ changed_when: false register: ovn_nb_db_cluster_status when: groups['ovn-nb-db_leader'] is defined and inventory_hostname in groups.get('ovn-nb-db_had_volume_False', '') - delegate_to: "{{ groups['ovn-nb-db_leader'][0] }}" + delegate_to: "{{ groups['ovn-nb-db_leader'][0] if groups['ovn-nb-db_leader'] is defined else omit }}" - name: Check SB cluster status command: > @@ -30,7 +30,7 @@ changed_when: false register: ovn_sb_db_cluster_status when: groups['ovn-sb-db_leader'] is defined and inventory_hostname in groups.get('ovn-sb-db_had_volume_False', '') - delegate_to: "{{ groups['ovn-sb-db_leader'][0] }}" + delegate_to: "{{ groups['ovn-sb-db_leader'][0] if groups['ovn-sb-db_leader'] is defined else omit }}" - name: Remove an old node with the same ip address as the new node in NB DB vars: @@ -42,7 +42,7 @@ when: - ovn_nb_db_cluster_status.stdout is defined - (ovn_nb_db_cluster_status.stdout is search('at tcp:' + api_interface_address)) and inventory_hostname in groups.get('ovn-nb-db_had_volume_False', '') - delegate_to: "{{ groups['ovn-nb-db_leader'][0] }}" + delegate_to: "{{ groups['ovn-nb-db_leader'][0] if groups['ovn-nb-db_leader'] is defined else omit }}" - name: Remove an old node with the same ip address as the new node in SB DB vars: @@ -54,7 +54,7 @@ 
when: - ovn_sb_db_cluster_status.stdout is defined - (ovn_sb_db_cluster_status.stdout is search('at tcp:' + api_interface_address)) and inventory_hostname in groups.get('ovn-sb-db_had_volume_False', '') - delegate_to: "{{ groups['ovn-sb-db_leader'][0] }}" + delegate_to: "{{ groups['ovn-sb-db_leader'][0] if groups['ovn-sb-db_leader'] is defined else omit }}" - name: Set bootstrap args fact for NB (new member) set_fact: diff --git a/ansible/roles/placement/tasks/config.yml b/ansible/roles/placement/tasks/config.yml index 9093dc4bdc..8926746825 100644 --- a/ansible/roles/placement/tasks/config.yml +++ b/ansible/roles/placement/tasks/config.yml @@ -26,7 +26,7 @@ placement_policy_file: "{{ placement_policy.results.0.stat.path | basename }}" placement_policy_file_path: "{{ placement_policy.results.0.stat.path }}" when: - - placement_policy.results + - placement_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/prechecks/vars/main.yml b/ansible/roles/prechecks/vars/main.yml index 7cb1fe28e1..1da91bb291 100644 --- a/ansible/roles/prechecks/vars/main.yml +++ b/ansible/roles/prechecks/vars/main.yml @@ -1,8 +1,8 @@ --- docker_version_min: '18.09' docker_py_version_min: '3.4.1' -ansible_version_min: '2.17' -ansible_version_max: '2.18' +ansible_version_min: '2.18' +ansible_version_max: '2.19' # Top level keys should match ansible_facts.distribution. 
# These map to lists of supported releases (ansible_facts.distribution_release) or diff --git a/ansible/roles/tacker/tasks/config.yml b/ansible/roles/tacker/tasks/config.yml index 95669e5268..f6473b0d31 100644 --- a/ansible/roles/tacker/tasks/config.yml +++ b/ansible/roles/tacker/tasks/config.yml @@ -26,7 +26,7 @@ tacker_policy_file: "{{ tacker_policy.results.0.stat.path | basename }}" tacker_policy_file_path: "{{ tacker_policy.results.0.stat.path }}" when: - - tacker_policy.results + - tacker_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/trove/tasks/config.yml b/ansible/roles/trove/tasks/config.yml index fa2dc44179..8427d0079b 100644 --- a/ansible/roles/trove/tasks/config.yml +++ b/ansible/roles/trove/tasks/config.yml @@ -26,7 +26,7 @@ trove_policy_file: "{{ trove_policy.results.0.stat.path | basename }}" trove_policy_file_path: "{{ trove_policy.results.0.stat.path }}" when: - - trove_policy.results + - trove_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/venus/tasks/config.yml b/ansible/roles/venus/tasks/config.yml index 05cfe4de3e..28b9ee80f5 100644 --- a/ansible/roles/venus/tasks/config.yml +++ b/ansible/roles/venus/tasks/config.yml @@ -26,7 +26,7 @@ venus_policy_file: "{{ venus_policy.results.0.stat.path | basename }}" venus_policy_file_path: "{{ venus_policy.results.0.stat.path }}" when: - - venus_policy.results + - venus_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/watcher/tasks/config.yml b/ansible/roles/watcher/tasks/config.yml index ee1a6c6912..1b21a5202d 100644 --- a/ansible/roles/watcher/tasks/config.yml +++ b/ansible/roles/watcher/tasks/config.yml @@ -26,7 +26,7 @@ watcher_policy_file: "{{ watcher_policy.results.0.stat.path | basename }}" watcher_policy_file_path: "{{ watcher_policy.results.0.stat.path }}" when: - - watcher_policy.results + - watcher_policy.results | length > 0 - include_tasks: copy-certs.yml 
when: diff --git a/ansible/roles/zun/tasks/config.yml b/ansible/roles/zun/tasks/config.yml index 7d5100189d..7ef4c7e3b5 100644 --- a/ansible/roles/zun/tasks/config.yml +++ b/ansible/roles/zun/tasks/config.yml @@ -31,7 +31,7 @@ zun_policy_file: "{{ zun_policy.results.0.stat.path | basename }}" zun_policy_file_path: "{{ zun_policy.results.0.stat.path }}" when: - - zun_policy.results + - zun_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/doc/source/conf.py b/doc/source/conf.py index add1790c4a..785cb15e20 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -142,10 +142,10 @@ TESTED_RUNTIMES_GOVERNANCE_URL =\ 'https://governance.openstack.org/tc/reference/runtimes/{}.html'.format(KOLLA_OPENSTACK_RELEASE) -ANSIBLE_CORE_VERSION_MIN = '2.17' -ANSIBLE_CORE_VERSION_MAX = '2.18' -ANSIBLE_VERSION_MIN = '10' -ANSIBLE_VERSION_MAX = '11' +ANSIBLE_CORE_VERSION_MIN = '2.18' +ANSIBLE_CORE_VERSION_MAX = '2.19' +ANSIBLE_VERSION_MIN = '11' +ANSIBLE_VERSION_MAX = '12' GLOBAL_VARIABLE_MAP = { '|ANSIBLE_CORE_VERSION_MIN|': ANSIBLE_CORE_VERSION_MIN, diff --git a/releasenotes/notes/ansible-2.19-986e55799b72dbf5.yaml b/releasenotes/notes/ansible-2.19-986e55799b72dbf5.yaml new file mode 100644 index 0000000000..33ed5451df --- /dev/null +++ b/releasenotes/notes/ansible-2.19-986e55799b72dbf5.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Minimum supported Ansible version is now ``11`` (ansible-core 2.18) + and maximum supported is ``12`` (ansible-core 2.19). 
diff --git a/requirements-core.yml b/requirements-core.yml index a1d367ef2d..2eca2c9f36 100644 --- a/requirements-core.yml +++ b/requirements-core.yml @@ -2,19 +2,19 @@ collections: - name: ansible.netcommon source: https://galaxy.ansible.com - version: <8 + version: <9 - name: ansible.posix source: https://galaxy.ansible.com - version: <2 + version: <3 - name: ansible.utils source: https://galaxy.ansible.com - version: <6 + version: <7 - name: community.crypto source: https://galaxy.ansible.com - version: <3 + version: <4 - name: community.general source: https://galaxy.ansible.com - version: <11 + version: <12 - name: community.docker source: https://galaxy.ansible.com version: <5 diff --git a/requirements.txt b/requirements.txt index 568d7f9b63..bf2591b8e1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,7 @@ hvac>=0.10.1 # Apache-2.0 Jinja2>=3 # BSD License (3 clause) # Ansible and ansible's json_query -ansible-core>=2.17,<2.19 # GPLv3 +ansible-core>=2.18,!=2.19.0,<2.20; python_version >= '3.11' # GPLv3 jmespath>=0.9.3 # MIT # ini parsing diff --git a/test-requirements.txt b/test-requirements.txt index 303f3d5b1e..8451a2b904 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,3 +1,31 @@ +# Password hashing +bcrypt>=3.0.0 # Apache-2.0 + +# password generation +cryptography>=2.1 # BSD/Apache-2.0 + +# Hashicorp Vault +hvac>=0.10.1 # Apache-2.0 + +# templating +Jinja2>=3 # BSD License (3 clause) + +# Ansible and ansible's json_query +ansible-core>=2.18,!=2.19.0,<2.20; python_version >= '3.11' # GPLv3 +jmespath>=0.9.3 # MIT + +# ini parsing +oslo.config>=5.2.0 # Apache-2.0 + +# password generation +oslo.utils>=3.33.0 # Apache-2.0 + +# Password hashing +passlib[bcrypt]>=1.0.0 # BSD + +# CLI +cliff>=4.7.0 # Apache-2.0 + # coverage testing coverage!=4.4,>=4.0 # Apache-2.0 diff --git a/tests/test_kolla_toolbox.py b/tests/test_kolla_toolbox.py index 112bd403c8..9dca821cc4 100644 --- a/tests/test_kolla_toolbox.py +++ 
b/tests/test_kolla_toolbox.py @@ -13,6 +13,7 @@ # limitations under the License. import builtins +import contextlib import json import os import sys @@ -20,6 +21,18 @@ from ansible.module_utils import basic from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_bytes +try: + from ansible.module_utils.testing import patch_module_args +except ImportError: + # TODO(dougszu): Remove this exception handler when Python 3.10 support + # is not required. Python 3.10 isn't supported by Ansible Core 2.18 which + # provides patch_module_args + @contextlib.contextmanager + def patch_module_args(args): + serialized_args = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args})) + with mock.patch.object(basic, '_ANSIBLE_ARGS', serialized_args): + yield + from importlib.machinery import SourceFileLoader from oslotest import base from unittest import mock @@ -33,13 +46,6 @@ kolla_toolbox_file).load_module() -def set_module_args(args): - """Prepare arguments so they will be picked up during module creation.""" - - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) - basic._ANSIBLE_ARGS = to_bytes(args) - - class AnsibleExitJson(BaseException): """Exception to be raised by module.exit_json and caught by a test case.""" @@ -307,40 +313,40 @@ class TestModuleInteraction(TestKollaToolboxModule): """Class focused on testing user input data from playbook.""" def test_create_ansible_module_missing_required_module_name(self): - set_module_args({ + ansible_module_args = { 'container_engine': 'docker' - }) - - error = self.assertRaises(AnsibleFailJson, - kolla_toolbox.create_ansible_module) + } + with patch_module_args(ansible_module_args): + error = self.assertRaises(AnsibleFailJson, + kolla_toolbox.create_ansible_module) self.assertIn('missing required arguments: module_name', error.result['msg']) def test_create_ansible_module_missing_required_container_engine(self): - set_module_args({ + ansible_module_args = { 'module_name': 'url' - }) - 
- error = self.assertRaises(AnsibleFailJson, - kolla_toolbox.create_ansible_module) + } + with patch_module_args(ansible_module_args): + error = self.assertRaises(AnsibleFailJson, + kolla_toolbox.create_ansible_module) self.assertIn('missing required arguments: container_engine', error.result['msg']) def test_create_ansible_module_invalid_container_engine(self): - set_module_args({ + ansible_module_args = { 'module_name': 'url', 'container_engine': 'podmano' - }) - - error = self.assertRaises(AnsibleFailJson, - kolla_toolbox.create_ansible_module) + } + with patch_module_args(ansible_module_args): + error = self.assertRaises(AnsibleFailJson, + kolla_toolbox.create_ansible_module) self.assertIn( 'value of container_engine must be one of: podman, docker', error.result['msg'] ) def test_create_ansible_module_success(self): - args = { + ansible_module_args = { 'container_engine': 'docker', 'module_name': 'file', 'module_args': { @@ -357,12 +363,10 @@ def test_create_ansible_module_success(self): 'timeout': 180, 'api_version': '1.5' } - set_module_args(args) - - module = kolla_toolbox.create_ansible_module() - + with patch_module_args(ansible_module_args): + module = kolla_toolbox.create_ansible_module() self.assertIsInstance(module, AnsibleModule) - self.assertEqual(args, module.params) + self.assertEqual(ansible_module_args, module.params) class TestContainerEngineClientIntraction(TestKollaToolboxModule): @@ -381,14 +385,14 @@ def mock_import_error(self, name, globals, locals, fromlist, level): return self.original_import(name, globals, locals, fromlist, level) def test_podman_client_params(self): - set_module_args({ + ansible_module_args = { 'module_name': 'ping', 'container_engine': 'podman', 'api_version': '1.47', 'timeout': 155 - }) - - module = kolla_toolbox.create_ansible_module() + } + with patch_module_args(ansible_module_args): + module = kolla_toolbox.create_ansible_module() mock_podman = mock.MagicMock() mock_podman_errors = mock.MagicMock() import_dict = 
{'podman': mock_podman, @@ -403,14 +407,14 @@ def test_podman_client_params(self): ) def test_docker_client_params(self): - set_module_args({ + ansible_module_args = { 'module_name': 'ping', 'container_engine': 'docker', 'api_version': '1.47', 'timeout': 155 - }) - - module = kolla_toolbox.create_ansible_module() + } + with patch_module_args(ansible_module_args): + module = kolla_toolbox.create_ansible_module() mock_docker = mock.MagicMock() mock_docker_errors = mock.MagicMock() import_dict = {'docker': mock_docker, @@ -425,14 +429,14 @@ def test_docker_client_params(self): ) def test_create_container_client_podman_not_called_with_auto(self): - set_module_args({ + ansible_module_args = { 'module_name': 'ping', 'container_engine': 'podman', 'api_version': 'auto', 'timeout': 90 - }) - - module = kolla_toolbox.create_ansible_module() + } + with patch_module_args(ansible_module_args): + module = kolla_toolbox.create_ansible_module() mock_podman = mock.MagicMock() mock_podman_errors = mock.MagicMock() import_dict = {'podman': mock_podman, @@ -446,12 +450,13 @@ def test_create_container_client_podman_not_called_with_auto(self): ) def test_create_container_client_podman_importerror(self): - set_module_args({ + ansible_module_args = { 'module_name': 'ping', 'container_engine': 'podman' - }) + } self.module_to_mock_import = 'podman' - module = kolla_toolbox.create_ansible_module() + with patch_module_args(ansible_module_args): + module = kolla_toolbox.create_ansible_module() with mock.patch('builtins.__import__', side_effect=self.mock_import_error): @@ -462,13 +467,13 @@ def test_create_container_client_podman_importerror(self): error.result['msg']) def test_create_container_client_docker_importerror(self): - set_module_args({ + ansible_module_args = { 'module_name': 'ping', 'container_engine': 'docker' - }) - + } self.module_to_mock_import = 'docker' - module = kolla_toolbox.create_ansible_module() + with patch_module_args(ansible_module_args): + module = 
kolla_toolbox.create_ansible_module() with mock.patch('builtins.__import__', side_effect=self.mock_import_error): diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index bae3aab5b1..8564afa1f8 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -46,11 +46,11 @@ # Test latest ansible-core version on Ubuntu, minimum supported on others. # Use SLURP version (two releases back) on SLURP upgrades. ansible_core_version_constraint: >- - {{ ansible_core_version_slurp if is_slurp else ansible_core_version_min if - (is_upgrade or ansible_facts.distribution != "Ubuntu") else ansible_core_version_max }} - ansible_core_version_slurp: "==2.16.*" - ansible_core_version_max: "==2.18.*" - ansible_core_version_min: "==2.17.*" + {{ ansible_core_version_slurp if is_slurp else ansible_core_version_min if is_upgrade + or ansible_facts.distribution != "Ubuntu" else ansible_core_version_max }} + ansible_core_version_slurp: "==2.17.*" + ansible_core_version_max: "==2.19.*" + ansible_core_version_min: "==2.18.*" # NOTE(mgoddard): Test the use of interface names with dashes. api_interface_name: "vxlan-0" api_network_prefix: "192.0.2." diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index 3fae656762..2c078fc032 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -31,7 +31,7 @@ - kolla-ansible-scenario-telemetry - kolla-ansible-scenario-venus - openstack-cover-jobs - - openstack-python3-jobs + - openstack-python3-jobs-kolla-ansible - periodic-stable-jobs - publish-openstack-docs-pti - release-notes-jobs-python3 diff --git a/zuul.d/python3-jobs.yaml b/zuul.d/python3-jobs.yaml new file mode 100644 index 0000000000..5dc7903ed0 --- /dev/null +++ b/zuul.d/python3-jobs.yaml @@ -0,0 +1,23 @@ +- project-template: + name: openstack-python3-jobs-kolla-ansible + # NOTE(mnasiadka): Local definition to skip py310 jobs on Flamingo + description: | + Runs unit tests for an OpenStack Python project under the CPython + version 3 releases designated for testing the latest release. 
+ check: + jobs: + - openstack-tox-pep8 + # py3.12 testing is added as mandatory from 2025.1 release onwards. + # From 2026.1, we run it as periodic only(assuming py3.10 and py3.13 + # will be enough coverage to run on every change) + - openstack-tox-py312 + gate: + jobs: + - openstack-tox-pep8 + # py3.12 testing is added as mandatory from 2025.1 release onwards. + # From 2026.1, we run it as periodic only(assuming py3.10 and py3.13 + # will be enough coverage to run on every change) + - openstack-tox-py312 + post: + jobs: + - publish-openstack-python-branch-tarball \ No newline at end of file From 7b8705d2f6418bd6e71799c0f71571cd04542af7 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 28 Oct 2025 12:29:33 +0100 Subject: [PATCH 069/165] CI: Add service-* roles to AIO scenario trigger Change-Id: Ie28d0e2a89e3eb9d3123954f3a58496e7348d33e Signed-off-by: Michal Nasiadka --- zuul.d/scenarios/aio.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index 39bbe5f05d..b05a3e095f 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -6,7 +6,7 @@ - ^ansible/group_vars/all/(common|fluentd|glance|haproxy|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml - ^ansible/(action_plugins|filter_plugins|library|module_utils)/ - - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq)/ + - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq|service-.*)/ - ^kolla_ansible/ - ^roles/kolla-ansible-deploy/ - ^tests/test-(core-openstack|dashboard).sh From e56dd78a4e45a0bfbc0619764a272808b90cb4e7 Mon Sep 17 00:00:00 2001 From: Fabio Sbano Date: 
Tue, 28 Oct 2025 02:45:15 -0300 Subject: [PATCH 070/165] service-uwsgi-config: Add static-map configuration That will fix missing Horizon images Change-Id: I9b1ee456dbe36f48bf78448c5b12d1a849b61167 Signed-off-by: Fabio Sbano --- ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 index ce42d352d5..a7495b4395 100644 --- a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 +++ b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 @@ -33,3 +33,4 @@ thunder-lock = true uid = {{ service_uwsgi_config_uid }} {% endif %} worker-reload-mercy = {{ service_uwsgi_config_worker_timeout }} +static-map = /static=/var/lib/kolla/venv/lib/python3/site-packages/static From af2544be54ba816ec1a12e5e083fd09f29df64fb Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 27 Oct 2025 09:09:50 +0100 Subject: [PATCH 071/165] Drop venus It has been marked as inactive and venus dashboard plugin is breaking Horizon Kolla builds. 
Change-Id: I78cf7a33bf1025f5c8e4b76ecfeef7d8a3126d61 Signed-off-by: Michal Nasiadka --- README.rst | 1 - ansible/group_vars/all/horizon.yml | 1 - ansible/group_vars/all/venus.yml | 10 - ansible/inventory/all-in-one | 9 - ansible/inventory/multinode | 9 - ansible/roles/cron/tasks/config.yml | 1 - .../templates/cron-logrotate-venus.conf.j2 | 3 - .../templates/conf/filter/01-rewrite.conf.j2 | 5 - ansible/roles/horizon/defaults/main.yml | 1 - ansible/roles/prometheus/defaults/main.yml | 4 - ansible/roles/venus/defaults/main.yml | 172 ------------------ ansible/roles/venus/handlers/main.yml | 28 --- ansible/roles/venus/tasks/bootstrap.yml | 38 ---- .../roles/venus/tasks/check-containers.yml | 3 - ansible/roles/venus/tasks/check.yml | 4 - ansible/roles/venus/tasks/clone.yml | 8 - ansible/roles/venus/tasks/config.yml | 65 ------- ansible/roles/venus/tasks/config_validate.yml | 7 - ansible/roles/venus/tasks/copy-certs.yml | 6 - .../roles/venus/tasks/deploy-containers.yml | 2 - ansible/roles/venus/tasks/deploy.yml | 14 -- ansible/roles/venus/tasks/loadbalancer.yml | 7 - ansible/roles/venus/tasks/main.yml | 2 - ansible/roles/venus/tasks/precheck.yml | 27 --- ansible/roles/venus/tasks/pull.yml | 3 - ansible/roles/venus/tasks/reconfigure.yml | 2 - ansible/roles/venus/tasks/register.yml | 7 - ansible/roles/venus/tasks/stop.yml | 6 - ansible/roles/venus/tasks/upgrade.yml | 7 - .../roles/venus/templates/venus-api.json.j2 | 25 --- .../venus/templates/venus-manager.json.j2 | 25 --- ansible/roles/venus/templates/venus.conf.j2 | 38 ---- ansible/roles/venus/vars/main.yml | 2 - ansible/site.yml | 21 --- etc/kolla/globals.yml | 2 - etc/kolla/passwords.yml | 3 - .../notes/drop-venus-b929071fb79b8026.yaml | 4 + tests/check-logs.sh | 3 - tests/run.yml | 9 +- tests/templates/globals-default.j2 | 6 - tests/templates/inventory.j2 | 9 - tests/test-venus.sh | 94 ---------- zuul.d/project.yaml | 1 - zuul.d/scenarios/venus.yaml | 43 ----- 44 files changed, 5 insertions(+), 732 deletions(-) delete 
mode 100644 ansible/group_vars/all/venus.yml delete mode 100644 ansible/roles/cron/templates/cron-logrotate-venus.conf.j2 delete mode 100644 ansible/roles/venus/defaults/main.yml delete mode 100644 ansible/roles/venus/handlers/main.yml delete mode 100644 ansible/roles/venus/tasks/bootstrap.yml delete mode 100644 ansible/roles/venus/tasks/check-containers.yml delete mode 100644 ansible/roles/venus/tasks/check.yml delete mode 100644 ansible/roles/venus/tasks/clone.yml delete mode 100644 ansible/roles/venus/tasks/config.yml delete mode 100644 ansible/roles/venus/tasks/config_validate.yml delete mode 100644 ansible/roles/venus/tasks/copy-certs.yml delete mode 100644 ansible/roles/venus/tasks/deploy-containers.yml delete mode 100644 ansible/roles/venus/tasks/deploy.yml delete mode 100644 ansible/roles/venus/tasks/loadbalancer.yml delete mode 100644 ansible/roles/venus/tasks/main.yml delete mode 100644 ansible/roles/venus/tasks/precheck.yml delete mode 100644 ansible/roles/venus/tasks/pull.yml delete mode 100644 ansible/roles/venus/tasks/reconfigure.yml delete mode 100644 ansible/roles/venus/tasks/register.yml delete mode 100644 ansible/roles/venus/tasks/stop.yml delete mode 100644 ansible/roles/venus/tasks/upgrade.yml delete mode 100644 ansible/roles/venus/templates/venus-api.json.j2 delete mode 100644 ansible/roles/venus/templates/venus-manager.json.j2 delete mode 100644 ansible/roles/venus/templates/venus.conf.j2 delete mode 100644 ansible/roles/venus/vars/main.yml create mode 100644 releasenotes/notes/drop-venus-b929071fb79b8026.yaml delete mode 100755 tests/test-venus.sh delete mode 100644 zuul.d/scenarios/venus.yaml diff --git a/README.rst b/README.rst index 6a1ccf9620..4ee2c7dd04 100644 --- a/README.rst +++ b/README.rst @@ -64,7 +64,6 @@ Kolla Ansible deploys containers for the following OpenStack projects: - Skyline (`APIServer `__ and `Console `__) - `Tacker `__ - `Trove `__ -- `Venus `__ - `Watcher `__ - `Zun `__ diff --git a/ansible/group_vars/all/horizon.yml 
b/ansible/group_vars/all/horizon.yml index fcab934b25..8e477ad9c1 100644 --- a/ansible/group_vars/all/horizon.yml +++ b/ansible/group_vars/all/horizon.yml @@ -14,7 +14,6 @@ enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}" enable_horizon_octavia: "{{ enable_octavia | bool }}" enable_horizon_tacker: "{{ enable_tacker | bool }}" enable_horizon_trove: "{{ enable_trove | bool }}" -enable_horizon_venus: "{{ enable_venus | bool }}" enable_horizon_watcher: "{{ enable_watcher | bool }}" enable_horizon_zun: "{{ enable_zun | bool }}" diff --git a/ansible/group_vars/all/venus.yml b/ansible/group_vars/all/venus.yml deleted file mode 100644 index 85908647b1..0000000000 --- a/ansible/group_vars/all/venus.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -enable_venus: "no" - -venus_internal_fqdn: "{{ kolla_internal_fqdn }}" -venus_external_fqdn: "{{ kolla_external_fqdn }}" -venus_internal_endpoint: "{{ venus_internal_fqdn | kolla_url(internal_protocol, venus_api_port) }}" -venus_public_endpoint: "{{ venus_external_fqdn | kolla_url(public_protocol, venus_api_public_port) }}" -venus_api_port: "10010" -venus_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else venus_api_port }}" -venus_api_listen_port: "{{ venus_api_port }}" diff --git a/ansible/inventory/all-in-one b/ansible/inventory/all-in-one index 1d47ec7982..6ab9227f5b 100644 --- a/ansible/inventory/all-in-one +++ b/ansible/inventory/all-in-one @@ -165,9 +165,6 @@ control [blazar:children] control -[venus:children] -monitoring - [letsencrypt:children] loadbalancer @@ -590,12 +587,6 @@ ovn-database [ovn-sb-db-relay:children] ovn-database -[venus-api:children] -venus - -[venus-manager:children] -venus - [letsencrypt-webserver:children] letsencrypt diff --git a/ansible/inventory/multinode b/ansible/inventory/multinode index 9c35be0475..972e4059e5 100644 --- a/ansible/inventory/multinode +++ b/ansible/inventory/multinode @@ -183,9 +183,6 @@ control 
[blazar:children] control -[venus:children] -monitoring - [letsencrypt:children] loadbalancer @@ -608,12 +605,6 @@ ovn-database [ovn-sb-db-relay:children] ovn-database -[venus-api:children] -venus - -[venus-manager:children] -venus - [letsencrypt-webserver:children] letsencrypt diff --git a/ansible/roles/cron/tasks/config.yml b/ansible/roles/cron/tasks/config.yml index adb5bfe5ef..e1a5e30e90 100644 --- a/ansible/roles/cron/tasks/config.yml +++ b/ansible/roles/cron/tasks/config.yml @@ -75,7 +75,6 @@ - { name: "skyline", enabled: "{{ enable_skyline | bool }}" } - { name: "tacker", enabled: "{{ enable_tacker | bool }}" } - { name: "trove", enabled: "{{ enable_trove | bool }}" } - - { name: "venus", enabled: "{{ enable_venus | bool }}" } - { name: "watcher", enabled: "{{ enable_watcher | bool }}" } - { name: "zun", enabled: "{{ enable_zun | bool }}" } template: diff --git a/ansible/roles/cron/templates/cron-logrotate-venus.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-venus.conf.j2 deleted file mode 100644 index 5ff3c425a7..0000000000 --- a/ansible/roles/cron/templates/cron-logrotate-venus.conf.j2 +++ /dev/null @@ -1,3 +0,0 @@ -"/var/log/kolla/venus/*.log" -{ -} diff --git a/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2 b/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2 index c8e35c8598..66a1897a20 100644 --- a/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2 +++ b/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2 @@ -136,11 +136,6 @@ pattern ^(masakari-engine|masakari-api)$ tag openstack_python - - key programname - pattern ^(venus-api|venus-manager)$ - tag openstack_python - key programname pattern ^(skyline)$ diff --git a/ansible/roles/horizon/defaults/main.yml b/ansible/roles/horizon/defaults/main.yml index d1dcae651f..01a6e3082a 100644 --- a/ansible/roles/horizon/defaults/main.yml +++ b/ansible/roles/horizon/defaults/main.yml @@ -20,7 +20,6 @@ horizon_services: ENABLE_OCTAVIA: "{{ 'yes' if 
enable_horizon_octavia | bool else 'no' }}" ENABLE_TACKER: "{{ 'yes' if enable_horizon_tacker | bool else 'no' }}" ENABLE_TROVE: "{{ 'yes' if enable_horizon_trove | bool else 'no' }}" - ENABLE_VENUS: "{{ 'yes' if enable_horizon_venus | bool else 'no' }}" ENABLE_WATCHER: "{{ 'yes' if enable_horizon_watcher | bool else 'no' }}" ENABLE_ZUN: "{{ 'yes' if enable_horizon_zun | bool else 'no' }}" FORCE_GENERATE: "{{ 'yes' if horizon_dev_mode | bool else 'no' }}" diff --git a/ansible/roles/prometheus/defaults/main.yml b/ansible/roles/prometheus/defaults/main.yml index 883fdce2fe..27affe4472 100644 --- a/ansible/roles/prometheus/defaults/main.yml +++ b/ansible/roles/prometheus/defaults/main.yml @@ -286,10 +286,6 @@ prometheus_blackbox_exporter_endpoints_default: - "trove:os_endpoint:{{ trove_public_base_endpoint }}" - "{{ ('trove_internal:os_endpoint:' + trove_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}" enabled: "{{ enable_trove | bool }}" - - endpoints: - - "venus:os_endpoint:{{ venus_public_endpoint }}" - - "{{ ('venus_internal:os_endpoint:' + venus_internal_endpoint) if not kolla_same_external_internal_vip | bool }}" - enabled: "{{ enable_venus | bool }}" - endpoints: - "watcher:os_endpoint:{{ watcher_public_endpoint }}" - "{{ ('watcher_internal:os_endpoint:' + watcher_internal_endpoint) if not kolla_same_external_internal_vip | bool }}" diff --git a/ansible/roles/venus/defaults/main.yml b/ansible/roles/venus/defaults/main.yml deleted file mode 100644 index 2677cb5263..0000000000 --- a/ansible/roles/venus/defaults/main.yml +++ /dev/null @@ -1,172 +0,0 @@ ---- -venus_services: - venus-api: - container_name: venus_api - group: venus-api - enabled: true - image: "{{ venus_api_image_full }}" - volumes: "{{ venus_api_default_volumes + venus_api_extra_volumes }}" - dimensions: "{{ venus_api_dimensions }}" - healthcheck: "{{ venus_api_healthcheck }}" - haproxy: - venus_api: - enabled: "{{ enable_venus }}" - mode: "http" - external: false - port: 
"{{ venus_api_port }}" - backend_http_extra: - - "option httpchk" - venus_api_external: - enabled: "{{ enable_venus }}" - mode: "http" - external: true - external_fqdn: "{{ venus_external_fqdn }}" - port: "{{ venus_api_public_port }}" - backend_http_extra: - - "option httpchk" - venus-manager: - container_name: venus_manager - group: venus-manager - enabled: true - image: "{{ venus_manager_image_full }}" - volumes: "{{ venus_manager_default_volumes + venus_manager_extra_volumes }}" - dimensions: "{{ venus_manager_dimensions }}" - -#################### -# Config Validate -#################### -venus_config_validation: - - generator: "/venus/tools/config/venus-config-generator.conf" - config: "/etc/venus/venus.conf" - -#################### -# Database -#################### -venus_database_name: "venus" -venus_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}venus{% endif %}" -venus_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}" - -#################### -# Database sharding -#################### -venus_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ venus_database_shard_id }}{% else %}{{ database_user }}{% endif %}" -venus_database_shard_id: "{{ mariadb_default_database_shard_id | int }}" -venus_database_shard: - users: - - user: "{{ venus_database_user }}" - password: "{{ venus_database_password }}" - shard_id: "{{ venus_database_shard_id }}" - rules: - - schema: "{{ venus_database_name }}" - shard_id: "{{ venus_database_shard_id }}" - - user: "{{ venus_database_user }}" - shard_id: "{{ venus_database_shard_id }}" - - -#################### -# Docker -#################### -venus_tag: "{{ openstack_tag }}" - -venus_api_image: "{{ docker_image_url }}venus-api" -venus_api_tag: "{{ venus_tag }}" -venus_api_image_full: "{{ venus_api_image }}:{{ venus_api_tag }}" - -venus_manager_image: "{{ docker_image_url }}venus-manager" 
-venus_manager_tag: "{{ venus_tag }}" -venus_manager_image_full: "{{ venus_manager_image }}:{{ venus_manager_tag }}" - -venus_api_dimensions: "{{ default_container_dimensions }}" -venus_manager_dimensions: "{{ default_container_dimensions }}" - -venus_api_enable_healthchecks: "{{ enable_container_healthchecks }}" -venus_api_healthcheck_interval: "{{ default_container_healthcheck_interval }}" -venus_api_healthcheck_retries: "{{ default_container_healthcheck_retries }}" -venus_api_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" -venus_api_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ venus_api_port }}"] -venus_api_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" -venus_api_healthcheck: - interval: "{{ venus_api_healthcheck_interval }}" - retries: "{{ venus_api_healthcheck_retries }}" - start_period: "{{ venus_api_healthcheck_start_period }}" - test: "{% if venus_api_enable_healthchecks | bool %}{{ venus_api_healthcheck_test }}{% else %}NONE{% endif %}" - timeout: "{{ venus_api_healthcheck_timeout }}" - -venus_api_default_volumes: - - "{{ node_config_directory }}/venus-api/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "kolla_logs:/var/log/kolla/" - - "{{ '/dev/shm:/dev/shm' }}" - - "{{ kolla_dev_repos_directory ~ '/venus:/dev-mode/venus' if venus_dev_mode | bool else '' }}" - - "venus:/var/lib/venus/" -venus_manager_default_volumes: - - "{{ node_config_directory }}/venus-manager/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "kolla_logs:/var/log/kolla/" - - "{{ '/dev/shm:/dev/shm' }}" - - "{{ kolla_dev_repos_directory ~ '/venus:/dev-mode/venus' if venus_dev_mode | bool else '' }}" - - 
"venus:/var/lib/venus/" - -venus_extra_volumes: "{{ default_extra_volumes }}" -venus_api_extra_volumes: "{{ venus_extra_volumes }}" -venus_manager_extra_volumes: "{{ venus_extra_volumes }}" - -#################### -# OpenStack -#################### -venus_logging_debug: "{{ openstack_logging_debug }}" - -venus_keystone_user: "venus" - -openstack_venus_auth: "{{ openstack_auth }}" - - -#################### -# Kolla -#################### -venus_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}" -venus_dev_repos_pull: "{{ kolla_dev_repos_pull }}" -venus_dev_mode: "{{ kolla_dev_mode }}" -venus_source_version: "{{ kolla_source_version }}" - -#################### -# logging -#################### -openstack_logging_default_format_string: "%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [- req-None - - - - -] %(instance)s%(message)s" -openstack_logging_context_format_string: "%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s" - -#################### -# Notifications -#################### -venus_notification_topics: - - name: notifications - enabled: "{{ enable_ceilometer | bool }}" - -venus_enabled_notification_topics: "{{ venus_notification_topics | selectattr('enabled', 'equalto', true) | list }}" - -#################### -# Keystone -#################### -venus_ks_services: - - name: "venus" - type: "LMS" - description: "Log Manager Service" - endpoints: - - {'interface': 'internal', 'url': '{{ venus_internal_endpoint }}'} - - {'interface': 'public', 'url': '{{ venus_public_endpoint }}'} - -venus_ks_users: - - project: "service" - user: "{{ venus_keystone_user }}" - password: "{{ venus_keystone_password }}" - role: "admin" - -# Database -venus_database_enable_tls_internal: "{{ database_enable_tls_internal | bool }}" - -################### -# Copy certificates -################### -venus_copy_certs: "{{ kolla_copy_ca_into_containers | bool or 
venus_database_enable_tls_internal | bool }}" diff --git a/ansible/roles/venus/handlers/main.yml b/ansible/roles/venus/handlers/main.yml deleted file mode 100644 index 1f8b3fdb50..0000000000 --- a/ansible/roles/venus/handlers/main.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: Restart venus-api container - vars: - service_name: "venus-api" - service: "{{ venus_services[service_name] }}" - become: true - kolla_container: - action: "recreate_or_restart_container" - common_options: "{{ docker_common_options }}" - name: "{{ service.container_name }}" - image: "{{ service.image }}" - volumes: "{{ service.volumes | reject('equalto', '') | list }}" - dimensions: "{{ service.dimensions }}" - healthcheck: "{{ service.healthcheck | default(omit) }}" - -- name: Restart venus-manager container - vars: - service_name: "venus-manager" - service: "{{ venus_services[service_name] }}" - become: true - kolla_container: - action: "recreate_or_restart_container" - common_options: "{{ docker_common_options }}" - name: "{{ service.container_name }}" - image: "{{ service.image }}" - volumes: "{{ service.volumes | reject('equalto', '') | list }}" - dimensions: "{{ service.dimensions }}" - healthcheck: "{{ service.healthcheck | default(omit) }}" diff --git a/ansible/roles/venus/tasks/bootstrap.yml b/ansible/roles/venus/tasks/bootstrap.yml deleted file mode 100644 index 57938e60f1..0000000000 --- a/ansible/roles/venus/tasks/bootstrap.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- name: Creating venus database - become: true - kolla_toolbox: - container_engine: "{{ kolla_container_engine }}" - module_name: mysql_db - module_args: - ca_cert: "{{ openstack_cacert if database_enable_tls_internal | bool else omit }}" - login_host: "{{ database_address }}" - login_port: "{{ database_port }}" - login_user: "{{ venus_database_shard_root_user }}" - login_password: "{{ database_password }}" - name: "{{ venus_database_name }}" - run_once: True - delegate_to: "{{ groups['venus-api'][0] }}" - when: - - 
not use_preconfigured_databases | bool - -- name: Creating venus database user and setting permissions - become: true - kolla_toolbox: - container_engine: "{{ kolla_container_engine }}" - module_name: mysql_user - module_args: - ca_cert: "{{ openstack_cacert if database_enable_tls_internal | bool else omit }}" - login_host: "{{ database_address }}" - login_port: "{{ database_port }}" - login_user: "{{ venus_database_shard_root_user }}" - login_password: "{{ database_password }}" - name: "{{ venus_database_user }}" - password: "{{ venus_database_password }}" - host: "%" - priv: "{{ venus_database_name }}.*:ALL" - append_privs: "yes" - run_once: True - delegate_to: "{{ groups['venus-api'][0] }}" - when: - - not use_preconfigured_databases | bool diff --git a/ansible/roles/venus/tasks/check-containers.yml b/ansible/roles/venus/tasks/check-containers.yml deleted file mode 100644 index b7e2f7c29f..0000000000 --- a/ansible/roles/venus/tasks/check-containers.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- import_role: - name: service-check-containers diff --git a/ansible/roles/venus/tasks/check.yml b/ansible/roles/venus/tasks/check.yml deleted file mode 100644 index 63d29a6f31..0000000000 --- a/ansible/roles/venus/tasks/check.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- name: Checking Venus containers - import_role: - role: service-check diff --git a/ansible/roles/venus/tasks/clone.yml b/ansible/roles/venus/tasks/clone.yml deleted file mode 100644 index 4d85cc0e80..0000000000 --- a/ansible/roles/venus/tasks/clone.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Cloning venus source repository for development - become: true - git: - repo: "{{ venus_git_repository }}" - dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}" - update: "{{ venus_dev_repos_pull }}" - version: "{{ venus_source_version }}" diff --git a/ansible/roles/venus/tasks/config.yml b/ansible/roles/venus/tasks/config.yml deleted file mode 100644 index 28b9ee80f5..0000000000 --- 
a/ansible/roles/venus/tasks/config.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item.key }}" - state: "directory" - owner: "{{ config_owner_user }}" - group: "{{ config_owner_group }}" - mode: "0770" - become: true - with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}" - -- name: Check if policies shall be overwritten - stat: - path: "{{ item }}" - run_once: True - delegate_to: localhost - register: venus_policy - with_first_found: - - files: "{{ supported_policy_format_list }}" - paths: - - "{{ node_custom_config }}/venus/" - skip: true - -- name: Set venus policy file - set_fact: - venus_policy_file: "{{ venus_policy.results.0.stat.path | basename }}" - venus_policy_file_path: "{{ venus_policy.results.0.stat.path }}" - when: - - venus_policy.results | length > 0 - -- include_tasks: copy-certs.yml - when: - - venus_copy_certs | bool - -- name: Copying over config.json files for services - template: - src: "{{ item.key }}.json.j2" - dest: "{{ node_config_directory }}/{{ item.key }}/config.json" - mode: "0660" - become: true - with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}" - -- name: Copying over venus.conf - vars: - service_name: "{{ item.key }}" - merge_configs: - sources: - - "{{ role_path }}/templates/venus.conf.j2" - - "{{ node_custom_config }}/global.conf" - - "{{ node_custom_config }}/venus.conf" - - "{{ node_custom_config }}/venus/{{ item.key }}.conf" - - "{{ node_custom_config }}/venus/{{ inventory_hostname }}/venus.conf" - dest: "{{ node_config_directory }}/{{ item.key }}/venus.conf" - mode: "0660" - become: true - with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}" - -- name: Copying over existing policy file - template: - src: "{{ venus_policy_file_path }}" - dest: "{{ node_config_directory }}/{{ item.key }}/{{ venus_policy_file }}" - mode: "0660" - when: - - venus_policy_file is 
defined - with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}" diff --git a/ansible/roles/venus/tasks/config_validate.yml b/ansible/roles/venus/tasks/config_validate.yml deleted file mode 100644 index 57ab862017..0000000000 --- a/ansible/roles/venus/tasks/config_validate.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- import_role: - name: service-config-validate - vars: - service_config_validate_services: "{{ venus_services }}" - service_name: "{{ project_name }}" - service_config_validation: "{{ venus_config_validation }}" diff --git a/ansible/roles/venus/tasks/copy-certs.yml b/ansible/roles/venus/tasks/copy-certs.yml deleted file mode 100644 index c0452d546e..0000000000 --- a/ansible/roles/venus/tasks/copy-certs.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: "Copy certificates and keys for {{ project_name }}" - import_role: - role: service-cert-copy - vars: - project_services: "{{ venus_services }}" diff --git a/ansible/roles/venus/tasks/deploy-containers.yml b/ansible/roles/venus/tasks/deploy-containers.yml deleted file mode 100644 index eb24ab5c7a..0000000000 --- a/ansible/roles/venus/tasks/deploy-containers.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- import_tasks: check-containers.yml diff --git a/ansible/roles/venus/tasks/deploy.yml b/ansible/roles/venus/tasks/deploy.yml deleted file mode 100644 index b9775dda85..0000000000 --- a/ansible/roles/venus/tasks/deploy.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- import_tasks: register.yml - -- import_tasks: config.yml - -- import_tasks: check-containers.yml - -- include_tasks: clone.yml - when: venus_dev_mode | bool - -- import_tasks: bootstrap.yml - -- name: Flush handlers - meta: flush_handlers diff --git a/ansible/roles/venus/tasks/loadbalancer.yml b/ansible/roles/venus/tasks/loadbalancer.yml deleted file mode 100644 index b692351e63..0000000000 --- a/ansible/roles/venus/tasks/loadbalancer.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: "Configure loadbalancer for {{ project_name }}" - 
import_role: - name: loadbalancer-config - vars: - project_services: "{{ venus_services }}" - tags: always diff --git a/ansible/roles/venus/tasks/main.yml b/ansible/roles/venus/tasks/main.yml deleted file mode 100644 index bc5d1e6257..0000000000 --- a/ansible/roles/venus/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include_tasks: "{{ kolla_action }}.yml" diff --git a/ansible/roles/venus/tasks/precheck.yml b/ansible/roles/venus/tasks/precheck.yml deleted file mode 100644 index 10408219d1..0000000000 --- a/ansible/roles/venus/tasks/precheck.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- import_role: - name: service-precheck - vars: - service_precheck_services: "{{ venus_services }}" - service_name: "{{ project_name }}" - -- name: Get container facts - become: true - kolla_container_facts: - action: get_containers - container_engine: "{{ kolla_container_engine }}" - name: - - venus_api - check_mode: false - register: container_facts - -- name: Checking free port for Venus API - wait_for: - host: "{{ api_interface_address }}" - port: "{{ venus_api_port }}" - connect_timeout: 1 - timeout: 1 - state: stopped - when: - - container_facts.containers['venus_api'] is not defined - - inventory_hostname in groups['venus-api'] diff --git a/ansible/roles/venus/tasks/pull.yml b/ansible/roles/venus/tasks/pull.yml deleted file mode 100644 index 53f9c5fda1..0000000000 --- a/ansible/roles/venus/tasks/pull.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- import_role: - role: service-images-pull diff --git a/ansible/roles/venus/tasks/reconfigure.yml b/ansible/roles/venus/tasks/reconfigure.yml deleted file mode 100644 index 5b10a7e111..0000000000 --- a/ansible/roles/venus/tasks/reconfigure.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- import_tasks: deploy.yml diff --git a/ansible/roles/venus/tasks/register.yml b/ansible/roles/venus/tasks/register.yml deleted file mode 100644 index d61d9a9b0c..0000000000 --- a/ansible/roles/venus/tasks/register.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- 
import_role: - name: service-ks-register - vars: - service_ks_register_auth: "{{ openstack_venus_auth }}" - service_ks_register_services: "{{ venus_ks_services }}" - service_ks_register_users: "{{ venus_ks_users }}" diff --git a/ansible/roles/venus/tasks/stop.yml b/ansible/roles/venus/tasks/stop.yml deleted file mode 100644 index 5016a76343..0000000000 --- a/ansible/roles/venus/tasks/stop.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- import_role: - name: service-stop - vars: - project_services: "{{ venus_services }}" - service_name: "{{ project_name }}" diff --git a/ansible/roles/venus/tasks/upgrade.yml b/ansible/roles/venus/tasks/upgrade.yml deleted file mode 100644 index 49edff81e3..0000000000 --- a/ansible/roles/venus/tasks/upgrade.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- import_tasks: config.yml - -- import_tasks: check-containers.yml - -- name: Flush handlers - meta: flush_handlers diff --git a/ansible/roles/venus/templates/venus-api.json.j2 b/ansible/roles/venus/templates/venus-api.json.j2 deleted file mode 100644 index 0a825529d8..0000000000 --- a/ansible/roles/venus/templates/venus-api.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "venus_api --config-file /etc/venus/venus.conf", - "config_files": [ - { - "source": "{{ container_config_directory }}/venus.conf", - "dest": "/etc/venus/venus.conf", - "owner": "venus", - "perm": "0644" - }{% if kolla_copy_ca_into_containers | bool %}, - { - "source": "{{ container_config_directory }}/ca-certificates", - "dest": "/var/lib/kolla/share/ca-certificates", - "owner": "root", - "perm": "0600" - }{% endif %} - ], - "permissions": [ - { - "path":"/var/log/kolla/venus/venus-api.log", - "owner": "venus:venus", - "recurse": true - } - ] -} - diff --git a/ansible/roles/venus/templates/venus-manager.json.j2 b/ansible/roles/venus/templates/venus-manager.json.j2 deleted file mode 100644 index 02f7503cb3..0000000000 --- a/ansible/roles/venus/templates/venus-manager.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": 
"venus_manager --config-file /etc/venus/venus.conf task start", - "config_files": [ - { - "source": "{{ container_config_directory }}/venus.conf", - "dest": "/etc/venus/venus.conf", - "owner": "venus", - "perm": "0644" - }{% if kolla_copy_ca_into_containers | bool %}, - { - "source": "{{ container_config_directory }}/ca-certificates", - "dest": "/var/lib/kolla/share/ca-certificates", - "owner": "root", - "perm": "0600" - }{% endif %} - ], - "permissions": [ - { - "path":"/var/log/kolla/venus/venus-manager.log", - "owner": "venus:venus", - "recurse": true - } - ] -} - diff --git a/ansible/roles/venus/templates/venus.conf.j2 b/ansible/roles/venus/templates/venus.conf.j2 deleted file mode 100644 index d4cbd91f40..0000000000 --- a/ansible/roles/venus/templates/venus.conf.j2 +++ /dev/null @@ -1,38 +0,0 @@ -[DEFAULT] -my_ip = {{ api_interface_address }} -periodic_interval = 60 -rootwrap_config = /etc/venus/rootwrap.conf -api_paste_config = /etc/venus/api-paste.ini -log_dir = /var/log/kolla/venus/ -debug = {{ venus_logging_debug }} -auth_strategy = keystone -os_region_name = {{ openstack_region_name }} -osapi_venus_listen = {{ api_interface_address }} -osapi_venus_listen_port = {{ venus_api_port }} - -logging_default_format_string = {{ openstack_logging_default_format_string }} -logging_context_format_string = {{ openstack_logging_context_format_string }} - -transport_url = {{ rpc_transport_url }} - -[database] -connection = mysql+pymysql://{{ venus_database_user }}:{{ venus_database_password }}@{{ venus_database_address }}/{{ venus_database_name }}?charset=utf8{{ '&ssl_ca=' ~ openstack_cacert if venus_database_enable_tls_internal | bool }} - -[keystone_authtoken] -cafile = {{ openstack_cacert }} -project_name = service -password = {{ venus_keystone_password }} -username = {{ venus_keystone_user }} -auth_url = {{ keystone_internal_url }} -project_domain_id = {{ default_project_domain_id }} -user_domain_id = {{ default_user_domain_id }} -auth_type = password 
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %} - -{% if enable_opensearch | bool %} -[elasticsearch] -url = {{ opensearch_internal_endpoint }} -{% endif %} - -[oslo_concurrency] -lock_path = /var/lib/venus/tmp diff --git a/ansible/roles/venus/vars/main.yml b/ansible/roles/venus/vars/main.yml deleted file mode 100644 index 3955d5f95f..0000000000 --- a/ansible/roles/venus/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -project_name: "venus" diff --git a/ansible/site.yml b/ansible/site.yml index 0ab3862741..33855f1d61 100644 --- a/ansible/site.yml +++ b/ansible/site.yml @@ -68,7 +68,6 @@ - enable_tacker_{{ enable_tacker | bool }} - enable_telegraf_{{ enable_telegraf | bool }} - enable_trove_{{ enable_trove | bool }} - - enable_venus_{{ enable_venus | bool }} - enable_watcher_{{ enable_watcher | bool }} - enable_zun_{{ enable_zun | bool }} tags: always @@ -326,11 +325,6 @@ tasks_from: loadbalancer tags: trove when: enable_trove | bool - - include_role: - name: venus - tasks_from: loadbalancer - tags: venus - when: enable_venus | bool - include_role: name: watcher tasks_from: loadbalancer @@ -1066,21 +1060,6 @@ - { role: masakari, tags: masakari } -- name: Apply role venus - gather_facts: false - hosts: - - venus-api - - venus-manager - - '&enable_venus_True' - serial: '{{ kolla_serial|default("0") }}' - max_fail_percentage: >- - {{ venus_max_fail_percentage | - default(kolla_max_fail_percentage) | - default(100) }} - roles: - - { role: venus, - tags: venus } - - name: Apply role skyline gather_facts: false hosts: diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml index 38aab5c60a..37a7486d32 100644 --- a/etc/kolla/globals.yml +++ b/etc/kolla/globals.yml @@ -376,7 +376,6 @@ workaround_ansible_issue_8743: yes #enable_horizon_octavia: "{{ enable_octavia | bool }}" #enable_horizon_tacker: "{{ enable_tacker | bool }}" 
#enable_horizon_trove: "{{ enable_trove | bool }}" -#enable_horizon_venus: "{{ enable_venus | bool }}" #enable_horizon_watcher: "{{ enable_watcher | bool }}" #enable_horizon_zun: "{{ enable_zun | bool }}" #enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}" @@ -432,7 +431,6 @@ workaround_ansible_issue_8743: yes #enable_telegraf: "no" #enable_trove: "no" #enable_trove_singletenant: "no" -#enable_venus: "no" #enable_watcher: "no" #enable_zun: "no" diff --git a/etc/kolla/passwords.yml b/etc/kolla/passwords.yml index 8647dd9d29..5705873d68 100644 --- a/etc/kolla/passwords.yml +++ b/etc/kolla/passwords.yml @@ -131,9 +131,6 @@ tacker_keystone_password: zun_database_password: zun_keystone_password: -venus_database_password: -venus_keystone_password: - masakari_database_password: masakari_keystone_password: diff --git a/releasenotes/notes/drop-venus-b929071fb79b8026.yaml b/releasenotes/notes/drop-venus-b929071fb79b8026.yaml new file mode 100644 index 0000000000..e33fe99bb0 --- /dev/null +++ b/releasenotes/notes/drop-venus-b929071fb79b8026.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Support for deploying ``Venus`` container images has been dropped. 
diff --git a/tests/check-logs.sh b/tests/check-logs.sh index a9f7d464e3..dac11ccb9b 100755 --- a/tests/check-logs.sh +++ b/tests/check-logs.sh @@ -97,9 +97,6 @@ function check_fluentd_missing_logs { /var/log/kolla/tenks/*) continue ;; - /var/log/kolla/venus/*) - continue - ;; /var/log/kolla/zun/*) continue ;; diff --git a/tests/run.yml b/tests/run.yml index 7e9d71a775..d421c3e7a0 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -10,7 +10,7 @@ - name: Set facts for commonly used variables vars: # NOTE(yoctozepto): needed here to use in other facts too - openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-opensearch', 'venus'] }}" + openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-opensearch'] }}" set_fact: kolla_inventory_path: "/etc/kolla/inventory" logs_dir: "/tmp/logs" @@ -526,13 +526,6 @@ when: - scenario == "prometheus-opensearch" - - name: Run test-venus.sh script - script: - cmd: test-venus.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - when: scenario == "venus" - - name: Run test-skyline.sh script script: cmd: test-skyline.sh diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2 index 32a28773fb..1ec2a74422 100644 --- a/tests/templates/globals-default.j2 +++ b/tests/templates/globals-default.j2 @@ -236,12 +236,6 @@ octavia_network_type: "tenant" enable_redis: "yes" {% endif %} -{% if scenario == "venus" %} -enable_opensearch: "yes" -enable_keystone: "yes" -enable_venus: "yes" -{% endif %} - {% if groups['all'] | length == 1 %} keepalived_track_script_enabled: "no" {% endif %} diff --git a/tests/templates/inventory.j2 b/tests/templates/inventory.j2 index ca98719a89..d0711e93ee 100644 --- a/tests/templates/inventory.j2 +++ b/tests/templates/inventory.j2 @@ -231,9 +231,6 @@ control [blazar:children] control -[venus:children] -monitoring - [letsencrypt:children] loadbalancer @@ -662,12 +659,6 @@ ovn-database [ovn-sb-db-relay:children] ovn-database 
-[venus-api:children] -venus - -[venus-manager:children] -venus - [letsencrypt-webserver:children] letsencrypt diff --git a/tests/test-venus.sh b/tests/test-venus.sh deleted file mode 100755 index 0039d67749..0000000000 --- a/tests/test-venus.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit -set -o pipefail - -# Enable unbuffered output -export PYTHONUNBUFFERED=1 - -# TODO(yoctozepto): Avoid duplicating this from prometheus-opensearch -function check_opensearch { - # Verify that we see a healthy index created due to Fluentd forwarding logs - local opensearch_url=${OS_AUTH_URL%:*}:9200/_cluster/health - output_path=$1 - args=( - --include - --location - --fail - ) - if ! curl "${args[@]}" $opensearch_url > $output_path; then - return 1 - fi - # NOTE(mgoddard): Status may be yellow because no indices have been - # created. - if ! grep -E '"status":"(green|yellow)"' $output_path >/dev/null; then - return 1 - fi -} - -function check_venus { - local venus_url=${OS_AUTH_URL%:*}:10010/custom_config - output_path=$1 - if ! curl --include --fail $venus_url > $output_path; then - return 1 - fi - if ! grep -E '"status": "SUPPORTED"' $output_path >/dev/null; then - return 1 - fi -} - -function test_opensearch { - echo "TESTING: OpenSearch" - output_path=$(mktemp) - attempt=1 - while ! check_opensearch $output_path; do - echo "OpenSearch not accessible yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 12 ]]; then - echo "FAILED: OpenSearch did not become accessible. Response:" - cat $output_path - return 1 - fi - sleep 10 - done - echo "SUCCESS: OpenSearch" -} - -function test_venus { - echo "TESTING: Venus" - output_path=$(mktemp) - attempt=1 - while ! check_venus $output_path; do - echo "Venus not accessible yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 12 ]]; then - echo "FAILED: Venus did not become accessible. 
Response:" - cat $output_path - return 1 - fi - sleep 10 - done - echo "SUCCESS: Venus" -} - -function test_venus_scenario_logged { - . /etc/kolla/admin-openrc.sh - - test_opensearch - test_venus -} - -function test_venus_scenario { - echo "Testing Venus and OpenSearch" - test_venus_scenario_logged > /tmp/logs/ansible/test-venus-scenario 2>&1 - result=$? - if [[ $result != 0 ]]; then - echo "Testing Venus scenario failed. See ansible/test-venus-scenario for details" - else - echo "Successfully tested Venus scenario. See ansible/test-venus-scenario for details" - fi - return $result -} - -test_venus_scenario diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index 2c078fc032..dffc20c4ed 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -29,7 +29,6 @@ # NOTE(mnasiadka): SSO and non-SSO tests are failing #- kolla-ansible-scenario-skyline - kolla-ansible-scenario-telemetry - - kolla-ansible-scenario-venus - openstack-cover-jobs - openstack-python3-jobs-kolla-ansible - periodic-stable-jobs diff --git a/zuul.d/scenarios/venus.yaml b/zuul.d/scenarios/venus.yaml deleted file mode 100644 index f933304c7a..0000000000 --- a/zuul.d/scenarios/venus.yaml +++ /dev/null @@ -1,43 +0,0 @@ ---- -- job: - name: kolla-ansible-venus-base - parent: kolla-ansible-base - voting: false - files: !inherit - - ^ansible/group_vars/all/(fluentd|opensearch|venus).yml - - ^ansible/roles/(fluentd|opensearch|venus)/ - - ^tests/test-venus.sh - vars: - scenario: venus - scenario_images_core: - - ^cron - - ^opensearch - - ^fluentd - - ^haproxy - - ^keepalived - - ^keystone - - ^kolla-toolbox - - ^mariadb - - ^memcached - - ^rabbitmq - - ^venus - tls_enabled: false - -- job: - name: kolla-ansible-debian-bookworm-venus - parent: kolla-ansible-venus-base - nodeset: kolla-ansible-debian-bookworm-8GB - -- job: - name: kolla-ansible-ubuntu-noble-venus - parent: kolla-ansible-venus-base - nodeset: kolla-ansible-ubuntu-noble-8GB - -- project-template: - name: kolla-ansible-scenario-venus - 
description: | - Runs Kolla-Ansible Venus scenario jobs. - check: - jobs: - - kolla-ansible-debian-bookworm-venus - - kolla-ansible-ubuntu-noble-venus From 809e5b2e341fefd655c12532d61a588bc2ce4091 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 28 Oct 2025 19:03:53 +0000 Subject: [PATCH 072/165] Revert "ovn: Mark as non-voting due to db related failures" This reverts commit 011034a141b385ed4c86fd832425e800484efdb6. Change-Id: I4750ea03c8e7885707150c9d2722ed4ee36fbf46 Signed-off-by: Michal Nasiadka --- zuul.d/scenarios/ovn.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/zuul.d/scenarios/ovn.yaml b/zuul.d/scenarios/ovn.yaml index cbeca20058..6e94728457 100644 --- a/zuul.d/scenarios/ovn.yaml +++ b/zuul.d/scenarios/ovn.yaml @@ -6,7 +6,6 @@ - ^ansible/group_vars/all/(neutron|octavia|openvswitch|ovn).yml - ^ansible/roles/(neutron|octavia|openvswitch|ovn-controller|ovn-db)/ - ^tests/test-ovn.sh - voting: false vars: scenario: ovn scenario_images_extra: @@ -48,3 +47,7 @@ - kolla-ansible-debian-bookworm-ovn-upgrade - kolla-ansible-ubuntu-noble-ovn - kolla-ansible-ubuntu-noble-ovn-upgrade + gate: + jobs: + - kolla-ansible-ubuntu-noble-ovn + - kolla-ansible-ubuntu-noble-ovn-upgrade From a39190ef47369b015ddd51a8d6748f5933aa45af Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 28 Oct 2025 20:09:46 +0100 Subject: [PATCH 073/165] CI: Fix task name in upgrade Change-Id: I7409c3f955faa6281fb19c85bbb4a9aa03f9f219 Signed-off-by: Michal Nasiadka --- roles/kolla-ansible-upgrade/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kolla-ansible-upgrade/tasks/main.yml b/roles/kolla-ansible-upgrade/tasks/main.yml index a464f1cf3b..c66e06fe20 100644 --- a/roles/kolla-ansible-upgrade/tasks/main.yml +++ b/roles/kolla-ansible-upgrade/tasks/main.yml @@ -36,7 +36,7 @@ -vvv >/tmp/logs/ansible/upgrade-pull 2>&1 -- name: Run kolla-ansible deploy +- name: Run kolla-ansible upgrade ansible.builtin.shell: cmd: > . 
{{ kolla_ansible_venv_path }}/bin/activate && From e67a4dea7322e53a7426b2ce054723cbe5a030be Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 28 Oct 2025 17:59:56 +0100 Subject: [PATCH 074/165] CI: test-dashboard: Use http/https where configured Also move zuul_return to run only on primary Change-Id: I5b27663e8c539715e6aeb18b179a4f0b98103f7f Signed-off-by: Michal Nasiadka --- .../tasks/main.yml | 2 ++ tests/post.yml | 24 +++++++++---------- tests/testinfra/test_horizon.py | 6 +++-- zuul.d/base.yaml | 1 + 4 files changed, 19 insertions(+), 14 deletions(-) diff --git a/roles/kolla-ansible-test-dashboard/tasks/main.yml b/roles/kolla-ansible-test-dashboard/tasks/main.yml index e09d62a334..8455b31549 100644 --- a/roles/kolla-ansible-test-dashboard/tasks/main.yml +++ b/roles/kolla-ansible-test-dashboard/tasks/main.yml @@ -70,6 +70,8 @@ port: 4444 - name: Run testinfra tests + environment: + HORIZON_PROTO: "{{ 'https' if tls_enabled | bool else 'http' }}" ansible.builtin.shell: cmd: > . {{ kolla_ansible_venv_path }}/bin/activate && diff --git a/tests/post.yml b/tests/post.yml index 83b34638f5..46e4370960 100644 --- a/tests/post.yml +++ b/tests/post.yml @@ -7,18 +7,6 @@ zuul_work_dir: '/home/zuul/tempest' tasks: - - name: Return artifact to Zuul - zuul_return: - data: - zuul: - artifacts: - - name: "TestInfra Unit Test Report" - url: "testinfra/test-results-testinfra.html" - metadata: - type: unit_test_report - - name: "TestInfra Screenshots" - url: "testinfra/screenshots" - # TODO(mhiner): Currently only Docker to Podman migration is tested. # If we want to test the other direction we have to rework this. 
- name: Change container engine after the migration @@ -96,6 +84,18 @@ # TODO(mnasiadka): Remove in G/2026.1 cycle ignore_errors: true + - name: Return artifact to Zuul + zuul_return: + data: + zuul: + artifacts: + - name: "TestInfra Unit Test Report" + url: "testinfra/test-results-testinfra.html" + metadata: + type: unit_test_report + - name: "TestInfra Screenshots" + url: "testinfra/screenshots" + - name: Check for existence of ara sqlite stat: path: "{{ ansible_env.HOME }}/.ara/server/ansible.sqlite" diff --git a/tests/testinfra/test_horizon.py b/tests/testinfra/test_horizon.py index 84b101e22d..abdb787c68 100644 --- a/tests/testinfra/test_horizon.py +++ b/tests/testinfra/test_horizon.py @@ -38,7 +38,8 @@ def test_horizon_screenshot(host): command_executor='http://localhost:4444/wd/hub', options=firefox_options) - horizon_url = "https://192.0.2.10" + horizon_proto = host.environment().get('HORIZON_PROTO') + horizon_url = horizon_proto + "://192.0.2.10" try: driver.get(horizon_url) @@ -77,7 +78,8 @@ def test_horizon_login(host): command_executor='http://localhost:4444/wd/hub', options=firefox_options) - horizon_url = "https://192.0.2.10" + horizon_proto = host.environment().get('HORIZON_PROTO') + horizon_url = horizon_proto + "://192.0.2.10" logout_url = '/'.join(( horizon_url, 'auth', diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 8564afa1f8..7a9b481cbe 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -14,6 +14,7 @@ files: - ^ansible/group_vars/all/common.yml - ^requirements-core.yml + - ^roles/kolla-ansible-test-dashboard/ - ^tests/check-(config|failure|logs).sh - ^tests/get_logs.sh - ^tests/init-core-openstack.sh From 23514cef8592c0a011fb2f27de2a10be9656556e Mon Sep 17 00:00:00 2001 From: Michal Arbet Date: Tue, 1 Apr 2025 15:15:41 +0200 Subject: [PATCH 075/165] Remove distro_python_version variable This patch drops the ``distro_python_version`` variable from Kolla Ansible, as it is no longer needed, the paths have already been handled at the 
image level. More details can be found in [1]. [1] https://review.opendev.org/q/topic:%22kolla-distro-python-version%22 Change-Id: I1b972c607c1d9fd387e977b73a7529c3e2574387 Signed-off-by: Michal Nasiadka --- ansible/group_vars/all/common.yml | 2 -- ansible/roles/horizon/templates/horizon.conf.j2 | 6 ++---- .../templates/ironic-prometheus-exporter-wsgi.conf.j2 | 7 +++---- ansible/roles/skyline/templates/nginx.conf.j2 | 2 +- ansible/roles/skyline/templates/skyline-console.json.j2 | 2 +- .../remove-distro-python-version-09adac8895cb87d1.yaml | 4 ++++ 6 files changed, 11 insertions(+), 12 deletions(-) create mode 100644 releasenotes/notes/remove-distro-python-version-09adac8895cb87d1.yaml diff --git a/ansible/group_vars/all/common.yml b/ansible/group_vars/all/common.yml index 4a82c29ea3..021df28cfc 100644 --- a/ansible/group_vars/all/common.yml +++ b/ansible/group_vars/all/common.yml @@ -213,8 +213,6 @@ kolla_base_distro_version_default_map: { "ubuntu": "noble", } -distro_python_version: "3" - kolla_base_distro_version: "{{ kolla_base_distro_version_default_map[kolla_base_distro] }}" #################### diff --git a/ansible/roles/horizon/templates/horizon.conf.j2 b/ansible/roles/horizon/templates/horizon.conf.j2 index e5f851aeba..2f6380da2e 100644 --- a/ansible/roles/horizon/templates/horizon.conf.j2 +++ b/ansible/roles/horizon/templates/horizon.conf.j2 @@ -1,5 +1,3 @@ -{% set python_path = '/var/lib/kolla/venv/lib/python' + distro_python_version + '/site-packages' %} - {% if horizon_enable_tls_backend | bool %} {% if kolla_base_distro in ['centos', 'rocky'] %} LoadModule ssl_module /usr/lib64/httpd/modules/mod_ssl.so @@ -23,7 +21,7 @@ TraceEnable off WSGIScriptReloading On WSGIDaemonProcess horizon-http processes={{ horizon_wsgi_processes }} threads={{ horizon_wsgi_threads }} user=horizon group=horizon display-name=horizon WSGIProcessGroup horizon-http - WSGIScriptAlias / {{ python_path }}/openstack_dashboard/wsgi.py + WSGIScriptAlias / 
/var/lib/kolla/venv/lib/python3/site-packages/openstack_dashboard/wsgi.py WSGIPassAuthorization On WSGIApplicationGroup %{GLOBAL} @@ -35,7 +33,7 @@ TraceEnable off Require local - Alias /static {{ python_path }}/static + Alias /static /var/lib/kolla/venv/lib/python3/site-packages/static SetHandler None diff --git a/ansible/roles/ironic/templates/ironic-prometheus-exporter-wsgi.conf.j2 b/ansible/roles/ironic/templates/ironic-prometheus-exporter-wsgi.conf.j2 index ace7fd9a1d..319bf2f888 100644 --- a/ansible/roles/ironic/templates/ironic-prometheus-exporter-wsgi.conf.j2 +++ b/ansible/roles/ironic/templates/ironic-prometheus-exporter-wsgi.conf.j2 @@ -1,12 +1,11 @@ {% set ironic_log_dir = '/var/log/kolla/ironic' %} -{% set python_path = '/var/lib/kolla/venv/lib/python' + distro_python_version + '/site-packages' %} Listen {{ api_interface_address | put_address_in_context('url') }}:{{ ironic_prometheus_exporter_port }} ServerSignature Off ServerTokens Prod TraceEnable off - + AllowOverride None Options None @@ -28,9 +27,9 @@ LogLevel info LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ ironic_log_dir }}/ironic-prometheus-exporter-wsgi-access.log" logformat - WSGIDaemonProcess ironic-prometheus-exporter processes={{ openstack_service_workers }} threads=1 user=ironic display-name=%{GROUP} python-path={{ python_path }} + WSGIDaemonProcess ironic-prometheus-exporter processes={{ openstack_service_workers }} threads=1 user=ironic display-name=%{GROUP} python-path=/var/lib/kolla/venv/lib/python3/site-packages WSGIProcessGroup ironic-prometheus-exporter - WSGIScriptAlias / {{ python_path }}/ironic_prometheus_exporter/app/wsgi.py + WSGIScriptAlias / /var/lib/kolla/venv/lib/python3/site-packages/ironic_prometheus_exporter/app/wsgi.py WSGIApplicationGroup %{GLOBAL} Require all granted diff --git a/ansible/roles/skyline/templates/nginx.conf.j2 b/ansible/roles/skyline/templates/nginx.conf.j2 index 
944480464b..711c7fbe63 100644 --- a/ansible/roles/skyline/templates/nginx.conf.j2 +++ b/ansible/roles/skyline/templates/nginx.conf.j2 @@ -68,7 +68,7 @@ http { server { listen {{ api_interface_address | put_address_in_context('url') }}:{{ skyline_console_listen_port }}{% if skyline_ssl_certfile and skyline_ssl_keyfile %} ssl http2{% endif %} default_server; - root /var/lib/kolla/venv/lib/python{{ distro_python_version }}/site-packages/skyline_console/static; + root /var/lib/kolla/venv/lib/python3/site-packages/skyline_console/static; # Add index.php to the list if you are using PHP index index.html; diff --git a/ansible/roles/skyline/templates/skyline-console.json.j2 b/ansible/roles/skyline/templates/skyline-console.json.j2 index 6d9e0a0822..3cab049059 100644 --- a/ansible/roles/skyline/templates/skyline-console.json.j2 +++ b/ansible/roles/skyline/templates/skyline-console.json.j2 @@ -15,7 +15,7 @@ }{% if skyline_custom_logos | length > 0 %}, { "source": "{{ container_config_directory}}/logos", - "dest": "/var/lib/kolla/venv/lib/python{{ distro_python_version }}/site-packages/skyline_console/static", + "dest": "/var/lib/kolla/venv/lib/python3/site-packages/skyline_console/static", "owner": "root", "perm": "0644", "merge": true diff --git a/releasenotes/notes/remove-distro-python-version-09adac8895cb87d1.yaml b/releasenotes/notes/remove-distro-python-version-09adac8895cb87d1.yaml new file mode 100644 index 0000000000..6f19718c39 --- /dev/null +++ b/releasenotes/notes/remove-distro-python-version-09adac8895cb87d1.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + The global variable ``distro_python_version`` has been removed From 100efb4153d581635ed9ae1e47893b33bd109bc1 Mon Sep 17 00:00:00 2001 From: Maximilian Stinsky Date: Wed, 17 Sep 2025 16:55:12 +0200 Subject: [PATCH 076/165] Run neutron-ovn-metadata-agent haproxy processes in separate containers Follow up on Iaa1e687152db8351bc0e9b10e66f412860ac13a5 This patch implements the same experimental feature of running 
processes in seperated containers to the neutron-ovn-metadata-agent Co-Authored-By: Bartosz Bezak Depends-On: https://review.opendev.org/c/openstack/kolla/+/960726 Change-Id: If42aa11735bee963afc33d2e06541e1d9aab0515 Signed-off-by: Maximilian Stinsky Signed-off-by: Bartosz Bezak --- ansible/roles/neutron/defaults/main.yml | 9 +++++++++ ansible/roles/neutron/handlers/main.yml | 3 +++ doc/source/reference/networking/neutron.rst | 5 +++-- ...etadata-agent-haproxy-container-2935ea0b03c41900.yaml | 7 +++++++ 4 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/neutron-ovn-metadata-agent-haproxy-container-2935ea0b03c41900.yaml diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml index 47d3e05a26..44f5426bdd 100644 --- a/ansible/roles/neutron/defaults/main.yml +++ b/ansible/roles/neutron/defaults/main.yml @@ -174,6 +174,7 @@ neutron_services: dimensions: "{{ neutron_metadata_agent_dimensions }}" healthcheck: "{{ neutron_metadata_agent_healthcheck }}" neutron-ovn-metadata-agent: + cgroupns_mode: "{{ 'host' if neutron_agents_wrappers | bool else 'private' }}" container_name: "neutron_ovn_metadata_agent" image: "{{ neutron_ovn_metadata_agent_image_full }}" privileged: True @@ -182,6 +183,11 @@ neutron_services: volumes: "{{ neutron_ovn_metadata_agent_default_volumes + neutron_ovn_metadata_agent_extra_volumes }}" dimensions: "{{ neutron_ovn_metadata_agent_dimensions }}" healthcheck: "{{ neutron_ovn_metadata_agent_healthcheck }}" + pid_mode: "{{ 'host' if neutron_agents_wrappers | bool else '' }}" + environment: + KOLLA_IMAGE: "{{ neutron_ovn_metadata_agent_image_full }}" + KOLLA_NAME: "neutron_ovn_metadata_agent" + KOLLA_NEUTRON_WRAPPERS: "{{ 'true' if neutron_agents_wrappers | bool else 'false' }}" neutron-bgp-dragent: container_name: "neutron_bgp_dragent" image: "{{ neutron_bgp_dragent_image_full }}" @@ -634,6 +640,9 @@ neutron_ovn_metadata_agent_default_volumes: - "kolla_logs:/var/log/kolla/" - 
"{{ '/dev/shm:/dev/shm' }}" - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}" + - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' else '' }}" + - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" + - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" neutron_openvswitch_agent_default_volumes: - "{{ node_config_directory }}/neutron-openvswitch-agent/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" diff --git a/ansible/roles/neutron/handlers/main.yml b/ansible/roles/neutron/handlers/main.yml index 867880bcbc..249125fb2d 100644 --- a/ansible/roles/neutron/handlers/main.yml +++ b/ansible/roles/neutron/handlers/main.yml @@ -266,6 +266,9 @@ dimensions: "{{ service.dimensions }}" privileged: "{{ service.privileged | default(False) }}" healthcheck: "{{ service.healthcheck | default(omit) }}" + pid_mode: "{{ service.pid_mode | default(omit) }}" + cgroupns_mode: "{{ service.cgroupns_mode | default(omit) }}" + environment: "{{ service.environment | default(omit) }}" - name: Restart neutron-bgp-dragent container vars: diff --git a/doc/source/reference/networking/neutron.rst b/doc/source/reference/networking/neutron.rst index 0dae7dcb6d..82b4321f45 100644 --- a/doc/source/reference/networking/neutron.rst +++ b/doc/source/reference/networking/neutron.rst @@ -340,8 +340,9 @@ Running Neutron agents subprocesses in separate containers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is an experimental feature in Kolla-Ansible that allows to overcome -the issue of breaking data plane connectivity and dhcp services when -restarting neutron-l3-agent and neutron-dhcp-agent. 
+the issue of breaking data plane connectivity, dhcp and metadata services +when restarting neutron-l3-agent and neutron-dhcp-agent in ml2/ovs or +restarting the neutron-ovn-metadata-agent in ml2/ovn. To enable it, modify the configuration in ``/etc/kolla/globals.yml``: diff --git a/releasenotes/notes/neutron-ovn-metadata-agent-haproxy-container-2935ea0b03c41900.yaml b/releasenotes/notes/neutron-ovn-metadata-agent-haproxy-container-2935ea0b03c41900.yaml new file mode 100644 index 0000000000..3a13a9ee79 --- /dev/null +++ b/releasenotes/notes/neutron-ovn-metadata-agent-haproxy-container-2935ea0b03c41900.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Implement ``neutron_agents_wrappers`` for the + neutron-ovn-metdata-agent. This allows the haproxy processes which + forward metadata requests in ml2/ovn setups to spawn in separate + containers. From d03b2df5a9920a9c3317c07769cdcc94ad4f74e4 Mon Sep 17 00:00:00 2001 From: Bartosz Bezak Date: Wed, 29 Oct 2025 13:03:12 +0100 Subject: [PATCH 077/165] Enable Neutron agent wrappers by default Neutron agent wrappers are now enabled by default. Wrapper containers restart DHCP, L3, and related agents without having to respawn the main service containers, which reduces dataplane disruptions during upgrades and restarts. Operators who need the previous behaviour can set ``neutron_agents_wrappers`` to ``"no"`` in ``/etc/kolla/globals.yml``. 
Change-Id: I755e3dfcd326f7c1c05c1e9275dda93753db5873 Signed-off-by: Bartosz Bezak --- ansible/roles/neutron/defaults/main.yml | 2 +- doc/source/reference/networking/neutron.rst | 2 +- etc/kolla/globals.yml | 3 +++ ...eutron-agent-wrappers-by-default-c48bc7c00fcca011.yaml | 8 ++++++++ 4 files changed, 13 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/neutron-agent-wrappers-by-default-c48bc7c00fcca011.yaml diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml index 44f5426bdd..5ff4c63c0a 100644 --- a/ansible/roles/neutron/defaults/main.yml +++ b/ansible/roles/neutron/defaults/main.yml @@ -757,7 +757,7 @@ neutron_l3_agent_host_ipv6_neigh_gc_thresh3: "{{ neutron_l3_agent_host_ipv4_neig neutron_api_workers: "{{ openstack_service_workers }}" neutron_metadata_workers: "{{ openstack_service_workers }}" -neutron_agents_wrappers: "no" +neutron_agents_wrappers: "yes" #################### # Subprojects diff --git a/doc/source/reference/networking/neutron.rst b/doc/source/reference/networking/neutron.rst index 82b4321f45..264f2366f5 100644 --- a/doc/source/reference/networking/neutron.rst +++ b/doc/source/reference/networking/neutron.rst @@ -339,7 +339,7 @@ In this example: Running Neutron agents subprocesses in separate containers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -There is an experimental feature in Kolla-Ansible that allows to overcome +There is a feature in Kolla-Ansible that allows to overcome the issue of breaking data plane connectivity, dhcp and metadata services when restarting neutron-l3-agent and neutron-dhcp-agent in ml2/ovs or restarting the neutron-ovn-metadata-agent in ml2/ovn. 
diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml index cb0e651d59..70d55940d5 100644 --- a/etc/kolla/globals.yml +++ b/etc/kolla/globals.yml @@ -175,6 +175,9 @@ workaround_ansible_issue_8743: yes # Neutron rolling upgrade were enable by default #neutron_enable_rolling_upgrade: "yes" +# Enable wrapper containers to keep Neutron agent restarts isolated from the main service containers +#neutron_agents_wrappers: "yes" + # Configure neutron logging framework to log ingress/egress connections to instances # for security groups rules. More information can be found here: # https://docs.openstack.org/neutron/latest/admin/config-logging.html diff --git a/releasenotes/notes/neutron-agent-wrappers-by-default-c48bc7c00fcca011.yaml b/releasenotes/notes/neutron-agent-wrappers-by-default-c48bc7c00fcca011.yaml new file mode 100644 index 0000000000..1b7464f8ae --- /dev/null +++ b/releasenotes/notes/neutron-agent-wrappers-by-default-c48bc7c00fcca011.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + Neutron agent wrappers are now enabled by default. The wrapper containers + restart DHCP, L3, and related agents without having to respawn the main + service containers, which reduces dataplane disruptions during upgrades and + restarts. Operators who need the previous behaviour can set + ``neutron_agents_wrappers`` to ``"no"`` in ``/etc/kolla/globals.yml``. 
From fd2012a8999bb52d2a6d01904ae49662543f3d2b Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 29 Oct 2025 16:16:25 +0100 Subject: [PATCH 078/165] CI: Bump tempest timeout to 3600 Change-Id: I85c8a2b833b8b4e46bb33124deb594fb2a2b9f78 Signed-off-by: Michal Nasiadka --- roles/kolla-ansible-tempest/tasks/main.yml | 2 +- zuul.d/scenarios/aio.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/kolla-ansible-tempest/tasks/main.yml b/roles/kolla-ansible-tempest/tasks/main.yml index 4991340f90..7c118feff8 100644 --- a/roles/kolla-ansible-tempest/tasks/main.yml +++ b/roles/kolla-ansible-tempest/tasks/main.yml @@ -32,7 +32,7 @@ OS_LOG_CAPTURE: "1" OS_STDOUT_CAPTURE: "1" OS_STDERR_CAPTURE: "1" - OS_TEST_TIMEOUT: "1200" + OS_TEST_TIMEOUT: "3600" vars: tempest_log_file: "test-tempest-run{{ '-post-upgrade' if post_upgrade | bool else '' }}" ansible.builtin.shell: diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index 13d2c00618..f8e9b06e31 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -8,7 +8,7 @@ - ^ansible/(action_plugins|filter_plugins|library|module_utils)/ - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq|service-.*)/ - ^kolla_ansible/ - - ^roles/kolla-ansible-(deploy|test-dashboard)/ + - ^roles/kolla-ansible-(deploy|tempest|test-dashboard)/ - ^tests/testinfra/test_horizon.py - ^tools/init-runonce From dfd24deec373e1d65185e3a0ac255ec16a4a459d Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 22 Oct 2025 09:11:51 +0200 Subject: [PATCH 079/165] uwsgi: Add configuration for http chunked input This is mainly required for Glance, but other services use that as well - so let's enable it by default. 
Change-Id: I74340a9b279d5c12542cdf54ae92d814e7a8bf38 Signed-off-by: Michal Nasiadka --- ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 | 3 +++ releasenotes/notes/uwsgi-chunked-input-8b15b1c77f093332.yaml | 4 ++++ zuul.d/scenarios/aio.yaml | 2 +- 3 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/uwsgi-chunked-input-8b15b1c77f093332.yaml diff --git a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 index ce42d352d5..2fb9f030c2 100644 --- a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 +++ b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 @@ -10,6 +10,9 @@ https = {{ service_uwsgi_config_host }}:{{ service_uwsgi_config_http_port }},{{ {% else %} http = {{ service_uwsgi_config_host }}:{{ service_uwsgi_config_http_port }} {% endif %} +http-auto-chunked = true +http-chunked-input = true +http-raw-body = true lazy-apps = true logto2 = /var/log/kolla/{{ service_uwsgi_config_log_dir }}/{{ service_uwsgi_config_log_file }} {% if service_uwsgi_config_log_file_chmod is defined %} diff --git a/releasenotes/notes/uwsgi-chunked-input-8b15b1c77f093332.yaml b/releasenotes/notes/uwsgi-chunked-input-8b15b1c77f093332.yaml new file mode 100644 index 0000000000..a7007fe885 --- /dev/null +++ b/releasenotes/notes/uwsgi-chunked-input-8b15b1c77f093332.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + HTTP chunked input is now enabled by default for all ``uWSGI`` services. 
diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml index f8e9b06e31..384a06b271 100644 --- a/zuul.d/scenarios/aio.yaml +++ b/zuul.d/scenarios/aio.yaml @@ -6,7 +6,7 @@ - ^ansible/group_vars/all/(common|fluentd|glance|haproxy|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml - ^ansible/(action_plugins|filter_plugins|library|module_utils)/ - - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq|service-.*)/ + - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq|service.*)/ - ^kolla_ansible/ - ^roles/kolla-ansible-(deploy|tempest|test-dashboard)/ - ^tests/testinfra/test_horizon.py From 6dff5d5580f066c0568b1ba19dc0dc717cd4f81e Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 3 Nov 2025 06:43:25 +0100 Subject: [PATCH 080/165] ironic: Fix upgrades It seems that due to unnoticed publish job failures, which resulted in no new images in about a month and after I1b5329d814432604640990b0ecc28906845e29d6 the Ironic upgrades are broken, due to a missing bootstrap step when using unpinned upgrade (the default). 
The failures were only visible when we fixed the publish jobs, that's why upgrade jobs passed in I1b5329d814432604640990b0ecc28906845e29d6 Change-Id: Ibd7748b99124fa11d14f4075e4e95836e6860451 Signed-off-by: Michal Nasiadka --- ansible/roles/ironic/tasks/rolling_upgrade.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/ironic/tasks/rolling_upgrade.yml b/ansible/roles/ironic/tasks/rolling_upgrade.yml index 40998142c1..6bda5569fb 100644 --- a/ansible/roles/ironic/tasks/rolling_upgrade.yml +++ b/ansible/roles/ironic/tasks/rolling_upgrade.yml @@ -20,12 +20,12 @@ - name: Flush handlers meta: flush_handlers -# Unpin version - import_tasks: config.yml - import_tasks: check-containers.yml -# Restart ironic services with unpinned release version +- import_tasks: bootstrap_service.yml + - name: Flush handlers meta: flush_handlers From cf02acb1a5b370fd4255073c26140646de324eaf Mon Sep 17 00:00:00 2001 From: Bartosz Bezak Date: Wed, 29 Oct 2025 15:42:24 +0100 Subject: [PATCH 081/165] CI: ironic - stop using tinyipa MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tinyipa was dropped [1]. 
Increase the Tenks nodes to 3 GiB ram and force `cpu_mode: maximum` so the CentOS Stream IPA no longer errors with "Fatal glibc error: CPU does not support x86-64-v2" [1] https://review.opendev.org/c/openstack/ironic-python-agent-builder/+/962490 Change-Id: If27163c55375338d9347afff0d2b9c19419fe06a Signed-off-by: Bartosz Bezak --- tests/run.yml | 6 +++--- tests/templates/tenks-deploy-config.yml.j2 | 8 ++++---- zuul.d/scenarios/ironic.yaml | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/run.yml b/tests/run.yml index d421c3e7a0..f050977a88 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -231,12 +231,12 @@ - name: Download Ironic Python Agent (IPA) images get_url: - url: "https://tarballs.opendev.org/openstack/ironic-python-agent/tinyipa/files/{{ item.src }}" + url: "https://tarballs.opendev.org/openstack/ironic-python-agent-builder/dib/files/{{ item.src }}" dest: "/etc/kolla/config/ironic/{{ item.dest }}" with_items: - - src: "tinyipa-{{ zuul.branch | replace('/', '-') }}.gz" + - src: "ipa-centos9-{{ zuul.branch | replace('/', '-') }}.initramfs" dest: ironic-agent.initramfs - - src: "tinyipa-{{ zuul.branch | replace('/', '-') }}.vmlinuz" + - src: "ipa-centos9-{{ zuul.branch | replace('/', '-') }}.kernel" dest: ironic-agent.kernel when: scenario == "ironic" diff --git a/tests/templates/tenks-deploy-config.yml.j2 b/tests/templates/tenks-deploy-config.yml.j2 index 8aff89eef4..6d2c409c42 100644 --- a/tests/templates/tenks-deploy-config.yml.j2 +++ b/tests/templates/tenks-deploy-config.yml.j2 @@ -3,7 +3,7 @@ node_types: type0: - memory_mb: 1024 + memory_mb: 3072 vcpus: 1 volumes: # There is a minimum disk space capacity requirement of 4GiB when using Ironic Python Agent: @@ -12,9 +12,9 @@ node_types: physical_networks: - physnet1 console_log_enabled: true - # We seem to hit issues with missing cpu features in CI as a result of using host-model, e.g: - # 
https://zuul.opendev.org/t/openstack/build/02c33ab51664419a88a5a54ad22852a9/log/primary/system_logs/libvirt/qemu/tk0.txt.gz#38 - cpu_mode: + # NOTE(bbezak): Force QEMU to expose x86-64-v2 features, so the CentOS Stream IPA + # doesn't fail with "Fatal glibc error: CPU does not support x86-64-v2". + cpu_mode: maximum specs: - type: type0 count: 1 diff --git a/zuul.d/scenarios/ironic.yaml b/zuul.d/scenarios/ironic.yaml index b1fa449060..ee20005570 100644 --- a/zuul.d/scenarios/ironic.yaml +++ b/zuul.d/scenarios/ironic.yaml @@ -40,12 +40,12 @@ - job: name: kolla-ansible-ubuntu-noble-ironic parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-ubuntu-noble-8GB + nodeset: kolla-ansible-ubuntu-noble-16GB - job: name: kolla-ansible-ubuntu-noble-ironic-upgrade parent: kolla-ansible-ubuntu-noble-ironic - nodeset: kolla-ansible-ubuntu-noble-8GB + nodeset: kolla-ansible-ubuntu-noble-16GB - project-template: name: kolla-ansible-scenario-ironic From 4169f856298931c4016f830feb6cad51531e31e4 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 24 Oct 2025 12:49:52 +0200 Subject: [PATCH 082/165] CI: Drop test-core-openstack.sh We stopped using it - let's delete it. Change-Id: I581478f557d489dc9c738077bd197ae4ff40ccbc Signed-off-by: Michal Nasiadka --- tests/test-core-openstack.sh | 545 ----------------------------------- 1 file changed, 545 deletions(-) delete mode 100755 tests/test-core-openstack.sh diff --git a/tests/test-core-openstack.sh b/tests/test-core-openstack.sh deleted file mode 100755 index aa1cf74033..0000000000 --- a/tests/test-core-openstack.sh +++ /dev/null @@ -1,545 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit -set -o pipefail - -# Enable unbuffered output for Ansible in Jenkins. 
-export PYTHONUNBUFFERED=1 - -function test_smoke { - openstack --debug endpoint list - openstack --debug compute service list - openstack --debug network agent list - openstack --debug orchestration service list - if [[ $SCENARIO == "cephadm" ]] || [[ $SCENARIO == "zun" ]]; then - openstack --debug volume service list - fi -} - -function create_a_volume { - local volume_name=$1 - - local attempt - - openstack volume create --size 1 $volume_name - attempt=1 - while [[ $(openstack volume show $volume_name -f value -c status) != "available" ]]; do - echo "Volume $volume_name not available yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 10 ]]; then - echo "Volume $volume_name failed to become available" - openstack volume show $volume_name - return 1 - fi - sleep 10 - done -} - -function create_a_volume_from_image { - local volume_name=$1 - local image_name=$2 - - local attempt - - openstack volume create --image $image_name --size 1 $volume_name - attempt=1 - while [[ $(openstack volume show $volume_name -f value -c status) != "available" ]]; do - echo "Volume $volume_name not available yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 11 ]]; then - echo "Volume $volume_name failed to become available" - openstack volume show $volume_name - return 1 - fi - sleep 30 - done -} - -function create_an_image_from_volume { - local image_name=$1 - local volume_name=$2 - - local attempt - - # NOTE(yoctozepto): Adding explicit microversion of Victoria as a sane default to work - # around the bug: https://storyboard.openstack.org/#!/story/2009287 - openstack --os-volume-api-version 3.62 image create --volume $volume_name $image_name - attempt=1 - while [[ $(openstack image show $image_name -f value -c status) != "active" ]]; do - echo "Image $image_name not active yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 11 ]]; then - echo "Image $image_name failed to become active" - openstack image show $image_name - return 1 - fi - sleep 30 - done -} - -function 
create_an_image_from_instance { - local image_name=$1 - local instance_name=$2 - - local attempt - - openstack server image create $instance_name --name $image_name - attempt=1 - while [[ $(openstack image show $image_name -f value -c status) != "active" ]]; do - echo "Image $image_name not active yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 11 ]]; then - echo "Image $image_name failed to become active" - openstack image show $image_name - return 1 - fi - sleep 30 - done -} - -function attach_and_detach_a_volume { - local volume_name=$1 - local instance_name=$2 - - local attempt - - openstack server add volume $instance_name $volume_name --device /dev/vdb - attempt=1 - while [[ $(openstack volume show $volume_name -f value -c status) != "in-use" ]]; do - echo "Volume $volume_name not attached yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 10 ]]; then - echo "Volume failed to attach" - openstack volume show $volume_name - return 1 - fi - sleep 10 - done - - openstack server remove volume $instance_name $volume_name - attempt=1 - while [[ $(openstack volume show $volume_name -f value -c status) != "available" ]]; do - echo "Volume $volume_name not detached yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 10 ]]; then - echo "Volume failed to detach" - openstack volume show $volume_name - return 1 - fi - sleep 10 - done -} - -function delete_a_volume { - local volume_name=$1 - - local attempt - local result - - openstack volume delete $volume_name - - attempt=1 - # NOTE(yoctozepto): This is executed outside of the `while` clause - # *on purpose*. You see, bash is evil (TM) and will silence any error - # happening in any "condition" clause (such as `if` or `while`) even with - # `errexit` being set. 
- result=$(openstack volume list --name $volume_name -f value -c ID) - while [[ -n "$result" ]]; do - echo "Volume $volume_name not deleted yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 10 ]]; then - echo "Volume failed to delete" - openstack volume show $volume_name - return 1 - fi - sleep 10 - result=$(openstack volume list --name $volume_name -f value -c ID) - done -} - -function create_instance { - local name=$1 - local server_create_extra - - if [[ $IP_VERSION -eq 6 ]]; then - # NOTE(yoctozepto): CirrOS has no IPv6 metadata support, hence need to use configdrive - server_create_extra="${server_create_extra} --config-drive True" - fi - - openstack server create --wait --image cirros --flavor c1.tiny --key-name mykey --network demo-net ${server_create_extra} ${name} - # If the status is not ACTIVE, print info and exit 1 - if [[ $(openstack server show ${name} -f value -c status) != "ACTIVE" ]]; then - echo "FAILED: Instance is not active" - openstack --debug server show ${name} - return 1 - fi -} - -function resize_instance { - local name=$1 - - openstack server resize --flavor c2.tiny --wait ${name} - # If the status is not VERIFY_RESIZE, print info and exit 1 - if [[ $(openstack server show ${name} -f value -c status) != "VERIFY_RESIZE" ]]; then - echo "FAILED: Instance is not resized" - openstack --debug server show ${name} - return 1 - fi - - openstack server resize confirm ${name} - - # Confirming the resize operation is not instantaneous. Wait for change to - # be reflected in server status. 
- local attempt - attempt=1 - while [[ $(openstack server show ${name} -f value -c status) != "ACTIVE" ]]; do - echo "Instance is not active yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 10 ]]; then - echo "FAILED: Instance failed to become active after resize confirm" - openstack --debug server show ${name} - return 1 - fi - sleep 10 - done -} - -function delete_instance { - local name=$1 - openstack server delete --wait ${name} -} - -function create_fip { - openstack floating ip create public1 -f value -c floating_ip_address -} - -function delete_fip { - local fip_addr=$1 - openstack floating ip delete ${fip_addr} -} - -function attach_fip { - local instance_name=$1 - local fip_addr=$2 - openstack server add floating ip ${instance_name} ${fip_addr} -} - -function detach_fip { - local instance_name=$1 - local fip_addr=$2 - openstack server remove floating ip ${instance_name} ${fip_addr} -} - -function set_cirros_image_q35_machine_type { - openstack image set --property hw_machine_type=q35 cirros -} - -function unset_cirros_image_q35_machine_type { - openstack image unset --property hw_machine_type cirros -} - -function test_neutron_modules { - # Exit the function if scenario is "ovn" or if there's an upgrade - # as it only concerns ml2/ovs - if [[ $SCENARIO == "ovn" ]] || [[ $HAS_UPGRADE == "yes" ]]; then - return - fi - - local modules - modules=( $(sed -n '/neutron_modules_extra:/,/^[^ ]/p' /etc/kolla/globals.yml | grep -oP '^ - name: \K[^ ]+' | tr -d "'") ) - for module in "${modules[@]}"; do - if ! grep -q "^${module} " /proc/modules; then - echo "Error: Module $module is not loaded." - exit 1 - else - echo "Module $module is loaded." 
- fi - done -} - -function test_ssh { - local instance_name=$1 - local fip_addr=$2 - local attempts - attempts=30 - for i in $(seq 1 ${attempts}); do - if ping -c1 -W1 ${fip_addr} && ssh -v -o BatchMode=yes -o StrictHostKeyChecking=no cirros@${fip_addr} hostname; then - break - elif [[ $i -eq ${attempts} ]]; then - echo "Failed to access server via SSH after ${attempts} attempts" - echo "Console log:" - openstack console log show ${instance_name} || true - openstack --debug server show ${instance_name} - return 1 - else - echo "Cannot access server - retrying" - fi - sleep 10 - done -} - -function test_instance_boot { - local fip_addr - local machine_type="${1}" - local fip_file="/tmp/kolla_ci_pre_upgrade_fip_addr${machine_type:+_$machine_type}" - local upgrade_instance_name="kolla_upgrade_test${machine_type:+_$machine_type}" - local volume_name="durable_volume${machine_type:+_$machine_type}" - - echo "TESTING: Server creation" - create_instance kolla_boot_test - echo "SUCCESS: Server creation" - - if [[ $SCENARIO == "cephadm" ]] || [[ $SCENARIO == "zun" ]]; then - echo "TESTING: Cinder volume creation and attachment" - - create_a_volume test_volume - openstack volume show test_volume - attach_and_detach_a_volume test_volume kolla_boot_test - delete_a_volume test_volume - - # test a qcow2 image (non-cloneable) - create_a_volume_from_image test_volume_from_image cirros - openstack volume show test_volume_from_image - attach_and_detach_a_volume test_volume_from_image kolla_boot_test - delete_a_volume test_volume_from_image - - # test a raw image (cloneable) - openstack image create --disk-format raw --container-format bare --public \ - --file /etc/passwd raw-image - create_a_volume_from_image test_volume_from_image raw-image - openstack volume show test_volume_from_image - attach_and_detach_a_volume test_volume_from_image kolla_boot_test - delete_a_volume test_volume_from_image - openstack image delete raw-image - - echo "SUCCESS: Cinder volume creation and 
attachment" - - if [[ $HAS_UPGRADE == 'yes' ]]; then - echo "TESTING: Cinder volume upgrade stability (PHASE: $PHASE)" - - if [[ $PHASE == 'deploy' ]]; then - create_a_volume $volume_name - openstack volume show $volume_name - elif [[ $PHASE == 'upgrade' ]]; then - openstack volume show $volume_name - attach_and_detach_a_volume $volume_name kolla_boot_test - delete_a_volume $volume_name - fi - - echo "SUCCESS: Cinder volume upgrade stability (PHASE: $PHASE)" - fi - - echo "TESTING: Glance image from Cinder volume and back to volume" - - create_a_volume test_volume_to_image - openstack volume show test_volume_to_image - create_an_image_from_volume test_image_from_volume test_volume_to_image - - create_a_volume_from_image test_volume_from_image_from_volume test_image_from_volume - openstack volume show test_volume_from_image_from_volume - attach_and_detach_a_volume test_volume_from_image_from_volume kolla_boot_test - - delete_a_volume test_volume_from_image_from_volume - openstack image delete test_image_from_volume - delete_a_volume test_volume_to_image - - echo "SUCCESS: Glance image from Cinder volume and back to volume" - fi - - echo "TESTING: Instance image upload" - create_an_image_from_instance image_from_instance kolla_boot_test - openstack image delete image_from_instance - echo "SUCCESS: Instance image upload" - - if [[ $IP_VERSION -eq 4 ]]; then - echo "TESTING: Floating ip allocation" - fip_addr=$(create_fip) - attach_fip kolla_boot_test ${fip_addr} - echo "SUCCESS: Floating ip allocation" - else - # NOTE(yoctozepto): Neutron has no IPv6 NAT support, hence no floating ip addresses - local instance_addresses - fip_addr=$(openstack server show kolla_boot_test -f yaml -c addresses|tail -1|cut -d- -f2) - fi - - echo "TESTING: PING&SSH to instance" - test_ssh kolla_boot_test ${fip_addr} - echo "SUCCESS: PING&SSH to instance" - - if [[ $IP_VERSION -eq 4 ]]; then - echo "TESTING: Floating ip deallocation" - detach_fip kolla_boot_test ${fip_addr} - delete_fip 
${fip_addr} - echo "SUCCESS: Floating ip deallocation" - fi - - echo "TESTING: Server resize" - resize_instance kolla_boot_test - echo "SUCCESS: Server resize" - - echo "TESTING: Server deletion" - delete_instance kolla_boot_test - echo "SUCCESS: Server deletion" - - if [[ $HAS_UPGRADE == 'yes' ]]; then - echo "TESTING: Instance (Nova and Neutron) upgrade stability (PHASE: $PHASE)" - - if [[ $PHASE == 'deploy' ]]; then - create_instance $upgrade_instance_name - fip_addr=$(create_fip) - attach_fip $upgrade_instance_name ${fip_addr} - test_ssh $upgrade_instance_name ${fip_addr} # tested to see if the instance has not just failed booting already - echo ${fip_addr} > $fip_file - elif [[ $PHASE == 'upgrade' ]]; then - fip_addr=$(cat $fip_file) - test_ssh $upgrade_instance_name ${fip_addr} - detach_fip $upgrade_instance_name ${fip_addr} - delete_fip ${fip_addr} - delete_instance $upgrade_instance_name - fi - - echo "SUCCESS: Instance (Nova and Neutron) upgrade stability (PHASE: $PHASE)" - fi -} - -function test_internal_dns_integration { - - # As per test globals - neutron integration is turned off - if openstack extension list --network -f value -c Alias | grep -q dns-integration; then - DNS_NAME="my-port" - PORT_NAME="${DNS_NAME}" - DNS_DOMAIN=$(awk -F ':' '/neutron_dns_domain:/ { print $2 }' /etc/kolla/globals.yml \ - | sed -e 's/"//g' -e "s/'//g" -e "s/\ *//g") - - openstack network create dns-test-network - openstack subnet create --network dns-test-network --subnet-range 192.168.88.0/24 dns-test-subnet - openstack port create --network dns-test-network --dns-name ${DNS_NAME} ${PORT_NAME} - - local attempt - while [[ $(openstack port show ${DNS_NAME} -f value -c dns_assignment) == "[]" ]]; do - echo "dns_assignment for port ${DNS_NAME} not available yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 20 ]]; then - echo "ERROR: dns_assignment for port ${DNS_NAME} failed to become available" - openstack port show ${DNS_NAME} - return 1 - fi - sleep $attempt - done - 
set +e - DNS_ASSIGNMENT=$(openstack port show ${DNS_NAME} -f json -c dns_assignment) - FQDN=$(echo ${DNS_ASSIGNMENT} | python -c 'import json,sys;obj=json.load(sys.stdin);print(obj["dns_assignment"][0]["fqdn"]);') - HOSTNAME=$(echo ${DNS_ASSIGNMENT} | python -c 'import json,sys;obj=json.load(sys.stdin);print(obj["dns_assignment"][0]["hostname"]);') - set -e - - if [ "${DNS_NAME}.${DNS_DOMAIN}" == "${FQDN}" ]; then - echo "[i] Test neutron internal DNS integration FQDN check port - PASS" - else - echo "[e] Test neutron internal DNS integration FQDN check port - FAIL" - exit 1 - fi - - if [ "${DNS_NAME}" == "${HOSTNAME}" ]; then - echo "[i] Test neutron internal DNS integration HOSTNAME check port - PASS" - else - echo "[e] Test neutron internal DNS integration HOSTNAME check port - FAIL" - exit 1 - fi - - openstack port delete ${PORT_NAME} - - SERVER_NAME="my_vm" - SERVER_NAME_SANITIZED=$(echo ${SERVER_NAME} | sed -e 's/_/-/g') - - openstack server create --image cirros --flavor c1.tiny --network dns-test-network ${SERVER_NAME} - - SERVER_ID=$(openstack server show ${SERVER_NAME} -f value -c id) - attempt=0 - while [[ -z $(openstack port list --device-id ${SERVER_ID} -f value -c ID) ]]; do - echo "Port for server ${SERVER_NAME} not available yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 10 ]]; then - echo "ERROR: Port for server ${SERVER_NAME} failed to become available" - openstack port list --device-id ${SERVER_ID} - return 1 - fi - sleep $attempt - done - PORT_ID=$(openstack port list --device-id ${SERVER_ID} -f value -c ID) - - DNS_ASSIGNMENT=$(openstack port show ${PORT_ID} -f json -c dns_assignment) - FQDN=$(echo ${DNS_ASSIGNMENT} | python -c 'import json,sys;obj=json.load(sys.stdin);print(obj["dns_assignment"][0]["fqdn"]);') - HOSTNAME=$(echo ${DNS_ASSIGNMENT} | python -c 'import json,sys;obj=json.load(sys.stdin);print(obj["dns_assignment"][0]["hostname"]);') - - if [ "${SERVER_NAME_SANITIZED}.${DNS_DOMAIN}" == "${FQDN}" ]; then - echo "[i] Test neutron 
internal DNS integration FQDN check instance create - PASS" - else - echo "[e] Test neutron internal DNS integration FQDN check instance create - FAIL" - exit 1 - fi - - if [ "${SERVER_NAME_SANITIZED}" == "${HOSTNAME}" ]; then - echo "[i] Test neutron internal DNS integration HOSTNAME check instance create - PASS" - else - echo "[e] Test neutron internal DNS integration HOSTNAME check instance create - FAIL" - exit 1 - fi - - openstack server delete --wait ${SERVER_NAME} - openstack subnet delete dns-test-subnet - openstack network delete dns-test-network - - else - echo "[i] DNS Integration is not enabled." - fi -} - -function test_proxysql_prometheus_exporter { - if [[ $SCENARIO == "cells" ]]; then - if curl -v http://127.0.0.1:6070/metrics 2>/dev/null | grep '^proxysql_'; then - echo "[i] Proxysql prometheus exporter - PASS" - mkdir -p /tmp/logs/prometheus-exporters/proxysql - curl -v http://127.0.0.1:6070/metrics 2>/dev/null -o /tmp/logs/prometheus-exporters/proxysql/exporter.txt - else - echo "[e] Proxysql prometheus exporter - FAIL" - exit 1 - fi - fi -} - -function test_openstack_logged { - . /etc/kolla/admin-openrc.sh - . ~/openstackclient-venv/bin/activate - test_smoke - test_neutron_modules - test_instance_boot - # NOTE(mnasiadka): Disable because it started failing in OVN scenario - [[ $SCENARIO != "ovn" ]] && test_internal_dns_integration - test_proxysql_prometheus_exporter - - # Check for x86_64 architecture to run q35 tests - if [[ $(uname -m) == "x86_64" ]]; then - set_cirros_image_q35_machine_type - test_instance_boot q35 - unset_cirros_image_q35_machine_type - fi -} - -function test_openstack { - echo "Testing OpenStack" - log_file=/tmp/logs/ansible/test-core-openstack - if [[ -f $log_file ]]; then - log_file=${log_file}-upgrade - fi - test_openstack_logged > $log_file 2>&1 - result=$? - if [[ $result != 0 ]]; then - echo "Testing OpenStack failed. See ansible/test-core-openstack for details" - else - echo "Successfully tested OpenStack. 
See ansible/test-core-openstack for details" - fi - return $result -} - -test_openstack From 2b397a99d751b1ba7859a8d23264a86bcb5656af Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 24 Oct 2025 12:52:06 +0200 Subject: [PATCH 083/165] CI: Remove Rocky9 Python handling Change-Id: I9686da42a5f59fa36811c4500f8c67c0d32b92b5 Signed-off-by: Michal Nasiadka --- tests/run.yml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/tests/run.yml b/tests/run.yml index f050977a88..cfd55ebba5 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -50,17 +50,6 @@ when: - scenario == "lets-encrypt" - # NOTE(kevko): Rocky Linux has Python 3.9 as the default, but we want to use Python 3.12 instead. - - name: Install Python3.12 and dependencies on RHEL derivatives - dnf: - name: - - python3.12 - - python3.12-devel - - python3.12-pip - state: latest - when: ansible_facts.os_family == 'RedHat' - become: true - # NOTE(kevko): While in Rocky, virtualenv is included as part of the packages above, in Debuntu, virtualenv is in a separate package. 
- name: Install python3-virtualenv package: From 05bec68a3847bae311e9438a26d529c647011322 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 4 Jun 2025 07:44:16 +0100 Subject: [PATCH 084/165] ansible-lint: Fix yaml[octal-values] Change-Id: I0c55f924489657ddcdb2957070cd69563c6d9886 Signed-off-by: Michal Nasiadka --- .ansible-lint | 1 - ansible/post-deploy.yml | 10 +++++----- ansible/roles/horizon/tasks/config.yml | 2 +- ansible/roles/mariadb/tasks/recover_cluster.yml | 2 +- ansible/roles/neutron/tasks/config.yml | 2 +- ansible/roles/octavia-certificates/tasks/client_ca.yml | 4 ++-- ansible/roles/octavia-certificates/tasks/main.yml | 4 ++-- ansible/roles/octavia/tasks/hm-interface.yml | 2 +- ansible/roles/octavia/tasks/openrc.yml | 2 +- ansible/roles/prometheus/tasks/config.yml | 2 +- ansible/roles/skyline/tasks/config.yml | 2 +- 11 files changed, 16 insertions(+), 17 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index f682b65cfb..48663ad432 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -38,4 +38,3 @@ skip_list: - jinja[spacing] - yaml[truthy] - yaml[line-length] - - yaml[octal-values] diff --git a/ansible/post-deploy.yml b/ansible/post-deploy.yml index acc78cd6f2..3629b1d3dc 100644 --- a/ansible/post-deploy.yml +++ b/ansible/post-deploy.yml @@ -22,7 +22,7 @@ dest: "{{ node_config }}/clouds.yaml" owner: "{{ ansible_facts.user_uid }}" group: "{{ ansible_facts.user_gid }}" - mode: 0600 + mode: "0600" - name: Creating admin openrc file on the deploy node hosts: localhost @@ -34,7 +34,7 @@ dest: "{{ node_config }}/admin-openrc.sh" owner: "{{ ansible_facts.user_uid }}" group: "{{ ansible_facts.user_gid }}" - mode: 0600 + mode: "0600" - name: Template out admin-openrc-system.sh become: "{{ needs_root }}" @@ -43,7 +43,7 @@ dest: "{{ node_config }}/admin-openrc-system.sh" owner: "{{ ansible_facts.user_uid }}" group: "{{ ansible_facts.user_gid }}" - mode: 0600 + mode: "0600" - name: Template out public-openrc.sh become: "{{ needs_root }}" @@ -52,7 +52,7 
@@ dest: "{{ node_config }}/public-openrc.sh" owner: "{{ ansible_facts.user_uid }}" group: "{{ ansible_facts.user_gid }}" - mode: 0600 + mode: "0600" - name: Template out public-openrc-system.sh become: "{{ needs_root }}" @@ -61,7 +61,7 @@ dest: "{{ node_config }}/public-openrc-system.sh" owner: "{{ ansible_facts.user_uid }}" group: "{{ ansible_facts.user_gid }}" - mode: 0600 + mode: "0600" - import_role: name: octavia diff --git a/ansible/roles/horizon/tasks/config.yml b/ansible/roles/horizon/tasks/config.yml index 7fece46ebd..b052764095 100644 --- a/ansible/roles/horizon/tasks/config.yml +++ b/ansible/roles/horizon/tasks/config.yml @@ -126,7 +126,7 @@ copy: src: "{{ node_custom_config }}/horizon/themes/{{ item.name }}" dest: "{{ node_config_directory }}/horizon/themes/" - mode: 0660 + mode: "0660" when: - service | service_enabled_and_mapped_to_host - horizon_custom_themes | length > 0 diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml index 165aa3963b..b2eae10089 100644 --- a/ansible/roles/mariadb/tasks/recover_cluster.yml +++ b/ansible/roles/mariadb/tasks/recover_cluster.yml @@ -82,7 +82,7 @@ template: src: "hostname.j2" dest: "{{ mariadb_recover_tmp_file_path }}" - mode: 0644 + mode: "0644" delegate_to: localhost connection: local changed_when: false diff --git a/ansible/roles/neutron/tasks/config.yml b/ansible/roles/neutron/tasks/config.yml index 18b8f3991d..9c5a9702b5 100644 --- a/ansible/roles/neutron/tasks/config.yml +++ b/ansible/roles/neutron/tasks/config.yml @@ -119,7 +119,7 @@ template: src: "id_rsa" dest: "{{ node_config_directory }}/neutron-server/id_rsa" - mode: 0600 + mode: "0600" when: service | service_enabled_and_mapped_to_host - name: Copying over ml2_conf.ini diff --git a/ansible/roles/octavia-certificates/tasks/client_ca.yml b/ansible/roles/octavia-certificates/tasks/client_ca.yml index b9cd711e18..08c0214746 100644 --- a/ansible/roles/octavia-certificates/tasks/client_ca.yml +++ 
b/ansible/roles/octavia-certificates/tasks/client_ca.yml @@ -5,14 +5,14 @@ content: '' dest: "{{ octavia_certs_work_dir }}/client_ca/index.txt" force: no - mode: 0660 + mode: "0660" - name: Create client_ca serial copy: content: "1000\n" dest: "{{ octavia_certs_work_dir }}/client_ca/serial" force: no - mode: 0660 + mode: "0660" - name: Create client_ca private key command: > diff --git a/ansible/roles/octavia-certificates/tasks/main.yml b/ansible/roles/octavia-certificates/tasks/main.yml index 9ba737b2bd..b7903972ee 100644 --- a/ansible/roles/octavia-certificates/tasks/main.yml +++ b/ansible/roles/octavia-certificates/tasks/main.yml @@ -16,7 +16,7 @@ file: path: "{{ octavia_certs_work_dir }}/{{ item }}" state: "directory" - mode: 0770 + mode: "0770" loop: - server_ca - client_ca @@ -36,7 +36,7 @@ file: path: "{{ node_custom_config }}/octavia" state: "directory" - mode: 0770 + mode: "0770" - name: Copy the to-be-deployed keys and certs to {{ node_custom_config }}/octavia copy: diff --git a/ansible/roles/octavia/tasks/hm-interface.yml b/ansible/roles/octavia/tasks/hm-interface.yml index d1900cc2bb..2a547ffd07 100644 --- a/ansible/roles/octavia/tasks/hm-interface.yml +++ b/ansible/roles/octavia/tasks/hm-interface.yml @@ -59,7 +59,7 @@ request subnet-mask,broadcast-address,interface-mtu; do-forward-updates false; dest: /etc/dhcp/octavia-dhclient.conf - mode: 0664 + mode: "0664" - name: Create octavia-interface service become: True diff --git a/ansible/roles/octavia/tasks/openrc.yml b/ansible/roles/octavia/tasks/openrc.yml index 4455ebfdc2..8380bebe9b 100644 --- a/ansible/roles/octavia/tasks/openrc.yml +++ b/ansible/roles/octavia/tasks/openrc.yml @@ -6,4 +6,4 @@ dest: "{{ node_config }}/octavia-openrc.sh" owner: "{{ ansible_facts.user_uid }}" group: "{{ ansible_facts.user_gid }}" - mode: 0600 + mode: "0600" diff --git a/ansible/roles/prometheus/tasks/config.yml b/ansible/roles/prometheus/tasks/config.yml index 787400c4d3..8c7ba5a6bd 100644 --- 
a/ansible/roles/prometheus/tasks/config.yml +++ b/ansible/roles/prometheus/tasks/config.yml @@ -128,7 +128,7 @@ copy: src: "{{ item.path }}" dest: "{{ node_config_directory }}/prometheus-alertmanager/{{ item.path | basename }}" - mode: 0660 + mode: "0660" when: - service | service_enabled_and_mapped_to_host - alertmanager_notification_templates is defined and alertmanager_notification_templates.files | length > 0 diff --git a/ansible/roles/skyline/tasks/config.yml b/ansible/roles/skyline/tasks/config.yml index f84dc1cde3..58c1d97928 100644 --- a/ansible/roles/skyline/tasks/config.yml +++ b/ansible/roles/skyline/tasks/config.yml @@ -64,7 +64,7 @@ copy: src: "{{ node_custom_config }}/skyline/logos/{{ item }}" dest: "{{ node_config_directory }}/skyline-console/logos/" - mode: 0660 + mode: "0660" when: - service | service_enabled_and_mapped_to_host - skyline_custom_logos | length > 0 From 66338d574d0ffdcfa588b4cc877a90e3a71cd417 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 5 Jun 2025 21:53:14 +0100 Subject: [PATCH 085/165] CI: ansible-lint: Move exclude paths to .ansible-lint Change-Id: Ide4387d3a041ee424bc7f62fb4ddb38ff3e7cb52 Signed-off-by: Michal Nasiadka --- .ansible-lint | 5 +++++ tox.ini | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.ansible-lint b/.ansible-lint index f682b65cfb..ab09a99080 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -1,4 +1,9 @@ --- +exclude_paths: + - etc + - roles + - tests + - zuul.d strict: true use_default_rules: true skip_list: diff --git a/tox.ini b/tox.ini index b57346006e..054b84301d 100644 --- a/tox.ini +++ b/tox.ini @@ -158,4 +158,4 @@ setenv = {[testenv:linters]setenv} deps = {[testenv:linters]deps} commands = python {toxinidir}/tools/validate-all-file.py - ansible-lint -p --exclude tests --exclude roles --exclude etc --exclude zuul.d + ansible-lint -p From 68852492196e5797deb3077f63777e062bf12d16 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 6 Nov 2025 15:16:16 +0100 Subject: 
[PATCH 086/165] CI: Use main Rocky mirror Change-Id: Iaff207e741c5acd56c8c3f29b19666b2599fb7c0 Signed-off-by: Michal Nasiadka --- tests/pre.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/pre.yml b/tests/pre.yml index 1a666e97d1..1480609b36 100644 --- a/tests/pre.yml +++ b/tests/pre.yml @@ -52,6 +52,12 @@ - "veth-{{ neutron_external_bridge_name }}-ext" bridge_name: "{{ neutron_external_bridge_name }}" tasks: + - name: Set Rockylinux mirror to download.rockylinux.org + become: true + ansible.builtin.shell: + cmd: sed -i 's/mirrorlist/#mirrorlist/g; s/#baseurl/baseurl/g' /etc/yum.repos.d/rocky.repo + when: ansible_facts.distribution == "Rocky" + # NOTE(yoctozepto): we use gawk to add time to each logged line # outside of Ansible (e.g. for init-runonce) - name: Install gawk and required Python modules From 86c732dfabeb8cc9b116da758b3c55eb5d8a2b43 Mon Sep 17 00:00:00 2001 From: Grzegorz Koper Date: Thu, 4 Sep 2025 16:56:24 +0200 Subject: [PATCH 087/165] Replace Redis with Valkey This change replaces Redis with Valkey, as Redis is non-existent on Rocky Linux 10 / CentOS Stream 10 due to licensing issues. 
Co-Authored-By: Bartosz Bezak Co-Authored-By: Michal Nasiadka Change-Id: Ifcc6ec6e943c20867d969b0bdb0e4cbac53eea1e Signed-off-by: Grzegorz Koper Signed-off-by: Bartosz Bezak Signed-off-by: Michal Nasiadka --- README.rst | 2 +- ansible/group_vars/all/cinder.yml | 4 +- ansible/group_vars/all/common.yml | 2 +- ansible/group_vars/all/designate.yml | 4 +- ansible/group_vars/all/gnocchi.yml | 4 +- ansible/group_vars/all/ironic.yml | 2 +- ansible/group_vars/all/masakari.yml | 2 +- ansible/group_vars/all/redis.yml | 11 -- ansible/group_vars/all/valkey.yml | 30 ++++ ansible/inventory/all-in-one | 2 +- ansible/inventory/multinode | 2 +- ansible/roles/cinder/tasks/precheck.yml | 2 +- ansible/roles/cinder/templates/cinder.conf.j2 | 4 +- ansible/roles/cron/tasks/config.yml | 2 +- .../templates/cron-logrotate-redis.conf.j2 | 3 - .../templates/cron-logrotate-valkey.conf.j2 | 3 + .../designate/templates/designate.conf.j2 | 4 +- ansible/roles/fluentd/tasks/config.yml | 2 + .../templates/conf/input/14-valkey.conf.j2 | 16 +++ .../roles/gnocchi/templates/gnocchi.conf.j2 | 8 +- ansible/roles/masakari/tasks/precheck.yml | 8 ++ .../roles/masakari/templates/masakari.conf.j2 | 4 +- .../roles/mistral/templates/mistral.conf.j2 | 2 +- ansible/roles/octavia/tasks/precheck.yml | 6 +- .../roles/octavia/templates/octavia.conf.j2 | 9 +- .../roles/prechecks/tasks/service_checks.yml | 7 + ansible/roles/prometheus/defaults/main.yml | 4 +- ansible/roles/redis/defaults/main.yml | 78 ---------- ansible/roles/redis/tasks/check.yml | 11 -- ansible/roles/redis/tasks/precheck.yml | 29 ---- ansible/roles/redis/tasks/upgrade.yml | 7 - .../redis/templates/redis-sentinel.conf.j2 | 12 -- .../redis/templates/redis-sentinel.json.j2 | 22 --- ansible/roles/redis/templates/redis.conf.j2 | 55 ------- ansible/roles/redis/templates/redis.json.j2 | 18 --- ansible/roles/redis/vars/main.yml | 2 - .../roles/telegraf/templates/telegraf.conf.j2 | 4 +- ansible/roles/valkey/defaults/main.yml | 78 ++++++++++ .../roles/{redis => 
valkey}/handlers/main.yml | 12 +- .../tasks/check-containers.yml | 0 ansible/roles/valkey/tasks/check.yml | 13 ++ .../roles/{redis => valkey}/tasks/config.yml | 10 +- .../tasks/config_validate.yml | 0 .../tasks/deploy-containers.yml | 0 .../roles/{redis => valkey}/tasks/deploy.yml | 0 .../roles/{redis => valkey}/tasks/main.yml | 0 ansible/roles/valkey/tasks/precheck.yml | 45 ++++++ .../roles/{redis => valkey}/tasks/pull.yml | 0 .../{redis => valkey}/tasks/reconfigure.yml | 0 .../roles/{redis => valkey}/tasks/stop.yml | 2 +- ansible/roles/valkey/tasks/upgrade.yml | 134 ++++++++++++++++++ .../valkey/templates/valkey-sentinel.conf.j2 | 12 ++ .../valkey/templates/valkey-sentinel.json.j2 | 22 +++ .../valkey/templates/valkey-server.conf.j2 | 29 ++++ .../valkey/templates/valkey-server.json.j2 | 18 +++ ansible/roles/valkey/vars/main.yml | 2 + ansible/site.yml | 14 +- doc/source/admin/password-rotation.rst | 2 +- .../reference/networking/designate-guide.rst | 6 +- etc/kolla/globals.yml | 20 +-- etc/kolla/passwords.yml | 4 +- ...ce-redis-with-valkey-8f60e01c2460a301.yaml | 12 ++ tests/check-logs.sh | 1 + tests/templates/globals-default.j2 | 23 ++- tests/templates/inventory.j2 | 4 + zuul.d/scenarios/cephadm.yaml | 6 +- zuul.d/scenarios/masakari.yaml | 5 +- zuul.d/scenarios/nfv.yaml | 6 +- zuul.d/scenarios/octavia.yaml | 6 +- zuul.d/scenarios/ovn.yaml | 1 - zuul.d/scenarios/telemetry.yaml | 5 +- 71 files changed, 542 insertions(+), 337 deletions(-) delete mode 100644 ansible/group_vars/all/redis.yml create mode 100644 ansible/group_vars/all/valkey.yml delete mode 100644 ansible/roles/cron/templates/cron-logrotate-redis.conf.j2 create mode 100644 ansible/roles/cron/templates/cron-logrotate-valkey.conf.j2 create mode 100644 ansible/roles/fluentd/templates/conf/input/14-valkey.conf.j2 delete mode 100644 ansible/roles/redis/defaults/main.yml delete mode 100644 ansible/roles/redis/tasks/check.yml delete mode 100644 ansible/roles/redis/tasks/precheck.yml delete mode 100644 
ansible/roles/redis/tasks/upgrade.yml delete mode 100644 ansible/roles/redis/templates/redis-sentinel.conf.j2 delete mode 100644 ansible/roles/redis/templates/redis-sentinel.json.j2 delete mode 100644 ansible/roles/redis/templates/redis.conf.j2 delete mode 100644 ansible/roles/redis/templates/redis.json.j2 delete mode 100644 ansible/roles/redis/vars/main.yml create mode 100644 ansible/roles/valkey/defaults/main.yml rename ansible/roles/{redis => valkey}/handlers/main.yml (74%) rename ansible/roles/{redis => valkey}/tasks/check-containers.yml (100%) create mode 100644 ansible/roles/valkey/tasks/check.yml rename ansible/roles/{redis => valkey}/tasks/config.yml (59%) rename ansible/roles/{redis => valkey}/tasks/config_validate.yml (100%) rename ansible/roles/{redis => valkey}/tasks/deploy-containers.yml (100%) rename ansible/roles/{redis => valkey}/tasks/deploy.yml (100%) rename ansible/roles/{redis => valkey}/tasks/main.yml (100%) create mode 100644 ansible/roles/valkey/tasks/precheck.yml rename ansible/roles/{redis => valkey}/tasks/pull.yml (100%) rename ansible/roles/{redis => valkey}/tasks/reconfigure.yml (100%) rename ansible/roles/{redis => valkey}/tasks/stop.yml (65%) create mode 100644 ansible/roles/valkey/tasks/upgrade.yml create mode 100644 ansible/roles/valkey/templates/valkey-sentinel.conf.j2 create mode 100644 ansible/roles/valkey/templates/valkey-sentinel.json.j2 create mode 100644 ansible/roles/valkey/templates/valkey-server.conf.j2 create mode 100644 ansible/roles/valkey/templates/valkey-server.json.j2 create mode 100644 ansible/roles/valkey/vars/main.yml create mode 100644 releasenotes/notes/replace-redis-with-valkey-8f60e01c2460a301.yaml diff --git a/README.rst b/README.rst index 4ee2c7dd04..78058b5d21 100644 --- a/README.rst +++ b/README.rst @@ -93,7 +93,7 @@ Kolla Ansible deploys containers for the following infrastructure components: - `Open vSwitch `__ for use with Neutron. 
- `RabbitMQ `__ as a messaging backend for communication between services. -- `Redis `__ an in-memory data structure store. +- `Valkey `__ an in-memory data structure store. Directories =========== diff --git a/ansible/group_vars/all/cinder.yml b/ansible/group_vars/all/cinder.yml index e4659674ea..51948a18ad 100644 --- a/ansible/group_vars/all/cinder.yml +++ b/ansible/group_vars/all/cinder.yml @@ -19,8 +19,8 @@ cinder_backend_huawei: "no" cinder_backend_huawei_xml_files: [] cinder_volume_group: "cinder-volumes" cinder_target_helper: "{{ 'lioadm' if ansible_facts.os_family == 'RedHat' else 'tgtadm' }}" -# Valid options are [ '', redis, etcd ] -cinder_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" +# Valid options are [ '', valkey, etcd ] +cinder_coordination_backend: "{{ 'valkey' if enable_valkey | bool else 'etcd' if enable_etcd | bool else '' }}" # Valid options are [ nfs, ceph, s3 ] cinder_backup_driver: "ceph" diff --git a/ansible/group_vars/all/common.yml b/ansible/group_vars/all/common.yml index 4a82c29ea3..a32d3154c2 100644 --- a/ansible/group_vars/all/common.yml +++ b/ansible/group_vars/all/common.yml @@ -331,7 +331,7 @@ enable_osprofiler: "no" # valid values: ["elasticsearch", "redis"] osprofiler_backend: "elasticsearch" opensearch_connection_string: "elasticsearch://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}" -osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else opensearch_connection_string }}" +osprofiler_backend_connection_string: "{{ valkey_connection_string if osprofiler_backend in ['redis', 'valkey'] else opensearch_connection_string }}" ###################### # Backend TLS options diff --git a/ansible/group_vars/all/designate.yml b/ansible/group_vars/all/designate.yml index e9916c5420..343472a165 100644 --- a/ansible/group_vars/all/designate.yml +++ b/ansible/group_vars/all/designate.yml @@ -12,8 +12,8 @@ 
designate_ns_record: - "ns1.example.org" designate_backend_external: "no" designate_backend_external_bind9_nameservers: "" -# Valid options are [ '', redis ] -designate_coordination_backend: "{{ 'redis' if enable_redis | bool else '' }}" +# Valid options are [ '', valkey ] +designate_coordination_backend: "{{ 'valkey' if enable_valkey | bool else '' }}" designate_enable_notifications_sink: "no" designate_notifications_topic_name: "notifications_designate" diff --git a/ansible/group_vars/all/gnocchi.yml b/ansible/group_vars/all/gnocchi.yml index f103736b43..3792bf7d08 100644 --- a/ansible/group_vars/all/gnocchi.yml +++ b/ansible/group_vars/all/gnocchi.yml @@ -8,8 +8,8 @@ enable_gnocchi_statsd: "no" # Valid options are [ file, ceph ] gnocchi_backend_storage: "file" -# Valid options are [redis, ''] -gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}" +# Valid options are [valkey, ''] +gnocchi_incoming_storage: "{{ 'valkey' if enable_valkey | bool else '' }}" gnocchi_metric_datadir_volume: "gnocchi" gnocchi_internal_fqdn: "{{ kolla_internal_fqdn }}" diff --git a/ansible/group_vars/all/ironic.yml b/ansible/group_vars/all/ironic.yml index 1acb6e7ab1..97df0fe71d 100644 --- a/ansible/group_vars/all/ironic.yml +++ b/ansible/group_vars/all/ironic.yml @@ -9,7 +9,7 @@ enable_ironic_pxe_filter: "no" ironic_keystone_user: "ironic" # Coordination backend -ironic_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" +ironic_coordination_backend: "{{ 'valkey' if enable_valkey | bool else 'etcd' if enable_etcd | bool else '' }}" # Network interfaces ironic_http_interface: "{{ api_interface }}" diff --git a/ansible/group_vars/all/masakari.yml b/ansible/group_vars/all/masakari.yml index bef390e719..1f3d117e15 100644 --- a/ansible/group_vars/all/masakari.yml +++ b/ansible/group_vars/all/masakari.yml @@ -10,4 +10,4 @@ masakari_public_endpoint: "{{ masakari_external_fqdn | kolla_url(public_protocol masakari_api_port: 
"15868" masakari_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else masakari_api_port }}" masakari_api_listen_port: "{{ masakari_api_port }}" -masakari_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" +masakari_coordination_backend: "{{ 'valkey' if enable_valkey | bool else 'etcd' if enable_etcd | bool else '' }}" diff --git a/ansible/group_vars/all/redis.yml b/ansible/group_vars/all/redis.yml deleted file mode 100644 index bea98be6e2..0000000000 --- a/ansible/group_vars/all/redis.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -enable_redis: "no" - -#################### -# Redis options -#################### -redis_connection_string: "redis://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}default:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}?sentinel=kolla{% else %}&sentinel_fallback={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}{{ redis_connection_string_extras }}" -redis_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes" - -redis_port: "6379" -redis_sentinel_port: "26379" diff --git a/ansible/group_vars/all/valkey.yml b/ansible/group_vars/all/valkey.yml new file mode 100644 index 0000000000..c970de6cec --- /dev/null +++ b/ansible/group_vars/all/valkey.yml @@ -0,0 +1,30 @@ +--- +enable_valkey: "no" + +valkey_connection_string: >- + redis://{%- + for host in groups['valkey'] -%} + {%- if host == groups['valkey'][0] -%} + default:{{- valkey_master_password -}}@{{- + 'api' | kolla_address(host) | put_address_in_context('url') + -}}:{{- valkey_sentinel_port -}}?sentinel={{- valkey_sentinel_monitor_name -}} + {%- else -%} + &sentinel_fallback={{- + 'api' | kolla_address(host) | put_address_in_context('url') + -}}:{{- valkey_sentinel_port -}} + {%- endif -%} + {%- endfor -%} + {{- 
valkey_connection_string_extras -}} + +valkey_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes" + +# TODO(mnasiadka): Remove in G/2026.1 +# NOTE(mnasiadka): These can't reference valkey_server_port or valkey_sentinel_port since these +# are changed during migration +redis_port: "6379" +redis_sentinel_port: "26379" + +valkey_server_port: "6379" +valkey_sentinel_port: "26379" +valkey_sentinel_monitor_name: "kolla" +valkey_sentinel_quorum: 2 # TODO (gkoper) Discuss and settle on some sane defaults here diff --git a/ansible/inventory/all-in-one b/ansible/inventory/all-in-one index 6ab9227f5b..4c6c23670c 100644 --- a/ansible/inventory/all-in-one +++ b/ansible/inventory/all-in-one @@ -159,7 +159,7 @@ control [skyline:children] control -[redis:children] +[valkey:children] control [blazar:children] diff --git a/ansible/inventory/multinode b/ansible/inventory/multinode index 972e4059e5..a2cba879a8 100644 --- a/ansible/inventory/multinode +++ b/ansible/inventory/multinode @@ -177,7 +177,7 @@ control [skyline:children] control -[redis:children] +[valkey:children] control [blazar:children] diff --git a/ansible/roles/cinder/tasks/precheck.yml b/ansible/roles/cinder/tasks/precheck.yml index 21f3d93099..8f4fdff4a8 100644 --- a/ansible/roles/cinder/tasks/precheck.yml +++ b/ansible/roles/cinder/tasks/precheck.yml @@ -58,7 +58,7 @@ - name: Checking for coordination backend if Ceph backend is enabled run_once: True fail: - msg: "Please enable redis or etcd when using Cinder Ceph backend" + msg: "Please enable valkey or etcd when using Cinder Ceph backend" when: - not skip_cinder_backend_check | bool - cinder_backend_ceph | bool diff --git a/ansible/roles/cinder/templates/cinder.conf.j2 b/ansible/roles/cinder/templates/cinder.conf.j2 index df6da4851e..92cd83a884 100644 --- a/ansible/roles/cinder/templates/cinder.conf.j2 +++ b/ansible/roles/cinder/templates/cinder.conf.j2 @@ -247,8 +247,8 @@ verify_ssl_path = {{ openstack_cacert }} {% endif %} [coordination] -{% 
if cinder_coordination_backend == 'redis' %} -backend_url = {{ redis_connection_string }} +{% if cinder_coordination_backend == 'valkey' %} +backend_url = {{ valkey_connection_string }} {% elif cinder_coordination_backend == 'etcd' %} # NOTE(yoctozepto): we must use etcd3gw (aka etcd3+http) due to issues with alternative (etcd3) and eventlet (as used by cinder) # see https://bugs.launchpad.net/kolla-ansible/+bug/1854932 diff --git a/ansible/roles/cron/tasks/config.yml b/ansible/roles/cron/tasks/config.yml index e1a5e30e90..caebee58cf 100644 --- a/ansible/roles/cron/tasks/config.yml +++ b/ansible/roles/cron/tasks/config.yml @@ -71,7 +71,7 @@ - { name: "prometheus", enabled: "{{ enable_prometheus | bool }}" } - { name: "proxysql", enabled: "{{ enable_proxysql | bool }}" } - { name: "rabbitmq", enabled: "{{ enable_rabbitmq | bool }}" } - - { name: "redis", enabled: "{{ enable_redis | bool }}" } + - { name: "valkey", enabled: "{{ enable_valkey | bool }}" } - { name: "skyline", enabled: "{{ enable_skyline | bool }}" } - { name: "tacker", enabled: "{{ enable_tacker | bool }}" } - { name: "trove", enabled: "{{ enable_trove | bool }}" } diff --git a/ansible/roles/cron/templates/cron-logrotate-redis.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-redis.conf.j2 deleted file mode 100644 index 9fb4c5a500..0000000000 --- a/ansible/roles/cron/templates/cron-logrotate-redis.conf.j2 +++ /dev/null @@ -1,3 +0,0 @@ -"/var/log/kolla/redis/*.log" -{ -} diff --git a/ansible/roles/cron/templates/cron-logrotate-valkey.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-valkey.conf.j2 new file mode 100644 index 0000000000..f59111e1d4 --- /dev/null +++ b/ansible/roles/cron/templates/cron-logrotate-valkey.conf.j2 @@ -0,0 +1,3 @@ +"/var/log/kolla/valkey/*.log" +{ +} diff --git a/ansible/roles/designate/templates/designate.conf.j2 b/ansible/roles/designate/templates/designate.conf.j2 index 7f56b71214..2ec55f8270 100644 --- a/ansible/roles/designate/templates/designate.conf.j2 +++ 
b/ansible/roles/designate/templates/designate.conf.j2 @@ -118,8 +118,8 @@ policy_file = {{ designate_policy_file }} {% endif %} [coordination] -{% if designate_coordination_backend == 'redis' %} -backend_url = {{ redis_connection_string }} +{% if designate_coordination_backend == 'valkey' %} +backend_url = {{ valkey_connection_string }} {% endif %} {# NOTE(yoctozepto): etcd is not supported due to lack of group membership diff --git a/ansible/roles/fluentd/tasks/config.yml b/ansible/roles/fluentd/tasks/config.yml index 83a5504f3f..d6c8cb649f 100644 --- a/ansible/roles/fluentd/tasks/config.yml +++ b/ansible/roles/fluentd/tasks/config.yml @@ -98,6 +98,8 @@ enabled: "{{ enable_fluentd_systemd | bool }}" - name: "conf/input/13-uwsgi.conf.j2" enabled: true + - name: "conf/input/14-valkey.conf.j2" + enabled: "{{ enable_valkey | bool }}" customised_input_files: "{{ find_custom_fluentd_inputs.files | map(attribute='path') | list }}" # Filters fluentd_filter_files: "{{ default_filter_files | customise_fluentd(customised_filter_files) }}" diff --git a/ansible/roles/fluentd/templates/conf/input/14-valkey.conf.j2 b/ansible/roles/fluentd/templates/conf/input/14-valkey.conf.j2 new file mode 100644 index 0000000000..113de855c5 --- /dev/null +++ b/ansible/roles/fluentd/templates/conf/input/14-valkey.conf.j2 @@ -0,0 +1,16 @@ +#jinja2: trim_blocks: False + + @type tail + path /var/log/kolla/valkey/valkey.log,/var/log/kolla/valkey/valkey-sentinel.log + pos_file /var/run/fluentd/kolla-valkey.pos + tag infra.valkey + ignore_repeated_permission_error true + enable_watch_timer {{ fluentd_enable_watch_timer }} + + @type regexp + expression /^(?\d+):(?[A-Za-z-]+) (?:(?\d+ \w+ \d+ \d+:\d+:\d+\.\d+) (?.) 
)?(?.*)$/ + time_key Timestamp + keep_time_key true + time_format %c + + diff --git a/ansible/roles/gnocchi/templates/gnocchi.conf.j2 b/ansible/roles/gnocchi/templates/gnocchi.conf.j2 index 643f2d35b7..56f74d761e 100644 --- a/ansible/roles/gnocchi/templates/gnocchi.conf.j2 +++ b/ansible/roles/gnocchi/templates/gnocchi.conf.j2 @@ -3,8 +3,8 @@ debug = {{ gnocchi_logging_debug }} log_dir = /var/log/kolla/gnocchi -{% if enable_redis | bool %} -coordination_url = {{ redis_connection_string }} +{% if enable_valkey | bool %} +coordination_url = {{ valkey_connection_string }} {% endif %} {% if service_name == 'gnocchi-api' %} @@ -66,9 +66,9 @@ policy_file = {{ gnocchi_policy_file }} {% endif %} [incoming] -{% if gnocchi_incoming_storage == 'redis' %} +{% if gnocchi_incoming_storage == 'valkey' %} driver = redis -redis_url = {{ redis_connection_string }} +redis_url = {{ valkey_connection_string }} {% endif %} [storage] diff --git a/ansible/roles/masakari/tasks/precheck.yml b/ansible/roles/masakari/tasks/precheck.yml index e3faf09082..9db46ab9fb 100644 --- a/ansible/roles/masakari/tasks/precheck.yml +++ b/ansible/roles/masakari/tasks/precheck.yml @@ -25,3 +25,11 @@ when: - container_facts.containers['masakari_api'] is not defined - inventory_hostname in groups['masakari-api'] + +- name: Checking for coordination backend if running in multinode setup + run_once: True + fail: + msg: "Please enable Valkey or etcd when running in multinode scenario." 
+ when: + - masakari_coordination_backend == '' + - groups['masakari'] | length > 1 diff --git a/ansible/roles/masakari/templates/masakari.conf.j2 b/ansible/roles/masakari/templates/masakari.conf.j2 index b84e0f0eb4..d3f6a8721a 100644 --- a/ansible/roles/masakari/templates/masakari.conf.j2 +++ b/ansible/roles/masakari/templates/masakari.conf.j2 @@ -84,8 +84,8 @@ connection = mysql+pymysql://{{ masakari_database_user }}:{{ masakari_database_p {% if service_name == 'masakari-api' %} [coordination] -{% if masakari_coordination_backend == 'redis' %} -backend_url = {{ redis_connection_string }} +{% if masakari_coordination_backend == 'valkey' %} +backend_url = {{ valkey_connection_string }} {% elif masakari_coordination_backend == 'etcd' %} # NOTE(jan.gutter): etcd v3.4 removed the default `v3alpha` api_version. Until # tooz defaults to a newer version, we should explicitly specify `v3` diff --git a/ansible/roles/mistral/templates/mistral.conf.j2 b/ansible/roles/mistral/templates/mistral.conf.j2 index 58de8cfc7e..548200bb22 100644 --- a/ansible/roles/mistral/templates/mistral.conf.j2 +++ b/ansible/roles/mistral/templates/mistral.conf.j2 @@ -100,7 +100,7 @@ connection_string = {{ osprofiler_backend_connection_string }} {% endif %} [coordination] -backend_url = {{ redis_connection_string }} +backend_url = {{ valkey_connection_string }} [oslo_concurrency] lock_path = /var/lib/mistral/tmp diff --git a/ansible/roles/octavia/tasks/precheck.yml b/ansible/roles/octavia/tasks/precheck.yml index 51b4a0be42..23cf4d431b 100644 --- a/ansible/roles/octavia/tasks/precheck.yml +++ b/ansible/roles/octavia/tasks/precheck.yml @@ -65,10 +65,10 @@ - octavia_network_type == "tenant" - neutron_plugin_agent != 'openvswitch' -- name: Checking whether Redis is enabled for octavia jobboard +- name: Checking whether Valkey is enabled for Octavia Jobboard assert: - that: enable_redis | bool - fail_msg: "Redis must be enabled when using octavia jobboard" + that: enable_valkey | bool + fail_msg: 
"Valkey must be enabled when using Octavia Jobboard" run_once: True when: - enable_octavia_jobboard | bool diff --git a/ansible/roles/octavia/templates/octavia.conf.j2 b/ansible/roles/octavia/templates/octavia.conf.j2 index 586957f303..9f251ffa94 100644 --- a/ansible/roles/octavia/templates/octavia.conf.j2 +++ b/ansible/roles/octavia/templates/octavia.conf.j2 @@ -168,16 +168,15 @@ ca_certificates_file = {{ openstack_cacert }} {% if enable_octavia_jobboard | bool %} [task_flow] -jobboard_backend_hosts = {% for host in groups['redis'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}{% if not loop.last %},{% endif %}{% endfor %} -jobboard_backend_password = {{ redis_master_password }} -jobboard_backend_port = {{ redis_sentinel_port }} +jobboard_backend_hosts = {% for host in groups['valkey'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}{% if not loop.last %},{% endif %}{% endfor %} +jobboard_backend_password = {{ valkey_master_password }} +jobboard_backend_port = {{ valkey_sentinel_port }} jobboard_backend_username = default jobboard_enabled = true jobboard_redis_backend_ssl_options = ssl:False -jobboard_redis_sentinel = kolla +jobboard_redis_sentinel = {{ valkey_sentinel_monitor_name }} jobboard_redis_sentinel_ssl_options = ssl:False persistence_connection = mysql+pymysql://{{ octavia_persistence_database_user }}:{{ octavia_persistence_database_password }}@{{ octavia_persistence_database_address }}/{{ octavia_persistence_database_name }}{{ '?ssl_ca=' ~ openstack_cacert if octavia_database_enable_tls_internal | bool }} -jobboard_enabled = true {% endif %} [oslo_concurrency] diff --git a/ansible/roles/prechecks/tasks/service_checks.yml b/ansible/roles/prechecks/tasks/service_checks.yml index 5f3ceef4b4..3f2a711da1 100644 --- a/ansible/roles/prechecks/tasks/service_checks.yml +++ b/ansible/roles/prechecks/tasks/service_checks.yml @@ -63,3 +63,10 @@ msg: "We are sorry but enable_ceph is no longer supported. 
Please use external ceph support." when: - (enable_ceph | default()) | bool + +- name: Validate that enable_redis is disabled + run_once: True + assert: + that: + - not (enable_redis | default(false)) | bool + msg: "Redis support has been replaced with Valkey, please set enable_redis to no and enable_valkey to yes" diff --git a/ansible/roles/prometheus/defaults/main.yml b/ansible/roles/prometheus/defaults/main.yml index 27affe4472..147e241a6d 100644 --- a/ansible/roles/prometheus/defaults/main.yml +++ b/ansible/roles/prometheus/defaults/main.yml @@ -318,8 +318,8 @@ prometheus_blackbox_exporter_endpoints_default: enabled: "{{ enable_prometheus_alertmanager | bool }}" - endpoints: "{% set rabbitmq_endpoints = [] %}{% for host in groups.get('rabbitmq', []) %}{{ rabbitmq_endpoints.append('rabbitmq_' + host + (':tls_connect:' if rabbitmq_enable_tls | bool else ':tcp_connect:') + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['rabbitmq_port'] ) }}{% endfor %}{{ rabbitmq_endpoints }}" enabled: "{{ enable_rabbitmq | bool }}" - - endpoints: "{% set redis_endpoints = [] %}{% for host in groups.get('redis', []) %}{{ redis_endpoints.append('redis_' + host + ':tcp_connect:' + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['redis_port']) }}{% endfor %}{{ redis_endpoints }}" - enabled: "{{ enable_redis | bool }}" + - endpoints: "{% set valkey_endpoints = [] %}{% for host in groups.get('valkey', []) %}{{ valkey_endpoints.append('valkey_' + host + ':tcp_connect:' + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['valkey_server_port']) }}{% endfor %}{{ valkey_endpoints }}" + enabled: "{{ enable_valkey | bool }}" prometheus_blackbox_exporter_endpoints_custom: [] diff --git a/ansible/roles/redis/defaults/main.yml b/ansible/roles/redis/defaults/main.yml deleted file mode 100644 index 6d3af49f09..0000000000 --- a/ansible/roles/redis/defaults/main.yml +++ /dev/null @@ -1,78 
+0,0 @@ ---- -redis_services: - redis: - container_name: redis - group: redis - enabled: true - image: "{{ redis_image_full }}" - volumes: "{{ redis_default_volumes + redis_extra_volumes }}" - dimensions: "{{ redis_dimensions }}" - healthcheck: "{{ redis_healthcheck }}" - redis-sentinel: - container_name: redis_sentinel - group: redis - environment: - REDIS_CONF: "{{ redis_conf_path }}" - REDIS_GEN_CONF: "{{ redis_generated_conf_path }}" - enabled: true - image: "{{ redis_sentinel_image_full }}" - volumes: "{{ redis_sentinel_default_volumes + redis_sentinel_extra_volumes }}" - dimensions: "{{ redis_sentinel_dimensions }}" - healthcheck: "{{ redis_sentinel_healthcheck }}" - -#################### -# Docker -#################### -redis_image: "{{ docker_image_url }}redis" -redis_tag: "{{ openstack_tag }}" -redis_image_full: "{{ redis_image }}:{{ redis_tag }}" - -redis_sentinel_image: "{{ docker_image_url }}redis-sentinel" -redis_sentinel_tag: "{{ openstack_tag }}" -redis_sentinel_image_full: "{{ redis_sentinel_image }}:{{ redis_tag }}" -redis_dimensions: "{{ default_container_dimensions }}" -redis_sentinel_dimensions: "{{ default_container_dimensions }}" - -redis_enable_healthchecks: "{{ enable_container_healthchecks }}" -redis_healthcheck_interval: "{{ default_container_healthcheck_interval }}" -redis_healthcheck_retries: "{{ default_container_healthcheck_retries }}" -redis_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" -redis_healthcheck_test: ["CMD-SHELL", "healthcheck_listen redis-server {{ redis_port }}"] -redis_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" -redis_healthcheck: - interval: "{{ redis_healthcheck_interval }}" - retries: "{{ redis_healthcheck_retries }}" - start_period: "{{ redis_healthcheck_start_period }}" - test: "{% if redis_enable_healthchecks | bool %}{{ redis_healthcheck_test }}{% else %}NONE{% endif %}" - timeout: "{{ redis_healthcheck_timeout }}" - -redis_sentinel_enable_healthchecks: "{{ 
enable_container_healthchecks }}" -redis_sentinel_healthcheck_interval: "{{ default_container_healthcheck_interval }}" -redis_sentinel_healthcheck_retries: "{{ default_container_healthcheck_retries }}" -redis_sentinel_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" -redis_sentinel_healthcheck_test: ["CMD-SHELL", "healthcheck_listen redis-sentinel {{ redis_sentinel_port }}"] -redis_sentinel_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" -redis_sentinel_healthcheck: - interval: "{{ redis_sentinel_healthcheck_interval }}" - retries: "{{ redis_sentinel_healthcheck_retries }}" - start_period: "{{ redis_sentinel_healthcheck_start_period }}" - test: "{% if redis_sentinel_enable_healthchecks | bool %}{{ redis_sentinel_healthcheck_test }}{% else %}NONE{% endif %}" - timeout: "{{ redis_sentinel_healthcheck_timeout }}" - -redis_default_volumes: - - "{{ node_config_directory }}/redis/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "{{ project_name }}:/var/lib/redis/" - - "kolla_logs:/var/log/kolla/" -redis_sentinel_default_volumes: - - "{{ node_config_directory }}/redis-sentinel/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "kolla_logs:/var/log/kolla/" - -redis_extra_volumes: "{{ default_extra_volumes }}" -redis_sentinel_extra_volumes: "{{ default_extra_volumes }}" - -redis_conf_path: "/etc/redis/redis.conf" -redis_generated_conf_path: "/etc/redis/redis-regenerated-by-config-rewrite.conf" diff --git a/ansible/roles/redis/tasks/check.yml b/ansible/roles/redis/tasks/check.yml deleted file mode 100644 index 0e3c550397..0000000000 --- a/ansible/roles/redis/tasks/check.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Checking Redis containers - import_role: - role: service-check - -- 
name: Redis ping pong check - become: true - command: "{{ kolla_container_engine }} exec redis redis-cli -h {{ api_interface_address }} -a {{ redis_master_password }} ping" - register: redis_check - changed_when: "redis_check.stdout != 'PONG'" - failed_when: "redis_check.stdout != 'PONG'" diff --git a/ansible/roles/redis/tasks/precheck.yml b/ansible/roles/redis/tasks/precheck.yml deleted file mode 100644 index 8f6371d0bb..0000000000 --- a/ansible/roles/redis/tasks/precheck.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- import_role: - name: service-precheck - vars: - service_precheck_services: "{{ redis_services }}" - service_name: "{{ project_name }}" - -- name: Get container facts - become: true - kolla_container_facts: - action: get_containers - container_engine: "{{ kolla_container_engine }}" - name: - - redis - check_mode: false - register: container_facts - -- name: Checking free port for Redis - vars: - service: "{{ redis_services['redis'] }}" - wait_for: - host: "{{ api_interface_address }}" - port: "{{ redis_port }}" - connect_timeout: 1 - timeout: 1 - state: stopped - when: - - container_facts.containers['redis'] is not defined - - service | service_enabled_and_mapped_to_host diff --git a/ansible/roles/redis/tasks/upgrade.yml b/ansible/roles/redis/tasks/upgrade.yml deleted file mode 100644 index 49edff81e3..0000000000 --- a/ansible/roles/redis/tasks/upgrade.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- import_tasks: config.yml - -- import_tasks: check-containers.yml - -- name: Flush handlers - meta: flush_handlers diff --git a/ansible/roles/redis/templates/redis-sentinel.conf.j2 b/ansible/roles/redis/templates/redis-sentinel.conf.j2 deleted file mode 100644 index 34ef13a3dd..0000000000 --- a/ansible/roles/redis/templates/redis-sentinel.conf.j2 +++ /dev/null @@ -1,12 +0,0 @@ -{% set redis_master_address = 'api' | kolla_address(groups['redis'][0]) %} -daemonize no -pidfile "/var/run/redis/redis-sentinel.pid" -logfile "/var/log/kolla/redis/redis-sentinel.log" -bind 
{{ api_interface_address }} -port {{ redis_sentinel_port }} -sentinel myid {{ inventory_hostname | hash('sha1') }} -sentinel monitor kolla {{ redis_master_address }} {{ redis_port }} 2 -sentinel auth-pass kolla {{ redis_master_password }} -sentinel down-after-milliseconds kolla 5000 -sentinel failover-timeout kolla 60000 -sentinel parallel-syncs kolla 1 diff --git a/ansible/roles/redis/templates/redis-sentinel.json.j2 b/ansible/roles/redis/templates/redis-sentinel.json.j2 deleted file mode 100644 index d062cefc44..0000000000 --- a/ansible/roles/redis/templates/redis-sentinel.json.j2 +++ /dev/null @@ -1,22 +0,0 @@ -{ - "command": "redis-sentinel {{ redis_generated_conf_path }}", - "config_files": [ - { - "source": "{{ container_config_directory }}/redis.conf", - "dest": "{{ redis_conf_path }}", - "owner": "redis", - "perm": "0600" - } - ], - "permissions": [ - { - "path": "/var/log/kolla/redis", - "owner": "redis:redis", - "recurse": true - }, - { - "path": "/etc/redis", - "owner": "redis:redis" - } - ] -} diff --git a/ansible/roles/redis/templates/redis.conf.j2 b/ansible/roles/redis/templates/redis.conf.j2 deleted file mode 100644 index 57a5ddce48..0000000000 --- a/ansible/roles/redis/templates/redis.conf.j2 +++ /dev/null @@ -1,55 +0,0 @@ -bind {{ api_interface_address }} -port {{ redis_port }} -tcp-backlog 511 -timeout 0 -tcp-keepalive 300 -daemonize no -pidfile /var/run/redis/redis-server.pid -loglevel notice -logfile /var/log/kolla/redis/redis.log -databases 16 -save 900 1 -save 300 10 -save 60 10000 -stop-writes-on-bgsave-error yes -rdbcompression yes -rdbchecksum yes -dbfilename dump.rdb -dir /var/lib/redis -replica-serve-stale-data yes -replica-read-only yes -repl-diskless-sync no -repl-diskless-sync-delay 5 -repl-disable-tcp-nodelay no -replica-priority 100 -appendonly yes -appendfilename "redis-staging-ao.aof" -appendfsync everysec -no-appendfsync-on-rewrite no -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb -aof-load-truncated yes 
-lua-time-limit 5000 -slowlog-log-slower-than 10000 -slowlog-max-len 128 -latency-monitor-threshold 0 -notify-keyspace-events "" -hash-max-ziplist-entries 512 -hash-max-ziplist-value 64 -set-max-intset-entries 512 -zset-max-ziplist-entries 128 -zset-max-ziplist-value 64 -hll-sparse-max-bytes 3000 -activerehashing yes -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit replica 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 -hz 10 -aof-rewrite-incremental-fsync yes -requirepass {{ redis_master_password }} -masterauth {{ redis_master_password }} - -{% if inventory_hostname != groups['redis'][0] %} -{% set redis_master_address = 'api' | kolla_address(groups['redis'][0]) %} -replicaof {{ redis_master_address }} 6379 -{% endif %} diff --git a/ansible/roles/redis/templates/redis.json.j2 b/ansible/roles/redis/templates/redis.json.j2 deleted file mode 100644 index 02de435a38..0000000000 --- a/ansible/roles/redis/templates/redis.json.j2 +++ /dev/null @@ -1,18 +0,0 @@ -{ - "command": "redis-server {{ redis_conf_path }}", - "config_files": [ - { - "source": "{{ container_config_directory }}/redis.conf", - "dest": "{{ redis_conf_path }}", - "owner": "redis", - "perm": "0600" - } - ], - "permissions": [ - { - "path": "/var/log/kolla/redis", - "owner": "redis:redis", - "recurse": true - } - ] -} diff --git a/ansible/roles/redis/vars/main.yml b/ansible/roles/redis/vars/main.yml deleted file mode 100644 index 1544bfcf99..0000000000 --- a/ansible/roles/redis/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -project_name: "redis" diff --git a/ansible/roles/telegraf/templates/telegraf.conf.j2 b/ansible/roles/telegraf/templates/telegraf.conf.j2 index a8c88f5e44..a890bf1042 100644 --- a/ansible/roles/telegraf/templates/telegraf.conf.j2 +++ b/ansible/roles/telegraf/templates/telegraf.conf.j2 @@ -67,9 +67,9 @@ username = "{{ rabbitmq_user }}" password = "{{ rabbitmq_password }}" {% endif %} -{% if inventory_hostname in groups['redis'] and enable_redis | bool 
%} +{% if inventory_hostname in groups['valkey'] and enable_valkey | bool %} [[inputs.redis]] - servers = ["tcp://:{{ redis_master_password }}@{{ api_interface_address | put_address_in_context('url') }}:{{ redis_port }}"] + servers = ["tcp://:{{ valkey_master_password }}@{{ api_interface_address | put_address_in_context('url') }}:{{ valkey_server_port }}"] {% endif %} {% if inventory_hostname in groups['mariadb'] and (enable_mariadb or enable_external_mariadb_load_balancer) | bool %} [[inputs.mysql]] diff --git a/ansible/roles/valkey/defaults/main.yml b/ansible/roles/valkey/defaults/main.yml new file mode 100644 index 0000000000..46647abd75 --- /dev/null +++ b/ansible/roles/valkey/defaults/main.yml @@ -0,0 +1,78 @@ +--- +valkey_services: + valkey-server: + container_name: valkey_server + group: valkey + enabled: true + image: "{{ valkey_image_full }}" + volumes: "{{ valkey_default_volumes + valkey_extra_volumes }}" + dimensions: "{{ valkey_dimensions }}" + healthcheck: "{{ valkey_healthcheck }}" + valkey-sentinel: + container_name: valkey_sentinel + group: valkey + environment: + VALKEY_CONF: "{{ valkey_conf_path }}" + VALKEY_GEN_CONF: "{{ valkey_generated_conf_path }}" + enabled: true + image: "{{ valkey_sentinel_image_full }}" + volumes: "{{ valkey_sentinel_default_volumes + valkey_sentinel_extra_volumes }}" + dimensions: "{{ valkey_sentinel_dimensions }}" + healthcheck: "{{ valkey_sentinel_healthcheck }}" + +#################### +# Docker +#################### +valkey_image: "{{ docker_image_url }}valkey-server" +valkey_tag: "{{ openstack_tag }}" +valkey_image_full: "{{ valkey_image }}:{{ valkey_tag }}" + +valkey_sentinel_image: "{{ docker_image_url }}valkey-sentinel" +valkey_sentinel_tag: "{{ openstack_tag }}" +valkey_sentinel_image_full: "{{ valkey_sentinel_image }}:{{ valkey_tag }}" +valkey_dimensions: "{{ default_container_dimensions }}" +valkey_sentinel_dimensions: "{{ default_container_dimensions }}" + +valkey_enable_healthchecks: "{{ 
enable_container_healthchecks }}" +valkey_healthcheck_interval: "{{ default_container_healthcheck_interval }}" +valkey_healthcheck_retries: "{{ default_container_healthcheck_retries }}" +valkey_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" +valkey_healthcheck_test: ["CMD-SHELL", "healthcheck_listen valkey-server {{ valkey_server_port }}"] +valkey_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" +valkey_healthcheck: + interval: "{{ valkey_healthcheck_interval }}" + retries: "{{ valkey_healthcheck_retries }}" + start_period: "{{ valkey_healthcheck_start_period }}" + test: "{% if valkey_enable_healthchecks | bool %}{{ valkey_healthcheck_test }}{% else %}NONE{% endif %}" + timeout: "{{ valkey_healthcheck_timeout }}" + +valkey_sentinel_enable_healthchecks: "{{ enable_container_healthchecks }}" +valkey_sentinel_healthcheck_interval: "{{ default_container_healthcheck_interval }}" +valkey_sentinel_healthcheck_retries: "{{ default_container_healthcheck_retries }}" +valkey_sentinel_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" +valkey_sentinel_healthcheck_test: ["CMD-SHELL", "healthcheck_listen valkey-sentinel {{ valkey_sentinel_port }}"] +valkey_sentinel_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" +valkey_sentinel_healthcheck: + interval: "{{ valkey_sentinel_healthcheck_interval }}" + retries: "{{ valkey_sentinel_healthcheck_retries }}" + start_period: "{{ valkey_sentinel_healthcheck_start_period }}" + test: "{% if valkey_sentinel_enable_healthchecks | bool %}{{ valkey_sentinel_healthcheck_test }}{% else %}NONE{% endif %}" + timeout: "{{ valkey_sentinel_healthcheck_timeout }}" + +valkey_default_volumes: + - "{{ node_config_directory }}/valkey-server/:{{ container_config_directory }}/:ro" + - "/etc/localtime:/etc/localtime:ro" + - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" + - "{{ project_name }}:/var/lib/valkey/" + - 
"kolla_logs:/var/log/kolla/" +valkey_sentinel_default_volumes: + - "{{ node_config_directory }}/valkey-sentinel/:{{ container_config_directory }}/:ro" + - "/etc/localtime:/etc/localtime:ro" + - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" + - "kolla_logs:/var/log/kolla/" + +valkey_extra_volumes: "{{ default_extra_volumes }}" +valkey_sentinel_extra_volumes: "{{ default_extra_volumes }}" + +valkey_conf_path: "/etc/valkey/valkey.conf" +valkey_generated_conf_path: "/etc/valkey/valkey-regenerated-by-config-rewrite.conf" diff --git a/ansible/roles/redis/handlers/main.yml b/ansible/roles/valkey/handlers/main.yml similarity index 74% rename from ansible/roles/redis/handlers/main.yml rename to ansible/roles/valkey/handlers/main.yml index d1bd614771..d0c0cfcd65 100644 --- a/ansible/roles/redis/handlers/main.yml +++ b/ansible/roles/valkey/handlers/main.yml @@ -1,8 +1,8 @@ --- -- name: Restart redis container +- name: Restart valkey-server container vars: - service_name: "redis" - service: "{{ redis_services[service_name] }}" + service_name: "valkey-server" + service: "{{ valkey_services[service_name] }}" become: true kolla_container: action: "recreate_or_restart_container" @@ -13,10 +13,10 @@ dimensions: "{{ service.dimensions }}" healthcheck: "{{ service.healthcheck | default(omit) }}" -- name: Restart redis-sentinel container +- name: Restart valkey-sentinel container vars: - service_name: "redis-sentinel" - service: "{{ redis_services[service_name] }}" + service_name: "valkey-sentinel" + service: "{{ valkey_services[service_name] }}" become: true kolla_container: action: "recreate_or_restart_container" diff --git a/ansible/roles/redis/tasks/check-containers.yml b/ansible/roles/valkey/tasks/check-containers.yml similarity index 100% rename from ansible/roles/redis/tasks/check-containers.yml rename to ansible/roles/valkey/tasks/check-containers.yml diff --git a/ansible/roles/valkey/tasks/check.yml 
b/ansible/roles/valkey/tasks/check.yml new file mode 100644 index 0000000000..d7531527ac --- /dev/null +++ b/ansible/roles/valkey/tasks/check.yml @@ -0,0 +1,13 @@ +--- +- name: Checking valkey containers + import_role: + role: service-check + +- name: Valkey ping pong check + become: true + shell: >- + {{ kolla_container_engine }} exec valkey_server valkey-cli -h {{ api_interface_address }} + -a {{ valkey_master_password }} ping + register: valkey_check + changed_when: "'PONG' in valkey_check.stdout" + failed_when: "'PONG' not in valkey_check.stdout" diff --git a/ansible/roles/redis/tasks/config.yml b/ansible/roles/valkey/tasks/config.yml similarity index 59% rename from ansible/roles/redis/tasks/config.yml rename to ansible/roles/valkey/tasks/config.yml index 9f133933da..c040256730 100644 --- a/ansible/roles/redis/tasks/config.yml +++ b/ansible/roles/valkey/tasks/config.yml @@ -7,7 +7,7 @@ group: "{{ config_owner_group }}" mode: "0770" become: true - with_dict: "{{ redis_services | select_services_enabled_and_mapped_to_host }}" + with_dict: "{{ valkey_services | select_services_enabled_and_mapped_to_host }}" - name: Copying over default config.json files template: @@ -15,12 +15,12 @@ dest: "{{ node_config_directory }}/{{ item.key }}/config.json" mode: "0660" become: true - with_dict: "{{ redis_services | select_services_enabled_and_mapped_to_host }}" + with_dict: "{{ valkey_services | select_services_enabled_and_mapped_to_host }}" -- name: Copying over redis config files +- name: Copying over valkey config files template: src: "{{ item.key }}.conf.j2" - dest: "{{ node_config_directory }}/{{ item.key }}/redis.conf" + dest: "{{ node_config_directory }}/{{ item.key }}/valkey.conf" mode: "0660" become: true - with_dict: "{{ redis_services | select_services_enabled_and_mapped_to_host }}" + with_dict: "{{ valkey_services | select_services_enabled_and_mapped_to_host }}" diff --git a/ansible/roles/redis/tasks/config_validate.yml 
b/ansible/roles/valkey/tasks/config_validate.yml similarity index 100% rename from ansible/roles/redis/tasks/config_validate.yml rename to ansible/roles/valkey/tasks/config_validate.yml diff --git a/ansible/roles/redis/tasks/deploy-containers.yml b/ansible/roles/valkey/tasks/deploy-containers.yml similarity index 100% rename from ansible/roles/redis/tasks/deploy-containers.yml rename to ansible/roles/valkey/tasks/deploy-containers.yml diff --git a/ansible/roles/redis/tasks/deploy.yml b/ansible/roles/valkey/tasks/deploy.yml similarity index 100% rename from ansible/roles/redis/tasks/deploy.yml rename to ansible/roles/valkey/tasks/deploy.yml diff --git a/ansible/roles/redis/tasks/main.yml b/ansible/roles/valkey/tasks/main.yml similarity index 100% rename from ansible/roles/redis/tasks/main.yml rename to ansible/roles/valkey/tasks/main.yml diff --git a/ansible/roles/valkey/tasks/precheck.yml b/ansible/roles/valkey/tasks/precheck.yml new file mode 100644 index 0000000000..689fcfb6ff --- /dev/null +++ b/ansible/roles/valkey/tasks/precheck.yml @@ -0,0 +1,45 @@ +--- +# TODO(gkoper) Remove redis checks after G/2026.1 release +- name: Check if Redis is running + become: true + kolla_container_facts: + action: get_containers + container_engine: "{{ kolla_container_engine }}" + name: + - redis + check_mode: false + register: redis_container_facts + +- name: Set migration flag if Redis is present + set_fact: + valkey_is_migrating: "{{ redis_container_facts.containers['redis'] is defined }}" + +- import_role: + name: service-precheck + vars: + service_precheck_services: "{{ valkey_services }}" + service_name: "{{ project_name }}" + +- name: Get container facts + become: true + kolla_container_facts: + action: get_containers + container_engine: "{{ kolla_container_engine }}" + name: + - valkey_server + check_mode: false + register: container_facts + +- name: Checking free port for Valkey + vars: + service: "{{ valkey_services['valkey-server'] }}" + wait_for: + host: "{{ 
api_interface_address }}" + port: "{{ valkey_server_port }}" + connect_timeout: 1 + timeout: 1 + state: stopped + when: + - container_facts.containers['valkey_server'] is not defined + - service | service_enabled_and_mapped_to_host + - not (valkey_is_migrating | default(false) | bool) diff --git a/ansible/roles/redis/tasks/pull.yml b/ansible/roles/valkey/tasks/pull.yml similarity index 100% rename from ansible/roles/redis/tasks/pull.yml rename to ansible/roles/valkey/tasks/pull.yml diff --git a/ansible/roles/redis/tasks/reconfigure.yml b/ansible/roles/valkey/tasks/reconfigure.yml similarity index 100% rename from ansible/roles/redis/tasks/reconfigure.yml rename to ansible/roles/valkey/tasks/reconfigure.yml diff --git a/ansible/roles/redis/tasks/stop.yml b/ansible/roles/valkey/tasks/stop.yml similarity index 65% rename from ansible/roles/redis/tasks/stop.yml rename to ansible/roles/valkey/tasks/stop.yml index 452b62ffbe..a175892b76 100644 --- a/ansible/roles/redis/tasks/stop.yml +++ b/ansible/roles/valkey/tasks/stop.yml @@ -2,5 +2,5 @@ - import_role: name: service-stop vars: - project_services: "{{ redis_services }}" + project_services: "{{ valkey_services }}" service_name: "{{ project_name }}" diff --git a/ansible/roles/valkey/tasks/upgrade.yml b/ansible/roles/valkey/tasks/upgrade.yml new file mode 100644 index 0000000000..0cae1ce26e --- /dev/null +++ b/ansible/roles/valkey/tasks/upgrade.yml @@ -0,0 +1,134 @@ +--- +# TODO(bbezak): Remove in G/2026.1 release as Redis migration is no longer required +- name: Check if Redis is running + become: true + kolla_container_facts: + action: get_containers + container_engine: "{{ kolla_container_engine }}" + name: + - redis + check_mode: false + register: redis_container_facts + +- name: Set migration flag if Redis is present + set_fact: + _valkey_migration: "{{ redis_container_facts.containers['redis'] is defined }}" + +- name: Perform Redis to Valkey migration steps + block: + + - name: Set temporary Valkey migration vars + 
set_fact: + valkey_server_port: "6380" + valkey_sentinel_port: "26380" + valkey_sentinel_monitor_name: "kolla-temp" + + - name: Set valkey master host to valkey[0] + set_fact: + valkey_master_host: "{{ groups['valkey'][0] }}" + redis_slave_hosts: "{{ groups['redis'][1:] }}" + run_once: true + + - name: Start Valkey on temporary ports with temp sentinel monitor name + import_tasks: deploy.yml + + - name: Wait for Valkey replication sync + become: true + delegate_to: "{{ valkey_master_host }}" + run_once: true + shell: >- + {{ kolla_container_engine }} exec valkey_server + valkey-cli -h {{ api_interface_address }} -p {{ valkey_server_port }} info replication + register: valkey_replication + until: "'master_link_status:up' in valkey_replication.stdout" + retries: 30 + delay: 2 + + - name: Set replica-priority=10 on valkey[0] to ensure it becomes master + become: true + delegate_to: "{{ valkey_master_host }}" + run_once: true + command: >- + {{ kolla_container_engine }} exec valkey_server valkey-cli + -h {{ api_interface_address }} -p {{ valkey_server_port }} + CONFIG SET replica-priority 10 + + - name: Set replica-priority=0 on other valkey nodes to prevent promotion + become: true + command: >- + {{ kolla_container_engine }} exec valkey_server valkey-cli + -h {{ api_interface_address }} -p {{ valkey_server_port }} CONFIG SET replica-priority 0 + when: inventory_hostname != valkey_master_host + + - name: Stop redis slaves (so they don't get promoted during failover) + become: true + kolla_container: + name: redis + action: stop_container + when: inventory_hostname in redis_slave_hosts + + - name: Trigger Sentinel failover (using one of redis-sentinels) + become: true + delegate_to: "{{ valkey_master_host }}" + run_once: true + command: >- + {{ kolla_container_engine }} exec redis_sentinel redis-cli + -h {{ api_interface_address }} + -p {{ redis_sentinel_port }} + SENTINEL failover kolla + + - name: Wait until Valkey becomes master + become: true + delegate_to: "{{ 
valkey_master_host }}" + run_once: true + shell: >- + {{ kolla_container_engine }} exec valkey_server + valkey-cli -h {{ api_interface_address }} -p {{ valkey_server_port }} info replication + register: valkey_role + until: "'role:master' in valkey_role.stdout" + retries: 30 + delay: 2 + + - name: Stop all Redis containers + become: true + kolla_container: + name: redis + action: stop_and_remove_container + when: inventory_hostname in groups['redis'] + + - name: Stop all Sentinel containers + become: true + kolla_container: + name: redis_sentinel + action: stop_and_remove_container + when: inventory_hostname in groups['redis'] + + - name: Remove Redis data volume + become: true + kolla_container: + action: remove_volume + name: redis + when: inventory_hostname in groups['redis'] + + when: redis_container_facts.containers['redis'] is defined + +# These tasks run always, regardless of Redis presence + +- name: Reset Valkey port to default (6379) after migration + set_fact: + valkey_server_port: "6379" + valkey_sentinel_port: "26379" + valkey_sentinel_monitor_name: "kolla" + _valkey_migration: false + + +- name: Reconfigure/Redeploy Valkey on default ports + import_tasks: reconfigure.yml + +- name: Verify Valkey responds on port 6379 + import_tasks: check.yml + +- import_tasks: check-containers.yml + +- name: Flush handlers + meta: flush_handlers diff --git a/ansible/roles/valkey/templates/valkey-sentinel.conf.j2 b/ansible/roles/valkey/templates/valkey-sentinel.conf.j2 new file mode 100644 index 0000000000..ee5e4ff2b3 --- /dev/null +++ b/ansible/roles/valkey/templates/valkey-sentinel.conf.j2 @@ -0,0 +1,12 @@ +{% set valkey_master_address = 'api' | kolla_address(groups['valkey'][0]) %} +daemonize no +pidfile "/var/run/valkey/valkey-sentinel.pid" +logfile "/var/log/kolla/valkey/valkey-sentinel.log" +bind {{ api_interface_address }} +port {{ valkey_sentinel_port }} +sentinel myid {{ inventory_hostname | hash('sha1') }} +sentinel monitor {{ valkey_sentinel_monitor_name }} 
{{ valkey_master_address }} {{ valkey_server_port }} {{ valkey_sentinel_quorum }} +sentinel auth-pass {{ valkey_sentinel_monitor_name }} {{ valkey_master_password }} +sentinel down-after-milliseconds {{ valkey_sentinel_monitor_name }} 5000 +sentinel failover-timeout {{ valkey_sentinel_monitor_name }} 60000 +sentinel parallel-syncs {{ valkey_sentinel_monitor_name }} 1 diff --git a/ansible/roles/valkey/templates/valkey-sentinel.json.j2 b/ansible/roles/valkey/templates/valkey-sentinel.json.j2 new file mode 100644 index 0000000000..7dabf9599e --- /dev/null +++ b/ansible/roles/valkey/templates/valkey-sentinel.json.j2 @@ -0,0 +1,22 @@ +{ + "command": "valkey-sentinel {{ valkey_generated_conf_path }}", + "config_files": [ + { + "source": "{{ container_config_directory }}/valkey.conf", + "dest": "{{ valkey_conf_path }}", + "owner": "valkey", + "perm": "0600" + } + ], + "permissions": [ + { + "path": "/var/log/kolla/valkey", + "owner": "valkey:valkey", + "recurse": true + }, + { + "path": "/etc/valkey", + "owner": "valkey:valkey" + } + ] +} diff --git a/ansible/roles/valkey/templates/valkey-server.conf.j2 b/ansible/roles/valkey/templates/valkey-server.conf.j2 new file mode 100644 index 0000000000..b1b94af0db --- /dev/null +++ b/ansible/roles/valkey/templates/valkey-server.conf.j2 @@ -0,0 +1,29 @@ +appendonly yes +bind {{ api_interface_address }} +dir /var/lib/valkey +logfile /var/log/kolla/valkey/valkey.log +pidfile /var/run/valkey/valkey-server.pid +port {{ valkey_server_port }} + +{# TODO(mnasiadka): Remove after Gazpacho/2026.1 #} +{% if _valkey_migration | default(false) | bool %} +protected-mode no +{% if inventory_hostname == groups['valkey'][0] %} +{# Migration mode: valkey[0] replicates from Redis #} +replicaof {{ 'api' | kolla_address(groups['redis'][0]) }} {{ redis_port }} +masterauth {{ redis_master_password }} +{% elif not inventory_hostname == groups['valkey'][0] %} +{# Secondary valkey nodes replicate from valkey[0] #} +replicaof {{ 'api' | 
kolla_address(groups['valkey'][0]) }} {{ valkey_server_port }} +masterauth {{ valkey_master_password }} +{% endif %} +{% else %} +{# Normal mode: valkey[0] is master #} +{# NOTE: Keep this part after removing the migration block above #} +{% if not inventory_hostname == groups['valkey'][0] %} +{# Secondary valkey nodes replicate from valkey[0] #} +replicaof {{ 'api' | kolla_address(groups['valkey'][0]) }} {{ valkey_server_port }} +{% endif %} +requirepass {{ valkey_master_password }} +masterauth {{ valkey_master_password }} +{% endif %} diff --git a/ansible/roles/valkey/templates/valkey-server.json.j2 b/ansible/roles/valkey/templates/valkey-server.json.j2 new file mode 100644 index 0000000000..3a9933ab13 --- /dev/null +++ b/ansible/roles/valkey/templates/valkey-server.json.j2 @@ -0,0 +1,18 @@ +{ + "command": "valkey-server {{ valkey_conf_path }}", + "config_files": [ + { + "source": "{{ container_config_directory }}/valkey.conf", + "dest": "{{ valkey_conf_path }}", + "owner": "valkey", + "perm": "0600" + } + ], + "permissions": [ + { + "path": "/var/log/kolla/valkey", + "owner": "valkey:valkey", + "recurse": true + } + ] +} diff --git a/ansible/roles/valkey/vars/main.yml b/ansible/roles/valkey/vars/main.yml new file mode 100644 index 0000000000..209d4d45fd --- /dev/null +++ b/ansible/roles/valkey/vars/main.yml @@ -0,0 +1,2 @@ +--- +project_name: "valkey" diff --git a/ansible/site.yml b/ansible/site.yml index 33855f1d61..ae5f281cb8 100644 --- a/ansible/site.yml +++ b/ansible/site.yml @@ -63,7 +63,7 @@ - enable_placement_{{ enable_placement | bool }} - enable_prometheus_{{ enable_prometheus | bool }} - enable_rabbitmq_{{ enable_rabbitmq | bool }} - - enable_redis_{{ enable_redis | bool }} + - enable_valkey_{{ enable_valkey | bool }} - enable_skyline_{{ enable_skyline | bool }} - enable_tacker_{{ enable_tacker | bool }} - enable_telegraf_{{ enable_telegraf | bool }} @@ -415,19 +415,19 @@ - { role: telegraf, tags: telegraf } -- name: Apply role redis +- name: Apply 
role valkey gather_facts: false hosts: - - redis - - '&enable_redis_True' + - valkey + - '&enable_valkey_True' serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- - {{ redis_max_fail_percentage | + {{ valkey_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: redis, - tags: redis } + - { role: valkey, + tags: valkey } # MariaDB deployment is more complicated than other services, so is covered in # its own playbook. diff --git a/doc/source/admin/password-rotation.rst b/doc/source/admin/password-rotation.rst index 5067099759..fe5b5bdb0f 100644 --- a/doc/source/admin/password-rotation.rst +++ b/doc/source/admin/password-rotation.rst @@ -58,7 +58,7 @@ applied this way. * ``osprofiler_secret`` * ``prometheus_alertmanager_password`` * ``qdrouterd_password`` -* ``redis_master_password`` +* ``valkey_master_password`` It is possible to change more secrets however some require manual steps. The manual steps vary depending on the secret. They are listed below in the order diff --git a/doc/source/reference/networking/designate-guide.rst b/doc/source/reference/networking/designate-guide.rst index 9a629ebb4c..aa0932b879 100644 --- a/doc/source/reference/networking/designate-guide.rst +++ b/doc/source/reference/networking/designate-guide.rst @@ -51,10 +51,10 @@ Configure Designate options in ``/etc/kolla/globals.yml`` .. important:: If multiple nodes are assigned to be Designate workers, then you must - enable a supported coordination backend, currently only ``redis`` + enable a supported coordination backend, currently only ``valkey`` is supported. The backend choice can be overridden via the - ``designate_coordination_backend`` variable. It defaults to ``redis`` - when ``redis`` is enabled (``enable_redis`` is set to ``yes``). + ``designate_coordination_backend`` variable. It defaults to ``valkey`` + when ``valkey`` is enabled (``enable_valkey`` is set to ``yes``). 
The following additional variables are required depending on which backend you intend to use: diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml index 8f0973f5a8..36f5b4806e 100644 --- a/etc/kolla/globals.yml +++ b/etc/kolla/globals.yml @@ -429,7 +429,7 @@ workaround_ansible_issue_8743: yes #enable_placement: "{{ enable_nova | bool or enable_zun | bool }}" #enable_prometheus: "no" #enable_proxysql: "yes" -#enable_redis: "no" +#enable_valkey: "no" #enable_skyline: "no" #enable_tacker: "no" #enable_telegraf: "no" @@ -540,7 +540,7 @@ workaround_ansible_issue_8743: yes #################### # Osprofiler options #################### -# valid values: ["elasticsearch", "redis"] +# valid values: ["elasticsearch", "valkey"] #osprofiler_backend: "elasticsearch" ################## @@ -556,8 +556,8 @@ workaround_ansible_issue_8743: yes # Valid options are [ file, ceph ] #gnocchi_backend_storage: "file" -# Valid options are [redis, ''] -#gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}" +# Valid options are [valkey, ''] +#gnocchi_incoming_storage: "{{ 'valkey' if enable_valkey | bool else '' }}" ################################ # Cinder - Block Storage Options @@ -565,8 +565,8 @@ workaround_ansible_issue_8743: yes # Enable / disable Cinder backends #cinder_backend_ceph: "no" #cinder_volume_group: "cinder-volumes" -# Valid options are [ '', redis, etcd ] -#cinder_coordination_backend: "{{ 'redis' if enable_redis|bool else 'etcd' if enable_etcd|bool else '' }}" +# Valid options are [ '', valkey, etcd ] +#cinder_coordination_backend: "{{ 'valkey' if enable_valkey | bool else 'etcd' if enable_etcd | bool else '' }}" # Valid options are [ nfs, ceph, s3 ] #cinder_backup_driver: "ceph" @@ -598,8 +598,8 @@ workaround_ansible_issue_8743: yes #designate_backend: "bind9" #designate_ns_record: # - "ns1.example.org" -# Valid options are [ '', redis ] -#designate_coordination_backend: "{{ 'redis' if enable_redis|bool else '' }}" +# Valid options are [ '', valkey 
] +#designate_coordination_backend: "{{ 'valkey' if enable_valkey|bool else '' }}" ######################## # Nova - Compute Options @@ -659,8 +659,8 @@ workaround_ansible_issue_8743: yes # List of extra kernel parameters passed to the kernel used during inspection #ironic_kernel_cmdline_extras: [] -# Valid options are [ '', redis, etcd ] -#ironic_coordination_backend: "{{ 'redis' if enable_redis|bool else 'etcd' if enable_etcd|bool else '' }}" +# Valid options are [ '', valkey, etcd ] +#ironic_coordination_backend: "{{ 'valkey' if enable_valkey | bool else 'etcd' if enable_etcd | bool else '' }}" ###################################### # Manila - Shared File Systems Options diff --git a/etc/kolla/passwords.yml b/etc/kolla/passwords.yml index 5705873d68..1120754a82 100644 --- a/etc/kolla/passwords.yml +++ b/etc/kolla/passwords.yml @@ -197,8 +197,10 @@ keepalived_password: etcd_cluster_token: #################### -# redis options +# valkey options #################### +valkey_master_password: +# TODO(gkoper): Remove after G/2026.1 release redis_master_password: #################### diff --git a/releasenotes/notes/replace-redis-with-valkey-8f60e01c2460a301.yaml b/releasenotes/notes/replace-redis-with-valkey-8f60e01c2460a301.yaml new file mode 100644 index 0000000000..4a7b279988 --- /dev/null +++ b/releasenotes/notes/replace-redis-with-valkey-8f60e01c2460a301.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + Added a Valkey role with Sentinel so deployments use Valkey instead of + Redis without changing coordination endpoints. +upgrade: + - | + Redis has been replaced with Valkey. Before running ``kolla-ansible + upgrade``, set ``enable_redis: "no"`` and ``enable_valkey: "yes"`` + in ``globals.yml``. The upgrade playbooks automatically migrate Redis + data into Valkey using temporary ports and then switch back to the + defaults. 
diff --git a/tests/check-logs.sh b/tests/check-logs.sh index dac11ccb9b..c9953982c6 100755 --- a/tests/check-logs.sh +++ b/tests/check-logs.sh @@ -88,6 +88,7 @@ function check_fluentd_missing_logs { /var/log/kolla/rabbitmq/*upgrade.log) continue ;; + # TODO(gkoper) Remove after G/2026.1 release /var/log/kolla/redis/*) continue ;; diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2 index ddcc631dd4..c8a975dbd0 100644 --- a/tests/templates/globals-default.j2 +++ b/tests/templates/globals-default.j2 @@ -118,11 +118,15 @@ enable_prometheus_openstack_exporter: "no" enable_tacker: "yes" enable_neutron_sfc: "yes" enable_mistral: "yes" -enable_redis: "yes" +enable_valkey: "yes" enable_barbican: "yes" enable_heat: "yes" # NOTE(yoctozepto): see https://bugs.launchpad.net/kolla-ansible/+bug/1906299 enable_aodh: "yes" +# TODO(gkoper): Remove after G/2026.1 release +{% if is_previous_release %} +enable_redis: "yes" +{% endif %} {% endif %} {% if scenario == "ironic" %} @@ -136,6 +140,7 @@ ironic_dnsmasq_dhcp_ranges: {% if scenario == "masakari" %} enable_masakari: "yes" +enable_valkey: "yes" {% endif %} {% if scenario == "cells" %} @@ -156,7 +161,7 @@ enable_mariabackup: "yes" # kolla-ansible vars enable_cinder: "yes" enable_neutron_agent_ha: "yes" -enable_redis: "yes" +enable_valkey: "yes" cinder_cluster_name: "kolla_ceph" # External Ceph glance_backend_ceph: "yes" @@ -166,8 +171,10 @@ nova_backend_ceph: "yes" rabbitmq_cluster_partition_handling: "autoheal" rabbitmq_extra_config: cluster_keepalive_interval: 50000 -# Redis for coordination +# TODO(gkoper): Remove after G/2026.1 release +{% if is_previous_release %} enable_redis: "yes" +{% endif %} enable_ceph_rgw: "yes" ceph_rgw_hosts: @@ -201,8 +208,11 @@ neutron_enable_ovn_agent: "yes" enable_octavia: "yes" octavia_provider_drivers: "ovn:OVN provider" octavia_provider_agents: "ovn" -enable_redis: "yes" neutron_dns_domain: "example.org." 
+# TODO(gkoper): Remove after G/2026.1 release +{% if is_previous_release %} +enable_redis: "yes" +{% endif %} {% endif %} {% if scenario == "prometheus-opensearch" %} @@ -234,8 +244,12 @@ octavia_amp_flavor: ram: 1024 disk: 5 octavia_network_type: "tenant" +enable_valkey: "yes" +# TODO(gkoper): Remove after G/2026.1 release +{% if is_previous_release %} enable_redis: "yes" {% endif %} +{% endif %} {% if groups['all'] | length == 1 %} keepalived_track_script_enabled: "no" @@ -281,6 +295,7 @@ skyline_enable_sso: "yes" enable_aodh: "yes" enable_ceilometer: "yes" enable_gnocchi: "yes" +enable_valkey: "yes" {% endif %} mariadb_monitor_read_only_interval: "30000" diff --git a/tests/templates/inventory.j2 b/tests/templates/inventory.j2 index d0711e93ee..8441d825ee 100644 --- a/tests/templates/inventory.j2 +++ b/tests/templates/inventory.j2 @@ -225,9 +225,13 @@ deployment [zun:children] control +# TODO(gkoper): Remove redis group after G/2026.1 release [redis:children] control +[valkey:children] +control + [blazar:children] control diff --git a/zuul.d/scenarios/cephadm.yaml b/zuul.d/scenarios/cephadm.yaml index 28ac2128b9..2120af30e7 100644 --- a/zuul.d/scenarios/cephadm.yaml +++ b/zuul.d/scenarios/cephadm.yaml @@ -4,9 +4,9 @@ parent: kolla-ansible-base voting: false files: - - ^ansible/group_vars/all/(ceph|ceph-rgw|common|fluentd|glance|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml + - ^ansible/group_vars/all/(ceph|ceph-rgw|common|fluentd|glance|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq|valkey).yml - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml - - ^ansible/roles/(ceph-rgw|common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|openvswitch|placement|proxysql|rabbitmq)/ + - 
^ansible/roles/(ceph-rgw|common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|openvswitch|placement|proxysql|rabbitmq|valkey)/ - ^roles/cephadm/ vars: kolla_ansible_setup_disks_file_path: "/var/lib/ceph-osd.img" @@ -16,7 +16,7 @@ scenario: cephadm scenario_images_extra: - ^cinder - - ^redis + - ^valkey - job: name: kolla-ansible-debian-bookworm-cephadm diff --git a/zuul.d/scenarios/masakari.yaml b/zuul.d/scenarios/masakari.yaml index 4593f0a673..02eb2a98e5 100644 --- a/zuul.d/scenarios/masakari.yaml +++ b/zuul.d/scenarios/masakari.yaml @@ -4,14 +4,15 @@ parent: kolla-ansible-base voting: false files: !inherit - - ^ansible/group_vars/all/(hacluster|masakari).yml - - ^ansible/roles/(hacluster|masakari)/ + - ^ansible/group_vars/all/(hacluster|masakari|valkey).yml + - ^ansible/roles/(hacluster|masakari|valkey)/ - ^tests/test-masakari.sh vars: scenario: masakari scenario_images_extra: - ^masakari - ^hacluster + - ^valkey # TODO: Remove once Masakari has TLS support tls_enabled: false diff --git a/zuul.d/scenarios/nfv.yaml b/zuul.d/scenarios/nfv.yaml index 60e44f7eea..03df813c0e 100644 --- a/zuul.d/scenarios/nfv.yaml +++ b/zuul.d/scenarios/nfv.yaml @@ -4,8 +4,8 @@ parent: kolla-ansible-base voting: false files: !inherit - - ^ansible/group_vars/all/(aodh|barbican|heat|mistral|redis|tacker).yml - - ^ansible/roles/(aodh|barbican|heat|mistral|redis|tacker)/ + - ^ansible/group_vars/all/(aodh|barbican|heat|mistral|valkey|tacker).yml + - ^ansible/roles/(aodh|barbican|heat|mistral|valkey|tacker)/ - ^tests/test-scenario-nfv.sh vars: scenario: nfv @@ -13,7 +13,7 @@ - ^aodh - ^tacker - ^mistral - - ^redis + - ^valkey - ^barbican tls_enabled: false diff --git a/zuul.d/scenarios/octavia.yaml b/zuul.d/scenarios/octavia.yaml index d7fdcb2080..520969f654 100644 --- a/zuul.d/scenarios/octavia.yaml +++ b/zuul.d/scenarios/octavia.yaml @@ -4,13 +4,13 @@ parent: kolla-ansible-base voting: false files: !inherit - - 
^ansible/group_vars/all/octavia.yml - - ^ansible/roles/(octavia|octavia-certificates)/ + - ^ansible/group_vars/all/(octavia|valkey).yml + - ^ansible/roles/(octavia|octavia-certificates|valkey)/ - ^tests/test-octavia.sh vars: scenario: octavia scenario_images_extra: - - ^redis + - ^valkey - ^octavia tls_enabled: false diff --git a/zuul.d/scenarios/ovn.yaml b/zuul.d/scenarios/ovn.yaml index 6e94728457..cfc4b29262 100644 --- a/zuul.d/scenarios/ovn.yaml +++ b/zuul.d/scenarios/ovn.yaml @@ -9,7 +9,6 @@ vars: scenario: ovn scenario_images_extra: - - ^redis - ^octavia - ^ovn diff --git a/zuul.d/scenarios/telemetry.yaml b/zuul.d/scenarios/telemetry.yaml index 08d01d890c..bd762ea4c0 100644 --- a/zuul.d/scenarios/telemetry.yaml +++ b/zuul.d/scenarios/telemetry.yaml @@ -4,8 +4,8 @@ parent: kolla-ansible-base voting: false files: - - ^ansible/group_vars/all/(aodh|ceilometer|gnocchi).yml - - ^ansible/roles/(aodh|ceilometer|gnocchi)/ + - ^ansible/group_vars/all/(aodh|ceilometer|gnocchi|valkey).yml + - ^ansible/roles/(aodh|ceilometer|gnocchi|valkey)/ - ^tests/test-telemetry.sh vars: scenario: telemetry @@ -13,6 +13,7 @@ - ^aodh - ^ceilometer - ^gnocchi + - ^valkey - job: name: kolla-ansible-debian-bookworm-telemetry From 3bffbca3c77b79c250b51ac04e65b26dbb7d3914 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 23 Sep 2025 11:35:48 +0200 Subject: [PATCH 088/165] keystone: Add support for deploying httpd for federation Depends-On: https://review.opendev.org/c/openstack/kolla/+/954707 Change-Id: I1f519b3ce8b7f2ceee2eaef88e4afc8ca8fa2f6e Signed-off-by: Michal Nasiadka --- ansible/roles/keystone/defaults/main.yml | 82 +++++++++---- ansible/roles/keystone/handlers/main.yml | 14 +++ ansible/roles/keystone/tasks/config.yml | 23 +++- .../keystone/templates/httpd-keystone.conf.j2 | 113 ++++++++++++++++++ .../keystone/templates/keystone-httpd.json.j2 | 63 ++++++++++ .../service-uwsgi-config/defaults/main.yml | 1 + .../templates/uwsgi.ini.j2 | 3 + tests/templates/globals-default.j2 | 
4 + zuul.d/project.yaml | 1 + zuul.d/scenarios/keystone-federation.yaml | 37 ++++++ 10 files changed, 316 insertions(+), 25 deletions(-) create mode 100644 ansible/roles/keystone/templates/httpd-keystone.conf.j2 create mode 100644 ansible/roles/keystone/templates/keystone-httpd.json.j2 create mode 100644 zuul.d/scenarios/keystone-federation.yaml diff --git a/ansible/roles/keystone/defaults/main.yml b/ansible/roles/keystone/defaults/main.yml index 3d236dfef7..0b98275f8c 100644 --- a/ansible/roles/keystone/defaults/main.yml +++ b/ansible/roles/keystone/defaults/main.yml @@ -31,32 +31,40 @@ keystone_services: backend_http_extra: - "balance {{ 'source' if enable_keystone_federation | bool else 'roundrobin' }}" - "option httpchk" - keystone-ssh: - container_name: "keystone_ssh" + keystone-fernet: + container_name: "keystone_fernet" group: "keystone" enabled: true - image: "{{ keystone_ssh_image_full }}" + image: "{{ keystone_fernet_image_full }}" volumes: - - "{{ node_config_directory }}/keystone-ssh/:{{ container_config_directory }}/:ro" + - "{{ node_config_directory }}/keystone-fernet/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - "kolla_logs:/var/log/kolla/" - "keystone_fernet_tokens:/etc/keystone/fernet-keys" - dimensions: "{{ keystone_ssh_dimensions }}" - healthcheck: "{{ keystone_ssh_healthcheck }}" - keystone-fernet: - container_name: "keystone_fernet" + dimensions: "{{ keystone_fernet_dimensions }}" + healthcheck: "{{ keystone_fernet_healthcheck }}" + keystone-httpd: + container_name: "keystone_httpd" + group: "keystone" + enabled: "{{ enable_keystone_federation | bool }}" + image: "{{ keystone_httpd_image_full }}" + volumes: "{{ keystone_httpd_default_volumes + keystone_httpd_extra_volumes }}" + dimensions: "{{ keystone_httpd_dimensions }}" + healthcheck: "{{ keystone_httpd_healthcheck }}" + keystone-ssh: + container_name: "keystone_ssh" 
group: "keystone" enabled: true - image: "{{ keystone_fernet_image_full }}" + image: "{{ keystone_ssh_image_full }}" volumes: - - "{{ node_config_directory }}/keystone-fernet/:{{ container_config_directory }}/:ro" + - "{{ node_config_directory }}/keystone-ssh/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - "kolla_logs:/var/log/kolla/" - "keystone_fernet_tokens:/etc/keystone/fernet-keys" - dimensions: "{{ keystone_fernet_dimensions }}" - healthcheck: "{{ keystone_fernet_healthcheck }}" + dimensions: "{{ keystone_ssh_dimensions }}" + healthcheck: "{{ keystone_ssh_healthcheck }}" #################### # Config Validate @@ -105,6 +113,10 @@ keystone_image: "{{ docker_image_url }}keystone" keystone_service_tag: "{{ keystone_tag }}" keystone_image_full: "{{ keystone_image }}:{{ keystone_service_tag }}" +keystone_httpd_image: "{{ docker_image_url }}httpd" +keystone_httpd_tag: "{{ keystone_tag }}" +keystone_httpd_image_full: "{{ keystone_httpd_image }}:{{ keystone_httpd_tag }}" + keystone_fernet_image: "{{ docker_image_url }}keystone-fernet" keystone_fernet_tag: "{{ keystone_tag }}" keystone_fernet_image_full: "{{ keystone_fernet_image }}:{{ keystone_fernet_tag }}" @@ -114,6 +126,7 @@ keystone_ssh_tag: "{{ keystone_tag }}" keystone_ssh_image_full: "{{ keystone_ssh_image }}:{{ keystone_ssh_tag }}" keystone_dimensions: "{{ default_container_dimensions }}" +keystone_httpd_dimensions: "{{ default_container_dimensions }}" keystone_fernet_dimensions: "{{ default_container_dimensions }}" keystone_ssh_dimensions: "{{ default_container_dimensions }}" @@ -130,18 +143,18 @@ keystone_healthcheck: test: "{% if keystone_enable_healthchecks | bool %}{{ keystone_healthcheck_test }}{% else %}NONE{% endif %}" timeout: "{{ keystone_healthcheck_timeout }}" -keystone_ssh_enable_healthchecks: "{{ enable_container_healthchecks }}" -keystone_ssh_healthcheck_interval: "{{ 
default_container_healthcheck_interval }}" -keystone_ssh_healthcheck_retries: "{{ default_container_healthcheck_retries }}" -keystone_ssh_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" -keystone_ssh_healthcheck_test: ["CMD-SHELL", "healthcheck_listen sshd {{ keystone_ssh_port }}"] -keystone_ssh_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" -keystone_ssh_healthcheck: - interval: "{{ keystone_ssh_healthcheck_interval }}" - retries: "{{ keystone_ssh_healthcheck_retries }}" - start_period: "{{ keystone_ssh_healthcheck_start_period }}" - test: "{% if keystone_ssh_enable_healthchecks | bool %}{{ keystone_ssh_healthcheck_test }}{% else %}NONE{% endif %}" - timeout: "{{ keystone_ssh_healthcheck_timeout }}" +keystone_httpd_enable_healthchecks: "{{ enable_container_healthchecks }}" +keystone_httpd_healthcheck_interval: "{{ default_container_healthcheck_interval }}" +keystone_httpd_healthcheck_retries: "{{ default_container_healthcheck_retries }}" +keystone_httpd_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" +keystone_httpd_healthcheck_test: ["CMD-SHELL", "healthcheck_curl {{ 'https' if keystone_enable_tls_backend | bool else 'http' }}://{{ api_interface_address | put_address_in_context('url') }}:{{ keystone_public_listen_port }}"] +keystone_httpd_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" +keystone_httpd_healthcheck: + interval: "{{ keystone_httpd_healthcheck_interval }}" + retries: "{{ keystone_httpd_healthcheck_retries }}" + start_period: "{{ keystone_httpd_healthcheck_start_period }}" + test: "{% if keystone_httpd_enable_healthchecks | bool %}{{ keystone_httpd_healthcheck_test }}{% else %}NONE{% endif %}" + timeout: "{{ keystone_httpd_healthcheck_timeout }}" keystone_fernet_enable_healthchecks: "{{ enable_container_healthchecks }}" keystone_fernet_healthcheck_interval: "{{ default_container_healthcheck_interval }}" @@ -156,6 +169,19 @@ keystone_fernet_healthcheck: 
test: "{% if keystone_fernet_enable_healthchecks | bool %}{{ keystone_fernet_healthcheck_test }}{% else %}NONE{% endif %}" timeout: "{{ keystone_fernet_healthcheck_timeout }}" +keystone_ssh_enable_healthchecks: "{{ enable_container_healthchecks }}" +keystone_ssh_healthcheck_interval: "{{ default_container_healthcheck_interval }}" +keystone_ssh_healthcheck_retries: "{{ default_container_healthcheck_retries }}" +keystone_ssh_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" +keystone_ssh_healthcheck_test: ["CMD-SHELL", "healthcheck_listen sshd {{ keystone_ssh_port }}"] +keystone_ssh_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" +keystone_ssh_healthcheck: + interval: "{{ keystone_ssh_healthcheck_interval }}" + retries: "{{ keystone_ssh_healthcheck_retries }}" + start_period: "{{ keystone_ssh_healthcheck_start_period }}" + test: "{% if keystone_ssh_enable_healthchecks | bool %}{{ keystone_ssh_healthcheck_test }}{% else %}NONE{% endif %}" + timeout: "{{ keystone_ssh_healthcheck_timeout }}" + keystone_default_volumes: - "{{ node_config_directory }}/keystone/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" @@ -165,7 +191,14 @@ keystone_default_volumes: - "{{ '/dev/shm:/dev/shm' }}" - "keystone_fernet_tokens:/etc/keystone/fernet-keys" +keystone_httpd_default_volumes: + - "{{ node_config_directory }}/keystone-httpd/:{{ container_config_directory }}/:ro" + - "/etc/localtime:/etc/localtime:ro" + - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" + - "kolla_logs:/var/log/kolla/" + keystone_extra_volumes: "{{ default_extra_volumes }}" +keystone_httpd_extra_volumes: "{{ keystone_extra_volumes }}" #################### # OpenStack @@ -266,3 +299,4 @@ keystone_copy_certs: "{{ kolla_copy_ca_into_containers | bool or keystone_enable # WSGI ############ keystone_wsgi_provider: "uwsgi" +keystone_wsgi_socket_port: "5001" diff --git a/ansible/roles/keystone/handlers/main.yml 
b/ansible/roles/keystone/handlers/main.yml index c149877918..8d5f1fd049 100644 --- a/ansible/roles/keystone/handlers/main.yml +++ b/ansible/roles/keystone/handlers/main.yml @@ -68,6 +68,20 @@ dimensions: "{{ service.dimensions }}" healthcheck: "{{ service.healthcheck | default(omit) }}" +- name: Restart keystone-httpd container + vars: + service_name: "keystone-httpd" + service: "{{ keystone_services[service_name] }}" + become: true + kolla_container: + action: "recreate_or_restart_container" + common_options: "{{ docker_common_options }}" + name: "{{ service.container_name }}" + image: "{{ service.image }}" + volumes: "{{ service.volumes | reject('equalto', '') | list }}" + dimensions: "{{ service.dimensions }}" + healthcheck: "{{ service.healthcheck | default(omit) }}" + - name: Finish keystone database upgrade vars: service_name: "keystone" diff --git a/ansible/roles/keystone/tasks/config.yml b/ansible/roles/keystone/tasks/config.yml index df6617dd41..3ad2472810 100644 --- a/ansible/roles/keystone/tasks/config.yml +++ b/ansible/roles/keystone/tasks/config.yml @@ -147,9 +147,14 @@ project_services: "{{ keystone_services }}" service: "{{ keystone_services['keystone'] }}" service_name: "keystone" + service_uwsgi_config_host: >- + {{ '127.0.0.1' if enable_keystone_federation | bool + else api_interface_address | put_address_in_context('url') }} service_uwsgi_config_http_port: "{{ keystone_listen_port }}" service_uwsgi_config_module: "{{ service.wsgi }}" - service_uwsgi_config_tls_backend: "{{ keystone_enable_tls_backend | bool }}" + service_uwsgi_config_socket_port: "{{ keystone_wsgi_socket_port if enable_keystone_federation | bool else '' }}" + service_uwsgi_config_tls_backend: >- + {{ keystone_enable_tls_backend | bool and (not enable_keystone_federation | bool) }} service_uwsgi_config_tls_cert: "/etc/keystone/certs/keystone-cert.pem" service_uwsgi_config_tls_key: "/etc/keystone/certs/keystone-key.pem" service_uwsgi_config_uid: "keystone" @@ -157,6 +162,22 @@ - 
service | service_enabled_and_mapped_to_host - keystone_wsgi_provider == "uwsgi" +- name: Copying over httpd-keystone.conf + vars: + service: "{{ keystone_services['keystone-httpd'] }}" + template: + src: "{{ item }}" + dest: "{{ node_config_directory }}/keystone-httpd/httpd-keystone.conf" + mode: "0660" + become: true + when: + - service | service_enabled_and_mapped_to_host + - keystone_wsgi_provider == "uwsgi" + with_first_found: + - "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/httpd-keystone.conf" + - "{{ node_custom_config }}/keystone/httpd-keystone.conf" + - "httpd-keystone.conf.j2" + - name: Checking whether keystone-paste.ini file exists vars: service: "{{ keystone_services['keystone'] }}" diff --git a/ansible/roles/keystone/templates/httpd-keystone.conf.j2 b/ansible/roles/keystone/templates/httpd-keystone.conf.j2 new file mode 100644 index 0000000000..435fe7cdd7 --- /dev/null +++ b/ansible/roles/keystone/templates/httpd-keystone.conf.j2 @@ -0,0 +1,113 @@ +{% set keystone_log_dir = '/var/log/kolla/keystone' %} +{% set binary_path = '/var/lib/kolla/venv/bin' %} +{% if keystone_enable_tls_backend | bool %} +{% if kolla_base_distro in ['centos', 'rocky'] %} +LoadModule ssl_module /usr/lib64/httpd/modules/mod_ssl.so +{% else %} +LoadModule ssl_module /usr/lib/apache2/modules/mod_ssl.so +{% endif %} +{% endif %} + +{% if kolla_base_distro in ['centos', 'rocky'] %} +LoadModule proxy_module /usr/lib64/httpd/modules/mod_proxy.so +LoadModule proxy_uwsgi_module /usr/lib64/httpd/modules/mod_proxy_uwsgi.so +{% else %} +LoadModule proxy_module /usr/lib/apache2/modules/mod_proxy.so +LoadModule proxy_uwsgi_module /usr/lib/apache2/modules/mod_proxy_uwsgi.so +{% endif %} + +Listen {{ api_interface_address | put_address_in_context('url') }}:{{ keystone_public_listen_port }} + +ServerSignature Off +ServerTokens Prod +TraceEnable off +TimeOut {{ kolla_httpd_timeout }} +KeepAliveTimeout {{ kolla_httpd_keep_alive }} + +ErrorLog "{{ keystone_log_dir 
}}/apache-error.log" + + CustomLog "{{ keystone_log_dir }}/apache-access.log" common + + +{% if keystone_logging_debug | bool %} +LogLevel info +{% endif %} + + +{# NOTE(darmach): with external tls enabled OIDC redirection fails, as TLS terminated on haproxy keystone is not aware that redirection should use https. -#} +{# With missing ServerName Keystone Apache uses fqdn, with http. Adding ServerName pointing to keystone_public_url corrects this. -#} +{% if kolla_enable_tls_external | bool %} + ServerName {{ keystone_public_url }} +{% endif %} + ErrorLogFormat "%{cu}t %M" + ErrorLog "{{ keystone_log_dir }}/keystone-apache-public-error.log" + LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat + CustomLog "{{ keystone_log_dir }}/keystone-apache-public-access.log" logformat + ProxyPass / "uwsgi://localhost:{{ keystone_wsgi_socket_port }}/" + +{% if keystone_enable_tls_backend | bool %} + SSLEngine on + SSLCertificateFile /etc/keystone/certs/keystone-cert.pem + SSLCertificateKeyFile /etc/keystone/certs/keystone-key.pem +{% endif -%} + +{% if keystone_enable_federation_openid | bool %} +{% if keystone_federation_oidc_forwarded_headers | length > 0 %} + OIDCXForwardedHeaders "{{ keystone_federation_oidc_forwarded_headers }}" +{% endif %} + OIDCClaimPrefix "OIDC-" + OIDCClaimDelimiter "{{ keystone_federation_oidc_claim_delimiter }}" + OIDCResponseType "{{ keystone_federation_oidc_response_type }}" + OIDCScope "{{ keystone_federation_oidc_scopes }}" + OIDCMetadataDir {{ keystone_container_federation_oidc_metadata_folder }} +{% if keystone_federation_oidc_jwks_uri | length > 0 %} + OIDCOAuthVerifyJwksUri {{ keystone_federation_oidc_jwks_uri }} +{% endif %} +{% if keystone_federation_openid_certificate_key_ids | length > 0 %} + OIDCOAuthVerifyCertFiles {{ keystone_federation_openid_certificate_key_ids | join(" ") }} +{% endif %} + OIDCCryptoPassphrase {{ keystone_federation_openid_crypto_password }} + OIDCRedirectURI {{ 
keystone_public_url }}/redirect_uri +{% if enable_memcached | bool and keystone_oidc_enable_memcached | bool %} + OIDCCacheType memcache + OIDCMemCacheServers "{% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %} {% endif %}{% endfor %}" +{% endif %} +{% for key, value in keystone_federation_oidc_additional_options.items() %} + {{ key }} {{ value }} +{% endfor %} + + + Require valid-user + AuthType openid-connect + + + {# WebSSO authentication endpoint -#} + + Require valid-user + AuthType openid-connect + + +{% for idp in keystone_identity_providers %} +{% if idp.protocol == 'openid' %} + + OIDCDiscoverURL {{ keystone_public_url }}/redirect_uri?iss={{ idp.identifier | urlencode }} + Require valid-user + AuthType openid-connect + +{% endif %} +{% endfor %} + + {# CLI / API authentication endpoint -#} +{% for idp in keystone_identity_providers %} +{% if idp.protocol == 'openid' -%} + + Require valid-user + {# Note(jasonanderson): `auth-openidc` is a special auth type that can -#} + {# additionally handle verifying bearer tokens -#} + AuthType auth-openidc + +{% endif %} +{% endfor %} +{% endif %} + + diff --git a/ansible/roles/keystone/templates/keystone-httpd.json.j2 b/ansible/roles/keystone/templates/keystone-httpd.json.j2 new file mode 100644 index 0000000000..b82376afce --- /dev/null +++ b/ansible/roles/keystone/templates/keystone-httpd.json.j2 @@ -0,0 +1,63 @@ +{% set apache_cmd = '/usr/sbin/apache2' if kolla_base_distro in ['ubuntu', 'debian'] else '/usr/sbin/httpd' %} +{% set apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %} +{% set apache_user = 'www-data' if kolla_base_distro in ['ubuntu', 'debian'] else 'apache' %} + +{ + "command": "{{ apache_cmd }} -DFOREGROUND", + "config_files": [ + { + "source": "{{ container_config_directory }}/httpd-keystone.conf", + "dest": "/etc/{{ apache_dir 
}}/httpd-keystone.conf", + "owner": "root", + "perm": "0600" + }{% if keystone_enable_tls_backend | bool %}, + { + "source": "{{ container_config_directory }}/keystone-cert.pem", + "dest": "/etc/keystone/certs/keystone-cert.pem", + "owner": "keystone", + "perm": "0600" + }, + { + "source": "{{ container_config_directory }}/keystone-key.pem", + "dest": "/etc/keystone/certs/keystone-key.pem", + "owner": "keystone", + "perm": "0600" + }{% endif %}{% if keystone_enable_federation_openid | bool %}, + { + "source": "{{ container_config_directory }}/federation/oidc/metadata", + "dest": "{{ keystone_container_federation_oidc_metadata_folder }}", + "owner": "{{ apache_user }}:{{ apache_user }}", + "perm": "0600", + "merge": true + }, + { + "source": "{{ container_config_directory }}/federation/oidc/cert", + "dest": "{{ keystone_container_federation_oidc_idp_certificate_folder }}", + "owner": "{{ apache_user }}:{{ apache_user }}", + "perm": "0600", + "merge": true + }{% endif %}{% if kolla_copy_ca_into_containers | bool %}, + { + "source": "{{ container_config_directory }}/ca-certificates", + "dest": "/var/lib/kolla/share/ca-certificates", + "owner": "root", + "perm": "0600" + }{% endif %} + ], + "permissions": [ + { + "path": "/var/log/kolla/keystone", + "owner": "keystone:kolla" + }{% if keystone_enable_federation_openid | bool %}, + { + "path": "{{ keystone_container_federation_oidc_metadata_folder }}", + "owner": "{{ apache_user }}:{{ apache_user }}", + "perm": "0700" + }, + { + "path": "{{ keystone_container_federation_oidc_idp_certificate_folder }}", + "owner": "{{ apache_user }}:{{ apache_user }}", + "perm": "0700" + }{% endif %} + ] +} diff --git a/ansible/roles/service-uwsgi-config/defaults/main.yml b/ansible/roles/service-uwsgi-config/defaults/main.yml index 6e5225c926..9c5e13ed33 100644 --- a/ansible/roles/service-uwsgi-config/defaults/main.yml +++ b/ansible/roles/service-uwsgi-config/defaults/main.yml @@ -3,6 +3,7 @@ service_uwsgi_config_host: "{{ 
api_interface_address | put_address_in_context('u service_uwsgi_config_file: "{{ node_config_directory }}/{{ service_name }}/{{ service_name }}-uwsgi.ini" service_uwsgi_config_log_dir: "{{ ansible_parent_role_names | first }}" service_uwsgi_config_log_file: "{{ service_name }}-uwsgi.log" +service_uwsgi_config_socket_port: "" service_uwsgi_config_tls_backend: false service_uwsgi_config_worker_timeout: 80 service_uwsgi_config_workers: "{{ openstack_service_workers }}" diff --git a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 index a7495b4395..8ad80eb425 100644 --- a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 +++ b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 @@ -27,6 +27,9 @@ wsgi-file = {{ service_uwsgi_config_wsgi_file }} plugins-dir = {{ '/usr/lib/uwsgi/plugins' if kolla_base_distro in ['ubuntu', 'debian'] else '/usr/lib64/uwsgi' }} plugins = python3 processes = {{ service_uwsgi_config_workers }} +{% if service_uwsgi_config_socket_port | length > 0 %} +socket = {{ service_uwsgi_config_host }}:{{ service_uwsgi_config_socket_port }} +{% endif %} socket-timeout = 30 thunder-lock = true {% if service_uwsgi_config_uid is defined %} diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2 index ddcc631dd4..85ccecde5f 100644 --- a/tests/templates/globals-default.j2 +++ b/tests/templates/globals-default.j2 @@ -283,5 +283,9 @@ enable_ceilometer: "yes" enable_gnocchi: "yes" {% endif %} +{% if scenario == "federation" %} +enable_keystone_federation: true +{% endif %} + mariadb_monitor_read_only_interval: "30000" mariadb_monitor_galera_healthcheck_timeout: "30000" diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index dffc20c4ed..0986241300 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -11,6 +11,7 @@ # NOTE(mnasiadka): Failing since # https://review.opendev.org/c/openstack/kolla-ansible/+/864780 # - 
kolla-ansible-scenario-container-engine-migration + - kolla-ansible-scenario-federation - kolla-ansible-scenario-haproxy-fqdn - kolla-ansible-scenario-kayobe - kolla-ansible-scenario-openbao diff --git a/zuul.d/scenarios/keystone-federation.yaml b/zuul.d/scenarios/keystone-federation.yaml new file mode 100644 index 0000000000..47e3f3ff88 --- /dev/null +++ b/zuul.d/scenarios/keystone-federation.yaml @@ -0,0 +1,37 @@ +--- +- job: + name: kolla-ansible-federation-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/keystone.yml + - ^ansible/roles/keystone/ + vars: + scenario: federation + scenario_images_extra: + - ^httpd + +- job: + name: kolla-ansible-debian-bookworm-federation + parent: kolla-ansible-federation-base + nodeset: kolla-ansible-debian-bookworm-multi-8GB + +- job: + name: kolla-ansible-rocky-10-federation + parent: kolla-ansible-federation-base + nodeset: kolla-ansible-rocky-10-multi-8GB + +- job: + name: kolla-ansible-ubuntu-noble-federation + parent: kolla-ansible-federation-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + +- project-template: + name: kolla-ansible-scenario-federation + description: | + Runs Kolla-Ansible Keystone federation scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-federation + - kolla-ansible-rocky-10-federation + - kolla-ansible-ubuntu-noble-federation From ba915dc2b6bba91263a9de66ebfddbb24843302e Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 5 Jun 2025 22:30:29 +0100 Subject: [PATCH 089/165] ansible-lint: Fix jinja[spacing] Change-Id: I2e30c04bd1597ec15e4c681b31443c7886154194 Signed-off-by: Michal Nasiadka --- .ansible-lint | 1 - ansible/group_vars/all/common.yml | 4 ++-- ansible/group_vars/all/database.yml | 4 ++-- ansible/roles/cinder/defaults/main.yml | 2 +- ansible/roles/ovn-db/defaults/main.yml | 2 +- ansible/roles/ovn-db/tasks/bootstrap-db.yml | 2 +- ansible/roles/prometheus/defaults/main.yml | 4 ++-- 7 files changed, 9 insertions(+), 10 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index 48663ad432..f98b0586e9 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -35,6 +35,5 @@ skip_list: - command-instead-of-shell - command-instead-of-module - ignore-errors - - jinja[spacing] - yaml[truthy] - yaml[line-length] diff --git a/ansible/group_vars/all/common.yml b/ansible/group_vars/all/common.yml index 4a82c29ea3..5f60c69236 100644 --- a/ansible/group_vars/all/common.yml +++ b/ansible/group_vars/all/common.yml @@ -349,8 +349,8 @@ database_user: "root" database_port: "3306" database_connection_recycle_time: 10 database_max_pool_size: 1 -database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool ) and ( enable_proxysql | bool)) else 'no' }}" -database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool ) and ( enable_proxysql | bool)) else 'no' }}" +database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool) and (enable_proxysql | bool)) else 'no' }}" +database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool) and (enable_proxysql | bool)) else 'no' }}" # Optionally allow Kolla to set sysctl values set_sysctl: "yes" diff --git a/ansible/group_vars/all/database.yml 
b/ansible/group_vars/all/database.yml index 7e0e1bd033..72b8a94ef3 100644 --- a/ansible/group_vars/all/database.yml +++ b/ansible/group_vars/all/database.yml @@ -7,5 +7,5 @@ database_user: "root" database_port: "3306" database_connection_recycle_time: 10 database_max_pool_size: 1 -database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool ) and ( enable_proxysql | bool)) else 'no' }}" -database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool ) and ( enable_proxysql | bool)) else 'no' }}" +database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool) and (enable_proxysql | bool)) else 'no' }}" +database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool) and (enable_proxysql | bool)) else 'no' }}" diff --git a/ansible/roles/cinder/defaults/main.yml b/ansible/roles/cinder/defaults/main.yml index 6232922b55..77ef7fcc24 100644 --- a/ansible/roles/cinder/defaults/main.yml +++ b/ansible/roles/cinder/defaults/main.yml @@ -280,7 +280,7 @@ cinder_ceph_backends: enabled: "{{ cinder_backend_ceph | bool }}" cinder_backup_backend_ceph_name: "rbd-1" -cinder_backup_ceph_backend: "{{ cinder_ceph_backends | selectattr('name', 'equalto', cinder_backup_backend_ceph_name) | list | first | combine({'pool': ceph_cinder_backup_pool_name, 'user': ceph_cinder_backup_user }) }}" +cinder_backup_ceph_backend: "{{ cinder_ceph_backends | selectattr('name', 'equalto', cinder_backup_backend_ceph_name) | list | first | combine({'pool': ceph_cinder_backup_pool_name, 'user': ceph_cinder_backup_user}) }}" skip_cinder_backend_check: False diff --git a/ansible/roles/ovn-db/defaults/main.yml b/ansible/roles/ovn-db/defaults/main.yml index 0bc27949b6..37f64ac563 100644 --- a/ansible/roles/ovn-db/defaults/main.yml +++ b/ansible/roles/ovn-db/defaults/main.yml @@ -103,7 +103,7 @@ ovn_openflow_probe_interval: "60" ovn_db_inactivity_probe: "60000" ovn_sb_db_inactivity_probe: "{{ ovn_db_inactivity_probe }}" ovn_nb_db_inactivity_probe: "{{ 
ovn_db_inactivity_probe }}" -ovn_sb_db_relay_active_inactivity_probe: "{{ ovn_db_inactivity_probe | int * 2}}" +ovn_sb_db_relay_active_inactivity_probe: "{{ ovn_db_inactivity_probe | int * 2 }}" ovn_sb_db_relay_passive_inactivity_probe: "{{ ovn_db_inactivity_probe }}" ovn_sb_db_relay_max_backoff: "{{ ovn_db_inactivity_probe }}" diff --git a/ansible/roles/ovn-db/tasks/bootstrap-db.yml b/ansible/roles/ovn-db/tasks/bootstrap-db.yml index 89282ab98b..afeda55c2b 100644 --- a/ansible/roles/ovn-db/tasks/bootstrap-db.yml +++ b/ansible/roles/ovn-db/tasks/bootstrap-db.yml @@ -96,4 +96,4 @@ delay: 6 when: - enable_ovn_sb_db_relay | bool - loop: "{{ range(1, (ovn_sb_db_relay_count | int) +1) | list }}" + loop: "{{ range(1, (ovn_sb_db_relay_count | int) + 1) | list }}" diff --git a/ansible/roles/prometheus/defaults/main.yml b/ansible/roles/prometheus/defaults/main.yml index 27affe4472..3e9083671d 100644 --- a/ansible/roles/prometheus/defaults/main.yml +++ b/ansible/roles/prometheus/defaults/main.yml @@ -295,7 +295,7 @@ prometheus_blackbox_exporter_endpoints_default: - "{{ ('zun_internal:os_endpoint:' + zun_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}" enabled: "{{ enable_zun | bool }}" # Additional service endpoints - - endpoints: "{% set etcd_endpoints = [] %}{% for host in groups.get('etcd', []) %}{{ etcd_endpoints.append('etcd_' + host + ':http_2xx:' + hostvars[host]['etcd_protocol'] + '://' + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['etcd_client_port'] + '/metrics')}}{% endfor %}{{ etcd_endpoints }}" + - endpoints: "{% set etcd_endpoints = [] %}{% for host in groups.get('etcd', []) %}{{ etcd_endpoints.append('etcd_' + host + ':http_2xx:' + hostvars[host]['etcd_protocol'] + '://' + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['etcd_client_port'] + '/metrics') }}{% endfor %}{{ etcd_endpoints }}" enabled: "{{ enable_etcd | bool }}" - endpoints: - 
"grafana:http_2xx:{{ grafana_public_endpoint }}" @@ -316,7 +316,7 @@ prometheus_blackbox_exporter_endpoints_default: - endpoints: - "prometheus_alertmanager:http_2xx_alertmanager:{{ prometheus_alertmanager_public_endpoint if enable_prometheus_alertmanager_external else prometheus_alertmanager_internal_endpoint }}" enabled: "{{ enable_prometheus_alertmanager | bool }}" - - endpoints: "{% set rabbitmq_endpoints = [] %}{% for host in groups.get('rabbitmq', []) %}{{ rabbitmq_endpoints.append('rabbitmq_' + host + (':tls_connect:' if rabbitmq_enable_tls | bool else ':tcp_connect:') + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['rabbitmq_port'] ) }}{% endfor %}{{ rabbitmq_endpoints }}" + - endpoints: "{% set rabbitmq_endpoints = [] %}{% for host in groups.get('rabbitmq', []) %}{{ rabbitmq_endpoints.append('rabbitmq_' + host + (':tls_connect:' if rabbitmq_enable_tls | bool else ':tcp_connect:') + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['rabbitmq_port']) }}{% endfor %}{{ rabbitmq_endpoints }}" enabled: "{{ enable_rabbitmq | bool }}" - endpoints: "{% set redis_endpoints = [] %}{% for host in groups.get('redis', []) %}{{ redis_endpoints.append('redis_' + host + ':tcp_connect:' + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['redis_port']) }}{% endfor %}{{ redis_endpoints }}" enabled: "{{ enable_redis | bool }}" From dc47549dc92c37a728ee40f27b20e565cd546630 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 7 Nov 2025 09:51:42 +0100 Subject: [PATCH 090/165] CI: Retry Selenium container pulls Change-Id: Ic4dfba427e6874556bb70c8b808d5f7069dc0f53 Signed-off-by: Michal Nasiadka --- roles/kolla-ansible-test-dashboard/tasks/main.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/kolla-ansible-test-dashboard/tasks/main.yml b/roles/kolla-ansible-test-dashboard/tasks/main.yml index 8455b31549..5347691824 100644 --- 
a/roles/kolla-ansible-test-dashboard/tasks/main.yml +++ b/roles/kolla-ansible-test-dashboard/tasks/main.yml @@ -55,6 +55,10 @@ detach: true image: "quay.io/opendevmirror/selenium-standalone-firefox:latest" network_mode: host + register: docker_result + retries: 5 + delay: 3 + until: docker_result is succeeded - name: Run Selenium Firefox container (Podman) become: true @@ -64,6 +68,10 @@ detach: true image: "quay.io/opendevmirror/selenium-standalone-firefox:latest" network_mode: host + register: podman_result + retries: 5 + delay: 3 + until: podman_result is succeeded - name: Wait for port 444 to be up ansible.builtin.wait_for: From 718eacca6d4804ec22c7f0dbe016df8242fa2084 Mon Sep 17 00:00:00 2001 From: Bartosz Bezak Date: Fri, 7 Nov 2025 12:07:59 +0100 Subject: [PATCH 091/165] Tidy up release notes for Flamingo release Change-Id: Ic7a042aa8a15ee7469a77749054596669a0593f4 Signed-off-by: Bartosz Bezak --- .../notes/bug-2097452-6b635e49606ebe26.yaml | 6 ++-- .../notes/bug-2111916-6b2ca611df41dceb.yaml | 4 +-- .../notes/bug-2112339-3f67eefc7f0062cc.yaml | 11 ++++--- .../cron-break-out-role-fa72289cc100ef53.yaml | 4 +-- ...m_modOIDC_error_page-0fe3dd7414310536.yaml | 20 +++++-------- ...-support-for-mariadb-4cbd7c8590a34981.yaml | 13 ++++----- ...rop-ironic-inspector-38fc91c64517ffc1.yaml | 15 +++++----- .../drop-vmware-support-6303715d3fb80946.yaml | 6 ++-- .../flamingo-prelude-57a2c0e9d578fd57.yaml | 29 +++++++++++++++++++ ...uentd-break-out-role-1e5306dac1c41583.yaml | 4 +-- .../notes/neutron-uwsgi-5ac29b1403c02554.yaml | 16 +++++----- .../nova-metadata-split-d1c9ff2010390352.yaml | 4 +-- 12 files changed, 76 insertions(+), 56 deletions(-) create mode 100644 releasenotes/notes/flamingo-prelude-57a2c0e9d578fd57.yaml diff --git a/releasenotes/notes/bug-2097452-6b635e49606ebe26.yaml b/releasenotes/notes/bug-2097452-6b635e49606ebe26.yaml index 1fa4888bb1..1731789bda 100644 --- a/releasenotes/notes/bug-2097452-6b635e49606ebe26.yaml +++ 
b/releasenotes/notes/bug-2097452-6b635e49606ebe26.yaml @@ -1,7 +1,7 @@ --- fixes: - | - Configuration with letsencrypt disabled generates - in haproxy unnecessary backend 'acme_client_back' - and 'path_reg ^/.well-known/acme-challenge/.+'. + Fixes haproxy configurations that kept rendering the ``acme_client_back`` + backend and the ``path_reg ^/.well-known/acme-challenge/.+`` ACL even when + Let's Encrypt support was disabled. `LP#2097452 `__ diff --git a/releasenotes/notes/bug-2111916-6b2ca611df41dceb.yaml b/releasenotes/notes/bug-2111916-6b2ca611df41dceb.yaml index 78b22c6333..c971365cd1 100644 --- a/releasenotes/notes/bug-2111916-6b2ca611df41dceb.yaml +++ b/releasenotes/notes/bug-2111916-6b2ca611df41dceb.yaml @@ -1,6 +1,6 @@ --- fixes: - | - Fixes invalid use of ``drain`` on single-node RabbitMQ setups - by using ``stop_app`` instead. + Single-node RabbitMQ upgrades no longer fail on the unsupported ``drain`` + command; the playbooks now call ``stop_app`` in that scenario. `LP#2111916 `__ diff --git a/releasenotes/notes/bug-2112339-3f67eefc7f0062cc.yaml b/releasenotes/notes/bug-2112339-3f67eefc7f0062cc.yaml index d01515854d..062acd1623 100644 --- a/releasenotes/notes/bug-2112339-3f67eefc7f0062cc.yaml +++ b/releasenotes/notes/bug-2112339-3f67eefc7f0062cc.yaml @@ -1,10 +1,9 @@ --- fixes: - | - Improves query routing in ProxySQL by setting ``default_hostgroup`` - for all database users and by adding user-based routing rules in - addition to schema-based rules. This enhancement also fixes incorrect - routing of queries that are executed before a schema is selected, - such as ``SET AUTOCOMMIT`` or ``ROLLBACK``, which could otherwise be sent - to a non-existent hostgroup. + Improves ProxySQL routing by setting ``default_hostgroup`` for every + MariaDB user and by adding user-based rules alongside the schema-based + rules. 
Statements that run before a schema is selected (for example + ``SET AUTOCOMMIT`` or ``ROLLBACK``) now land in a valid hostgroup instead + of failing against ``NULL`` backends. `LP#2112339 `__ diff --git a/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml b/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml index 7bbb085e5c..bdcdc94bda 100644 --- a/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml +++ b/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml @@ -1,5 +1,5 @@ --- upgrade: - | - A ``cron`` Ansible role has been created and its deployment is not part - of the ``common`` role anymore. + The ``cron`` tasks now live in their own Ansible role instead of being + shipped inside ``common``. diff --git a/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml b/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml index e50b469113..1fbcb35f85 100644 --- a/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml +++ b/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml @@ -1,19 +1,15 @@ --- features: - | - Enable the configuration of the timeout manager by - ``OIDCStateTimeout`` variable. We also provide means to - override the error page for the modOIDC plugin via - ``{{ node_custom_config }}/keystone/federation/modoidc-error-page.html`` - file. + Adds knobs for the Keystone mod_auth_openidc integration: tune the timeout + manager via ``OIDCStateTimeout`` and provide a custom error page at + ``{{ node_custom_config }}/keystone/federation/modoidc-error-page.html``. upgrade: - | - It was added a default template for the modOIDC plugin, - which will handle authentication errors for federated users. - The default template is found at - "ansible/roles/keystone/templates/modoidc-error-page.html.j2"; - it can also be replaced/overwritten. 
One can also overwrite, - the timeout, instead of the whole page via the following variable: + Deployments now ship a default template at + ``ansible/roles/keystone/templates/modoidc-error-page.html.j2`` to handle + federated authentication errors. Operators can override the full template + or just adjust the redirect delay via ``keystone_federation_oidc_error_page_retry_login_delay_milliseconds``. - The default timeout for the page redirection is 5 seconds. + The default redirect delay is 5 seconds. diff --git a/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml b/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml index ab486ca79d..095760b47a 100644 --- a/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml +++ b/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml @@ -1,13 +1,12 @@ --- features: - | - ProxySQL is now automatically enabled when MariaDB is enabled. - MariaDB container healthcheck method was updated as healthcheck script was - replaced from Clustercheck to official MariaDB docker image's + ProxySQL is now enabled automatically whenever MariaDB is enabled, and the + container health check now uses the upstream `healthcheck.sh `__ + script instead of ``clustercheck``. upgrade: - | - Database loadbalancing with HAProxy and MariaDB Clustercheck is no longer - supported. For the system that uses HAProxy and Clustercheck, upgrading - MariaDB with ``kolla-ansible upgrade`` will deploy ProxySQL containers and - remove MariaDB Clustercheck containers. + The HAProxy + ``clustercheck`` backend for MariaDB is no longer supported. + Running ``kolla-ansible upgrade`` now deploys ProxySQL and removes the old + ``clustercheck`` containers automatically. 
diff --git a/releasenotes/notes/drop-ironic-inspector-38fc91c64517ffc1.yaml b/releasenotes/notes/drop-ironic-inspector-38fc91c64517ffc1.yaml index a912433e94..5d379df6b1 100644 --- a/releasenotes/notes/drop-ironic-inspector-38fc91c64517ffc1.yaml +++ b/releasenotes/notes/drop-ironic-inspector-38fc91c64517ffc1.yaml @@ -1,12 +1,11 @@ --- upgrade: - | - The ``ironic-inspector`` deployment support has been dropped following - retirement of that service in ``Ironic`` project. - ``ironic_inspector_kernel_cmdline_extras`` has been renamed to - ``ironic_kernel_cmdline_extras`` and ``ironic_inspector_pxe_filter`` - has been renamed to ``ironic_pxe_filter``. - Also the ``inspector.ipxe`` file has been renamed to ``ipa.ipxe``. + Support for deploying ``ironic-inspector`` has been dropped following the + service's retirement upstream. The remaining variables and artifacts were + renamed for consistency: ``ironic_inspector_kernel_cmdline_extras`` becomes + ``ironic_kernel_cmdline_extras``, ``ironic_inspector_pxe_filter`` becomes + ``ironic_pxe_filter``, and ``inspector.ipxe`` becomes ``ipa.ipxe``. - | - ``bifrost`` support for deploying legacy ironic inspector has been - dropped together with ``bifrost_enable_ironic_inspector`` variable. + ``bifrost`` also removed its legacy inspector integration, so the + ``bifrost_enable_ironic_inspector`` option has been deleted. diff --git a/releasenotes/notes/drop-vmware-support-6303715d3fb80946.yaml b/releasenotes/notes/drop-vmware-support-6303715d3fb80946.yaml index 0a7b28a7d7..d672d90b3d 100644 --- a/releasenotes/notes/drop-vmware-support-6303715d3fb80946.yaml +++ b/releasenotes/notes/drop-vmware-support-6303715d3fb80946.yaml @@ -1,6 +1,6 @@ --- upgrade: - | - ``VMWare`` support for various OpenStack services (e.g. Nova, Cinder, - Neutron) has been dropped due to removal in respective services - and no development or new versions of third party libraries. + ``VMware`` drivers across Nova, Cinder, and Neutron are no longer deployed. 
+ Upstream projects removed the integration and the third-party libraries are + unmaintained. diff --git a/releasenotes/notes/flamingo-prelude-57a2c0e9d578fd57.yaml b/releasenotes/notes/flamingo-prelude-57a2c0e9d578fd57.yaml new file mode 100644 index 0000000000..f7bddc1794 --- /dev/null +++ b/releasenotes/notes/flamingo-prelude-57a2c0e9d578fd57.yaml @@ -0,0 +1,29 @@ +--- +prelude: | + The Kolla Ansible ``21.0.0`` (Flamingo) release focuses on tightening + operations for the control plane, database layer, and observability stack + while following upstream service retirements. Highlights include: + + * Database services now use ``ProxySQL`` by default with ``MariaDB`` adopting + the upstream ``healthcheck.sh`` script, TLS enabled for all ``MariaDB`` + connections through ``ProxySQL``, and ``Valkey`` replacing ``Redis``. The + legacy ``HAProxy``/``clustercheck`` path and containers have been removed. + * Logging and monitoring were overhauled: ``Fluentd`` moved into its own role + and sends logs directly to ``OpenSearch`` nodes, ``Prometheus`` + node-exporters run from a dedicated role, and ``OpenSearch Dashboards`` + connects to the data nodes without an intermediate ``HAProxy`` hop. + * Control-plane services gained multiple lifecycle improvements. ``Neutron`` + now mirrors the upstream layout with new maintenance/RPC workers, wrapper + containers manage the OVN metadata-agent HAProxy processes, + ``nova-metadata`` runs in its own container, ``Horizon`` uses port ``8080`` + when fronted by ``HAProxy``, and the default ``uWSGI`` provider now covers + more services. 
+ * Tooling and reliability improvements: the supported ``ansible`` window is + 11–12, host bootstrap tasks moved into ``ansible-collection-kolla``, CA + bundle trust paths were aligned on Enterprise Linux hosts, ``mod_oidc`` + gained templated error pages, and several HA fixes landed (Let’s Encrypt + ACME cleanup, Horizon memcached resilience, RabbitMQ single-node upgrades, + and ``ProxySQL`` routing improvements). + * With ``ironic-inspector`` retired upstream, Kolla Ansible now provides the + ``ironic-pxe-filter`` service to cover bare-metal PXE filtering and removes + other unused integrations such as ``Venus`` and ``VMware`` drivers. diff --git a/releasenotes/notes/fluentd-break-out-role-1e5306dac1c41583.yaml b/releasenotes/notes/fluentd-break-out-role-1e5306dac1c41583.yaml index 60d97f9224..f0110ee829 100644 --- a/releasenotes/notes/fluentd-break-out-role-1e5306dac1c41583.yaml +++ b/releasenotes/notes/fluentd-break-out-role-1e5306dac1c41583.yaml @@ -1,5 +1,5 @@ --- upgrade: - | - A ``fluentd`` Ansible role has been created and its deployment is not part - of the ``common`` role anymore. + ``fluentd`` now has its own Ansible role instead of being deployed from the + ``common`` role. diff --git a/releasenotes/notes/neutron-uwsgi-5ac29b1403c02554.yaml b/releasenotes/notes/neutron-uwsgi-5ac29b1403c02554.yaml index 9cabd83a84..7349c1b0ce 100644 --- a/releasenotes/notes/neutron-uwsgi-5ac29b1403c02554.yaml +++ b/releasenotes/notes/neutron-uwsgi-5ac29b1403c02554.yaml @@ -1,13 +1,11 @@ --- upgrade: - | - ``Neutron`` deployment has been reworked to use uWSGI for API workers - and to run additional processes in separate containers (following changes - in Neutron project). - Therefore ``neutron-tls-proxy`` service has been dropped and currently - TLS is terminated on the uWSGI server. 
- In addition to this there are new containers/services: + ``Neutron`` now runs its API workers under uWSGI and moves auxiliary + processes into dedicated containers, matching the upstream deployment + model. TLS is terminated directly on uWSGI, so the ``neutron-tls-proxy`` + service was removed. New containers introduced with this change include: - * neutron-ovn-maintenance-worker - * neutron-rpc-server - * neutron-periodic-workers + * ``neutron-ovn-maintenance-worker`` + * ``neutron-rpc-server`` + * ``neutron-periodic-workers`` diff --git a/releasenotes/notes/nova-metadata-split-d1c9ff2010390352.yaml b/releasenotes/notes/nova-metadata-split-d1c9ff2010390352.yaml index 2cf1ffb5fd..dd9085aa3f 100644 --- a/releasenotes/notes/nova-metadata-split-d1c9ff2010390352.yaml +++ b/releasenotes/notes/nova-metadata-split-d1c9ff2010390352.yaml @@ -1,5 +1,5 @@ --- features: - | - ``nova-metadata`` service has been split into it's own container in - preparation for uWSGI support. + The ``nova-metadata`` service now runs in its own container in preparation + for the uWSGI migration. From 7d0693d9d5f96081a1d402075fcb6f90bf9f631b Mon Sep 17 00:00:00 2001 From: OpenStack Release Bot Date: Mon, 10 Nov 2025 16:16:56 +0000 Subject: [PATCH 092/165] Update master for stable/2025.2 Add file to the reno documentation build to show release notes for stable/2025.2. Use pbr instruction to increment the minor version number automatically so that master versions are higher than the versions on stable/2025.2. 
Sem-Ver: feature Change-Id: Ib7a148a4eed0bcea52e405bf417ed089461dc8fd Signed-off-by: OpenStack Release Bot Generated-By: openstack/project-config:roles/copy-release-tools-scripts/files/release-tools/add_release_note_page.sh --- releasenotes/source/2025.2.rst | 6 ++++++ releasenotes/source/index.rst | 1 + 2 files changed, 7 insertions(+) create mode 100644 releasenotes/source/2025.2.rst diff --git a/releasenotes/source/2025.2.rst b/releasenotes/source/2025.2.rst new file mode 100644 index 0000000000..4dae18d869 --- /dev/null +++ b/releasenotes/source/2025.2.rst @@ -0,0 +1,6 @@ +=========================== +2025.2 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2025.2 diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst index 00cb78ce48..0af693b9fc 100644 --- a/releasenotes/source/index.rst +++ b/releasenotes/source/index.rst @@ -8,6 +8,7 @@ Contents :maxdepth: 2 unreleased + 2025.2 2025.1 2024.2 2024.1 From 6828a59bfc79eb988def6d52fc4dc1a56bdd5404 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 17:28:50 +0100 Subject: [PATCH 093/165] CI: Pin docker to <29 Change-Id: I5fb29efc9bff4560528a98824399126eb13873c7 Signed-off-by: Michal Nasiadka --- tests/templates/globals-default.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2 index af04859fff..b10cf4dd47 100644 --- a/tests/templates/globals-default.j2 +++ b/tests/templates/globals-default.j2 @@ -24,6 +24,8 @@ podman_registry_insecure: true {% endif %} {% else %} docker_debug: true +docker_apt_package_pin: "5:28.*" +docker_yum_package_pin: "28.*" docker_registry_mirrors: - {{ infra_dockerhub_mirror }} From 08e334868180e9e84fdb1026961aeca1b5e2ed29 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 13 Nov 2025 07:11:56 +0100 Subject: [PATCH 094/165] CI: Switch IPA images source location It should be ironic-python-agent, not builder. 
It seems Ironic has switched upload locations long time ago. Change-Id: I0189391dede5b325941d32ff54ac7032d1d20e5b Signed-off-by: Michal Nasiadka --- tests/run.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/run.yml b/tests/run.yml index cfd55ebba5..a3c9ab46f6 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -220,7 +220,7 @@ - name: Download Ironic Python Agent (IPA) images get_url: - url: "https://tarballs.opendev.org/openstack/ironic-python-agent-builder/dib/files/{{ item.src }}" + url: "https://tarballs.opendev.org/openstack/ironic-python-agent/dib/files/{{ item.src }}" dest: "/etc/kolla/config/ironic/{{ item.dest }}" with_items: - src: "ipa-centos9-{{ zuul.branch | replace('/', '-') }}.initramfs" From 1ebc6796d59d3c3a8d5a05411f2ad5c0a2f1065f Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 14 Nov 2025 13:46:20 +0000 Subject: [PATCH 095/165] Revert^2 "ovn: Mark as non-voting due to db related failures" This reverts commit 809e5b2e341fefd655c12532d61a588bc2ce4091. 
Reason for revert: It's failing again Change-Id: Iddc5b14bf6fec937030ac75a3fce1774d46460cb Signed-Off-By: --- zuul.d/scenarios/ovn.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/zuul.d/scenarios/ovn.yaml b/zuul.d/scenarios/ovn.yaml index 6e94728457..cbeca20058 100644 --- a/zuul.d/scenarios/ovn.yaml +++ b/zuul.d/scenarios/ovn.yaml @@ -6,6 +6,7 @@ - ^ansible/group_vars/all/(neutron|octavia|openvswitch|ovn).yml - ^ansible/roles/(neutron|octavia|openvswitch|ovn-controller|ovn-db)/ - ^tests/test-ovn.sh + voting: false vars: scenario: ovn scenario_images_extra: @@ -47,7 +48,3 @@ - kolla-ansible-debian-bookworm-ovn-upgrade - kolla-ansible-ubuntu-noble-ovn - kolla-ansible-ubuntu-noble-ovn-upgrade - gate: - jobs: - - kolla-ansible-ubuntu-noble-ovn - - kolla-ansible-ubuntu-noble-ovn-upgrade From 5ba334cedafd9cf460dd947c4e5c984ed1ccc034 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 14 Nov 2025 07:00:10 +0100 Subject: [PATCH 096/165] CI: Stop using infra cache servers as registry It stopped working with docker-ce >= 29.0.0 and has proven to be problematic in the past Change-Id: I9a3d8d3dd268b7bb098b8e87d22b11e4ef0c72e9 Signed-off-by: Michal Nasiadka --- tests/templates/globals-default.j2 | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2 index 32a28773fb..a0a8351043 100644 --- a/tests/templates/globals-default.j2 +++ b/tests/templates/globals-default.j2 @@ -82,13 +82,6 @@ podman_registry_insecure: yes {% endif %} openstack_tag: "{{ build_image_tag }}" {% else %} -# use the published images from a site mirror of quay.io -docker_registry: "{{ zuul_site_mirror_fqdn }}:4447" -docker_registry_insecure: no -{% if container_engine == 'podman' %} -podman_registry: "{{ zuul_site_mirror_fqdn }}:4447" -podman_registry_insecure: no -{% endif %} docker_namespace: openstack.kolla {% if docker_image_tag_suffix %} openstack_tag_suffix: "{{ docker_image_tag_suffix }}" From 
95ae6b73361aa0c7da98aaed5fc2bc3349420418 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 13:48:31 +0100 Subject: [PATCH 097/165] ansible-lint: Remove key-order[task] exclude Change-Id: I77cce57f3f2b05455e8bd4e7093081ef4ed25d59 Signed-off-by: Michal Nasiadka --- .ansible-lint | 1 - ansible/roles/bifrost/tasks/stop.yml | 14 +++--- ansible/roles/certificates/tasks/generate.yml | 47 +++++++++---------- .../tasks/migrate-volumes.yml | 4 +- ansible/roles/destroy/tasks/cleanup_host.yml | 12 ++--- ansible/roles/loadbalancer/tasks/precheck.yml | 24 +++++----- .../roles/mariadb/tasks/lookup_cluster.yml | 13 +++-- .../roles/mariadb/tasks/recover_cluster.yml | 6 +-- .../nova-cell/tasks/discover_computes.yml | 11 ++--- .../roles/nova-cell/tasks/external_ceph.yml | 38 +++++++-------- .../roles/nova-cell/tasks/libvirt-cleanup.yml | 10 ++-- ansible/roles/nova-cell/tasks/rabbitmq.yml | 9 ++-- .../roles/nova-cell/tasks/version-check.yml | 13 +++-- .../tasks/wait_discover_computes.yml | 34 +++++++------- ansible/roles/nova/tasks/map_cell0.yml | 6 +-- .../roles/octavia-certificates/tasks/main.yml | 5 +- ansible/roles/octavia/tasks/config.yml | 18 ++++--- .../roles/ovn-db/tasks/bootstrap-initial.yml | 4 +- ansible/roles/ovn-db/tasks/lookup_cluster.yml | 12 ++--- .../roles/prechecks/tasks/datetime_checks.yml | 4 +- .../roles/prechecks/tasks/timesync_checks.yml | 4 +- ansible/roles/prometheus/tasks/config.yml | 10 ++-- ansible/roles/rabbitmq/tasks/precheck.yml | 11 ++--- .../rabbitmq/tasks/remove-ha-all-policy.yml | 17 ++++--- .../roles/rabbitmq/tasks/version-check.yml | 20 ++++---- .../roles/service-image-info/tasks/main.yml | 12 ++--- ansible/roles/service-rabbitmq/tasks/main.yml | 15 +++--- ansible/roles/valkey/tasks/upgrade.yml | 3 +- ansible/site.yml | 8 ++-- 29 files changed, 181 insertions(+), 204 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index 6a44bcfc2d..1ec2012fe6 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -31,7 +31,6 @@ skip_list: # 
role name check matching ^*$ - role-name # TODO(frickler): Discuss these in detail, skipping for now to unblock things - - key-order[task] - no-free-form - name[play] - var-naming[no-role-prefix] diff --git a/ansible/roles/bifrost/tasks/stop.yml b/ansible/roles/bifrost/tasks/stop.yml index ac71b934b2..2c7bfe6786 100644 --- a/ansible/roles/bifrost/tasks/stop.yml +++ b/ansible/roles/bifrost/tasks/stop.yml @@ -1,5 +1,8 @@ --- -- block: +- when: + - inventory_hostname in groups['bifrost'] + - "'bifrost_deploy' not in skip_stop_containers" + block: - name: Check if bifrost_deploy container is running become: true kolla_container_facts: @@ -9,7 +12,8 @@ - bifrost_deploy register: container_facts - - block: + - when: container_facts.containers['bifrost_deploy'] is defined + block: # Ensure that all services are stopped gracefully, and in a sensible # order. - name: Stop services gracefully @@ -26,9 +30,3 @@ action: "stop_container" common_options: "{{ docker_common_options }}" name: "bifrost_deploy" - - when: "container_facts.containers['bifrost_deploy'] is defined" - - when: - - inventory_hostname in groups['bifrost'] - - "'bifrost_deploy' not in skip_stop_containers" diff --git a/ansible/roles/certificates/tasks/generate.yml b/ansible/roles/certificates/tasks/generate.yml index ec24316efa..d1b536853b 100644 --- a/ansible/roles/certificates/tasks/generate.yml +++ b/ansible/roles/certificates/tasks/generate.yml @@ -11,7 +11,10 @@ state: "directory" mode: "0770" -- block: +- when: + - letsencrypt_managed_certs == 'internal' or letsencrypt_managed_certs == '' or database_enable_tls_internal | bool + - kolla_enable_tls_external | bool or database_enable_tls_internal | bool + block: - name: Creating external SSL configuration file template: src: "{{ item }}.j2" @@ -59,9 +62,6 @@ path: "{{ external_dir }}/external.key" mode: "0660" state: file - when: - - letsencrypt_managed_certs == 'internal' or letsencrypt_managed_certs == '' or database_enable_tls_internal | bool - - 
kolla_enable_tls_external | bool or database_enable_tls_internal | bool - name: Creating external Server PEM File assemble: @@ -73,7 +73,10 @@ - letsencrypt_managed_certs == 'internal' or letsencrypt_managed_certs == '' - kolla_enable_tls_external | bool -- block: +- when: + - database_enable_tls_internal | bool + - kolla_same_external_internal_vip | bool + block: - name: Copy Certificate for ProxySQL copy: src: "{{ external_dir }}/external.crt" @@ -85,24 +88,24 @@ src: "{{ external_dir }}/external.key" dest: "{{ kolla_certificates_dir }}/proxysql-key.pem" mode: "0660" - when: - - database_enable_tls_internal | bool - - kolla_same_external_internal_vip | bool -- block: - - name: Copy the external PEM file to be the internal when internal + external are same network - copy: - src: "{{ kolla_external_fqdn_cert }}" - dest: "{{ kolla_internal_fqdn_cert }}" - remote_src: yes - mode: "0660" +- name: Copy the external PEM file to be the internal when internal + external are same network + copy: + src: "{{ kolla_external_fqdn_cert }}" + dest: "{{ kolla_internal_fqdn_cert }}" + remote_src: yes + mode: "0660" when: - letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == '' - kolla_enable_tls_external | bool - kolla_enable_tls_internal | bool - kolla_same_external_internal_vip | bool -- block: +- when: + - letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == '' or database_enable_tls_internal | bool + - kolla_enable_tls_internal | bool or database_enable_tls_internal | bool + - not kolla_same_external_internal_vip | bool + block: - name: Creating internal SSL configuration file template: src: "{{ item }}.j2" @@ -150,10 +153,6 @@ path: "{{ internal_dir }}/internal.key" mode: "0660" state: file - when: - - letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == '' or database_enable_tls_internal | bool - - kolla_enable_tls_internal | bool or database_enable_tls_internal | bool - - not kolla_same_external_internal_vip | bool - 
name: Creating internal Server PEM File assemble: @@ -166,7 +165,10 @@ - kolla_enable_tls_internal | bool - not kolla_same_external_internal_vip | bool -- block: +- when: + - database_enable_tls_internal | bool + - not kolla_same_external_internal_vip | bool + block: - name: Copy Certificate for ProxySQL copy: src: "{{ internal_dir }}/internal.crt" @@ -178,6 +180,3 @@ src: "{{ internal_dir }}/internal.key" dest: "{{ kolla_certificates_dir }}/proxysql-key.pem" mode: "0660" - when: - - database_enable_tls_internal | bool - - not kolla_same_external_internal_vip | bool diff --git a/ansible/roles/container-engine-migration/tasks/migrate-volumes.yml b/ansible/roles/container-engine-migration/tasks/migrate-volumes.yml index 86b10e85f3..e0339708bc 100644 --- a/ansible/roles/container-engine-migration/tasks/migrate-volumes.yml +++ b/ansible/roles/container-engine-migration/tasks/migrate-volumes.yml @@ -36,7 +36,8 @@ # NOTE(r-krcek): The following block is workaround for problem where podman # changes directory permissions after starting a container from any UID:GID # to root after migration. 
-- block: +- when: container_volumes.matched > 0 + block: - name: Pre-create volumes in target engine become: true kolla_container: @@ -58,7 +59,6 @@ path: "{{ item.path }}" state: absent with_items: "{{ container_volumes.files }}" - when: container_volumes.matched > 0 # NOTE(mhiner): this prevents RabbitMQ failing to establish connection # to other instances in multinode deployment diff --git a/ansible/roles/destroy/tasks/cleanup_host.yml b/ansible/roles/destroy/tasks/cleanup_host.yml index 96389564f2..3ed8705b1f 100644 --- a/ansible/roles/destroy/tasks/cleanup_host.yml +++ b/ansible/roles/destroy/tasks/cleanup_host.yml @@ -15,7 +15,12 @@ destroy_include_dev: "{{ destroy_include_dev }}" kolla_ansible_inventories: "{{ ansible_inventory_sources | join(' ') }}" -- block: +- when: + - enable_octavia | bool + - octavia_auto_configure | bool + - octavia_network_type == 'tenant' + - inventory_hostname in groups['octavia-health-manager'] + block: - name: Disable octavia-interface service service: name: octavia-interface @@ -32,8 +37,3 @@ file: path: /etc/dhcp/octavia-dhclient.conf state: absent - when: - - enable_octavia | bool - - octavia_auto_configure | bool - - octavia_network_type == 'tenant' - - inventory_hostname in groups['octavia-health-manager'] diff --git a/ansible/roles/loadbalancer/tasks/precheck.yml b/ansible/roles/loadbalancer/tasks/precheck.yml index 7d4fc4bbf7..cdb211f320 100644 --- a/ansible/roles/loadbalancer/tasks/precheck.yml +++ b/ansible/roles/loadbalancer/tasks/precheck.yml @@ -55,7 +55,11 @@ haproxy_vip_prechecks: "{{ all_hosts_in_batch and groups['haproxy_running_True'] is not defined }}" proxysql_vip_prechecks: "{{ all_hosts_in_batch and groups['proxysql_running_True'] is not defined }}" -- block: +- when: + - not kolla_externally_managed_cert | bool + - letsencrypt_managed_certs == 'internal' or letsencrypt_managed_certs == '' + - kolla_enable_tls_external | bool + block: - name: Checking if external haproxy certificate exists run_once: true 
stat: @@ -69,12 +73,12 @@ assert: that: haproxy_cert_file.stat.exists fail_msg: "External haproxy certificate file is not found. It is configured via 'kolla_external_fqdn_cert'" - when: - - not kolla_externally_managed_cert | bool - - letsencrypt_managed_certs == 'internal' or letsencrypt_managed_certs == '' - - kolla_enable_tls_external | bool -- block: +- when: + - not kolla_externally_managed_cert | bool + - letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == '' + - kolla_enable_tls_internal | bool + block: - name: Checking if internal haproxy certificate exists run_once: true stat: @@ -88,10 +92,6 @@ assert: that: haproxy_internal_cert_file.stat.exists fail_msg: "Internal haproxy certificate file is not found. It is configured via 'kolla_internal_fqdn_cert'" - when: - - not kolla_externally_managed_cert | bool - - letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == '' - - kolla_enable_tls_internal | bool - name: Checking the kolla_external_vip_interface is present assert: @@ -726,6 +726,8 @@ - haproxy_vip_prechecks - name: Firewalld checks + when: + - enable_external_api_firewalld | bool block: - name: Check if firewalld is running # noqa command-instead-of-module become: true @@ -743,5 +745,3 @@ Please install and configure firewalld. 
when: - firewalld_is_active.rc != 0 - when: - - enable_external_api_firewalld | bool diff --git a/ansible/roles/mariadb/tasks/lookup_cluster.yml b/ansible/roles/mariadb/tasks/lookup_cluster.yml index 48dd6e1ce8..207e1f90b8 100644 --- a/ansible/roles/mariadb/tasks/lookup_cluster.yml +++ b/ansible/roles/mariadb/tasks/lookup_cluster.yml @@ -17,7 +17,8 @@ set_fact: mariadb_cluster_exists: "{{ groups[mariadb_shard_group + '_had_volume_True'] is defined }}" -- block: +- when: not mariadb_recover | default(False) + block: - name: Check MariaDB service port liveness wait_for: host: "{{ api_interface_address }}" @@ -42,7 +43,10 @@ - mariadb_cluster_exists - groups[mariadb_shard_group + '_port_alive_True'] is not defined - - block: + - when: + - groups[mariadb_shard_group + '_port_alive_True'] is defined + - inventory_hostname in groups[mariadb_shard_group + '_port_alive_True'] + block: - name: Check MariaDB service WSREP sync status become: true kolla_toolbox: @@ -61,9 +65,6 @@ - name: Extract MariaDB service WSREP sync status set_fact: mariadb_sync_status: "{{ check_mariadb_sync_status.query_result[0][0]['Value'] }}" - when: - - groups[mariadb_shard_group + '_port_alive_True'] is defined - - inventory_hostname in groups[mariadb_shard_group + '_port_alive_True'] - name: Divide hosts by their MariaDB service WSREP sync status group_by: @@ -77,5 +78,3 @@ - groups[mariadb_shard_group + '_port_alive_True'] is defined - groups[mariadb_shard_group + '_sync_status_Synced'] is not defined or groups[mariadb_shard_group + '_port_alive_True'] | sort != groups[mariadb_shard_group + '_sync_status_Synced'] | sort - - when: not mariadb_recover | default(False) diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml index b2eae10089..7d3fb28e0b 100644 --- a/ansible/roles/mariadb/tasks/recover_cluster.yml +++ b/ansible/roles/mariadb/tasks/recover_cluster.yml @@ -15,7 +15,9 @@ run_once: true with_fileglob: 
"/tmp/kolla_mariadb_recover_inventory_name_*" -- block: +- when: + - mariadb_recover_inventory_name is not defined + block: - name: Stop MariaDB containers become: true kolla_container: @@ -92,8 +94,6 @@ connection: local set_fact: mariadb_recover_inventory_name: "{{ lookup('file', mariadb_recover_tmp_file_path) }}" - when: - - mariadb_recover_inventory_name is not defined - name: Store bootstrap and master hostnames into facts set_fact: diff --git a/ansible/roles/nova-cell/tasks/discover_computes.yml b/ansible/roles/nova-cell/tasks/discover_computes.yml index 9b6a1696f0..b49285b7e6 100644 --- a/ansible/roles/nova-cell/tasks/discover_computes.yml +++ b/ansible/roles/nova-cell/tasks/discover_computes.yml @@ -1,7 +1,11 @@ --- # Discover compute hosts for a cell. -- block: +- # Delegate to a cell conductor. + delegate_to: "{{ groups[nova_cell_conductor_group][0] }}" + # Fail all hosts if any of these once-per-cell tasks fail. + any_errors_fatal: true + block: - import_tasks: get_cell_settings.yml - name: Fail if cell settings not found @@ -16,8 +20,3 @@ command: > {{ kolla_container_engine }} exec -t nova_conductor nova-manage cell_v2 discover_hosts --by-service --cell_uuid {{ nova_cell_settings.cell_uuid }} changed_when: False - - # Delegate to a cell conductor. - delegate_to: "{{ groups[nova_cell_conductor_group][0] }}" - # Fail all hosts if any of these once-per-cell tasks fail. 
- any_errors_fatal: true diff --git a/ansible/roles/nova-cell/tasks/external_ceph.yml b/ansible/roles/nova-cell/tasks/external_ceph.yml index f2d7ba6967..9f4fe9cbc6 100644 --- a/ansible/roles/nova-cell/tasks/external_ceph.yml +++ b/ansible/roles/nova-cell/tasks/external_ceph.yml @@ -97,7 +97,11 @@ - service | service_enabled_and_mapped_to_host - nova_backend == "rbd" -- block: +- when: + - not enable_nova_libvirt_container | bool + - inventory_hostname in groups[nova_cell_compute_group] + - nova_backend == "rbd" + block: - name: Ensure /etc/ceph directory exists (host libvirt) vars: paths: @@ -123,12 +127,21 @@ group: "root" mode: "0644" become: true - when: - - not enable_nova_libvirt_container | bool - - inventory_hostname in groups[nova_cell_compute_group] - - nova_backend == "rbd" -- block: +- vars: + libvirt_secrets_dir: >- + {{ (node_config_directory ~ '/nova-libvirt/secrets') + if enable_nova_libvirt_container | bool + else '/etc/libvirt/secrets' }} + # NOTE(mgoddard): When running libvirt as a host daemon, on CentOS it + # appears to pick up secrets automatically, while on Ubuntu it requires a + # reload. This may be due to differences in tested versions of libvirt + # (8.0.0 vs 6.0.0). Reload should be low overhead, so do it always. + libvirt_restart_handlers: >- + {{ ['Reload libvirtd'] + if not enable_nova_libvirt_container | bool else + [] }} + block: - name: Ensuring libvirt secrets directory exists vars: service: "{{ nova_cell_services['nova-libvirt'] }}" @@ -190,16 +203,3 @@ enabled: "{{ cinder_backend_ceph }}" notify: "{{ libvirt_restart_handlers }}" no_log: True - vars: - libvirt_secrets_dir: >- - {{ (node_config_directory ~ '/nova-libvirt/secrets') - if enable_nova_libvirt_container | bool - else '/etc/libvirt/secrets' }} - # NOTE(mgoddard): When running libvirt as a host daemon, on CentOS it - # appears to pick up secrets automatically, while on Ubuntu it requires a - # reload. 
This may be due to differences in tested versions of libvirt - # (8.0.0 vs 6.0.0). Reload should be low overhead, so do it always. - libvirt_restart_handlers: >- - {{ ['Reload libvirtd'] - if not enable_nova_libvirt_container | bool else - [] }} diff --git a/ansible/roles/nova-cell/tasks/libvirt-cleanup.yml b/ansible/roles/nova-cell/tasks/libvirt-cleanup.yml index 316e2cf4cc..dda9648658 100644 --- a/ansible/roles/nova-cell/tasks/libvirt-cleanup.yml +++ b/ansible/roles/nova-cell/tasks/libvirt-cleanup.yml @@ -15,7 +15,8 @@ - nova_libvirt register: container_facts -- block: +- when: container_facts.containers['nova_libvirt'] is defined + block: # NOTE(mgoddard): pgrep exit status 1 means no match. - name: Check if there are any running VMs become: true @@ -41,7 +42,6 @@ common_options: "{{ docker_common_options }}" action: "stop_and_remove_container" name: nova_libvirt - when: container_facts.containers['nova_libvirt'] is defined - name: Remove nova_libvirt Docker volumes become: true @@ -62,7 +62,9 @@ state: "absent" # Revert the changes applied in config-host.yml. -- block: +- when: + - nova_compute_virt_type == 'kvm' + block: - name: Remove udev kolla kvm rules become: true file: @@ -82,5 +84,3 @@ masked: false when: - ansible_facts.distribution == 'Ubuntu' - when: - - nova_compute_virt_type == 'kvm' diff --git a/ansible/roles/nova-cell/tasks/rabbitmq.yml b/ansible/roles/nova-cell/tasks/rabbitmq.yml index e5eb4a15bd..62cade4477 100644 --- a/ansible/roles/nova-cell/tasks/rabbitmq.yml +++ b/ansible/roles/nova-cell/tasks/rabbitmq.yml @@ -1,6 +1,9 @@ --- # Create RabbitMQ users and vhosts. 
-- block: +- when: + - nova_cell_rpc_transport == 'rabbit' + - enable_rabbitmq | bool + block: - import_role: name: service-rabbitmq vars: @@ -23,7 +26,3 @@ when: - nova_cell_rpc_group_name != nova_cell_notify_group_name or nova_cell_rpc_rabbitmq_users != nova_cell_notify_rabbitmq_users - - when: - - nova_cell_rpc_transport == 'rabbit' - - enable_rabbitmq | bool diff --git a/ansible/roles/nova-cell/tasks/version-check.yml b/ansible/roles/nova-cell/tasks/version-check.yml index 5a339cc998..b66640587b 100644 --- a/ansible/roles/nova-cell/tasks/version-check.yml +++ b/ansible/roles/nova-cell/tasks/version-check.yml @@ -1,5 +1,10 @@ --- -- block: +- when: enable_nova_libvirt_container | bool and (groups[service.group] | length) > 0 + vars: + service_name: "nova-libvirt" + service: "{{ nova_cell_services[service_name] }}" + tags: nova-libvirt-version-check + block: - name: Get new Libvirt version become: true kolla_container: @@ -80,9 +85,3 @@ loop_control: label: "{{ item.item }}" when: item.stdout is defined - - tags: nova-libvirt-version-check - when: enable_nova_libvirt_container | bool and (groups[service.group] | length) > 0 - vars: - service_name: "nova-libvirt" - service: "{{ nova_cell_services[service_name] }}" diff --git a/ansible/roles/nova-cell/tasks/wait_discover_computes.yml b/ansible/roles/nova-cell/tasks/wait_discover_computes.yml index 1603af5dea..cded1381a1 100644 --- a/ansible/roles/nova-cell/tasks/wait_discover_computes.yml +++ b/ansible/roles/nova-cell/tasks/wait_discover_computes.yml @@ -4,7 +4,23 @@ # ironic compute services. # Work with --limit by including only hosts in ansible_play_batch. -- block: +- vars: + # For virt, use ansible_facts.nodename rather than inventory_hostname, since this + # is similar to what nova uses internally as its default for the + # [DEFAULT] host config option. 
+ virt_compute_service_hosts: >- + {{ virt_computes_in_batch | + map('extract', hostvars, ['ansible_facts', 'nodename']) | + list }} + # For ironic, use {{ansible_facts.hostname}}-ironic since this is what we + # configure for [DEFAULT] host in nova.conf. + ironic_compute_service_hosts: >- + {{ ironic_computes_in_batch | + map('extract', hostvars) | json_query('[].nova_compute_ironic_custom_host || [].ansible_facts.hostname') | + map('regex_replace', '^(.*)$', '\1-ironic') | + list }} + expected_compute_service_hosts: "{{ virt_compute_service_hosts + ironic_compute_service_hosts }}" + block: - name: Waiting for nova-compute services to register themselves become: true command: > @@ -67,22 +83,6 @@ any_failed_services or (nova_compute_registration_fatal | bool and failed_compute_service_hosts | length > 0) - vars: - # For virt, use ansible_facts.nodename rather than inventory_hostname, since this - # is similar to what nova uses internally as its default for the - # [DEFAULT] host config option. - virt_compute_service_hosts: >- - {{ virt_computes_in_batch | - map('extract', hostvars, ['ansible_facts', 'nodename']) | - list }} - # For ironic, use {{ansible_facts.hostname}}-ironic since this is what we - # configure for [DEFAULT] host in nova.conf. 
- ironic_compute_service_hosts: >- - {{ ironic_computes_in_batch | - map('extract', hostvars) | json_query('[].nova_compute_ironic_custom_host || [].ansible_facts.hostname') | - map('regex_replace', '^(.*)$', '\1-ironic') | - list }} - expected_compute_service_hosts: "{{ virt_compute_service_hosts + ironic_compute_service_hosts }}" - name: Include discover_computes.yml include_tasks: discover_computes.yml diff --git a/ansible/roles/nova/tasks/map_cell0.yml b/ansible/roles/nova/tasks/map_cell0.yml index 429b2fb955..69a1204fd8 100644 --- a/ansible/roles/nova/tasks/map_cell0.yml +++ b/ansible/roles/nova/tasks/map_cell0.yml @@ -1,5 +1,7 @@ --- -- block: +- run_once: True + delegate_to: "{{ groups[nova_api.group][0] }}" + block: - name: Create cell0 mappings vars: nova_api: "{{ nova_services['nova-api'] }}" @@ -61,8 +63,6 @@ when: - nova_cell_settings | bool - nova_cell_settings.cell_database != nova_cell0_connection - run_once: True - delegate_to: "{{ groups[nova_api.group][0] }}" - include_tasks: bootstrap_service.yml when: map_cell0.changed diff --git a/ansible/roles/octavia-certificates/tasks/main.yml b/ansible/roles/octavia-certificates/tasks/main.yml index b7903972ee..ee2fb5466c 100644 --- a/ansible/roles/octavia-certificates/tasks/main.yml +++ b/ansible/roles/octavia-certificates/tasks/main.yml @@ -11,7 +11,8 @@ include_tasks: check_expiry.yml when: octavia_certs_check_expiry | bool -- block: +- when: not octavia_certs_check_expiry | bool + block: - name: Ensure server_ca and client_ca directories exist file: path: "{{ octavia_certs_work_dir }}/{{ item }}" @@ -47,5 +48,3 @@ - { src: "server_ca/server_ca.key.pem", dest: "server_ca.key.pem" } - { src: "client_ca/client_ca.cert.pem", dest: "client_ca.cert.pem" } - { src: "client_ca/client.cert-and-key.pem", dest: "client.cert-and-key.pem" } - - when: not octavia_certs_check_expiry | bool diff --git a/ansible/roles/octavia/tasks/config.yml b/ansible/roles/octavia/tasks/config.yml index 63910990c1..c5c581240f 100644 
--- a/ansible/roles/octavia/tasks/config.yml +++ b/ansible/roles/octavia/tasks/config.yml @@ -102,8 +102,14 @@ become: true with_dict: "{{ octavia_services | select_services_enabled_and_mapped_to_host }}" -- block: - +- when: "'amphora' in octavia_provider_drivers" + vars: + octavia_amphora_keys: + - client.cert-and-key.pem + - client_ca.cert.pem + - server_ca.cert.pem + - server_ca.key.pem + block: - name: Copying over Octavia SSH key template: src: "octavia-ssh-key.j2" @@ -148,11 +154,3 @@ become: true when: service | service_enabled_and_mapped_to_host with_items: "{{ octavia_amphora_keys }}" - - when: "'amphora' in octavia_provider_drivers" - vars: - octavia_amphora_keys: - - client.cert-and-key.pem - - client_ca.cert.pem - - server_ca.cert.pem - - server_ca.key.pem diff --git a/ansible/roles/ovn-db/tasks/bootstrap-initial.yml b/ansible/roles/ovn-db/tasks/bootstrap-initial.yml index fde7295039..e620d92bde 100644 --- a/ansible/roles/ovn-db/tasks/bootstrap-initial.yml +++ b/ansible/roles/ovn-db/tasks/bootstrap-initial.yml @@ -1,7 +1,7 @@ --- - name: Bootstrap new cluster + any_errors_fatal: true block: - - name: Set bootstrap args fact for NB (new cluster) set_fact: ovn_nb_db_bootstrap_args: "{% if groups['ovn-nb-db'] | length > 1 and inventory_hostname != groups['ovn-nb-db'][0] %} --db-nb-cluster-remote-addr={{ 'api' | kolla_address(groups['ovn-nb-db'][0]) | put_address_in_context('url') }} {% endif %}" @@ -79,5 +79,3 @@ set_fact: ovn_nb_db_bootstrap_args: ovn_sb_db_bootstrap_args: - - any_errors_fatal: true diff --git a/ansible/roles/ovn-db/tasks/lookup_cluster.yml b/ansible/roles/ovn-db/tasks/lookup_cluster.yml index 86b4aa13af..c387aaf3b3 100644 --- a/ansible/roles/ovn-db/tasks/lookup_cluster.yml +++ b/ansible/roles/ovn-db/tasks/lookup_cluster.yml @@ -28,8 +28,9 @@ ovn_sb_db_cluster_exists: "{{ groups['ovn-sb-db' + '_had_volume_True'] is defined }}" - name: OVN NB checks + any_errors_fatal: true + when: inventory_hostname in 
groups.get('ovn-nb-db_had_volume_True', '') block: - - name: Check if running on all OVN NB DB hosts fail: msg: > @@ -77,12 +78,10 @@ when: - groups['ovn-nb-db_leader'] is not defined and groups['ovn-nb-db_follower'] is defined - any_errors_fatal: true - when: inventory_hostname in groups.get('ovn-nb-db_had_volume_True', '') - - name: OVN SB checks + any_errors_fatal: true + when: inventory_hostname in groups.get('ovn-sb-db_had_volume_True', '') block: - - name: Check if running on all OVN SB DB hosts fail: msg: > @@ -129,6 +128,3 @@ msg: OVN SB cluster exists but there is no leader - please check cluster status. when: - groups['ovn-sb-db_leader'] is not defined and groups['ovn-sb-db_follower'] is defined - - any_errors_fatal: true - when: inventory_hostname in groups.get('ovn-sb-db_had_volume_True', '') diff --git a/ansible/roles/prechecks/tasks/datetime_checks.yml b/ansible/roles/prechecks/tasks/datetime_checks.yml index aea0aad0f6..939d7d113e 100644 --- a/ansible/roles/prechecks/tasks/datetime_checks.yml +++ b/ansible/roles/prechecks/tasks/datetime_checks.yml @@ -11,7 +11,8 @@ settings and Kolla Ansible needs this file for mounting it to containers. when: not etc_localtime.stat.exists -- block: +- when: ansible_facts.os_family == 'Debian' + block: - name: Ensure /etc/timezone exist stat: path: /etc/timezone @@ -23,4 +24,3 @@ /etc/timezone is not found. This file is used for system-wide timezone settings and Kolla Ansible needs this file for mounting it to containers. 
when: not etc_timezone.stat.exists - when: ansible_facts.os_family == 'Debian' diff --git a/ansible/roles/prechecks/tasks/timesync_checks.yml b/ansible/roles/prechecks/tasks/timesync_checks.yml index c6a9bd2a07..e63563dadb 100644 --- a/ansible/roles/prechecks/tasks/timesync_checks.yml +++ b/ansible/roles/prechecks/tasks/timesync_checks.yml @@ -1,5 +1,6 @@ --- -- block: +- when: prechecks_enable_host_ntp_checks | bool + block: - name: Check for a running host NTP daemon # noqa command-instead-of-module vars: prechecks_host_ntp_daemons: @@ -44,4 +45,3 @@ 'timedatectl status'. when: - "'synchronized: yes' not in timedatectl_status.stdout" - when: prechecks_enable_host_ntp_checks | bool diff --git a/ansible/roles/prometheus/tasks/config.yml b/ansible/roles/prometheus/tasks/config.yml index 8c7ba5a6bd..3bbc497b72 100644 --- a/ansible/roles/prometheus/tasks/config.yml +++ b/ansible/roles/prometheus/tasks/config.yml @@ -175,7 +175,11 @@ - "{{ node_custom_config }}/prometheus/prometheus-blackbox-exporter.yml" - "{{ role_path }}/templates/prometheus-blackbox-exporter.yml.j2" -- block: +- vars: + base: "{{ node_custom_config }}/prometheus/" + service: "{{ prometheus_services['prometheus-server'] }}" + when: service | service_enabled_and_mapped_to_host + block: - name: Find extra prometheus server config files find: paths: "{{ node_custom_config }}/prometheus/extras/" @@ -210,7 +214,3 @@ dest: "{{ node_config_directory }}/prometheus-server/{{ relpath }}" mode: "0660" with_items: "{{ prometheus_config_extras_result.files | default([]) | map(attribute='path') | list }}" - vars: - base: "{{ node_custom_config }}/prometheus/" - service: "{{ prometheus_services['prometheus-server'] }}" - when: service | service_enabled_and_mapped_to_host diff --git a/ansible/roles/rabbitmq/tasks/precheck.yml b/ansible/roles/rabbitmq/tasks/precheck.yml index bc66f8a4ff..240ccebb12 100644 --- a/ansible/roles/rabbitmq/tasks/precheck.yml +++ b/ansible/roles/rabbitmq/tasks/precheck.yml @@ -105,7 
+105,11 @@ - not kolla_externally_managed_cert | bool - rabbitmq_enable_tls | bool -- block: +- run_once: true + when: + - container_facts.containers['rabbitmq'] is defined + tags: rabbitmq-ha-precheck + block: - name: List RabbitMQ queues become: true command: "{{ kolla_container_engine }} exec rabbitmq rabbitmqctl list_queues --silent name type --formatter json" @@ -155,8 +159,3 @@ when: - item.name is search('_fanout') - om_enable_rabbitmq_stream_fanout | bool - - run_once: true - when: - - container_facts.containers['rabbitmq'] is defined - tags: rabbitmq-ha-precheck diff --git a/ansible/roles/rabbitmq/tasks/remove-ha-all-policy.yml b/ansible/roles/rabbitmq/tasks/remove-ha-all-policy.yml index 71dc2b2a14..c05f75dd45 100644 --- a/ansible/roles/rabbitmq/tasks/remove-ha-all-policy.yml +++ b/ansible/roles/rabbitmq/tasks/remove-ha-all-policy.yml @@ -1,5 +1,10 @@ --- -- block: +- delegate_to: "{{ groups[role_rabbitmq_groups] | first }}" + run_once: true + vars: + service_name: "rabbitmq" + service: "{{ rabbitmq_services[service_name] }}" + block: - name: Get container facts become: true kolla_container_facts: @@ -9,7 +14,8 @@ - "{{ service.container_name }}" register: container_facts - - block: + - when: container_facts.containers[service.container_name] is defined + block: - name: List RabbitMQ policies become: true command: "{{ kolla_container_engine }} exec {{ service.container_name }} rabbitmqctl list_policies --silent" @@ -21,10 +27,3 @@ command: "{{ kolla_container_engine }} exec {{ service.container_name }} rabbitmqctl clear_policy ha-all" when: - "'ha-all' in rabbitmq_policies.stdout" - when: container_facts.containers[service.container_name] is defined - - delegate_to: "{{ groups[role_rabbitmq_groups] | first }}" - run_once: true - vars: - service_name: "rabbitmq" - service: "{{ rabbitmq_services[service_name] }}" diff --git a/ansible/roles/rabbitmq/tasks/version-check.yml b/ansible/roles/rabbitmq/tasks/version-check.yml index 946c876a01..daf1eb7dd1 100644 
--- a/ansible/roles/rabbitmq/tasks/version-check.yml +++ b/ansible/roles/rabbitmq/tasks/version-check.yml @@ -1,5 +1,11 @@ --- -- block: +- delegate_to: "{{ groups[role_rabbitmq_groups] | first }}" + run_once: true + tags: rabbitmq-version-check + vars: + service_name: "rabbitmq" + service: "{{ rabbitmq_services[service_name] }}" + block: - name: Get container facts become: true kolla_container_facts: @@ -9,7 +15,8 @@ - "{{ service.container_name }}" register: container_facts - - block: + - when: container_facts.containers[service.container_name] is defined + block: - name: Get current RabbitMQ version become: true command: "{{ kolla_container_engine }} exec {{ service.container_name }} rabbitmqctl --version" @@ -73,12 +80,3 @@ If you're absolutely certain you want to do this, please skip the tag `rabbitmq-version-check`. Otherwise, see these docs for how to pin the version of RabbitMQ: https://docs.openstack.org/kolla-ansible/latest/reference/message-queues/rabbitmq.html#rabbitmq-versions - - when: container_facts.containers[service.container_name] is defined - - delegate_to: "{{ groups[role_rabbitmq_groups] | first }}" - run_once: true - tags: rabbitmq-version-check - vars: - service_name: "rabbitmq" - service: "{{ rabbitmq_services[service_name] }}" diff --git a/ansible/roles/service-image-info/tasks/main.yml b/ansible/roles/service-image-info/tasks/main.yml index 9027bf3025..df5c39dd57 100644 --- a/ansible/roles/service-image-info/tasks/main.yml +++ b/ansible/roles/service-image-info/tasks/main.yml @@ -1,5 +1,8 @@ --- - name: Get target image info (docker) + become: true + delegate_to: "{{ service_image_info_delegate_host }}" + when: kolla_container_engine == 'docker' block: - community.docker.docker_image_info: name: "{{ service.image }}" @@ -8,11 +11,11 @@ - set_fact: service_image_info: "{{ docker_image_info }}" delegate_facts: true - become: true - delegate_to: "{{ service_image_info_delegate_host }}" - when: kolla_container_engine == 'docker' - name: Get 
target image info (podman) + become: true + delegate_to: "{{ service_image_info_delegate_host }}" + when: kolla_container_engine == 'podman' block: - containers.podman.podman_image_info: name: "{{ service.image }}" @@ -21,6 +24,3 @@ - set_fact: service_image_info: "{{ podman_image_info }}" delegate_facts: true - become: true - delegate_to: "{{ service_image_info_delegate_host }}" - when: kolla_container_engine == 'podman' diff --git a/ansible/roles/service-rabbitmq/tasks/main.yml b/ansible/roles/service-rabbitmq/tasks/main.yml index bc1cb2c7d0..dcdc7d5da3 100644 --- a/ansible/roles/service-rabbitmq/tasks/main.yml +++ b/ansible/roles/service-rabbitmq/tasks/main.yml @@ -1,5 +1,11 @@ --- -- block: +- become: true + delegate_to: "{{ service_rabbitmq_delegate_host }}" + run_once: "{{ service_rabbitmq_run_once }}" + when: service_rabbitmq_when | bool + tags: + - service-rabbitmq + block: - name: "{{ project_name }} | Ensure RabbitMQ vhosts exist" kolla_toolbox: container_engine: "{{ kolla_container_engine }}" @@ -36,10 +42,3 @@ retries: "{{ service_rabbitmq_retries }}" delay: "{{ service_rabbitmq_delay }}" no_log: True - - become: true - when: service_rabbitmq_when | bool - delegate_to: "{{ service_rabbitmq_delegate_host }}" - run_once: "{{ service_rabbitmq_run_once }}" - tags: - - service-rabbitmq diff --git a/ansible/roles/valkey/tasks/upgrade.yml b/ansible/roles/valkey/tasks/upgrade.yml index 0cae1ce26e..0fa7338875 100644 --- a/ansible/roles/valkey/tasks/upgrade.yml +++ b/ansible/roles/valkey/tasks/upgrade.yml @@ -15,6 +15,7 @@ _valkey_migration: "{{ redis_container_facts.containers['redis'] is defined }}" - name: Perform Redis to Valkey migration steps + when: redis_container_facts.containers['redis'] is defined block: - name: Set temporary Valkey migration vars @@ -110,8 +111,6 @@ name: redis when: inventory_hostname in groups['redis'] - when: redis_container_facts.containers['redis'] is defined - # These tasks run always, regardless of Redis presence - name: Reset 
Valkey port to default (6379) after migration diff --git a/ansible/site.yml b/ansible/site.yml index ae5f281cb8..8d742462b4 100644 --- a/ansible/site.yml +++ b/ansible/site.yml @@ -143,7 +143,10 @@ roles: - { role: loadbalancer } tasks: - - block: + - when: + - enable_haproxy | bool + - kolla_action in ['deploy', 'reconfigure', 'upgrade', 'config'] + block: - include_role: name: aodh tasks_from: loadbalancer @@ -341,9 +344,6 @@ name: loadbalancer tasks_from: check-containers when: kolla_action != 'config' - when: - - enable_haproxy | bool - - kolla_action in ['deploy', 'reconfigure', 'upgrade', 'config'] - name: Apply role opensearch gather_facts: false From 9f3ae79661f3e2f0314705e95edae1e9fe24ef60 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 13:53:11 +0100 Subject: [PATCH 098/165] ansible-lint: Remove no-free-form from excludes Change-Id: I58d00089e01953a1b8e2d2ba20c5a7e300ca30b9 Signed-off-by: Michal Nasiadka --- .ansible-lint | 1 - ansible/roles/nova-cell/tasks/precheck.yml | 3 ++- ansible/roles/prechecks/tasks/port_checks.yml | 15 +++++++++------ 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index 1ec2012fe6..9c1597a67f 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -31,7 +31,6 @@ skip_list: # role name check matching ^*$ - role-name # TODO(frickler): Discuss these in detail, skipping for now to unblock things - - no-free-form - name[play] - var-naming[no-role-prefix] - risky-file-permissions diff --git a/ansible/roles/nova-cell/tasks/precheck.yml b/ansible/roles/nova-cell/tasks/precheck.yml index b0b510a5b3..01dfb3290f 100644 --- a/ansible/roles/nova-cell/tasks/precheck.yml +++ b/ansible/roles/nova-cell/tasks/precheck.yml @@ -111,7 +111,8 @@ - name: Checking that host libvirt is not running vars: service: "{{ nova_cell_services['nova-libvirt'] }}" - stat: path=/var/run/libvirt/libvirt-sock + stat: + path: "/var/run/libvirt/libvirt-sock" register: result failed_when: result.stat.exists 
when: diff --git a/ansible/roles/prechecks/tasks/port_checks.yml b/ansible/roles/prechecks/tasks/port_checks.yml index 576baa6d2d..6fdff37a82 100644 --- a/ansible/roles/prechecks/tasks/port_checks.yml +++ b/ansible/roles/prechecks/tasks/port_checks.yml @@ -1,13 +1,16 @@ --- - name: Checking the api_interface is present - fail: "msg='Please check the api_interface property - interface {{ api_interface }} not found'" - when: api_interface not in ansible_facts.interfaces + ansible.builtin.assert: + that: api_interface in ansible_facts.interfaces + fail_msg: "Please check the api_interface property - interface {{ api_interface }} not found" - name: Checking the api_interface is active - fail: "msg='Please check the api_interface settings - interface {{ api_interface }} is not active'" - when: not hostvars[inventory_hostname].ansible_facts[api_interface | replace('-', '_')]['active'] + ansible.builtin.assert: + that: hostvars[inventory_hostname].ansible_facts[api_interface | replace('-', '_')]['active'] + fail_msg: "Please check the api_interface settings - interface {{ api_interface }} is not active" # kolla_address handles relevant address check - name: Checking the api_interface ip address configuration - fail: "msg='Please check the api_interface settings - interface {{ api_interface }} ip address problem'" - when: api_interface_address is not defined + ansible.builtin.assert: + that: api_interface_address is defined + fail_msg: "Please check the api_interface settings - interface {{ api_interface }} ip address problem" From b290792aa21db50bb0f9aededf58cd3811f7630a Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 13:55:55 +0100 Subject: [PATCH 099/165] ansible-lint: Remove ignore-errors from excludes Change-Id: I87a5b312de28c37c21cd73920a681b7e9e603b30 Signed-off-by: Michal Nasiadka --- .ansible-lint | 1 - 1 file changed, 1 deletion(-) diff --git a/.ansible-lint b/.ansible-lint index 9c1597a67f..ad9a30abbc 100644 --- a/.ansible-lint +++ 
b/.ansible-lint @@ -37,6 +37,5 @@ skip_list: - risky-shell-pipe - command-instead-of-shell - command-instead-of-module - - ignore-errors - yaml[truthy] - yaml[line-length] From c7751f5ab739c83115b2e47658a396f5059ba834 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 17 Nov 2025 12:28:21 +0100 Subject: [PATCH 100/165] Drop python3.10 from setup.cfg ansible-core 2.18 lowest python version support is 3.11 [1]. [1]: https://pypi.org/project/ansible-core/2.18.11/ Change-Id: I4f174d4425a1a6a9ac74ce987690bca263cea51f Signed-off-by: Michal Nasiadka --- setup.cfg | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 164c82a001..224726ee67 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,7 +6,7 @@ description_file = author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/kolla-ansible/latest/ -python_requires = >=3.10 +python_requires = >=3.11 license = Apache License, Version 2.0 classifier = Environment :: OpenStack @@ -18,7 +18,6 @@ classifier = Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 - Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 From 15fddd22f28a512db339cec573509bcfbbf9a8d8 Mon Sep 17 00:00:00 2001 From: Michael Still Date: Tue, 18 Nov 2025 21:01:42 +1100 Subject: [PATCH 101/165] Drop requirement for blueprints. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update contributor guide per conversation with Michał Nasiadka (mnasiadka) on IRC. 
Change-Id: I9bd8c46b084ff0ffce6c3abce972508fd9d59d69 Signed-off-by: Michael Still --- doc/source/contributor/contributing.rst | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst index 36e798561b..7e9bff16a9 100644 --- a/doc/source/contributor/contributing.rst +++ b/doc/source/contributor/contributing.rst @@ -56,9 +56,13 @@ directly, you can find the lists in Gerrit: New Feature Planning ~~~~~~~~~~~~~~~~~~~~ -New features are discussed via IRC or mailing list (with [kolla] prefix). -Kolla project keeps blueprints in `Launchpad `__. -Specs are welcome but not strictly required. +New features are discussed via IRC or on the openstack-discuss mailing list +(please include the [kolla] prefix to your subject line). + +Kolla has previously used Launchpad blueprints, but now simply uses tracking +bugs for new feature work. Please tag any such bugs with a "[RFE]" prefix, +which indicates the bug is a Request For Enhancement. Bugs are discussed in +more detail in the next section. Task Tracking ~~~~~~~~~~~~~ From a12bfa80059ee5b4b0559d5be3d9243c5cbb7730 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 14 Nov 2025 07:42:43 +0100 Subject: [PATCH 102/165] MariaDB: Improve recovery We're only checking if master MariaDB node is listening, we should check also slaves. Apply InnoDB variables from Galera docs for improved performance. 
Change-Id: Ied6ce15ce0e987cd1794351d2139a75edd5f52bc Signed-off-by: Michal Nasiadka --- ansible/roles/mariadb/tasks/recover_cluster.yml | 5 +---- ansible/roles/mariadb/templates/galera.cnf.j2 | 4 ++++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml index b2eae10089..3e454ac13d 100644 --- a/ansible/roles/mariadb/tasks/recover_cluster.yml +++ b/ansible/roles/mariadb/tasks/recover_cluster.yml @@ -244,7 +244,7 @@ - bootstrap_host is defined - bootstrap_host == inventory_hostname -- name: Wait for master mariadb +- name: Wait for MariaDB wait_for: host: "{{ api_interface_address }}" port: "{{ mariadb_port }}" @@ -255,8 +255,5 @@ until: check_mariadb_port is success retries: 10 delay: 6 - when: - - bootstrap_host is defined - - bootstrap_host == inventory_hostname - import_tasks: check.yml diff --git a/ansible/roles/mariadb/templates/galera.cnf.j2 b/ansible/roles/mariadb/templates/galera.cnf.j2 index 805669aed1..e61ba9f6a4 100644 --- a/ansible/roles/mariadb/templates/galera.cnf.j2 +++ b/ansible/roles/mariadb/templates/galera.cnf.j2 @@ -56,6 +56,10 @@ innodb_buffer_pool_size = '{{ dynamic_pool_size_mb }}M' {% else %} innodb_buffer_pool_size = '8192M' {% endif %} +{# NOTE(mnasiadka): These options are recommended in #} +{# https://mariadb.com/docs/galera-cluster/galera-management/configuration/configuring-mariadb-galera-cluster#performance-related-options #} +innodb_flush_log_at_trx_commit = 0 +innodb_autoinc_lock_mode = 2 [server] pid-file=/var/lib/mysql/mariadb.pid From 503058c6d96bac42d78dd924cc5d2b1efbc335c2 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 21 Nov 2025 11:05:52 +0100 Subject: [PATCH 103/165] CI: Bump tempest compute.build_timeout to 900s It's often failing with 300s Change-Id: Id71c82a557745f9b47ea92a5eeecf1d859719821 Signed-off-by: Michal Nasiadka --- roles/kolla-ansible-tempest/defaults/main.yml | 1 + 
roles/kolla-ansible-tempest/tasks/main.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/roles/kolla-ansible-tempest/defaults/main.yml b/roles/kolla-ansible-tempest/defaults/main.yml index 600059789a..1c296e926b 100644 --- a/roles/kolla-ansible-tempest/defaults/main.yml +++ b/roles/kolla-ansible-tempest/defaults/main.yml @@ -3,6 +3,7 @@ kolla_ansible_tempest_packages: - python-tempestconf - tempest +kolla_ansible_tempest_build_timeout: 900 kolla_ansible_tempest_cirros_ver: "0.6.3" kolla_ansible_tempest_exclude_regex: "" kolla_ansible_tempest_packages_extra: [] diff --git a/roles/kolla-ansible-tempest/tasks/main.yml b/roles/kolla-ansible-tempest/tasks/main.yml index 7c118feff8..4fdb8571c0 100644 --- a/roles/kolla-ansible-tempest/tasks/main.yml +++ b/roles/kolla-ansible-tempest/tasks/main.yml @@ -23,6 +23,7 @@ --debug --image {{ image }} --os-cloud kolla-admin + compute.build_timeout {{ kolla_ansible_tempest_build_timeout }} >/tmp/logs/ansible/test-init-tempest-discover 2>&1 environment: OS_CLIENT_CONFIG_FILE: "/etc/kolla/clouds.yaml" From edd959238a3ad4d9cca929ad89fda756870903c6 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 13:59:51 +0100 Subject: [PATCH 104/165] ansible-lint: Fix yaml[truthy] in aodh role Change-Id: Icae3b3b6935ad6837831e43dc5ce847fcc1c374c Signed-off-by: Michal Nasiadka --- ansible/roles/aodh/tasks/bootstrap.yml | 4 ++-- ansible/roles/aodh/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/aodh/tasks/config.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/roles/aodh/tasks/bootstrap.yml b/ansible/roles/aodh/tasks/bootstrap.yml index cd6d932695..7c69af01ae 100644 --- a/ansible/roles/aodh/tasks/bootstrap.yml +++ b/ansible/roles/aodh/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ aodh_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ aodh_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['aodh-api'][0] }}" when: - not 
use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ aodh_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['aodh-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/aodh/tasks/bootstrap_service.yml b/ansible/roles/aodh/tasks/bootstrap_service.yml index 24bca33dce..b355eb5281 100644 --- a/ansible/roles/aodh/tasks/bootstrap_service.yml +++ b/ansible/roles/aodh/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_aodh" restart_policy: oneshot volumes: "{{ aodh_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[aodh_api.group][0] }}" diff --git a/ansible/roles/aodh/tasks/config.yml b/ansible/roles/aodh/tasks/config.yml index 2542d061b3..35dea6922a 100644 --- a/ansible/roles/aodh/tasks/config.yml +++ b/ansible/roles/aodh/tasks/config.yml @@ -12,7 +12,7 @@ - name: Check if policies shall be overwritten stat: path: "{{ item }}" - run_once: True + run_once: true delegate_to: localhost register: aodh_policy with_first_found: From 46ce3bdec958bab9b994b3b2e0a862d92ce9179e Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:00:59 +0100 Subject: [PATCH 105/165] ansible-lint: Fix yaml[truthy] in barbican role Change-Id: Ifc27430369166f13bbbdb005b04194c6c6f8ec98 Signed-off-by: Michal Nasiadka --- ansible/roles/barbican/tasks/bootstrap.yml | 4 ++-- ansible/roles/barbican/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/barbican/tasks/config.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/roles/barbican/tasks/bootstrap.yml b/ansible/roles/barbican/tasks/bootstrap.yml index 6ffb69e365..177ae74660 100644 --- 
a/ansible/roles/barbican/tasks/bootstrap.yml +++ b/ansible/roles/barbican/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ barbican_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ barbican_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['barbican-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ barbican_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['barbican-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/barbican/tasks/bootstrap_service.yml b/ansible/roles/barbican/tasks/bootstrap_service.yml index 7081abca54..4359311110 100644 --- a/ansible/roles/barbican/tasks/bootstrap_service.yml +++ b/ansible/roles/barbican/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_barbican" restart_policy: oneshot volumes: "{{ barbican_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[barbican_api.group][0] }}" diff --git a/ansible/roles/barbican/tasks/config.yml b/ansible/roles/barbican/tasks/config.yml index 2870735d90..f3c5cb48d4 100644 --- a/ansible/roles/barbican/tasks/config.yml +++ b/ansible/roles/barbican/tasks/config.yml @@ -26,7 +26,7 @@ - name: Check if policies shall be overwritten stat: path: "{{ item }}" - run_once: True + run_once: true delegate_to: localhost register: barbican_policy with_first_found: @@ -72,7 +72,7 @@ service: "{{ barbican_services['barbican-api'] }}" stat: path: "{{ node_custom_config }}/barbican/barbican-api-paste.ini" - run_once: True + run_once: true delegate_to: localhost register: check_barbican_api_paste_ini when: service | service_enabled_and_mapped_to_host 
From 3ad4bcebd8f670f9fc37f63fb83c83906829d27c Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:04:46 +0100 Subject: [PATCH 106/165] ansible-lint: Fix yaml[truthy] in bifrost role Change-Id: Ifa650dbd3368b4b6e3f7ff8ba220c2ae7c6dae88 Signed-off-by: Michal Nasiadka --- ansible/roles/bifrost/tasks/start.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/bifrost/tasks/start.yml b/ansible/roles/bifrost/tasks/start.yml index 3c7022c80e..f194e15f18 100644 --- a/ansible/roles/bifrost/tasks/start.yml +++ b/ansible/roles/bifrost/tasks/start.yml @@ -6,7 +6,7 @@ common_options: "{{ docker_common_options }}" image: "{{ bifrost_deploy_image_full }}" name: "bifrost_deploy" - privileged: True + privileged: true environment: "{{ bifrost_deploy_container_proxy }}" volumes: - "{{ node_config_directory }}/bifrost/:/etc/bifrost:ro" From ff674d0e6b4bd18af62c7b27d663fad9742994bf Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:05:44 +0100 Subject: [PATCH 107/165] ansible-lint: Fix yaml[truthy] in blazar role Change-Id: Idb396265e6090d6f106375a28b93b72faff7d585 Signed-off-by: Michal Nasiadka --- ansible/roles/blazar/tasks/bootstrap.yml | 6 +++--- ansible/roles/blazar/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/blazar/tasks/config.yml | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/roles/blazar/tasks/bootstrap.yml b/ansible/roles/blazar/tasks/bootstrap.yml index af3ef413d9..c8e1eab3ff 100644 --- a/ansible/roles/blazar/tasks/bootstrap.yml +++ b/ansible/roles/blazar/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ blazar_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ blazar_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['blazar-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ blazar_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true 
delegate_to: "{{ groups['blazar-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -48,7 +48,7 @@ endpoint_type: "{{ openstack_interface }}" region_name: "{{ openstack_region_name }}" name: "{{ blazar_aggregate_pool_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['blazar-api'][0] }}" - import_tasks: bootstrap_service.yml diff --git a/ansible/roles/blazar/tasks/bootstrap_service.yml b/ansible/roles/blazar/tasks/bootstrap_service.yml index 0e4b552194..7b51adabf9 100644 --- a/ansible/roles/blazar/tasks/bootstrap_service.yml +++ b/ansible/roles/blazar/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_blazar" restart_policy: oneshot volumes: "{{ blazar_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[blazar_api.group][0] }}" diff --git a/ansible/roles/blazar/tasks/config.yml b/ansible/roles/blazar/tasks/config.yml index d411eecf85..086a36cac1 100644 --- a/ansible/roles/blazar/tasks/config.yml +++ b/ansible/roles/blazar/tasks/config.yml @@ -12,7 +12,7 @@ - name: Check if policies shall be overwritten stat: path: "{{ item }}" - run_once: True + run_once: true delegate_to: localhost register: blazar_policy with_first_found: From 13f5a7b9daa8c9a8051746cb810806b73137481a Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:06:50 +0100 Subject: [PATCH 108/165] ansible-lint: Fix yaml[truthy] in ceilometer role Change-Id: I719be4c046c0bb5153a616621259909bce6964c3 Signed-off-by: Michal Nasiadka --- ansible/roles/ceilometer/defaults/main.yml | 8 ++++---- .../roles/ceilometer/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/ceilometer/tasks/config.yml | 14 +++++++------- ansible/roles/ceilometer/tasks/precheck.yml | 2 +- 4 files changed, 14 
insertions(+), 14 deletions(-) diff --git a/ansible/roles/ceilometer/defaults/main.yml b/ansible/roles/ceilometer/defaults/main.yml index 8f47ced666..7e41f8ba15 100644 --- a/ansible/roles/ceilometer/defaults/main.yml +++ b/ansible/roles/ceilometer/defaults/main.yml @@ -3,7 +3,7 @@ ceilometer_services: ceilometer-notification: container_name: ceilometer_notification group: ceilometer-notification - enabled: True + enabled: true image: "{{ ceilometer_notification_image_full }}" volumes: "{{ ceilometer_notification_default_volumes + ceilometer_notification_extra_volumes }}" dimensions: "{{ ceilometer_notification_dimensions }}" @@ -11,7 +11,7 @@ ceilometer_services: ceilometer-central: container_name: ceilometer_central group: ceilometer-central - enabled: True + enabled: true image: "{{ ceilometer_central_image_full }}" volumes: "{{ ceilometer_central_default_volumes + ceilometer_central_extra_volumes }}" dimensions: "{{ ceilometer_central_dimensions }}" @@ -19,8 +19,8 @@ ceilometer_services: ceilometer-compute: container_name: ceilometer_compute group: ceilometer-compute - enabled: True - privileged: True + enabled: true + privileged: true image: "{{ ceilometer_compute_image_full }}" volumes: "{{ ceilometer_compute_default_volumes + ceilometer_compute_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ ceilometer_compute_dimensions }}" diff --git a/ansible/roles/ceilometer/tasks/bootstrap_service.yml b/ansible/roles/ceilometer/tasks/bootstrap_service.yml index c62b567653..532248a20c 100644 --- a/ansible/roles/ceilometer/tasks/bootstrap_service.yml +++ b/ansible/roles/ceilometer/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -18,5 +18,5 @@ name: "bootstrap_ceilometer" restart_policy: oneshot volumes: "{{ 
ceilometer_notification.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[ceilometer_notification.group][0] }}" diff --git a/ansible/roles/ceilometer/tasks/config.yml b/ansible/roles/ceilometer/tasks/config.yml index faa40fa9db..2ef8f72876 100644 --- a/ansible/roles/ceilometer/tasks/config.yml +++ b/ansible/roles/ceilometer/tasks/config.yml @@ -47,7 +47,7 @@ copy: src: "{{ node_custom_config }}/ceilometer/{{ ceilometer_custom_meters_local_folder }}/" dest: "{{ node_config_directory }}/{{ item.key }}/meters.d" - force: True + force: true mode: "0660" become: true when: @@ -59,7 +59,7 @@ path: "{{ node_custom_config }}/ceilometer/{{ ceilometer_dynamic_pollsters_local_folder }}" delegate_to: localhost register: ceilometer_dynamic_pollsters_folder - run_once: True + run_once: true - name: Set the variable that control the copy of dynamic pollsters definitions set_fact: @@ -102,7 +102,7 @@ copy: src: "{{ node_custom_config }}/ceilometer/polling.yaml" dest: "{{ node_config_directory }}/{{ item.key }}/polling.yaml" - force: True + force: true mode: "0660" become: true when: @@ -125,7 +125,7 @@ copy: src: "{{ node_custom_config }}/ceilometer/gnocchi_resources.yaml" dest: "{{ node_config_directory }}/{{ item.key }}/gnocchi_resources.yaml" - force: True + force: true mode: "0660" become: true when: @@ -142,7 +142,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: ceilometer_policy with_first_found: - files: "{{ supported_policy_format_list }}" @@ -196,7 +196,7 @@ copy: src: "{{ node_custom_config }}/ceilometer/event_definitions.yaml" dest: "{{ node_config_directory }}/ceilometer-notification/event_definitions.yaml" - force: True + force: true mode: "0660" become: true register: ceilometer_event_definitions_overwriting @@ -245,7 +245,7 @@ template: src: "{{ node_custom_config }}/ceilometer/pipeline.yaml" dest: "{{ node_config_directory }}/{{ item.key }}/pipeline.yaml" - force: 
True + force: true mode: "0660" become: true register: ceilometer_pipeline_overwriting diff --git a/ansible/roles/ceilometer/tasks/precheck.yml b/ansible/roles/ceilometer/tasks/precheck.yml index 4f5580f135..7a1aa6bf55 100644 --- a/ansible/roles/ceilometer/tasks/precheck.yml +++ b/ansible/roles/ceilometer/tasks/precheck.yml @@ -10,5 +10,5 @@ that: - not (enable_ceilometer | bool) or enable_gnocchi | bool or enable_ceilometer_prometheus_pushgateway | bool msg: "At least one Ceilometer publisher must be enabled" - run_once: True + run_once: true changed_when: false From edc23891cd588f2363504dce7db87a0be0a05da1 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:07:50 +0100 Subject: [PATCH 109/165] ansible-lint: Fix yaml[truthy] in certificates role Change-Id: I360c570ba9dc1a7c8324dc53145e3da8c818c738 Signed-off-by: Michal Nasiadka --- ansible/roles/certificates/tasks/generate.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/certificates/tasks/generate.yml b/ansible/roles/certificates/tasks/generate.yml index d1b536853b..542603d45d 100644 --- a/ansible/roles/certificates/tasks/generate.yml +++ b/ansible/roles/certificates/tasks/generate.yml @@ -93,7 +93,7 @@ copy: src: "{{ kolla_external_fqdn_cert }}" dest: "{{ kolla_internal_fqdn_cert }}" - remote_src: yes + remote_src: true mode: "0660" when: - letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == '' From 59ecf6c7cf8da97cc2fb09418eee8f2b33d02e06 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:08:37 +0100 Subject: [PATCH 110/165] ansible-lint: Fix yaml[truthy] in cinder role Change-Id: Id3e2e72698ad5c1ee547789524de816f879d142e Signed-off-by: Michal Nasiadka --- ansible/roles/cinder/defaults/main.yml | 6 +++--- ansible/roles/cinder/tasks/bootstrap.yml | 4 ++-- ansible/roles/cinder/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/cinder/tasks/config.yml | 5 ++--- ansible/roles/cinder/tasks/external_ceph.yml | 2 +- 
ansible/roles/cinder/tasks/precheck.yml | 4 ++-- ansible/roles/cinder/tasks/upgrade.yml | 4 ++-- ansible/roles/cinder/templates/cinder.conf.j2 | 2 +- 8 files changed, 15 insertions(+), 16 deletions(-) diff --git a/ansible/roles/cinder/defaults/main.yml b/ansible/roles/cinder/defaults/main.yml index 77ef7fcc24..dfcd5c19fa 100644 --- a/ansible/roles/cinder/defaults/main.yml +++ b/ansible/roles/cinder/defaults/main.yml @@ -53,7 +53,7 @@ cinder_services: group: cinder-backup enabled: "{{ enable_cinder_backup | bool }}" image: "{{ cinder_backup_image_full }}" - privileged: True + privileged: true volumes: "{{ cinder_backup_default_volumes + cinder_backup_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ cinder_backup_dimensions }}" healthcheck: "{{ cinder_backup_healthcheck }}" @@ -282,7 +282,7 @@ cinder_ceph_backends: cinder_backup_backend_ceph_name: "rbd-1" cinder_backup_ceph_backend: "{{ cinder_ceph_backends | selectattr('name', 'equalto', cinder_backup_backend_ceph_name) | list | first | combine({'pool': ceph_cinder_backup_pool_name, 'user': ceph_cinder_backup_user}) }}" -skip_cinder_backend_check: False +skip_cinder_backend_check: false cinder_enabled_backends: "{{ cinder_backends | selectattr('enabled', 'equalto', true) | list + cinder_ceph_backends | selectattr('enabled', 'equalto', true) | list }}" @@ -314,7 +314,7 @@ pure_san_ip: # Lightbits Storage Driver ############################### lightbits_nvme_tcp_backend_name: "lightbits_nvme_backend" -lightos_skip_ssl_verify: False +lightos_skip_ssl_verify: false lightos_api_port: 443 lightos_default_num_replicas: 3 lightos_api_address: diff --git a/ansible/roles/cinder/tasks/bootstrap.yml b/ansible/roles/cinder/tasks/bootstrap.yml index 03c540d35b..303de67c10 100644 --- a/ansible/roles/cinder/tasks/bootstrap.yml +++ b/ansible/roles/cinder/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ cinder_database_shard_root_user }}" login_password: "{{ database_password 
}}" name: "{{ cinder_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['cinder-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ cinder_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['cinder-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/cinder/tasks/bootstrap_service.yml b/ansible/roles/cinder/tasks/bootstrap_service.yml index a203f7d5b1..a5774b9edc 100644 --- a/ansible/roles/cinder/tasks/bootstrap_service.yml +++ b/ansible/roles/cinder/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_cinder" restart_policy: oneshot volumes: "{{ cinder_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[cinder_api.group][0] }}" diff --git a/ansible/roles/cinder/tasks/config.yml b/ansible/roles/cinder/tasks/config.yml index 2b7a05bee5..8bee34fdfc 100644 --- a/ansible/roles/cinder/tasks/config.yml +++ b/ansible/roles/cinder/tasks/config.yml @@ -18,14 +18,13 @@ - include_tasks: external_ceph.yml when: - cinder_backend_ceph | bool - - inventory_hostname in groups['cinder-volume'] or - inventory_hostname in groups['cinder-backup'] + - inventory_hostname in groups['cinder-volume'] or inventory_hostname in groups['cinder-backup'] - name: Check if policies shall be overwritten stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: cinder_policy with_first_found: - files: "{{ supported_policy_format_list }}" diff --git a/ansible/roles/cinder/tasks/external_ceph.yml b/ansible/roles/cinder/tasks/external_ceph.yml index cf9c34fd13..71d71723e9 100644 --- a/ansible/roles/cinder/tasks/external_ceph.yml +++ 
b/ansible/roles/cinder/tasks/external_ceph.yml @@ -70,7 +70,7 @@ become: true file: path: "{{ node_config_directory }}/{{ item }}" - recurse: yes + recurse: true owner: "{{ config_owner_user }}" group: "{{ config_owner_group }}" when: inventory_hostname in groups[item] diff --git a/ansible/roles/cinder/tasks/precheck.yml b/ansible/roles/cinder/tasks/precheck.yml index 8f4fdff4a8..764498f538 100644 --- a/ansible/roles/cinder/tasks/precheck.yml +++ b/ansible/roles/cinder/tasks/precheck.yml @@ -27,7 +27,7 @@ - inventory_hostname in groups['cinder-api'] - name: Checking at least one valid backend is enabled for Cinder - run_once: True + run_once: true fail: msg: "Please enable at least one backend when enabling Cinder" when: @@ -56,7 +56,7 @@ - inventory_hostname in groups['cinder-volume'] - name: Checking for coordination backend if Ceph backend is enabled - run_once: True + run_once: true fail: msg: "Please enable valkey or etcd when using Cinder Ceph backend" when: diff --git a/ansible/roles/cinder/tasks/upgrade.yml b/ansible/roles/cinder/tasks/upgrade.yml index a402d547c1..0775f46664 100644 --- a/ansible/roles/cinder/tasks/upgrade.yml +++ b/ansible/roles/cinder/tasks/upgrade.yml @@ -22,7 +22,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_OSM: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -32,5 +32,5 @@ name: "bootstrap_cinder" restart_policy: oneshot volumes: "{{ cinder_api.volumes }}" - run_once: True + run_once: true delegate_to: "{{ groups[cinder_api.group][0] }}" diff --git a/ansible/roles/cinder/templates/cinder.conf.j2 b/ansible/roles/cinder/templates/cinder.conf.j2 index 92cd83a884..89171cdfb6 100644 --- a/ansible/roles/cinder/templates/cinder.conf.j2 +++ b/ansible/roles/cinder/templates/cinder.conf.j2 @@ -224,7 +224,7 @@ volume_backend_name = {{ lightbits_nvme_tcp_backend_name }} lightos_api_address = {{ lightbits_target_ips }} lightos_api_port = {{ 
lightbits_api_port }} lightos_default_num_replicas = {{ lightbits_default_num_replicas }} -lightos_skip_ssl_verify = {{ lightbits_skip_ssl_verify }} +lightos_skip_ssl_verify = {{ lightbits_skip_ssl_verify | bool }} lightos_jwt = {{ lightbits_JWT }} {% endif %} From f6bac41a97fc5e380dc6d3ce83ef7690c4654792 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:09:29 +0100 Subject: [PATCH 111/165] ansible-lint: Fix yaml[truthy] in cloudkitty role Change-Id: I215731b5cccf8354e69e6552472a9781f29a78d9 Signed-off-by: Michal Nasiadka --- ansible/roles/cloudkitty/defaults/main.yml | 4 ++-- ansible/roles/cloudkitty/tasks/bootstrap.yml | 16 ++++++++-------- .../roles/cloudkitty/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/cloudkitty/tasks/config.yml | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/ansible/roles/cloudkitty/defaults/main.yml b/ansible/roles/cloudkitty/defaults/main.yml index 7fcd44225d..401785cf7e 100644 --- a/ansible/roles/cloudkitty/defaults/main.yml +++ b/ansible/roles/cloudkitty/defaults/main.yml @@ -4,7 +4,7 @@ cloudkitty_services: container_name: "cloudkitty_api" group: "cloudkitty-api" image: "{{ cloudkitty_api_image_full }}" - enabled: True + enabled: true volumes: "{{ cloudkitty_api_default_volumes + cloudkitty_api_extra_volumes }}" dimensions: "{{ cloudkitty_api_dimensions }}" healthcheck: "{{ cloudkitty_api_healthcheck }}" @@ -30,7 +30,7 @@ cloudkitty_services: container_name: "cloudkitty_processor" group: "cloudkitty-processor" image: "{{ cloudkitty_processor_image_full }}" - enabled: True + enabled: true volumes: "{{ cloudkitty_processor_default_volumes + cloudkitty_processor_extra_volumes }}" dimensions: "{{ cloudkitty_processor_dimensions }}" healthcheck: "{{ cloudkitty_processor_healthcheck }}" diff --git a/ansible/roles/cloudkitty/tasks/bootstrap.yml b/ansible/roles/cloudkitty/tasks/bootstrap.yml index a0dc2ca1a1..78e381e199 100644 --- a/ansible/roles/cloudkitty/tasks/bootstrap.yml +++ 
b/ansible/roles/cloudkitty/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ cloudkitty_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ cloudkitty_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['cloudkitty-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ cloudkitty_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['cloudkitty-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -49,8 +49,8 @@ database_name: "{{ cloudkitty_influxdb_name }}" # The influxdb_database module and the InfluxDB 1.x Python client don't # support specifying a CA certificate file. - validate_certs: False - run_once: True + validate_certs: false + run_once: true delegate_to: "{{ groups['cloudkitty-api'][0] }}" when: cloudkitty_storage_backend == 'influxdb' @@ -89,11 +89,11 @@ url: "{{ cloudkitty_elasticsearch_url }}/{{ cloudkitty_elasticsearch_index_name }}" method: PUT status_code: 200 - return_content: yes + return_content: true body: | {} body_format: json - run_once: True + run_once: true delegate_to: "{{ groups['cloudkitty-api'][0] }}" when: - cloudkitty_storage_backend == 'elasticsearch' @@ -108,11 +108,11 @@ url: "{{ cloudkitty_opensearch_url }}/{{ cloudkitty_opensearch_index_name }}" method: PUT status_code: 200 - return_content: yes + return_content: true body: | {} body_format: json - run_once: True + run_once: true delegate_to: "{{ groups['cloudkitty-api'][0] }}" when: - cloudkitty_storage_backend == 'opensearch' diff --git a/ansible/roles/cloudkitty/tasks/bootstrap_service.yml b/ansible/roles/cloudkitty/tasks/bootstrap_service.yml index 17546383c7..3f20a86693 100644 --- a/ansible/roles/cloudkitty/tasks/bootstrap_service.yml +++ b/ansible/roles/cloudkitty/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + 
detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_cloudkitty" restart_policy: oneshot volumes: "{{ cloudkitty_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[cloudkitty_api.group][0] }}" diff --git a/ansible/roles/cloudkitty/tasks/config.yml b/ansible/roles/cloudkitty/tasks/config.yml index 2b1d357d8f..89dcfeb03f 100644 --- a/ansible/roles/cloudkitty/tasks/config.yml +++ b/ansible/roles/cloudkitty/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: cloudkitty_policy with_first_found: - files: "{{ supported_policy_format_list }}" From c420acd57df754189a26f538d595274d46e19265 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:10:22 +0100 Subject: [PATCH 112/165] ansible-lint: Fix yaml[truthy] in collectd role Change-Id: Iffbe3a34365dcc77456d90400d69206714c31471 Signed-off-by: Michal Nasiadka --- ansible/roles/collectd/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/collectd/defaults/main.yml b/ansible/roles/collectd/defaults/main.yml index 20d339fdeb..0613b73a82 100644 --- a/ansible/roles/collectd/defaults/main.yml +++ b/ansible/roles/collectd/defaults/main.yml @@ -5,7 +5,7 @@ collectd_services: group: collectd enabled: true image: "{{ collectd_image_full }}" - privileged: True + privileged: true volumes: "{{ collectd_default_volumes + collectd_extra_volumes }}" dimensions: "{{ collectd_dimensions }}" From a70e6ba69e4019742c3cbfa4dfc64aaab907a047 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:11:36 +0100 Subject: [PATCH 113/165] ansible-lint: Fix yaml[truthy] in cron role Change-Id: Idbb9c4ba57429ad391ce0b8e67ab00feb0a90a78 Signed-off-by: Michal Nasiadka --- ansible/roles/cron/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/ansible/roles/cron/defaults/main.yml b/ansible/roles/cron/defaults/main.yml index 909285ae18..562b6ba440 100644 --- a/ansible/roles/cron/defaults/main.yml +++ b/ansible/roles/cron/defaults/main.yml @@ -3,7 +3,7 @@ cron_services: cron: container_name: cron group: cron - enabled: True + enabled: true image: "{{ cron_image_full }}" environment: KOLLA_LOGROTATE_SCHEDULE: "{{ cron_logrotate_schedule }}" From e3b049b6c4efe76f90cada378877e47c20abf7a1 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:14:43 +0100 Subject: [PATCH 114/165] ansible-lint: Fix yaml[truthy] in cyborg role Change-Id: I2ef947abd611a0a9f1f6c4b3cb345abb106f4d7e Signed-off-by: Michal Nasiadka --- ansible/roles/cyborg/tasks/bootstrap.yml | 4 ++-- ansible/roles/cyborg/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/cyborg/tasks/config.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/roles/cyborg/tasks/bootstrap.yml b/ansible/roles/cyborg/tasks/bootstrap.yml index d5308a0929..348ed40ee7 100644 --- a/ansible/roles/cyborg/tasks/bootstrap.yml +++ b/ansible/roles/cyborg/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ cyborg_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ cyborg_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['cyborg-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ cyborg_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['cyborg-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/cyborg/tasks/bootstrap_service.yml b/ansible/roles/cyborg/tasks/bootstrap_service.yml index c2f7816184..9d3693541f 100644 --- a/ansible/roles/cyborg/tasks/bootstrap_service.yml +++ b/ansible/roles/cyborg/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + 
detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_cyborg" restart_policy: oneshot volumes: "{{ cyborg_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[cyborg_api.group][0] }}" diff --git a/ansible/roles/cyborg/tasks/config.yml b/ansible/roles/cyborg/tasks/config.yml index 463b1a127b..ce3a06f094 100644 --- a/ansible/roles/cyborg/tasks/config.yml +++ b/ansible/roles/cyborg/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: cyborg_policy with_first_found: - files: "{{ supported_policy_format_list }}" From 94dbb27ccaa201a8d3ad20679b755874d576bbf4 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:15:38 +0100 Subject: [PATCH 115/165] ansible-lint: Fix yaml[truthy] in designate role Change-Id: I056a0803f90bba78e779bdbbe4e6791e637a8974 Signed-off-by: Michal Nasiadka --- ansible/roles/designate/tasks/bootstrap.yml | 4 ++-- ansible/roles/designate/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/designate/tasks/config.yml | 2 +- ansible/roles/designate/tasks/update_pools.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/roles/designate/tasks/bootstrap.yml b/ansible/roles/designate/tasks/bootstrap.yml index ff3d623928..7b2805724c 100644 --- a/ansible/roles/designate/tasks/bootstrap.yml +++ b/ansible/roles/designate/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ designate_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ item }}" - run_once: True + run_once: true delegate_to: "{{ groups['designate-central'][0] }}" with_items: - "{{ designate_database_name }}" @@ -34,7 +34,7 @@ host: "%" priv: "{{ item.database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['designate-central'][0] }}" with_items: - database_name: "{{ 
designate_database_name }}" diff --git a/ansible/roles/designate/tasks/bootstrap_service.yml b/ansible/roles/designate/tasks/bootstrap_service.yml index 8452faacc1..61af680181 100644 --- a/ansible/roles/designate/tasks/bootstrap_service.yml +++ b/ansible/roles/designate/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_designate" restart_policy: oneshot volumes: "{{ designate_central.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[designate_central.group][0] }}" diff --git a/ansible/roles/designate/tasks/config.yml b/ansible/roles/designate/tasks/config.yml index 2b05382488..a6bf8065a6 100644 --- a/ansible/roles/designate/tasks/config.yml +++ b/ansible/roles/designate/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: designate_policy with_first_found: - files: "{{ supported_policy_format_list }}" diff --git a/ansible/roles/designate/tasks/update_pools.yml b/ansible/roles/designate/tasks/update_pools.yml index edc4e7e6d9..b3ced413fb 100644 --- a/ansible/roles/designate/tasks/update_pools.yml +++ b/ansible/roles/designate/tasks/update_pools.yml @@ -2,5 +2,5 @@ - name: Non-destructive DNS pools update become: true command: "{{ kolla_container_engine }} exec -t designate_worker designate-manage pool update" - run_once: True + run_once: true delegate_to: "{{ groups['designate-worker'][0] }}" From 8641dc90c8333258d584f6d2b6ec501a7185f784 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:16:58 +0100 Subject: [PATCH 116/165] ansible-lint: Fix yaml[truthy] in destroy role Change-Id: I3d914a24e047a337cea82d7ebdf544e780363b98 Signed-off-by: Michal Nasiadka --- 
ansible/roles/destroy/tasks/cleanup_host.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/destroy/tasks/cleanup_host.yml b/ansible/roles/destroy/tasks/cleanup_host.yml index 3ed8705b1f..6d227f7618 100644 --- a/ansible/roles/destroy/tasks/cleanup_host.yml +++ b/ansible/roles/destroy/tasks/cleanup_host.yml @@ -24,7 +24,7 @@ - name: Disable octavia-interface service service: name: octavia-interface - enabled: no + enabled: false state: stopped failed_when: false From a3872b19d3eb042a12b9639580e58b387317aaf7 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:18:33 +0100 Subject: [PATCH 117/165] ansible-lint: Fix yaml[truthy] in fluentd role Change-Id: Ia31d049e4290bee04764b91dfbec2159de4461bf Signed-off-by: Michal Nasiadka --- ansible/roles/fluentd/tasks/config.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/fluentd/tasks/config.yml b/ansible/roles/fluentd/tasks/config.yml index d6c8cb649f..1f33b4852a 100644 --- a/ansible/roles/fluentd/tasks/config.yml +++ b/ansible/roles/fluentd/tasks/config.yml @@ -38,7 +38,7 @@ find: path: "{{ node_custom_config }}/fluentd/input" pattern: "*.conf" - run_once: True + run_once: true register: find_custom_fluentd_inputs delegate_to: localhost @@ -46,7 +46,7 @@ find: path: "{{ node_custom_config }}/fluentd/filter" pattern: "*.conf" - run_once: True + run_once: true register: find_custom_fluentd_filters delegate_to: localhost @@ -54,7 +54,7 @@ find: path: "{{ node_custom_config }}/fluentd/format" pattern: "*.conf" - run_once: True + run_once: true register: find_custom_fluentd_formats delegate_to: localhost @@ -62,7 +62,7 @@ find: path: "{{ node_custom_config }}/fluentd/output" pattern: "*.conf" - run_once: True + run_once: true register: find_custom_fluentd_outputs delegate_to: localhost From 833bb5d351ca43f0e0d71e2964688468dc64fd1d Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:19:39 +0100 Subject: [PATCH 
118/165] ansible-lint: Fix yaml[truthy] in glance role Change-Id: I241b6b9f84db7b174cd521b5b5ebb7c37b182f0a Signed-off-by: Michal Nasiadka --- ansible/roles/glance/tasks/bootstrap.yml | 4 ++-- .../roles/glance/tasks/bootstrap_service.yml | 8 ++++---- ansible/roles/glance/tasks/config.yml | 2 +- ansible/roles/glance/tasks/external_ceph.yml | 2 +- .../roles/glance/tasks/rolling_upgrade.yml | 20 +++++++++---------- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/ansible/roles/glance/tasks/bootstrap.yml b/ansible/roles/glance/tasks/bootstrap.yml index 1f190be594..e183e51536 100644 --- a/ansible/roles/glance/tasks/bootstrap.yml +++ b/ansible/roles/glance/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ glance_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ glance_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['glance-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ glance_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['glance-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/glance/tasks/bootstrap_service.yml b/ansible/roles/glance/tasks/bootstrap_service.yml index 0de3cde62e..048cdfd5b0 100644 --- a/ansible/roles/glance/tasks/bootstrap_service.yml +++ b/ansible/roles/glance/tasks/bootstrap_service.yml @@ -12,7 +12,7 @@ login_password: "{{ database_password }}" variable: log_bin_trust_function_creators value: 1 - run_once: True + run_once: true delegate_to: "{{ glance_api_hosts[0] }}" when: - not use_preconfigured_databases | bool @@ -24,7 +24,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -34,7 +34,7 @@ name: "bootstrap_glance" restart_policy: oneshot volumes: "{{ glance_api.volumes | 
reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ glance_api_hosts[0] }}" - name: Disable log_bin_trust_function_creators function @@ -50,7 +50,7 @@ login_password: "{{ database_password }}" variable: log_bin_trust_function_creators value: 0 - run_once: True + run_once: true delegate_to: "{{ glance_api_hosts[0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/glance/tasks/config.yml b/ansible/roles/glance/tasks/config.yml index 52ee34a973..6a72a1a298 100644 --- a/ansible/roles/glance/tasks/config.yml +++ b/ansible/roles/glance/tasks/config.yml @@ -17,7 +17,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: glance_policy with_first_found: - files: "{{ supported_policy_format_list }}" diff --git a/ansible/roles/glance/tasks/external_ceph.yml b/ansible/roles/glance/tasks/external_ceph.yml index 3daa656bda..d33cb1e58f 100644 --- a/ansible/roles/glance/tasks/external_ceph.yml +++ b/ansible/roles/glance/tasks/external_ceph.yml @@ -36,7 +36,7 @@ - name: Ensuring config directory has correct owner and permission file: path: "{{ node_config_directory }}/glance-api" - recurse: yes + recurse: true owner: "{{ config_owner_user }}" group: "{{ config_owner_group }}" become: true diff --git a/ansible/roles/glance/tasks/rolling_upgrade.yml b/ansible/roles/glance/tasks/rolling_upgrade.yml index 68f1f2098a..fad51377fa 100644 --- a/ansible/roles/glance/tasks/rolling_upgrade.yml +++ b/ansible/roles/glance/tasks/rolling_upgrade.yml @@ -1,7 +1,7 @@ --- - name: Start Glance upgrade set_fact: - glance_upgrading: True + glance_upgrading: true # Upgrade first node (NEW NODE in the Glance documentation) - include_tasks: config.yml @@ -23,7 +23,7 @@ login_password: "{{ database_password }}" variable: log_bin_trust_function_creators value: 1 - run_once: True + run_once: true delegate_to: "{{ glance_api_hosts[0] }}" when: - not use_preconfigured_databases | bool @@ -35,7 +35,7 @@ 
kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_UPGRADE: GLANCE_DB_EXPAND: @@ -46,7 +46,7 @@ name: "bootstrap_glance" restart_policy: oneshot volumes: "{{ glance_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ glance_api_hosts[0] }}" - name: Running Glance database migrate container @@ -56,7 +56,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_UPGRADE: GLANCE_DB_MIGRATE: @@ -67,7 +67,7 @@ name: "bootstrap_glance" restart_policy: oneshot volumes: "{{ glance_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ glance_api_hosts[0] }}" notify: - Restart glance-api container @@ -88,7 +88,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_UPGRADE: GLANCE_DB_CONTRACT: @@ -99,7 +99,7 @@ name: "bootstrap_glance" restart_policy: oneshot volumes: "{{ glance_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ glance_api_hosts[0] }}" - name: Disable log_bin_trust_function_creators function @@ -115,11 +115,11 @@ login_password: "{{ database_password }}" variable: log_bin_trust_function_creators value: 0 - run_once: True + run_once: true delegate_to: "{{ glance_api_hosts[0] }}" when: - not use_preconfigured_databases | bool - name: Finish Glance upgrade set_fact: - glance_upgrading: False + glance_upgrading: false From cb9c57afb4cd55cb71c80fd4c2110c914e00a3de Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:20:18 +0100 Subject: [PATCH 119/165] ansible-lint: Fix yaml[truthy] in gnocchi role Change-Id: I75f53151e06dc1af7f1fb109c50ce046bdfd7ec5 Signed-off-by: Michal Nasiadka --- ansible/roles/gnocchi/tasks/bootstrap.yml | 4 ++-- 
ansible/roles/gnocchi/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/gnocchi/tasks/config.yml | 2 +- ansible/roles/gnocchi/tasks/external_ceph.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/roles/gnocchi/tasks/bootstrap.yml b/ansible/roles/gnocchi/tasks/bootstrap.yml index 53f300ec6e..088728cdfc 100644 --- a/ansible/roles/gnocchi/tasks/bootstrap.yml +++ b/ansible/roles/gnocchi/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ gnocchi_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ gnocchi_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['gnocchi-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ gnocchi_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['gnocchi-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/gnocchi/tasks/bootstrap_service.yml b/ansible/roles/gnocchi/tasks/bootstrap_service.yml index 3f9e8090cc..0322bdd8f0 100644 --- a/ansible/roles/gnocchi/tasks/bootstrap_service.yml +++ b/ansible/roles/gnocchi/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_gnocchi" restart_policy: oneshot volumes: "{{ gnocchi_api.volumes }}" - run_once: True + run_once: true delegate_to: "{{ groups[gnocchi_api.group][0] }}" diff --git a/ansible/roles/gnocchi/tasks/config.yml b/ansible/roles/gnocchi/tasks/config.yml index b978044e29..7d5868a48e 100644 --- a/ansible/roles/gnocchi/tasks/config.yml +++ b/ansible/roles/gnocchi/tasks/config.yml @@ -17,7 +17,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: gnocchi_policy with_first_found: - files: "{{ 
supported_policy_format_list }}" diff --git a/ansible/roles/gnocchi/tasks/external_ceph.yml b/ansible/roles/gnocchi/tasks/external_ceph.yml index c36fd98550..79357605b7 100644 --- a/ansible/roles/gnocchi/tasks/external_ceph.yml +++ b/ansible/roles/gnocchi/tasks/external_ceph.yml @@ -31,7 +31,7 @@ become: true file: path: "{{ node_config_directory }}/{{ item.key }}" - recurse: yes + recurse: true owner: "{{ config_owner_user }}" group: "{{ config_owner_group }}" with_dict: "{{ gnocchi_services | select_services_enabled_and_mapped_to_host }}" From b1fce586d82b28104f3ceb560b2ba0e5278c4bdc Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:20:55 +0100 Subject: [PATCH 120/165] ansible-lint: Fix yaml[truthy] in grafana role Change-Id: Ifa86a134c5c0dc3d0a39f30330ddb30dd3b8f3c2 Signed-off-by: Michal Nasiadka --- ansible/roles/grafana/defaults/main.yml | 3 +-- ansible/roles/grafana/tasks/bootstrap.yml | 4 ++-- ansible/roles/grafana/tasks/config.yml | 6 +++--- ansible/roles/grafana/tasks/post_config.yml | 9 ++++----- 4 files changed, 10 insertions(+), 12 deletions(-) diff --git a/ansible/roles/grafana/defaults/main.yml b/ansible/roles/grafana/defaults/main.yml index 80511c965f..444cf01ed5 100644 --- a/ansible/roles/grafana/defaults/main.yml +++ b/ansible/roles/grafana/defaults/main.yml @@ -49,7 +49,6 @@ grafana_database_shard: - user: "{{ grafana_database_user }}" shard_id: "{{ grafana_database_shard_id }}" - #################### # Datasource #################### @@ -57,7 +56,7 @@ grafana_data_sources: influxdb: enabled: "{{ enable_influxdb | bool }}" data: - isDefault: yes + isDefault: true database: "telegraf" name: "telegraf" type: "influxdb" diff --git a/ansible/roles/grafana/tasks/bootstrap.yml b/ansible/roles/grafana/tasks/bootstrap.yml index ab785914ee..54d12d960a 100644 --- a/ansible/roles/grafana/tasks/bootstrap.yml +++ b/ansible/roles/grafana/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ grafana_database_shard_root_user }}" 
login_password: "{{ database_password }}" name: "{{ grafana_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['grafana'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ grafana_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['grafana'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/grafana/tasks/config.yml b/ansible/roles/grafana/tasks/config.yml index 509457210a..0c7cfecd11 100644 --- a/ansible/roles/grafana/tasks/config.yml +++ b/ansible/roles/grafana/tasks/config.yml @@ -16,8 +16,8 @@ - "prometheus.yaml" - "provisioning.yaml" delegate_to: localhost - changed_when: False - run_once: True + changed_when: false + run_once: true register: check_extra_conf_grafana - include_tasks: copy-certs.yml @@ -91,7 +91,7 @@ path: "{{ node_custom_config }}/grafana/dashboards" delegate_to: localhost register: grafana_custom_dashboards_folder - run_once: True + run_once: true - name: Remove templated Grafana dashboards become: true diff --git a/ansible/roles/grafana/tasks/post_config.yml b/ansible/roles/grafana/tasks/post_config.yml index 16e9246355..e90a2ab47f 100644 --- a/ansible/roles/grafana/tasks/post_config.yml +++ b/ansible/roles/grafana/tasks/post_config.yml @@ -33,13 +33,12 @@ password: "{{ grafana_admin_password }}" body: "{{ item.value.data | to_json }}" body_format: json - force_basic_auth: yes + force_basic_auth: true status_code: 200, 409 register: response - run_once: True + run_once: true changed_when: response.status == 200 - failed_when: response.status not in [200, 409] or - response.status == 409 and ("name already exists" not in response.json.message | default("")) + failed_when: response.status not in [200, 409] or response.status == 409 and ("name already exists" not in response.json.message | default("")) with_dict: "{{ grafana_data_sources }}" when: item.value.enabled | bool @@ -53,7 +52,7 @@ method: 
PUT user: "{{ grafana_admin_username }}" password: "{{ grafana_admin_password }}" - force_basic_auth: yes + force_basic_auth: true status_code: 200 register: grafana_response changed_when: grafana_response.status == 200 From 69122b2f9a2700f31e2ad85d02a6e5a6caad7ef8 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:21:43 +0100 Subject: [PATCH 121/165] ansible-lint: Fix yaml[truthy] in hacluster role Change-Id: Ifd89ad139899a9f97ee56b8821237a33122c8620 Signed-off-by: Michal Nasiadka --- ansible/roles/hacluster/tasks/bootstrap.yml | 22 ++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/ansible/roles/hacluster/tasks/bootstrap.yml b/ansible/roles/hacluster/tasks/bootstrap.yml index baf83bbc97..d1fb59fed1 100644 --- a/ansible/roles/hacluster/tasks/bootstrap.yml +++ b/ansible/roles/hacluster/tasks/bootstrap.yml @@ -4,9 +4,9 @@ path: "{{ node_custom_config }}/{{ item }}" state: directory delegate_to: localhost - changed_when: False - check_mode: no - run_once: True + changed_when: false + check_mode: false + run_once: true with_items: - hacluster-corosync - hacluster-pacemaker @@ -15,28 +15,28 @@ stat: path: "{{ node_custom_config }}/hacluster-corosync/authkey" delegate_to: localhost - run_once: True + run_once: true register: hacluster_corosync_authkey_file - name: Check if Pacemaker authkey file exists stat: path: "{{ node_custom_config }}/hacluster-pacemaker/authkey" delegate_to: localhost - run_once: True + run_once: true register: hacluster_pacemaker_authkey_file - name: Generating Corosync authkey file command: "dd if=/dev/urandom of={{ node_custom_config }}/hacluster-corosync/authkey bs=4096 count=1" delegate_to: localhost - changed_when: False - check_mode: no - run_once: True + changed_when: false + check_mode: false + run_once: true when: not hacluster_corosync_authkey_file.stat.exists - name: Generating Pacemaker authkey file command: "dd if=/dev/urandom of={{ node_custom_config 
}}/hacluster-pacemaker/authkey bs=4096 count=1" delegate_to: localhost - changed_when: False - check_mode: no - run_once: True + changed_when: false + check_mode: false + run_once: true when: not hacluster_pacemaker_authkey_file.stat.exists From 10840ed62929983ba545b356e1f77cb505442b81 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:22:24 +0100 Subject: [PATCH 122/165] ansible-lint: Fix yaml[truthy] in haproxy-config role Change-Id: Ie83e990414aa19973b26185b47e1a575f52db288 Signed-off-by: Michal Nasiadka --- ansible/roles/haproxy-config/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/haproxy-config/tasks/main.yml b/ansible/roles/haproxy-config/tasks/main.yml index 3967a434e8..4a93e428e5 100644 --- a/ansible/roles/haproxy-config/tasks/main.yml +++ b/ansible/roles/haproxy-config/tasks/main.yml @@ -24,7 +24,7 @@ vars: service: "{{ item.value }}" blockinfile: - create: yes + create: true path: "{{ node_config_directory }}/haproxy/external-frontend-map" insertafter: EOF marker: "# {mark} {{ item.key }}" From 57dc769269a6fa22ac16e6993626f171f1999e06 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:23:07 +0100 Subject: [PATCH 123/165] ansible-lint: Fix yaml[truthy] in heat role Change-Id: Ic18b7c25a13955da572f98b2dbca7d673a35746f Signed-off-by: Michal Nasiadka --- ansible/roles/heat/tasks/bootstrap.yml | 4 ++-- ansible/roles/heat/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/heat/tasks/config.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/roles/heat/tasks/bootstrap.yml b/ansible/roles/heat/tasks/bootstrap.yml index 72268b4fb1..556472e5d9 100644 --- a/ansible/roles/heat/tasks/bootstrap.yml +++ b/ansible/roles/heat/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ heat_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ heat_database_name }}" - run_once: True + run_once: true delegate_to: "{{ 
groups['heat-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ heat_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['heat-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/heat/tasks/bootstrap_service.yml b/ansible/roles/heat/tasks/bootstrap_service.yml index 872c456f13..38fd5d777d 100644 --- a/ansible/roles/heat/tasks/bootstrap_service.yml +++ b/ansible/roles/heat/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -26,5 +26,5 @@ name: "bootstrap_heat" restart_policy: oneshot volumes: "{{ heat_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[heat_api.group][0] }}" diff --git a/ansible/roles/heat/tasks/config.yml b/ansible/roles/heat/tasks/config.yml index 10271bd8d4..2d306f3fab 100644 --- a/ansible/roles/heat/tasks/config.yml +++ b/ansible/roles/heat/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: heat_policy with_first_found: - files: "{{ supported_policy_format_list }}" From dc9c77c5e3d350fc8ff40226fd6849db61a5f419 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:23:48 +0100 Subject: [PATCH 124/165] ansible-lint: Fix yaml[truthy] in horizon role Change-Id: Ia3177178a177cd0c8fe434ba75b9af11bc998f8a Signed-off-by: Michal Nasiadka --- ansible/roles/horizon/tasks/bootstrap.yml | 4 ++-- ansible/roles/horizon/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/horizon/tasks/policy_item.yml | 3 +-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/ansible/roles/horizon/tasks/bootstrap.yml b/ansible/roles/horizon/tasks/bootstrap.yml index d92ce12cdb..431a500643 100644 --- 
a/ansible/roles/horizon/tasks/bootstrap.yml +++ b/ansible/roles/horizon/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ horizon_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ horizon_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['horizon'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ horizon_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['horizon'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/horizon/tasks/bootstrap_service.yml b/ansible/roles/horizon/tasks/bootstrap_service.yml index 74454a9911..e956993558 100644 --- a/ansible/roles/horizon/tasks/bootstrap_service.yml +++ b/ansible/roles/horizon/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_horizon" restart_policy: oneshot volumes: "{{ horizon.volumes }}" - run_once: True + run_once: true delegate_to: "{{ groups[horizon.group][0] }}" diff --git a/ansible/roles/horizon/tasks/policy_item.yml b/ansible/roles/horizon/tasks/policy_item.yml index 7b427108a3..ab3b222e38 100644 --- a/ansible/roles/horizon/tasks/policy_item.yml +++ b/ansible/roles/horizon/tasks/policy_item.yml @@ -1,5 +1,4 @@ --- - - name: Update policy file name set_fact: supported_policy_files: "{{ supported_policy_format_list | map('regex_replace', '(.+)', project_name + '_\\1') | list }}" @@ -8,7 +7,7 @@ stat: path: "{{ fullpath }}" delegate_to: localhost - run_once: True + run_once: true register: overwritten_files with_first_found: - files: "{{ supported_policy_files }}" From b7f7374fa0ea765fb267a513df1dfa4f42b306b2 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:24:38 +0100 Subject: [PATCH 
125/165] ansible-lint: Fix yaml[truthy] in influxdb role Change-Id: I1ad2a1371de9d0e1f36675871ac9e500888a2e82 Signed-off-by: Michal Nasiadka --- ansible/roles/influxdb/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/influxdb/defaults/main.yml b/ansible/roles/influxdb/defaults/main.yml index 62b3505a59..59b6d14f01 100644 --- a/ansible/roles/influxdb/defaults/main.yml +++ b/ansible/roles/influxdb/defaults/main.yml @@ -21,7 +21,7 @@ influxdb_services: # Enable the disk based time series index (recommended for all users). For # more information see here: # https://docs.influxdata.com/influxdb/v1.7/concepts/time-series-index/ -influxdb_enable_tsi: True +influxdb_enable_tsi: true #################### # Docker From 8953850c19882670f00de2fbc6413c74fd9cf7e7 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:25:19 +0100 Subject: [PATCH 126/165] ansible-lint: Fix yaml[truthy] in ironic role Change-Id: I8213b6fe13dac146ded0db903dd0b00123e22e43 Signed-off-by: Michal Nasiadka --- ansible/roles/ironic/defaults/main.yml | 2 +- ansible/roles/ironic/tasks/bootstrap.yml | 4 ++-- ansible/roles/ironic/tasks/bootstrap_service.yml | 6 +++--- ansible/roles/ironic/tasks/config.yml | 2 +- ansible/roles/ironic/tasks/precheck.yml | 2 +- ansible/roles/ironic/tasks/rolling_upgrade.yml | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ansible/roles/ironic/defaults/main.yml b/ansible/roles/ironic/defaults/main.yml index 9c80ad6fea..02042aad6b 100644 --- a/ansible/roles/ironic/defaults/main.yml +++ b/ansible/roles/ironic/defaults/main.yml @@ -34,7 +34,7 @@ ironic_services: group: ironic-conductor enabled: true image: "{{ ironic_conductor_image_full }}" - privileged: True + privileged: true volumes: "{{ ironic_conductor_default_volumes + ironic_conductor_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ ironic_conductor_dimensions }}" healthcheck: "{{ 
ironic_conductor_healthcheck }}" diff --git a/ansible/roles/ironic/tasks/bootstrap.yml b/ansible/roles/ironic/tasks/bootstrap.yml index b6dc71a078..8ed261602e 100644 --- a/ansible/roles/ironic/tasks/bootstrap.yml +++ b/ansible/roles/ironic/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ ironic_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ item.database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups[item.group][0] }}" with_items: - database_name: "{{ ironic_database_name }}" @@ -36,7 +36,7 @@ host: "%" priv: "{{ item.database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups[item.group][0] }}" with_items: - database_name: "{{ ironic_database_name }}" diff --git a/ansible/roles/ironic/tasks/bootstrap_service.yml b/ansible/roles/ironic/tasks/bootstrap_service.yml index 2374bae120..edf20fc476 100644 --- a/ansible/roles/ironic/tasks/bootstrap_service.yml +++ b/ansible/roles/ironic/tasks/bootstrap_service.yml @@ -12,7 +12,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: "{{ upgrade_environment if ironic_enable_rolling_upgrade | bool else bootstrap_environment }}" image: "{{ ironic_api.image }}" labels: @@ -20,7 +20,7 @@ name: "bootstrap_ironic" restart_policy: oneshot volumes: "{{ ironic_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[ironic_api.group][0] }}" when: inventory_hostname in groups[ironic_api.group] @@ -31,7 +31,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: TFTPBOOT_PATH: /var/lib/ironic/tftpboot HTTPBOOT_PATH: /var/lib/ironic/httpboot diff --git a/ansible/roles/ironic/tasks/config.yml b/ansible/roles/ironic/tasks/config.yml index 9b7627e189..9060fdd5af 100644 --- a/ansible/roles/ironic/tasks/config.yml +++ 
b/ansible/roles/ironic/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: ironic_policy with_first_found: - files: "{{ supported_policy_format_list }}" diff --git a/ansible/roles/ironic/tasks/precheck.yml b/ansible/roles/ironic/tasks/precheck.yml index cb5a21839f..ec69c8f958 100644 --- a/ansible/roles/ironic/tasks/precheck.yml +++ b/ansible/roles/ironic/tasks/precheck.yml @@ -55,7 +55,7 @@ stat: path: "{{ ironic_agent_files_directory }}/ironic/{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: result failed_when: not result.stat.exists when: diff --git a/ansible/roles/ironic/tasks/rolling_upgrade.yml b/ansible/roles/ironic/tasks/rolling_upgrade.yml index 6bda5569fb..673ab7d2bf 100644 --- a/ansible/roles/ironic/tasks/rolling_upgrade.yml +++ b/ansible/roles/ironic/tasks/rolling_upgrade.yml @@ -36,7 +36,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_OSM: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -46,6 +46,6 @@ name: "bootstrap_ironic" restart_policy: oneshot volumes: "{{ ironic_api.volumes }}" - run_once: True + run_once: true delegate_to: "{{ groups[ironic_api.group][0] }}" when: inventory_hostname in groups[ironic_api.group] From 6a9feb98e02d340901040ed489a840dd4d70f7f6 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:26:18 +0100 Subject: [PATCH 127/165] ansible-lint: Fix yaml[truthy] in iscsi role Change-Id: I18c29512f250494adae32de77ff44b9d46fa5a64 Signed-off-by: Michal Nasiadka --- ansible/roles/iscsi/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/iscsi/defaults/main.yml b/ansible/roles/iscsi/defaults/main.yml index 13aac3a889..f4d0ed1e8b 100644 --- a/ansible/roles/iscsi/defaults/main.yml +++ b/ansible/roles/iscsi/defaults/main.yml @@ -6,7 +6,7 @@ iscsi_services: enabled: "{{ 
enable_iscsid_for_cinder or enable_iscsid_for_ironic }}" image: "{{ iscsid_image_full }}" ipc_mode: "host" - privileged: True + privileged: true volumes: "{{ iscsid_default_volumes + iscsid_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ iscsid_dimensions }}" tgtd: @@ -15,7 +15,7 @@ iscsi_services: enabled: "{{ enable_tgtd }}" image: "{{ tgtd_image_full }}" ipc_mode: "host" - privileged: True + privileged: true volumes: "{{ tgtd_default_volumes + tgtd_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ tgtd_dimensions }}" From cabed00211ba7d73c5ba365fd96c90bc820f6372 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:27:10 +0100 Subject: [PATCH 128/165] ansible-lint: Fix yaml[truthy] in keystone role Change-Id: I0a2c6c4cbeacf1719db34cd5b8282fba13de04d1 Signed-off-by: Michal Nasiadka --- ansible/roles/keystone/handlers/main.yml | 8 ++--- ansible/roles/keystone/tasks/bootstrap.yml | 4 +-- .../keystone/tasks/bootstrap_service.yml | 8 ++--- ansible/roles/keystone/tasks/config.yml | 8 ++--- .../keystone/tasks/distribute_fernet.yml | 2 +- ansible/roles/keystone/tasks/register.yml | 6 ++-- .../tasks/register_identity_providers.yml | 30 +++++++++---------- ansible/roles/keystone/tasks/upgrade.yml | 4 +-- 8 files changed, 35 insertions(+), 35 deletions(-) diff --git a/ansible/roles/keystone/handlers/main.yml b/ansible/roles/keystone/handlers/main.yml index 8d5f1fd049..24ee0d520d 100644 --- a/ansible/roles/keystone/handlers/main.yml +++ b/ansible/roles/keystone/handlers/main.yml @@ -10,7 +10,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_UPGRADE: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -21,7 +21,7 @@ restart_policy: oneshot volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" - run_once: True + 
run_once: true when: - kolla_action == "upgrade" - inventory_hostname == groups[service.group][0] @@ -90,7 +90,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_FINISH_UPGRADE: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -101,6 +101,6 @@ restart_policy: oneshot volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" - run_once: True + run_once: true when: - kolla_action == "upgrade" diff --git a/ansible/roles/keystone/tasks/bootstrap.yml b/ansible/roles/keystone/tasks/bootstrap.yml index 7af22ddea5..89e4cdd004 100644 --- a/ansible/roles/keystone/tasks/bootstrap.yml +++ b/ansible/roles/keystone/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ keystone_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ keystone_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['keystone'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ keystone_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['keystone'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/keystone/tasks/bootstrap_service.yml b/ansible/roles/keystone/tasks/bootstrap_service.yml index 27e52f7901..f76d8f787d 100644 --- a/ansible/roles/keystone/tasks/bootstrap_service.yml +++ b/ansible/roles/keystone/tasks/bootstrap_service.yml @@ -38,7 +38,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -48,7 +48,7 @@ name: "bootstrap_keystone" restart_policy: oneshot volumes: "{{ keystone.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true - name: Running Keystone fernet bootstrap container vars: @@ -57,7 +57,7 @@ 
kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" image: "{{ keystone_fernet.image }}" @@ -70,7 +70,7 @@ name: "bootstrap_keystone_fernet" restart_policy: oneshot volumes: "{{ keystone_fernet.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups['keystone'][0] }}" when: - groups['keystone_fernet_running_True'] is not defined diff --git a/ansible/roles/keystone/tasks/config.yml b/ansible/roles/keystone/tasks/config.yml index 3ad2472810..f3f103dc2b 100644 --- a/ansible/roles/keystone/tasks/config.yml +++ b/ansible/roles/keystone/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: keystone_policy with_first_found: - files: "{{ supported_policy_format_list }}" @@ -32,7 +32,7 @@ stat: path: "{{ node_custom_config }}/keystone/domains" delegate_to: localhost - run_once: True + run_once: true register: keystone_domain_directory - include_tasks: copy-certs.yml @@ -89,7 +89,7 @@ - name: Get file list in custom domains folder find: path: "{{ node_custom_config }}/keystone/domains" - recurse: no + recurse: false file_type: file delegate_to: localhost register: keystone_domains @@ -184,7 +184,7 @@ stat: path: "{{ node_custom_config }}/keystone/keystone-paste.ini" delegate_to: localhost - run_once: True + run_once: true register: check_keystone_paste_ini when: service | service_enabled_and_mapped_to_host diff --git a/ansible/roles/keystone/tasks/distribute_fernet.yml b/ansible/roles/keystone/tasks/distribute_fernet.yml index 91a2305ada..1991e7c453 100644 --- a/ansible/roles/keystone/tasks/distribute_fernet.yml +++ b/ansible/roles/keystone/tasks/distribute_fernet.yml @@ -12,7 +12,7 @@ - name: Run key distribution become: true command: "{{ kolla_container_engine }} exec -t {{ keystone_services['keystone-fernet']['container_name'] }} 
/usr/bin/fernet-push.sh" - run_once: True + run_once: true delegate_to: >- {% if groups['keystone_fernet_running'] is defined -%} {{ groups['keystone_fernet_running'][0] }}{%- else -%}{{ groups['keystone'][0] }}{%- endif %} diff --git a/ansible/roles/keystone/tasks/register.yml b/ansible/roles/keystone/tasks/register.yml index 733c3f903d..3789eb7216 100644 --- a/ansible/roles/keystone/tasks/register.yml +++ b/ansible/roles/keystone/tasks/register.yml @@ -8,7 +8,7 @@ register: keystone_bootstrap changed_when: (keystone_bootstrap.stdout | from_json).changed failed_when: (keystone_bootstrap.stdout | from_json).failed - run_once: True + run_once: true with_items: "{{ multiple_regions_names }}" - import_role: @@ -16,7 +16,7 @@ vars: service_ks_register_auth: "{{ openstack_keystone_auth }}" service_ks_register_services: "{{ keystone_ks_services }}" - run_once: True + run_once: true - name: Creating default user role become: true @@ -29,4 +29,4 @@ endpoint_type: "{{ openstack_interface }}" cacert: "{{ openstack_cacert }}" region_name: "{{ openstack_region_name }}" - run_once: True + run_once: true diff --git a/ansible/roles/keystone/tasks/register_identity_providers.yml b/ansible/roles/keystone/tasks/register_identity_providers.yml index 75cd40aa9e..ced1d73dd6 100644 --- a/ansible/roles/keystone/tasks/register_identity_providers.yml +++ b/ansible/roles/keystone/tasks/register_identity_providers.yml @@ -12,9 +12,9 @@ --os-region-name={{ openstack_region_name }} {% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }} {% endif %} mapping list -c ID --format value - run_once: True - changed_when: False - become: True + run_once: true + changed_when: false + become: true register: existing_mappings_register - name: Register existing mappings @@ -34,7 +34,7 @@ --os-region-name={{ openstack_region_name }} {% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }} {% endif %} mapping delete {{ item }} - run_once: True + run_once: true become: true with_items: "{{ 
existing_mappings }}" when: @@ -52,7 +52,7 @@ endpoint_type: "{{ openstack_interface }}" cacert: "{{ openstack_cacert }}" region_name: "{{ openstack_region_name }}" - run_once: True + run_once: true with_items: "{{ keystone_identity_providers }}" - name: Register attribute mappings in OpenStack @@ -71,7 +71,7 @@ mapping create --rules "{{ keystone_container_federation_oidc_attribute_mappings_folder }}/{{ item.file | basename }}" {{ item.name }} - run_once: True + run_once: true when: - item.name not in existing_mappings with_items: "{{ keystone_identity_mappings }}" @@ -92,7 +92,7 @@ mapping set --rules="{{ keystone_container_federation_oidc_attribute_mappings_folder }}/{{ item.file | basename }}" {{ item.name }} - run_once: True + run_once: true when: - item.name in existing_mappings with_items: "{{ keystone_identity_mappings }}" @@ -111,8 +111,8 @@ --os-region-name={{ openstack_region_name }} {% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }} {% endif %} identity provider list -c ID --format value - run_once: True - changed_when: False + run_once: true + changed_when: false register: existing_idps_register - name: Register existing idps @@ -133,7 +133,7 @@ --os-region-name={ openstack_region_name }} {% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }}{% endif %} identity provider delete {{ item }} - run_once: True + run_once: true with_items: "{{ existing_idps }}" when: - item not in (keystone_identity_providers | map(attribute='name') | list) @@ -157,7 +157,7 @@ --remote-id "{{ item.identifier }}" --domain "{{ item.openstack_domain }}" {{ item.name }} - run_once: True + run_once: true when: - item.name not in existing_idps with_items: "{{ keystone_identity_providers }}" @@ -179,7 +179,7 @@ --description "{{ item.public_name }}" --remote-id "{{ item.identifier }}" "{{ item.name }}" - run_once: True + run_once: true when: - item.name in existing_idps with_items: "{{ keystone_identity_providers }}" @@ -201,7 +201,7 @@ --mapping {{ 
item.attribute_mapping }} --identity-provider {{ item.name }} {{ item.protocol }} - run_once: True + run_once: true when: - item.name not in existing_idps with_items: "{{ keystone_identity_providers }}" @@ -223,9 +223,9 @@ --identity-provider {{ item.name }} --mapping {{ item.attribute_mapping }} {{ item.protocol }} - run_once: True + run_once: true register: result - failed_when: result.rc not in [0, 1] # This command returns RC 1 on success, so we need to add this to avoid fails. + failed_when: result.rc not in [0, 1] # This command returns RC 1 on success, so we need to add this to avoid fails. when: - item.name in existing_idps with_items: "{{ keystone_identity_providers }}" diff --git a/ansible/roles/keystone/tasks/upgrade.yml b/ansible/roles/keystone/tasks/upgrade.yml index 2f1c64edea..88422c9287 100644 --- a/ansible/roles/keystone/tasks/upgrade.yml +++ b/ansible/roles/keystone/tasks/upgrade.yml @@ -16,7 +16,7 @@ login_password: "{{ database_password }}" variable: log_bin_trust_function_creators value: 1 - run_once: True + run_once: true when: - inventory_hostname == groups['keystone'][0] - not use_preconfigured_databases | bool @@ -47,6 +47,6 @@ login_password: "{{ database_password }}" variable: log_bin_trust_function_creators value: 0 - run_once: True + run_once: true when: - not use_preconfigured_databases | bool From 43f255d9dfd8d2f2cf0b90281cd12c577136b5bb Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:27:54 +0100 Subject: [PATCH 129/165] ansible-lint: Fix yaml[truthy] in kuryr role Change-Id: I1ceeba74f13dc26d112d65d98bb1b3c2f2cabd1c Signed-off-by: Michal Nasiadka --- ansible/roles/kuryr/defaults/main.yml | 5 ++--- ansible/roles/kuryr/tasks/config.yml | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ansible/roles/kuryr/defaults/main.yml b/ansible/roles/kuryr/defaults/main.yml index 22edf3d03a..753e0abf58 100644 --- a/ansible/roles/kuryr/defaults/main.yml +++ b/ansible/roles/kuryr/defaults/main.yml @@ -11,9 
+11,9 @@ kuryr_services: kuryr: container_name: kuryr group: compute - enabled: True + enabled: true image: "{{ kuryr_image_full }}" - privileged: True + privileged: true cap_add: - NET_ADMIN volumes: "{{ kuryr_default_volumes + kuryr_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" @@ -69,7 +69,6 @@ kuryr_keystone_user: "kuryr" openstack_kuryr_auth: "{{ openstack_auth }}" - #################### # Kolla #################### diff --git a/ansible/roles/kuryr/tasks/config.yml b/ansible/roles/kuryr/tasks/config.yml index 10bfff47db..6f6099cbcb 100644 --- a/ansible/roles/kuryr/tasks/config.yml +++ b/ansible/roles/kuryr/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: kuryr_policy with_first_found: - files: "{{ supported_policy_format_list }}" From 9ae77af3be44d74c7446eec6db318db5d7ec219f Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:29:08 +0100 Subject: [PATCH 130/165] ansible-lint: Fix yaml[truthy] in loadbalancer role Change-Id: Ia3d5ddd102dfb4e34237bda0016b93e329fd1a86 Signed-off-by: Michal Nasiadka --- ansible/roles/loadbalancer/defaults/main.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/roles/loadbalancer/defaults/main.yml b/ansible/roles/loadbalancer/defaults/main.yml index 995d85ebfe..9f7bd0b04f 100644 --- a/ansible/roles/loadbalancer/defaults/main.yml +++ b/ansible/roles/loadbalancer/defaults/main.yml @@ -5,7 +5,7 @@ loadbalancer_services: group: loadbalancer enabled: true image: "{{ haproxy_image_full }}" - privileged: True + privileged: true volumes: "{{ haproxy_default_volumes + haproxy_extra_volumes }}" dimensions: "{{ haproxy_dimensions }}" healthcheck: "{{ haproxy_healthcheck }}" @@ -14,7 +14,7 @@ loadbalancer_services: group: loadbalancer enabled: "{{ enable_proxysql | bool }}" image: "{{ proxysql_image_full }}" - privileged: False + privileged: false volumes: "{{ 
proxysql_default_volumes + proxysql_extra_volumes }}" dimensions: "{{ proxysql_dimensions }}" healthcheck: "{{ proxysql_healthcheck }}" @@ -23,7 +23,7 @@ loadbalancer_services: group: loadbalancer enabled: "{{ enable_keepalived | bool }}" image: "{{ keepalived_image_full }}" - privileged: True + privileged: true volumes: "{{ keepalived_default_volumes + keepalived_extra_volumes }}" dimensions: "{{ keepalived_dimensions }}" haproxy-ssh: @@ -183,11 +183,11 @@ haproxy_host_ipv4_tcp_retries2: "KOLLA_UNSET" # HAProxy socket admin permissions enable haproxy_socket_level_admin: "{{ enable_letsencrypt | bool }}" -kolla_externally_managed_cert: False +kolla_externally_managed_cert: false # Allow to disable keepalived tracking script (e.g. for single node environments # where this proves problematic in some cases) -keepalived_track_script_enabled: True +keepalived_track_script_enabled: true # Default backend for single external frontend (for missing mappings) haproxy_external_single_frontend_default_backend: "horizon_external_back" From 27c8ee89cab2f13e5b1a429e59220897aa9ee836 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:30:07 +0100 Subject: [PATCH 131/165] ansible-lint: Fix yaml[truthy] in magnum role Change-Id: I86dc2ff6de71034451f01bd2260226416b93c2ba Signed-off-by: Michal Nasiadka --- ansible/roles/magnum/tasks/bootstrap.yml | 4 ++-- ansible/roles/magnum/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/magnum/tasks/config.yml | 4 ++-- ansible/roles/magnum/tasks/register.yml | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ansible/roles/magnum/tasks/bootstrap.yml b/ansible/roles/magnum/tasks/bootstrap.yml index 6d3da7609c..4fb78ea307 100644 --- a/ansible/roles/magnum/tasks/bootstrap.yml +++ b/ansible/roles/magnum/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ magnum_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ magnum_database_name }}" - run_once: True + run_once: true 
delegate_to: "{{ groups['magnum-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ magnum_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['magnum-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/magnum/tasks/bootstrap_service.yml b/ansible/roles/magnum/tasks/bootstrap_service.yml index 9ebf3aefc4..dca4f632ce 100644 --- a/ansible/roles/magnum/tasks/bootstrap_service.yml +++ b/ansible/roles/magnum/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_magnum" restart_policy: oneshot volumes: "{{ magnum_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[magnum_api.group][0] }}" diff --git a/ansible/roles/magnum/tasks/config.yml b/ansible/roles/magnum/tasks/config.yml index 5921da31e0..d23065d853 100644 --- a/ansible/roles/magnum/tasks/config.yml +++ b/ansible/roles/magnum/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: magnum_policy with_first_found: - files: "{{ supported_policy_format_list }}" @@ -32,7 +32,7 @@ stat: path: "{{ node_custom_config }}/magnum/kubeconfig" delegate_to: localhost - run_once: True + run_once: true register: magnum_kubeconfig_file - name: Copying over kubeconfig file diff --git a/ansible/roles/magnum/tasks/register.yml b/ansible/roles/magnum/tasks/register.yml index 8d1d4aacf8..3cdf06be26 100644 --- a/ansible/roles/magnum/tasks/register.yml +++ b/ansible/roles/magnum/tasks/register.yml @@ -19,7 +19,7 @@ cacert: "{{ openstack_cacert }}" region_name: "{{ openstack_region_name }}" register: trustee_domain - run_once: True + run_once: true - name: Creating 
Magnum trustee user become: true @@ -35,7 +35,7 @@ endpoint_type: "{{ openstack_interface }}" cacert: "{{ openstack_cacert }}" region_name: "{{ openstack_region_name }}" - run_once: True + run_once: true - name: Creating Magnum trustee user role become: true @@ -50,5 +50,5 @@ endpoint_type: "{{ openstack_interface }}" cacert: "{{ openstack_cacert }}" region_name: "{{ openstack_region_name }}" - run_once: True + run_once: true when: not ansible_check_mode From a9e352f7ec73883ca6f5ea4c873775cca905c652 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 24 Nov 2025 09:39:12 +0100 Subject: [PATCH 132/165] CI: Add site.yml to all jobs trigger Change-Id: I20e8571464e20ea0e0981a52db3182402ff8f01c Signed-off-by: Michal Nasiadka --- zuul.d/base.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 7a9b481cbe..cd46c19679 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -12,6 +12,7 @@ - openstack/kolla-ansible - openstack/requirements files: + - ^ansible/site.yml - ^ansible/group_vars/all/common.yml - ^requirements-core.yml - ^roles/kolla-ansible-test-dashboard/ From 9171b0e5a1bb2acd62be80f3894600a2bba45cce Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:30:58 +0100 Subject: [PATCH 133/165] ansible-lint: Fix yaml[truthy] in manila role Change-Id: I4e8a88d69654ca1f5665ed2db74279e82dbbf6b4 Signed-off-by: Michal Nasiadka --- ansible/roles/manila/defaults/main.yml | 12 ++++++------ ansible/roles/manila/tasks/bootstrap.yml | 4 ++-- ansible/roles/manila/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/manila/tasks/config.yml | 2 +- ansible/roles/manila/tasks/external_ceph.yml | 2 +- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/ansible/roles/manila/defaults/main.yml b/ansible/roles/manila/defaults/main.yml index 6435a5e462..1e973feb5d 100644 --- a/ansible/roles/manila/defaults/main.yml +++ b/ansible/roles/manila/defaults/main.yml @@ -4,7 +4,7 @@ manila_services: container_name: 
"manila_api" group: "manila-api" image: "{{ manila_api_image_full }}" - enabled: True + enabled: true volumes: "{{ manila_api_default_volumes + manila_api_extra_volumes }}" dimensions: "{{ manila_api_dimensions }}" healthcheck: "{{ manila_api_healthcheck }}" @@ -30,7 +30,7 @@ manila_services: container_name: "manila_scheduler" group: "manila-scheduler" image: "{{ manila_scheduler_image_full }}" - enabled: True + enabled: true volumes: "{{ manila_scheduler_default_volumes + manila_scheduler_extra_volumes }}" dimensions: "{{ manila_scheduler_dimensions }}" healthcheck: "{{ manila_scheduler_healthcheck }}" @@ -38,8 +38,8 @@ manila_services: container_name: "manila_share" group: "manila-share" image: "{{ manila_share_image_full }}" - enabled: True - privileged: True + enabled: true + privileged: true volumes: "{{ manila_share_default_volumes + manila_share_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ manila_share_dimensions }}" healthcheck: "{{ manila_share_healthcheck }}" @@ -47,8 +47,8 @@ manila_services: container_name: "manila_data" group: "manila-data" image: "{{ manila_data_image_full }}" - enabled: True - privileged: True + enabled: true + privileged: true volumes: "{{ manila_data_default_volumes + manila_data_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ manila_data_dimensions }}" healthcheck: "{{ manila_data_healthcheck }}" diff --git a/ansible/roles/manila/tasks/bootstrap.yml b/ansible/roles/manila/tasks/bootstrap.yml index 9e1872cdfe..c04f95d97b 100644 --- a/ansible/roles/manila/tasks/bootstrap.yml +++ b/ansible/roles/manila/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ manila_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ manila_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['manila-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ 
manila_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['manila-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/manila/tasks/bootstrap_service.yml b/ansible/roles/manila/tasks/bootstrap_service.yml index f3e72da72d..fabcc5c5a3 100644 --- a/ansible/roles/manila/tasks/bootstrap_service.yml +++ b/ansible/roles/manila/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_manila" restart_policy: oneshot volumes: "{{ manila_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[manila_api.group][0] }}" diff --git a/ansible/roles/manila/tasks/config.yml b/ansible/roles/manila/tasks/config.yml index 1607316a26..0cb6ae9454 100644 --- a/ansible/roles/manila/tasks/config.yml +++ b/ansible/roles/manila/tasks/config.yml @@ -18,7 +18,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: manila_policy with_first_found: - files: "{{ supported_policy_format_list }}" diff --git a/ansible/roles/manila/tasks/external_ceph.yml b/ansible/roles/manila/tasks/external_ceph.yml index 3b70b0ad4f..84851e6b78 100644 --- a/ansible/roles/manila/tasks/external_ceph.yml +++ b/ansible/roles/manila/tasks/external_ceph.yml @@ -39,7 +39,7 @@ become: true file: path: "{{ node_config_directory }}/{{ item }}" - recurse: yes + recurse: true owner: "{{ config_owner_user }}" group: "{{ config_owner_group }}" when: inventory_hostname in groups[item] From 7152b97a16cef6042d9dfb0bfa91f50868ba2c7c Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:31:39 +0100 Subject: [PATCH 134/165] ansible-lint: Fix yaml[truthy] in mariadb role Change-Id: Iebf507ea4c970bbde896bd8fde344f433b6e6366 
Signed-off-by: Michal Nasiadka --- ansible/roles/mariadb/tasks/backup.yml | 4 ++-- ansible/roles/mariadb/tasks/bootstrap_cluster.yml | 2 +- ansible/roles/mariadb/tasks/check.yml | 2 +- ansible/roles/mariadb/tasks/loadbalancer.yml | 4 ++-- ansible/roles/mariadb/tasks/lookup_cluster.yml | 6 +++--- ansible/roles/mariadb/tasks/post-upgrade.yml | 2 +- ansible/roles/mariadb/tasks/recover_cluster.yml | 10 +++++----- ansible/roles/mariadb/tasks/register.yml | 4 ++-- 8 files changed, 17 insertions(+), 17 deletions(-) diff --git a/ansible/roles/mariadb/tasks/backup.yml b/ansible/roles/mariadb/tasks/backup.yml index 0ddd8521d2..c07d7ded6f 100644 --- a/ansible/roles/mariadb/tasks/backup.yml +++ b/ansible/roles/mariadb/tasks/backup.yml @@ -17,14 +17,14 @@ action: "start_container" command: "bash -c 'sudo -E kolla_set_configs && /usr/local/bin/{{ cmd }}'" common_options: "{{ docker_common_options }}" - detach: False + detach: false # NOTE(mgoddard): Try to use the same image as the MariaDB server container # to avoid compatibility issues. See # https://bugs.launchpad.net/kolla-ansible/+bug/2058644. 
image: "{{ container_facts.containers[mariadb_services.mariadb.container_name].Config.Image | default(mariadb_services.mariadb.image) }}" name: "mariabackup" restart_policy: oneshot - remove_on_exit: True + remove_on_exit: true environment: BACKUP_TYPE: "{{ mariadb_backup_type }}" volumes: diff --git a/ansible/roles/mariadb/tasks/bootstrap_cluster.yml b/ansible/roles/mariadb/tasks/bootstrap_cluster.yml index 9a412c3579..34c463cb83 100644 --- a/ansible/roles/mariadb/tasks/bootstrap_cluster.yml +++ b/ansible/roles/mariadb/tasks/bootstrap_cluster.yml @@ -7,7 +7,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" diff --git a/ansible/roles/mariadb/tasks/check.yml b/ansible/roles/mariadb/tasks/check.yml index c0296bd4c5..78d5e481d2 100644 --- a/ansible/roles/mariadb/tasks/check.yml +++ b/ansible/roles/mariadb/tasks/check.yml @@ -15,7 +15,7 @@ query: "SHOW DATABASES;" register: result until: result is success - changed_when: False + changed_when: false retries: 6 delay: 10 become: true diff --git a/ansible/roles/mariadb/tasks/loadbalancer.yml b/ansible/roles/mariadb/tasks/loadbalancer.yml index bc7439dfd8..9bfa2ac6c9 100644 --- a/ansible/roles/mariadb/tasks/loadbalancer.yml +++ b/ansible/roles/mariadb/tasks/loadbalancer.yml @@ -37,8 +37,8 @@ with_dict: "{{ mariadb_shards_info.shards }}" loop_control: label: "{{ host }}" - failed_when: False - run_once: True + failed_when: false + run_once: true - name: "Configure loadbalancer for {{ project_name }}" import_role: diff --git a/ansible/roles/mariadb/tasks/lookup_cluster.yml b/ansible/roles/mariadb/tasks/lookup_cluster.yml index 207e1f90b8..5103af0723 100644 --- a/ansible/roles/mariadb/tasks/lookup_cluster.yml +++ b/ansible/roles/mariadb/tasks/lookup_cluster.yml @@ -27,7 +27,7 @@ timeout: 10 search_regex: "MariaDB" register: check_mariadb_port_liveness - ignore_errors: yes + 
ignore_errors: true - name: Divide hosts by their MariaDB service port liveness group_by: @@ -76,5 +76,5 @@ msg: MariaDB cluster is not synced. Please wait for WSREP sync before proceeding. when: - groups[mariadb_shard_group + '_port_alive_True'] is defined - - groups[mariadb_shard_group + '_sync_status_Synced'] is not defined or - groups[mariadb_shard_group + '_port_alive_True'] | sort != groups[mariadb_shard_group + '_sync_status_Synced'] | sort + - groups[mariadb_shard_group + '_sync_status_Synced'] is not defined or groups[mariadb_shard_group + '_port_alive_True'] | sort != + groups[mariadb_shard_group + '_sync_status_Synced'] | sort diff --git a/ansible/roles/mariadb/tasks/post-upgrade.yml b/ansible/roles/mariadb/tasks/post-upgrade.yml index ac878bef6b..70a184812a 100644 --- a/ansible/roles/mariadb/tasks/post-upgrade.yml +++ b/ansible/roles/mariadb/tasks/post-upgrade.yml @@ -7,7 +7,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false dimensions: "{{ service.dimensions }}" environment: KOLLA_UPGRADE: diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml index 37877ae599..0f1f0f9de2 100644 --- a/ansible/roles/mariadb/tasks/recover_cluster.yml +++ b/ansible/roles/mariadb/tasks/recover_cluster.yml @@ -11,7 +11,7 @@ delegate_to: localhost connection: local changed_when: false - check_mode: no + check_mode: false run_once: true with_fileglob: "/tmp/kolla_mariadb_recover_inventory_name_*" @@ -61,7 +61,7 @@ path: /tmp/mariadb_tmp.log state: absent changed_when: false - check_mode: no + check_mode: false - name: Registering MariaDB seqno variable set_fact: @@ -102,10 +102,10 @@ - name: Set grastate.dat file from MariaDB container in bootstrap host become: true lineinfile: - create: yes + create: true dest: "{{ container_engine_volumes_path }}/mariadb/_data/grastate.dat" - regexp: 'safe_to_bootstrap:(.*)$' - line: 'safe_to_bootstrap: 1' + 
regexp: "safe_to_bootstrap:(.*)$" + line: "safe_to_bootstrap: 1" state: present when: - bootstrap_host is defined diff --git a/ansible/roles/mariadb/tasks/register.yml b/ansible/roles/mariadb/tasks/register.yml index 1b733afa70..b7a73ce519 100644 --- a/ansible/roles/mariadb/tasks/register.yml +++ b/ansible/roles/mariadb/tasks/register.yml @@ -50,7 +50,7 @@ password: "{{ mariadb_backup_database_password }}" host: "%" priv: "*.*:RELOAD,PROCESS,LOCK TABLES,BINLOG MONITOR/{{ mariadb_backup_database_schema }}.mariadb_backup_history:CREATE,ALTER,INSERT" - append_privs: True + append_privs: true when: - enable_mariabackup | bool - inventory_hostname == mariadb_backup_host @@ -70,7 +70,7 @@ password: "{{ mariadb_backup_database_password }}" host: "%" priv: "{{ mariadb_backup_database_schema }}.*:CREATE,INSERT,SELECT" - append_privs: True + append_privs: true when: - enable_mariabackup | bool - inventory_hostname == mariadb_backup_host From fb9d66ec52a01638519f1c71c70286cd227548c0 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:37:42 +0100 Subject: [PATCH 135/165] ansible-lint: Fix yaml[truthy] in masakari role Change-Id: I9c3da9d4e99f30947d3bdf4bca72c3e3f836a9a8 Signed-off-by: Michal Nasiadka --- ansible/roles/masakari/defaults/main.yml | 2 +- ansible/roles/masakari/tasks/bootstrap.yml | 4 ++-- ansible/roles/masakari/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/masakari/tasks/config.yml | 2 +- ansible/roles/masakari/tasks/precheck.yml | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ansible/roles/masakari/defaults/main.yml b/ansible/roles/masakari/defaults/main.yml index c0713b05cc..496a0f0191 100644 --- a/ansible/roles/masakari/defaults/main.yml +++ b/ansible/roles/masakari/defaults/main.yml @@ -39,7 +39,7 @@ masakari_services: enabled: "{{ enable_masakari_instancemonitor | bool }}" image: "{{ masakari_monitors_image_full }}" volumes: "{{ masakari_instancemonitor_default_volumes + masakari_instancemonitor_extra_volumes 
}}" - privileged: True + privileged: true dimensions: "{{ masakari_instancemonitor_dimensions }}" masakari-hostmonitor: container_name: masakari_hostmonitor diff --git a/ansible/roles/masakari/tasks/bootstrap.yml b/ansible/roles/masakari/tasks/bootstrap.yml index 22eaa252e6..6b5f9d1b0c 100644 --- a/ansible/roles/masakari/tasks/bootstrap.yml +++ b/ansible/roles/masakari/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ masakari_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ masakari_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['masakari-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ masakari_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['masakari-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/masakari/tasks/bootstrap_service.yml b/ansible/roles/masakari/tasks/bootstrap_service.yml index 62f02128cf..40a11650d1 100644 --- a/ansible/roles/masakari/tasks/bootstrap_service.yml +++ b/ansible/roles/masakari/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_masakari" restart_policy: "oneshot" volumes: "{{ masakari_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[masakari_api.group][0] }}" diff --git a/ansible/roles/masakari/tasks/config.yml b/ansible/roles/masakari/tasks/config.yml index 16e14e99f4..2edbcdb355 100644 --- a/ansible/roles/masakari/tasks/config.yml +++ b/ansible/roles/masakari/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: masakari_policy with_first_found: - files: "{{ 
supported_policy_format_list }}" diff --git a/ansible/roles/masakari/tasks/precheck.yml b/ansible/roles/masakari/tasks/precheck.yml index 9db46ab9fb..a5486ac615 100644 --- a/ansible/roles/masakari/tasks/precheck.yml +++ b/ansible/roles/masakari/tasks/precheck.yml @@ -27,7 +27,7 @@ - inventory_hostname in groups['masakari-api'] - name: Checking for coordination backend if running in multinode setup - run_once: True + run_once: true fail: msg: "Please enable Valkey or etcd when running in multinode scenario." when: From bc2690e55540c0164d3fbfad9fe5e9c003607b5f Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:39:50 +0100 Subject: [PATCH 136/165] ansible-lint: Fix yaml[truthy] in mistral role Change-Id: Ifcd30459b610bf37098eae0e2960b2e27715d5ab Signed-off-by: Michal Nasiadka --- ansible/roles/mistral/tasks/bootstrap.yml | 4 ++-- ansible/roles/mistral/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/mistral/tasks/config.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/roles/mistral/tasks/bootstrap.yml b/ansible/roles/mistral/tasks/bootstrap.yml index 215bc09a5a..4e7cb2b2f9 100644 --- a/ansible/roles/mistral/tasks/bootstrap.yml +++ b/ansible/roles/mistral/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ mistral_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ mistral_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['mistral-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ mistral_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['mistral-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/mistral/tasks/bootstrap_service.yml b/ansible/roles/mistral/tasks/bootstrap_service.yml index 3fb1f2bb23..b2aa88c765 100644 --- a/ansible/roles/mistral/tasks/bootstrap_service.yml +++ 
b/ansible/roles/mistral/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_mistral" restart_policy: oneshot volumes: "{{ mistral_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[mistral_api.group][0] }}" diff --git a/ansible/roles/mistral/tasks/config.yml b/ansible/roles/mistral/tasks/config.yml index d9ea731db3..aba2fbf273 100644 --- a/ansible/roles/mistral/tasks/config.yml +++ b/ansible/roles/mistral/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: mistral_policy with_first_found: - files: "{{ supported_policy_format_list }}" From 7a04e69e3a10054bf231b25cd2570f98694ad1ac Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:40:47 +0100 Subject: [PATCH 137/165] ansible-lint: Fix yaml[truthy] in multipathd role Change-Id: I69ec0f94e21a53f5c5f42a4c60a851c15f083aac Signed-off-by: Michal Nasiadka --- ansible/roles/multipathd/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/multipathd/defaults/main.yml b/ansible/roles/multipathd/defaults/main.yml index 60ade00f52..0e45239247 100644 --- a/ansible/roles/multipathd/defaults/main.yml +++ b/ansible/roles/multipathd/defaults/main.yml @@ -5,7 +5,7 @@ multipathd_services: group: multipathd enabled: true ipc_mode: "host" - privileged: True + privileged: true image: "{{ multipathd_image_full }}" volumes: "{{ multipathd_default_volumes + multipathd_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" From 0311c0bed06b872bb19516904f9887eedaad5d7f Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Mon, 24 Nov 2025 08:56:30 +0100 Subject: [PATCH 138/165] fluentd: Fix running with 
enable_fluentd: no Since we broke out fluentd to its own role - we removed the when guards that checked for enable_fluentd inside the role. That has effectively broken running with fluentd disabled. Closes-Bug: #2132288 Change-Id: If202d1a6c848515236e44664af8407dda8441328 Signed-off-by: Michal Nasiadka --- ansible/site.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ansible/site.yml b/ansible/site.yml index 8d742462b4..ad7ac1854f 100644 --- a/ansible/site.yml +++ b/ansible/site.yml @@ -33,6 +33,7 @@ - enable_cyborg_{{ enable_cyborg | bool }} - enable_designate_{{ enable_designate | bool }} - enable_etcd_{{ enable_etcd | bool }} + - enable_fluentd_{{ enable_fluentd | bool }} - enable_glance_{{ enable_glance | bool }} - enable_gnocchi_{{ enable_gnocchi | bool }} - enable_grafana_{{ enable_grafana | bool }} @@ -116,6 +117,7 @@ gather_facts: false hosts: - fluentd + - '&enable_fluentd_True' serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ fluentd_max_fail_percentage | From c1a1c46920e95e13c5a585a2b96963f8f139d536 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:41:38 +0100 Subject: [PATCH 139/165] ansible-lint: Fix yaml[truthy] in neutron role Change-Id: I93214d97b9d7911a4f8e6d947a04fd8cb7ebde21 Signed-off-by: Michal Nasiadka --- ansible/roles/neutron/defaults/main.yml | 22 +++++++++---------- ansible/roles/neutron/tasks/bootstrap.yml | 4 ++-- .../roles/neutron/tasks/bootstrap_service.yml | 4 ++-- .../neutron/tasks/config-neutron-fake.yml | 4 ++-- ansible/roles/neutron/tasks/config.yml | 6 ++--- ansible/roles/neutron/tasks/precheck.yml | 6 ++--- .../roles/neutron/tasks/rolling_upgrade.yml | 10 ++++----- 7 files changed, 28 insertions(+), 28 deletions(-) diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml index 8d3dcdcfd7..d9dacd4c25 100644 --- a/ansible/roles/neutron/defaults/main.yml +++ b/ansible/roles/neutron/defaults/main.yml @@ -61,7 +61,7 @@ neutron_services:
container_name: "neutron_openvswitch_agent" image: "{{ neutron_openvswitch_agent_image_full }}" enabled: "{{ neutron_plugin_agent == 'openvswitch' }}" - privileged: True + privileged: true host_in_groups: >- {{ (inventory_hostname in groups['compute'] @@ -83,7 +83,7 @@ neutron_services: cgroupns_mode: "{{ 'host' if neutron_agents_wrappers | bool else 'private' }}" container_name: "neutron_dhcp_agent" image: "{{ neutron_dhcp_agent_image_full }}" - privileged: True + privileged: true enabled: "{{ neutron_plugin_agent != 'ovn' or neutron_ovn_dhcp_agent | bool }}" group: "neutron-dhcp-agent" host_in_groups: "{{ inventory_hostname in groups['neutron-dhcp-agent'] }}" @@ -99,7 +99,7 @@ neutron_services: cgroupns_mode: "{{ 'host' if neutron_agents_wrappers | bool else 'private' }}" container_name: "neutron_l3_agent" image: "{{ neutron_l3_agent_image_full }}" - privileged: True + privileged: true enabled: "{{ neutron_plugin_agent != 'ovn' }}" environment: KOLLA_IMAGE: "{{ neutron_l3_agent_image_full }}" @@ -117,7 +117,7 @@ neutron_services: neutron-sriov-agent: container_name: "neutron_sriov_agent" image: "{{ neutron_sriov_agent_image_full }}" - privileged: True + privileged: true enabled: "{{ enable_neutron_sriov | bool }}" host_in_groups: "{{ inventory_hostname in groups['compute'] }}" volumes: "{{ neutron_sriov_agent_default_volumes + neutron_sriov_agent_extra_volumes }}" @@ -133,7 +133,7 @@ neutron_services: neutron-eswitchd: container_name: "neutron_eswitchd" image: "{{ neutron_eswitchd_image_full }}" - privileged: True + privileged: true enabled: "{{ enable_neutron_mlnx | bool }}" host_in_groups: "{{ inventory_hostname in groups['compute'] }}" volumes: "{{ neutron_eswitchd_default_volumes + neutron_eswitchd_extra_volumes }}" @@ -141,7 +141,7 @@ neutron_services: neutron-metadata-agent: container_name: "neutron_metadata_agent" image: "{{ neutron_metadata_agent_image_full }}" - privileged: True + privileged: true enabled: "{{ neutron_plugin_agent != 'ovn' }}" 
host_in_groups: >- {{ @@ -155,7 +155,7 @@ neutron_services: cgroupns_mode: "{{ 'host' if neutron_agents_wrappers | bool else 'private' }}" container_name: "neutron_ovn_metadata_agent" image: "{{ neutron_ovn_metadata_agent_image_full }}" - privileged: True + privileged: true enabled: "{{ neutron_plugin_agent == 'ovn' }}" host_in_groups: "{{ inventory_hostname in groups['neutron-ovn-metadata-agent'] }}" volumes: "{{ neutron_ovn_metadata_agent_default_volumes + neutron_ovn_metadata_agent_extra_volumes }}" @@ -169,7 +169,7 @@ neutron_services: neutron-bgp-dragent: container_name: "neutron_bgp_dragent" image: "{{ neutron_bgp_dragent_image_full }}" - privileged: True + privileged: true enabled: "{{ enable_neutron_bgp_dragent | bool }}" group: "neutron-bgp-dragent" host_in_groups: "{{ inventory_hostname in groups['neutron-bgp-dragent'] }}" @@ -179,7 +179,7 @@ neutron_services: neutron-infoblox-ipam-agent: container_name: "neutron_infoblox_ipam_agent" image: "{{ neutron_infoblox_ipam_agent_image_full }}" - privileged: True + privileged: true enabled: "{{ enable_neutron_infoblox_ipam_agent | bool }}" group: "neutron-infoblox-ipam-agent" host_in_groups: "{{ inventory_hostname in groups['neutron-infoblox-ipam-agent'] }}" @@ -188,7 +188,7 @@ neutron_services: neutron-metering-agent: container_name: "neutron_metering_agent" image: "{{ neutron_metering_agent_image_full }}" - privileged: True + privileged: true enabled: "{{ enable_neutron_metering | bool }}" group: "neutron-metering-agent" host_in_groups: "{{ inventory_hostname in groups['neutron-metering-agent'] }}" @@ -197,7 +197,7 @@ neutron_services: ironic-neutron-agent: container_name: "ironic_neutron_agent" image: "{{ ironic_neutron_agent_image_full }}" - privileged: False + privileged: false enabled: "{{ enable_ironic_neutron_agent | bool }}" group: "ironic-neutron-agent" host_in_groups: "{{ inventory_hostname in groups['ironic-neutron-agent'] }}" diff --git a/ansible/roles/neutron/tasks/bootstrap.yml 
b/ansible/roles/neutron/tasks/bootstrap.yml index a58a07b570..b7afe8e98c 100644 --- a/ansible/roles/neutron/tasks/bootstrap.yml +++ b/ansible/roles/neutron/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ neutron_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ neutron_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['neutron-server'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ neutron_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['neutron-server'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/neutron/tasks/bootstrap_service.yml b/ansible/roles/neutron/tasks/bootstrap_service.yml index 094f4ca968..3ec0172c35 100644 --- a/ansible/roles/neutron/tasks/bootstrap_service.yml +++ b/ansible/roles/neutron/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -17,5 +17,5 @@ name: "bootstrap_neutron" restart_policy: oneshot volumes: "{{ neutron_server.volumes }}" - run_once: True + run_once: true delegate_to: "{{ groups[neutron_server.group][0] }}" diff --git a/ansible/roles/neutron/tasks/config-neutron-fake.yml b/ansible/roles/neutron/tasks/config-neutron-fake.yml index ea26ba7c5d..bd94dfd559 100644 --- a/ansible/roles/neutron/tasks/config-neutron-fake.yml +++ b/ansible/roles/neutron/tasks/config-neutron-fake.yml @@ -4,7 +4,7 @@ file: path: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}" state: "directory" - recurse: yes + recurse: true mode: "0770" with_sequence: start=1 end={{ num_nova_fake_per_node }} when: inventory_hostname in groups['compute'] @@ -84,7 +84,7 @@ become: true file: path: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}" - 
recurse: yes + recurse: true owner: "{{ config_owner_user }}" group: "{{ config_owner_group }}" when: inventory_hostname in groups['compute'] diff --git a/ansible/roles/neutron/tasks/config.yml b/ansible/roles/neutron/tasks/config.yml index 9c5a9702b5..76705426e2 100644 --- a/ansible/roles/neutron/tasks/config.yml +++ b/ansible/roles/neutron/tasks/config.yml @@ -13,8 +13,8 @@ find: path: "{{ node_custom_config }}/neutron/plugins/" delegate_to: localhost - run_once: True - changed_when: False + run_once: true + changed_when: false register: check_extra_ml2_plugins - include_tasks: copy-certs.yml @@ -25,7 +25,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: neutron_policy with_first_found: - files: "{{ supported_policy_format_list }}" diff --git a/ansible/roles/neutron/tasks/precheck.yml b/ansible/roles/neutron/tasks/precheck.yml index 16f7b0db44..46ba3db3d2 100644 --- a/ansible/roles/neutron/tasks/precheck.yml +++ b/ansible/roles/neutron/tasks/precheck.yml @@ -33,7 +33,7 @@ - groups['neutron-l3-agent'] | length > 1 fail_msg: "Number of network agents are less than two when enabling agent ha" changed_when: false - run_once: True + run_once: true when: - enable_neutron_agent_ha | bool @@ -51,7 +51,7 @@ assert: that: enable_ironic | bool fail_msg: "Ironic must be enabled when using networking-baremetal/ironic-neutron-agent" - run_once: True + run_once: true when: - enable_ironic_neutron_agent | bool @@ -64,7 +64,7 @@ - neutron_dns_domain != "openstacklocal" fail_msg: "The neutron_dns_domain value has to be non-empty and must end with a period '.'" changed_when: false - run_once: True + run_once: true when: - neutron_dns_integration | bool diff --git a/ansible/roles/neutron/tasks/rolling_upgrade.yml b/ansible/roles/neutron/tasks/rolling_upgrade.yml index d0a98f94ad..6b66ef5d99 100644 --- a/ansible/roles/neutron/tasks/rolling_upgrade.yml +++ b/ansible/roles/neutron/tasks/rolling_upgrade.yml @@ -17,7 +17,7 @@ 
kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_UPGRADE: NEUTRON_DB_EXPAND: @@ -29,7 +29,7 @@ name: "bootstrap_neutron" restart_policy: oneshot volumes: "{{ neutron_server.volumes }}" - run_once: True + run_once: true delegate_to: "{{ groups['neutron-server'][0] }}" - name: Flush Handlers @@ -39,7 +39,7 @@ become: true command: "{{ kolla_container_engine }} exec -t neutron_server neutron-db-manage --subproject {{ item }} has_offline_migrations" register: neutron_check_contract_db_stdout - ignore_errors: yes + ignore_errors: true with_items: "{{ neutron_rolling_upgrade_services }}" when: inventory_hostname == groups['neutron-server'][0] @@ -69,7 +69,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_UPGRADE: NEUTRON_DB_CONTRACT: @@ -81,7 +81,7 @@ name: "bootstrap_neutron" restart_policy: oneshot volumes: "{{ neutron_server.volumes }}" - run_once: True + run_once: true delegate_to: "{{ groups['neutron-server'][0] }}" # Flush handlers here to ensure neutron-server containers are started From 54ce978461623f5b4a6953f28867474249015144 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:43:23 +0100 Subject: [PATCH 140/165] ansible-lint: Fix yaml[truthy] in nova role Change-Id: Ia653bfbbf04e25f5bd002526247645dffef95b2b Signed-off-by: Michal Nasiadka --- ansible/roles/nova-cell/defaults/main.yml | 10 +++++----- ansible/roles/nova-cell/handlers/main.yml | 4 ++-- ansible/roles/nova-cell/tasks/bootstrap_service.yml | 2 +- ansible/roles/nova-cell/tasks/config-nova-fake.yml | 4 ++-- ansible/roles/nova-cell/tasks/config.yml | 6 +++--- ansible/roles/nova-cell/tasks/config_bootstrap.yml | 2 +- ansible/roles/nova-cell/tasks/get_cell_settings.yml | 2 +- .../roles/nova-cell/tasks/online_data_migrations.yml | 2 +- ansible/roles/nova/defaults/main.yml | 8 ++++---- 
ansible/roles/nova/tasks/bootstrap.yml | 4 ++-- ansible/roles/nova/tasks/bootstrap_service.yml | 2 +- ansible/roles/nova/tasks/config.yml | 6 +++--- ansible/roles/nova/tasks/config_bootstrap.yml | 2 +- ansible/roles/nova/tasks/online_data_migrations.yml | 2 +- ansible/roles/nova/tasks/upgrade.yml | 4 ++-- ansible/roles/service-rabbitmq/tasks/main.yml | 2 +- 16 files changed, 31 insertions(+), 31 deletions(-) diff --git a/ansible/roles/nova-cell/defaults/main.yml b/ansible/roles/nova-cell/defaults/main.yml index 88f35ff73e..1cc4a04aa8 100644 --- a/ansible/roles/nova-cell/defaults/main.yml +++ b/ansible/roles/nova-cell/defaults/main.yml @@ -9,7 +9,7 @@ nova_cell_services: image: "{{ nova_libvirt_image_full }}" pid_mode: "host" cgroupns_mode: "host" - privileged: True + privileged: true volumes: "{{ nova_libvirt_default_volumes + nova_libvirt_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ nova_libvirt_dimensions }}" healthcheck: "{{ nova_libvirt_healthcheck }}" @@ -47,7 +47,7 @@ nova_cell_services: nova-conductor: container_name: "nova_conductor" group: "{{ nova_cell_conductor_group }}" - enabled: True + enabled: true image: "{{ nova_conductor_image_full }}" volumes: "{{ nova_conductor_default_volumes + nova_conductor_extra_volumes }}" dimensions: "{{ nova_conductor_dimensions }}" @@ -58,7 +58,7 @@ nova_cell_services: image: "{{ nova_compute_image_full }}" environment: LIBGUESTFS_BACKEND: "direct" - privileged: True + privileged: true enabled: "{{ not enable_nova_fake | bool }}" ipc_mode: "host" volumes: "{{ nova_compute_default_volumes + nova_compute_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" @@ -307,7 +307,7 @@ nova_libvirt_default_dimensions: soft: 67108864 # 64 MiB hard: 67108864 # 64 MiB -nova_libvirt_dimensions: "{{ default_container_dimensions | combine(nova_libvirt_default_dimensions, recursive=True) }}" +nova_libvirt_dimensions: "{{ default_container_dimensions | 
combine(nova_libvirt_default_dimensions, recursive=true) }}" nova_ssh_dimensions: "{{ default_container_dimensions }}" nova_novncproxy_dimensions: "{{ default_container_dimensions }}" nova_spicehtml5proxy_dimensions: "{{ default_container_dimensions }}" @@ -583,7 +583,7 @@ qemu_max_files: 32768 qemu_max_processes: 131072 # Use TLS for libvirt connections and live migration libvirt_tls: false -# Should kolla-ansible manage/copy the certs. False, assumes the deployer is +# Should kolla-ansible manage/copy the certs. False, assumes the deployer is # responsible for making the TLS certs show up in the config directories # also means the deployer is responsible for restarting the nova_compute and # nova_libvirt containers when the key changes, as we can't know when to do that diff --git a/ansible/roles/nova-cell/handlers/main.yml b/ansible/roles/nova-cell/handlers/main.yml index 17658ef2ed..2dcca1546f 100644 --- a/ansible/roles/nova-cell/handlers/main.yml +++ b/ansible/roles/nova-cell/handlers/main.yml @@ -107,7 +107,7 @@ until: restart_nova_libvirt is success notify: "{{ nova_libvirt_notify }}" - # need to wait kolla_set_configs script to overwrite sasl config file +# need to wait kolla_set_configs script to overwrite sasl config file - name: Checking libvirt container is ready become: true shell: @@ -181,7 +181,7 @@ common_options: "{{ docker_common_options }}" name: "nova_compute_fake_{{ item }}" image: "{{ nova_compute_image_full }}" - privileged: True + privileged: true volumes: - "{{ node_config_directory }}/nova-compute-fake-{{ item }}/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" diff --git a/ansible/roles/nova-cell/tasks/bootstrap_service.yml b/ansible/roles/nova-cell/tasks/bootstrap_service.yml index e4a4d2b2c7..a886211de9 100644 --- a/ansible/roles/nova-cell/tasks/bootstrap_service.yml +++ b/ansible/roles/nova-cell/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ 
docker_common_options }}" - detach: False + detach: false environment: KOLLA_UPGRADE: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" diff --git a/ansible/roles/nova-cell/tasks/config-nova-fake.yml b/ansible/roles/nova-cell/tasks/config-nova-fake.yml index 8959dfa149..45702ff01b 100644 --- a/ansible/roles/nova-cell/tasks/config-nova-fake.yml +++ b/ansible/roles/nova-cell/tasks/config-nova-fake.yml @@ -38,7 +38,7 @@ become: true file: path: "{{ node_config_directory }}/nova-compute-fake-{{ item }}" - recurse: yes + recurse: true owner: "{{ config_owner_user }}" group: "{{ config_owner_group }}" with_sequence: start=1 end={{ num_nova_fake_per_node }} @@ -52,7 +52,7 @@ common_options: "{{ docker_common_options }}" name: "nova_compute_fake_{{ item }}" image: "{{ nova_compute_image_full }}" - privileged: True + privileged: true volumes: - "{{ node_config_directory }}/nova-compute-fake-{{ item }}/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" diff --git a/ansible/roles/nova-cell/tasks/config.yml b/ansible/roles/nova-cell/tasks/config.yml index 7783d5ee09..c2da1e528f 100644 --- a/ansible/roles/nova-cell/tasks/config.yml +++ b/ansible/roles/nova-cell/tasks/config.yml @@ -22,7 +22,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: nova_policy with_first_found: - files: "{{ supported_policy_format_list }}" @@ -41,7 +41,7 @@ stat: path: "{{ node_custom_config }}/nova/vendordata.json" delegate_to: localhost - run_once: True + run_once: true register: vendordata_file - name: Set vendordata file path @@ -184,7 +184,7 @@ src: "{{ vendordata_file_path }}" dest: "{{ node_config_directory }}/{{ item }}/vendordata.json" mode: "0660" - become: True + become: true when: - vendordata_file_path is defined - service | service_enabled_and_mapped_to_host diff --git a/ansible/roles/nova-cell/tasks/config_bootstrap.yml b/ansible/roles/nova-cell/tasks/config_bootstrap.yml index 59ce1eb9f8..442320ecf3 100644 --- 
a/ansible/roles/nova-cell/tasks/config_bootstrap.yml +++ b/ansible/roles/nova-cell/tasks/config_bootstrap.yml @@ -41,7 +41,7 @@ project_services: nova-cell-bootstrap: group: "{{ nova_cell_conductor_group }}" - enabled: True + enabled: true when: - nova_cell_copy_certs | bool - inventory_hostname == groups[nova_cell_conductor_group][0] diff --git a/ansible/roles/nova-cell/tasks/get_cell_settings.yml b/ansible/roles/nova-cell/tasks/get_cell_settings.yml index 301f3e4034..30706663e5 100644 --- a/ansible/roles/nova-cell/tasks/get_cell_settings.yml +++ b/ansible/roles/nova-cell/tasks/get_cell_settings.yml @@ -7,7 +7,7 @@ action: "start_container" command: bash -c 'sudo -E kolla_set_configs && sudo -E kolla_copy_cacerts && nova-manage cell_v2 list_cells --verbose' common_options: "{{ docker_common_options }}" - detach: False + detach: false image: "{{ nova_conductor.image }}" labels: BOOTSTRAP: diff --git a/ansible/roles/nova-cell/tasks/online_data_migrations.yml b/ansible/roles/nova-cell/tasks/online_data_migrations.yml index 9240c14e0d..c9e0a0cccd 100644 --- a/ansible/roles/nova-cell/tasks/online_data_migrations.yml +++ b/ansible/roles/nova-cell/tasks/online_data_migrations.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_OSM: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" diff --git a/ansible/roles/nova/defaults/main.yml b/ansible/roles/nova/defaults/main.yml index dec9d06146..9845b41b08 100644 --- a/ansible/roles/nova/defaults/main.yml +++ b/ansible/roles/nova/defaults/main.yml @@ -4,8 +4,8 @@ nova_services: container_name: "nova_api" group: "nova-api" image: "{{ nova_api_image_full }}" - enabled: True - privileged: True + enabled: true + privileged: true volumes: "{{ nova_api_default_volumes + nova_api_extra_volumes }}" dimensions: "{{ nova_api_dimensions }}" healthcheck: "{{ nova_api_healthcheck }}" @@ -34,7 +34,7 @@ nova_services: container_name: 
"nova_metadata" group: "nova-metadata" image: "{{ nova_api_image_full }}" - enabled: True + enabled: true volumes: "{{ nova_metadata_default_volumes + nova_metadata_extra_volumes }}" dimensions: "{{ nova_metadata_dimensions }}" healthcheck: "{{ nova_metadata_healthcheck }}" @@ -63,7 +63,7 @@ nova_services: container_name: "nova_scheduler" group: "nova-scheduler" image: "{{ nova_scheduler_image_full }}" - enabled: True + enabled: true volumes: "{{ nova_scheduler_default_volumes + nova_scheduler_extra_volumes }}" dimensions: "{{ nova_scheduler_dimensions }}" healthcheck: "{{ nova_scheduler_healthcheck }}" diff --git a/ansible/roles/nova/tasks/bootstrap.yml b/ansible/roles/nova/tasks/bootstrap.yml index cde4c04f2e..53b65cb4ab 100644 --- a/ansible/roles/nova/tasks/bootstrap.yml +++ b/ansible/roles/nova/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ nova_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ item }}" - run_once: True + run_once: true with_items: - "{{ nova_cell0_database_name }}" - "{{ nova_api_database_name }}" @@ -43,7 +43,7 @@ database_password: "{{ nova_api_database_password }}" loop_control: label: "{{ item.database_name }}" - run_once: True + run_once: true when: - not use_preconfigured_databases | bool no_log: true diff --git a/ansible/roles/nova/tasks/bootstrap_service.yml b/ansible/roles/nova/tasks/bootstrap_service.yml index 0719ee96ef..9ca46365ed 100644 --- a/ansible/roles/nova/tasks/bootstrap_service.yml +++ b/ansible/roles/nova/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_UPGRADE: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" diff --git a/ansible/roles/nova/tasks/config.yml b/ansible/roles/nova/tasks/config.yml index 6666d1a3e3..8df855380c 100644 --- a/ansible/roles/nova/tasks/config.yml +++ b/ansible/roles/nova/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" 
delegate_to: localhost - run_once: True + run_once: true register: nova_policy with_first_found: - files: "{{ supported_policy_format_list }}" @@ -32,7 +32,7 @@ stat: path: "{{ node_custom_config }}/nova/vendordata.json" delegate_to: localhost - run_once: True + run_once: true register: vendordata_file - name: Set vendordata file path @@ -110,7 +110,7 @@ src: "{{ vendordata_file_path }}" dest: "{{ node_config_directory }}/{{ item }}/vendordata.json" mode: "0660" - become: True + become: true when: - vendordata_file_path is defined - service | service_enabled_and_mapped_to_host diff --git a/ansible/roles/nova/tasks/config_bootstrap.yml b/ansible/roles/nova/tasks/config_bootstrap.yml index aa62d437a6..f6ecd4e75e 100644 --- a/ansible/roles/nova/tasks/config_bootstrap.yml +++ b/ansible/roles/nova/tasks/config_bootstrap.yml @@ -18,7 +18,7 @@ project_services: nova-api-bootstrap: group: "nova-api" - enabled: True + enabled: true run_once: true when: - nova_copy_certs | bool diff --git a/ansible/roles/nova/tasks/online_data_migrations.yml b/ansible/roles/nova/tasks/online_data_migrations.yml index cd9f69bafe..6a6b465a90 100644 --- a/ansible/roles/nova/tasks/online_data_migrations.yml +++ b/ansible/roles/nova/tasks/online_data_migrations.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_OSM: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" diff --git a/ansible/roles/nova/tasks/upgrade.yml b/ansible/roles/nova/tasks/upgrade.yml index 538b548b72..005959c271 100644 --- a/ansible/roles/nova/tasks/upgrade.yml +++ b/ansible/roles/nova/tasks/upgrade.yml @@ -9,7 +9,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_UPGRADE_CHECK: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -19,7 +19,7 @@ name: "nova_upgrade_checks" restart_policy: oneshot volumes: "{{ nova_api_default_volumes 
+ nova_api_extra_volumes }}" - run_once: True + run_once: true register: nova_upgrade_check_stdout delegate_to: "{{ groups['nova-api'][0] }}" failed_when: false diff --git a/ansible/roles/service-rabbitmq/tasks/main.yml b/ansible/roles/service-rabbitmq/tasks/main.yml index dcdc7d5da3..415d988ef9 100644 --- a/ansible/roles/service-rabbitmq/tasks/main.yml +++ b/ansible/roles/service-rabbitmq/tasks/main.yml @@ -41,4 +41,4 @@ until: service_rabbitmq_result is success retries: "{{ service_rabbitmq_retries }}" delay: "{{ service_rabbitmq_delay }}" - no_log: True + no_log: true From 3460e92a3fbc3f669b794aa9b35280a9849f0b0b Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:44:41 +0100 Subject: [PATCH 141/165] ansible-lint: Fix yaml[truthy] in octavia role Change-Id: Ib22ea26475cb0d23a8ac164da7887e041930a24a Signed-off-by: Michal Nasiadka --- ansible/roles/octavia/tasks/bootstrap.yml | 8 ++++---- .../roles/octavia/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/octavia/tasks/config.yml | 4 ++-- .../octavia/tasks/get_resources_info.yml | 10 +++++----- ansible/roles/octavia/tasks/hm-interface.yml | 16 +++++++-------- ansible/roles/octavia/tasks/precheck.yml | 6 +++--- ansible/roles/octavia/tasks/prepare.yml | 20 +++++++++---------- ansible/roles/octavia/tasks/register.yml | 2 +- 8 files changed, 35 insertions(+), 35 deletions(-) diff --git a/ansible/roles/octavia/tasks/bootstrap.yml b/ansible/roles/octavia/tasks/bootstrap.yml index ee2b4f1398..e51746de5a 100644 --- a/ansible/roles/octavia/tasks/bootstrap.yml +++ b/ansible/roles/octavia/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ octavia_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ octavia_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -28,7 +28,7 @@ login_user: "{{ octavia_persistence_database_shard_root_user }}" login_password: "{{ database_password }}" 
name: "{{ octavia_persistence_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -49,7 +49,7 @@ host: "%" priv: "{{ octavia_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -70,7 +70,7 @@ host: "%" priv: "{{ octavia_persistence_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/octavia/tasks/bootstrap_service.yml b/ansible/roles/octavia/tasks/bootstrap_service.yml index c1c7b95010..c8e7113bd4 100644 --- a/ansible/roles/octavia/tasks/bootstrap_service.yml +++ b/ansible/roles/octavia/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_octavia" restart_policy: oneshot volumes: "{{ octavia_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[octavia_api.group][0] }}" diff --git a/ansible/roles/octavia/tasks/config.yml b/ansible/roles/octavia/tasks/config.yml index c5c581240f..774203e59a 100644 --- a/ansible/roles/octavia/tasks/config.yml +++ b/ansible/roles/octavia/tasks/config.yml @@ -16,7 +16,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: octavia_policy with_first_found: - files: "{{ supported_policy_format_list }}" @@ -117,7 +117,7 @@ owner: "{{ config_owner_user }}" group: "{{ config_owner_group }}" mode: "0400" - become: True + become: true when: - inventory_hostname in groups[octavia_services['octavia-worker']['group']] - octavia_auto_configure | bool diff --git 
a/ansible/roles/octavia/tasks/get_resources_info.yml b/ansible/roles/octavia/tasks/get_resources_info.yml index ce6841d544..71576360c7 100644 --- a/ansible/roles/octavia/tasks/get_resources_info.yml +++ b/ansible/roles/octavia/tasks/get_resources_info.yml @@ -10,13 +10,13 @@ endpoint_type: "{{ openstack_interface }}" region_name: "{{ openstack_region_name }}" name: "{{ octavia_amp_flavor.name }}" - run_once: True + run_once: true check_mode: false delegate_to: "{{ groups['octavia-api'][0] }}" register: flavor_results - name: Get {{ octavia_service_auth_project }} project id - become: True + become: true kolla_toolbox: container_engine: "{{ kolla_container_engine }}" module_name: openstack.cloud.project_info @@ -26,7 +26,7 @@ endpoint_type: "{{ openstack_interface }}" region_name: "{{ openstack_region_name }}" name: "{{ octavia_service_auth_project }}" - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" check_mode: false register: project_info @@ -48,7 +48,7 @@ loop: "{{ octavia_amp_security_groups.values() | list }}" loop_control: label: "{{ item.name }}" - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" check_mode: false register: sec_grp_info @@ -65,7 +65,7 @@ region_name: "{{ openstack_region_name }}" name: "{{ octavia_amp_network['name'] }}" register: network_results - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" check_mode: false diff --git a/ansible/roles/octavia/tasks/hm-interface.yml b/ansible/roles/octavia/tasks/hm-interface.yml index 2a547ffd07..279261957b 100644 --- a/ansible/roles/octavia/tasks/hm-interface.yml +++ b/ansible/roles/octavia/tasks/hm-interface.yml @@ -12,7 +12,7 @@ state: present network: "{{ octavia_amp_network['name'] }}" security_groups: "{{ octavia_amp_security_groups['health-mgr-sec-grp']['name'] }}" - device_owner: 'Octavia:health-mgr' + device_owner: "Octavia:health-mgr" name: "octavia-listen-port-{{ ansible_facts.nodename }}" register: 
port_info @@ -20,7 +20,7 @@ # value to {{ ansible_facts.nodename }}, once os_port support this parameter, remove the task below # https://docs.ansible.com/ansible/latest/modules/os_port_module.html#parameters - name: Update Octavia health manager port host_id - become: True + become: true vars: port_id: "{{ port_info.port.id }}" command: > @@ -42,7 +42,7 @@ vars: port_mac: "{{ port_info.port.mac_address }}" port_id: "{{ port_info.port.id }}" - become: True + become: true command: > {{ kolla_container_engine }} exec openvswitch_vswitchd ovs-vsctl --may-exist \ add-port br-int {{ octavia_network_interface }} \ @@ -62,25 +62,25 @@ mode: "0664" - name: Create octavia-interface service - become: True + become: true template: src: octavia-interface.service.j2 dest: /etc/systemd/system/octavia-interface.service register: octavia_interface - name: Restart octavia-interface.service if required - become: True + become: true systemd: name: octavia-interface - daemon_reload: yes + daemon_reload: true state: restarted when: octavia_interface.changed - name: Enable and start octavia-interface.service - become: True + become: true service: name: octavia-interface - enabled: yes + enabled: true state: started - name: Wait for interface {{ octavia_network_interface }} ip appear diff --git a/ansible/roles/octavia/tasks/precheck.yml b/ansible/roles/octavia/tasks/precheck.yml index 23cf4d431b..6dbe9aef51 100644 --- a/ansible/roles/octavia/tasks/precheck.yml +++ b/ansible/roles/octavia/tasks/precheck.yml @@ -42,7 +42,7 @@ stat: path: "{{ node_custom_config }}/octavia/{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: result failed_when: not result.stat.exists when: @@ -59,7 +59,7 @@ msg: > Neutron plugin agent {{ neutron_plugin_agent }} is not supported when octavia_network_type is tenant, only openvswitch is supported currently - run_once: True + run_once: true when: - octavia_auto_configure | bool - octavia_network_type == "tenant" @@ -69,6 +69,6 @@ 
assert: that: enable_valkey | bool fail_msg: "Valkey must be enabled when using Octavia Jobboard" - run_once: True + run_once: true when: - enable_octavia_jobboard | bool diff --git a/ansible/roles/octavia/tasks/prepare.yml b/ansible/roles/octavia/tasks/prepare.yml index b75bcd548b..817008e25c 100644 --- a/ansible/roles/octavia/tasks/prepare.yml +++ b/ansible/roles/octavia/tasks/prepare.yml @@ -19,12 +19,12 @@ ephemeral: "{{ octavia_amp_flavor.ephemeral | default(omit, true) }}" swap: "{{ octavia_amp_flavor.swap | default(omit, true) }}" extra_specs: "{{ octavia_amp_flavor.extra_specs | default(omit, true) }}" - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" register: amphora_flavor_info - name: Create nova keypair for amphora - become: True + become: true kolla_toolbox: container_engine: "{{ kolla_container_engine }}" module_name: openstack.cloud.keypair @@ -36,11 +36,11 @@ state: present name: "{{ octavia_amp_ssh_key_name }}" public_key: "{{ octavia_amp_ssh_key.public_key }}" - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" - name: Get {{ octavia_service_auth_project }} project id - become: True + become: true kolla_toolbox: container_engine: "{{ kolla_container_engine }}" module_name: openstack.cloud.project_info @@ -50,7 +50,7 @@ endpoint_type: "{{ openstack_interface }}" region_name: "{{ openstack_region_name }}" name: "{{ octavia_service_auth_project }}" - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" register: project_info @@ -70,7 +70,7 @@ loop_control: label: "{{ item.name }}" when: item.enabled | bool - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" register: sec_grp_info @@ -93,7 +93,7 @@ - "{{ octavia_amp_security_groups }}" - rules when: item.0.enabled | bool - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" - name: Create loadbalancer management network @@ -115,7 +115,7 @@ external: "{{ 
octavia_amp_network['external'] | default(omit) }}" shared: "{{ octavia_amp_network['shared'] | default(omit) }}" register: network_info - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" - name: Create loadbalancer management subnet @@ -140,7 +140,7 @@ ip_version: "{{ octavia_amp_network['subnet']['ip_version'] | default(omit) }}" ipv6_address_mode: "{{ octavia_amp_network['subnet']['ipv6_address_mode'] | default(omit) }}" ipv6_ra_mode: "{{ octavia_amp_network['subnet']['ipv6_ra_mode'] | default(omit) }}" - run_once: True + run_once: true delegate_to: "{{ groups['octavia-api'][0] }}" - name: Create loadbalancer management router for IPv6 @@ -156,6 +156,6 @@ state: present name: "{{ octavia_amp_router['name'] }}" interfaces: "{{ octavia_amp_router['subnet'] }}" - run_once: True + run_once: true when: octavia_network_address_family == "ipv6" delegate_to: "{{ groups['octavia-api'][0] }}" diff --git a/ansible/roles/octavia/tasks/register.yml b/ansible/roles/octavia/tasks/register.yml index 7bf995e765..c01154c24a 100644 --- a/ansible/roles/octavia/tasks/register.yml +++ b/ansible/roles/octavia/tasks/register.yml @@ -17,5 +17,5 @@ endpoint_type: "{{ openstack_interface }}" cacert: "{{ openstack_cacert }}" region_name: "{{ openstack_region_name }}" - run_once: True + run_once: true with_items: "{{ octavia_required_roles }}" From 589cec413cf3124312cede1d32d9f59648f2238e Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:45:34 +0100 Subject: [PATCH 142/165] ansible-lint: Fix yaml[truthy] in octavia-certificates role Change-Id: Ia1422d985ce53697b4020642fa5e131be6260b46 Signed-off-by: Michal Nasiadka --- ansible/roles/octavia-certificates/tasks/check_expiry.yml | 2 +- ansible/roles/octavia-certificates/tasks/client_ca.yml | 5 ++--- ansible/roles/octavia-certificates/tasks/client_cert.yml | 1 - ansible/roles/octavia-certificates/tasks/server_ca.yml | 1 - 4 files changed, 3 insertions(+), 6 deletions(-) diff --git 
a/ansible/roles/octavia-certificates/tasks/check_expiry.yml b/ansible/roles/octavia-certificates/tasks/check_expiry.yml index 66ed8e4b0c..0f2ee4041d 100644 --- a/ansible/roles/octavia-certificates/tasks/check_expiry.yml +++ b/ansible/roles/octavia-certificates/tasks/check_expiry.yml @@ -17,7 +17,7 @@ - item.valid_at.point_1 fail_msg: "{{ item.item }} will expire within {{ octavia_certs_expiry_limit }} days, on {{ item.not_after }}" success_msg: "{{ item.item }} will not expire within {{ octavia_certs_expiry_limit }} days. It expires on {{ item.not_after }}" - quiet: True + quiet: true loop: "{{ cert_info.results }}" loop_control: label: "{{ item.item }}" diff --git a/ansible/roles/octavia-certificates/tasks/client_ca.yml b/ansible/roles/octavia-certificates/tasks/client_ca.yml index 08c0214746..87917c48e4 100644 --- a/ansible/roles/octavia-certificates/tasks/client_ca.yml +++ b/ansible/roles/octavia-certificates/tasks/client_ca.yml @@ -1,17 +1,16 @@ --- - - name: Create client_ca index.txt copy: content: '' dest: "{{ octavia_certs_work_dir }}/client_ca/index.txt" - force: no + force: false mode: "0660" - name: Create client_ca serial copy: content: "1000\n" dest: "{{ octavia_certs_work_dir }}/client_ca/serial" - force: no + force: false mode: "0660" - name: Create client_ca private key diff --git a/ansible/roles/octavia-certificates/tasks/client_cert.yml b/ansible/roles/octavia-certificates/tasks/client_cert.yml index 384c7d81b0..5040a1809a 100644 --- a/ansible/roles/octavia-certificates/tasks/client_cert.yml +++ b/ansible/roles/octavia-certificates/tasks/client_cert.yml @@ -1,5 +1,4 @@ --- - # NOTE(yoctozepto): This should ideally be per controller, i.e. controller # generates its key&CSR and this CA signs it. 
diff --git a/ansible/roles/octavia-certificates/tasks/server_ca.yml b/ansible/roles/octavia-certificates/tasks/server_ca.yml index 15c30f8934..6e46c51a60 100644 --- a/ansible/roles/octavia-certificates/tasks/server_ca.yml +++ b/ansible/roles/octavia-certificates/tasks/server_ca.yml @@ -1,5 +1,4 @@ --- - - name: Generate server_ca private key command: > openssl genrsa -aes256 -out server_ca.key.pem From 3dac649afd4a280a1813e7cd6b979d00e312144e Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:48:33 +0100 Subject: [PATCH 143/165] ansible-lint: Fix yaml[truthy] in opensearch role Change-Id: I12857ee0d5f66b9c9eaa8ff9e3a4655d665eb8a8 Signed-off-by: Michal Nasiadka --- ansible/roles/opensearch/handlers/main.yml | 6 +++--- ansible/roles/opensearch/tasks/post-config.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/roles/opensearch/handlers/main.yml b/ansible/roles/opensearch/handlers/main.yml index 9791d1b82e..0a279a9b0f 100644 --- a/ansible/roles/opensearch/handlers/main.yml +++ b/ansible/roles/opensearch/handlers/main.yml @@ -10,8 +10,8 @@ url: "{{ opensearch_internal_endpoint }}/_cluster/settings" method: PUT status_code: 200 - return_content: yes - body: "{{ opensearch_shard_body | to_json }}" # noqa jinja[invalid] + return_content: true + body: "{{ opensearch_shard_body | to_json }}" # noqa jinja[invalid] body_format: json ca_path: "{{ openstack_cacert }}" delegate_to: "{{ groups['opensearch'][0] }}" @@ -29,7 +29,7 @@ url: "{{ opensearch_internal_endpoint }}/_flush" method: POST status_code: 200 - return_content: yes + return_content: true body_format: json ca_path: "{{ openstack_cacert }}" delegate_to: "{{ groups['opensearch'][0] }}" diff --git a/ansible/roles/opensearch/tasks/post-config.yml b/ansible/roles/opensearch/tasks/post-config.yml index 4a79104339..27e0271eb1 100644 --- a/ansible/roles/opensearch/tasks/post-config.yml +++ b/ansible/roles/opensearch/tasks/post-config.yml @@ -24,7 +24,7 @@ url: "{{ 
opensearch_internal_endpoint }}/_plugins/_ism/policies/retention" method: GET status_code: 200, 404 - return_content: yes + return_content: true ca_path: "{{ openstack_cacert }}" register: opensearch_retention_policy_check delegate_to: "{{ groups['opensearch'][0] }}" @@ -39,7 +39,7 @@ url: "{{ opensearch_internal_endpoint }}/_plugins/_ism/policies/retention" method: PUT status_code: 201 - return_content: yes + return_content: true body: "{{ opensearch_retention_policy | from_yaml | to_json }}" body_format: json ca_path: "{{ openstack_cacert }}" @@ -62,7 +62,7 @@ url: "{{ opensearch_internal_endpoint }}/_plugins/_ism/add/{{ opensearch_log_index_prefix }}-*" method: POST status_code: 200 - return_content: yes + return_content: true body: "{{ opensearch_set_policy_body | to_json }}" body_format: json ca_path: "{{ openstack_cacert }}" From bcf697ecbc8b1d06f162e45a564865af691cde6b Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:49:22 +0100 Subject: [PATCH 144/165] ansible-lint: Fix yaml[truthy] in openvswitch role Change-Id: Iaaac66afa529d060cb64b3d97dd53fb93f37e4d9 Signed-off-by: Michal Nasiadka --- ansible/roles/openvswitch/defaults/main.yml | 2 +- ansible/roles/openvswitch/handlers/main.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/openvswitch/defaults/main.yml b/ansible/roles/openvswitch/defaults/main.yml index 4857ab4c3b..284ba0e70e 100644 --- a/ansible/roles/openvswitch/defaults/main.yml +++ b/ansible/roles/openvswitch/defaults/main.yml @@ -29,7 +29,7 @@ openvswitch_services: or inventory_hostname in groups['neutron-l3-agent'] or inventory_hostname in groups['neutron-metadata-agent'] }} - privileged: True + privileged: true volumes: "{{ openvswitch_vswitchd_default_volumes + openvswitch_vswitchd_extra_volumes }}" dimensions: "{{ openvswitch_vswitchd_dimensions }}" healthcheck: "{{ openvswitch_vswitchd_healthcheck }}" diff --git a/ansible/roles/openvswitch/handlers/main.yml 
b/ansible/roles/openvswitch/handlers/main.yml index 7cad1a7d4b..653aed93d0 100644 --- a/ansible/roles/openvswitch/handlers/main.yml +++ b/ansible/roles/openvswitch/handlers/main.yml @@ -20,7 +20,7 @@ command: "{{ kolla_container_engine }} exec openvswitch_db ovs-vsctl --no-wait show" register: check_result until: check_result is success - changed_when: False + changed_when: false retries: 30 delay: 2 From e839d38dcaa0bfe6213eb86940a61473a9bfb323 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:50:20 +0100 Subject: [PATCH 145/165] ansible-lint: Fix yaml[truthy] in ovn-db role Change-Id: Ibe411b7e3b7f99f1f723e5cebb33e9ad586074ae Signed-off-by: Michal Nasiadka --- ansible/roles/ovn-db/tasks/bootstrap-db.yml | 4 ++-- ansible/roles/ovn-db/tasks/lookup_cluster.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/ovn-db/tasks/bootstrap-db.yml b/ansible/roles/ovn-db/tasks/bootstrap-db.yml index afeda55c2b..290101850f 100644 --- a/ansible/roles/ovn-db/tasks/bootstrap-db.yml +++ b/ansible/roles/ovn-db/tasks/bootstrap-db.yml @@ -15,7 +15,7 @@ {{ kolla_container_engine }} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound - changed_when: False + changed_when: false register: ovn_nb_cluster_status - name: Configure OVN NB connection settings @@ -40,7 +40,7 @@ {{ kolla_container_engine }} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound - changed_when: False + changed_when: false register: ovn_sb_cluster_status - name: Configure OVN SB connection settings diff --git a/ansible/roles/ovn-db/tasks/lookup_cluster.yml b/ansible/roles/ovn-db/tasks/lookup_cluster.yml index c387aaf3b3..7f8e16e66d 100644 --- a/ansible/roles/ovn-db/tasks/lookup_cluster.yml +++ b/ansible/roles/ovn-db/tasks/lookup_cluster.yml @@ -50,7 +50,7 @@ connect_timeout: 1 timeout: 10 register: check_ovn_nb_db_port_liveness - ignore_errors: yes + ignore_errors: true - name: Divide hosts by 
their OVN NB service port liveness group_by: @@ -101,7 +101,7 @@ connect_timeout: 1 timeout: 10 register: check_ovn_sb_db_port_liveness - ignore_errors: yes + ignore_errors: true - name: Divide hosts by their OVN SB service port liveness group_by: From 1d293bfb2e8040bfa7f43ed54df888163f74b6fe Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:51:28 +0100 Subject: [PATCH 146/165] ansible-lint: Fix yaml[truthy] in ovs-dpdk role Change-Id: I2cec7cdf67e2e7446f2f60dfb2a67621c79ee702 Signed-off-by: Michal Nasiadka --- ansible/roles/ovs-dpdk/defaults/main.yml | 2 +- ansible/roles/ovs-dpdk/handlers/main.yml | 4 ++-- ansible/roles/ovs-dpdk/tasks/config.yml | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/roles/ovs-dpdk/defaults/main.yml b/ansible/roles/ovs-dpdk/defaults/main.yml index 5a6955c031..0ce66ff2ab 100644 --- a/ansible/roles/ovs-dpdk/defaults/main.yml +++ b/ansible/roles/ovs-dpdk/defaults/main.yml @@ -22,7 +22,7 @@ ovsdpdk_services: container_name: "ovsdpdk_vswitchd" image: "{{ ovsdpdk_vswitchd_image_full }}" enabled: "{{ neutron_plugin_agent in ['openvswitch'] }}" - privileged: True + privileged: true host_in_groups: >- {{ inventory_hostname in groups['compute'] diff --git a/ansible/roles/ovs-dpdk/handlers/main.yml b/ansible/roles/ovs-dpdk/handlers/main.yml index 9d1b1e343d..25573806d6 100644 --- a/ansible/roles/ovs-dpdk/handlers/main.yml +++ b/ansible/roles/ovs-dpdk/handlers/main.yml @@ -27,7 +27,7 @@ command: "{{ kolla_container_engine }} exec {{ service.container_name }} ovs-vsctl --no-wait show" register: check_result until: check_result is success - changed_when: False + changed_when: false retries: 30 delay: 2 @@ -60,7 +60,7 @@ name: "{{ service.container_name }}" image: "{{ service.image }}" volumes: "{{ service.volumes }}" - privileged: "{{ service.privileged | default(True) }}" + privileged: "{{ service.privileged | default(true) }}" dimensions: "{{ service.dimensions }}" - name: Ensuring ovsdpdk 
bridges are properly setup named diff --git a/ansible/roles/ovs-dpdk/tasks/config.yml b/ansible/roles/ovs-dpdk/tasks/config.yml index 2c563d75eb..d17911b61e 100644 --- a/ansible/roles/ovs-dpdk/tasks/config.yml +++ b/ansible/roles/ovs-dpdk/tasks/config.yml @@ -1,6 +1,6 @@ --- - name: Ensuring config directories exist - become: True + become: true file: path: "{{ node_config_directory }}/{{ item.key }}" state: "directory" @@ -25,7 +25,7 @@ mode: "0770" - name: Install ovs-dpdkctl service and config - become: True + become: true command: "{{ node_config_directory }}/ovsdpdk-db/ovs-dpdkctl.sh install" environment: CONFIG_FILE: "{{ node_config_directory }}/ovsdpdk-db/ovs-dpdkctl.conf" @@ -38,7 +38,7 @@ ovs_physical_port_policy: "{{ ovs_physical_port_policy }}" - name: Binds the interface to the target driver specified in the config - become: True + become: true command: "{{ node_config_directory }}/ovsdpdk-db/ovs-dpdkctl.sh bind_nics" environment: CONFIG_FILE: "{{ node_config_directory }}/ovsdpdk-db/ovs-dpdkctl.conf" From db5654d169be66a3ebcbb7faa067f4521865dd26 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:52:19 +0100 Subject: [PATCH 147/165] ansible-lint: Fix yaml[truthy] in placement role Change-Id: I6c2e59b798804e62f09effc7875c8f25fe930158 Signed-off-by: Michal Nasiadka --- ansible/roles/placement/defaults/main.yml | 2 +- ansible/roles/placement/tasks/bootstrap.yml | 4 ++-- ansible/roles/placement/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/placement/tasks/config.yml | 2 +- ansible/roles/placement/tasks/upgrade.yml | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ansible/roles/placement/defaults/main.yml b/ansible/roles/placement/defaults/main.yml index 8a36139ad0..ff1b9d1685 100644 --- a/ansible/roles/placement/defaults/main.yml +++ b/ansible/roles/placement/defaults/main.yml @@ -4,7 +4,7 @@ placement_services: container_name: "placement_api" group: "placement-api" image: "{{ placement_api_image_full }}" - 
enabled: True + enabled: true volumes: "{{ placement_api_default_volumes + placement_api_extra_volumes }}" dimensions: "{{ placement_api_dimensions }}" healthcheck: "{{ placement_api_healthcheck }}" diff --git a/ansible/roles/placement/tasks/bootstrap.yml b/ansible/roles/placement/tasks/bootstrap.yml index 310c6c1252..109dd307ab 100644 --- a/ansible/roles/placement/tasks/bootstrap.yml +++ b/ansible/roles/placement/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ placement_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ placement_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['placement-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ placement_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['placement-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/placement/tasks/bootstrap_service.yml b/ansible/roles/placement/tasks/bootstrap_service.yml index 9a9ded8e0f..9375f2ff24 100644 --- a/ansible/roles/placement/tasks/bootstrap_service.yml +++ b/ansible/roles/placement/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_placement" restart_policy: oneshot volumes: "{{ placement_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[placement_api.group][0] }}" diff --git a/ansible/roles/placement/tasks/config.yml b/ansible/roles/placement/tasks/config.yml index 8926746825..585f78693d 100644 --- a/ansible/roles/placement/tasks/config.yml +++ b/ansible/roles/placement/tasks/config.yml @@ -13,7 +13,7 @@ stat: path: "{{ item }}" delegate_to: localhost - run_once: True + run_once: true register: 
placement_policy with_first_found: - files: "{{ supported_policy_format_list }}" diff --git a/ansible/roles/placement/tasks/upgrade.yml b/ansible/roles/placement/tasks/upgrade.yml index 8853cd9f13..ee01b0d93d 100644 --- a/ansible/roles/placement/tasks/upgrade.yml +++ b/ansible/roles/placement/tasks/upgrade.yml @@ -20,7 +20,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_OSM: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -30,5 +30,5 @@ name: "bootstrap_placement" restart_policy: oneshot volumes: "{{ placement_api.volumes }}" - run_once: True + run_once: true delegate_to: "{{ groups[placement_api.group][0] }}" From c0585c4c321ce6e014d6dbcec3b344a02cd6b5ed Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:53:16 +0100 Subject: [PATCH 148/165] ansible-lint: Fix yaml[truthy] in prechecks role Change-Id: I376d9b09dff64a49c1961ff4aa7e3db8bf8b285d Signed-off-by: Michal Nasiadka --- ansible/roles/prechecks/tasks/service_checks.yml | 10 +++++----- ansible/roles/prechecks/tasks/user_checks.yml | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ansible/roles/prechecks/tasks/service_checks.yml b/ansible/roles/prechecks/tasks/service_checks.yml index 3f2a711da1..20b6e34480 100644 --- a/ansible/roles/prechecks/tasks/service_checks.yml +++ b/ansible/roles/prechecks/tasks/service_checks.yml @@ -25,7 +25,7 @@ command: cmd: grep '^[^#].*:\s*$' "{{ node_config }}/passwords.yml" delegate_to: localhost - run_once: True + run_once: true register: result changed_when: false failed_when: result.stdout | regex_replace('(.*ssh_key.*)', '') is search(":") @@ -33,7 +33,7 @@ - name: Check if nscd is running command: pgrep nscd - ignore_errors: yes + ignore_errors: true failed_when: false changed_when: false check_mode: false @@ -48,7 +48,7 @@ - nscd_status.rc == 0 - name: Validate that internal and external vip address are different when TLS is enabled 
only on either the internal and external network - run_once: True + run_once: true fail: msg: 'kolla_external_vip_address and kolla_internal_vip_address must not be the same when only one network has TLS enabled' changed_when: false @@ -58,14 +58,14 @@ - kolla_same_external_internal_vip | bool - name: Validate that enable_ceph is disabled - run_once: True + run_once: true fail: msg: "We are sorry but enable_ceph is no longer supported. Please use external ceph support." when: - (enable_ceph | default()) | bool - name: Validate that enable_redis is disabled - run_once: True + run_once: true assert: that: - not (enable_redis | default(false)) | bool diff --git a/ansible/roles/prechecks/tasks/user_checks.yml b/ansible/roles/prechecks/tasks/user_checks.yml index 94fd5b4668..bdf9617701 100644 --- a/ansible/roles/prechecks/tasks/user_checks.yml +++ b/ansible/roles/prechecks/tasks/user_checks.yml @@ -13,8 +13,8 @@ # NOTE(duonghq): it's only a basic check, should be refined later - name: Check if ansible user can do passwordless sudo command: "true" - become: yes + become: true register: result failed_when: result is failed - changed_when: False + changed_when: false check_mode: false From ee177b36f0a66f3d05f8244f7111da825e6017fa Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:54:34 +0100 Subject: [PATCH 149/165] ansible-lint: Fix yaml[truthy] in prometheus role Change-Id: I9af1f767782904ce3eddd997a225b01f82bda3af Signed-off-by: Michal Nasiadka --- ansible/roles/prometheus/tasks/bootstrap.yml | 2 +- ansible/roles/prometheus/tasks/config.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/roles/prometheus/tasks/bootstrap.yml b/ansible/roles/prometheus/tasks/bootstrap.yml index 9cc8e2634c..c6c4ffc270 100644 --- a/ansible/roles/prometheus/tasks/bootstrap.yml +++ b/ansible/roles/prometheus/tasks/bootstrap.yml @@ -22,7 +22,7 @@ with_dict: "{{ mariadb_shards_info.shards }}" loop_control: label: "{{ shard_host }}" - 
run_once: True + run_once: true when: - enable_prometheus_mysqld_exporter | bool - prometheus_services['prometheus-mysqld-exporter'] | service_enabled_and_mapped_to_host diff --git a/ansible/roles/prometheus/tasks/config.yml b/ansible/roles/prometheus/tasks/config.yml index 3bbc497b72..1b98f40c81 100644 --- a/ansible/roles/prometheus/tasks/config.yml +++ b/ansible/roles/prometheus/tasks/config.yml @@ -25,7 +25,7 @@ find: path: "{{ node_custom_config }}/prometheus/" pattern: "*.rules" - run_once: True + run_once: true delegate_to: localhost register: prometheus_alert_rules when: @@ -115,7 +115,7 @@ find: path: "{{ node_custom_config }}/prometheus/" pattern: "*.tmpl" - run_once: True + run_once: true delegate_to: localhost register: alertmanager_notification_templates when: From 2f4ae42d87019661dd7467570eb6d0b858921c2d Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:55:43 +0100 Subject: [PATCH 150/165] ansible-lint: Fix yaml[truthy] in prune-images role Change-Id: I9b4d3aa66e9e42d6d7816e3a1aeb619368ac3041 Signed-off-by: Michal Nasiadka --- ansible/roles/prune-images/tasks/prune_images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/prune-images/tasks/prune_images.yml b/ansible/roles/prune-images/tasks/prune_images.yml index c1cea204fd..29519fea35 100644 --- a/ansible/roles/prune-images/tasks/prune_images.yml +++ b/ansible/roles/prune-images/tasks/prune_images.yml @@ -2,7 +2,7 @@ - name: Pruning Kolla images become: true docker_prune: - images: yes + images: true images_filters: label: kolla_version timeout: "{{ docker_image_prune_timeout }}" From c51c42adc9e8554e90134730a8fac8ae31b867af Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:56:31 +0100 Subject: [PATCH 151/165] ansible-lint: Fix yaml[truthy] in rabbitmq role Change-Id: I4d4bfd90124e61bdd3fc68be0b6211d8d8f64849 Signed-off-by: Michal Nasiadka --- ansible/roles/rabbitmq/defaults/main.yml | 4 ++-- 
ansible/roles/rabbitmq/tasks/bootstrap.yml | 2 +- ansible/roles/rabbitmq/tasks/restart_services.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/rabbitmq/defaults/main.yml b/ansible/roles/rabbitmq/defaults/main.yml index f6f3e4069b..3a329b187a 100644 --- a/ansible/roles/rabbitmq/defaults/main.yml +++ b/ansible/roles/rabbitmq/defaults/main.yml @@ -91,13 +91,13 @@ rabbitmq_extra_config: {} rabbitmq_enable_prometheus_plugin: "{{ enable_prometheus }}" rabbitmq_plugins: - name: "rabbitmq_management" - enabled: True + enabled: true - name: "rabbitmq_prometheus" enabled: "{{ rabbitmq_enable_prometheus_plugin | bool }}" rabbitmq_enabled_plugins: "{{ rabbitmq_plugins | selectattr('enabled', 'equalto', true) | list }}" -kolla_externally_managed_cert: False +kolla_externally_managed_cert: false rabbitmq_version_suffix: "" diff --git a/ansible/roles/rabbitmq/tasks/bootstrap.yml b/ansible/roles/rabbitmq/tasks/bootstrap.yml index a63e2073c9..97822c2aae 100644 --- a/ansible/roles/rabbitmq/tasks/bootstrap.yml +++ b/ansible/roles/rabbitmq/tasks/bootstrap.yml @@ -15,7 +15,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: "{{ service.bootstrap_environment }}" image: "{{ service.image }}" labels: diff --git a/ansible/roles/rabbitmq/tasks/restart_services.yml b/ansible/roles/rabbitmq/tasks/restart_services.yml index 017fb5a4e0..9f5e8765ba 100644 --- a/ansible/roles/rabbitmq/tasks/restart_services.yml +++ b/ansible/roles/rabbitmq/tasks/restart_services.yml @@ -1,6 +1,6 @@ --- - name: Get info on RabbitMQ container - become: True + become: true kolla_container_facts: action: get_containers container_engine: "{{ kolla_container_engine }}" From 36ad77e46a3fe903c4cb6242f2f7f452437ae9da Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:57:16 +0100 Subject: [PATCH 152/165] ansible-lint: Fix yaml[truthy] in service-cert-copy role Change-Id: 
Iab3ecd94083434f47e4589f72424cf283b4803fb Signed-off-by: Michal Nasiadka --- ansible/roles/service-cert-copy/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/service-cert-copy/defaults/main.yml b/ansible/roles/service-cert-copy/defaults/main.yml index 24b97c760a..cda28bf1d3 100644 --- a/ansible/roles/service-cert-copy/defaults/main.yml +++ b/ansible/roles/service-cert-copy/defaults/main.yml @@ -1,4 +1,4 @@ --- -kolla_externally_managed_cert: False +kolla_externally_managed_cert: false kolla_copy_backend_tls_files: "{{ lookup('vars', (kolla_role_name | default(project_name)) + '_enable_tls_backend', default=false) }}" From 24d1545e8adab01ed230555718fcdf60a9f5e080 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 14:58:30 +0100 Subject: [PATCH 153/165] ansible-lint: Fix yaml[truthy] in service-config-validate role Change-Id: I8285a0518e66f5f0b679ae69a985a78dba7d4b68 Signed-off-by: Michal Nasiadka --- .../roles/service-config-validate/tasks/validate.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/roles/service-config-validate/tasks/validate.yml b/ansible/roles/service-config-validate/tasks/validate.yml index 7a522645b3..9cf956ed7c 100644 --- a/ansible/roles/service-config-validate/tasks/validate.yml +++ b/ansible/roles/service-config-validate/tasks/validate.yml @@ -1,6 +1,6 @@ --- - name: "{{ project_name }} : {{ service.container_name }} | Get info on container" - become: True + become: true kolla_container_facts: action: get_containers container_engine: "{{ kolla_container_engine }}" @@ -9,7 +9,7 @@ register: container_info - name: "{{ project_name }} : {{ service.container_name }} | Validate configurations" - become: True + become: true command: > {{ kolla_container_engine }} exec {{ service.container_name }} bash -c "[[ -f {{ inner_item['config'] }} ]] && oslo-config-validator --config-file {{ inner_item['generator'] }} --input-file {{ inner_item['config'] 
}}" @@ -21,10 +21,10 @@ loop_control: label: "{{ inner_item['config'] | basename }}" loop_var: inner_item - changed_when: False + changed_when: false - name: "{{ project_name }} : {{ service.container_name }} | Ensure log directory exists" - become: True + become: true file: path: "{{ output_dir }}" state: directory @@ -34,7 +34,7 @@ delegate_to: localhost - name: "{{ project_name }} : {{ service.container_name }} | Log configuration errors" - become: True + become: true copy: content: "{{ inner_item.stderr }}" dest: "{{ output_dir }}/{{ inner_item.inner_item.config | basename }}.err" From 90a02fd74b963f619b016f5dab8c8a0a3f54df0a Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 15:01:10 +0100 Subject: [PATCH 154/165] ansible-lint: Fix yaml[truthy] in skyline role Change-Id: I91f20053ddc40e6b08d1d80bf84ecdb215e69496 Signed-off-by: Michal Nasiadka --- ansible/roles/skyline/tasks/bootstrap.yml | 4 ++-- ansible/roles/skyline/tasks/bootstrap_service.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/skyline/tasks/bootstrap.yml b/ansible/roles/skyline/tasks/bootstrap.yml index 57893c3412..7b1168ebcd 100644 --- a/ansible/roles/skyline/tasks/bootstrap.yml +++ b/ansible/roles/skyline/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ skyline_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ skyline_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['skyline-apiserver'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ skyline_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['skyline-apiserver'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/skyline/tasks/bootstrap_service.yml b/ansible/roles/skyline/tasks/bootstrap_service.yml index 61831b2c33..4536684262 100644 --- a/ansible/roles/skyline/tasks/bootstrap_service.yml +++ 
b/ansible/roles/skyline/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_skyline" restart_policy: oneshot volumes: "{{ skyline_apiserver.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[skyline_apiserver.group][0] }}" From c8808ccaa1cf9a11b3fb52cd6f64897d7cc75e74 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 15:02:09 +0100 Subject: [PATCH 155/165] ansible-lint: Fix yaml[truthy] in tacker role Change-Id: Ie04247f7db1eb9fd66d2fc9e6058b76e0296db20 Signed-off-by: Michal Nasiadka --- ansible/roles/tacker/tasks/bootstrap.yml | 4 ++-- ansible/roles/tacker/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/tacker/tasks/config.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/roles/tacker/tasks/bootstrap.yml b/ansible/roles/tacker/tasks/bootstrap.yml index 1f0ae366e2..aa4effcf01 100644 --- a/ansible/roles/tacker/tasks/bootstrap.yml +++ b/ansible/roles/tacker/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ tacker_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ tacker_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['tacker-server'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ tacker_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['tacker-server'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/tacker/tasks/bootstrap_service.yml b/ansible/roles/tacker/tasks/bootstrap_service.yml index 7075bd44f4..9e6d872e5c 100644 --- a/ansible/roles/tacker/tasks/bootstrap_service.yml +++ b/ansible/roles/tacker/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ 
kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_tacker" restart_policy: oneshot volumes: "{{ tacker_server.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[tacker_server.group][0] }}" diff --git a/ansible/roles/tacker/tasks/config.yml b/ansible/roles/tacker/tasks/config.yml index f6473b0d31..255494273c 100644 --- a/ansible/roles/tacker/tasks/config.yml +++ b/ansible/roles/tacker/tasks/config.yml @@ -12,7 +12,7 @@ - name: Check if policies shall be overwritten stat: path: "{{ item }}" - run_once: True + run_once: true delegate_to: localhost register: tacker_policy with_first_found: From be51eaef3cc56c4eb7a2a8af640e7038c5e95b7f Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 15:06:50 +0100 Subject: [PATCH 156/165] ansible-lint: Fix yaml[truthy] in trove role Change-Id: Ica1a6a89d42b5fec8e4f07f28f24176442e01b15 Signed-off-by: Michal Nasiadka --- ansible/roles/trove/tasks/bootstrap.yml | 4 ++-- ansible/roles/trove/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/trove/tasks/config.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/roles/trove/tasks/bootstrap.yml b/ansible/roles/trove/tasks/bootstrap.yml index ef820d1bf2..2c3711bcd7 100644 --- a/ansible/roles/trove/tasks/bootstrap.yml +++ b/ansible/roles/trove/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ trove_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ trove_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['trove-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ trove_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['trove-api'][0] }}" when: - not 
use_preconfigured_databases | bool diff --git a/ansible/roles/trove/tasks/bootstrap_service.yml b/ansible/roles/trove/tasks/bootstrap_service.yml index 74766ed07a..d30eaf7cf9 100644 --- a/ansible/roles/trove/tasks/bootstrap_service.yml +++ b/ansible/roles/trove/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_trove" restart_policy: oneshot volumes: "{{ trove_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[trove_api.group][0] }}" diff --git a/ansible/roles/trove/tasks/config.yml b/ansible/roles/trove/tasks/config.yml index 8427d0079b..459b71c404 100644 --- a/ansible/roles/trove/tasks/config.yml +++ b/ansible/roles/trove/tasks/config.yml @@ -12,7 +12,7 @@ - name: Check if policies shall be overwritten stat: path: "{{ item }}" - run_once: True + run_once: true delegate_to: localhost register: trove_policy with_first_found: From ec72553d007395739435a885fd9b52388170aa41 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 15:09:28 +0100 Subject: [PATCH 157/165] ansible-lint: Fix yaml[truthy] in watcher role Change-Id: Ib1cf619c742f2d37b5cc22134a7d68e53e70e573 Signed-off-by: Michal Nasiadka --- ansible/roles/watcher/tasks/bootstrap.yml | 4 ++-- ansible/roles/watcher/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/watcher/tasks/config.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/roles/watcher/tasks/bootstrap.yml b/ansible/roles/watcher/tasks/bootstrap.yml index ffa029e522..c1e7f14c79 100644 --- a/ansible/roles/watcher/tasks/bootstrap.yml +++ b/ansible/roles/watcher/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ watcher_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ watcher_database_name }}" - 
run_once: True + run_once: true delegate_to: "{{ groups['watcher-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ watcher_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['watcher-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/watcher/tasks/bootstrap_service.yml b/ansible/roles/watcher/tasks/bootstrap_service.yml index 36a72f3831..36374689ef 100644 --- a/ansible/roles/watcher/tasks/bootstrap_service.yml +++ b/ansible/roles/watcher/tasks/bootstrap_service.yml @@ -6,7 +6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_watcher" restart_policy: oneshot volumes: "{{ watcher_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[watcher_api.group][0] }}" diff --git a/ansible/roles/watcher/tasks/config.yml b/ansible/roles/watcher/tasks/config.yml index 1b21a5202d..fdaefc185f 100644 --- a/ansible/roles/watcher/tasks/config.yml +++ b/ansible/roles/watcher/tasks/config.yml @@ -12,7 +12,7 @@ - name: Check if policies shall be overwritten stat: path: "{{ item }}" - run_once: True + run_once: true delegate_to: localhost register: watcher_policy with_first_found: From c16eb15be7b02c313ceddb1c220be2fc24e5db57 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 15:11:23 +0100 Subject: [PATCH 158/165] ansible-lint: Fix yaml[truthy] in zun role Change-Id: I53d4e293ab946bbef7ae9ee70869411a9e2e20cc Signed-off-by: Michal Nasiadka --- ansible/roles/zun/defaults/main.yml | 4 ++-- ansible/roles/zun/tasks/bootstrap.yml | 4 ++-- ansible/roles/zun/tasks/bootstrap_service.yml | 4 ++-- ansible/roles/zun/tasks/config.yml | 2 +- ansible/roles/zun/tasks/external_ceph.yml | 2 +- 
ansible/roles/zun/tasks/precheck.yml | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ansible/roles/zun/defaults/main.yml b/ansible/roles/zun/defaults/main.yml index 094f40fbe9..c418f88b07 100644 --- a/ansible/roles/zun/defaults/main.yml +++ b/ansible/roles/zun/defaults/main.yml @@ -50,7 +50,7 @@ zun_services: group: zun-compute enabled: true image: "{{ zun_compute_image_full }}" - privileged: True + privileged: true volumes: "{{ zun_compute_default_volumes + zun_compute_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ zun_compute_dimensions }}" healthcheck: "{{ zun_compute_healthcheck }}" @@ -59,7 +59,7 @@ zun_services: group: zun-cni-daemon enabled: true image: "{{ zun_cni_daemon_image_full }}" - privileged: True + privileged: true volumes: "{{ zun_cni_daemon_default_volumes + zun_cni_daemon_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ zun_cni_daemon_dimensions }}" healthcheck: "{{ zun_cni_daemon_healthcheck }}" diff --git a/ansible/roles/zun/tasks/bootstrap.yml b/ansible/roles/zun/tasks/bootstrap.yml index a18acdea7f..3ec453be6e 100644 --- a/ansible/roles/zun/tasks/bootstrap.yml +++ b/ansible/roles/zun/tasks/bootstrap.yml @@ -11,7 +11,7 @@ login_user: "{{ zun_database_shard_root_user }}" login_password: "{{ database_password }}" name: "{{ zun_database_name }}" - run_once: True + run_once: true delegate_to: "{{ groups['zun-api'][0] }}" when: - not use_preconfigured_databases | bool @@ -32,7 +32,7 @@ host: "%" priv: "{{ zun_database_name }}.*:ALL" append_privs: "yes" - run_once: True + run_once: true delegate_to: "{{ groups['zun-api'][0] }}" when: - not use_preconfigured_databases | bool diff --git a/ansible/roles/zun/tasks/bootstrap_service.yml b/ansible/roles/zun/tasks/bootstrap_service.yml index d618b07c23..b68dc002a4 100644 --- a/ansible/roles/zun/tasks/bootstrap_service.yml +++ b/ansible/roles/zun/tasks/bootstrap_service.yml @@ -6,7 
+6,7 @@ kolla_container: action: "start_container" common_options: "{{ docker_common_options }}" - detach: False + detach: false environment: KOLLA_BOOTSTRAP: KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" @@ -16,5 +16,5 @@ name: "bootstrap_zun" restart_policy: oneshot volumes: "{{ zun_api.volumes | reject('equalto', '') | list }}" - run_once: True + run_once: true delegate_to: "{{ groups[zun_api.group][0] }}" diff --git a/ansible/roles/zun/tasks/config.yml b/ansible/roles/zun/tasks/config.yml index 7ef4c7e3b5..850f353d72 100644 --- a/ansible/roles/zun/tasks/config.yml +++ b/ansible/roles/zun/tasks/config.yml @@ -17,7 +17,7 @@ - name: Check if policies shall be overwritten stat: path: "{{ item }}" - run_once: True + run_once: true delegate_to: localhost register: zun_policy with_first_found: diff --git a/ansible/roles/zun/tasks/external_ceph.yml b/ansible/roles/zun/tasks/external_ceph.yml index e57410b6bb..1afe869cf4 100644 --- a/ansible/roles/zun/tasks/external_ceph.yml +++ b/ansible/roles/zun/tasks/external_ceph.yml @@ -18,6 +18,6 @@ become: true file: path: "{{ node_config_directory }}/zun-compute" - recurse: yes + recurse: true owner: "{{ config_owner_user }}" group: "{{ config_owner_group }}" diff --git a/ansible/roles/zun/tasks/precheck.yml b/ansible/roles/zun/tasks/precheck.yml index 780a78aa09..cd727ebe3b 100644 --- a/ansible/roles/zun/tasks/precheck.yml +++ b/ansible/roles/zun/tasks/precheck.yml @@ -54,6 +54,6 @@ assert: that: enable_kuryr | bool fail_msg: "kuryr is required but not enabled" - run_once: True + run_once: true when: - enable_zun | bool From 4b0c08d5b5165591da916c6d9e21e0bf0d3d000b Mon Sep 17 00:00:00 2001 From: OpenStack Release Bot Date: Mon, 24 Nov 2025 16:15:23 +0000 Subject: [PATCH 159/165] reno: Update master for unmaintained/2024.1 Update the 2024.1 release notes configuration to build from unmaintained/2024.1. 
Change-Id: I221e2882a763103a56a995c81c8acf91c6326666 Signed-off-by: OpenStack Release Bot Generated-By: openstack/project-config:roles/copy-release-tools-scripts/files/release-tools/change_reno_branch_to_unmaintained.sh --- releasenotes/source/2024.1.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/releasenotes/source/2024.1.rst b/releasenotes/source/2024.1.rst index 4977a4f1a0..6896656be6 100644 --- a/releasenotes/source/2024.1.rst +++ b/releasenotes/source/2024.1.rst @@ -3,4 +3,4 @@ =========================== .. release-notes:: - :branch: stable/2024.1 + :branch: unmaintained/2024.1 From 78f3c2b5de0470c0b1d350d0865663bb6f17f49e Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 12 Nov 2025 15:24:01 +0100 Subject: [PATCH 160/165] ansible-lint: Remove yaml[truthy] from excludes Normalize quotes and some other easy fixes done by ansible-lint --fix Also drop etc from excludes and ignore comment check in etc/kolla/globals.yml Pinning to ansible-lint<25.11.0 because 25.11.1 is breaking without any messages Change-Id: I7dee3866191cef84cd12b1816cdab0a02380ec7f Signed-off-by: Michal Nasiadka --- .ansible-lint | 7 +- .ansible-lint-ignore | 1 + .gitignore | 1 + ansible/bifrost.yml | 4 +- ansible/gather-facts.yml | 2 +- ansible/group_vars/all/aodh.yml | 2 +- ansible/group_vars/all/barbican.yml | 2 +- ansible/group_vars/all/blazar.yml | 2 +- ansible/group_vars/all/ceilometer.yml | 6 +- ansible/group_vars/all/ceph-rgw.yml | 2 +- ansible/group_vars/all/ceph.yml | 2 +- ansible/group_vars/all/cinder.yml | 24 +- ansible/group_vars/all/cloudkitty.yml | 2 +- ansible/group_vars/all/collectd.yml | 2 +- ansible/group_vars/all/common.yml | 32 +-- ansible/group_vars/all/cyborg.yml | 2 +- ansible/group_vars/all/designate.yml | 6 +- ansible/group_vars/all/etcd.yml | 2 +- ansible/group_vars/all/fluentd.yml | 2 +- ansible/group_vars/all/glance.yml | 12 +- ansible/group_vars/all/gnocchi.yml | 4 +- ansible/group_vars/all/grafana.yml | 2 +- 
ansible/group_vars/all/haproxy.yml | 35 +-- ansible/group_vars/all/ironic.yml | 6 +- ansible/group_vars/all/keystone.yml | 1 - ansible/group_vars/all/kuryr.yml | 2 +- ansible/group_vars/all/letsencrypt.yml | 2 +- ansible/group_vars/all/magnum.yml | 2 +- ansible/group_vars/all/manila.yml | 14 +- ansible/group_vars/all/mariadb.yml | 10 +- ansible/group_vars/all/masakari.yml | 2 +- ansible/group_vars/all/memcached.yml | 4 +- ansible/group_vars/all/mistral.yml | 2 +- ansible/group_vars/all/multipathd.yml | 2 +- ansible/group_vars/all/neutron.yml | 39 ++- ansible/group_vars/all/nova.yml | 12 +- ansible/group_vars/all/octavia.yml | 2 +- ansible/group_vars/all/openvswitch.yml | 2 +- ansible/group_vars/all/prometheus.yml | 6 +- ansible/group_vars/all/rabbitmq.yml | 2 +- ansible/group_vars/all/skyline.yml | 2 +- ansible/group_vars/all/tacker.yml | 2 +- ansible/group_vars/all/telegraf.yml | 4 +- ansible/group_vars/all/trove.yml | 4 +- ansible/group_vars/all/valkey.yml | 2 +- ansible/group_vars/all/watcher.yml | 2 +- ansible/group_vars/all/zun.yml | 9 +- ansible/kolla-host.yml | 4 +- ansible/mariadb.yml | 10 +- ansible/mariadb_backup.yml | 6 +- ansible/mariadb_recovery.yml | 6 +- ansible/nova.yml | 20 +- ansible/post-deploy.yml | 2 +- ansible/rabbitmq.yml | 30 +- ansible/roles/cinder/defaults/main.yml | 2 +- ansible/roles/common/defaults/main.yml | 6 +- ansible/roles/etcd/handlers/main.yml | 14 +- ansible/roles/horizon/defaults/main.yml | 2 +- ansible/roles/ironic/tasks/precheck.yml | 2 +- ansible/roles/keystone/defaults/main.yml | 4 +- ansible/roles/letsencrypt/defaults/main.yml | 1 - ansible/roles/magnum/defaults/main.yml | 2 +- .../roles/nova-cell/tasks/create_cells.yml | 4 +- .../nova-cell/tasks/discover_computes.yml | 2 +- .../roles/nova-cell/tasks/external_ceph.yml | 2 +- ansible/roles/nova/tasks/map_cell0.yml | 6 +- ansible/roles/octavia/defaults/main.yml | 6 +- ansible/roles/opensearch/defaults/main.yml | 2 +- .../roles/openvswitch/tasks/config-host.yml | 2 +- 
.../roles/ovn-controller/tasks/setup-ovs.yml | 14 +- ansible/roles/valkey/tasks/upgrade.yml | 2 - ansible/site.yml | 235 +++++++--------- etc/kolla/globals.yml | 260 +++++++++--------- tools/setup-compute-libvirt.yml | 2 +- 74 files changed, 445 insertions(+), 494 deletions(-) create mode 100644 .ansible-lint-ignore diff --git a/.ansible-lint b/.ansible-lint index ad9a30abbc..8a566c1335 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -1,10 +1,12 @@ --- exclude_paths: - - etc + - releasenotes - roles - tests - zuul.d -strict: true +# NOTE(mnasiadka): Switched to false due to rules skipped via .ansible-lint-ignore causing +# failures +strict: false use_default_rules: true skip_list: # [E301] Commands should not change things if nothing needs doing @@ -37,5 +39,4 @@ skip_list: - risky-shell-pipe - command-instead-of-shell - command-instead-of-module - - yaml[truthy] - yaml[line-length] diff --git a/.ansible-lint-ignore b/.ansible-lint-ignore new file mode 100644 index 0000000000..ab7637ed47 --- /dev/null +++ b/.ansible-lint-ignore @@ -0,0 +1 @@ +etc/kolla/globals.yml yaml[comments] skip \ No newline at end of file diff --git a/.gitignore b/.gitignore index e3f52d7e81..480b0ad3f9 100644 --- a/.gitignore +++ b/.gitignore @@ -53,6 +53,7 @@ releasenotes/build # Files generated by Ansible ansible/*.retry +.ansible/ # Others .DS_Store diff --git a/ansible/bifrost.yml b/ansible/bifrost.yml index ad02f05398..a390ea5cb3 100644 --- a/ansible/bifrost.yml +++ b/ansible/bifrost.yml @@ -2,5 +2,5 @@ - name: Apply role bifrost hosts: bifrost roles: - - { role: bifrost, - tags: bifrost} + - role: bifrost + tags: bifrost diff --git a/ansible/gather-facts.yml b/ansible/gather-facts.yml index 468b5ae695..cb0040a743 100644 --- a/ansible/gather-facts.yml +++ b/ansible/gather-facts.yml @@ -50,7 +50,7 @@ setup: filter: "{{ kolla_ansible_setup_filter }}" gather_subset: "{{ kolla_ansible_setup_gather_subset }}" - delegate_facts: True + delegate_facts: true delegate_to: "{{ item }}" 
with_items: "{{ delegate_hosts }}" when: diff --git a/ansible/group_vars/all/aodh.yml b/ansible/group_vars/all/aodh.yml index 2e431e11eb..6cb9e28fce 100644 --- a/ansible/group_vars/all/aodh.yml +++ b/ansible/group_vars/all/aodh.yml @@ -1,5 +1,5 @@ --- -enable_aodh: "no" +enable_aodh: false # Ports aodh_internal_fqdn: "{{ kolla_internal_fqdn }}" diff --git a/ansible/group_vars/all/barbican.yml b/ansible/group_vars/all/barbican.yml index 36e256a018..c7b6c10cee 100644 --- a/ansible/group_vars/all/barbican.yml +++ b/ansible/group_vars/all/barbican.yml @@ -1,5 +1,5 @@ --- -enable_barbican: "no" +enable_barbican: false ####################### # Barbican options diff --git a/ansible/group_vars/all/blazar.yml b/ansible/group_vars/all/blazar.yml index 10122ffdc4..f9463988b0 100644 --- a/ansible/group_vars/all/blazar.yml +++ b/ansible/group_vars/all/blazar.yml @@ -1,5 +1,5 @@ --- -enable_blazar: "no" +enable_blazar: false # Ports blazar_internal_fqdn: "{{ kolla_internal_fqdn }}" diff --git a/ansible/group_vars/all/ceilometer.yml b/ansible/group_vars/all/ceilometer.yml index 19e99eee03..0b178a4704 100644 --- a/ansible/group_vars/all/ceilometer.yml +++ b/ansible/group_vars/all/ceilometer.yml @@ -1,4 +1,4 @@ --- -enable_ceilometer: "no" -enable_ceilometer_ipmi: "no" -enable_ceilometer_prometheus_pushgateway: "no" +enable_ceilometer: false +enable_ceilometer_ipmi: false +enable_ceilometer_prometheus_pushgateway: false diff --git a/ansible/group_vars/all/ceph-rgw.yml b/ansible/group_vars/all/ceph-rgw.yml index 3d3d4802b7..dba16bae7f 100644 --- a/ansible/group_vars/all/ceph-rgw.yml +++ b/ansible/group_vars/all/ceph-rgw.yml @@ -1,5 +1,5 @@ --- -enable_ceph_rgw: "no" +enable_ceph_rgw: false enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}" ceph_rgw_internal_fqdn: "{{ kolla_internal_fqdn }}" diff --git a/ansible/group_vars/all/ceph.yml b/ansible/group_vars/all/ceph.yml index 987717a65e..ab48f06fca 100644 --- a/ansible/group_vars/all/ceph.yml +++ 
b/ansible/group_vars/all/ceph.yml @@ -3,7 +3,7 @@ # External Ceph options ################### # External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes) -external_ceph_cephx_enabled: "yes" +external_ceph_cephx_enabled: true ceph_cluster: "ceph" diff --git a/ansible/group_vars/all/cinder.yml b/ansible/group_vars/all/cinder.yml index 51948a18ad..9418adcc9d 100644 --- a/ansible/group_vars/all/cinder.yml +++ b/ansible/group_vars/all/cinder.yml @@ -1,21 +1,21 @@ --- -enable_cinder: "no" -enable_cinder_backup: "yes" +enable_cinder: false +enable_cinder_backup: true enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool }}" -enable_cinder_backend_lvm: "no" -enable_cinder_backend_nfs: "no" -enable_cinder_backend_quobyte: "no" -enable_cinder_backend_pure_iscsi: "no" -enable_cinder_backend_pure_fc: "no" -enable_cinder_backend_pure_roce: "no" -enable_cinder_backend_pure_nvme_tcp: "no" -enable_cinder_backend_lightbits: "no" +enable_cinder_backend_lvm: false +enable_cinder_backend_nfs: false +enable_cinder_backend_quobyte: false +enable_cinder_backend_pure_iscsi: false +enable_cinder_backend_pure_fc: false +enable_cinder_backend_pure_roce: false +enable_cinder_backend_pure_nvme_tcp: false +enable_cinder_backend_lightbits: false ################################# # Cinder options ################################# -cinder_backend_ceph: "no" -cinder_backend_huawei: "no" +cinder_backend_ceph: false +cinder_backend_huawei: false cinder_backend_huawei_xml_files: [] cinder_volume_group: "cinder-volumes" cinder_target_helper: "{{ 'lioadm' if ansible_facts.os_family == 'RedHat' else 'tgtadm' }}" diff --git a/ansible/group_vars/all/cloudkitty.yml b/ansible/group_vars/all/cloudkitty.yml index a657c3bd0b..23f06708ce 100644 --- a/ansible/group_vars/all/cloudkitty.yml +++ b/ansible/group_vars/all/cloudkitty.yml @@ -1,5 +1,5 @@ --- -enable_cloudkitty: "no" +enable_cloudkitty: false ####################### # Cloudkitty options diff --git 
a/ansible/group_vars/all/collectd.yml b/ansible/group_vars/all/collectd.yml index 8e62327160..03282eafe5 100644 --- a/ansible/group_vars/all/collectd.yml +++ b/ansible/group_vars/all/collectd.yml @@ -1,4 +1,4 @@ --- -enable_collectd: "no" +enable_collectd: false collectd_udp_port: "25826" diff --git a/ansible/group_vars/all/common.yml b/ansible/group_vars/all/common.yml index 9846af85cf..4664430f22 100644 --- a/ansible/group_vars/all/common.yml +++ b/ansible/group_vars/all/common.yml @@ -31,13 +31,13 @@ docker_image_name_prefix: "" docker_image_url: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}" docker_registry_username: # Please read the docs carefully before applying docker_registry_insecure. -docker_registry_insecure: "no" +docker_registry_insecure: false docker_runtime_directory: "" # Docker client timeout in seconds. docker_client_timeout: 120 # Docker networking options -docker_disable_default_iptables_rules: "yes" +docker_disable_default_iptables_rules: true docker_disable_default_network: "{{ docker_disable_default_iptables_rules }}" docker_disable_ip_forward: "{{ docker_disable_default_iptables_rules }}" @@ -79,14 +79,13 @@ container_engine_volumes_path: "{{ docker_volumes_path if kolla_container_engine # Podman has problem with mounting whole /run directory # described here: https://github.com/containers/podman/issues/16305 run_default_volumes_podman: - - '/run/netns:/run/netns:shared' - - '/run/lock/nova:/run/lock/nova:shared' + - "/run/netns:/run/netns:shared" + - "/run/lock/nova:/run/lock/nova:shared" - "/run/libvirt:/run/libvirt:shared" - "/run/nova:/run/nova:shared" - "/run/openvswitch:/run/openvswitch:shared" run_default_volumes_docker: [] - #################### # Dimensions options #################### @@ -112,7 +111,7 @@ default_podman_dimensions_el9: ##################### # Healthcheck options ##################### -enable_container_healthchecks: "yes" +enable_container_healthchecks: 
true # Healthcheck options for Docker containers # interval/timeout/start_period are in seconds default_container_healthcheck_interval: 30 @@ -125,7 +124,6 @@ default_container_healthcheck_start_period: 5 ####################### # Extra volumes for Docker Containers default_extra_volumes: [] - ################## # Firewall options ################## @@ -174,8 +172,8 @@ kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_same_external_internal_vip kolla_dev_repos_directory: "/opt/stack/" kolla_dev_repos_git: "https://opendev.org/openstack" -kolla_dev_repos_pull: "no" -kolla_dev_mode: "no" +kolla_dev_repos_pull: false +kolla_dev_mode: false kolla_source_version: "{% if openstack_release == 'master' %}master{% else %}stable/{{ openstack_release }}{% endif %}" # Proxy settings for containers such as magnum that need internet access @@ -202,7 +200,6 @@ api_interface_address: "{{ 'api' | kolla_address }}" #################### kolla_container_engine: "docker" - ######################### # Internal Image options ######################### @@ -231,10 +228,10 @@ public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}" internal_protocol: "{{ 'https' if kolla_enable_tls_internal | bool else 'http' }}" # Additional optional OpenStack features and services are specified here -enable_central_logging: "no" +enable_central_logging: false # Clean images options are specified here -enable_destroy_images: "no" +enable_destroy_images: false #################### # Global Options @@ -244,7 +241,6 @@ enable_destroy_images: "no" # - container1 # - container2 skip_stop_containers: [] - ################### # Messaging options ################### @@ -319,9 +315,9 @@ openstack_cacert: "" # Enable core OpenStack services. This includes: # glance, keystone, neutron, nova, heat, and horizon. 
-enable_openstack_core: "yes" +enable_openstack_core: true -enable_osprofiler: "no" +enable_osprofiler: false #################### # Osprofiler options @@ -334,8 +330,8 @@ osprofiler_backend_connection_string: "{{ valkey_connection_string if osprofiler ###################### # Backend TLS options ###################### -kolla_enable_tls_backend: "no" -kolla_verify_tls_backend: "yes" +kolla_enable_tls_backend: false +kolla_verify_tls_backend: true kolla_tls_backend_cert: "{{ kolla_certificates_dir }}/backend-cert.pem" kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem" @@ -351,7 +347,7 @@ database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool) and database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool) and (enable_proxysql | bool)) else 'no' }}" # Optionally allow Kolla to set sysctl values -set_sysctl: "yes" +set_sysctl: true # Optionally change the path to sysctl.conf modified by Kolla Ansible plays. kolla_sysctl_conf_path: /etc/sysctl.conf diff --git a/ansible/group_vars/all/cyborg.yml b/ansible/group_vars/all/cyborg.yml index f8346b5ef2..19122bfbe7 100644 --- a/ansible/group_vars/all/cyborg.yml +++ b/ansible/group_vars/all/cyborg.yml @@ -1,5 +1,5 @@ --- -enable_cyborg: "no" +enable_cyborg: false cyborg_internal_fqdn: "{{ kolla_internal_fqdn }}" cyborg_external_fqdn: "{{ kolla_external_fqdn }}" diff --git a/ansible/group_vars/all/designate.yml b/ansible/group_vars/all/designate.yml index 343472a165..c0287f436a 100644 --- a/ansible/group_vars/all/designate.yml +++ b/ansible/group_vars/all/designate.yml @@ -1,5 +1,5 @@ --- -enable_designate: "no" +enable_designate: false designate_keystone_user: "designate" @@ -10,12 +10,12 @@ designate_keystone_user: "designate" designate_backend: "bind9" designate_ns_record: - "ns1.example.org" -designate_backend_external: "no" +designate_backend_external: false designate_backend_external_bind9_nameservers: "" # Valid options are [ '', valkey ] 
designate_coordination_backend: "{{ 'valkey' if enable_valkey | bool else '' }}" -designate_enable_notifications_sink: "no" +designate_enable_notifications_sink: false designate_notifications_topic_name: "notifications_designate" dns_interface: "{{ network_interface }}" diff --git a/ansible/group_vars/all/etcd.yml b/ansible/group_vars/all/etcd.yml index a6b1601196..fb3ae015c8 100644 --- a/ansible/group_vars/all/etcd.yml +++ b/ansible/group_vars/all/etcd.yml @@ -1,5 +1,5 @@ --- -enable_etcd: "no" +enable_etcd: false etcd_client_port: "2379" etcd_peer_port: "2380" diff --git a/ansible/group_vars/all/fluentd.yml b/ansible/group_vars/all/fluentd.yml index 13f41522d0..1d9fcbb28c 100644 --- a/ansible/group_vars/all/fluentd.yml +++ b/ansible/group_vars/all/fluentd.yml @@ -1,5 +1,5 @@ --- -enable_fluentd: "yes" +enable_fluentd: true enable_fluentd_systemd: "{{ (enable_fluentd | bool) and (enable_central_logging | bool) }}" fluentd_syslog_port: "5140" diff --git a/ansible/group_vars/all/glance.yml b/ansible/group_vars/all/glance.yml index baf705736b..11273e4f73 100644 --- a/ansible/group_vars/all/glance.yml +++ b/ansible/group_vars/all/glance.yml @@ -7,13 +7,13 @@ glance_keystone_user: "glance" # Glance options ####################### glance_backend_file: "{{ not (glance_backend_ceph | bool or glance_backend_s3 | bool) }}" -glance_backend_ceph: "no" -glance_backend_s3: "no" -enable_glance_image_cache: "no" +glance_backend_ceph: false +glance_backend_s3: false +enable_glance_image_cache: false glance_file_datadir_volume: "glance" -glance_enable_rolling_upgrade: "no" -glance_enable_property_protection: "no" -glance_enable_interoperable_image_import: "no" +glance_enable_rolling_upgrade: false +glance_enable_property_protection: false +glance_enable_interoperable_image_import: false glance_api_hosts: "{{ [groups['glance-api'] | first] if glance_backend_file | bool and glance_file_datadir_volume == 'glance' else groups['glance-api'] }}" # NOTE(mnasiadka): For use in common role 
glance_enable_tls_backend: "{{ kolla_enable_tls_backend }}" diff --git a/ansible/group_vars/all/gnocchi.yml b/ansible/group_vars/all/gnocchi.yml index 3792bf7d08..db066184d6 100644 --- a/ansible/group_vars/all/gnocchi.yml +++ b/ansible/group_vars/all/gnocchi.yml @@ -1,6 +1,6 @@ --- -enable_gnocchi: "no" -enable_gnocchi_statsd: "no" +enable_gnocchi: false +enable_gnocchi_statsd: false ################# # Gnocchi options diff --git a/ansible/group_vars/all/grafana.yml b/ansible/group_vars/all/grafana.yml index 97c7d12945..15f4301149 100644 --- a/ansible/group_vars/all/grafana.yml +++ b/ansible/group_vars/all/grafana.yml @@ -1,5 +1,5 @@ --- -enable_grafana: "no" +enable_grafana: false enable_grafana_external: "{{ enable_grafana | bool }}" grafana_internal_fqdn: "{{ kolla_internal_fqdn }}" diff --git a/ansible/group_vars/all/haproxy.yml b/ansible/group_vars/all/haproxy.yml index d2e5a14a24..9566d7768c 100644 --- a/ansible/group_vars/all/haproxy.yml +++ b/ansible/group_vars/all/haproxy.yml @@ -1,20 +1,20 @@ --- -enable_haproxy: "yes" +enable_haproxy: true #################### # HAProxy options #################### haproxy_user: "openstack" haproxy_enable_external_vip: "{{ 'no' if kolla_same_external_internal_vip | bool else 'yes' }}" -haproxy_enable_http2: "yes" +haproxy_enable_http2: true haproxy_http2_protocol: "alpn h2,http/1.1" -kolla_enable_tls_internal: "no" +kolla_enable_tls_internal: false kolla_enable_tls_external: "{{ kolla_enable_tls_internal if kolla_same_external_internal_vip | bool else 'no' }}" kolla_certificates_dir: "{{ node_config }}/certificates" kolla_external_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy.pem" kolla_internal_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy-internal.pem" kolla_admin_openrc_cacert: "" -kolla_copy_ca_into_containers: "no" +kolla_copy_ca_into_containers: false haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.crt' }}" haproxy_backend_cacert_dir: 
"/etc/ssl/certs" haproxy_single_external_frontend: false @@ -23,25 +23,26 @@ haproxy_single_external_frontend_public_port: "{{ '443' if kolla_enable_tls_exte # configure SSL/TLS settings for haproxy config, one of [modern, intermediate, legacy]: kolla_haproxy_ssl_settings: "modern" -haproxy_ssl_settings: "{{ ssl_legacy_settings if kolla_haproxy_ssl_settings == 'legacy' else ssl_intermediate_settings if kolla_haproxy_ssl_settings == 'intermediate' else ssl_modern_settings | default(ssl_modern_settings) }}" +haproxy_ssl_settings: "{{ ssl_legacy_settings if kolla_haproxy_ssl_settings == 'legacy' else ssl_intermediate_settings if kolla_haproxy_ssl_settings == 'intermediate' + else ssl_modern_settings | default(ssl_modern_settings) }}" ssl_legacy_settings: | - ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES - ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11 + ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES + ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11 ssl_intermediate_settings: | - ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 - ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets - ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 - ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets + ssl-default-bind-ciphers 
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 + ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets + ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 + ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets ssl_modern_settings: | - ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets - ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets + ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets + ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets haproxy_stats_port: "1984" haproxy_monitor_port: "61313" diff --git a/ansible/group_vars/all/ironic.yml b/ansible/group_vars/all/ironic.yml index 97df0fe71d..9ea0376a27 100644 --- a/ansible/group_vars/all/ironic.yml +++ b/ansible/group_vars/all/ironic.yml 
@@ -1,9 +1,9 @@ --- -enable_ironic: "no" +enable_ironic: false enable_ironic_dnsmasq: "{{ enable_ironic | bool }}" -enable_ironic_neutron_agent: "no" +enable_ironic_neutron_agent: false enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}" -enable_ironic_pxe_filter: "no" +enable_ironic_pxe_filter: false # Keystone user ironic_keystone_user: "ironic" diff --git a/ansible/group_vars/all/keystone.yml b/ansible/group_vars/all/keystone.yml index 8a644bd82f..a4da55199f 100644 --- a/ansible/group_vars/all/keystone.yml +++ b/ansible/group_vars/all/keystone.yml @@ -77,7 +77,6 @@ keystone_default_user_role: "member" # file: "/full/qualified/path/to/mapping/json/file/to/mappingId3" keystone_identity_providers: [] keystone_identity_mappings: [] - keystone_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else keystone_public_listen_port }}" keystone_public_listen_port: "5000" keystone_internal_port: "5000" diff --git a/ansible/group_vars/all/kuryr.yml b/ansible/group_vars/all/kuryr.yml index 69464a0319..8d6fada178 100644 --- a/ansible/group_vars/all/kuryr.yml +++ b/ansible/group_vars/all/kuryr.yml @@ -1,4 +1,4 @@ --- -enable_kuryr: "no" +enable_kuryr: false kuryr_port: "23750" diff --git a/ansible/group_vars/all/letsencrypt.yml b/ansible/group_vars/all/letsencrypt.yml index 50007e8859..64b5ad9ef8 100644 --- a/ansible/group_vars/all/letsencrypt.yml +++ b/ansible/group_vars/all/letsencrypt.yml @@ -1,5 +1,5 @@ --- -enable_letsencrypt: "no" +enable_letsencrypt: false ##################### # ACME client options diff --git a/ansible/group_vars/all/magnum.yml b/ansible/group_vars/all/magnum.yml index 279e17d986..3d5f0d7c4d 100644 --- a/ansible/group_vars/all/magnum.yml +++ b/ansible/group_vars/all/magnum.yml @@ -1,5 +1,5 @@ --- -enable_magnum: "no" +enable_magnum: false magnum_internal_fqdn: "{{ kolla_internal_fqdn }}" magnum_external_fqdn: "{{ kolla_external_fqdn }}" diff --git 
a/ansible/group_vars/all/manila.yml b/ansible/group_vars/all/manila.yml index 526707331b..5416e9c72c 100644 --- a/ansible/group_vars/all/manila.yml +++ b/ansible/group_vars/all/manila.yml @@ -1,11 +1,11 @@ --- -enable_manila: "no" -enable_manila_backend_generic: "no" -enable_manila_backend_hnas: "no" -enable_manila_backend_cephfs_native: "no" -enable_manila_backend_cephfs_nfs: "no" -enable_manila_backend_glusterfs_nfs: "no" -enable_manila_backend_flashblade: "no" +enable_manila: false +enable_manila_backend_generic: false +enable_manila_backend_hnas: false +enable_manila_backend_cephfs_native: false +enable_manila_backend_cephfs_nfs: false +enable_manila_backend_glusterfs_nfs: false +enable_manila_backend_flashblade: false ceph_manila_user: "manila" diff --git a/ansible/group_vars/all/mariadb.yml b/ansible/group_vars/all/mariadb.yml index 5a5439fea8..f53f48a870 100644 --- a/ansible/group_vars/all/mariadb.yml +++ b/ansible/group_vars/all/mariadb.yml @@ -1,18 +1,18 @@ --- -enable_mariadb: "yes" -enable_mariabackup: "no" +enable_mariadb: true +enable_mariabackup: false ############################################# # MariaDB component-specific database details ############################################# # Whether to configure haproxy to load balance # the external MariaDB server(s) -enable_external_mariadb_load_balancer: "no" +enable_external_mariadb_load_balancer: false # Whether to use pre-configured databases / users -use_preconfigured_databases: "no" +use_preconfigured_databases: false # whether to use a common, preconfigured user # for all component databases -use_common_mariadb_user: "no" +use_common_mariadb_user: false mariadb_port: "{{ database_port }}" mariadb_wsrep_port: "4567" diff --git a/ansible/group_vars/all/masakari.yml b/ansible/group_vars/all/masakari.yml index 1f3d117e15..c1a29d9d24 100644 --- a/ansible/group_vars/all/masakari.yml +++ b/ansible/group_vars/all/masakari.yml @@ -1,5 +1,5 @@ --- -enable_masakari: "no" +enable_masakari: false 
enable_masakari_instancemonitor: "{{ enable_masakari | bool }}" enable_masakari_hostmonitor: "{{ enable_masakari | bool }}" diff --git a/ansible/group_vars/all/memcached.yml b/ansible/group_vars/all/memcached.yml index 282138b418..fb2925bf50 100644 --- a/ansible/group_vars/all/memcached.yml +++ b/ansible/group_vars/all/memcached.yml @@ -1,10 +1,10 @@ --- -enable_memcached: "yes" +enable_memcached: true # NOTE: Most memcached clients handle load-balancing via client side # hashing (consistent or not) logic, so going under the covers and messing # with things that the clients are not aware of is generally wrong -enable_haproxy_memcached: "no" +enable_haproxy_memcached: false memcached_port: "11211" memcache_security_strategy: "ENCRYPT" diff --git a/ansible/group_vars/all/mistral.yml b/ansible/group_vars/all/mistral.yml index 2e72f3cc60..2fbce79137 100644 --- a/ansible/group_vars/all/mistral.yml +++ b/ansible/group_vars/all/mistral.yml @@ -1,5 +1,5 @@ --- -enable_mistral: "no" +enable_mistral: false mistral_internal_fqdn: "{{ kolla_internal_fqdn }}" mistral_external_fqdn: "{{ kolla_external_fqdn }}" diff --git a/ansible/group_vars/all/multipathd.yml b/ansible/group_vars/all/multipathd.yml index bae55e51ca..d70afb87ef 100644 --- a/ansible/group_vars/all/multipathd.yml +++ b/ansible/group_vars/all/multipathd.yml @@ -1,2 +1,2 @@ --- -enable_multipathd: "no" +enable_multipathd: false diff --git a/ansible/group_vars/all/neutron.yml b/ansible/group_vars/all/neutron.yml index 2c3b9ca948..8dad06f893 100644 --- a/ansible/group_vars/all/neutron.yml +++ b/ansible/group_vars/all/neutron.yml @@ -1,24 +1,24 @@ --- enable_neutron: "{{ enable_openstack_core | bool }}" -enable_neutron_vpnaas: "no" -enable_neutron_sriov: "no" -enable_neutron_mlnx: "no" -enable_neutron_dvr: "no" -enable_neutron_fwaas: "no" -enable_neutron_qos: "no" -enable_neutron_agent_ha: "no" -enable_neutron_bgp_dragent: "no" -enable_neutron_provider_networks: "no" -enable_neutron_segments: "no" 
-enable_neutron_packet_logging: "no" -enable_neutron_sfc: "no" -enable_neutron_taas: "no" -enable_neutron_trunk: "no" -enable_neutron_metering: "no" -enable_neutron_infoblox_ipam_agent: "no" -enable_neutron_port_forwarding: "no" -neutron_enable_ovn_agent: "no" +enable_neutron_vpnaas: false +enable_neutron_sriov: false +enable_neutron_mlnx: false +enable_neutron_dvr: false +enable_neutron_fwaas: false +enable_neutron_qos: false +enable_neutron_agent_ha: false +enable_neutron_bgp_dragent: false +enable_neutron_provider_networks: false +enable_neutron_segments: false +enable_neutron_packet_logging: false +enable_neutron_sfc: false +enable_neutron_taas: false +enable_neutron_trunk: false +enable_neutron_metering: false +enable_neutron_infoblox_ipam_agent: false +enable_neutron_port_forwarding: false +neutron_enable_ovn_agent: false neutron_keystone_user: "neutron" @@ -50,7 +50,7 @@ computes_need_external_bridge: "{{ (enable_neutron_dvr | bool and neutron_comput neutron_dnsmasq_dns_servers: "1.1.1.1,8.8.8.8,8.8.4.4" # Enable distributed floating ip for OVN deployments -neutron_ovn_distributed_fip: "no" +neutron_ovn_distributed_fip: false # SRIOV physnet:interface mappings when SRIOV is enabled # "sriovnet1" and tunnel_interface used here as placeholders @@ -60,7 +60,6 @@ neutron_enable_tls_backend: "{{ kolla_enable_tls_backend }}" # Set OVN network availability zones neutron_ovn_availability_zones: [] - neutron_internal_fqdn: "{{ kolla_internal_fqdn }}" neutron_external_fqdn: "{{ kolla_external_fqdn }}" neutron_internal_endpoint: "{{ neutron_internal_fqdn | kolla_url(internal_protocol, neutron_server_port) }}" diff --git a/ansible/group_vars/all/nova.yml b/ansible/group_vars/all/nova.yml index 48811be629..f1996cd73d 100644 --- a/ansible/group_vars/all/nova.yml +++ b/ansible/group_vars/all/nova.yml @@ -1,19 +1,19 @@ --- -enable_cells: "no" +enable_cells: false enable_nova: "{{ enable_openstack_core | bool }}" enable_nova_libvirt_container: "{{ nova_compute_virt_type in 
['kvm', 'qemu'] }}" -enable_nova_serialconsole_proxy: "no" -enable_nova_ssh: "yes" +enable_nova_serialconsole_proxy: false +enable_nova_ssh: true ####################### # Nova options ####################### -nova_backend_ceph: "no" +nova_backend_ceph: false nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}" # Valid options are [ kvm, qemu ] nova_compute_virt_type: "kvm" nova_instance_datadir_volume: "{{ 'nova_compute' if enable_nova_libvirt_container | bool else '/var/lib/nova' }}" -nova_safety_upgrade: "no" +nova_safety_upgrade: false # Valid options are [ none, novnc, spice ] nova_console: "novnc" @@ -34,7 +34,7 @@ nova_cell0_database_address: "{{ nova_database_address }}" nova_cell0_database_password: "{{ nova_database_password }}" # Nova fake driver and the number of fake driver per compute node -enable_nova_fake: "no" +enable_nova_fake: false num_nova_fake_per_node: 5 migration_interface: "{{ api_interface }}" diff --git a/ansible/group_vars/all/octavia.yml b/ansible/group_vars/all/octavia.yml index c0a152cf4c..eaff670bc8 100644 --- a/ansible/group_vars/all/octavia.yml +++ b/ansible/group_vars/all/octavia.yml @@ -1,5 +1,5 @@ --- -enable_octavia: "no" +enable_octavia: false enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}" enable_octavia_jobboard: "{{ enable_octavia | bool and 'amphora' in octavia_provider_drivers }}" diff --git a/ansible/group_vars/all/openvswitch.yml b/ansible/group_vars/all/openvswitch.yml index 731f99d00b..d3cbcd5fdd 100644 --- a/ansible/group_vars/all/openvswitch.yml +++ b/ansible/group_vars/all/openvswitch.yml @@ -1,6 +1,6 @@ --- enable_openvswitch: "{{ enable_neutron | bool }}" -enable_ovs_dpdk: "no" +enable_ovs_dpdk: false ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}" diff --git a/ansible/group_vars/all/prometheus.yml b/ansible/group_vars/all/prometheus.yml index e372bf6429..f240e0d1cc 100644 --- a/ansible/group_vars/all/prometheus.yml +++ 
b/ansible/group_vars/all/prometheus.yml @@ -1,5 +1,5 @@ --- -enable_prometheus: "no" +enable_prometheus: false ############ # Prometheus @@ -13,9 +13,9 @@ enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}" enable_prometheus_fluentd_integration: "{{ enable_prometheus | bool and enable_fluentd | bool }}" enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}" enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bool }}" -enable_prometheus_ceph_mgr_exporter: "no" +enable_prometheus_ceph_mgr_exporter: false enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}" -enable_prometheus_openstack_exporter_external: "no" +enable_prometheus_openstack_exporter_external: false enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_opensearch | bool }}" enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}" enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}" diff --git a/ansible/group_vars/all/rabbitmq.yml b/ansible/group_vars/all/rabbitmq.yml index 4773c14445..9e18951874 100644 --- a/ansible/group_vars/all/rabbitmq.yml +++ b/ansible/group_vars/all/rabbitmq.yml @@ -7,7 +7,7 @@ enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transpor rabbitmq_user: "openstack" rabbitmq_monitoring_user: "" # Whether to enable TLS encryption for RabbitMQ client-server communication. -rabbitmq_enable_tls: "no" +rabbitmq_enable_tls: false # CA certificate bundle in RabbitMQ container. 
rabbitmq_cacert: "/etc/ssl/certs/{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.crt' }}" rabbitmq_datadir_volume: "rabbitmq" diff --git a/ansible/group_vars/all/skyline.yml b/ansible/group_vars/all/skyline.yml index 56225268a0..d3b92446da 100644 --- a/ansible/group_vars/all/skyline.yml +++ b/ansible/group_vars/all/skyline.yml @@ -1,5 +1,5 @@ --- -enable_skyline: "no" +enable_skyline: false skyline_apiserver_internal_fqdn: "{{ kolla_internal_fqdn }}" skyline_apiserver_external_fqdn: "{{ kolla_external_fqdn }}" diff --git a/ansible/group_vars/all/tacker.yml b/ansible/group_vars/all/tacker.yml index 936f5fbeab..66791e83f1 100644 --- a/ansible/group_vars/all/tacker.yml +++ b/ansible/group_vars/all/tacker.yml @@ -1,5 +1,5 @@ --- -enable_tacker: "no" +enable_tacker: false tacker_internal_fqdn: "{{ kolla_internal_fqdn }}" tacker_external_fqdn: "{{ kolla_external_fqdn }}" diff --git a/ansible/group_vars/all/telegraf.yml b/ansible/group_vars/all/telegraf.yml index 1ad350fd98..00d9e4be48 100644 --- a/ansible/group_vars/all/telegraf.yml +++ b/ansible/group_vars/all/telegraf.yml @@ -1,9 +1,9 @@ --- -enable_telegraf: "no" +enable_telegraf: false ########## # Telegraf ########## # Configure telegraf to use the docker daemon itself as an input for # telemetry data. 
-telegraf_enable_docker_input: "no" +telegraf_enable_docker_input: false diff --git a/ansible/group_vars/all/trove.yml b/ansible/group_vars/all/trove.yml index 420efad5e5..1305065774 100644 --- a/ansible/group_vars/all/trove.yml +++ b/ansible/group_vars/all/trove.yml @@ -1,6 +1,6 @@ --- -enable_trove: "no" -enable_trove_singletenant: "no" +enable_trove: false +enable_trove_singletenant: false trove_internal_fqdn: "{{ kolla_internal_fqdn }}" trove_external_fqdn: "{{ kolla_external_fqdn }}" diff --git a/ansible/group_vars/all/valkey.yml b/ansible/group_vars/all/valkey.yml index c970de6cec..8c69f70f89 100644 --- a/ansible/group_vars/all/valkey.yml +++ b/ansible/group_vars/all/valkey.yml @@ -1,5 +1,5 @@ --- -enable_valkey: "no" +enable_valkey: false valkey_connection_string: >- redis://{%- diff --git a/ansible/group_vars/all/watcher.yml b/ansible/group_vars/all/watcher.yml index 87d919c8f6..723876bc3a 100644 --- a/ansible/group_vars/all/watcher.yml +++ b/ansible/group_vars/all/watcher.yml @@ -1,5 +1,5 @@ --- -enable_watcher: "no" +enable_watcher: false watcher_internal_fqdn: "{{ kolla_internal_fqdn }}" watcher_external_fqdn: "{{ kolla_external_fqdn }}" diff --git a/ansible/group_vars/all/zun.yml b/ansible/group_vars/all/zun.yml index ed10ea5462..1d767401ce 100644 --- a/ansible/group_vars/all/zun.yml +++ b/ansible/group_vars/all/zun.yml @@ -1,16 +1,15 @@ --- -enable_zun: "no" +enable_zun: false # Extra docker options for Zun -docker_configure_for_zun: "no" +docker_configure_for_zun: false docker_zun_options: -H tcp://{{ api_interface_address | put_address_in_context('url') }}:2375 docker_zun_config: {} - # Extra containerd options for Zun -containerd_configure_for_zun: "no" +containerd_configure_for_zun: false # Enable Ceph backed Cinder Volumes for zun -zun_configure_for_cinder_ceph: "no" +zun_configure_for_cinder_ceph: false # 42463 is the static group id of the zun user in the Zun image. 
# If users customize this value on building the Zun images, diff --git a/ansible/kolla-host.yml b/ansible/kolla-host.yml index 37ab5cc1de..d50bd2f679 100644 --- a/ansible/kolla-host.yml +++ b/ansible/kolla-host.yml @@ -10,5 +10,5 @@ default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: openstack.kolla.baremetal, - tags: baremetal } + - role: openstack.kolla.baremetal + tags: baremetal diff --git a/ansible/mariadb.yml b/ansible/mariadb.yml index ab1c90b0f7..2e63236ece 100644 --- a/ansible/mariadb.yml +++ b/ansible/mariadb.yml @@ -4,7 +4,7 @@ gather_facts: false hosts: - mariadb - - '&enable_mariadb_True' + - "&enable_mariadb_True" max_fail_percentage: >- {{ mariadb_max_fail_percentage | default(kolla_max_fail_percentage) | @@ -19,7 +19,7 @@ gather_facts: false hosts: - mariadb_restart - - '&enable_mariadb_True' + - "&enable_mariadb_True" # Restart in batches serial: "33%" max_fail_percentage: >- @@ -37,7 +37,7 @@ gather_facts: false hosts: - mariadb_start - - '&enable_mariadb_True' + - "&enable_mariadb_True" # Start in batches serial: "33%" max_fail_percentage: >- @@ -55,7 +55,7 @@ gather_facts: false hosts: - mariadb_bootstrap_restart - - '&enable_mariadb_True' + - "&enable_mariadb_True" max_fail_percentage: >- {{ mariadb_max_fail_percentage | default(kolla_max_fail_percentage) | @@ -71,7 +71,7 @@ gather_facts: false hosts: - mariadb - - '&enable_mariadb_True' + - "&enable_mariadb_True" max_fail_percentage: >- {{ mariadb_max_fail_percentage | default(kolla_max_fail_percentage) | diff --git a/ansible/mariadb_backup.yml b/ansible/mariadb_backup.yml index dbe6070c2f..371991c865 100644 --- a/ansible/mariadb_backup.yml +++ b/ansible/mariadb_backup.yml @@ -6,6 +6,6 @@ default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: mariadb, - tags: mariadb, - when: enable_mariabackup | bool } + - role: mariadb + tags: mariadb + when: enable_mariabackup | bool diff --git a/ansible/mariadb_recovery.yml b/ansible/mariadb_recovery.yml index 
a3d72d40dd..6f69867db6 100644 --- a/ansible/mariadb_recovery.yml +++ b/ansible/mariadb_recovery.yml @@ -6,8 +6,8 @@ default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: mariadb, - tags: mariadb, - when: enable_mariadb | bool } + - role: mariadb + tags: mariadb + when: enable_mariadb | bool vars: mariadb_recover: true diff --git a/ansible/nova.yml b/ansible/nova.yml index c41d854075..cff3a52c3b 100644 --- a/ansible/nova.yml +++ b/ansible/nova.yml @@ -30,7 +30,7 @@ gather_facts: false hosts: - nova-api - - '&enable_nova_True' + - "&enable_nova_True" tags: - nova - nova-bootstrap @@ -68,7 +68,7 @@ gather_facts: false hosts: - nova-conductor - - '&enable_nova_True' + - "&enable_nova_True" tags: - nova - nova-bootstrap @@ -106,7 +106,7 @@ - nova-api - nova-scheduler - nova-super-conductor - - '&enable_nova_True' + - "&enable_nova_True" tags: - nova - nova-api @@ -131,7 +131,7 @@ - nova-novncproxy - nova-serialproxy - nova-spicehtml5proxy - - '&enable_nova_True' + - "&enable_nova_True" tags: - nova - nova-cell @@ -153,7 +153,7 @@ gather_facts: false hosts: - nova-scheduler - - '&enable_nova_True' + - "&enable_nova_True" tags: - nova - nova-api @@ -181,7 +181,7 @@ gather_facts: false hosts: - nova-super-conductor - - '&enable_nova_True' + - "&enable_nova_True" tags: - nova - nova-reload @@ -209,7 +209,7 @@ - nova-novncproxy - nova-serialproxy - nova-spicehtml5proxy - - '&enable_nova_True' + - "&enable_nova_True" tags: - nova - nova-reload @@ -233,7 +233,7 @@ hosts: - nova-api - nova-scheduler - - '&enable_nova_True' + - "&enable_nova_True" tags: - nova - nova-reload @@ -259,7 +259,7 @@ gather_facts: false hosts: - nova-api - - '&enable_nova_True' + - "&enable_nova_True" tags: - nova - nova-api @@ -284,7 +284,7 @@ gather_facts: false hosts: - nova-conductor - - '&enable_nova_True' + - "&enable_nova_True" tags: - nova - nova-cell diff --git a/ansible/post-deploy.yml b/ansible/post-deploy.yml index 3629b1d3dc..ba009aba04 100644 --- 
a/ansible/post-deploy.yml +++ b/ansible/post-deploy.yml @@ -1,7 +1,7 @@ --- - name: Determining whether we need become=true hosts: localhost - gather_facts: no + gather_facts: false tasks: - name: Get stats of {{ node_config }} stat: diff --git a/ansible/rabbitmq.yml b/ansible/rabbitmq.yml index 6b405cc626..f21f6b9e83 100644 --- a/ansible/rabbitmq.yml +++ b/ansible/rabbitmq.yml @@ -4,7 +4,7 @@ gather_facts: false hosts: - rabbitmq - - '&enable_rabbitmq_True' + - "&enable_rabbitmq_True" max_fail_percentage: >- {{ rabbitmq_max_fail_percentage | default(kolla_max_fail_percentage) | @@ -15,23 +15,23 @@ - import_role: name: rabbitmq vars: - role_rabbitmq_cluster_cookie: '{{ rabbitmq_cluster_cookie }}' - role_rabbitmq_cluster_port: '{{ rabbitmq_cluster_port }}' - role_rabbitmq_epmd_port: '{{ rabbitmq_epmd_port }}' + role_rabbitmq_cluster_cookie: "{{ rabbitmq_cluster_cookie }}" + role_rabbitmq_cluster_port: "{{ rabbitmq_cluster_port }}" + role_rabbitmq_epmd_port: "{{ rabbitmq_epmd_port }}" role_rabbitmq_groups: rabbitmq - role_rabbitmq_management_port: '{{ rabbitmq_management_port }}' - role_rabbitmq_monitoring_password: '{{ rabbitmq_monitoring_password }}' - role_rabbitmq_monitoring_user: '{{ rabbitmq_monitoring_user }}' - role_rabbitmq_password: '{{ rabbitmq_password }}' - role_rabbitmq_port: '{{ rabbitmq_port }}' - role_rabbitmq_prometheus_port: '{{ rabbitmq_prometheus_port }}' - role_rabbitmq_user: '{{ rabbitmq_user }}' + role_rabbitmq_management_port: "{{ rabbitmq_management_port }}" + role_rabbitmq_monitoring_password: "{{ rabbitmq_monitoring_password }}" + role_rabbitmq_monitoring_user: "{{ rabbitmq_monitoring_user }}" + role_rabbitmq_password: "{{ rabbitmq_password }}" + role_rabbitmq_port: "{{ rabbitmq_port }}" + role_rabbitmq_prometheus_port: "{{ rabbitmq_prometheus_port }}" + role_rabbitmq_user: "{{ rabbitmq_user }}" - name: Restart rabbitmq services gather_facts: false hosts: - rabbitmq_restart - - '&enable_rabbitmq_True' + - "&enable_rabbitmq_True" # Restart 
in batches serial: "33%" max_fail_percentage: >- @@ -45,14 +45,14 @@ name: rabbitmq tasks_from: restart_services.yml vars: - role_rabbitmq_cluster_cookie: '{{ rabbitmq_cluster_cookie }}' + role_rabbitmq_cluster_cookie: "{{ rabbitmq_cluster_cookie }}" role_rabbitmq_groups: rabbitmq - name: Apply rabbitmq post-configuration gather_facts: false hosts: - rabbitmq - - '&enable_rabbitmq_True' + - "&enable_rabbitmq_True" max_fail_percentage: >- {{ rabbitmq_max_fail_percentage | default(kolla_max_fail_percentage) | @@ -66,5 +66,5 @@ tasks_from: post-deploy.yml when: kolla_action in ['deploy', 'reconfigure', 'upgrade'] vars: - role_rabbitmq_cluster_cookie: '{{ rabbitmq_cluster_cookie }}' + role_rabbitmq_cluster_cookie: "{{ rabbitmq_cluster_cookie }}" role_rabbitmq_groups: rabbitmq diff --git a/ansible/roles/cinder/defaults/main.yml b/ansible/roles/cinder/defaults/main.yml index dfcd5c19fa..10112fc350 100644 --- a/ansible/roles/cinder/defaults/main.yml +++ b/ansible/roles/cinder/defaults/main.yml @@ -42,7 +42,7 @@ cinder_services: group: cinder-volume enabled: true image: "{{ cinder_volume_image_full }}" - privileged: True + privileged: true ipc_mode: "host" tmpfs: "{{ cinder_volume_tmpfs }}" volumes: "{{ cinder_volume_default_volumes + cinder_volume_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml index 715b335897..0facf9f85c 100644 --- a/ansible/roles/common/defaults/main.yml +++ b/ansible/roles/common/defaults/main.yml @@ -3,13 +3,13 @@ common_services: kolla-toolbox: container_name: kolla_toolbox group: kolla-toolbox - enabled: True + enabled: true image: "{{ kolla_toolbox_image_full }}" environment: ANSIBLE_NOCOLOR: "1" ANSIBLE_LIBRARY: "/usr/share/ansible" REQUESTS_CA_BUNDLE: "{{ openstack_cacert }}" - privileged: True + privileged: true volumes: "{{ kolla_toolbox_default_volumes + kolla_toolbox_extra_volumes + lookup('vars', 
'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ kolla_toolbox_dimensions }}" @@ -29,7 +29,7 @@ kolla_toolbox_default_volumes: - "/etc/localtime:/etc/localtime:ro" - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - "/dev/:/dev/" - - "/run/:/run/{{ ':shared' if kolla_container_engine == 'docker' else '' }}" # see: https://github.com/containers/podman/issues/16305 + - "/run/:/run/{{ ':shared' if kolla_container_engine == 'docker' else '' }}" # see: https://github.com/containers/podman/issues/16305 - "kolla_logs:/var/log/kolla/" kolla_toolbox_extra_volumes: "{{ default_extra_volumes }}" diff --git a/ansible/roles/etcd/handlers/main.yml b/ansible/roles/etcd/handlers/main.yml index 539883fd2b..b1bb605e90 100644 --- a/ansible/roles/etcd/handlers/main.yml +++ b/ansible/roles/etcd/handlers/main.yml @@ -1,13 +1,13 @@ --- - name: Bootstrap etcd on new cluster - include_tasks: 'bootstrap_cluster.yml' + include_tasks: "bootstrap_cluster.yml" when: - kolla_action != "config" listen: - Bootstrap etcd cluster - name: Look up the cluster leader - include_tasks: 'lookup_leader.yml' + include_tasks: "lookup_leader.yml" listen: - Restart etcd container - Bootstrap etcd services @@ -15,7 +15,7 @@ - Check for deleted members - name: Bootstrap etcd on new services - include_tasks: 'bootstrap_services.yml' + include_tasks: "bootstrap_services.yml" when: - groups.etcd_had_volume_False is defined - inventory_hostname in groups.etcd_had_volume_False @@ -25,7 +25,7 @@ # When upgrading an etcd cluster we have to do it one by one - name: Upgrade etcd non-leaders - include_tasks: 'restart_services.yml' + include_tasks: "restart_services.yml" when: - inventory_hostname not in (groups.etcd_is_leader_True | default([])) - kolla_action == "upgrade" @@ -36,7 +36,7 @@ # When there is no upgrade we can restart 25% of the services without # losing quorum. 
- name: Rolling restart of etcd non-leaders - include_tasks: 'restart_services.yml' + include_tasks: "restart_services.yml" when: - inventory_hostname not in (groups.etcd_is_leader_True | default([])) - groups.etcd.index(inventory_hostname) % 4 == item @@ -52,7 +52,7 @@ - 3 - name: Restart etcd leader - include_tasks: 'restart_services.yml' + include_tasks: "restart_services.yml" when: - inventory_hostname in (groups.etcd_is_leader_True | default([])) listen: @@ -61,7 +61,7 @@ - Bootstrap etcd cluster - name: Remove deleted members - include_tasks: 'remove_deleted_members.yml' + include_tasks: "remove_deleted_members.yml" when: - kolla_action != "config" listen: diff --git a/ansible/roles/horizon/defaults/main.yml b/ansible/roles/horizon/defaults/main.yml index 01a6e3082a..762899830b 100644 --- a/ansible/roles/horizon/defaults/main.yml +++ b/ansible/roles/horizon/defaults/main.yml @@ -162,7 +162,7 @@ horizon_source_version: "{{ kolla_source_version }}" # In some cases, such as when using OIDC, horizon will need to be configured with Keystone's public URL. # Therefore, instead of overriding the whole "horizon_keystone_url", this change allows an easier integration because # the Keystone public URL is already defined with variable "keystone_public_url". 
-horizon_use_keystone_public_url: False +horizon_use_keystone_public_url: false ################### # Copy certificates diff --git a/ansible/roles/ironic/tasks/precheck.yml b/ansible/roles/ironic/tasks/precheck.yml index ec69c8f958..c066c47b58 100644 --- a/ansible/roles/ironic/tasks/precheck.yml +++ b/ansible/roles/ironic/tasks/precheck.yml @@ -72,7 +72,7 @@ msg: > ironic_dnsmasq_dhcp_ranges must be a list connection: local - run_once: True + run_once: true when: - enable_ironic_dnsmasq | bool - not ironic_dnsmasq_dhcp_ranges is sequence diff --git a/ansible/roles/keystone/defaults/main.yml b/ansible/roles/keystone/defaults/main.yml index 0b98275f8c..86bf9094c9 100644 --- a/ansible/roles/keystone/defaults/main.yml +++ b/ansible/roles/keystone/defaults/main.yml @@ -274,8 +274,8 @@ horizon_trusted_dashboards: "{{ ['%s://%s/auth/websso/' % (public_protocol, koll skyline_trusted_dashboards: "{{ ['%s/api/openstack/skyline/api/v1/websso' % (skyline_console_public_endpoint)] if enable_skyline | bool else [] }}" keystone_trusted_dashboards: "{{ horizon_trusted_dashboards + skyline_trusted_dashboards }}" keystone_enable_federation_openid: "{{ enable_keystone_federation | bool and keystone_identity_providers | selectattr('protocol', 'equalto', 'openid') | list | count > 0 }}" -keystone_should_remove_attribute_mappings: False -keystone_should_remove_identity_providers: False +keystone_should_remove_attribute_mappings: false +keystone_should_remove_identity_providers: false keystone_federation_oidc_response_type: "id_token" # can be set to any supported headers, according to # https://github.com/OpenIDC/mod_auth_openidc/blob/ea3af872dcdbb4634a7e541c5e8c7326dafbb090/auth_openidc.conf diff --git a/ansible/roles/letsencrypt/defaults/main.yml b/ansible/roles/letsencrypt/defaults/main.yml index 089b8d944b..05afa1cd92 100644 --- a/ansible/roles/letsencrypt/defaults/main.yml +++ b/ansible/roles/letsencrypt/defaults/main.yml @@ -15,7 +15,6 @@ letsencrypt_services: volumes: "{{ 
letsencrypt_webserver_default_volumes + letsencrypt_webserver_extra_volumes }}" dimensions: "{{ letsencrypt_webserver_dimensions }}" - ############## # LetsEncrypt ############## diff --git a/ansible/roles/magnum/defaults/main.yml b/ansible/roles/magnum/defaults/main.yml index d723571ce5..c6d3a89993 100644 --- a/ansible/roles/magnum/defaults/main.yml +++ b/ansible/roles/magnum/defaults/main.yml @@ -72,7 +72,7 @@ magnum_database_shard: #################### # Magnum #################### -enable_cluster_user_trust: False +enable_cluster_user_trust: false # The default cinder volume type to be used for container storage volume in clusters # that specify the docker-volume-size option. For example gp1, io1 etc default_docker_volume_type: "" diff --git a/ansible/roles/nova-cell/tasks/create_cells.yml b/ansible/roles/nova-cell/tasks/create_cells.yml index 66c7f18759..0d6f0fafec 100644 --- a/ansible/roles/nova-cell/tasks/create_cells.yml +++ b/ansible/roles/nova-cell/tasks/create_cells.yml @@ -11,7 +11,7 @@ action: "start_container" command: bash -c 'sudo -E kolla_set_configs && sudo kolla_copy_cacerts && nova-manage cell_v2 create_cell{% if nova_cell_name %} --name {{ nova_cell_name }}{% endif %}' common_options: "{{ docker_common_options }}" - detach: False + detach: false image: "{{ nova_conductor.image }}" labels: BOOTSTRAP: @@ -37,7 +37,7 @@ action: "start_container" command: "bash -c 'sudo -E kolla_set_configs && sudo kolla_copy_cacerts && nova-manage cell_v2 update_cell --cell_uuid {{ nova_cell_settings.cell_uuid }}'" common_options: "{{ docker_common_options }}" - detach: False + detach: false image: "{{ nova_conductor.image }}" labels: BOOTSTRAP: diff --git a/ansible/roles/nova-cell/tasks/discover_computes.yml b/ansible/roles/nova-cell/tasks/discover_computes.yml index b49285b7e6..99a2b0627f 100644 --- a/ansible/roles/nova-cell/tasks/discover_computes.yml +++ b/ansible/roles/nova-cell/tasks/discover_computes.yml @@ -19,4 +19,4 @@ become: true command: > {{ 
kolla_container_engine }} exec -t nova_conductor nova-manage cell_v2 discover_hosts --by-service --cell_uuid {{ nova_cell_settings.cell_uuid }} - changed_when: False + changed_when: false diff --git a/ansible/roles/nova-cell/tasks/external_ceph.yml b/ansible/roles/nova-cell/tasks/external_ceph.yml index 9f4fe9cbc6..dc20c6ad1e 100644 --- a/ansible/roles/nova-cell/tasks/external_ceph.yml +++ b/ansible/roles/nova-cell/tasks/external_ceph.yml @@ -202,4 +202,4 @@ result: "{{ cinder_cephx_raw_key | default }}" enabled: "{{ cinder_backend_ceph }}" notify: "{{ libvirt_restart_handlers }}" - no_log: True + no_log: true diff --git a/ansible/roles/nova/tasks/map_cell0.yml b/ansible/roles/nova/tasks/map_cell0.yml index 69a1204fd8..e9acab03dc 100644 --- a/ansible/roles/nova/tasks/map_cell0.yml +++ b/ansible/roles/nova/tasks/map_cell0.yml @@ -1,5 +1,5 @@ --- -- run_once: True +- run_once: true delegate_to: "{{ groups[nova_api.group][0] }}" block: - name: Create cell0 mappings @@ -11,7 +11,7 @@ action: "start_container" command: bash -c 'sudo -E kolla_set_configs && sudo -E kolla_copy_cacerts && nova-manage cell_v2 map_cell0 --database_connection {{ nova_cell0_connection }}' common_options: "{{ docker_common_options }}" - detach: False + detach: false image: "{{ nova_api.image }}" labels: BOOTSTRAP: @@ -48,7 +48,7 @@ --database_connection {{ nova_cell0_connection }} --transport-url {{ nova_cell0_transport_url }}' common_options: "{{ docker_common_options }}" - detach: False + detach: false image: "{{ nova_api.image }}" labels: BOOTSTRAP: diff --git a/ansible/roles/octavia/defaults/main.yml b/ansible/roles/octavia/defaults/main.yml index e683e67669..4930a07688 100644 --- a/ansible/roles/octavia/defaults/main.yml +++ b/ansible/roles/octavia/defaults/main.yml @@ -326,7 +326,7 @@ octavia_user_auth: # - vcpus octavia_amp_flavor: name: "amphora" - is_public: no + is_public: false vcpus: 1 ram: 1024 disk: 5 @@ -380,8 +380,8 @@ octavia_amp_network: subnet: name: lb-mgmt-subnet cidr: "{{ 
octavia_amp_network_cidr }}" - no_gateway_ip: yes - enable_dhcp: yes + no_gateway_ip: true + enable_dhcp: true # Octavia management network subnet CIDR. octavia_amp_network_cidr: 10.1.0.0/24 diff --git a/ansible/roles/opensearch/defaults/main.yml b/ansible/roles/opensearch/defaults/main.yml index f8b45f5b83..f2c13bf686 100644 --- a/ansible/roles/opensearch/defaults/main.yml +++ b/ansible/roles/opensearch/defaults/main.yml @@ -58,7 +58,7 @@ opensearch_services: #################### # Register Opensearch internal endpoint in the Keystone service catalogue -opensearch_enable_keystone_registration: False +opensearch_enable_keystone_registration: false opensearch_cluster_name: "kolla_logging" opensearch_heap_size: "1g" diff --git a/ansible/roles/openvswitch/tasks/config-host.yml b/ansible/roles/openvswitch/tasks/config-host.yml index dfd4f8cff4..250a7b66a0 100644 --- a/ansible/roles/openvswitch/tasks/config-host.yml +++ b/ansible/roles/openvswitch/tasks/config-host.yml @@ -10,7 +10,7 @@ # as a error, so it has to be created beforehand. 
# See: https://github.com/containers/podman/issues/14781 - name: Create /run/openvswitch directory on host - become: True + become: true file: path: /run/openvswitch state: directory diff --git a/ansible/roles/ovn-controller/tasks/setup-ovs.yml b/ansible/roles/ovn-controller/tasks/setup-ovs.yml index 49325aa0c3..5c037b4c72 100644 --- a/ansible/roles/ovn-controller/tasks/setup-ovs.yml +++ b/ansible/roles/ovn-controller/tasks/setup-ovs.yml @@ -15,13 +15,15 @@ # Format: physnet1:br1,physnet2:br2 ovn_mappings: "{{ neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}" # Format: physnet1:00:11:22:33:44:55,physnet2:00:11:22:33:44:56 - ovn_macs: "{% for physnet, bridge in neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) %}{{ physnet }}:{{ ovn_base_mac | random_mac(seed=inventory_hostname + bridge) }}{% if not loop.last %},{% endif %}{% endfor %}" + ovn_macs: "{% for physnet, bridge in neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) %}{{ physnet }}:{{ ovn_base_mac | random_mac(seed=inventory_hostname + + bridge) }}{% if not loop.last %},{% endif %}{% endfor %}" ovn_cms_opts: >- {{ ( [] + ( ['enable-chassis-as-gw'] if inventory_hostname in groups['ovn-controller-network'] else [] ) - + ( ['availability-zones=' + neutron_ovn_availability_zones | join(':')] if inventory_hostname in groups['ovn-controller-network'] and neutron_ovn_availability_zones else [] ) + + ( ['availability-zones=' + neutron_ovn_availability_zones | join(':')] if inventory_hostname in groups['ovn-controller-network'] and neutron_ovn_availability_zones + else [] ) ) | join(',') }} become: true @@ -43,7 +45,11 @@ - { name: ovn-remote-probe-interval, value: "{{ ovn_remote_probe_interval }}" } - { name: ovn-openflow-probe-interval, value: "{{ ovn_openflow_probe_interval }}" } - { name: ovn-monitor-all, value: "{{ ovn_monitor_all | bool }}" } - - { name: ovn-bridge-mappings, value: "{{ ovn_mappings }}", 
state: "{{ 'present' if (inventory_hostname in groups['ovn-controller-network'] or computes_need_external_bridge | bool) else 'absent' }}" } - - { name: ovn-chassis-mac-mappings, value: "{{ ovn_macs }}", state: "{{ 'present' if inventory_hostname in groups['ovn-controller-compute'] else 'absent' }}" } + - name: ovn-bridge-mappings + value: "{{ ovn_mappings }}" + state: "{{ 'present' if (inventory_hostname in groups['ovn-controller-network'] or computes_need_external_bridge | bool) else 'absent' }}" + - name: ovn-chassis-mac-mappings + value: "{{ ovn_macs }}" + state: "{{ 'present' if inventory_hostname in groups['ovn-controller-compute'] else 'absent' }}" - { name: ovn-cms-options, value: "{{ ovn_cms_opts }}", state: "{{ 'present' if ovn_cms_opts != '' else 'absent' }}" } when: inventory_hostname in groups.get('ovn-controller', []) diff --git a/ansible/roles/valkey/tasks/upgrade.yml b/ansible/roles/valkey/tasks/upgrade.yml index 0fa7338875..0de57486a5 100644 --- a/ansible/roles/valkey/tasks/upgrade.yml +++ b/ansible/roles/valkey/tasks/upgrade.yml @@ -17,7 +17,6 @@ - name: Perform Redis to Valkey migration steps when: redis_container_facts.containers['redis'] is defined block: - - name: Set temporary Valkey migration vars set_fact: valkey_server_port: "6380" @@ -120,7 +119,6 @@ valkey_sentinel_monitor_name: "kolla" _valkey_migration: false - - name: Reconfigure/Redeploy Valkey on default ports import_tasks: reconfigure.yml diff --git a/ansible/site.yml b/ansible/site.yml index ad7ac1854f..1a05fbc56a 100644 --- a/ansible/site.yml +++ b/ansible/site.yml @@ -132,7 +132,7 @@ gather_facts: false hosts: - loadbalancer - - '&enable_loadbalancer_True' + - "&enable_loadbalancer_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ loadbalancer_max_fail_percentage | @@ -351,85 +351,79 @@ gather_facts: false hosts: - opensearch - - '&enable_opensearch_True' + - "&enable_opensearch_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ 
opensearch_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: opensearch, - tags: opensearch } + - { role: opensearch, tags: opensearch } - name: Apply role letsencrypt gather_facts: false hosts: - letsencrypt - - '&enable_letsencrypt_True' + - "&enable_letsencrypt_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ letsencrypt_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: letsencrypt, - tags: letsencrypt } + - { role: letsencrypt, tags: letsencrypt } - name: Apply role collectd gather_facts: false hosts: - collectd - - '&enable_collectd_True' + - "&enable_collectd_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ collectd_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: collectd, - tags: collectd } + - { role: collectd, tags: collectd } - name: Apply role influxdb gather_facts: false hosts: - influxdb - - '&enable_influxdb_True' + - "&enable_influxdb_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ influxdb_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: influxdb, - tags: influxdb } + - { role: influxdb, tags: influxdb } - name: Apply role telegraf gather_facts: false hosts: - telegraf - - '&enable_telegraf_True' + - "&enable_telegraf_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ telegraf_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: telegraf, - tags: telegraf } + - { role: telegraf, tags: telegraf } - name: Apply role valkey gather_facts: false hosts: - valkey - - '&enable_valkey_True' + - "&enable_valkey_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ valkey_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: valkey, - tags: valkey } + - { role: valkey, tags: valkey } 
# MariaDB deployment is more complicated than other services, so is covered in # its own playbook. @@ -439,15 +433,14 @@ gather_facts: false hosts: - memcached - - '&enable_memcached_True' + - "&enable_memcached_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ memcached_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: memcached, - tags: [memcache, memcached] } + - { role: memcached, tags: [memcache, memcached] } - name: Apply role prometheus gather_facts: false @@ -460,60 +453,55 @@ - prometheus-elasticsearch-exporter - prometheus-blackbox-exporter - prometheus-libvirt-exporter - - '&enable_prometheus_True' + - "&enable_prometheus_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ prometheus_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: prometheus, - tags: prometheus } + - { role: prometheus, tags: prometheus } - name: Apply role prometheus-node-exporters gather_facts: false hosts: - prometheus-node-exporter - prometheus-cadvisor - - '&enable_prometheus_True' + - "&enable_prometheus_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ prometheus_node_exporters_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: prometheus-node-exporters, - tags: [prometheus, prometheus-node-exporters], - when: enable_prometheus | bool } + - { role: prometheus-node-exporters, tags: [prometheus, prometheus-node-exporters], when: enable_prometheus | bool } - name: Apply role iscsi gather_facts: false hosts: - iscsid - tgtd - - '&enable_iscsid_True' + - "&enable_iscsid_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ iscsid_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: iscsi, - tags: iscsi } + - { role: iscsi, tags: iscsi } - name: Apply role multipathd gather_facts: false hosts: - multipathd - - 
'&enable_multipathd_True' + - "&enable_multipathd_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ multipathd_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: multipathd, - tags: multipathd } + - { role: multipathd, tags: multipathd } - import_playbook: rabbitmq.yml @@ -521,28 +509,26 @@ gather_facts: false hosts: - etcd - - '&enable_etcd_True' + - "&enable_etcd_True" max_fail_percentage: >- {{ etcd_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: etcd, - tags: etcd } + - { role: etcd, tags: etcd } - name: Apply role keystone gather_facts: false hosts: - keystone - - '&enable_keystone_True' + - "&enable_keystone_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ keystone_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: keystone, - tags: keystone } + - { role: keystone, tags: keystone } - name: Apply role ceph-rgw gather_facts: false @@ -550,29 +536,27 @@ # NOTE(mgoddard): This is only used to register Keystone services, and # can run on any host running kolla-toolbox. 
- kolla-toolbox - - '&enable_ceph_rgw_True' + - "&enable_ceph_rgw_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ ceph_rgw_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: ceph-rgw, - tags: ceph-rgw } + - { role: ceph-rgw, tags: ceph-rgw } - name: Apply role glance gather_facts: false hosts: - glance-api - - '&enable_glance_True' + - "&enable_glance_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ glance_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: glance, - tags: glance } + - { role: glance, tags: glance } - name: Apply role ironic gather_facts: false @@ -581,15 +565,14 @@ - ironic-conductor - ironic-tftp - ironic-http - - '&enable_ironic_True' + - "&enable_ironic_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ ironic_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: ironic, - tags: ironic } + - { role: ironic, tags: ironic } - name: Apply role cinder gather_facts: false @@ -598,73 +581,66 @@ - cinder-backup - cinder-scheduler - cinder-volume - - '&enable_cinder_True' + - "&enable_cinder_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ cinder_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: cinder, - tags: cinder } + - { role: cinder, tags: cinder } - name: Apply role placement gather_facts: false hosts: - placement-api - - '&enable_placement_True' + - "&enable_placement_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ placement_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: placement, - tags: placement } + - { role: placement, tags: placement } - name: Apply role openvswitch gather_facts: false hosts: - openvswitch - - '&enable_openvswitch_True_enable_ovs_dpdk_False' + - 
"&enable_openvswitch_True_enable_ovs_dpdk_False" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ openvswitch_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: openvswitch, - tags: openvswitch, - when: "(enable_openvswitch | bool) and not (enable_ovs_dpdk | bool)"} + - { role: openvswitch, tags: openvswitch, when: "(enable_openvswitch | bool) and not (enable_ovs_dpdk | bool)" } - name: Apply role ovs-dpdk gather_facts: false hosts: - openvswitch - - '&enable_openvswitch_True_enable_ovs_dpdk_True' + - "&enable_openvswitch_True_enable_ovs_dpdk_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ ovs_dpdk_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: ovs-dpdk, - tags: ovs-dpdk, - when: "(enable_openvswitch | bool) and (enable_ovs_dpdk | bool)"} + - { role: ovs-dpdk, tags: ovs-dpdk, when: "(enable_openvswitch | bool) and (enable_ovs_dpdk | bool)" } - name: Apply role ovn-controller gather_facts: false hosts: - ovn-controller - - '&enable_ovn_True' + - "&enable_ovn_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ ovn_controller_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: ovn-controller, - tags: [ovn, ovn-controller] } + - { role: ovn-controller, tags: [ovn, ovn-controller] } - name: Apply role ovn-db gather_facts: false @@ -672,15 +648,14 @@ - ovn-nb-db - ovn-northd - ovn-sb-db - - '&enable_ovn_True' + - "&enable_ovn_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ ovn_db_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: ovn-db, - tags: [ovn, ovn-db] } + - { role: ovn-db, tags: [ovn, ovn-db] } # Nova deployment is more complicated than other services, so is covered in its # own playbook. 
@@ -700,44 +675,41 @@ - neutron-infoblox-ipam-agent - compute - manila-share - - '&enable_neutron_True' + - "&enable_neutron_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ neutron_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: neutron, - tags: neutron } + - { role: neutron, tags: neutron } - name: Apply role kuryr gather_facts: false hosts: - compute - - '&enable_kuryr_True' + - "&enable_kuryr_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ kuryr_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: kuryr, - tags: kuryr } + - { role: kuryr, tags: kuryr } - name: Apply role hacluster gather_facts: false hosts: - hacluster - hacluster-remote - - '&enable_hacluster_True' + - "&enable_hacluster_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ hacluster_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: hacluster, - tags: hacluster } + - { role: hacluster, tags: hacluster } - name: Apply role heat gather_facts: false @@ -745,44 +717,41 @@ - heat-api - heat-api-cfn - heat-engine - - '&enable_heat_True' + - "&enable_heat_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ heat_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: heat, - tags: heat } + - { role: heat, tags: heat } - name: Apply role horizon gather_facts: false hosts: - horizon - - '&enable_horizon_True' + - "&enable_horizon_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ horizon_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: horizon, - tags: horizon } + - { role: horizon, tags: horizon } - name: Apply role magnum gather_facts: false hosts: - magnum-api - magnum-conductor - - '&enable_magnum_True' + - "&enable_magnum_True" serial: '{{ kolla_serial|default("0") }}' 
max_fail_percentage: >- {{ magnum_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: magnum, - tags: magnum } + - { role: magnum, tags: magnum } - name: Apply role mistral gather_facts: false @@ -791,15 +760,14 @@ - mistral-engine - mistral-executor - mistral-event-engine - - '&enable_mistral_True' + - "&enable_mistral_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ mistral_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: mistral, - tags: mistral } + - { role: mistral, tags: mistral } - name: Apply role manila gather_facts: false @@ -808,15 +776,14 @@ - manila-data - manila-share - manila-scheduler - - '&enable_manila_True' + - "&enable_manila_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ manila_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: manila, - tags: manila } + - { role: manila, tags: manila } - name: Apply role gnocchi gather_facts: false @@ -824,15 +791,14 @@ - gnocchi-api - gnocchi-metricd - gnocchi-statsd - - '&enable_gnocchi_True' + - "&enable_gnocchi_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ gnocchi_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: gnocchi, - tags: gnocchi } + - { role: gnocchi, tags: gnocchi } - name: Apply role ceilometer gather_facts: false @@ -841,15 +807,14 @@ - ceilometer-notification - ceilometer-compute - ceilometer-ipmi - - '&enable_ceilometer_True' + - "&enable_ceilometer_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ ceilometer_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: ceilometer, - tags: ceilometer } + - { role: ceilometer, tags: ceilometer } - name: Apply role aodh gather_facts: false @@ -858,15 +823,14 @@ - aodh-evaluator - aodh-listener - aodh-notifier - - '&enable_aodh_True' + - 
"&enable_aodh_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ aodh_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: aodh, - tags: aodh } + - { role: aodh, tags: aodh } - name: Apply role barbican gather_facts: false @@ -874,15 +838,14 @@ - barbican-api - barbican-keystone-listener - barbican-worker - - '&enable_barbican_True' + - "&enable_barbican_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ barbican_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: barbican, - tags: barbican } + - { role: barbican, tags: barbican } - name: Apply role cyborg gather_facts: false @@ -890,15 +853,14 @@ - cyborg-api - cyborg-agent - cyborg-conductor - - '&enable_cyborg_True' + - "&enable_cyborg_True" serial: '{{ serial|default("0") }}' max_fail_percentage: >- {{ cyborg_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: cyborg, - tags: cyborg } + - { role: cyborg, tags: cyborg } - name: Apply role designate gather_facts: false @@ -910,15 +872,14 @@ - designate-worker - designate-sink - designate-backend-bind9 - - '&enable_designate_True' + - "&enable_designate_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ designate_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: designate, - tags: designate } + - { role: designate, tags: designate } - name: Apply role trove gather_facts: false @@ -926,15 +887,14 @@ - trove-api - trove-conductor - trove-taskmanager - - '&enable_trove_True' + - "&enable_trove_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ trove_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: trove, - tags: trove } + - { role: trove, tags: trove } - name: Apply role watcher gather_facts: false @@ -942,59 +902,55 @@ - watcher-api - watcher-engine - 
watcher-applier - - '&enable_watcher_True' + - "&enable_watcher_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ watcher_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: watcher, - tags: watcher } + - { role: watcher, tags: watcher } - name: Apply role grafana gather_facts: false hosts: - grafana - - '&enable_grafana_True' + - "&enable_grafana_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ grafana_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: grafana, - tags: grafana } + - { role: grafana, tags: grafana } - name: Apply role cloudkitty gather_facts: false hosts: - cloudkitty-api - cloudkitty-processor - - '&enable_cloudkitty_True' + - "&enable_cloudkitty_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ cloudkitty_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: cloudkitty, - tags: cloudkitty } + - { role: cloudkitty, tags: cloudkitty } - name: Apply role tacker gather_facts: false hosts: - tacker-server - tacker-conductor - - '&enable_tacker_True' + - "&enable_tacker_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ tacker_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: tacker, - tags: tacker } + - { role: tacker, tags: tacker } - name: Apply role octavia gather_facts: false @@ -1003,15 +959,14 @@ - octavia-health-manager - octavia-housekeeping - octavia-worker - - '&enable_octavia_True' + - "&enable_octavia_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ octavia_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: octavia, - tags: octavia } + - { role: octavia, tags: octavia } - name: Apply role zun gather_facts: false @@ -1020,30 +975,28 @@ - zun-wsproxy - zun-compute - zun-cni-daemon - - '&enable_zun_True' + - 
"&enable_zun_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ zun_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: zun, - tags: zun } + - { role: zun, tags: zun } - name: Apply role blazar gather_facts: false hosts: - blazar-api - blazar-manager - - '&enable_blazar_True' + - "&enable_blazar_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ blazar_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: blazar, - tags: blazar } + - { role: blazar, tags: blazar } - name: Apply role masakari gather_facts: false @@ -1052,26 +1005,24 @@ - masakari-engine - masakari-hostmonitor - masakari-instancemonitor - - '&enable_masakari_True' + - "&enable_masakari_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ masakari_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: masakari, - tags: masakari } + - { role: masakari, tags: masakari } - name: Apply role skyline gather_facts: false hosts: - skyline - - '&enable_skyline_True' + - "&enable_skyline_True" serial: '{{ kolla_serial|default("0") }}' max_fail_percentage: >- {{ skyline_max_fail_percentage | default(kolla_max_fail_percentage) | default(100) }} roles: - - { role: skyline, - tags: skyline } + - { role: skyline, tags: skyline } diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml index 36f5b4806e..6bf2b9e267 100644 --- a/etc/kolla/globals.yml +++ b/etc/kolla/globals.yml @@ -6,7 +6,7 @@ # the parameter and change its value. # Dummy variable to allow Ansible to accept this file. -workaround_ansible_issue_8743: yes +workaround_ansible_issue_8743: true ################### # Ansible options @@ -99,7 +99,7 @@ workaround_ansible_issue_8743: yes # Custom docker registry settings: #docker_registry: # Please read the docs carefully before applying docker_registry_insecure. 
-#docker_registry_insecure: "no" +#docker_registry_insecure: false #docker_registry_username: # docker_registry_password is set in the passwords.yml file. @@ -109,15 +109,15 @@ workaround_ansible_issue_8743: yes # Docker client timeout in seconds. #docker_client_timeout: 120 -#docker_configure_for_zun: "no" -#containerd_configure_for_zun: "no" +#docker_configure_for_zun: false +#containerd_configure_for_zun: false #containerd_grpc_gid: 42463 ################### # Messaging options ################### # Whether to enable TLS for oslo.messaging communication with RabbitMQ. -#om_enable_rabbitmq_tls: "{{ rabbitmq_enable_tls | bool }}" +# om_enable_rabbitmq_tls: "{{ rabbitmq_enable_tls | bool }}" # CA certificate bundle in containers using oslo.messaging with RabbitMQ TLS. #om_rabbitmq_cacert: "{{ rabbitmq_cacert }}" @@ -168,18 +168,18 @@ workaround_ansible_issue_8743: yes # Configure Neutron upgrade option, currently Kolla support # two upgrade ways for Neutron: legacy_upgrade and rolling_upgrade -# The variable "neutron_enable_rolling_upgrade: yes" is meaning rolling_upgrade +# The variable "neutron_enable_rolling_upgrade: true" is meaning rolling_upgrade # were enabled and opposite # Neutron rolling upgrade were enable by default -#neutron_enable_rolling_upgrade: "yes" +#neutron_enable_rolling_upgrade: true # Enable wrapper containers to keep Neutron agent restarts isolated from the main service containers -#neutron_agents_wrappers: "yes" +#neutron_agents_wrappers: true # Configure neutron logging framework to log ingress/egress connections to instances # for security groups rules. 
More information can be found here: # https://docs.openstack.org/neutron/latest/admin/config-logging.html -#enable_neutron_packet_logging: "no" +#enable_neutron_packet_logging: false #################### # keepalived options @@ -215,7 +215,7 @@ workaround_ansible_issue_8743: yes ##################### # Healthcheck options ##################### -#enable_container_healthchecks: "yes" +#enable_container_healthchecks: true # Healthcheck options for Docker containers # interval/timeout/start_period are in seconds #default_container_healthcheck_interval: 30 @@ -229,9 +229,9 @@ workaround_ansible_issue_8743: yes # Configures firewalld on both ubuntu and centos systems # for enabled services. # firewalld should be installed beforehand. -# disable_firewall: "true" -# enable_external_api_firewalld: "false" -# external_api_firewalld_zone: "public" +#disable_firewall: "true" +#enable_external_api_firewalld: "false" +#external_api_firewalld_zone: "public" ############# # TLS options @@ -239,16 +239,16 @@ workaround_ansible_issue_8743: yes # To provide encryption and authentication on the kolla_external_vip_interface, # TLS can be enabled. When TLS is enabled, certificates must be provided to # allow clients to perform authentication. 
-#kolla_enable_tls_internal: "no" +#kolla_enable_tls_internal: false #kolla_enable_tls_external: "{{ kolla_enable_tls_internal if kolla_same_external_internal_vip | bool else 'no' }}" #kolla_certificates_dir: "{{ node_config }}/certificates" #kolla_external_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy.pem" #kolla_internal_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy-internal.pem" #kolla_admin_openrc_cacert: "" -#kolla_copy_ca_into_containers: "no" +#kolla_copy_ca_into_containers: false #haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}" #haproxy_backend_cacert_dir: "/etc/ssl/certs" -#database_enable_tls_backend: "{{ 'yes' if kolla_enable_tls_backend | bool and enable_proxysql | bool else 'no' }}" +#database_enable_tls_backend: "{{ 'true' if kolla_enable_tls_backend | bool and enable_proxysql | bool else 'false' }}" ################## # Backend options ################## @@ -258,8 +258,8 @@ workaround_ansible_issue_8743: yes ##################### # Backend TLS options ##################### -#kolla_enable_tls_backend: "no" -#kolla_verify_tls_backend: "yes" +#kolla_enable_tls_backend: false +#kolla_verify_tls_backend: true #kolla_tls_backend_cert: "{{ kolla_certificates_dir }}/backend-cert.pem" #kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem" @@ -286,13 +286,13 @@ workaround_ansible_issue_8743: yes # LetsEncrypt certificate server options #################### #letsencrypt_cert_server: "https://acme-v02.api.letsencrypt.org/directory" -# attempt to renew Let's Encrypt certificate every 12 hours +# Attempt to renew Let's Encrypt certificate every 12 hours #letsencrypt_cron_renew_schedule: "0 */12 * * *" #################### # LetsEncrypt external account binding options #################### -#letsencrypt_external_account_binding: "no" +#letsencrypt_external_account_binding: false #letsencrypt_eab_hmac: "" #letsencrypt_eab_key_id: "" @@ -315,53 +315,53 @@ 
workaround_ansible_issue_8743: yes # Enable core OpenStack services. This includes: # glance, keystone, neutron, nova, heat, and horizon. -#enable_openstack_core: "yes" +#enable_openstack_core: true # These roles are required for Kolla to be operation, however a savvy deployer # could disable some of these required roles and run their own services. #enable_glance: "{{ enable_openstack_core | bool }}" -#enable_hacluster: "no" -#enable_haproxy: "yes" +#enable_hacluster: false +#enable_haproxy: true #enable_keepalived: "{{ enable_haproxy | bool }}" #enable_keystone: "{{ enable_openstack_core | bool }}" -#enable_mariadb: "yes" -#enable_memcached: "yes" +#enable_mariadb: true +#enable_memcached: true #enable_neutron: "{{ enable_openstack_core | bool }}" #enable_nova: "{{ enable_openstack_core | bool }}" -#enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'no' }}" +#enable_rabbitmq: "{{ 'true' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'false' }}" # OpenStack services can be enabled or disabled with these options -#enable_aodh: "no" -#enable_barbican: "no" -#enable_blazar: "no" -#enable_ceilometer: "no" -#enable_ceilometer_ipmi: "no" -#enable_cells: "no" -#enable_central_logging: "no" -#enable_ceph_rgw: "no" +#enable_aodh: false +#enable_barbican: false +#enable_blazar: false +#enable_ceilometer: false +#enable_ceilometer_ipmi: false +#enable_cells: false +#enable_central_logging: false +#enable_ceph_rgw: false #enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}" -#enable_cinder: "no" -#enable_cinder_backup: "yes" +#enable_cinder: false +#enable_cinder_backup: true #enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool }}" -#enable_cinder_backend_lvm: "no" -#enable_cinder_backend_nfs: "no" -#enable_cinder_backend_quobyte: "no" -#enable_cinder_backend_pure_iscsi: "no" -#enable_cinder_backend_pure_fc: "no" -#enable_cinder_backend_pure_roce: "no" 
-#enable_cinder_backend_pure_nvme_tcp: "no" -#enable_cinder_backend_lightbits: "no" -#enable_cloudkitty: "no" -#enable_collectd: "no" -#enable_cyborg: "no" -#enable_designate: "no" -#enable_destroy_images: "no" -#enable_etcd: "no" -#enable_fluentd: "yes" +#enable_cinder_backend_lvm: false +#enable_cinder_backend_nfs: false +#enable_cinder_backend_quobyte: false +#enable_cinder_backend_pure_iscsi: false +#enable_cinder_backend_pure_fc: false +#enable_cinder_backend_pure_roce: false +#enable_cinder_backend_pure_nvme_tcp: false +#enable_cinder_backend_lightbits: false +#enable_cloudkitty: false +#enable_collectd: false +#enable_cyborg: false +#enable_designate: false +#enable_destroy_images: false +#enable_etcd: false +#enable_fluentd: true #enable_fluentd_systemd: "{{ (enable_fluentd | bool) and (enable_central_logging | bool) }}" -#enable_gnocchi: "no" -#enable_gnocchi_statsd: "no" -#enable_grafana: "no" +#enable_gnocchi: false +#enable_gnocchi_statsd: false +#enable_grafana: false #enable_grafana_external: "{{ enable_grafana | bool }}" #enable_heat: "{{ enable_openstack_core | bool }}" #enable_horizon: "{{ enable_openstack_core | bool }}" @@ -382,41 +382,41 @@ workaround_ansible_issue_8743: yes #enable_horizon_watcher: "{{ enable_watcher | bool }}" #enable_horizon_zun: "{{ enable_zun | bool }}" #enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}" -#enable_ironic: "no" -#enable_ironic_neutron_agent: "no" +#enable_ironic: false +#enable_ironic_neutron_agent: false #enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}" -#enable_ironic_pxe_filter: "no" +#enable_ironic_pxe_filter: false #enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}" -#enable_kuryr: "no" -#enable_magnum: "no" -#enable_manila: "no" -#enable_manila_backend_generic: "no" -#enable_manila_backend_hnas: "no" -#enable_manila_backend_cephfs_native: "no" -#enable_manila_backend_cephfs_nfs: "no" 
-#enable_manila_backend_glusterfs_nfs: "no" -#enable_manila_backend_flashblade: "no" -#enable_mariabackup: "no" -#enable_masakari: "no" -#enable_mistral: "no" -#enable_multipathd: "no" -#enable_neutron_vpnaas: "no" -#enable_neutron_sriov: "no" -#enable_neutron_dvr: "no" -#enable_neutron_fwaas: "no" -#enable_neutron_qos: "no" -#enable_neutron_agent_ha: "no" -#enable_neutron_bgp_dragent: "no" -#enable_neutron_provider_networks: "no" -#enable_neutron_segments: "no" -#enable_neutron_sfc: "no" -#enable_neutron_trunk: "no" -#enable_neutron_metering: "no" -#enable_neutron_infoblox_ipam_agent: "no" -#enable_neutron_port_forwarding: "no" -#enable_nova_serialconsole_proxy: "no" -#enable_nova_ssh: "yes" -#enable_octavia: "no" +#enable_kuryr: false +#enable_magnum: false +#enable_manila: false +#enable_manila_backend_generic: false +#enable_manila_backend_hnas: false +#enable_manila_backend_cephfs_native: false +#enable_manila_backend_cephfs_nfs: false +#enable_manila_backend_glusterfs_nfs: false +#enable_manila_backend_flashblade: false +#enable_mariabackup: false +#enable_masakari: false +#enable_mistral: false +#enable_multipathd: false +#enable_neutron_vpnaas: false +#enable_neutron_sriov: false +#enable_neutron_dvr: false +#enable_neutron_fwaas: false +#enable_neutron_qos: false +#enable_neutron_agent_ha: false +#enable_neutron_bgp_dragent: false +#enable_neutron_provider_networks: false +#enable_neutron_segments: false +#enable_neutron_sfc: false +#enable_neutron_trunk: false +#enable_neutron_metering: false +#enable_neutron_infoblox_ipam_agent: false +#enable_neutron_port_forwarding: false +#enable_nova_serialconsole_proxy: false +#enable_nova_ssh: true +#enable_octavia: false #enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}" #enable_octavia_jobboard: "{{ enable_octavia | bool and 'amphora' in octavia_provider_drivers }}" #enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | 
bool and cloudkitty_storage_backend == 'opensearch') }}" @@ -424,19 +424,19 @@ workaround_ansible_issue_8743: yes #enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}" #enable_openvswitch: "{{ enable_neutron }}" #enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}" -#enable_ovs_dpdk: "no" -#enable_osprofiler: "no" +#enable_ovs_dpdk: false +#enable_osprofiler: false #enable_placement: "{{ enable_nova | bool or enable_zun | bool }}" -#enable_prometheus: "no" -#enable_proxysql: "yes" -#enable_valkey: "no" -#enable_skyline: "no" -#enable_tacker: "no" -#enable_telegraf: "no" -#enable_trove: "no" -#enable_trove_singletenant: "no" -#enable_watcher: "no" -#enable_zun: "no" +#enable_prometheus: false +#enable_proxysql: true +#enable_valkey: false +#enable_skyline: false +#enable_tacker: false +#enable_telegraf: false +#enable_trove: false +#enable_trove_singletenant: false +#enable_watcher: false +#enable_zun: false ############# # S3 options @@ -462,7 +462,7 @@ workaround_ansible_issue_8743: yes # and not to busy wait (+sbwt none +sbwtdcpu none +sbwtdio none): #rabbitmq_server_additional_erl_args: "+S 2:2 +sbwt none +sbwtdcpu none +sbwtdio none" # Whether to enable TLS encryption for RabbitMQ client-server communication. -#rabbitmq_enable_tls: "no" +#rabbitmq_enable_tls: false # CA certificate bundle in RabbitMQ container. #rabbitmq_cacert: "/etc/ssl/certs/{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}" @@ -476,7 +476,7 @@ workaround_ansible_issue_8743: yes # External Ceph options ####################### # External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes) -#external_ceph_cephx_enabled: "yes" +#external_ceph_cephx_enabled: true # Glance #ceph_glance_user: "glance" @@ -518,16 +518,16 @@ workaround_ansible_issue_8743: yes # Glance - Image Options ######################## # Configure image backend. 
-#glance_backend_ceph: "no" -#glance_backend_file: "yes" -#glance_backend_s3: "no" -#enable_glance_image_cache: "no" -#glance_enable_property_protection: "no" -#glance_enable_interoperable_image_import: "no" +#glance_backend_ceph: false +#glance_backend_file: true +#glance_backend_s3: false +#enable_glance_image_cache: false +#glance_enable_property_protection: false +#glance_enable_interoperable_image_import: false # Configure glance upgrade option. # Due to this feature being experimental in glance, -# the default value is "no". -#glance_enable_rolling_upgrade: "no" +# the default value is false. +#glance_enable_rolling_upgrade: false #################### # Glance S3 Backend @@ -563,7 +563,7 @@ workaround_ansible_issue_8743: yes # Cinder - Block Storage Options ################################ # Enable / disable Cinder backends -#cinder_backend_ceph: "no" +#cinder_backend_ceph: false #cinder_volume_group: "cinder-volumes" # Valid options are [ '', valkey, etcd ] #cinder_coordination_backend: "{{ 'valkey' if enable_valkey | bool else 'etcd' if enable_etcd | bool else '' }}" @@ -604,7 +604,7 @@ workaround_ansible_issue_8743: yes ######################## # Nova - Compute Options ######################## -#nova_backend_ceph: "no" +#nova_backend_ceph: false # Valid options are [ qemu, kvm ] #nova_compute_virt_type: "kvm" @@ -614,10 +614,10 @@ workaround_ansible_issue_8743: yes # The "nova_safety_upgrade" controls whether the nova services # are all stopped before rolling upgrade to the new version, -# for the safety and availability. If "nova_safety_upgrade" is "yes", +# for the safety and availability. If "nova_safety_upgrade" is true, # that will stop all nova services (except nova-compute) for no failed # API operations before upgrade to the new version. And opposite. 
-#nova_safety_upgrade: "no" +#nova_safety_upgrade: false # Valid options are [ none, novnc, spice ] #nova_console: "novnc" @@ -626,10 +626,10 @@ workaround_ansible_issue_8743: yes # Neutron - networking options ############################## # Enable distributed floating ip for OVN deployments -#neutron_ovn_distributed_fip: "no" +#neutron_ovn_distributed_fip: false # Enable DHCP agent(s) to use with OVN -#neutron_ovn_dhcp_agent: "no" +#neutron_ovn_dhcp_agent: false ############################# # Horizon - Dashboard Options @@ -645,7 +645,7 @@ workaround_ansible_issue_8743: yes # list of ranges - at least one must be configured, for example: # - range: 192.168.0.10,192.168.0.100 # See Kolla Ansible docs on Ironic for details. -#ironic_dnsmasq_dhcp_ranges: +# ironic_dnsmasq_dhcp_ranges: # PXE bootloader file for Ironic inspection, relative to /var/lib/ironic/tftpboot. #ironic_dnsmasq_boot_file: "pxelinux.0" @@ -654,7 +654,7 @@ workaround_ansible_issue_8743: yes # The variable "ironic_enable_rolling_upgrade: yes" is meaning rolling_upgrade # were enabled and opposite # Rolling upgrade were enable by default -#ironic_enable_rolling_upgrade: "yes" +#ironic_enable_rolling_upgrade: true # List of extra kernel parameters passed to the kernel used during inspection #ironic_kernel_cmdline_extras: [] @@ -726,7 +726,7 @@ workaround_ansible_issue_8743: yes #enable_prometheus_memcached_exporter: "{{ enable_prometheus | bool }}" #enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}" #enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bool }}" -#enable_prometheus_ceph_mgr_exporter: "no" +#enable_prometheus_ceph_mgr_exporter: false #enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}" #enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_elasticsearch | bool }}" #enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}" @@ -765,7 +765,7 @@ workaround_ansible_issue_8743: yes ########## 
# Configure telegraf to use the docker daemon itself as an input for # telemetry data. -#telegraf_enable_docker_input: "no" +#telegraf_enable_docker_input: false ########################################## # Octavia - openstack loadbalancer Options @@ -795,16 +795,16 @@ workaround_ansible_issue_8743: yes # Octavia security groups. lb-mgmt-sec-grp is for amphorae. #octavia_amp_security_groups: -# mgmt-sec-grp: -# name: "lb-mgmt-sec-grp" -# rules: -# - protocol: icmp -# - protocol: tcp -# src_port: 22 -# dst_port: 22 -# - protocol: tcp -# src_port: "{{ octavia_amp_listen_port }}" -# dst_port: "{{ octavia_amp_listen_port }}" +# mgmt-sec-grp: +# name: "lb-mgmt-sec-grp" +# rules: +# - protocol: icmp +# - protocol: tcp +# src_port: 22 +# dst_port: 22 +# - protocol: tcp +# src_port: "{{ octavia_amp_listen_port }}" +# dst_port: "{{ octavia_amp_listen_port }}" # Octavia management network. # See os_network and os_subnet for details. Supported parameters: @@ -862,4 +862,4 @@ workaround_ansible_issue_8743: yes ############## # If `etcd_remove_deleted_members` is enabled, Kolla Ansible will automatically # remove etcd members from the cluster that are no longer in the inventory. 
-#etcd_remove_deleted_members: "no" +#etcd_remove_deleted_members: false diff --git a/tools/setup-compute-libvirt.yml b/tools/setup-compute-libvirt.yml index 9df033bec9..d544d26261 100644 --- a/tools/setup-compute-libvirt.yml +++ b/tools/setup-compute-libvirt.yml @@ -18,7 +18,7 @@ - name: Enable/start systemd artifacts systemd: - enabled: yes + enabled: true state: started name: "{{ item }}" with_items: "{{ systemd_artifacts }}" From 68c9cba6c12cb37863c843bb7eb6fd9a3cc54db3 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 25 Nov 2025 07:51:16 +0100 Subject: [PATCH 161/165] ansible-lint: Fix command-instead-of-module/shell Change-Id: I4099b338b0d16bedf5731a3a63eeaad27a321d9b Signed-off-by: Michal Nasiadka --- .ansible-lint | 2 -- ansible/roles/loadbalancer/tasks/config_validate.yml | 2 +- ansible/roles/valkey/tasks/check.yml | 2 +- ansible/roles/valkey/tasks/upgrade.yml | 4 ++-- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index 8a566c1335..5c0c0640a1 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -37,6 +37,4 @@ skip_list: - var-naming[no-role-prefix] - risky-file-permissions - risky-shell-pipe - - command-instead-of-shell - - command-instead-of-module - yaml[line-length] diff --git a/ansible/roles/loadbalancer/tasks/config_validate.yml b/ansible/roles/loadbalancer/tasks/config_validate.yml index cfb336919c..acb6f99128 100644 --- a/ansible/roles/loadbalancer/tasks/config_validate.yml +++ b/ansible/roles/loadbalancer/tasks/config_validate.yml @@ -2,7 +2,7 @@ - name: Validating haproxy config files vars: service: "{{ loadbalancer_services['haproxy'] }}" - shell: >- + command: >- {{ kolla_container_engine }} exec -i haproxy haproxy -c -f /etc/haproxy/haproxy.cfg -f /etc/haproxy/services.d/ register: haproxy_config_validation_result diff --git a/ansible/roles/valkey/tasks/check.yml b/ansible/roles/valkey/tasks/check.yml index d7531527ac..1e3c863623 100644 --- a/ansible/roles/valkey/tasks/check.yml +++ 
b/ansible/roles/valkey/tasks/check.yml @@ -5,7 +5,7 @@ - name: Valkey ping pong check become: true - shell: >- + command: >- {{ kolla_container_engine }} exec valkey_server valkey-cli -h {{ api_interface_address }} -a {{ valkey_master_password }} ping register: valkey_check diff --git a/ansible/roles/valkey/tasks/upgrade.yml b/ansible/roles/valkey/tasks/upgrade.yml index 0de57486a5..5a7499dedf 100644 --- a/ansible/roles/valkey/tasks/upgrade.yml +++ b/ansible/roles/valkey/tasks/upgrade.yml @@ -36,7 +36,7 @@ become: true delegate_to: "{{ valkey_master_host }}" run_once: true - shell: >- + command: >- {{ kolla_container_engine }} exec valkey_server valkey-cli -h {{ api_interface_address }} -p {{ valkey_server_port }} info replication register: valkey_replication @@ -81,7 +81,7 @@ become: true delegate_to: "{{ valkey_master_host }}" run_once: true - shell: >- + command: >- {{ kolla_container_engine }} exec valkey_server valkey-cli -h {{ api_interface_address }} -p {{ valkey_server_port }} info replication register: valkey_role From a65b87e9b656c9eac2bbd938d248cec91289bed6 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Thu, 20 Oct 2022 14:07:38 +0200 Subject: [PATCH 162/165] docs: when reno is required In Zed PTG we decided to limit the amount of release notes this change adds the criteria when reno is required to the docs [1]: https://etherpad.opendev.org/p/kolla-zed-ptg#L149 Change-Id: I4f153a619eb57a75ebdb1aba4b71e422b30d74fe Signed-off-by: Michal Nasiadka --- doc/source/contributor/release-notes.rst | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/doc/source/contributor/release-notes.rst b/doc/source/contributor/release-notes.rst index 5c783b9b84..766c64a39a 100644 --- a/doc/source/contributor/release-notes.rst +++ b/doc/source/contributor/release-notes.rst @@ -22,9 +22,12 @@ Kolla Ansible (just like Kolla) uses the following release notes sections: * ``prelude`` --- filled in by the PTL before each release or RC. 
Other release note types may be applied per common sense. -Each change should include a release note unless being a ``TrivialFix`` -change or affecting only docs or CI. Such changes should `not` include -a release note to avoid confusion. + +When a release note is required: + +- ``feature`` - best included with docs change (if separate from the code) +- ``user impacting`` - to improve visibility of the change for users + Remember release notes are mostly for end users which, in case of Kolla, are OpenStack administrators/operators. In case of doubt, the core team will let you know what is required. From bb4feef1b905bb5a65feec5d6c2e2217504a69a5 Mon Sep 17 00:00:00 2001 From: Bertrand Lanson Date: Sun, 30 Nov 2025 13:25:31 +0100 Subject: [PATCH 163/165] Fix variable name typo in haproxy single external frontend The loadbalancer role used haproxy_external_single_frontend_public_port (hardcoded to 443) instead of haproxy_single_external_frontend_public_port (smart default based on TLS setting). This broke HTTP-only deployments where services expected port 80 but haproxy bound to port 443. 
Closes-bug: #2133456 Change-Id: I128f7cab52818d369ee7d3252d81383cf49eca97 Signed-off-by: Bertrand Lanson --- ansible/roles/loadbalancer/defaults/main.yml | 2 +- .../templates/haproxy/haproxy_external_frontend.cfg.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/loadbalancer/defaults/main.yml b/ansible/roles/loadbalancer/defaults/main.yml index 9f7bd0b04f..7e25f7a764 100644 --- a/ansible/roles/loadbalancer/defaults/main.yml +++ b/ansible/roles/loadbalancer/defaults/main.yml @@ -192,7 +192,7 @@ keepalived_track_script_enabled: true # Default backend for single external frontend (for missing mappings) haproxy_external_single_frontend_default_backend: "horizon_external_back" -haproxy_external_single_frontend_public_port: "443" +haproxy_single_external_frontend_public_port: "443" haproxy_external_single_frontend_options: - option httplog diff --git a/ansible/roles/loadbalancer/templates/haproxy/haproxy_external_frontend.cfg.j2 b/ansible/roles/loadbalancer/templates/haproxy/haproxy_external_frontend.cfg.j2 index 0d56555c5c..ae9f01d2a5 100644 --- a/ansible/roles/loadbalancer/templates/haproxy/haproxy_external_frontend.cfg.j2 +++ b/ansible/roles/loadbalancer/templates/haproxy/haproxy_external_frontend.cfg.j2 @@ -7,6 +7,6 @@ frontend external_frontend {{ http_option }} {% endfor %} http-request set-header X-Forwarded-Proto https if { ssl_fc } - bind {{ kolla_external_vip_address }}:{{ haproxy_external_single_frontend_public_port }} {{ external_tls_bind_info }} + bind {{ kolla_external_vip_address }}:{{ haproxy_single_external_frontend_public_port }} {{ external_tls_bind_info }} use_backend %[req.hdr(host),lower,map_dom(/etc/haproxy/external-frontend-map,{{ haproxy_external_single_frontend_default_backend }})] http-request deny if { path -i -m beg /server-status } From 1902193390867bed09ec2daa24e8d38ebcb0d15b Mon Sep 17 00:00:00 2001 From: Mohsen Sepandar Date: Sun, 16 Nov 2025 13:46:02 +0000 Subject: [PATCH 164/165] Add two unused 
variables to haproxy-config template file To add extra configuration lines to the backend section of the haproxy config files (tcp and http) for each service, the 'haproxy_backend_http_extra' and 'haproxy_backend_tcp_extra' variables were added to its jinja2 template file. Change-Id: I9e0e4b07af16463064e709896b83b6c86a799340 Signed-off-by: Mohsen Sepandar Closes-Bug: #1914393 --- .../templates/haproxy_single_service_split.cfg.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2 b/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2 index 7c3ad3fd59..76a92e9b0d 100644 --- a/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2 +++ b/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2 @@ -138,10 +138,10 @@ backend {{ service_name }}_back {% set host_group = haproxy_service.host_group|default(service.group) %} {# Additional options can be defined in config, and are additive to the global extras #} {% set frontend_tcp_extra = haproxy_service.frontend_tcp_extra|default([]) + haproxy_frontend_tcp_extra %} - {% set backend_tcp_extra = haproxy_service.backend_tcp_extra|default([]) %} + {% set backend_tcp_extra = haproxy_service.backend_tcp_extra|default([]) + haproxy_backend_tcp_extra %} {% set frontend_http_extra = haproxy_service.frontend_http_extra|default([]) + haproxy_frontend_http_extra %} {% set frontend_redirect_extra = haproxy_service.frontend_redirect_extra|default([]) + haproxy_frontend_redirect_extra %} - {% set backend_http_extra = haproxy_service.backend_http_extra|default([]) %} + {% set backend_http_extra = haproxy_service.backend_http_extra|default([]) + haproxy_backend_http_extra %} {% set tls_backend = haproxy_service.tls_backend|default(false) %} {# Allow for basic auth #} {% set auth_user = haproxy_service.auth_user|default() %} From 2de7ee8983b9d81c03f079be9e56af13749d7beb Mon Sep 17 00:00:00 
2001 From: Michal Nasiadka Date: Mon, 1 Dec 2025 08:18:42 +0100 Subject: [PATCH 165/165] designate: Fix config.yml after truthy fixes Change-Id: Ia5ad5a92c2f1f3a9197e2f6f078c97ccbe95b445 Signed-off-by: Michal Nasiadka --- ansible/roles/designate/tasks/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/designate/tasks/config.yml b/ansible/roles/designate/tasks/config.yml index a6bf8065a6..f921a69d73 100644 --- a/ansible/roles/designate/tasks/config.yml +++ b/ansible/roles/designate/tasks/config.yml @@ -92,7 +92,7 @@ mode: "0660" become: true when: - - designate_backend == 'bind9' and designate_backend_external == 'no' + - designate_backend == 'bind9' and not designate_backend_external | bool - item.key in [ "designate-backend-bind9", "designate-worker" ] with_dict: "{{ designate_services | select_services_enabled_and_mapped_to_host }}" @@ -103,7 +103,7 @@ mode: "0660" become: true when: - - designate_backend == 'bind9' and designate_backend_external == 'no' + - designate_backend == 'bind9' and not designate_backend_external | bool - item.key in [ "designate-backend-bind9", "designate-worker" ] with_dict: "{{ designate_services | select_services_enabled_and_mapped_to_host }}"