diff --git a/README.rst b/README.rst
index 6a1ccf9620..4ee2c7dd04 100644
--- a/README.rst
+++ b/README.rst
@@ -64,7 +64,6 @@ Kolla Ansible deploys containers for the following OpenStack projects:
 - Skyline (`APIServer `__ and `Console `__)
 - `Tacker `__
 - `Trove `__
-- `Venus `__
 - `Watcher `__
 - `Zun `__
 
diff --git a/ansible/action_plugins/merge_configs.py b/ansible/action_plugins/merge_configs.py
index a825835506..67a7da1cd5 100644
--- a/ansible/action_plugins/merge_configs.py
+++ b/ansible/action_plugins/merge_configs.py
@@ -20,10 +20,21 @@
 from ansible import constants
 from ansible.plugins import action
 
+# TODO(dougszu): From Ansible 12 onwards we must explicitly trust templates.
+# Since this feature is not supported in previous releases, we define a
+# noop method here for backwards compatibility. This can be removed in the
+# G cycle.
+try:
+    from ansible.template import trust_as_template
+except ImportError:
+    def trust_as_template(template):
+        return template
+
 from io import StringIO
 
 from oslo_config import iniparser
 
+
 _ORPHAN_SECTION = 'TEMPORARY_ORPHAN_VARIABLE_SECTION'
 
 DOCUMENTATION = '''
@@ -150,7 +161,7 @@ def read_config(self, source, config):
         # Only use config if present
         if os.access(source, os.R_OK):
             with open(source, 'r') as f:
-                template_data = f.read()
+                template_data = trust_as_template(f.read())
 
             # set search path to mimic 'template' module behavior
             searchpath = [
diff --git a/ansible/action_plugins/merge_yaml.py b/ansible/action_plugins/merge_yaml.py
index ea7350bf73..d2c15eb702 100644
--- a/ansible/action_plugins/merge_yaml.py
+++ b/ansible/action_plugins/merge_yaml.py
@@ -23,6 +23,16 @@
 from ansible import errors as ansible_errors
 from ansible.plugins import action
 
+# TODO(dougszu): From Ansible 12 onwards we must explicitly trust templates.
+# Since this feature is not supported in previous releases, we define a
+# noop method here for backwards compatibility. This can be removed in the
+# G cycle.
+try:
+    from ansible.template import trust_as_template
+except ImportError:
+    def trust_as_template(template):
+        return template
+
 DOCUMENTATION = '''
 ---
 module: merge_yaml
@@ -91,7 +101,7 @@ def read_config(self, source):
         # Only use config if present
         if source and os.access(source, os.R_OK):
             with open(source, 'r') as f:
-                template_data = f.read()
+                template_data = trust_as_template(f.read())
 
             # set search path to mimic 'template' module behavior
             searchpath = [
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
deleted file mode 100644
index bc40c355f7..0000000000
--- a/ansible/group_vars/all.yml
+++ /dev/null
@@ -1,1412 +0,0 @@
----
-# The options in this file can be overridden in 'globals.yml'
-
-# The "temp" files that are created before merge need to stay persistent due
-# to the fact that ansible will register a "change" if it has to create them
-# again. Persistent files allow for idempotency
-container_config_directory: "/var/lib/kolla/config_files"
-
-# The directory on the deploy host containing globals.yml.
-node_config: "{{ CONFIG_DIR | default('/etc/kolla') }}" - -# The directory to merge custom config files the kolla's config files -node_custom_config: "{{ node_config }}/config" - -# The directory to store the config files on the destination node -node_config_directory: "/etc/kolla" - -# The group which own node_config_directory, you can use a non-root -# user to deploy kolla -config_owner_user: "root" -config_owner_group: "root" - -################### -# Ansible options -################### - -# This variable is used as the "filter" argument for the setup module. For -# instance, if one wants to remove/ignore all Neutron interface facts: -# kolla_ansible_setup_filter: "ansible_[!qt]*" -# By default, we do not provide a filter. -kolla_ansible_setup_filter: "{{ omit }}" - -# This variable is used as the "gather_subset" argument for the setup module. -# For instance, if one wants to avoid collecting facts via facter: -# kolla_ansible_setup_gather_subset: "all,!facter" -# By default, we do not provide a gather subset. -kolla_ansible_setup_gather_subset: "{{ omit }}" - -# This variable determines which hosts require facts when using --limit. Facts -# will be gathered using delegation for hosts in this list that are not -# included in the limit. -# By default, this list includes all hosts. -kolla_ansible_delegate_facts_hosts: "{{ groups['all'] }}" - -################### -# Kolla options -################### -# Valid options are [ COPY_ONCE, COPY_ALWAYS ] -config_strategy: "COPY_ALWAYS" - -# Valid options are ['centos', 'debian', 'rocky', 'ubuntu'] -kolla_base_distro: "rocky" - -kolla_internal_vip_address: "{{ kolla_internal_address | default('') }}" -kolla_internal_fqdn: "{{ kolla_internal_vip_address }}" -kolla_external_vip_address: "{{ kolla_internal_vip_address }}" -kolla_same_external_internal_vip: "{{ kolla_external_vip_address | ansible.utils.ipaddr('address') == kolla_internal_vip_address | ansible.utils.ipaddr('address') }}" -kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_same_external_internal_vip | bool else kolla_external_vip_address }}" - -kolla_dev_repos_directory: "/opt/stack/" -kolla_dev_repos_git: "https://opendev.org/openstack" -kolla_dev_repos_pull: "no" -kolla_dev_mode: "no" -kolla_source_version: "{% if openstack_release == 'master' %}master{% else %}stable/{{ openstack_release }}{% endif %}" - -# Proxy settings for containers such as magnum that need internet access -container_http_proxy: "" -container_https_proxy: "" -container_no_proxy: "localhost,127.0.0.1" - -container_proxy_no_proxy_entries: - - "{{ container_no_proxy }}" - - "{{ api_interface_address }}" - - "{{ kolla_internal_vip_address | default('') }}" - -container_proxy: - http_proxy: "{{ container_http_proxy }}" - https_proxy: "{{ container_https_proxy }}" - no_proxy: "{{ container_proxy_no_proxy_entries | select | join(',') }}" - -# By default, Kolla API services bind to the network address assigned -# to the api_interface. Allow the bind address to be an override. 
-api_interface_address: "{{ 'api' | kolla_address }}" - - -#################### -# Database options -#################### -database_address: "{{ kolla_internal_fqdn }}" -database_user: "root" -database_port: "3306" -database_connection_recycle_time: 10 -database_max_pool_size: 1 -database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool ) and ( enable_proxysql | bool)) else 'no' }}" -database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool ) and ( enable_proxysql | bool)) else 'no' }}" - -#################### -# Container engine options -#################### -kolla_container_engine: "docker" - -#################### -# Docker options -#################### -docker_registry_email: -docker_registry: "quay.io" -docker_namespace: "openstack.kolla" -docker_image_name_prefix: "" -docker_image_url: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}" -docker_registry_username: -# Please read the docs carefully before applying docker_registry_insecure. -docker_registry_insecure: "no" -docker_runtime_directory: "" -# Docker client timeout in seconds. -docker_client_timeout: 120 - -# Docker networking options -docker_disable_default_iptables_rules: "yes" -docker_disable_default_network: "{{ docker_disable_default_iptables_rules }}" -docker_disable_ip_forward: "{{ docker_disable_default_iptables_rules }}" - -# Retention settings for Docker logs -docker_log_max_file: "5" -docker_log_max_size: "50m" - -# Valid options are [ no, on-failure, always, unless-stopped ] -docker_restart_policy: "unless-stopped" - -# '0' means unlimited retries (applies only to 'on-failure' policy) -docker_restart_policy_retry: "10" - -# Extra docker options for Zun -docker_configure_for_zun: "no" -docker_zun_options: -H tcp://{{ api_interface_address | put_address_in_context('url') }}:2375 -docker_zun_config: {} - -# Extra containerd options for Zun -containerd_configure_for_zun: "no" - -# Enable Ceph backed Cinder Volumes for zun -zun_configure_for_cinder_ceph: "no" - -# 42463 is the static group id of the zun user in the Zun image. -# If users customize this value on building the Zun images, -# they need to change this config accordingly. -containerd_grpc_gid: 42463 - -# Timeout after Docker sends SIGTERM before sending SIGKILL. 
-docker_graceful_timeout: 60 - -# Common options used throughout Docker -docker_common_options: - auth_email: "{{ docker_registry_email }}" - auth_password: "{{ docker_registry_password }}" - auth_registry: "{{ docker_registry }}" - auth_username: "{{ docker_registry_username }}" - environment: - KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" - restart_policy: "{{ docker_restart_policy }}" - restart_retries: "{{ docker_restart_policy_retry }}" - graceful_timeout: "{{ docker_graceful_timeout }}" - client_timeout: "{{ docker_client_timeout }}" - container_engine: "{{ kolla_container_engine }}" - -# Container engine specific volume paths -docker_volumes_path: "{{ docker_runtime_directory or '/var/lib/docker' }}/volumes" -podman_volumes_path: "{{ docker_runtime_directory or '/var/lib/containers' }}/storage/volumes" -container_engine_volumes_path: "{{ docker_volumes_path if kolla_container_engine == 'docker' else podman_volumes_path }}" - -##################### -# Volumes under /run -##################### -# Podman has problem with mounting whole /run directory -# described here: https://github.com/containers/podman/issues/16305 -run_default_volumes_podman: - - '/run/netns:/run/netns:shared' - - '/run/lock/nova:/run/lock/nova:shared' - - "/run/libvirt:/run/libvirt:shared" - - "/run/nova:/run/nova:shared" - - "/run/openvswitch:/run/openvswitch:shared" - -run_default_volumes_docker: [] - -run_default_subdirectories: - - '/run/netns' - - '/run/lock/nova' - - "/run/libvirt" - - "/run/nova" - - "/run/openvswitch" - -#################### -# Dimensions options -#################### -# Dimension options for Docker Containers -# NOTE(mnasiadka): Lower 1073741816 nofile limit on EL9 (RHEL9/CentOS Stream 9/Rocky Linux 9) -# fixes at least rabbitmq and mariadb -default_container_dimensions: "{{ default_container_dimensions_el9 if ansible_facts.os_family == 'RedHat' else '{}' }}" -default_container_dimensions_el9: "{{ default_docker_dimensions_el9 if kolla_container_engine == 'docker' else default_podman_dimensions_el9 }}" -default_docker_dimensions_el9: - ulimits: - nofile: - soft: 1048576 - hard: 1048576 -default_podman_dimensions_el9: - ulimits: - RLIMIT_NOFILE: - soft: 1048576 - hard: 1048576 - RLIMIT_NPROC: - soft: 1048576 - hard: 1048576 - -##################### -# Healthcheck options -##################### -enable_container_healthchecks: "yes" -# Healthcheck options for Docker containers -# interval/timeout/start_period are in seconds -default_container_healthcheck_interval: 30 -default_container_healthcheck_timeout: 30 -default_container_healthcheck_retries: 3 -default_container_healthcheck_start_period: 5 - -####################### -# Extra volumes options -####################### -# Extra volumes for Docker Containers -default_extra_volumes: [] - -#################### -# keepalived options -#################### -# Arbitrary unique number from 0..255 -keepalived_virtual_router_id: "51" - - -####################### -## Opensearch Options -######################## -opensearch_datadir_volume: "opensearch" - -opensearch_internal_endpoint: "{{ opensearch_address | kolla_url(internal_protocol, opensearch_port) }}" -opensearch_dashboards_internal_fqdn: "{{ kolla_internal_fqdn }}" -opensearch_dashboards_external_fqdn: "{{ kolla_external_fqdn }}" -opensearch_dashboards_internal_endpoint: "{{ opensearch_dashboards_internal_fqdn | kolla_url(internal_protocol, opensearch_dashboards_port) }}" -opensearch_dashboards_external_endpoint: "{{ opensearch_dashboards_external_fqdn | kolla_url(public_protocol, 
opensearch_dashboards_port_external) }}" -opensearch_dashboards_user: "opensearch" -opensearch_log_index_prefix: "{{ kibana_log_prefix if kibana_log_prefix is defined else 'flog' }}" - -################### -# Messaging options -################### -# oslo.messaging rpc transport valid options are [ rabbit, amqp ] -om_rpc_transport: "rabbit" -om_rpc_user: "{{ rabbitmq_user }}" -om_rpc_password: "{{ rabbitmq_password }}" -om_rpc_port: "{{ rabbitmq_port }}" -om_rpc_group: "rabbitmq" -om_rpc_vhost: "/" - -rpc_transport_url: "{{ om_rpc_transport }}://{% for host in groups[om_rpc_group] %}{{ om_rpc_user }}:{{ om_rpc_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_rpc_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_rpc_vhost }}" - -# oslo.messaging notify transport valid options are [ rabbit ] -om_notify_transport: "rabbit" -om_notify_user: "{{ rabbitmq_user }}" -om_notify_password: "{{ rabbitmq_password }}" -om_notify_port: "{{ rabbitmq_port }}" -om_notify_group: "rabbitmq" -om_notify_vhost: "/" - -notify_transport_url: "{{ om_notify_transport }}://{% for host in groups[om_notify_group] %}{{ om_notify_user }}:{{ om_notify_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_notify_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_notify_vhost }}" - -# Whether to enable TLS for oslo.messaging communication with RabbitMQ. -om_enable_rabbitmq_tls: "{{ rabbitmq_enable_tls | bool }}" -# CA certificate bundle in containers using oslo.messaging with RabbitMQ TLS. -om_rabbitmq_cacert: "{{ rabbitmq_cacert }}" -om_rabbitmq_qos_prefetch_count: "1" - -om_enable_rabbitmq_stream_fanout: true - -#################### -# Networking options -#################### -network_interface: "eth0" -neutron_external_interface: "eth1" -kolla_external_vip_interface: "{{ network_interface }}" -api_interface: "{{ network_interface }}" -migration_interface: "{{ api_interface }}" -tunnel_interface: "{{ network_interface }}" -octavia_network_interface: "{{ 'o-hm0' if octavia_network_type == 'tenant' else api_interface }}" -bifrost_network_interface: "{{ network_interface }}" -dns_interface: "{{ network_interface }}" -dpdk_tunnel_interface: "{{ neutron_external_interface }}" -ironic_http_interface: "{{ api_interface }}" -ironic_tftp_interface: "{{ api_interface }}" - -# Configure the address family (AF) per network. 
-# Valid options are [ ipv4, ipv6 ] -network_address_family: "ipv4" -api_address_family: "{{ network_address_family }}" -storage_address_family: "{{ network_address_family }}" -migration_address_family: "{{ api_address_family }}" -tunnel_address_family: "{{ network_address_family }}" -octavia_network_address_family: "{{ api_address_family }}" -bifrost_network_address_family: "{{ network_address_family }}" -dns_address_family: "{{ network_address_family }}" -dpdk_tunnel_address_family: "{{ network_address_family }}" -ironic_http_address_family: "{{ api_address_family }}" -ironic_tftp_address_family: "{{ api_address_family }}" - -migration_interface_address: "{{ 'migration' | kolla_address }}" -tunnel_interface_address: "{{ 'tunnel' | kolla_address }}" -octavia_network_interface_address: "{{ 'octavia_network' | kolla_address }}" -dpdk_tunnel_interface_address: "{{ 'dpdk_tunnel' | kolla_address }}" -ironic_http_interface_address: "{{ 'ironic_http' | kolla_address }}" -ironic_tftp_interface_address: "{{ 'ironic_tftp' | kolla_address }}" - -# Valid options are [ openvswitch, ovn, linuxbridge ] -# Do note linuxbridge is *EXPERIMENTAL* in Neutron since Zed and it requires extra tweaks to config to be usable. -# For details, see: https://docs.openstack.org/neutron/latest/admin/config-experimental-framework.html -neutron_plugin_agent: "openvswitch" - -# Valid options are [ internal, infoblox ] -neutron_ipam_driver: "internal" - -# The default ports used by each service. -# The list should be in alphabetical order -aodh_internal_fqdn: "{{ kolla_internal_fqdn }}" -aodh_external_fqdn: "{{ kolla_external_fqdn }}" -aodh_internal_endpoint: "{{ aodh_internal_fqdn | kolla_url(internal_protocol, aodh_api_port) }}" -aodh_public_endpoint: "{{ aodh_external_fqdn | kolla_url(public_protocol, aodh_api_public_port) }}" -aodh_api_port: "8042" -aodh_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else aodh_api_port }}" -aodh_api_listen_port: "{{ aodh_api_port }}" - -barbican_internal_fqdn: "{{ kolla_internal_fqdn }}" -barbican_external_fqdn: "{{ kolla_external_fqdn }}" -barbican_internal_endpoint: "{{ barbican_internal_fqdn | kolla_url(internal_protocol, barbican_api_port) }}" -barbican_public_endpoint: "{{ barbican_external_fqdn | kolla_url(public_protocol, barbican_api_public_port) }}" -barbican_api_port: "9311" -barbican_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else barbican_api_port }}" -barbican_api_listen_port: "{{ barbican_api_port }}" - -blazar_internal_fqdn: "{{ kolla_internal_fqdn }}" -blazar_external_fqdn: "{{ kolla_external_fqdn }}" -blazar_internal_base_endpoint: "{{ blazar_internal_fqdn | kolla_url(internal_protocol, blazar_api_port) }}" -blazar_public_base_endpoint: "{{ blazar_external_fqdn | kolla_url(public_protocol, blazar_api_public_port) }}" -blazar_api_port: "1234" -blazar_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else blazar_api_port }}" -blazar_api_listen_port: "{{ blazar_api_port }}" - -ceph_rgw_internal_fqdn: "{{ kolla_internal_fqdn }}" -ceph_rgw_external_fqdn: "{{ kolla_external_fqdn }}" -ceph_rgw_internal_base_endpoint: "{{ ceph_rgw_internal_fqdn | kolla_url(internal_protocol, ceph_rgw_port) }}" -ceph_rgw_public_base_endpoint: "{{ ceph_rgw_external_fqdn | kolla_url(public_protocol, ceph_rgw_public_port) }}" -ceph_rgw_port: "6780" -ceph_rgw_public_port: "{{ 
haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ceph_rgw_port }}" - -cinder_internal_fqdn: "{{ kolla_internal_fqdn }}" -cinder_external_fqdn: "{{ kolla_external_fqdn }}" -cinder_internal_base_endpoint: "{{ cinder_internal_fqdn | kolla_url(internal_protocol, cinder_api_port) }}" -cinder_public_base_endpoint: "{{ cinder_external_fqdn | kolla_url(public_protocol, cinder_api_public_port) }}" -cinder_api_port: "8776" -cinder_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cinder_api_port }}" -cinder_api_listen_port: "{{ cinder_api_port }}" - -cloudkitty_internal_fqdn: "{{ kolla_internal_fqdn }}" -cloudkitty_external_fqdn: "{{ kolla_external_fqdn }}" -cloudkitty_internal_endpoint: "{{ cloudkitty_internal_fqdn | kolla_url(internal_protocol, cloudkitty_api_port) }}" -cloudkitty_public_endpoint: "{{ cloudkitty_external_fqdn | kolla_url(public_protocol, cloudkitty_api_public_port) }}" -cloudkitty_api_port: "8889" -cloudkitty_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cloudkitty_api_port }}" -cloudkitty_api_listen_port: "{{ cloudkitty_api_port }}" - -collectd_udp_port: "25826" - -cyborg_internal_fqdn: "{{ kolla_internal_fqdn }}" -cyborg_external_fqdn: "{{ kolla_external_fqdn }}" -cyborg_api_port: "6666" -cyborg_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cyborg_api_port }}" -cyborg_api_listen_port: "{{ cyborg_api_port }}" - -designate_internal_fqdn: "{{ kolla_internal_fqdn }}" -designate_external_fqdn: "{{ kolla_external_fqdn }}" -designate_internal_endpoint: "{{ designate_internal_fqdn | kolla_url(internal_protocol, designate_api_port) }}" -designate_public_endpoint: "{{ designate_external_fqdn | kolla_url(public_protocol, designate_api_public_port) }}" -designate_api_port: "9001" -designate_api_listen_port: "{{ designate_api_port }}" -designate_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else designate_api_port }}" -designate_bind_port: "53" -designate_mdns_port: "{{ '53' if designate_backend == 'infoblox' else '5354' }}" -designate_rndc_port: "953" - -etcd_client_port: "2379" -etcd_peer_port: "2380" -etcd_enable_tls: "{{ kolla_enable_tls_backend }}" -etcd_protocol: "{{ 'https' if etcd_enable_tls | bool else 'http' }}" - -fluentd_syslog_port: "5140" - -glance_internal_fqdn: "{{ kolla_internal_fqdn }}" -glance_external_fqdn: "{{ kolla_external_fqdn }}" -glance_internal_endpoint: "{{ glance_internal_fqdn | kolla_url(internal_protocol, glance_api_port) }}" -glance_public_endpoint: "{{ glance_external_fqdn | kolla_url(public_protocol, glance_api_public_port) }}" -glance_api_port: "9292" -glance_api_listen_port: "{{ glance_api_port }}" -glance_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else glance_api_port }}" -glance_tls_proxy_stats_port: "9293" - -gnocchi_internal_fqdn: "{{ kolla_internal_fqdn }}" -gnocchi_external_fqdn: "{{ kolla_external_fqdn }}" -gnocchi_internal_endpoint: "{{ gnocchi_internal_fqdn | kolla_url(internal_protocol, gnocchi_api_port) }}" -gnocchi_public_endpoint: "{{ gnocchi_external_fqdn | kolla_url(public_protocol, gnocchi_api_public_port) }}" -gnocchi_api_port: "8041" -gnocchi_api_listen_port: "{{ gnocchi_api_port }}" -gnocchi_api_public_port: "{{ haproxy_single_external_frontend_public_port if 
haproxy_single_external_frontend | bool else gnocchi_api_port }}" - -grafana_internal_fqdn: "{{ kolla_internal_fqdn }}" -grafana_external_fqdn: "{{ kolla_external_fqdn }}" -grafana_internal_endpoint: "{{ grafana_internal_fqdn | kolla_url(internal_protocol, grafana_server_port) }}" -grafana_public_endpoint: "{{ grafana_external_fqdn | kolla_url(public_protocol, grafana_server_public_port) }}" -grafana_server_port: "3000" -grafana_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else grafana_server_port }}" -grafana_server_listen_port: "{{ grafana_server_port }}" - -haproxy_stats_port: "1984" -haproxy_monitor_port: "61313" -haproxy_ssh_port: "2985" -# configure SSL/TLS settings for haproxy config, one of [modern, intermediate, legacy]: -kolla_haproxy_ssl_settings: "modern" - -haproxy_ssl_settings: "{{ ssl_legacy_settings if kolla_haproxy_ssl_settings == 'legacy' else ssl_intermediate_settings if kolla_haproxy_ssl_settings == 'intermediate' else ssl_modern_settings | default(ssl_modern_settings) }}" - -ssl_legacy_settings: | - ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES - ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11 - -ssl_intermediate_settings: | - ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 - ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets - ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 - ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets - -ssl_modern_settings: | - ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets - ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 - ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets - -heat_internal_fqdn: "{{ kolla_internal_fqdn }}" -heat_external_fqdn: "{{ kolla_external_fqdn }}" -heat_internal_base_endpoint: "{{ heat_internal_fqdn | kolla_url(internal_protocol, heat_api_port) }}" -heat_public_base_endpoint: "{{ heat_external_fqdn | kolla_url(public_protocol, heat_api_public_port) }}" -heat_api_port: "8004" -heat_api_listen_port: "{{ heat_api_port }}" -heat_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_port }}" -heat_cfn_internal_fqdn: "{{ kolla_internal_fqdn }}" -heat_cfn_external_fqdn: "{{ kolla_external_fqdn }}" -heat_cfn_internal_base_endpoint: "{{ heat_cfn_internal_fqdn | kolla_url(internal_protocol, heat_api_cfn_port) }}" -heat_cfn_public_base_endpoint: "{{ heat_cfn_external_fqdn | kolla_url(public_protocol, heat_api_cfn_public_port) }}" -heat_api_cfn_port: "8000" -heat_api_cfn_listen_port: "{{ heat_api_cfn_port }}" 
-heat_api_cfn_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_cfn_port }}" - -horizon_internal_fqdn: "{{ kolla_internal_fqdn }}" -horizon_external_fqdn: "{{ kolla_external_fqdn }}" -horizon_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port) }}" -horizon_public_endpoint: "{{ kolla_external_fqdn | kolla_url(public_protocol, horizon_tls_port if kolla_enable_tls_external | bool else horizon_port) }}" -horizon_port: "80" -horizon_tls_port: "443" -horizon_listen_port: "{{ horizon_tls_port if horizon_enable_tls_backend | bool else horizon_port }}" - -influxdb_http_port: "8086" - -ironic_internal_fqdn: "{{ kolla_internal_fqdn }}" -ironic_external_fqdn: "{{ kolla_external_fqdn }}" -ironic_internal_endpoint: "{{ ironic_internal_fqdn | kolla_url(internal_protocol, ironic_api_port) }}" -ironic_public_endpoint: "{{ ironic_external_fqdn | kolla_url(public_protocol, ironic_api_public_port) }}" -ironic_api_port: "6385" -ironic_api_listen_port: "{{ ironic_api_port }}" -ironic_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ironic_api_port }}" -ironic_http_port: "8089" -ironic_prometheus_exporter_port: "9608" - -iscsi_port: "3260" - -keystone_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else keystone_public_listen_port }}" -keystone_public_listen_port: "5000" -keystone_internal_port: "5000" -keystone_internal_listen_port: "{{ keystone_internal_port }}" - -keystone_ssh_port: "8023" - -kuryr_port: "23750" - -letsencrypt_webserver_port: "8081" -letsencrypt_managed_certs: "{{ '' if not enable_letsencrypt | bool else ('internal' if letsencrypt_internal_cert_server != '' and kolla_same_external_internal_vip | bool else ('internal,external' if letsencrypt_internal_cert_server != '' and letsencrypt_external_cert_server != '' else ('internal' if letsencrypt_internal_cert_server != '' else ('external' if letsencrypt_external_cert_server != '' and not kolla_same_external_internal_vip | bool else '')))) }}" -letsencrypt_external_cert_server: "https://acme-v02.api.letsencrypt.org/directory" -letsencrypt_internal_cert_server: "" - -magnum_internal_fqdn: "{{ kolla_internal_fqdn }}" -magnum_external_fqdn: "{{ kolla_external_fqdn }}" -magnum_internal_base_endpoint: "{{ magnum_internal_fqdn | kolla_url(internal_protocol, magnum_api_port) }}" -magnum_public_base_endpoint: "{{ magnum_external_fqdn | kolla_url(public_protocol, magnum_api_public_port) }}" -magnum_api_port: "9511" -magnum_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else magnum_api_port }}" -magnum_api_listen_port: "{{ magnum_api_port }}" - -manila_internal_fqdn: "{{ kolla_internal_fqdn }}" -manila_external_fqdn: "{{ kolla_external_fqdn }}" -manila_internal_base_endpoint: "{{ manila_internal_fqdn | kolla_url(internal_protocol, manila_api_port) }}" -manila_public_base_endpoint: "{{ manila_external_fqdn | kolla_url(public_protocol, manila_api_public_port) }}" -manila_api_port: "8786" -manila_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else manila_api_port }}" -manila_api_listen_port: "{{ manila_api_port }}" - -mariadb_port: "{{ database_port }}" -mariadb_wsrep_port: "4567" -mariadb_ist_port: "4568" -mariadb_sst_port: "4444" -mariadb_clustercheck_port: 
"4569" -mariadb_enable_tls_backend: "{{ database_enable_tls_backend }}" - -mariadb_monitor_user: "{{ 'monitor' if enable_proxysql | bool else 'haproxy' }}" - -mariadb_datadir_volume: "mariadb" - -mariadb_default_database_shard_id: 0 -mariadb_default_database_shard_hosts: "{% set default_shard = [] %}{% for host in groups['mariadb'] %}{% if hostvars[host]['mariadb_shard_id'] is not defined or hostvars[host]['mariadb_shard_id'] == mariadb_default_database_shard_id %}{{ default_shard.append(host) }}{% endif %}{% endfor %}{{ default_shard }}" -mariadb_shard_id: "{{ mariadb_default_database_shard_id }}" -mariadb_shard_name: "shard_{{ mariadb_shard_id }}" -mariadb_shard_group: "mariadb_{{ mariadb_shard_name }}" -mariadb_loadbalancer: "{{ 'proxysql' if enable_proxysql | bool else 'haproxy' }}" -mariadb_backup_target: "{{ 'active' if mariadb_loadbalancer == 'haproxy' else 'replica' }}" -mariadb_shard_root_user_prefix: "root_shard_" -mariadb_shard_backup_user_prefix: "backup_shard_" -mariadb_shards_info: "{{ groups['mariadb'] | database_shards_info() }}" - -masakari_internal_fqdn: "{{ kolla_internal_fqdn }}" -masakari_external_fqdn: "{{ kolla_external_fqdn }}" -masakari_internal_endpoint: "{{ masakari_internal_fqdn | kolla_url(internal_protocol, masakari_api_port) }}" -masakari_public_endpoint: "{{ masakari_external_fqdn | kolla_url(public_protocol, masakari_api_public_port) }}" -masakari_api_port: "15868" -masakari_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else masakari_api_port }}" -masakari_api_listen_port: "{{ masakari_api_port }}" -masakari_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" - -memcached_port: "11211" -memcache_security_strategy: "ENCRYPT" - -mistral_internal_fqdn: "{{ kolla_internal_fqdn }}" -mistral_external_fqdn: "{{ kolla_external_fqdn }}" -mistral_internal_base_endpoint: "{{ mistral_internal_fqdn | kolla_url(internal_protocol, mistral_api_port) }}" -mistral_public_base_endpoint: "{{ mistral_external_fqdn | kolla_url(public_protocol, mistral_api_public_port) }}" -mistral_api_port: "8989" -mistral_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else mistral_api_port }}" -mistral_api_listen_port: "{{ mistral_api_port }}" - -neutron_internal_fqdn: "{{ kolla_internal_fqdn }}" -neutron_external_fqdn: "{{ kolla_external_fqdn }}" -neutron_internal_endpoint: "{{ neutron_internal_fqdn | kolla_url(internal_protocol, neutron_server_port) }}" -neutron_public_endpoint: "{{ neutron_external_fqdn | kolla_url(public_protocol, neutron_server_public_port) }}" -neutron_server_port: "9696" -neutron_server_listen_port: "{{ neutron_server_port }}" -neutron_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else neutron_server_port }}" - -nova_internal_fqdn: "{{ kolla_internal_fqdn }}" -nova_external_fqdn: "{{ kolla_external_fqdn }}" -nova_internal_base_endpoint: "{{ nova_internal_fqdn | kolla_url(internal_protocol, nova_api_port) }}" -nova_public_base_endpoint: "{{ nova_external_fqdn | kolla_url(public_protocol, nova_api_public_port) }}" -nova_api_port: "8774" -nova_api_listen_port: "{{ nova_api_port }}" -nova_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_api_port }}" -nova_metadata_internal_fqdn: "{{ kolla_internal_fqdn }}" -nova_metadata_external_fqdn: "{{ kolla_external_fqdn }}" 
-nova_metadata_port: "8775" -nova_metadata_listen_port: "{{ nova_metadata_port }}" -nova_novncproxy_fqdn: "{{ kolla_external_fqdn }}" -nova_novncproxy_port: "6080" -nova_novncproxy_listen_port: "{{ nova_novncproxy_port }}" -nova_novncproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_novncproxy_port }}" -nova_spicehtml5proxy_fqdn: "{{ kolla_external_fqdn }}" -nova_spicehtml5proxy_port: "6082" -nova_spicehtml5proxy_listen_port: "{{ nova_spicehtml5proxy_port }}" -nova_spicehtml5proxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_spicehtml5proxy_port }}" -nova_serialproxy_fqdn: "{{ kolla_external_fqdn }}" -nova_serialproxy_port: "6083" -nova_serialproxy_listen_port: "{{ nova_serialproxy_port }}" -nova_serialproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_serialproxy_port }}" -nova_serialproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}" - -octavia_internal_fqdn: "{{ kolla_internal_fqdn }}" -octavia_external_fqdn: "{{ kolla_external_fqdn }}" -octavia_internal_endpoint: "{{ octavia_internal_fqdn | kolla_url(internal_protocol, octavia_api_port) }}" -octavia_public_endpoint: "{{ octavia_external_fqdn | kolla_url(public_protocol, octavia_api_public_port) }}" -octavia_api_port: "9876" -octavia_api_listen_port: "{{ octavia_api_port }}" -octavia_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else octavia_api_port }}" -octavia_health_manager_port: "5555" - -# NOTE: If an external ElasticSearch cluster port is specified, -# we default to using that port in services with ElasticSearch -# endpoints. This is for backwards compatibility. 
-opensearch_port: "{{ elasticsearch_port | default('9200') }}" -opensearch_dashboards_port: "5601" -opensearch_dashboards_port_external: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else opensearch_dashboards_port }}" -opensearch_dashboards_listen_port: "{{ opensearch_dashboards_port }}" - -ovn_nb_db_port: "6641" -ovn_sb_db_port: "6642" -# OVN SB Relay related variables -ovn_sb_db_relay_count: "{{ ((groups['ovn-controller'] | length) / ovn_sb_db_relay_compute_per_relay | int) | round(0, 'ceil') | int }}" -ovn_sb_db_relay_compute_per_relay: "50" -ovn_sb_db_relay_port_prefix: "1664" -ovn_sb_db_relay_port: "{{ ovn_sb_db_relay_port_prefix ~ ovn_sb_db_relay_client_group_id }}" -ovn_sb_db_relay_client_group_id: "{{ range(1, ovn_sb_db_relay_count | int + 1) | random(seed=inventory_hostname) }}" -ovn_nb_connection: "{% for host in groups['ovn-nb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_nb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}" -ovn_sb_connection: "{{ ovn_sb_connection_relay if enable_ovn_sb_db_relay | bool else ovn_sb_connection_no_relay }}" -ovn_sb_connection_no_relay: "{% for host in groups['ovn-sb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_sb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}" -ovn_sb_connection_relay: "{% for host in groups['ovn-sb-db-relay'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_sb_db_relay_port }}{% if not loop.last %},{% endif %}{% endfor %}" - -ovsdb_port: "6640" - -placement_internal_fqdn: "{{ kolla_internal_fqdn }}" -placement_external_fqdn: "{{ kolla_external_fqdn }}" -placement_internal_endpoint: "{{ placement_internal_fqdn | kolla_url(internal_protocol, placement_api_port) }}" -placement_public_endpoint: "{{ placement_external_fqdn | kolla_url(public_protocol, placement_api_public_port) }}" -# Default Placement API port of 8778 already in use -placement_api_port: "8780" -placement_api_listen_port: "{{ placement_api_port }}" -placement_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else placement_api_port }}" - -prometheus_internal_fqdn: "{{ kolla_internal_fqdn }}" -prometheus_external_fqdn: "{{ kolla_external_fqdn }}" -prometheus_internal_endpoint: "{{ prometheus_internal_fqdn | kolla_url(internal_protocol, prometheus_port) }}" -prometheus_public_endpoint: "{{ prometheus_external_fqdn | kolla_url(public_protocol, prometheus_public_port) }}" -prometheus_port: "9091" -prometheus_listen_port: "{{ prometheus_port }}" -prometheus_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_port }}" -prometheus_node_exporter_port: "9100" -prometheus_mysqld_exporter_port: "9104" -prometheus_haproxy_exporter_port: "9101" -prometheus_memcached_exporter_port: "9150" -prometheus_rabbitmq_exporter_port: "{{ rabbitmq_prometheus_port }}" -# Default cadvisor port of 8080 already in use -prometheus_cadvisor_port: "18080" -prometheus_fluentd_integration_port: "24231" -prometheus_libvirt_exporter_port: "9177" -prometheus_etcd_integration_port: "{{ etcd_client_port }}" -proxysql_prometheus_exporter_port: "6070" - -# Prometheus alertmanager ports -prometheus_alertmanager_internal_fqdn: "{{ kolla_internal_fqdn }}" -prometheus_alertmanager_external_fqdn: "{{ kolla_external_fqdn }}" -prometheus_alertmanager_internal_endpoint: "{{ prometheus_alertmanager_internal_fqdn | 
kolla_url(internal_protocol, prometheus_alertmanager_port) }}" -prometheus_alertmanager_public_endpoint: "{{ prometheus_alertmanager_external_fqdn | kolla_url(public_protocol, prometheus_alertmanager_public_port) }}" -prometheus_alertmanager_port: "9093" -prometheus_alertmanager_cluster_port: "9094" -prometheus_alertmanager_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_alertmanager_port }}" -prometheus_alertmanager_listen_port: "{{ prometheus_alertmanager_port }}" - -# Prometheus openstack-exporter ports -prometheus_openstack_exporter_port: "9198" -prometheus_elasticsearch_exporter_port: "9108" - -# Prometheus blackbox-exporter ports -prometheus_blackbox_exporter_port: "9115" - -# Prometheus instance label to use for metrics -prometheus_instance_label: - -proxysql_admin_port: "6032" - -rabbitmq_port: "{{ '5671' if rabbitmq_enable_tls | bool else '5672' }}" -rabbitmq_management_port: "15672" -rabbitmq_cluster_port: "25672" -rabbitmq_epmd_port: "4369" -rabbitmq_prometheus_port: "15692" - -redis_port: "6379" -redis_sentinel_port: "26379" - -skyline_apiserver_internal_fqdn: "{{ kolla_internal_fqdn }}" -skyline_apiserver_external_fqdn: "{{ kolla_external_fqdn }}" -skyline_apiserver_internal_endpoint: "{{ skyline_apiserver_internal_fqdn | kolla_url(internal_protocol, skyline_apiserver_port) }}" -skyline_apiserver_public_endpoint: "{{ skyline_apiserver_external_fqdn | kolla_url(public_protocol, skyline_apiserver_public_port) }}" -skyline_console_internal_fqdn: "{{ kolla_internal_fqdn }}" -skyline_console_external_fqdn: "{{ kolla_external_fqdn }}" -skyline_console_internal_endpoint: "{{ skyline_console_internal_fqdn | kolla_url(internal_protocol, skyline_console_port) }}" -skyline_console_public_endpoint: "{{ skyline_console_external_fqdn | kolla_url(public_protocol, skyline_console_public_port) }}" -skyline_apiserver_port: "9998" -skyline_apiserver_listen_port: "{{ skyline_apiserver_port }}" -skyline_apiserver_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_apiserver_port }}" -skyline_console_port: "9999" -skyline_console_listen_port: "{{ skyline_console_port }}" -skyline_console_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_console_port }}" -skyline_enable_sso: "{{ enable_keystone_federation | bool and keystone_identity_providers | selectattr('protocol', 'equalto', 'openid') | list | count > 0 }}" - -syslog_udp_port: "{{ fluentd_syslog_port }}" - -tacker_internal_fqdn: "{{ kolla_internal_fqdn }}" -tacker_external_fqdn: "{{ kolla_external_fqdn }}" -tacker_internal_endpoint: "{{ tacker_internal_fqdn | kolla_url(internal_protocol, tacker_server_port) }}" -tacker_public_endpoint: "{{ tacker_external_fqdn | kolla_url(public_protocol, tacker_server_public_port) }}" -tacker_server_port: "9890" -tacker_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else tacker_server_port }}" -tacker_server_listen_port: "{{ tacker_server_port }}" - -trove_internal_fqdn: "{{ kolla_internal_fqdn }}" -trove_external_fqdn: "{{ kolla_external_fqdn }}" -trove_internal_base_endpoint: "{{ trove_internal_fqdn | kolla_url(internal_protocol, trove_api_port) }}" -trove_public_base_endpoint: "{{ trove_external_fqdn | kolla_url(public_protocol, trove_api_public_port) }}" -trove_api_port: "8779" -trove_api_public_port: "{{ 
haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else trove_api_port }}" -trove_api_listen_port: "{{ trove_api_port }}" - -venus_internal_fqdn: "{{ kolla_internal_fqdn }}" -venus_external_fqdn: "{{ kolla_external_fqdn }}" -venus_internal_endpoint: "{{ venus_internal_fqdn | kolla_url(internal_protocol, venus_api_port) }}" -venus_public_endpoint: "{{ venus_external_fqdn | kolla_url(public_protocol, venus_api_public_port) }}" -venus_api_port: "10010" -venus_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else venus_api_port }}" -venus_api_listen_port: "{{ venus_api_port }}" - -watcher_internal_fqdn: "{{ kolla_internal_fqdn }}" -watcher_external_fqdn: "{{ kolla_external_fqdn }}" -watcher_internal_endpoint: "{{ watcher_internal_fqdn | kolla_url(internal_protocol, watcher_api_port) }}" -watcher_public_endpoint: "{{ watcher_external_fqdn | kolla_url(public_protocol, watcher_api_public_port) }}" -watcher_api_port: "9322" -watcher_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else watcher_api_port }}" -watcher_api_listen_port: "{{ watcher_api_port }}" - -zun_api_port: "9517" -zun_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else zun_api_port }}" -zun_api_listen_port: "{{ zun_api_port }}" -zun_wsproxy_internal_fqdn: "{{ kolla_internal_fqdn }}" -zun_wsproxy_external_fqdn: "{{ kolla_external_fqdn }}" -zun_wsproxy_port: "6784" -zun_wsproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}" -zun_cni_daemon_port: "9036" -zun_internal_fqdn: "{{ kolla_internal_fqdn }}" -zun_external_fqdn: "{{ kolla_external_fqdn }}" -zun_internal_base_endpoint: "{{ zun_internal_fqdn | kolla_url(internal_protocol, zun_api_port) }}" -zun_public_base_endpoint: "{{ zun_external_fqdn | kolla_url(public_protocol, zun_api_public_port) }}" - -public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}" -internal_protocol: "{{ 'https' if kolla_enable_tls_internal | bool else 'http' }}" - -################## -# Firewall options -################## -enable_external_api_firewalld: "false" -external_api_firewalld_zone: "public" - -#################### -# OpenStack options -#################### -openstack_release: "master" -# Docker image tag used by default. -openstack_tag: "{{ openstack_release }}-{{ kolla_base_distro }}-{{ kolla_base_distro_version }}{{ openstack_tag_suffix }}" -openstack_tag_suffix: "" -openstack_logging_debug: "False" - -openstack_region_name: "RegionOne" - -# A list of policy file formats that are supported by Oslo.policy -supported_policy_format_list: - - policy.yaml - - policy.json - -# In the context of multi-regions, list here the name of all your regions. -multiple_regions_names: - - "{{ openstack_region_name }}" - -openstack_service_workers: "{{ [ansible_facts.processor_vcpus, 5] | min }}" -openstack_service_rpc_workers: "{{ [ansible_facts.processor_vcpus, 3] | min }}" - -# Optionally allow Kolla to set sysctl values -set_sysctl: "yes" - -# Optionally change the path to sysctl.conf modified by Kolla Ansible plays. -kolla_sysctl_conf_path: /etc/sysctl.conf - -# Endpoint type used to connect with OpenStack services with ansible modules. 
-# Valid options are [ public, internal ] -openstack_interface: "internal" - -# Openstack CA certificate bundle file -# CA bundle file must be added to both the Horizon and Kolla Toolbox containers -openstack_cacert: "" - -# Enable core OpenStack services. This includes: -# glance, keystone, neutron, nova, heat, and horizon. -enable_openstack_core: "yes" - -# These roles are required for Kolla to be operation, however a savvy deployer -# could disable some of these required roles and run their own services. -enable_glance: "{{ enable_openstack_core | bool }}" -enable_haproxy: "yes" -enable_keepalived: "{{ enable_haproxy | bool }}" -enable_loadbalancer: "{{ enable_haproxy | bool or enable_keepalived | bool or enable_proxysql | bool }}" -enable_keystone: "{{ enable_openstack_core | bool }}" -enable_keystone_federation: "{{ (keystone_identity_providers | length > 0) and (keystone_identity_mappings | length > 0) }}" -enable_mariadb: "yes" -enable_memcached: "yes" -enable_neutron: "{{ enable_openstack_core | bool }}" -enable_nova: "{{ enable_openstack_core | bool }}" -enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'no' }}" - -# NOTE: Most memcached clients handle load-balancing via client side -# hashing (consistent or not) logic, so going under the covers and messing -# with things that the clients are not aware of is generally wrong -enable_haproxy_memcached: "no" - -# Additional optional OpenStack features and services are specified here -enable_aodh: "no" -enable_barbican: "no" -enable_blazar: "no" -enable_ceilometer: "no" -enable_ceilometer_ipmi: "no" -enable_ceilometer_prometheus_pushgateway: "no" -enable_cells: "no" -enable_central_logging: "no" -enable_ceph_rgw: "no" -enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}" -enable_cinder: "no" -enable_cinder_backup: "yes" -enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool }}" -enable_cinder_backend_lvm: "no" -enable_cinder_backend_nfs: "no" -enable_cinder_backend_quobyte: "no" -enable_cinder_backend_pure_iscsi: "no" -enable_cinder_backend_pure_fc: "no" -enable_cinder_backend_pure_roce: "no" -enable_cinder_backend_pure_nvme_tcp: "no" -enable_cinder_backend_lightbits: "no" -enable_cloudkitty: "no" -enable_collectd: "no" -enable_cyborg: "no" -enable_designate: "no" -enable_etcd: "no" -enable_fluentd: "yes" -enable_fluentd_systemd: "{{ (enable_fluentd | bool) and (enable_central_logging | bool) }}" -enable_gnocchi: "no" -enable_gnocchi_statsd: "no" -enable_grafana: "no" -enable_grafana_external: "{{ enable_grafana | bool }}" -enable_hacluster: "{{ enable_masakari_hostmonitor | bool }}" -enable_heat: "{{ enable_openstack_core | bool }}" -enable_horizon: "{{ enable_openstack_core | bool }}" -enable_horizon_blazar: "{{ enable_blazar | bool }}" -enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}" -enable_horizon_designate: "{{ enable_designate | bool }}" -enable_horizon_fwaas: "{{ enable_neutron_fwaas | bool }}" -enable_horizon_heat: "{{ enable_heat | bool }}" -enable_horizon_ironic: "{{ enable_ironic | bool }}" -enable_horizon_magnum: "{{ enable_magnum | bool }}" -enable_horizon_manila: "{{ enable_manila | bool }}" -enable_horizon_masakari: "{{ enable_masakari | bool }}" -enable_horizon_mistral: "{{ enable_mistral | bool }}" -enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}" -enable_horizon_octavia: "{{ enable_octavia | bool }}" -enable_horizon_tacker: "{{ enable_tacker | bool }}" -enable_horizon_trove: "{{ enable_trove | bool }}" 
-enable_horizon_venus: "{{ enable_venus | bool }}" -enable_horizon_watcher: "{{ enable_watcher | bool }}" -enable_horizon_zun: "{{ enable_zun | bool }}" -enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}" -enable_ironic: "no" -enable_ironic_dnsmasq: "{{ enable_ironic | bool }}" -enable_ironic_neutron_agent: "no" -enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}" -enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}" -enable_kuryr: "no" -enable_letsencrypt: "no" -enable_magnum: "no" -enable_manila: "no" -enable_manila_backend_generic: "no" -enable_manila_backend_hnas: "no" -enable_manila_backend_cephfs_native: "no" -enable_manila_backend_cephfs_nfs: "no" -enable_manila_backend_glusterfs_nfs: "no" -enable_manila_backend_flashblade: "no" -enable_mariabackup: "no" -enable_masakari: "no" -enable_masakari_instancemonitor: "{{ enable_masakari | bool }}" -enable_masakari_hostmonitor: "{{ enable_masakari | bool }}" -enable_mistral: "no" -enable_multipathd: "no" -enable_neutron_vpnaas: "no" -enable_neutron_sriov: "no" -enable_neutron_mlnx: "no" -enable_neutron_dvr: "no" -enable_neutron_fwaas: "no" -enable_neutron_qos: "no" -enable_neutron_agent_ha: "no" -enable_neutron_bgp_dragent: "no" -enable_neutron_provider_networks: "no" -enable_neutron_segments: "no" -enable_neutron_packet_logging: "no" -enable_neutron_sfc: "no" -enable_neutron_taas: "no" -enable_neutron_trunk: "no" -enable_neutron_metering: "no" -enable_neutron_infoblox_ipam_agent: "no" -enable_neutron_port_forwarding: "no" -enable_nova_libvirt_container: "{{ nova_compute_virt_type in ['kvm', 'qemu'] }}" -enable_nova_serialconsole_proxy: "no" -enable_nova_ssh: "yes" -enable_octavia: "no" -enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}" -enable_octavia_jobboard: "{{ enable_octavia | bool and 'amphora' in octavia_provider_drivers }}" -enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}" -enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}" -enable_ovn_sb_db_relay: "{{ enable_ovn | bool }}" -enable_ovs_dpdk: "no" -enable_osprofiler: "no" -enable_placement: "{{ enable_nova | bool or enable_zun | bool }}" -enable_prometheus: "no" -enable_proxysql: "yes" -enable_redis: "no" -enable_skyline: "no" -enable_tacker: "no" -enable_telegraf: "no" -enable_trove: "no" -enable_trove_singletenant: "no" -enable_venus: "no" -enable_watcher: "no" -enable_zun: "no" - -ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}" -designate_keystone_user: "designate" -ironic_keystone_user: "ironic" -neutron_keystone_user: "neutron" -nova_keystone_user: "nova" -placement_keystone_user: "placement" -cinder_keystone_user: "cinder" -glance_keystone_user: "glance" - -# Nova fake driver and the number of fake driver per compute node -enable_nova_fake: "no" -num_nova_fake_per_node: 5 - -# Clean images options are specified here -enable_destroy_images: "no" - -#################### -# Global Options -#################### -# List of containers to skip during stop command in YAML list format -# skip_stop_containers: -# - container1 -# - container2 -skip_stop_containers: [] - -#################### -# Logging options -#################### - -# NOTE: If an external ElasticSearch cluster address is configured, all -# services with ElasticSearch endpoints should be configured to log -# to the external cluster by default. 
This is for backwards compatibility. -opensearch_address: "{{ elasticsearch_address if elasticsearch_address is defined else kolla_internal_fqdn }}" -enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'opensearch') }}" -enable_opensearch_dashboards: "{{ enable_opensearch | bool }}" -enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}" - -#################### -# Redis options -#################### -redis_connection_string: "redis://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}default:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}?sentinel=kolla{% else %}&sentinel_fallback={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}{{ redis_connection_string_extras }}" -redis_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes" - -#################### -# Osprofiler options -#################### -# valid values: ["elasticsearch", "redis"] -osprofiler_backend: "elasticsearch" -opensearch_connection_string: "elasticsearch://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}" -osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else opensearch_connection_string }}" - -#################### -# RabbitMQ options -#################### -rabbitmq_user: "openstack" -rabbitmq_monitoring_user: "" -# Whether to enable TLS encryption for RabbitMQ client-server communication. -rabbitmq_enable_tls: "no" -# CA certificate bundle in RabbitMQ container. -rabbitmq_cacert: "/etc/ssl/certs/{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.crt' }}" -rabbitmq_datadir_volume: "rabbitmq" - -#################### -# HAProxy options -#################### -haproxy_user: "openstack" -haproxy_enable_external_vip: "{{ 'no' if kolla_same_external_internal_vip | bool else 'yes' }}" -haproxy_enable_http2: "yes" -haproxy_http2_protocol: "alpn h2,http/1.1" -kolla_enable_tls_internal: "no" -kolla_enable_tls_external: "{{ kolla_enable_tls_internal if kolla_same_external_internal_vip | bool else 'no' }}" -kolla_certificates_dir: "{{ node_config }}/certificates" -kolla_external_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy.pem" -kolla_internal_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy-internal.pem" -kolla_admin_openrc_cacert: "" -kolla_copy_ca_into_containers: "no" -haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.crt' }}" -haproxy_backend_cacert_dir: "/etc/ssl/certs" -haproxy_single_external_frontend: false -haproxy_single_external_frontend_public_port: "{{ '443' if kolla_enable_tls_external | bool else '80' }}" - -################## -# Backend options -################## -kolla_httpd_keep_alive: "60" -kolla_httpd_timeout: "60" - -###################### -# Backend TLS options -###################### -kolla_enable_tls_backend: "no" -kolla_verify_tls_backend: "yes" -kolla_tls_backend_cert: "{{ kolla_certificates_dir }}/backend-cert.pem" -kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem" - -##################### -# ACME client options -##################### -acme_client_lego: "server lego {{ api_interface_address }}:{{ letsencrypt_webserver_port }}" -acme_client_servers: "{% set arr = [] %}{% if enable_letsencrypt | bool %}{{ 
arr.append(acme_client_lego) }}{% endif %}{{ arr }}" - -#################### -# Keystone options -#################### -keystone_internal_fqdn: "{{ kolla_internal_fqdn }}" -keystone_external_fqdn: "{{ kolla_external_fqdn }}" - -keystone_internal_url: "{{ keystone_internal_fqdn | kolla_url(internal_protocol, keystone_internal_port) }}" -keystone_public_url: "{{ keystone_external_fqdn | kolla_url(public_protocol, keystone_public_port) }}" - -keystone_admin_user: "admin" -keystone_admin_project: "admin" - -# Whether or not to apply changes to service user passwords when services are -# reconfigured -update_keystone_service_user_passwords: true - -default_project_domain_name: "Default" -default_project_domain_id: "default" - -default_user_domain_name: "Default" -default_user_domain_id: "default" - -# Keystone fernet token expiry in seconds. Default is 1 day. -fernet_token_expiry: 86400 -# Keystone window to allow expired fernet tokens. Default is 2 days. -fernet_token_allow_expired_window: 172800 -# Keystone fernet key rotation interval in seconds. Default is sum of token -# expiry and allow expired window, 3 days. This ensures the minimum number -# of keys are active. If this interval is lower than the sum of the token -# expiry and allow expired window, multiple active keys will be necessary. -fernet_key_rotation_interval: "{{ fernet_token_expiry + fernet_token_allow_expired_window }}" - -keystone_default_user_role: "member" - -# OpenStack authentication string. You should only need to override these if you -# are changing the admin tenant/project or user. -openstack_auth: - auth_url: "{{ keystone_internal_url }}" - username: "{{ keystone_admin_user }}" - password: "{{ keystone_admin_password }}" - project_name: "{{ keystone_admin_project }}" - domain_name: "default" - user_domain_name: "default" - -####################### -# Glance options -####################### -glance_backend_file: "{{ not (glance_backend_ceph | bool or glance_backend_s3 | bool) }}" -glance_backend_ceph: "no" -glance_backend_s3: "no" -enable_glance_image_cache: "no" -glance_file_datadir_volume: "glance" -glance_enable_rolling_upgrade: "no" -glance_enable_property_protection: "no" -glance_enable_interoperable_image_import: "no" -glance_api_hosts: "{{ [groups['glance-api'] | first] if glance_backend_file | bool and glance_file_datadir_volume == 'glance' else groups['glance-api'] }}" -# NOTE(mnasiadka): For use in common role -glance_enable_tls_backend: "{{ kolla_enable_tls_backend }}" - -####################### -# Barbican options -####################### -# Valid options are [ simple_crypto, p11_crypto ] -barbican_crypto_plugin: "simple_crypto" -barbican_library_path: "/usr/lib/libCryptoki2_64.so" - -################# -# Gnocchi options -################# -# Valid options are [ file, ceph ] -gnocchi_backend_storage: "file" - -# Valid options are [redis, ''] -gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}" -gnocchi_metric_datadir_volume: "gnocchi" - -################################# -# Cinder options -################################# -cinder_backend_ceph: "no" -cinder_backend_huawei: "no" -cinder_backend_huawei_xml_files: [] -cinder_volume_group: "cinder-volumes" -cinder_target_helper: "{{ 'lioadm' if ansible_facts.os_family == 'RedHat' else 'tgtadm' }}" -# Valid options are [ '', redis, etcd ] -cinder_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" - -# Valid options are [ nfs, ceph, s3 ] -cinder_backup_driver: "ceph" -cinder_backup_share: 
"" -cinder_backup_mount_options_nfs: "" - -####################### -# Cloudkitty options -####################### -# Valid options are 'sqlalchemy' or 'influxdb'. The default value is -# 'influxdb', which matches the default in Cloudkitty since the Stein release. -# When the backend is "influxdb", we also enable Influxdb. -# Also, when using 'influxdb' as the backend, we trigger the configuration/use -# of Cloudkitty storage backend version 2. -cloudkitty_storage_backend: "influxdb" - -####################### -# Designate options -####################### -# Valid options are [ bind9, infoblox ] -designate_backend: "bind9" -designate_ns_record: - - "ns1.example.org" -designate_backend_external: "no" -designate_backend_external_bind9_nameservers: "" -# Valid options are [ '', redis ] -designate_coordination_backend: "{{ 'redis' if enable_redis | bool else '' }}" - -designate_enable_notifications_sink: "no" -designate_notifications_topic_name: "notifications_designate" - -####################### -# Neutron options -####################### -neutron_bgp_router_id: "1.1.1.1" -neutron_bridge_name: "{{ 'br_dpdk' if enable_ovs_dpdk | bool else 'br-ex' }}" -neutron_physical_networks: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index }}{% if not loop.last %},{% endif %}{% endfor %}" -# Comma-separated type of enabled ml2 type drivers -neutron_type_drivers: "flat,vlan,vxlan{% if neutron_plugin_agent == 'ovn' %},geneve{% endif %}" -# Comma-separated types of tenant networks (should be listed in 'neutron_type_drivers') -# NOTE: for ironic this list should also contain 'flat' -neutron_tenant_network_types: "{% if neutron_plugin_agent == 'ovn' %}geneve{% else %}vxlan{% endif %}" - -# valid values: ["dvr", "dvr_no_external"] -neutron_compute_dvr_mode: "dvr" -computes_need_external_bridge: "{{ (enable_neutron_dvr | bool and neutron_compute_dvr_mode == 'dvr') or enable_neutron_provider_networks | bool or neutron_ovn_distributed_fip | bool }}" - -# Default DNS resolvers for virtual networks -neutron_dnsmasq_dns_servers: "1.1.1.1,8.8.8.8,8.8.4.4" - -# Set legacy iptables to allow kernels not supporting iptables-nft -neutron_legacy_iptables: "no" - -# Enable distributed floating ip for OVN deployments -neutron_ovn_distributed_fip: "no" - -# SRIOV physnet:interface mappings when SRIOV is enabled -# "sriovnet1" and tunnel_interface used here as placeholders -neutron_sriov_physnet_mappings: - sriovnet1: "{{ tunnel_interface }}" -neutron_enable_tls_backend: "{{ kolla_enable_tls_backend }}" - -# Set OVN network availability zones -neutron_ovn_availability_zones: [] - -# Enable OVN agent -neutron_enable_ovn_agent: "no" - -####################### -# Nova options -####################### -nova_backend_ceph: "no" -nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}" -# Valid options are [ kvm, qemu ] -nova_compute_virt_type: "kvm" -nova_instance_datadir_volume: "{{ 'nova_compute' if enable_nova_libvirt_container | bool else '/var/lib/nova' }}" -nova_safety_upgrade: "no" -# Valid options are [ none, novnc, spice ] -nova_console: "novnc" - -####################### -# Nova Database -####################### -nova_database_shard_id: "{{ mariadb_default_database_shard_id | int }}" -nova_cell0_database_shard_id: "{{ nova_database_shard_id | int }}" - -# These are kept for backwards compatibility, as cell0 references them. 
-nova_database_name: "nova" -nova_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}nova{% endif %}" -nova_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}" - -nova_cell0_database_name: "{{ nova_database_name }}_cell0" -nova_cell0_database_user: "{{ nova_database_user }}" -nova_cell0_database_address: "{{ nova_database_address }}" -nova_cell0_database_password: "{{ nova_database_password }}" - -####################### -# Horizon options -####################### -horizon_backend_database: false -horizon_keystone_multidomain: False - -# Enable deploying custom horizon policy files for services that don't have a -# horizon plugin but have a policy file. Override these when you have services -# not deployed by kolla-ansible but want custom policy files deployed for them -# in horizon. -enable_ceilometer_horizon_policy_file: "{{ enable_ceilometer }}" -enable_cinder_horizon_policy_file: "{{ enable_cinder }}" -enable_glance_horizon_policy_file: "{{ enable_glance }}" -enable_heat_horizon_policy_file: "{{ enable_heat }}" -enable_keystone_horizon_policy_file: "{{ enable_keystone }}" -enable_neutron_horizon_policy_file: "{{ enable_neutron }}" -enable_nova_horizon_policy_file: "{{ enable_nova }}" - -horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}" - -################### -# External Ceph options -################### -# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes) -external_ceph_cephx_enabled: "yes" - -ceph_cluster: "ceph" - -# External Ceph pool names -ceph_cinder_pool_name: "volumes" -ceph_cinder_backup_pool_name: "backups" -ceph_glance_pool_name: "images" -ceph_gnocchi_pool_name: "gnocchi" -ceph_nova_pool_name: "vms" - -ceph_cinder_backup_user: "cinder-backup" -ceph_cinder_user: "cinder" -ceph_glance_user: "glance" -ceph_gnocchi_user: "gnocchi" -ceph_manila_user: "manila" -ceph_nova_user: "{{ ceph_cinder_user }}" - -############################################# -# MariaDB component-specific database details -############################################# -# Whether to configure haproxy to load balance -# the external MariaDB server(s) -enable_external_mariadb_load_balancer: "no" -# Whether to use pre-configured databases / users -use_preconfigured_databases: "no" -# whether to use a common, preconfigured user -# for all component databases -use_common_mariadb_user: "no" - -############ -# Prometheus -############ -enable_prometheus_server: "{{ enable_prometheus | bool }}" -enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}" -enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}" -enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}" -enable_prometheus_memcached_exporter: "{{ enable_memcached | bool }}" -enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}" -enable_prometheus_fluentd_integration: "{{ enable_prometheus | bool and enable_fluentd | bool }}" -enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}" -enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bool }}" -enable_prometheus_ceph_mgr_exporter: "no" -enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}" -enable_prometheus_openstack_exporter_external: "no" -enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_opensearch | bool }}" -enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}" -enable_prometheus_rabbitmq_exporter: "{{ 
enable_prometheus | bool and enable_rabbitmq | bool }}" -enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}" -enable_prometheus_etcd_integration: "{{ enable_prometheus | bool and enable_etcd | bool }}" -enable_prometheus_proxysql_exporter: "{{ enable_prometheus | bool and enable_proxysql | bool }}" - -prometheus_alertmanager_user: "admin" -prometheus_ceph_exporter_interval: "{{ prometheus_scrape_interval }}" -prometheus_grafana_user: "grafana" -prometheus_haproxy_user: "haproxy" -prometheus_skyline_user: "skyline" -prometheus_scrape_interval: "60s" -prometheus_openstack_exporter_interval: "{{ prometheus_scrape_interval }}" -prometheus_openstack_exporter_timeout: "45s" -prometheus_elasticsearch_exporter_interval: "{{ prometheus_scrape_interval }}" -prometheus_cmdline_extras: -prometheus_ceph_mgr_exporter_endpoints: [] -prometheus_openstack_exporter_endpoint_type: "internal" -prometheus_openstack_exporter_compute_api_version: "latest" -prometheus_libvirt_exporter_interval: "60s" - - -#################### -# InfluxDB options -#################### -influxdb_address: "{{ kolla_internal_fqdn }}" -influxdb_datadir_volume: "influxdb" - -influxdb_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, influxdb_http_port) }}" - -######################### -# Internal Image options -######################### -kolla_base_distro_version_default_map: { - "centos": "stream9", - "debian": "bookworm", - "rocky": "9", - "ubuntu": "noble", -} - -distro_python_version: "3" - -kolla_base_distro_version: "{{ kolla_base_distro_version_default_map[kolla_base_distro] }}" - -############# -# S3 options -############# -# Common options for S3 Cinder Backup and Glance S3 backend. -s3_url: -s3_bucket: -s3_access_key: -s3_secret_key: - -########## -# Telegraf -########## -# Configure telegraf to use the docker daemon itself as an input for -# telemetry data. -telegraf_enable_docker_input: "no" - -# Valid options are [ '', redis, etcd ] -ironic_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" - -########## -# Octavia -########## -# Whether to run Kolla-Ansible's automatic configuration for Octavia. -# NOTE: if you upgrade from Ussuri, you must set `octavia_auto_configure` to `no` -# and keep your other Octavia config like before. -octavia_auto_configure: "{{ 'amphora' in octavia_provider_drivers }}" - -# Octavia network type options are [ tenant, provider ] -# * tenant indicates that we will create a tenant network and a network -# interface on the Octavia worker nodes for communication with amphorae. -# * provider indicates that we will create a flat or vlan provider network. -# In this case octavia_network_interface should be set to a network interface -# on the Octavia worker nodes on the same provider network. -octavia_network_type: "provider" - -################################### -# Identity federation configuration -################################### -# Here we configure all of the IdPs meta information that will be required to implement identity federation with OpenStack Keystone. -# We require the administrator to enter the following metadata: -# * name (internal name of the IdP in Keystone); -# * openstack_domain (the domain in Keystone that the IdP belongs to) -# * protocol (the federated protocol used by the IdP; e.g. openid or saml); -# * identifier (the IdP identifier; e.g. 
https://accounts.google.com); -# * public_name (the public name that will be shown for users in Horizon); -# * attribute_mapping (the attribute mapping to be used for this IdP. This mapping is configured in the "keystone_identity_mappings" configuration); -# * metadata_folder (folder containing all the identity provider metadata as jsons named as the identifier without the protocol -# and with '/' escaped as %2F followed with '.provider' or '.client' or '.conf'; e.g. accounts.google.com.provider; PS, all .conf, -# .provider and .client jsons must be in the folder, even if you dont override any conf in the .conf json, you must leave it as an empty json '{}'); -# * certificate_file (the path to the Identity Provider certificate file, the file must be named as 'certificate-key-id.pem'; -# e.g. LRVweuT51StjMdsna59jKfB3xw0r8Iz1d1J1HeAbmlw.pem; You can find the key-id in the Identity provider '.well-known/openid-configuration' jwks_uri as kid); -# -# The IdPs meta information are to be presented to Kolla-Ansible as the following example: -# keystone_identity_providers: -# - name: "myidp1" -# openstack_domain: "my-domain" -# protocol: "openid" -# identifier: "https://accounts.google.com" -# public_name: "Authenticate via myidp1" -# attribute_mapping: "mappingId1" -# metadata_folder: "path/to/metadata/folder" -# certificate_file: "path/to/certificate/file.pem" -# -# We also need to configure the attribute mapping that is used by IdPs. -# The configuration of attribute mappings is a list of objects, where each -# object must have a 'name' (that mapps to the 'attribute_mapping' to the IdP -# object in the IdPs set), and the 'file' with a full qualified path to a mapping file. -# keystone_identity_mappings: -# - name: "mappingId1" -# file: "/full/qualified/path/to/mapping/json/file/to/mappingId1" -# - name: "mappingId2" -# file: "/full/qualified/path/to/mapping/json/file/to/mappingId2" -# - name: "mappingId3" -# file: "/full/qualified/path/to/mapping/json/file/to/mappingId3" -keystone_identity_providers: [] -keystone_identity_mappings: [] - -#################### -# Corosync options -#################### - -# this is UDP port -hacluster_corosync_port: 5405 diff --git a/ansible/group_vars/all/aodh.yml b/ansible/group_vars/all/aodh.yml new file mode 100644 index 0000000000..2e431e11eb --- /dev/null +++ b/ansible/group_vars/all/aodh.yml @@ -0,0 +1,11 @@ +--- +enable_aodh: "no" + +# Ports +aodh_internal_fqdn: "{{ kolla_internal_fqdn }}" +aodh_external_fqdn: "{{ kolla_external_fqdn }}" +aodh_internal_endpoint: "{{ aodh_internal_fqdn | kolla_url(internal_protocol, aodh_api_port) }}" +aodh_public_endpoint: "{{ aodh_external_fqdn | kolla_url(public_protocol, aodh_api_public_port) }}" +aodh_api_port: "8042" +aodh_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else aodh_api_port }}" +aodh_api_listen_port: "{{ aodh_api_port }}" diff --git a/ansible/group_vars/all/barbican.yml b/ansible/group_vars/all/barbican.yml new file mode 100644 index 0000000000..36e256a018 --- /dev/null +++ b/ansible/group_vars/all/barbican.yml @@ -0,0 +1,17 @@ +--- +enable_barbican: "no" + +####################### +# Barbican options +####################### +# Valid options are [ simple_crypto, p11_crypto ] +barbican_crypto_plugin: "simple_crypto" +barbican_library_path: "/usr/lib/libCryptoki2_64.so" + +barbican_internal_fqdn: "{{ kolla_internal_fqdn }}" +barbican_external_fqdn: "{{ kolla_external_fqdn }}" +barbican_internal_endpoint: "{{ barbican_internal_fqdn | 
kolla_url(internal_protocol, barbican_api_port) }}" +barbican_public_endpoint: "{{ barbican_external_fqdn | kolla_url(public_protocol, barbican_api_public_port) }}" +barbican_api_port: "9311" +barbican_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else barbican_api_port }}" +barbican_api_listen_port: "{{ barbican_api_port }}" diff --git a/ansible/group_vars/all/bifrost.yml b/ansible/group_vars/all/bifrost.yml new file mode 100644 index 0000000000..a42e6c9ea1 --- /dev/null +++ b/ansible/group_vars/all/bifrost.yml @@ -0,0 +1,3 @@ +--- +bifrost_network_interface: "{{ network_interface }}" +bifrost_network_address_family: "{{ network_address_family }}" diff --git a/ansible/group_vars/all/blazar.yml b/ansible/group_vars/all/blazar.yml new file mode 100644 index 0000000000..10122ffdc4 --- /dev/null +++ b/ansible/group_vars/all/blazar.yml @@ -0,0 +1,11 @@ +--- +enable_blazar: "no" + +# Ports +blazar_internal_fqdn: "{{ kolla_internal_fqdn }}" +blazar_external_fqdn: "{{ kolla_external_fqdn }}" +blazar_internal_base_endpoint: "{{ blazar_internal_fqdn | kolla_url(internal_protocol, blazar_api_port) }}" +blazar_public_base_endpoint: "{{ blazar_external_fqdn | kolla_url(public_protocol, blazar_api_public_port) }}" +blazar_api_port: "1234" +blazar_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else blazar_api_port }}" +blazar_api_listen_port: "{{ blazar_api_port }}" diff --git a/ansible/group_vars/all/ceilometer.yml b/ansible/group_vars/all/ceilometer.yml new file mode 100644 index 0000000000..19e99eee03 --- /dev/null +++ b/ansible/group_vars/all/ceilometer.yml @@ -0,0 +1,4 @@ +--- +enable_ceilometer: "no" +enable_ceilometer_ipmi: "no" +enable_ceilometer_prometheus_pushgateway: "no" diff --git a/ansible/group_vars/all/ceph-rgw.yml b/ansible/group_vars/all/ceph-rgw.yml new file mode 100644 index 0000000000..3d3d4802b7 --- /dev/null +++ b/ansible/group_vars/all/ceph-rgw.yml @@ -0,0 +1,10 @@ +--- +enable_ceph_rgw: "no" +enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}" + +ceph_rgw_internal_fqdn: "{{ kolla_internal_fqdn }}" +ceph_rgw_external_fqdn: "{{ kolla_external_fqdn }}" +ceph_rgw_internal_base_endpoint: "{{ ceph_rgw_internal_fqdn | kolla_url(internal_protocol, ceph_rgw_port) }}" +ceph_rgw_public_base_endpoint: "{{ ceph_rgw_external_fqdn | kolla_url(public_protocol, ceph_rgw_public_port) }}" +ceph_rgw_port: "6780" +ceph_rgw_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ceph_rgw_port }}" diff --git a/ansible/group_vars/all/ceph.yml b/ansible/group_vars/all/ceph.yml new file mode 100644 index 0000000000..987717a65e --- /dev/null +++ b/ansible/group_vars/all/ceph.yml @@ -0,0 +1,21 @@ +--- +################### +# External Ceph options +################### +# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes) +external_ceph_cephx_enabled: "yes" + +ceph_cluster: "ceph" + +# External Ceph pool names +ceph_cinder_pool_name: "volumes" +ceph_cinder_backup_pool_name: "backups" +ceph_glance_pool_name: "images" +ceph_gnocchi_pool_name: "gnocchi" +ceph_nova_pool_name: "vms" + +ceph_cinder_backup_user: "cinder-backup" +ceph_cinder_user: "cinder" +ceph_glance_user: "glance" +ceph_gnocchi_user: "gnocchi" +ceph_nova_user: "{{ ceph_cinder_user }}" diff --git a/ansible/group_vars/all/cinder.yml b/ansible/group_vars/all/cinder.yml new file mode 100644 index 0000000000..e4659674ea --- /dev/null 
+++ b/ansible/group_vars/all/cinder.yml @@ -0,0 +1,40 @@ +--- +enable_cinder: "no" +enable_cinder_backup: "yes" +enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool }}" +enable_cinder_backend_lvm: "no" +enable_cinder_backend_nfs: "no" +enable_cinder_backend_quobyte: "no" +enable_cinder_backend_pure_iscsi: "no" +enable_cinder_backend_pure_fc: "no" +enable_cinder_backend_pure_roce: "no" +enable_cinder_backend_pure_nvme_tcp: "no" +enable_cinder_backend_lightbits: "no" + +################################# +# Cinder options +################################# +cinder_backend_ceph: "no" +cinder_backend_huawei: "no" +cinder_backend_huawei_xml_files: [] +cinder_volume_group: "cinder-volumes" +cinder_target_helper: "{{ 'lioadm' if ansible_facts.os_family == 'RedHat' else 'tgtadm' }}" +# Valid options are [ '', redis, etcd ] +cinder_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" + +# Valid options are [ nfs, ceph, s3 ] +cinder_backup_driver: "ceph" +cinder_backup_share: "" +cinder_backup_mount_options_nfs: "" + +storage_address_family: "{{ network_address_family }}" + +cinder_keystone_user: "cinder" + +cinder_internal_fqdn: "{{ kolla_internal_fqdn }}" +cinder_external_fqdn: "{{ kolla_external_fqdn }}" +cinder_internal_base_endpoint: "{{ cinder_internal_fqdn | kolla_url(internal_protocol, cinder_api_port) }}" +cinder_public_base_endpoint: "{{ cinder_external_fqdn | kolla_url(public_protocol, cinder_api_public_port) }}" +cinder_api_port: "8776" +cinder_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cinder_api_port }}" +cinder_api_listen_port: "{{ cinder_api_port }}" diff --git a/ansible/group_vars/all/cloudkitty.yml b/ansible/group_vars/all/cloudkitty.yml new file mode 100644 index 0000000000..a657c3bd0b --- /dev/null +++ b/ansible/group_vars/all/cloudkitty.yml @@ -0,0 +1,20 @@ +--- +enable_cloudkitty: "no" + +####################### +# Cloudkitty options +####################### +# Valid options are 'sqlalchemy' or 'influxdb'. The default value is +# 'influxdb', which matches the default in Cloudkitty since the Stein release. +# When the backend is "influxdb", we also enable Influxdb. +# Also, when using 'influxdb' as the backend, we trigger the configuration/use +# of Cloudkitty storage backend version 2. 
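+# As a sketch only: switching to the SQLAlchemy backend would be a one-line +# override in globals.yml, in which case InfluxDB is no longer enabled +# automatically for Cloudkitty: +# cloudkitty_storage_backend: "sqlalchemy"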
+cloudkitty_storage_backend: "influxdb" + +cloudkitty_internal_fqdn: "{{ kolla_internal_fqdn }}" +cloudkitty_external_fqdn: "{{ kolla_external_fqdn }}" +cloudkitty_internal_endpoint: "{{ cloudkitty_internal_fqdn | kolla_url(internal_protocol, cloudkitty_api_port) }}" +cloudkitty_public_endpoint: "{{ cloudkitty_external_fqdn | kolla_url(public_protocol, cloudkitty_api_public_port) }}" +cloudkitty_api_port: "8889" +cloudkitty_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cloudkitty_api_port }}" +cloudkitty_api_listen_port: "{{ cloudkitty_api_port }}" diff --git a/ansible/group_vars/all/collectd.yml b/ansible/group_vars/all/collectd.yml new file mode 100644 index 0000000000..8e62327160 --- /dev/null +++ b/ansible/group_vars/all/collectd.yml @@ -0,0 +1,4 @@ +--- +enable_collectd: "no" + +collectd_udp_port: "25826" diff --git a/ansible/group_vars/all/common.yml b/ansible/group_vars/all/common.yml new file mode 100644 index 0000000000..4a82c29ea3 --- /dev/null +++ b/ansible/group_vars/all/common.yml @@ -0,0 +1,359 @@ +--- +################### +# Ansible options +################### + +# This variable is used as the "filter" argument for the setup module. For +# instance, if one wants to remove/ignore all Neutron interface facts: +# kolla_ansible_setup_filter: "ansible_[!qt]*" +# By default, we do not provide a filter. +kolla_ansible_setup_filter: "{{ omit }}" + +# This variable is used as the "gather_subset" argument for the setup module. +# For instance, if one wants to avoid collecting facts via facter: +# kolla_ansible_setup_gather_subset: "all,!facter" +# By default, we do not provide a gather subset. +kolla_ansible_setup_gather_subset: "{{ omit }}" + +# This variable determines which hosts require facts when using --limit. Facts +# will be gathered using delegation for hosts in this list that are not +# included in the limit. +# By default, this list includes all hosts. +kolla_ansible_delegate_facts_hosts: "{{ groups['all'] }}" + +#################### +# Docker options +#################### +docker_registry_email: +docker_registry: "quay.io" +docker_namespace: "openstack.kolla" +docker_image_name_prefix: "" +docker_image_url: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}" +docker_registry_username: +# Please read the docs carefully before applying docker_registry_insecure. +docker_registry_insecure: "no" +docker_runtime_directory: "" +# Docker client timeout in seconds. +docker_client_timeout: 120 + +# Docker networking options +docker_disable_default_iptables_rules: "yes" +docker_disable_default_network: "{{ docker_disable_default_iptables_rules }}" +docker_disable_ip_forward: "{{ docker_disable_default_iptables_rules }}" + +# Retention settings for Docker logs +docker_log_max_file: "5" +docker_log_max_size: "50m" + +# Valid options are [ no, on-failure, always, unless-stopped ] +docker_restart_policy: "unless-stopped" + +# '0' means unlimited retries (applies only to 'on-failure' policy) +docker_restart_policy_retry: "10" + +# Timeout after Docker sends SIGTERM before sending SIGKILL. 
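+# For example, a deployment whose services shut down slowly could allow more +# time before SIGKILL with a globals.yml override such as (illustrative value +# only, a sketch rather than a recommendation): +# docker_graceful_timeout: 120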
+docker_graceful_timeout: 60 + +# Common options used throughout Docker +docker_common_options: + auth_email: "{{ docker_registry_email }}" + auth_password: "{{ docker_registry_password }}" + auth_registry: "{{ docker_registry }}" + auth_username: "{{ docker_registry_username }}" + environment: + KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" + restart_policy: "{{ docker_restart_policy }}" + restart_retries: "{{ docker_restart_policy_retry }}" + graceful_timeout: "{{ docker_graceful_timeout }}" + client_timeout: "{{ docker_client_timeout }}" + container_engine: "{{ kolla_container_engine }}" + +# Container engine specific volume paths +docker_volumes_path: "{{ docker_runtime_directory or '/var/lib/docker' }}/volumes" +podman_volumes_path: "{{ docker_runtime_directory or '/var/lib/containers' }}/storage/volumes" +container_engine_volumes_path: "{{ docker_volumes_path if kolla_container_engine == 'docker' else podman_volumes_path }}" + +##################### +# Volumes under /run +##################### +# Podman has problem with mounting whole /run directory +# described here: https://github.com/containers/podman/issues/16305 +run_default_volumes_podman: + - '/run/netns:/run/netns:shared' + - '/run/lock/nova:/run/lock/nova:shared' + - "/run/libvirt:/run/libvirt:shared" + - "/run/nova:/run/nova:shared" + - "/run/openvswitch:/run/openvswitch:shared" + +run_default_volumes_docker: [] + +#################### +# Dimensions options +#################### +# Dimension options for Docker Containers +# NOTE(mnasiadka): Lower 1073741816 nofile limit on EL9 (RHEL9/CentOS Stream 9/Rocky Linux 9) +# fixes at least rabbitmq and mariadb +default_container_dimensions: "{{ default_container_dimensions_el9 if ansible_facts.os_family == 'RedHat' else {} }}" +default_container_dimensions_el9: "{{ default_docker_dimensions_el9 if kolla_container_engine == 'docker' else default_podman_dimensions_el9 }}" +default_docker_dimensions_el9: + ulimits: + nofile: + soft: 1048576 + hard: 1048576 +default_podman_dimensions_el9: + ulimits: + RLIMIT_NOFILE: + soft: 1048576 + hard: 1048576 + RLIMIT_NPROC: + soft: 1048576 + hard: 1048576 + +##################### +# Healthcheck options +##################### +enable_container_healthchecks: "yes" +# Healthcheck options for Docker containers +# interval/timeout/start_period are in seconds +default_container_healthcheck_interval: 30 +default_container_healthcheck_timeout: 30 +default_container_healthcheck_retries: 3 +default_container_healthcheck_start_period: 5 + +####################### +# Extra volumes options +####################### +# Extra volumes for Docker Containers +default_extra_volumes: [] + +################## +# Firewall options +################## +enable_external_api_firewalld: "false" +external_api_firewalld_zone: "public" + +################## +# Backend options +################## +kolla_httpd_keep_alive: "60" +kolla_httpd_timeout: "60" + +# The "temp" files that are created before merge need to stay persistent due +# to the fact that ansible will register a "change" if it has to create them +# again. Persistent files allow for idempotency +container_config_directory: "/var/lib/kolla/config_files" + +# The directory on the deploy host containing globals.yml. 
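+# For example (sketch): passing '-e CONFIG_DIR=/opt/kolla' at run time would +# make node_config resolve to /opt/kolla, while leaving CONFIG_DIR unset falls +# back to the /etc/kolla default below.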
+node_config: "{{ CONFIG_DIR | default('/etc/kolla') }}" + +# The directory to merge custom config files the kolla's config files +node_custom_config: "{{ node_config }}/config" + +# The directory to store the config files on the destination node +node_config_directory: "/etc/kolla" + +# The group which own node_config_directory, you can use a non-root +# user to deploy kolla +config_owner_user: "root" +config_owner_group: "root" + +################### +# Kolla options +################### +# Valid options are [ COPY_ONCE, COPY_ALWAYS ] +config_strategy: "COPY_ALWAYS" + +# Valid options are ['centos', 'debian', 'rocky', 'ubuntu'] +kolla_base_distro: "rocky" + +kolla_internal_vip_address: "{{ kolla_internal_address | default('') }}" +kolla_internal_fqdn: "{{ kolla_internal_vip_address }}" +kolla_external_vip_address: "{{ kolla_internal_vip_address }}" +kolla_same_external_internal_vip: "{{ kolla_external_vip_address | ansible.utils.ipaddr('address') == kolla_internal_vip_address | ansible.utils.ipaddr('address') }}" +kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_same_external_internal_vip | bool else kolla_external_vip_address }}" + +kolla_dev_repos_directory: "/opt/stack/" +kolla_dev_repos_git: "https://opendev.org/openstack" +kolla_dev_repos_pull: "no" +kolla_dev_mode: "no" +kolla_source_version: "{% if openstack_release == 'master' %}master{% else %}stable/{{ openstack_release }}{% endif %}" + +# Proxy settings for containers such as magnum that need internet access +container_http_proxy: "" +container_https_proxy: "" +container_no_proxy: "localhost,127.0.0.1" + +container_proxy_no_proxy_entries: + - "{{ container_no_proxy }}" + - "{{ api_interface_address }}" + - "{{ kolla_internal_vip_address | default('') }}" + +container_proxy: + http_proxy: "{{ container_http_proxy }}" + https_proxy: "{{ container_https_proxy }}" + no_proxy: "{{ container_proxy_no_proxy_entries | select | join(',') }}" + +# By default, Kolla API services bind to the network address assigned +# to the api_interface. Allow the bind address to be an override. +api_interface_address: "{{ 'api' | kolla_address }}" + +#################### +# Container engine options +#################### +kolla_container_engine: "docker" + + +######################### +# Internal Image options +######################### +kolla_base_distro_version_default_map: { + "centos": "stream10", + "debian": "bookworm", + "rocky": "10", + "ubuntu": "noble", +} + +distro_python_version: "3" + +kolla_base_distro_version: "{{ kolla_base_distro_version_default_map[kolla_base_distro] }}" + +#################### +# Networking options +#################### +network_interface: "eth0" +kolla_external_vip_interface: "{{ network_interface }}" +api_interface: "{{ network_interface }}" + +# Configure the address family (AF) per network. 
+# Valid options are [ ipv4, ipv6 ] +network_address_family: "ipv4" +api_address_family: "{{ network_address_family }}" + +public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}" +internal_protocol: "{{ 'https' if kolla_enable_tls_internal | bool else 'http' }}" + +# Additional optional OpenStack features and services are specified here +enable_central_logging: "no" + +# Clean images options are specified here +enable_destroy_images: "no" + +#################### +# Global Options +#################### +# List of containers to skip during stop command in YAML list format +# skip_stop_containers: +# - container1 +# - container2 +skip_stop_containers: [] + +################### +# Messaging options +################### +# oslo.messaging rpc transport valid options are [ rabbit, amqp ] +om_rpc_transport: "rabbit" +om_rpc_user: "{{ rabbitmq_user }}" +om_rpc_password: "{{ rabbitmq_password }}" +om_rpc_port: "{{ rabbitmq_port }}" +om_rpc_group: "rabbitmq" +om_rpc_vhost: "/" + +rpc_transport_url: "{{ om_rpc_transport }}://{% for host in groups[om_rpc_group] %}{{ om_rpc_user }}:{{ om_rpc_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_rpc_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_rpc_vhost }}" + +# oslo.messaging notify transport valid options are [ rabbit ] +om_notify_transport: "rabbit" +om_notify_user: "{{ rabbitmq_user }}" +om_notify_password: "{{ rabbitmq_password }}" +om_notify_port: "{{ rabbitmq_port }}" +om_notify_group: "rabbitmq" +om_notify_vhost: "/" + +notify_transport_url: "{{ om_notify_transport }}://{% for host in groups[om_notify_group] %}{{ om_notify_user }}:{{ om_notify_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_notify_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_notify_vhost }}" + +# Whether to enable TLS for oslo.messaging communication with RabbitMQ. +om_enable_rabbitmq_tls: "{{ rabbitmq_enable_tls | bool }}" +# CA certificate bundle in containers using oslo.messaging with RabbitMQ TLS. +om_rabbitmq_cacert: "{{ rabbitmq_cacert }}" +om_rabbitmq_qos_prefetch_count: "1" + +om_enable_rabbitmq_stream_fanout: true + +# OpenStack authentication string. You should only need to override these if you +# are changing the admin tenant/project or user. +openstack_auth: + auth_url: "{{ keystone_internal_url }}" + username: "{{ keystone_admin_user }}" + password: "{{ keystone_admin_password }}" + project_name: "{{ keystone_admin_project }}" + domain_name: "default" + user_domain_name: "default" + +#################### +# OpenStack options +#################### +openstack_release: "master" +# Docker image tag used by default. +openstack_tag: "{{ openstack_release }}-{{ kolla_base_distro }}-{{ kolla_base_distro_version }}{{ openstack_tag_suffix }}" +openstack_tag_suffix: "" +openstack_logging_debug: "False" + +openstack_region_name: "RegionOne" + +# A list of policy file formats that are supported by Oslo.policy +supported_policy_format_list: + - policy.yaml + - policy.json + +# In the context of multi-regions, list here the name of all your regions. +multiple_regions_names: + - "{{ openstack_region_name }}" + +openstack_service_workers: "{{ [ansible_facts.processor_vcpus, 5] | min }}" +openstack_service_rpc_workers: "{{ [ansible_facts.processor_vcpus, 3] | min }}" + +# Endpoint type used to connect with OpenStack services with ansible modules. 
+# Valid options are [ public, internal ] +openstack_interface: "internal" + +# Openstack CA certificate bundle file +# CA bundle file must be added to both the Horizon and Kolla Toolbox containers +openstack_cacert: "" + +# Enable core OpenStack services. This includes: +# glance, keystone, neutron, nova, heat, and horizon. +enable_openstack_core: "yes" + +enable_osprofiler: "no" + +#################### +# Osprofiler options +#################### +# valid values: ["elasticsearch", "redis"] +osprofiler_backend: "elasticsearch" +opensearch_connection_string: "elasticsearch://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}" +osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else opensearch_connection_string }}" + +###################### +# Backend TLS options +###################### +kolla_enable_tls_backend: "no" +kolla_verify_tls_backend: "yes" +kolla_tls_backend_cert: "{{ kolla_certificates_dir }}/backend-cert.pem" +kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem" + +#################### +# Database options +#################### +database_address: "{{ kolla_internal_fqdn }}" +database_user: "root" +database_port: "3306" +database_connection_recycle_time: 10 +database_max_pool_size: 1 +database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool ) and ( enable_proxysql | bool)) else 'no' }}" +database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool ) and ( enable_proxysql | bool)) else 'no' }}" + +# Optionally allow Kolla to set sysctl values +set_sysctl: "yes" + +# Optionally change the path to sysctl.conf modified by Kolla Ansible plays. +kolla_sysctl_conf_path: /etc/sysctl.conf diff --git a/ansible/group_vars/all/cyborg.yml b/ansible/group_vars/all/cyborg.yml new file mode 100644 index 0000000000..f8346b5ef2 --- /dev/null +++ b/ansible/group_vars/all/cyborg.yml @@ -0,0 +1,8 @@ +--- +enable_cyborg: "no" + +cyborg_internal_fqdn: "{{ kolla_internal_fqdn }}" +cyborg_external_fqdn: "{{ kolla_external_fqdn }}" +cyborg_api_port: "6666" +cyborg_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cyborg_api_port }}" +cyborg_api_listen_port: "{{ cyborg_api_port }}" diff --git a/ansible/group_vars/all/database.yml b/ansible/group_vars/all/database.yml new file mode 100644 index 0000000000..7e0e1bd033 --- /dev/null +++ b/ansible/group_vars/all/database.yml @@ -0,0 +1,11 @@ +--- +#################### +# Database options +#################### +database_address: "{{ kolla_internal_fqdn }}" +database_user: "root" +database_port: "3306" +database_connection_recycle_time: 10 +database_max_pool_size: 1 +database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool ) and ( enable_proxysql | bool)) else 'no' }}" +database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool ) and ( enable_proxysql | bool)) else 'no' }}" diff --git a/ansible/group_vars/all/designate.yml b/ansible/group_vars/all/designate.yml new file mode 100644 index 0000000000..e9916c5420 --- /dev/null +++ b/ansible/group_vars/all/designate.yml @@ -0,0 +1,33 @@ +--- +enable_designate: "no" + +designate_keystone_user: "designate" + +####################### +# Designate options +####################### +# Valid options are [ bind9, infoblox ] +designate_backend: "bind9" +designate_ns_record: + - "ns1.example.org" +designate_backend_external: "no" +designate_backend_external_bind9_nameservers: "" +# Valid options are 
[ '', redis ] +designate_coordination_backend: "{{ 'redis' if enable_redis | bool else '' }}" + +designate_enable_notifications_sink: "no" +designate_notifications_topic_name: "notifications_designate" + +dns_interface: "{{ network_interface }}" +dns_address_family: "{{ network_address_family }}" + +designate_internal_fqdn: "{{ kolla_internal_fqdn }}" +designate_external_fqdn: "{{ kolla_external_fqdn }}" +designate_internal_endpoint: "{{ designate_internal_fqdn | kolla_url(internal_protocol, designate_api_port) }}" +designate_public_endpoint: "{{ designate_external_fqdn | kolla_url(public_protocol, designate_api_public_port) }}" +designate_api_port: "9001" +designate_api_listen_port: "{{ designate_api_port }}" +designate_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else designate_api_port }}" +designate_bind_port: "53" +designate_mdns_port: "{{ '53' if designate_backend == 'infoblox' else '5354' }}" +designate_rndc_port: "953" diff --git a/ansible/group_vars/all/etcd.yml b/ansible/group_vars/all/etcd.yml new file mode 100644 index 0000000000..a6b1601196 --- /dev/null +++ b/ansible/group_vars/all/etcd.yml @@ -0,0 +1,7 @@ +--- +enable_etcd: "no" + +etcd_client_port: "2379" +etcd_peer_port: "2380" +etcd_enable_tls: "{{ kolla_enable_tls_backend }}" +etcd_protocol: "{{ 'https' if etcd_enable_tls | bool else 'http' }}" diff --git a/ansible/group_vars/all/fluentd.yml b/ansible/group_vars/all/fluentd.yml new file mode 100644 index 0000000000..13f41522d0 --- /dev/null +++ b/ansible/group_vars/all/fluentd.yml @@ -0,0 +1,6 @@ +--- +enable_fluentd: "yes" +enable_fluentd_systemd: "{{ (enable_fluentd | bool) and (enable_central_logging | bool) }}" + +fluentd_syslog_port: "5140" +syslog_udp_port: "{{ fluentd_syslog_port }}" diff --git a/ansible/group_vars/all/glance.yml b/ansible/group_vars/all/glance.yml new file mode 100644 index 0000000000..baf705736b --- /dev/null +++ b/ansible/group_vars/all/glance.yml @@ -0,0 +1,28 @@ +--- +enable_glance: "{{ enable_openstack_core | bool }}" + +glance_keystone_user: "glance" + +####################### +# Glance options +####################### +glance_backend_file: "{{ not (glance_backend_ceph | bool or glance_backend_s3 | bool) }}" +glance_backend_ceph: "no" +glance_backend_s3: "no" +enable_glance_image_cache: "no" +glance_file_datadir_volume: "glance" +glance_enable_rolling_upgrade: "no" +glance_enable_property_protection: "no" +glance_enable_interoperable_image_import: "no" +glance_api_hosts: "{{ [groups['glance-api'] | first] if glance_backend_file | bool and glance_file_datadir_volume == 'glance' else groups['glance-api'] }}" +# NOTE(mnasiadka): For use in common role +glance_enable_tls_backend: "{{ kolla_enable_tls_backend }}" + +glance_internal_fqdn: "{{ kolla_internal_fqdn }}" +glance_external_fqdn: "{{ kolla_external_fqdn }}" +glance_internal_endpoint: "{{ glance_internal_fqdn | kolla_url(internal_protocol, glance_api_port) }}" +glance_public_endpoint: "{{ glance_external_fqdn | kolla_url(public_protocol, glance_api_public_port) }}" +glance_api_port: "9292" +glance_api_listen_port: "{{ glance_api_port }}" +glance_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else glance_api_port }}" +glance_tls_proxy_stats_port: "9293" diff --git a/ansible/group_vars/all/gnocchi.yml b/ansible/group_vars/all/gnocchi.yml new file mode 100644 index 0000000000..f103736b43 --- /dev/null +++ b/ansible/group_vars/all/gnocchi.yml @@ -0,0 +1,21 @@ +--- 
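+# A minimal sketch of enabling Gnocchi with the Ceph storage backend from +# globals.yml (both values are among the valid options documented below): +# enable_gnocchi: "yes" +# gnocchi_backend_storage: "ceph"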
+enable_gnocchi: "no" +enable_gnocchi_statsd: "no" + +################# +# Gnocchi options +################# +# Valid options are [ file, ceph ] +gnocchi_backend_storage: "file" + +# Valid options are [redis, ''] +gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}" +gnocchi_metric_datadir_volume: "gnocchi" + +gnocchi_internal_fqdn: "{{ kolla_internal_fqdn }}" +gnocchi_external_fqdn: "{{ kolla_external_fqdn }}" +gnocchi_internal_endpoint: "{{ gnocchi_internal_fqdn | kolla_url(internal_protocol, gnocchi_api_port) }}" +gnocchi_public_endpoint: "{{ gnocchi_external_fqdn | kolla_url(public_protocol, gnocchi_api_public_port) }}" +gnocchi_api_port: "8041" +gnocchi_api_listen_port: "{{ gnocchi_api_port }}" +gnocchi_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else gnocchi_api_port }}" diff --git a/ansible/group_vars/all/grafana.yml b/ansible/group_vars/all/grafana.yml new file mode 100644 index 0000000000..97c7d12945 --- /dev/null +++ b/ansible/group_vars/all/grafana.yml @@ -0,0 +1,11 @@ +--- +enable_grafana: "no" +enable_grafana_external: "{{ enable_grafana | bool }}" + +grafana_internal_fqdn: "{{ kolla_internal_fqdn }}" +grafana_external_fqdn: "{{ kolla_external_fqdn }}" +grafana_internal_endpoint: "{{ grafana_internal_fqdn | kolla_url(internal_protocol, grafana_server_port) }}" +grafana_public_endpoint: "{{ grafana_external_fqdn | kolla_url(public_protocol, grafana_server_public_port) }}" +grafana_server_port: "3000" +grafana_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else grafana_server_port }}" +grafana_server_listen_port: "{{ grafana_server_port }}" diff --git a/ansible/group_vars/all/hacluster.yml b/ansible/group_vars/all/hacluster.yml new file mode 100644 index 0000000000..15a92447cc --- /dev/null +++ b/ansible/group_vars/all/hacluster.yml @@ -0,0 +1,9 @@ +--- +enable_hacluster: "{{ enable_masakari_hostmonitor | bool }}" + +#################### +# Corosync options +#################### + +# this is UDP port +hacluster_corosync_port: 5405 diff --git a/ansible/group_vars/all/haproxy.yml b/ansible/group_vars/all/haproxy.yml new file mode 100644 index 0000000000..d2e5a14a24 --- /dev/null +++ b/ansible/group_vars/all/haproxy.yml @@ -0,0 +1,48 @@ +--- +enable_haproxy: "yes" + +#################### +# HAProxy options +#################### +haproxy_user: "openstack" +haproxy_enable_external_vip: "{{ 'no' if kolla_same_external_internal_vip | bool else 'yes' }}" +haproxy_enable_http2: "yes" +haproxy_http2_protocol: "alpn h2,http/1.1" +kolla_enable_tls_internal: "no" +kolla_enable_tls_external: "{{ kolla_enable_tls_internal if kolla_same_external_internal_vip | bool else 'no' }}" +kolla_certificates_dir: "{{ node_config }}/certificates" +kolla_external_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy.pem" +kolla_internal_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy-internal.pem" +kolla_admin_openrc_cacert: "" +kolla_copy_ca_into_containers: "no" +haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.crt' }}" +haproxy_backend_cacert_dir: "/etc/ssl/certs" +haproxy_single_external_frontend: false +haproxy_single_external_frontend_public_port: "{{ '443' if kolla_enable_tls_external | bool else '80' }}" + +# configure SSL/TLS settings for haproxy config, one of [modern, intermediate, legacy]: +kolla_haproxy_ssl_settings: "modern" + +haproxy_ssl_settings: "{{ ssl_legacy_settings if 
kolla_haproxy_ssl_settings == 'legacy' else ssl_intermediate_settings if kolla_haproxy_ssl_settings == 'intermediate' else ssl_modern_settings | default(ssl_modern_settings) }}" + +ssl_legacy_settings: | + ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES + ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11 + +ssl_intermediate_settings: | + ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 + ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets + ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305 + ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets + +ssl_modern_settings: | + ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets + ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets + +haproxy_stats_port: "1984" +haproxy_monitor_port: "61313" +haproxy_ssh_port: "2985" diff --git a/ansible/group_vars/all/heat.yml b/ansible/group_vars/all/heat.yml new file mode 100644 index 0000000000..8a87682269 --- /dev/null +++ b/ansible/group_vars/all/heat.yml @@ -0,0 +1,17 @@ +--- +enable_heat: "{{ enable_openstack_core | bool }}" + +heat_internal_fqdn: "{{ kolla_internal_fqdn }}" +heat_external_fqdn: "{{ kolla_external_fqdn }}" +heat_internal_base_endpoint: "{{ heat_internal_fqdn | kolla_url(internal_protocol, heat_api_port) }}" +heat_public_base_endpoint: "{{ heat_external_fqdn | kolla_url(public_protocol, heat_api_public_port) }}" +heat_api_port: "8004" +heat_api_listen_port: "{{ heat_api_port }}" +heat_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_port }}" +heat_cfn_internal_fqdn: "{{ kolla_internal_fqdn }}" +heat_cfn_external_fqdn: "{{ kolla_external_fqdn }}" +heat_cfn_internal_base_endpoint: "{{ heat_cfn_internal_fqdn | kolla_url(internal_protocol, heat_api_cfn_port) }}" +heat_cfn_public_base_endpoint: "{{ heat_cfn_external_fqdn | kolla_url(public_protocol, heat_api_cfn_public_port) }}" +heat_api_cfn_port: "8000" +heat_api_cfn_listen_port: "{{ heat_api_cfn_port }}" +heat_api_cfn_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_cfn_port }}" diff --git a/ansible/group_vars/all/horizon.yml b/ansible/group_vars/all/horizon.yml new file mode 100644 index 0000000000..8e477ad9c1 --- /dev/null +++ b/ansible/group_vars/all/horizon.yml @@ -0,0 +1,48 @@ +--- +enable_horizon: "{{ enable_openstack_core | bool }}" +enable_horizon_blazar: "{{ enable_blazar | bool }}" +enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}" +enable_horizon_designate: "{{ 
enable_designate | bool }}" +enable_horizon_fwaas: "{{ enable_neutron_fwaas | bool }}" +enable_horizon_heat: "{{ enable_heat | bool }}" +enable_horizon_ironic: "{{ enable_ironic | bool }}" +enable_horizon_magnum: "{{ enable_magnum | bool }}" +enable_horizon_manila: "{{ enable_manila | bool }}" +enable_horizon_masakari: "{{ enable_masakari | bool }}" +enable_horizon_mistral: "{{ enable_mistral | bool }}" +enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}" +enable_horizon_octavia: "{{ enable_octavia | bool }}" +enable_horizon_tacker: "{{ enable_tacker | bool }}" +enable_horizon_trove: "{{ enable_trove | bool }}" +enable_horizon_watcher: "{{ enable_watcher | bool }}" +enable_horizon_zun: "{{ enable_zun | bool }}" + +####################### +# Horizon options +####################### +horizon_backend_database: false +horizon_keystone_multidomain: false + +# Enable deploying custom horizon policy files for services that don't have a +# horizon plugin but have a policy file. Override these when you have services +# not deployed by kolla-ansible but want custom policy files deployed for them +# in horizon. +enable_ceilometer_horizon_policy_file: "{{ enable_ceilometer }}" +enable_cinder_horizon_policy_file: "{{ enable_cinder }}" +enable_glance_horizon_policy_file: "{{ enable_glance }}" +enable_heat_horizon_policy_file: "{{ enable_heat }}" +enable_keystone_horizon_policy_file: "{{ enable_keystone }}" +enable_neutron_horizon_policy_file: "{{ enable_neutron }}" +enable_nova_horizon_policy_file: "{{ enable_nova }}" + +# TLS +horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}" + +# Ports +horizon_internal_fqdn: "{{ kolla_internal_fqdn }}" +horizon_external_fqdn: "{{ kolla_external_fqdn }}" +horizon_internal_endpoint: "{{ horizon_internal_fqdn | kolla_url(internal_protocol, horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port) }}" +horizon_public_endpoint: "{{ horizon_external_fqdn | kolla_url(public_protocol, horizon_tls_port if kolla_enable_tls_external | bool else horizon_port) }}" +horizon_port: "80" +horizon_tls_port: "443" +horizon_listen_port: "{{ '8080' if enable_haproxy | bool else horizon_tls_port if horizon_enable_tls_backend | bool else horizon_port }}" diff --git a/ansible/group_vars/all/influxdb.yml b/ansible/group_vars/all/influxdb.yml new file mode 100644 index 0000000000..c6ddb3c99d --- /dev/null +++ b/ansible/group_vars/all/influxdb.yml @@ -0,0 +1,12 @@ +--- +enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}" + +#################### +# InfluxDB options +#################### +influxdb_address: "{{ kolla_internal_fqdn }}" +influxdb_datadir_volume: "influxdb" + +influxdb_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, influxdb_http_port) }}" + +influxdb_http_port: "8086" diff --git a/ansible/group_vars/all/ironic.yml b/ansible/group_vars/all/ironic.yml new file mode 100644 index 0000000000..1acb6e7ab1 --- /dev/null +++ b/ansible/group_vars/all/ironic.yml @@ -0,0 +1,34 @@ +--- +enable_ironic: "no" +enable_ironic_dnsmasq: "{{ enable_ironic | bool }}" +enable_ironic_neutron_agent: "no" +enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}" +enable_ironic_pxe_filter: "no" + +# Keystone user +ironic_keystone_user: "ironic" + +# Coordination backend +ironic_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" + +# Network interfaces +ironic_http_interface: "{{ api_interface }}" 
+ironic_tftp_interface: "{{ api_interface }}" + +# Address family +ironic_http_address_family: "{{ api_address_family }}" +ironic_tftp_address_family: "{{ api_address_family }}" + +# Addresses +ironic_http_interface_address: "{{ 'ironic_http' | kolla_address }}" +ironic_tftp_interface_address: "{{ 'ironic_tftp' | kolla_address }}" + +ironic_internal_fqdn: "{{ kolla_internal_fqdn }}" +ironic_external_fqdn: "{{ kolla_external_fqdn }}" +ironic_internal_endpoint: "{{ ironic_internal_fqdn | kolla_url(internal_protocol, ironic_api_port) }}" +ironic_public_endpoint: "{{ ironic_external_fqdn | kolla_url(public_protocol, ironic_api_public_port) }}" +ironic_api_port: "6385" +ironic_api_listen_port: "{{ ironic_api_port }}" +ironic_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ironic_api_port }}" +ironic_http_port: "8089" +ironic_prometheus_exporter_port: "9608" diff --git a/ansible/group_vars/all/iscsi.yml b/ansible/group_vars/all/iscsi.yml new file mode 100644 index 0000000000..20773bf867 --- /dev/null +++ b/ansible/group_vars/all/iscsi.yml @@ -0,0 +1,4 @@ +--- +enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}" + +iscsi_port: "3260" diff --git a/ansible/group_vars/all/keepalived.yml b/ansible/group_vars/all/keepalived.yml new file mode 100644 index 0000000000..b32dcdd4f3 --- /dev/null +++ b/ansible/group_vars/all/keepalived.yml @@ -0,0 +1,8 @@ +--- +enable_keepalived: "{{ enable_haproxy | bool }}" + +#################### +# keepalived options +#################### +# Arbitrary unique number from 0..255 +keepalived_virtual_router_id: "51" diff --git a/ansible/group_vars/all/keystone.yml b/ansible/group_vars/all/keystone.yml new file mode 100644 index 0000000000..8a644bd82f --- /dev/null +++ b/ansible/group_vars/all/keystone.yml @@ -0,0 +1,86 @@ +--- +enable_keystone: "{{ enable_openstack_core | bool }}" +enable_keystone_federation: "{{ (keystone_identity_providers | length > 0) and (keystone_identity_mappings | length > 0) }}" + +#################### +# Keystone options +#################### +keystone_internal_fqdn: "{{ kolla_internal_fqdn }}" +keystone_external_fqdn: "{{ kolla_external_fqdn }}" + +keystone_internal_url: "{{ keystone_internal_fqdn | kolla_url(internal_protocol, keystone_internal_port) }}" +keystone_public_url: "{{ keystone_external_fqdn | kolla_url(public_protocol, keystone_public_port) }}" + +keystone_admin_user: "admin" +keystone_admin_project: "admin" + +# Whether or not to apply changes to service user passwords when services are +# reconfigured +update_keystone_service_user_passwords: true + +default_project_domain_name: "Default" +default_project_domain_id: "default" + +default_user_domain_name: "Default" +default_user_domain_id: "default" + +# Keystone fernet token expiry in seconds. Default is 1 day. +fernet_token_expiry: 86400 +# Keystone window to allow expired fernet tokens. Default is 2 days. +fernet_token_allow_expired_window: 172800 +# Keystone fernet key rotation interval in seconds. Default is sum of token +# expiry and allow expired window, 3 days. This ensures the minimum number +# of keys are active. If this interval is lower than the sum of the token +# expiry and allow expired window, multiple active keys will be necessary. 
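+# For example, with the defaults above the rotation interval works out to +# 86400 + 172800 = 259200 seconds, i.e. 3 days.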
+fernet_key_rotation_interval: "{{ fernet_token_expiry + fernet_token_allow_expired_window }}" + +keystone_default_user_role: "member" + +################################### +# Identity federation configuration +################################### +# Here we configure all of the IdPs' meta information that will be required to implement identity federation with OpenStack Keystone. +# We require the administrator to enter the following metadata: +# * name (internal name of the IdP in Keystone); +# * openstack_domain (the domain in Keystone that the IdP belongs to); +# * protocol (the federated protocol used by the IdP; e.g. openid or saml); +# * identifier (the IdP identifier; e.g. https://accounts.google.com); +# * public_name (the public name that will be shown for users in Horizon); +# * attribute_mapping (the attribute mapping to be used for this IdP. This mapping is configured in the "keystone_identity_mappings" configuration); +# * metadata_folder (folder containing all the identity provider metadata as jsons named as the identifier without the protocol +# and with '/' escaped as %2F followed by '.provider' or '.client' or '.conf'; e.g. accounts.google.com.provider; PS, all .conf, +# .provider and .client jsons must be in the folder, even if you don't override any conf in the .conf json, you must leave it as an empty json '{}'); +# * certificate_file (the path to the Identity Provider certificate file, the file must be named as 'certificate-key-id.pem'; +# e.g. LRVweuT51StjMdsna59jKfB3xw0r8Iz1d1J1HeAbmlw.pem; you can find the key-id in the Identity provider '.well-known/openid-configuration' jwks_uri as kid); +# +# The IdPs' meta information is to be presented to Kolla-Ansible as in the following example: +# keystone_identity_providers: +# - name: "myidp1" +# openstack_domain: "my-domain" +# protocol: "openid" +# identifier: "https://accounts.google.com" +# public_name: "Authenticate via myidp1" +# attribute_mapping: "mappingId1" +# metadata_folder: "path/to/metadata/folder" +# certificate_file: "path/to/certificate/file.pem" +# +# We also need to configure the attribute mapping that is used by IdPs. +# The configuration of attribute mappings is a list of objects, where each +# object must have a 'name' (that maps to the 'attribute_mapping' of the IdP +# object in the IdPs set), and the 'file' with a fully qualified path to a mapping file. 
+# keystone_identity_mappings: +# - name: "mappingId1" +# file: "/full/qualified/path/to/mapping/json/file/to/mappingId1" +# - name: "mappingId2" +# file: "/full/qualified/path/to/mapping/json/file/to/mappingId2" +# - name: "mappingId3" +# file: "/full/qualified/path/to/mapping/json/file/to/mappingId3" +keystone_identity_providers: [] +keystone_identity_mappings: [] + +keystone_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else keystone_public_listen_port }}" +keystone_public_listen_port: "5000" +keystone_internal_port: "5000" +keystone_internal_listen_port: "{{ keystone_internal_port }}" +keystone_listen_port: "{{ keystone_internal_listen_port }}" +keystone_ssh_port: "8023" diff --git a/ansible/group_vars/all/kuryr.yml b/ansible/group_vars/all/kuryr.yml new file mode 100644 index 0000000000..69464a0319 --- /dev/null +++ b/ansible/group_vars/all/kuryr.yml @@ -0,0 +1,4 @@ +--- +enable_kuryr: "no" + +kuryr_port: "23750" diff --git a/ansible/group_vars/all/letsencrypt.yml b/ansible/group_vars/all/letsencrypt.yml new file mode 100644 index 0000000000..50007e8859 --- /dev/null +++ b/ansible/group_vars/all/letsencrypt.yml @@ -0,0 +1,13 @@ +--- +enable_letsencrypt: "no" + +##################### +# ACME client options +##################### +acme_client_lego: "server lego {{ api_interface_address }}:{{ letsencrypt_webserver_port }}" +acme_client_servers: "{% set arr = [] %}{% if enable_letsencrypt | bool %}{{ arr.append(acme_client_lego) }}{% endif %}{{ arr }}" + +letsencrypt_webserver_port: "8081" +letsencrypt_managed_certs: "{{ '' if not enable_letsencrypt | bool else ('internal' if letsencrypt_internal_cert_server != '' and kolla_same_external_internal_vip | bool else ('internal,external' if letsencrypt_internal_cert_server != '' and letsencrypt_external_cert_server != '' else ('internal' if letsencrypt_internal_cert_server != '' else ('external' if letsencrypt_external_cert_server != '' and not kolla_same_external_internal_vip | bool else '')))) }}" +letsencrypt_external_cert_server: "https://acme-v02.api.letsencrypt.org/directory" +letsencrypt_internal_cert_server: "" diff --git a/ansible/group_vars/all/loadbalancer.yml b/ansible/group_vars/all/loadbalancer.yml new file mode 100644 index 0000000000..ef47f25553 --- /dev/null +++ b/ansible/group_vars/all/loadbalancer.yml @@ -0,0 +1,2 @@ +--- +enable_loadbalancer: "{{ enable_haproxy | bool or enable_keepalived | bool or enable_proxysql | bool }}" diff --git a/ansible/group_vars/all/magnum.yml b/ansible/group_vars/all/magnum.yml new file mode 100644 index 0000000000..279e17d986 --- /dev/null +++ b/ansible/group_vars/all/magnum.yml @@ -0,0 +1,10 @@ +--- +enable_magnum: "no" + +magnum_internal_fqdn: "{{ kolla_internal_fqdn }}" +magnum_external_fqdn: "{{ kolla_external_fqdn }}" +magnum_internal_base_endpoint: "{{ magnum_internal_fqdn | kolla_url(internal_protocol, magnum_api_port) }}" +magnum_public_base_endpoint: "{{ magnum_external_fqdn | kolla_url(public_protocol, magnum_api_public_port) }}" +magnum_api_port: "9511" +magnum_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else magnum_api_port }}" +magnum_api_listen_port: "{{ magnum_api_port }}" diff --git a/ansible/group_vars/all/manila.yml b/ansible/group_vars/all/manila.yml new file mode 100644 index 0000000000..526707331b --- /dev/null +++ b/ansible/group_vars/all/manila.yml @@ -0,0 +1,18 @@ +--- +enable_manila: "no" +enable_manila_backend_generic: "no" 
+enable_manila_backend_hnas: "no" +enable_manila_backend_cephfs_native: "no" +enable_manila_backend_cephfs_nfs: "no" +enable_manila_backend_glusterfs_nfs: "no" +enable_manila_backend_flashblade: "no" + +ceph_manila_user: "manila" + +manila_internal_fqdn: "{{ kolla_internal_fqdn }}" +manila_external_fqdn: "{{ kolla_external_fqdn }}" +manila_internal_base_endpoint: "{{ manila_internal_fqdn | kolla_url(internal_protocol, manila_api_port) }}" +manila_public_base_endpoint: "{{ manila_external_fqdn | kolla_url(public_protocol, manila_api_public_port) }}" +manila_api_port: "8786" +manila_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else manila_api_port }}" +manila_api_listen_port: "{{ manila_api_port }}" diff --git a/ansible/group_vars/all/mariadb.yml b/ansible/group_vars/all/mariadb.yml new file mode 100644 index 0000000000..5a5439fea8 --- /dev/null +++ b/ansible/group_vars/all/mariadb.yml @@ -0,0 +1,36 @@ +--- +enable_mariadb: "yes" +enable_mariabackup: "no" + +############################################# +# MariaDB component-specific database details +############################################# +# Whether to configure haproxy to load balance +# the external MariaDB server(s) +enable_external_mariadb_load_balancer: "no" +# Whether to use pre-configured databases / users +use_preconfigured_databases: "no" +# whether to use a common, preconfigured user +# for all component databases +use_common_mariadb_user: "no" + +mariadb_port: "{{ database_port }}" +mariadb_wsrep_port: "4567" +mariadb_ist_port: "4568" +mariadb_sst_port: "4444" +mariadb_enable_tls_backend: "{{ database_enable_tls_backend }}" + +mariadb_monitor_user: "monitor" + +mariadb_datadir_volume: "mariadb" + +mariadb_default_database_shard_id: 0 +mariadb_default_database_shard_hosts: "{% set default_shard = [] %}{% for host in groups['mariadb'] %}{% if hostvars[host]['mariadb_shard_id'] is not defined or hostvars[host]['mariadb_shard_id'] == mariadb_default_database_shard_id %}{{ default_shard.append(host) }}{% endif %}{% endfor %}{{ default_shard }}" +mariadb_shard_id: "{{ mariadb_default_database_shard_id }}" +mariadb_shard_name: "shard_{{ mariadb_shard_id }}" +mariadb_shard_group: "mariadb_{{ mariadb_shard_name }}" +mariadb_loadbalancer: proxysql +mariadb_backup_target: replica +mariadb_shard_root_user_prefix: "root_shard_" +mariadb_shard_backup_user_prefix: "backup_shard_" +mariadb_shards_info: "{{ groups['mariadb'] | database_shards_info() }}" diff --git a/ansible/group_vars/all/masakari.yml b/ansible/group_vars/all/masakari.yml new file mode 100644 index 0000000000..bef390e719 --- /dev/null +++ b/ansible/group_vars/all/masakari.yml @@ -0,0 +1,13 @@ +--- +enable_masakari: "no" +enable_masakari_instancemonitor: "{{ enable_masakari | bool }}" +enable_masakari_hostmonitor: "{{ enable_masakari | bool }}" + +masakari_internal_fqdn: "{{ kolla_internal_fqdn }}" +masakari_external_fqdn: "{{ kolla_external_fqdn }}" +masakari_internal_endpoint: "{{ masakari_internal_fqdn | kolla_url(internal_protocol, masakari_api_port) }}" +masakari_public_endpoint: "{{ masakari_external_fqdn | kolla_url(public_protocol, masakari_api_public_port) }}" +masakari_api_port: "15868" +masakari_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else masakari_api_port }}" +masakari_api_listen_port: "{{ masakari_api_port }}" +masakari_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}" diff 
--git a/ansible/group_vars/all/memcached.yml b/ansible/group_vars/all/memcached.yml
new file mode 100644
index 0000000000..282138b418
--- /dev/null
+++ b/ansible/group_vars/all/memcached.yml
@@ -0,0 +1,10 @@
+---
+enable_memcached: "yes"
+
+# NOTE: Most memcached clients handle load-balancing via client side
+# hashing (consistent or not) logic, so going under the covers and messing
+# with things that the clients are not aware of is generally wrong
+enable_haproxy_memcached: "no"
+
+memcached_port: "11211"
+memcache_security_strategy: "ENCRYPT"
diff --git a/ansible/group_vars/all/mistral.yml b/ansible/group_vars/all/mistral.yml
new file mode 100644
index 0000000000..2e72f3cc60
--- /dev/null
+++ b/ansible/group_vars/all/mistral.yml
@@ -0,0 +1,10 @@
+---
+enable_mistral: "no"
+
+mistral_internal_fqdn: "{{ kolla_internal_fqdn }}"
+mistral_external_fqdn: "{{ kolla_external_fqdn }}"
+mistral_internal_base_endpoint: "{{ mistral_internal_fqdn | kolla_url(internal_protocol, mistral_api_port) }}"
+mistral_public_base_endpoint: "{{ mistral_external_fqdn | kolla_url(public_protocol, mistral_api_public_port) }}"
+mistral_api_port: "8989"
+mistral_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else mistral_api_port }}"
+mistral_api_listen_port: "{{ mistral_api_port }}"
diff --git a/ansible/group_vars/all/multipathd.yml b/ansible/group_vars/all/multipathd.yml
new file mode 100644
index 0000000000..bae55e51ca
--- /dev/null
+++ b/ansible/group_vars/all/multipathd.yml
@@ -0,0 +1,2 @@
+---
+enable_multipathd: "no"
diff --git a/ansible/group_vars/all/neutron.yml b/ansible/group_vars/all/neutron.yml
new file mode 100644
index 0000000000..2c3b9ca948
--- /dev/null
+++ b/ansible/group_vars/all/neutron.yml
@@ -0,0 +1,70 @@
+---
+enable_neutron: "{{ enable_openstack_core | bool }}"
+
+enable_neutron_vpnaas: "no"
+enable_neutron_sriov: "no"
+enable_neutron_mlnx: "no"
+enable_neutron_dvr: "no"
+enable_neutron_fwaas: "no"
+enable_neutron_qos: "no"
+enable_neutron_agent_ha: "no"
+enable_neutron_bgp_dragent: "no"
+enable_neutron_provider_networks: "no"
+enable_neutron_segments: "no"
+enable_neutron_packet_logging: "no"
+enable_neutron_sfc: "no"
+enable_neutron_taas: "no"
+enable_neutron_trunk: "no"
+enable_neutron_metering: "no"
+enable_neutron_infoblox_ipam_agent: "no"
+enable_neutron_port_forwarding: "no"
+neutron_enable_ovn_agent: "no"
+
+neutron_keystone_user: "neutron"
+
+# Valid options are [ openvswitch, ovn ]
+neutron_plugin_agent: "openvswitch"
+
+# Valid options are [ internal, infoblox ]
+neutron_ipam_driver: "internal"
+
+neutron_external_interface: "eth1"
+
+#######################
+# Neutron options
+#######################
+neutron_bgp_router_id: "1.1.1.1"
+neutron_bridge_name: "{{ 'br_dpdk' if enable_ovs_dpdk | bool else 'br-ex' }}"
+neutron_physical_networks: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index }}{% if not loop.last %},{% endif %}{% endfor %}"
+# Comma-separated list of enabled ml2 type drivers
+neutron_type_drivers: "flat,vlan,vxlan{% if neutron_plugin_agent == 'ovn' %},geneve{% endif %}"
+# Comma-separated types of tenant networks (should be listed in 'neutron_type_drivers')
+# NOTE: for ironic this list should also contain 'flat'
+neutron_tenant_network_types: "{% if neutron_plugin_agent == 'ovn' %}geneve{% else %}vxlan{% endif %}"
+
+# valid values: ["dvr", "dvr_no_external"]
+neutron_compute_dvr_mode: "dvr"
+computes_need_external_bridge: "{{ (enable_neutron_dvr | bool and neutron_compute_dvr_mode ==
'dvr') or enable_neutron_provider_networks | bool or neutron_ovn_distributed_fip | bool }}" + +# Default DNS resolvers for virtual networks +neutron_dnsmasq_dns_servers: "1.1.1.1,8.8.8.8,8.8.4.4" + +# Enable distributed floating ip for OVN deployments +neutron_ovn_distributed_fip: "no" + +# SRIOV physnet:interface mappings when SRIOV is enabled +# "sriovnet1" and tunnel_interface used here as placeholders +neutron_sriov_physnet_mappings: + sriovnet1: "{{ tunnel_interface }}" +neutron_enable_tls_backend: "{{ kolla_enable_tls_backend }}" + +# Set OVN network availability zones +neutron_ovn_availability_zones: [] + +neutron_internal_fqdn: "{{ kolla_internal_fqdn }}" +neutron_external_fqdn: "{{ kolla_external_fqdn }}" +neutron_internal_endpoint: "{{ neutron_internal_fqdn | kolla_url(internal_protocol, neutron_server_port) }}" +neutron_public_endpoint: "{{ neutron_external_fqdn | kolla_url(public_protocol, neutron_server_public_port) }}" +neutron_server_port: "9696" +neutron_server_listen_port: "{{ neutron_server_port }}" +neutron_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else neutron_server_port }}" diff --git a/ansible/group_vars/all/nova.yml b/ansible/group_vars/all/nova.yml new file mode 100644 index 0000000000..48811be629 --- /dev/null +++ b/ansible/group_vars/all/nova.yml @@ -0,0 +1,69 @@ +--- +enable_cells: "no" +enable_nova: "{{ enable_openstack_core | bool }}" +enable_nova_libvirt_container: "{{ nova_compute_virt_type in ['kvm', 'qemu'] }}" +enable_nova_serialconsole_proxy: "no" +enable_nova_ssh: "yes" + +####################### +# Nova options +####################### +nova_backend_ceph: "no" +nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}" +# Valid options are [ kvm, qemu ] +nova_compute_virt_type: "kvm" +nova_instance_datadir_volume: "{{ 'nova_compute' if enable_nova_libvirt_container | bool else '/var/lib/nova' }}" +nova_safety_upgrade: "no" +# Valid options are [ none, novnc, spice ] +nova_console: "novnc" + +####################### +# Nova Database +####################### +nova_database_shard_id: "{{ mariadb_default_database_shard_id | int }}" +nova_cell0_database_shard_id: "{{ nova_database_shard_id | int }}" + +# These are kept for backwards compatibility, as cell0 references them. 
+nova_database_name: "nova" +nova_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}nova{% endif %}" +nova_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}" + +nova_cell0_database_name: "{{ nova_database_name }}_cell0" +nova_cell0_database_user: "{{ nova_database_user }}" +nova_cell0_database_address: "{{ nova_database_address }}" +nova_cell0_database_password: "{{ nova_database_password }}" + +# Nova fake driver and the number of fake driver per compute node +enable_nova_fake: "no" +num_nova_fake_per_node: 5 + +migration_interface: "{{ api_interface }}" +migration_interface_address: "{{ 'migration' | kolla_address }}" +migration_address_family: "{{ api_address_family }}" + +nova_keystone_user: "nova" + +nova_internal_fqdn: "{{ kolla_internal_fqdn }}" +nova_external_fqdn: "{{ kolla_external_fqdn }}" +nova_internal_base_endpoint: "{{ nova_internal_fqdn | kolla_url(internal_protocol, nova_api_port) }}" +nova_public_base_endpoint: "{{ nova_external_fqdn | kolla_url(public_protocol, nova_api_public_port) }}" +nova_api_port: "8774" +nova_api_listen_port: "{{ nova_api_port }}" +nova_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_api_port }}" +nova_metadata_internal_fqdn: "{{ kolla_internal_fqdn }}" +nova_metadata_external_fqdn: "{{ kolla_external_fqdn }}" +nova_metadata_port: "8775" +nova_metadata_listen_port: "{{ nova_metadata_port }}" +nova_novncproxy_fqdn: "{{ kolla_external_fqdn }}" +nova_novncproxy_port: "6080" +nova_novncproxy_listen_port: "{{ nova_novncproxy_port }}" +nova_novncproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_novncproxy_port }}" +nova_spicehtml5proxy_fqdn: "{{ kolla_external_fqdn }}" +nova_spicehtml5proxy_port: "6082" +nova_spicehtml5proxy_listen_port: "{{ nova_spicehtml5proxy_port }}" +nova_spicehtml5proxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_spicehtml5proxy_port }}" +nova_serialproxy_fqdn: "{{ kolla_external_fqdn }}" +nova_serialproxy_port: "6083" +nova_serialproxy_listen_port: "{{ nova_serialproxy_port }}" +nova_serialproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_serialproxy_port }}" +nova_serialproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}" diff --git a/ansible/group_vars/all/octavia.yml b/ansible/group_vars/all/octavia.yml new file mode 100644 index 0000000000..c0a152cf4c --- /dev/null +++ b/ansible/group_vars/all/octavia.yml @@ -0,0 +1,33 @@ +--- +enable_octavia: "no" +enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}" +enable_octavia_jobboard: "{{ enable_octavia | bool and 'amphora' in octavia_provider_drivers }}" + +########## +# Octavia +########## +# Whether to run Kolla-Ansible's automatic configuration for Octavia. +# NOTE: if you upgrade from Ussuri, you must set `octavia_auto_configure` to `no` +# and keep your other Octavia config like before. +octavia_auto_configure: "{{ 'amphora' in octavia_provider_drivers }}" + +# Octavia network type options are [ tenant, provider ] +# * tenant indicates that we will create a tenant network and a network +# interface on the Octavia worker nodes for communication with amphorae. 
+# * provider indicates that we will create a flat or vlan provider network. +# In this case octavia_network_interface should be set to a network interface +# on the Octavia worker nodes on the same provider network. +octavia_network_type: "provider" + +octavia_network_interface: "{{ 'o-hm0' if octavia_network_type == 'tenant' else api_interface }}" +octavia_network_address_family: "{{ api_address_family }}" +octavia_network_interface_address: "{{ 'octavia_network' | kolla_address }}" + +octavia_internal_fqdn: "{{ kolla_internal_fqdn }}" +octavia_external_fqdn: "{{ kolla_external_fqdn }}" +octavia_internal_endpoint: "{{ octavia_internal_fqdn | kolla_url(internal_protocol, octavia_api_port) }}" +octavia_public_endpoint: "{{ octavia_external_fqdn | kolla_url(public_protocol, octavia_api_public_port) }}" +octavia_api_port: "9876" +octavia_api_listen_port: "{{ octavia_api_port }}" +octavia_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else octavia_api_port }}" +octavia_health_manager_port: "5555" diff --git a/ansible/group_vars/all/opensearch.yml b/ansible/group_vars/all/opensearch.yml new file mode 100644 index 0000000000..21c208c67e --- /dev/null +++ b/ansible/group_vars/all/opensearch.yml @@ -0,0 +1,33 @@ +--- +#################### +# Logging options +#################### + +# NOTE: If an external ElasticSearch cluster address is configured, all +# services with ElasticSearch endpoints should be configured to log +# to the external cluster by default. This is for backwards compatibility. +opensearch_address: "{{ elasticsearch_address if elasticsearch_address is defined else kolla_internal_fqdn }}" +enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'opensearch') }}" +enable_opensearch_dashboards: "{{ enable_opensearch | bool }}" +enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}" + +####################### +## Opensearch Options +######################## +opensearch_datadir_volume: "opensearch" + +opensearch_internal_endpoint: "{{ opensearch_address | kolla_url(internal_protocol, opensearch_port) }}" +opensearch_dashboards_internal_fqdn: "{{ kolla_internal_fqdn }}" +opensearch_dashboards_external_fqdn: "{{ kolla_external_fqdn }}" +opensearch_dashboards_internal_endpoint: "{{ opensearch_dashboards_internal_fqdn | kolla_url(internal_protocol, opensearch_dashboards_port) }}" +opensearch_dashboards_external_endpoint: "{{ opensearch_dashboards_external_fqdn | kolla_url(public_protocol, opensearch_dashboards_port_external) }}" +opensearch_dashboards_user: "opensearch" +opensearch_log_index_prefix: "{{ kibana_log_prefix if kibana_log_prefix is defined else 'flog' }}" + +# NOTE: If an external ElasticSearch cluster port is specified, +# we default to using that port in services with ElasticSearch +# endpoints. This is for backwards compatibility. 
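+# For example (an editor's illustration with placeholder values, not shipped
+# defaults): an existing deployment whose globals.yml sets
+#   elasticsearch_address: "es.example.com"
+#   elasticsearch_port: "9200"
+# will have opensearch_address and opensearch_port resolve to those values, so
+# logging keeps targeting the external cluster.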
+opensearch_port: "{{ elasticsearch_port | default('9200') }}" +opensearch_dashboards_port: "5601" +opensearch_dashboards_port_external: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else opensearch_dashboards_port }}" +opensearch_dashboards_listen_port: "{{ opensearch_dashboards_port }}" diff --git a/ansible/group_vars/all/openvswitch.yml b/ansible/group_vars/all/openvswitch.yml new file mode 100644 index 0000000000..731f99d00b --- /dev/null +++ b/ansible/group_vars/all/openvswitch.yml @@ -0,0 +1,14 @@ +--- +enable_openvswitch: "{{ enable_neutron | bool }}" +enable_ovs_dpdk: "no" + +ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}" + +tunnel_address_family: "{{ network_address_family }}" +dpdk_tunnel_address_family: "{{ network_address_family }}" +tunnel_interface: "{{ network_interface }}" +dpdk_tunnel_interface: "{{ neutron_external_interface }}" +tunnel_interface_address: "{{ 'tunnel' | kolla_address }}" +dpdk_tunnel_interface_address: "{{ 'dpdk_tunnel' | kolla_address }}" + +ovsdb_port: "6640" diff --git a/ansible/group_vars/all/ovn.yml b/ansible/group_vars/all/ovn.yml new file mode 100644 index 0000000000..dc9b4b6e8d --- /dev/null +++ b/ansible/group_vars/all/ovn.yml @@ -0,0 +1,16 @@ +--- +enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}" +enable_ovn_sb_db_relay: "{{ enable_ovn | bool }}" + +ovn_nb_db_port: "6641" +ovn_sb_db_port: "6642" +# OVN SB Relay related variables +ovn_sb_db_relay_count: "{{ ((groups['ovn-controller'] | length) / ovn_sb_db_relay_compute_per_relay | int) | round(0, 'ceil') | int }}" +ovn_sb_db_relay_compute_per_relay: "50" +ovn_sb_db_relay_port_prefix: "1664" +ovn_sb_db_relay_port: "{{ ovn_sb_db_relay_port_prefix ~ ovn_sb_db_relay_client_group_id }}" +ovn_sb_db_relay_client_group_id: "{{ range(1, ovn_sb_db_relay_count | int + 1) | random(seed=inventory_hostname) }}" +ovn_nb_connection: "{% for host in groups['ovn-nb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_nb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}" +ovn_sb_connection: "{{ ovn_sb_connection_relay if enable_ovn_sb_db_relay | bool else ovn_sb_connection_no_relay }}" +ovn_sb_connection_no_relay: "{% for host in groups['ovn-sb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_sb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}" +ovn_sb_connection_relay: "{% for host in groups['ovn-sb-db-relay'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_sb_db_relay_port }}{% if not loop.last %},{% endif %}{% endfor %}" diff --git a/ansible/group_vars/all/placement.yml b/ansible/group_vars/all/placement.yml new file mode 100644 index 0000000000..4949393eaa --- /dev/null +++ b/ansible/group_vars/all/placement.yml @@ -0,0 +1,13 @@ +--- +enable_placement: "{{ enable_nova | bool or enable_zun | bool }}" + +placement_keystone_user: "placement" + +placement_internal_fqdn: "{{ kolla_internal_fqdn }}" +placement_external_fqdn: "{{ kolla_external_fqdn }}" +placement_internal_endpoint: "{{ placement_internal_fqdn | kolla_url(internal_protocol, placement_api_port) }}" +placement_public_endpoint: "{{ placement_external_fqdn | kolla_url(public_protocol, placement_api_public_port) }}" +# Default Placement API port of 8778 already in use +placement_api_port: "8780" +placement_api_listen_port: "{{ placement_api_port }}" +placement_api_public_port: "{{ haproxy_single_external_frontend_public_port if 
haproxy_single_external_frontend | bool else placement_api_port }}" diff --git a/ansible/group_vars/all/prometheus.yml b/ansible/group_vars/all/prometheus.yml new file mode 100644 index 0000000000..e372bf6429 --- /dev/null +++ b/ansible/group_vars/all/prometheus.yml @@ -0,0 +1,78 @@ +--- +enable_prometheus: "no" + +############ +# Prometheus +############ +enable_prometheus_server: "{{ enable_prometheus | bool }}" +enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}" +enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}" +enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}" +enable_prometheus_memcached_exporter: "{{ enable_memcached | bool }}" +enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}" +enable_prometheus_fluentd_integration: "{{ enable_prometheus | bool and enable_fluentd | bool }}" +enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}" +enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bool }}" +enable_prometheus_ceph_mgr_exporter: "no" +enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}" +enable_prometheus_openstack_exporter_external: "no" +enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_opensearch | bool }}" +enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}" +enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}" +enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}" +enable_prometheus_etcd_integration: "{{ enable_prometheus | bool and enable_etcd | bool }}" +enable_prometheus_proxysql_exporter: "{{ enable_prometheus | bool and enable_proxysql | bool }}" + +prometheus_alertmanager_user: "admin" +prometheus_ceph_exporter_interval: "{{ prometheus_scrape_interval }}" +prometheus_grafana_user: "grafana" +prometheus_haproxy_user: "haproxy" +prometheus_skyline_user: "skyline" +prometheus_scrape_interval: "60s" +prometheus_openstack_exporter_interval: "{{ prometheus_scrape_interval }}" +prometheus_openstack_exporter_timeout: "45s" +prometheus_elasticsearch_exporter_interval: "{{ prometheus_scrape_interval }}" +prometheus_cmdline_extras: +prometheus_ceph_mgr_exporter_endpoints: [] +prometheus_openstack_exporter_endpoint_type: "internal" +prometheus_openstack_exporter_compute_api_version: "latest" +prometheus_libvirt_exporter_interval: "60s" + +prometheus_internal_fqdn: "{{ kolla_internal_fqdn }}" +prometheus_external_fqdn: "{{ kolla_external_fqdn }}" +prometheus_internal_endpoint: "{{ prometheus_internal_fqdn | kolla_url(internal_protocol, prometheus_port) }}" +prometheus_public_endpoint: "{{ prometheus_external_fqdn | kolla_url(public_protocol, prometheus_public_port) }}" +prometheus_port: "9091" +prometheus_listen_port: "{{ prometheus_port }}" +prometheus_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_port }}" +prometheus_node_exporter_port: "9100" +prometheus_mysqld_exporter_port: "9104" +prometheus_haproxy_exporter_port: "9101" +prometheus_memcached_exporter_port: "9150" +prometheus_rabbitmq_exporter_port: "{{ rabbitmq_prometheus_port }}" +# Default cadvisor port of 8080 already in use +prometheus_cadvisor_port: "18080" +prometheus_fluentd_integration_port: "24231" +prometheus_libvirt_exporter_port: "9177" +prometheus_etcd_integration_port: "{{ etcd_client_port }}" +proxysql_prometheus_exporter_port: "6070" + +# Prometheus 
alertmanager ports +prometheus_alertmanager_internal_fqdn: "{{ kolla_internal_fqdn }}" +prometheus_alertmanager_external_fqdn: "{{ kolla_external_fqdn }}" +prometheus_alertmanager_internal_endpoint: "{{ prometheus_alertmanager_internal_fqdn | kolla_url(internal_protocol, prometheus_alertmanager_port) }}" +prometheus_alertmanager_public_endpoint: "{{ prometheus_alertmanager_external_fqdn | kolla_url(public_protocol, prometheus_alertmanager_public_port) }}" +prometheus_alertmanager_port: "9093" +prometheus_alertmanager_cluster_port: "9094" +prometheus_alertmanager_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_alertmanager_port }}" +prometheus_alertmanager_listen_port: "{{ prometheus_alertmanager_port }}" + +# Prometheus openstack-exporter ports +prometheus_openstack_exporter_port: "9198" +prometheus_elasticsearch_exporter_port: "9108" + +# Prometheus blackbox-exporter ports +prometheus_blackbox_exporter_port: "9115" + +# Prometheus instance label to use for metrics +prometheus_instance_label: diff --git a/ansible/group_vars/all/proxysql.yml b/ansible/group_vars/all/proxysql.yml new file mode 100644 index 0000000000..386a9be564 --- /dev/null +++ b/ansible/group_vars/all/proxysql.yml @@ -0,0 +1,4 @@ +--- +enable_proxysql: "{{ enable_mariadb }}" + +proxysql_admin_port: "6032" diff --git a/ansible/group_vars/all/rabbitmq.yml b/ansible/group_vars/all/rabbitmq.yml new file mode 100644 index 0000000000..4773c14445 --- /dev/null +++ b/ansible/group_vars/all/rabbitmq.yml @@ -0,0 +1,19 @@ +--- +enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'no' }}" + +#################### +# RabbitMQ options +#################### +rabbitmq_user: "openstack" +rabbitmq_monitoring_user: "" +# Whether to enable TLS encryption for RabbitMQ client-server communication. +rabbitmq_enable_tls: "no" +# CA certificate bundle in RabbitMQ container. +rabbitmq_cacert: "/etc/ssl/certs/{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.crt' }}" +rabbitmq_datadir_volume: "rabbitmq" + +rabbitmq_port: "{{ '5671' if rabbitmq_enable_tls | bool else '5672' }}" +rabbitmq_management_port: "15672" +rabbitmq_cluster_port: "25672" +rabbitmq_epmd_port: "4369" +rabbitmq_prometheus_port: "15692" diff --git a/ansible/group_vars/all/redis.yml b/ansible/group_vars/all/redis.yml new file mode 100644 index 0000000000..bea98be6e2 --- /dev/null +++ b/ansible/group_vars/all/redis.yml @@ -0,0 +1,11 @@ +--- +enable_redis: "no" + +#################### +# Redis options +#################### +redis_connection_string: "redis://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}default:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}?sentinel=kolla{% else %}&sentinel_fallback={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}{{ redis_connection_string_extras }}" +redis_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes" + +redis_port: "6379" +redis_sentinel_port: "26379" diff --git a/ansible/group_vars/all/s3.yml b/ansible/group_vars/all/s3.yml new file mode 100644 index 0000000000..c42e45931b --- /dev/null +++ b/ansible/group_vars/all/s3.yml @@ -0,0 +1,7 @@ +--- +############# +# Common options for S3 Cinder Backup and Glance S3 backend. 
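+# The options below are unset by default. As an illustration only (an editor's
+# sketch; every value is a placeholder, not a default), they might be set in
+# globals.yml like:
+#   s3_url: "https://object-store.example.com"
+#   s3_bucket: "kolla"
+#   s3_access_key: "ACCESS_KEY"
+#   s3_secret_key: "SECRET_KEY"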
+s3_url: +s3_bucket: +s3_access_key: +s3_secret_key: diff --git a/ansible/group_vars/all/skyline.yml b/ansible/group_vars/all/skyline.yml new file mode 100644 index 0000000000..56225268a0 --- /dev/null +++ b/ansible/group_vars/all/skyline.yml @@ -0,0 +1,18 @@ +--- +enable_skyline: "no" + +skyline_apiserver_internal_fqdn: "{{ kolla_internal_fqdn }}" +skyline_apiserver_external_fqdn: "{{ kolla_external_fqdn }}" +skyline_apiserver_internal_endpoint: "{{ skyline_apiserver_internal_fqdn | kolla_url(internal_protocol, skyline_apiserver_port) }}" +skyline_apiserver_public_endpoint: "{{ skyline_apiserver_external_fqdn | kolla_url(public_protocol, skyline_apiserver_public_port) }}" +skyline_console_internal_fqdn: "{{ kolla_internal_fqdn }}" +skyline_console_external_fqdn: "{{ kolla_external_fqdn }}" +skyline_console_internal_endpoint: "{{ skyline_console_internal_fqdn | kolla_url(internal_protocol, skyline_console_port) }}" +skyline_console_public_endpoint: "{{ skyline_console_external_fqdn | kolla_url(public_protocol, skyline_console_public_port) }}" +skyline_apiserver_port: "9998" +skyline_apiserver_listen_port: "{{ skyline_apiserver_port }}" +skyline_apiserver_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_apiserver_port }}" +skyline_console_port: "9999" +skyline_console_listen_port: "{{ skyline_console_port }}" +skyline_console_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_console_port }}" +skyline_enable_sso: "{{ enable_keystone_federation | bool and keystone_identity_providers | selectattr('protocol', 'equalto', 'openid') | list | count > 0 }}" diff --git a/ansible/group_vars/all/tacker.yml b/ansible/group_vars/all/tacker.yml new file mode 100644 index 0000000000..936f5fbeab --- /dev/null +++ b/ansible/group_vars/all/tacker.yml @@ -0,0 +1,10 @@ +--- +enable_tacker: "no" + +tacker_internal_fqdn: "{{ kolla_internal_fqdn }}" +tacker_external_fqdn: "{{ kolla_external_fqdn }}" +tacker_internal_endpoint: "{{ tacker_internal_fqdn | kolla_url(internal_protocol, tacker_server_port) }}" +tacker_public_endpoint: "{{ tacker_external_fqdn | kolla_url(public_protocol, tacker_server_public_port) }}" +tacker_server_port: "9890" +tacker_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else tacker_server_port }}" +tacker_server_listen_port: "{{ tacker_server_port }}" diff --git a/ansible/group_vars/all/telegraf.yml b/ansible/group_vars/all/telegraf.yml new file mode 100644 index 0000000000..1ad350fd98 --- /dev/null +++ b/ansible/group_vars/all/telegraf.yml @@ -0,0 +1,9 @@ +--- +enable_telegraf: "no" + +########## +# Telegraf +########## +# Configure telegraf to use the docker daemon itself as an input for +# telemetry data. 
+telegraf_enable_docker_input: "no" diff --git a/ansible/group_vars/all/trove.yml b/ansible/group_vars/all/trove.yml new file mode 100644 index 0000000000..420efad5e5 --- /dev/null +++ b/ansible/group_vars/all/trove.yml @@ -0,0 +1,11 @@ +--- +enable_trove: "no" +enable_trove_singletenant: "no" + +trove_internal_fqdn: "{{ kolla_internal_fqdn }}" +trove_external_fqdn: "{{ kolla_external_fqdn }}" +trove_internal_base_endpoint: "{{ trove_internal_fqdn | kolla_url(internal_protocol, trove_api_port) }}" +trove_public_base_endpoint: "{{ trove_external_fqdn | kolla_url(public_protocol, trove_api_public_port) }}" +trove_api_port: "8779" +trove_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else trove_api_port }}" +trove_api_listen_port: "{{ trove_api_port }}" diff --git a/ansible/group_vars/all/watcher.yml b/ansible/group_vars/all/watcher.yml new file mode 100644 index 0000000000..87d919c8f6 --- /dev/null +++ b/ansible/group_vars/all/watcher.yml @@ -0,0 +1,10 @@ +--- +enable_watcher: "no" + +watcher_internal_fqdn: "{{ kolla_internal_fqdn }}" +watcher_external_fqdn: "{{ kolla_external_fqdn }}" +watcher_internal_endpoint: "{{ watcher_internal_fqdn | kolla_url(internal_protocol, watcher_api_port) }}" +watcher_public_endpoint: "{{ watcher_external_fqdn | kolla_url(public_protocol, watcher_api_public_port) }}" +watcher_api_port: "9322" +watcher_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else watcher_api_port }}" +watcher_api_listen_port: "{{ watcher_api_port }}" diff --git a/ansible/group_vars/all/zun.yml b/ansible/group_vars/all/zun.yml new file mode 100644 index 0000000000..ed10ea5462 --- /dev/null +++ b/ansible/group_vars/all/zun.yml @@ -0,0 +1,31 @@ +--- +enable_zun: "no" + +# Extra docker options for Zun +docker_configure_for_zun: "no" +docker_zun_options: -H tcp://{{ api_interface_address | put_address_in_context('url') }}:2375 +docker_zun_config: {} + +# Extra containerd options for Zun +containerd_configure_for_zun: "no" + +# Enable Ceph backed Cinder Volumes for zun +zun_configure_for_cinder_ceph: "no" + +# 42463 is the static group id of the zun user in the Zun image. +# If users customize this value on building the Zun images, +# they need to change this config accordingly. 
+containerd_grpc_gid: 42463 + +zun_api_port: "9517" +zun_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else zun_api_port }}" +zun_api_listen_port: "{{ zun_api_port }}" +zun_wsproxy_internal_fqdn: "{{ kolla_internal_fqdn }}" +zun_wsproxy_external_fqdn: "{{ kolla_external_fqdn }}" +zun_wsproxy_port: "6784" +zun_wsproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}" +zun_cni_daemon_port: "9036" +zun_internal_fqdn: "{{ kolla_internal_fqdn }}" +zun_external_fqdn: "{{ kolla_external_fqdn }}" +zun_internal_base_endpoint: "{{ zun_internal_fqdn | kolla_url(internal_protocol, zun_api_port) }}" +zun_public_base_endpoint: "{{ zun_external_fqdn | kolla_url(public_protocol, zun_api_public_port) }}" diff --git a/ansible/group_vars/baremetal.yml b/ansible/group_vars/baremetal/ansible-python-interpreter.yml similarity index 100% rename from ansible/group_vars/baremetal.yml rename to ansible/group_vars/baremetal/ansible-python-interpreter.yml diff --git a/ansible/inventory/all-in-one b/ansible/inventory/all-in-one index 1d47ec7982..6ab9227f5b 100644 --- a/ansible/inventory/all-in-one +++ b/ansible/inventory/all-in-one @@ -165,9 +165,6 @@ control [blazar:children] control -[venus:children] -monitoring - [letsencrypt:children] loadbalancer @@ -590,12 +587,6 @@ ovn-database [ovn-sb-db-relay:children] ovn-database -[venus-api:children] -venus - -[venus-manager:children] -venus - [letsencrypt-webserver:children] letsencrypt diff --git a/ansible/inventory/multinode b/ansible/inventory/multinode index 9c35be0475..972e4059e5 100644 --- a/ansible/inventory/multinode +++ b/ansible/inventory/multinode @@ -183,9 +183,6 @@ control [blazar:children] control -[venus:children] -monitoring - [letsencrypt:children] loadbalancer @@ -608,12 +605,6 @@ ovn-database [ovn-sb-db-relay:children] ovn-database -[venus-api:children] -venus - -[venus-manager:children] -venus - [letsencrypt-webserver:children] letsencrypt diff --git a/ansible/library/kolla_container_facts.py b/ansible/library/kolla_container_facts.py index 053b2ef5e1..9886d5252b 100644 --- a/ansible/library/kolla_container_facts.py +++ b/ansible/library/kolla_container_facts.py @@ -20,10 +20,10 @@ DOCUMENTATION = ''' --- module: kolla_container_facts -short_description: Module for collecting Docker container facts +short_description: Module for collecting container facts description: - - A module targeting at collecting Docker container facts. It is used for - detecting whether the container is running on host in Kolla. + - A module targeted at collecting container facts. It is used for + retrieving data about containers like their environment or state. 
options: container_engine: description: @@ -32,7 +32,7 @@ type: str api_version: description: - - The version of the api for docker-py to use when contacting docker + - The version of the API for container SDK to use required: False type: str default: auto @@ -44,18 +44,45 @@ action: description: - The action to perform + - The action "get_containers" only returns running containers, unless + argument get_all_containers is True required: True type: str -author: Jeffrey Zhang, Michal Nasiadka, Ivan Halomi + choices: + - get_containers + - get_container_env + - get_container_state + args: + description: + - Additional arguments for actions + required: False + type: dict + elements: dict + suboptions: + get_all_containers: + description: + - Get all containers, even stopped containers when + performing action "get_containers" + type: bool + required: False + default: False +author: Jeffrey Zhang, Michal Nasiadka, Roman Krček, Ivan Halomi ''' EXAMPLES = ''' - hosts: all tasks: - - name: Gather docker facts + - name: Gather docker facts for running containers + kolla_container_facts: + container_engine: docker + action: get_containers + + - name: Gather docker facts for all containers kolla_container_facts: container_engine: docker - action: get_containers + action: get_containers + args: + get_all_containers: true - name: Gather glance container facts kolla_container_facts: @@ -136,9 +163,11 @@ def get_containers_names(self): def get_containers(self): """Handle when module is called with action get_containers""" names = self.params.get('name') + args = self.params.get('args', {}) + get_all_containers = args.get('get_all_containers', False) self.result['containers'] = dict() - containers = self.client.containers.list() + containers = self.client.containers.list(all=get_all_containers) for container in containers: container.reload() container_name = container.name @@ -226,9 +255,19 @@ def main(): action=dict(required=True, type='str', choices=['get_containers', 'get_containers_env', - 'get_containers_state', + 'get_volumes', 'get_containers_names', - 'get_volumes']), + 'get_containers_state']), + args=dict( + type='dict', + required=False, + default={}, + options=dict( + get_all_containers=dict(required=False, + type='bool', + default=False) + ) + ) ) required_if = [ diff --git a/ansible/module_utils/kolla_container_worker.py b/ansible/module_utils/kolla_container_worker.py index 3e16ac2c9b..d9d2bf7142 100644 --- a/ansible/module_utils/kolla_container_worker.py +++ b/ansible/module_utils/kolla_container_worker.py @@ -410,7 +410,7 @@ def generate_volumes(self, binds=None): vol_dict = dict() for vol in volumes: - if len(vol) == 0: + if not vol: continue if ':' not in vol: diff --git a/ansible/module_utils/kolla_docker_worker.py b/ansible/module_utils/kolla_docker_worker.py index f0799f0645..267cc27a86 100644 --- a/ansible/module_utils/kolla_docker_worker.py +++ b/ansible/module_utils/kolla_docker_worker.py @@ -469,8 +469,11 @@ def create_volume(self, name=None): labels={'kolla_managed': 'true'}) def create_container_volumes(self): - volumes = self.params.get("volumes", []) - + volumes = self.params.get('volumes') + if not volumes: + return + # Filter out null / empty string volumes + volumes = [v for v in volumes if v] for volume in volumes: volume_name = volume.split(":")[0] if "/" in volume_name: diff --git a/ansible/module_utils/kolla_podman_worker.py b/ansible/module_utils/kolla_podman_worker.py index 2f323eff24..d7b09a1354 100644 --- a/ansible/module_utils/kolla_podman_worker.py +++ 
b/ansible/module_utils/kolla_podman_worker.py @@ -85,7 +85,7 @@ def prepare_container_args(self): # functionality is broken mounts = [] filtered_volumes = {} - volumes = self.params.get('volumes', []) + volumes = self.params.get('volumes') if volumes: self.parse_volumes(volumes, mounts, filtered_volumes) # we can delete original volumes so it won't raise error later @@ -149,10 +149,10 @@ def prepare_container_args(self): # Therefore, we must parse them and set the permissions ourselves # and send them to API separately. def parse_volumes(self, volumes, mounts, filtered_volumes): - # we can ignore empty strings - volumes = [item for item in volumes if item.strip()] - for item in volumes: + if not item or not item.strip(): + # we can ignore empty strings or null volumes + continue # if it starts with / it is bind not volume if item[0] == '/': mode = None @@ -220,7 +220,7 @@ def parse_dimensions(self, dimensions): # NOTE(m.hiner): default ulimits have to be filtered out because # Podman would treat them as new ulimits and break the container # as a result. Names are a copy of - # default_podman_dimensions_el9 in /ansible/group_vars/all.yml + # default_podman_dimensions_el9 in group_vars for name in ['RLIMIT_NOFILE', 'RLIMIT_NPROC']: ulimits.pop(name, None) @@ -642,7 +642,11 @@ def create_volume(self, name=None): self.result = vol.attrs def create_container_volumes(self): - volumes = self.params.get("volumes", []) or [] + volumes = self.params.get('volumes') + if not volumes: + return + # Filter out null / empty string volumes + volumes = [v for v in volumes if v] for volume in volumes: volume_name = volume.split(":")[0] diff --git a/ansible/roles/aodh/defaults/main.yml b/ansible/roles/aodh/defaults/main.yml index 30796e9c77..530601f7b2 100644 --- a/ansible/roles/aodh/defaults/main.yml +++ b/ansible/roles/aodh/defaults/main.yml @@ -8,6 +8,7 @@ aodh_services: volumes: "{{ aodh_api_default_volumes + aodh_api_extra_volumes }}" dimensions: "{{ aodh_api_dimensions }}" healthcheck: "{{ aodh_api_healthcheck }}" + wsgi: "aodh.wsgi.api:application" haproxy: aodh_api: enabled: "{{ enable_aodh }}" @@ -257,3 +258,8 @@ aodh_database_enable_tls_internal: "{{ database_enable_tls_internal | bool }}" # Copy certificates ################### aodh_copy_certs: "{{ kolla_copy_ca_into_containers | bool or aodh_database_enable_tls_internal | bool }}" + +#################### +# WSGI +#################### +aodh_wsgi_provider: "uwsgi" diff --git a/ansible/roles/aodh/tasks/config.yml b/ansible/roles/aodh/tasks/config.yml index e5701c87ec..2542d061b3 100644 --- a/ansible/roles/aodh/tasks/config.yml +++ b/ansible/roles/aodh/tasks/config.yml @@ -26,7 +26,7 @@ aodh_policy_file: "{{ aodh_policy.results.0.stat.path | basename }}" aodh_policy_file_path: "{{ aodh_policy.results.0.stat.path }}" when: - - aodh_policy.results + - aodh_policy.results | length > 0 - name: Copying over existing policy file template: @@ -65,6 +65,20 @@ become: true with_dict: "{{ aodh_services | select_services_enabled_and_mapped_to_host }}" +- name: "Configure uWSGI for {{ project_name }}" + include_role: + name: service-uwsgi-config + vars: + project_services: "{{ aodh_services }}" + service: "{{ aodh_services[service_name] }}" + service_name: "aodh-api" + service_uwsgi_config_http_port: "{{ aodh_api_listen_port }}" + service_uwsgi_config_module: "{{ service.wsgi }}" + service_uwsgi_config_uid: "aodh" + when: + - aodh_wsgi_provider == "uwsgi" + - service | service_enabled_and_mapped_to_host + - name: Copying over wsgi-aodh files for services vars: 
service: "{{ aodh_services['aodh-api'] }}" @@ -73,4 +87,6 @@ dest: "{{ node_config_directory }}/aodh-api/wsgi-aodh.conf" mode: "0660" become: true - when: service | service_enabled_and_mapped_to_host + when: + - aodh_wsgi_provider == "apache" + - service | service_enabled_and_mapped_to_host diff --git a/ansible/roles/aodh/templates/aodh-api.json.j2 b/ansible/roles/aodh/templates/aodh-api.json.j2 index b7d4feff77..844fe8db93 100644 --- a/ansible/roles/aodh/templates/aodh-api.json.j2 +++ b/ansible/roles/aodh/templates/aodh-api.json.j2 @@ -1,20 +1,27 @@ -{% set aodh_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} +{% set apache_binary = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} {% set aodh_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %} +{% set command = ('/usr/sbin/' + apache_binary + ' -DFOREGROUND') if aodh_wsgi_provider == 'apache' else 'uwsgi /etc/aodh/aodh-api-uwsgi.ini' %} { - "command": "{{ aodh_cmd }} -DFOREGROUND", + "command": "{{ command }}", "config_files": [ { "source": "{{ container_config_directory }}/aodh.conf", "dest": "/etc/aodh/aodh.conf", "owner": "aodh", "perm": "0600" - }, + }{% if aodh_wsgi_provider == "apache" %}, { "source": "{{ container_config_directory }}/wsgi-aodh.conf", "dest": "/etc/{{ aodh_dir }}/wsgi-aodh.conf", "owner": "root", "perm": "0600" - }{% if aodh_policy_file is defined %}, + }{% elif aodh_wsgi_provider == 'uwsgi' %}, + { + "source": "{{ container_config_directory }}/aodh-api-uwsgi.ini", + "dest": "/etc/aodh/aodh-api-uwsgi.ini", + "owner": "aodh", + "perm": "0600" + }{% endif %}{% if aodh_policy_file is defined %}, { "source": "{{ container_config_directory }}/{{ aodh_policy_file }}", "dest": "/etc/aodh/{{ aodh_policy_file }}", diff --git a/ansible/roles/aodh/templates/aodh.conf.j2 b/ansible/roles/aodh/templates/aodh.conf.j2 index d172ad72d6..c9c1803850 100644 --- a/ansible/roles/aodh/templates/aodh.conf.j2 +++ b/ansible/roles/aodh/templates/aodh.conf.j2 @@ -2,6 +2,9 @@ [DEFAULT] auth_strategy = keystone log_dir = /var/log/kolla/aodh +{% if service_name == "aodh-api" %} +log_file = aodh-api.log +{% endif %} debug = {{ aodh_logging_debug }} evaluation_interval = {{ aodh_evaluation_interval }} transport_url = {{ rpc_transport_url }} diff --git a/ansible/roles/barbican/tasks/config.yml b/ansible/roles/barbican/tasks/config.yml index 60a06c64dd..2870735d90 100644 --- a/ansible/roles/barbican/tasks/config.yml +++ b/ansible/roles/barbican/tasks/config.yml @@ -40,7 +40,7 @@ barbican_policy_file: "{{ barbican_policy.results.0.stat.path | basename }}" barbican_policy_file_path: "{{ barbican_policy.results.0.stat.path }}" when: - - barbican_policy.results + - barbican_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/blazar/tasks/config.yml b/ansible/roles/blazar/tasks/config.yml index 942107fd26..d411eecf85 100644 --- a/ansible/roles/blazar/tasks/config.yml +++ b/ansible/roles/blazar/tasks/config.yml @@ -26,7 +26,7 @@ blazar_policy_file: "{{ blazar_policy.results.0.stat.path | basename }}" blazar_policy_file_path: "{{ blazar_policy.results.0.stat.path }}" when: - - blazar_policy.results + - blazar_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/ceilometer/tasks/config.yml b/ansible/roles/ceilometer/tasks/config.yml index eda99e6e71..faa40fa9db 100644 --- a/ansible/roles/ceilometer/tasks/config.yml +++ b/ansible/roles/ceilometer/tasks/config.yml @@ -155,7 +155,7 @@ 
ceilometer_policy_file: "{{ ceilometer_policy.results.0.stat.path | basename }}" ceilometer_policy_file_path: "{{ ceilometer_policy.results.0.stat.path }}" when: - - ceilometer_policy.results + - ceilometer_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/cinder/tasks/config.yml b/ansible/roles/cinder/tasks/config.yml index bc4c2c838e..2b7a05bee5 100644 --- a/ansible/roles/cinder/tasks/config.yml +++ b/ansible/roles/cinder/tasks/config.yml @@ -38,7 +38,7 @@ cinder_policy_file: "{{ cinder_policy.results.0.stat.path | basename }}" cinder_policy_file_path: "{{ cinder_policy.results.0.stat.path }}" when: - - cinder_policy.results + - cinder_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/cinder/templates/cinder-wsgi.conf.j2 b/ansible/roles/cinder/templates/cinder-wsgi.conf.j2 index b467bab02a..b663c73b4b 100644 --- a/ansible/roles/cinder/templates/cinder-wsgi.conf.j2 +++ b/ansible/roles/cinder/templates/cinder-wsgi.conf.j2 @@ -23,9 +23,7 @@ LogLevel info WSGIScriptAlias / /var/www/cgi-bin/cinder/cinder-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - <IfVersion >= 2.4> - ErrorLogFormat "%{cu}t %M" - </IfVersion> + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/kolla/cinder/cinder-api-error.log LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog /var/log/kolla/cinder/cinder-api-access.log logformat diff --git a/ansible/roles/cloudkitty/tasks/config.yml b/ansible/roles/cloudkitty/tasks/config.yml index fae192d301..2b1d357d8f 100644 --- a/ansible/roles/cloudkitty/tasks/config.yml +++ b/ansible/roles/cloudkitty/tasks/config.yml @@ -26,7 +26,7 @@ cloudkitty_policy_file: "{{ cloudkitty_policy.results.0.stat.path | basename }}" cloudkitty_policy_file_path: "{{ cloudkitty_policy.results.0.stat.path }}" when: - - cloudkitty_policy.results + - cloudkitty_policy.results | length > 0 - name: Check if custom {{ cloudkitty_custom_metrics_yaml_file }} exists stat: diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml index 4ca1b704b7..715b335897 100644 --- a/ansible/roles/common/defaults/main.yml +++ b/ansible/roles/common/defaults/main.yml @@ -12,32 +12,18 @@ common_services: privileged: True volumes: "{{ kolla_toolbox_default_volumes + kolla_toolbox_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ kolla_toolbox_dimensions }}" - cron: - container_name: cron - group: cron - enabled: True - image: "{{ cron_image_full }}" - environment: - KOLLA_LOGROTATE_SCHEDULE: "{{ cron_logrotate_schedule }}" - volumes: "{{ cron_default_volumes + cron_extra_volumes }}" - dimensions: "{{ cron_dimensions }}" #################### # Docker #################### common_tag: "{{ openstack_tag }}" -cron_dimensions: "{{ default_container_dimensions }}" kolla_toolbox_dimensions: "{{ default_container_dimensions }}" kolla_toolbox_image: "{{ docker_image_url }}kolla-toolbox" kolla_toolbox_tag: "{{ common_tag }}" kolla_toolbox_image_full: "{{ kolla_toolbox_image }}:{{ kolla_toolbox_tag }}" -cron_image: "{{ docker_image_url }}cron" -cron_tag: "{{ common_tag }}" -cron_image_full: "{{ cron_image }}:{{ cron_tag }}" - kolla_toolbox_default_volumes: - "{{ node_config_directory }}/kolla-toolbox/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" @@ -45,19 +31,7 @@ kolla_toolbox_default_volumes: - "/dev/:/dev/" - "/run/:/run/{{ ':shared' if kolla_container_engine == 'docker' else '' }}" # see:
https://github.com/containers/podman/issues/16305 - "kolla_logs:/var/log/kolla/" -cron_default_volumes: - - "{{ node_config_directory }}/cron/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "kolla_logs:/var/log/kolla/" kolla_toolbox_extra_volumes: "{{ default_extra_volumes }}" -cron_extra_volumes: "{{ default_extra_volumes }}" - -cron_logrotate_log_maxsize: "100M" -cron_logrotate_log_minsize: "30M" -cron_logrotate_rotation_interval: "weekly" -cron_logrotate_rotation_count: 6 -cron_logrotate_schedule: "daily" ################### # Copy certificates diff --git a/ansible/roles/common/handlers/main.yml b/ansible/roles/common/handlers/main.yml index c963de693b..137b4f9cd7 100644 --- a/ansible/roles/common/handlers/main.yml +++ b/ansible/roles/common/handlers/main.yml @@ -20,17 +20,3 @@ become: true command: "{{ kolla_container_engine }} exec -t {{ common_services['kolla-toolbox']['container_name'] }} ansible --version" changed_when: false - -- name: Restart cron container - vars: - service_name: "cron" - service: "{{ common_services[service_name] }}" - become: true - kolla_container: - action: "recreate_or_restart_container" - common_options: "{{ docker_common_options }}" - name: "{{ service.container_name }}" - image: "{{ service.image }}" - volumes: "{{ service.volumes }}" - environment: "{{ service.environment }}" - dimensions: "{{ service.dimensions }}" diff --git a/ansible/roles/common/tasks/config.yml b/ansible/roles/common/tasks/config.yml index 2562c31e5c..3d8f0bb36b 100644 --- a/ansible/roles/common/tasks/config.yml +++ b/ansible/roles/common/tasks/config.yml @@ -11,10 +11,7 @@ mode: "0770" become: true with_subelements: - - - service_name: "cron" - paths: - - "cron" - - service_name: "kolla-toolbox" + - - service_name: "kolla-toolbox" paths: - "kolla-toolbox" - paths @@ -24,24 +21,6 @@ when: - common_copy_certs | bool -- name: Copying over /run subdirectories conf - become: true - template: - src: kolla-directories.conf.j2 - dest: /etc/tmpfiles.d/kolla.conf - when: kolla_container_engine == 'podman' - -- name: Restart systemd-tmpfiles - become: true - command: systemd-tmpfiles --create - when: kolla_container_engine == 'podman' - -- name: Copying over kolla.target - become: true - template: - src: kolla.target.j2 - dest: /etc/systemd/system/kolla.target - - name: Copying over config.json files for services template: src: "{{ item.key }}.json.j2" @@ -50,72 +29,6 @@ become: true with_dict: "{{ common_services | select_services_enabled_and_mapped_to_host }}" -- name: Copying over cron logrotate config file - vars: - cron_logrotate_enabled_services: >- - {{ cron_logrotate_services | - selectattr('enabled') | - map(attribute='name') | - list }} - cron_logrotate_services: - - { name: "ansible", enabled: "yes" } - - { name: "aodh", enabled: "{{ enable_aodh | bool }}" } - - { name: "barbican", enabled: "{{ enable_barbican | bool }}" } - - { name: "blazar", enabled: "{{ enable_blazar | bool }}" } - - { name: "ceilometer", enabled: "{{ enable_ceilometer | bool }}" } - - { name: "cinder", enabled: "{{ enable_cinder | bool }}" } - - { name: "cloudkitty", enabled: "{{ enable_cloudkitty | bool }}" } - - { name: "collectd", enabled: "{{ enable_collectd | bool }}" } - - { name: "cyborg", enabled: "{{ enable_cyborg | bool }}" } - - { name: "designate", enabled: "{{ enable_designate | bool }}" } - - { name: "etcd", enabled: "{{ enable_etcd | bool }}" } - - { name: "fluentd", 
enabled: "{{ enable_fluentd | bool }}" } - - { name: "glance", enabled: "{{ enable_glance | bool }}" } - - { name: "glance-tls-proxy", enabled: "{{ glance_enable_tls_backend | bool }}" } - - { name: "gnocchi", enabled: "{{ enable_gnocchi | bool }}" } - - { name: "grafana", enabled: "{{ enable_grafana | bool }}" } - - { name: "hacluster", enabled: "{{ enable_hacluster | bool }}" } - - { name: "haproxy", enabled: "{{ enable_haproxy | bool }}" } - - { name: "heat", enabled: "{{ enable_heat | bool }}" } - - { name: "horizon", enabled: "{{ enable_horizon | bool }}" } - - { name: "influxdb", enabled: "{{ enable_influxdb | bool }}" } - - { name: "ironic", enabled: "{{ enable_ironic | bool }}" } - - { name: "keystone", enabled: "{{ enable_keystone | bool }}" } - - { name: "kuryr", enabled: "{{ enable_kuryr | bool }}" } - - { name: "magnum", enabled: "{{ enable_magnum | bool }}" } - - { name: "manila", enabled: "{{ enable_manila | bool }}" } - - { name: "mariadb", enabled: "{{ enable_mariadb | bool }}" } - - { name: "masakari", enabled: "{{ enable_masakari | bool }}" } - - { name: "mistral", enabled: "{{ enable_mistral | bool }}" } - - { name: "neutron", enabled: "{{ enable_neutron | bool }}" } - - { name: "nova", enabled: "{{ enable_nova | bool }}" } - - { name: "nova-libvirt", enabled: "{{ enable_nova | bool and enable_nova_libvirt_container | bool }}" } - - { name: "octavia", enabled: "{{ enable_octavia | bool }}" } - - { name: "opensearch", enabled: "{{ enable_opensearch | bool or enable_opensearch_dashboards | bool }}" } - - { name: "openvswitch", enabled: "{{ enable_openvswitch | bool }}" } - - { name: "placement", enabled: "{{ enable_placement | bool }}" } - - { name: "prometheus", enabled: "{{ enable_prometheus | bool }}" } - - { name: "proxysql", enabled: "{{ enable_proxysql | bool }}" } - - { name: "rabbitmq", enabled: "{{ enable_rabbitmq | bool }}" } - - { name: "redis", enabled: "{{ enable_redis | bool }}" } - - { name: "skyline", enabled: "{{ enable_skyline | bool }}" } - - { name: "tacker", enabled: "{{ enable_tacker | bool }}" } - - { name: "trove", enabled: "{{ enable_trove | bool }}" } - - { name: "venus", enabled: "{{ enable_venus | bool }}" } - - { name: "watcher", enabled: "{{ enable_watcher | bool }}" } - - { name: "zun", enabled: "{{ enable_zun | bool }}" } - template: - src: "{{ item }}" - dest: "{{ node_config_directory }}/cron/logrotate.conf" - mode: "0660" - become: true - when: - - common_services.cron | service_enabled_and_mapped_to_host - with_first_found: - - "{{ node_custom_config }}/cron/{{ inventory_hostname }}/cron-logrotate-global.conf" - - "{{ node_custom_config }}/cron/cron-logrotate-global.conf" - - "cron-logrotate-global.conf.j2" - - name: Ensure RabbitMQ Erlang cookie exists become: true template: diff --git a/ansible/roles/common/templates/cron-logrotate-venus.conf.j2 b/ansible/roles/common/templates/cron-logrotate-venus.conf.j2 deleted file mode 100644 index 5ff3c425a7..0000000000 --- a/ansible/roles/common/templates/cron-logrotate-venus.conf.j2 +++ /dev/null @@ -1,3 +0,0 @@ -"/var/log/kolla/venus/*.log" -{ -} diff --git a/ansible/roles/common/templates/kolla-directories.conf.j2 b/ansible/roles/common/templates/kolla-directories.conf.j2 deleted file mode 100644 index 3831b21065..0000000000 --- a/ansible/roles/common/templates/kolla-directories.conf.j2 +++ /dev/null @@ -1,3 +0,0 @@ -{% for path in run_default_subdirectories %} -d {{ path }} 0755 root root - - -{% endfor %} diff --git a/ansible/roles/common/templates/kolla.target.j2 
b/ansible/roles/common/templates/kolla.target.j2 deleted file mode 100644 index 1eb3693e55..0000000000 --- a/ansible/roles/common/templates/kolla.target.j2 +++ /dev/null @@ -1,5 +0,0 @@ -[Unit] -Description=Kolla target allowing to start/stop all kolla*@.service instances at once - -[Install] -WantedBy=multi-user.target diff --git a/ansible/roles/cron/defaults/main.yml b/ansible/roles/cron/defaults/main.yml new file mode 100644 index 0000000000..909285ae18 --- /dev/null +++ b/ansible/roles/cron/defaults/main.yml @@ -0,0 +1,38 @@ +--- +cron_services: + cron: + container_name: cron + group: cron + enabled: True + image: "{{ cron_image_full }}" + environment: + KOLLA_LOGROTATE_SCHEDULE: "{{ cron_logrotate_schedule }}" + volumes: "{{ cron_default_volumes + cron_extra_volumes }}" + dimensions: "{{ cron_dimensions }}" + +#################### +# Docker +#################### +cron_dimensions: "{{ default_container_dimensions }}" + +cron_image: "{{ docker_image_url }}cron" +cron_tag: "{{ openstack_tag }}" +cron_image_full: "{{ cron_image }}:{{ cron_tag }}" + +cron_default_volumes: + - "{{ node_config_directory }}/cron/:{{ container_config_directory }}/:ro" + - "/etc/localtime:/etc/localtime:ro" + - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" + - "kolla_logs:/var/log/kolla/" +cron_extra_volumes: "{{ default_extra_volumes }}" + +cron_logrotate_log_maxsize: "100M" +cron_logrotate_log_minsize: "30M" +cron_logrotate_rotation_interval: "weekly" +cron_logrotate_rotation_count: 6 +cron_logrotate_schedule: "daily" + +################### +# Copy certificates +################### +cron_copy_certs: "{{ kolla_copy_ca_into_containers | bool }}" diff --git a/ansible/roles/cron/handlers/main.yml b/ansible/roles/cron/handlers/main.yml new file mode 100644 index 0000000000..a9581eba1e --- /dev/null +++ b/ansible/roles/cron/handlers/main.yml @@ -0,0 +1,14 @@ +--- +- name: Restart cron container + vars: + service_name: "cron" + service: "{{ cron_services[service_name] }}" + become: true + kolla_container: + action: "recreate_or_restart_container" + common_options: "{{ docker_common_options }}" + name: "{{ service.container_name }}" + image: "{{ service.image }}" + volumes: "{{ service.volumes }}" + environment: "{{ service.environment }}" + dimensions: "{{ service.dimensions }}" diff --git a/ansible/roles/cron/tasks/bootstrap.yml b/ansible/roles/cron/tasks/bootstrap.yml new file mode 100644 index 0000000000..ed97d539c0 --- /dev/null +++ b/ansible/roles/cron/tasks/bootstrap.yml @@ -0,0 +1 @@ +--- diff --git a/ansible/roles/venus/tasks/check-containers.yml b/ansible/roles/cron/tasks/check-containers.yml similarity index 100% rename from ansible/roles/venus/tasks/check-containers.yml rename to ansible/roles/cron/tasks/check-containers.yml diff --git a/ansible/roles/venus/tasks/check.yml b/ansible/roles/cron/tasks/check.yml similarity index 55% rename from ansible/roles/venus/tasks/check.yml rename to ansible/roles/cron/tasks/check.yml index 63d29a6f31..c6eeb2833f 100644 --- a/ansible/roles/venus/tasks/check.yml +++ b/ansible/roles/cron/tasks/check.yml @@ -1,4 +1,4 @@ --- -- name: Checking Venus containers +- name: Checking Cron containers import_role: role: service-check diff --git a/ansible/roles/cron/tasks/config.yml b/ansible/roles/cron/tasks/config.yml new file mode 100644 index 0000000000..e1a5e30e90 --- /dev/null +++ b/ansible/roles/cron/tasks/config.yml @@ -0,0 +1,100 @@ +--- +- name: Ensuring config directories exist + vars: + service_name: "{{ 'cron' }}" + service: "{{ 
cron_services[service_name] }}" + file: + path: "{{ node_config_directory }}/{{ service_name }}" + state: "directory" + owner: "{{ config_owner_user }}" + group: "{{ config_owner_group }}" + mode: "0770" + become: true + when: service | service_enabled_and_mapped_to_host + +- include_tasks: copy-certs.yml + when: + - cron_copy_certs | bool + +- name: Copying over config.json files for services + template: + src: "{{ item.key }}.json.j2" + dest: "{{ node_config_directory }}/{{ item.key }}/config.json" + mode: "0660" + become: true + with_dict: "{{ cron_services | select_services_enabled_and_mapped_to_host }}" + +- name: Copying over cron logrotate config file + vars: + cron_logrotate_enabled_services: >- + {{ cron_logrotate_services | + selectattr('enabled') | + map(attribute='name') | + list }} + cron_logrotate_services: + - { name: "ansible", enabled: "yes" } + - { name: "aodh", enabled: "{{ enable_aodh | bool }}" } + - { name: "barbican", enabled: "{{ enable_barbican | bool }}" } + - { name: "blazar", enabled: "{{ enable_blazar | bool }}" } + - { name: "ceilometer", enabled: "{{ enable_ceilometer | bool }}" } + - { name: "cinder", enabled: "{{ enable_cinder | bool }}" } + - { name: "cloudkitty", enabled: "{{ enable_cloudkitty | bool }}" } + - { name: "collectd", enabled: "{{ enable_collectd | bool }}" } + - { name: "cyborg", enabled: "{{ enable_cyborg | bool }}" } + - { name: "designate", enabled: "{{ enable_designate | bool }}" } + - { name: "etcd", enabled: "{{ enable_etcd | bool }}" } + - { name: "fluentd", enabled: "{{ enable_fluentd | bool }}" } + - { name: "glance", enabled: "{{ enable_glance | bool }}" } + - { name: "glance-tls-proxy", enabled: "{{ glance_enable_tls_backend | bool }}" } + - { name: "gnocchi", enabled: "{{ enable_gnocchi | bool }}" } + - { name: "grafana", enabled: "{{ enable_grafana | bool }}" } + - { name: "hacluster", enabled: "{{ enable_hacluster | bool }}" } + - { name: "haproxy", enabled: "{{ enable_haproxy | bool }}" } + - { name: "heat", enabled: "{{ enable_heat | bool }}" } + - { name: "horizon", enabled: "{{ enable_horizon | bool }}" } + - { name: "influxdb", enabled: "{{ enable_influxdb | bool }}" } + - { name: "ironic", enabled: "{{ enable_ironic | bool }}" } + - { name: "keystone", enabled: "{{ enable_keystone | bool }}" } + - { name: "kuryr", enabled: "{{ enable_kuryr | bool }}" } + - { name: "magnum", enabled: "{{ enable_magnum | bool }}" } + - { name: "manila", enabled: "{{ enable_manila | bool }}" } + - { name: "mariadb", enabled: "{{ enable_mariadb | bool }}" } + - { name: "masakari", enabled: "{{ enable_masakari | bool }}" } + - { name: "mistral", enabled: "{{ enable_mistral | bool }}" } + - { name: "neutron", enabled: "{{ enable_neutron | bool }}" } + - { name: "nova", enabled: "{{ enable_nova | bool }}" } + - { name: "nova-libvirt", enabled: "{{ enable_nova | bool and enable_nova_libvirt_container | bool }}" } + - { name: "octavia", enabled: "{{ enable_octavia | bool }}" } + - { name: "opensearch", enabled: "{{ enable_opensearch | bool or enable_opensearch_dashboards | bool }}" } + - { name: "openvswitch", enabled: "{{ enable_openvswitch | bool }}" } + - { name: "placement", enabled: "{{ enable_placement | bool }}" } + - { name: "prometheus", enabled: "{{ enable_prometheus | bool }}" } + - { name: "proxysql", enabled: "{{ enable_proxysql | bool }}" } + - { name: "rabbitmq", enabled: "{{ enable_rabbitmq | bool }}" } + - { name: "redis", enabled: "{{ enable_redis | bool }}" } + - { name: "skyline", enabled: "{{ enable_skyline | bool }}" } + - { 
name: "tacker", enabled: "{{ enable_tacker | bool }}" } + - { name: "trove", enabled: "{{ enable_trove | bool }}" } + - { name: "watcher", enabled: "{{ enable_watcher | bool }}" } + - { name: "zun", enabled: "{{ enable_zun | bool }}" } + template: + src: "{{ item }}" + dest: "{{ node_config_directory }}/cron/logrotate.conf" + mode: "0660" + become: true + when: + - cron_services.cron | service_enabled_and_mapped_to_host + with_first_found: + - "{{ node_custom_config }}/cron/{{ inventory_hostname }}/cron-logrotate-global.conf" + - "{{ node_custom_config }}/cron/cron-logrotate-global.conf" + - "cron-logrotate-global.conf.j2" + +- name: Ensuring config directories have correct owner and permission + become: true + file: + path: "{{ node_config_directory }}/{{ item.key }}" + owner: "{{ config_owner_user }}" + group: "{{ config_owner_group }}" + mode: "0770" + ignore_errors: "{{ ansible_check_mode }}" + with_dict: "{{ cron_services | select_services_enabled_and_mapped_to_host }}" diff --git a/ansible/roles/cron/tasks/config_validate.yml b/ansible/roles/cron/tasks/config_validate.yml new file mode 100644 index 0000000000..ed97d539c0 --- /dev/null +++ b/ansible/roles/cron/tasks/config_validate.yml @@ -0,0 +1 @@ +--- diff --git a/ansible/roles/venus/tasks/copy-certs.yml b/ansible/roles/cron/tasks/copy-certs.yml similarity index 71% rename from ansible/roles/venus/tasks/copy-certs.yml rename to ansible/roles/cron/tasks/copy-certs.yml index c0452d546e..3e43bc11b0 100644 --- a/ansible/roles/venus/tasks/copy-certs.yml +++ b/ansible/roles/cron/tasks/copy-certs.yml @@ -3,4 +3,4 @@ import_role: role: service-cert-copy vars: - project_services: "{{ venus_services }}" + project_services: "{{ cron_services }}" diff --git a/ansible/roles/venus/tasks/deploy-containers.yml b/ansible/roles/cron/tasks/deploy-containers.yml similarity index 100% rename from ansible/roles/venus/tasks/deploy-containers.yml rename to ansible/roles/cron/tasks/deploy-containers.yml diff --git a/ansible/roles/venus/tasks/deploy.yml b/ansible/roles/cron/tasks/deploy.yml similarity index 62% rename from ansible/roles/venus/tasks/deploy.yml rename to ansible/roles/cron/tasks/deploy.yml index b9775dda85..d0b36cb78b 100644 --- a/ansible/roles/venus/tasks/deploy.yml +++ b/ansible/roles/cron/tasks/deploy.yml @@ -1,13 +1,8 @@ --- -- import_tasks: register.yml - - import_tasks: config.yml - import_tasks: check-containers.yml -- include_tasks: clone.yml - when: venus_dev_mode | bool - - import_tasks: bootstrap.yml - name: Flush handlers diff --git a/ansible/roles/venus/tasks/main.yml b/ansible/roles/cron/tasks/main.yml similarity index 100% rename from ansible/roles/venus/tasks/main.yml rename to ansible/roles/cron/tasks/main.yml diff --git a/ansible/roles/cron/tasks/precheck.yml b/ansible/roles/cron/tasks/precheck.yml new file mode 100644 index 0000000000..9a65561141 --- /dev/null +++ b/ansible/roles/cron/tasks/precheck.yml @@ -0,0 +1,6 @@ +--- +- import_role: + name: service-precheck + vars: + service_precheck_services: "{{ cron_services }}" + service_name: "{{ project_name }}" diff --git a/ansible/roles/venus/tasks/pull.yml b/ansible/roles/cron/tasks/pull.yml similarity index 100% rename from ansible/roles/venus/tasks/pull.yml rename to ansible/roles/cron/tasks/pull.yml diff --git a/ansible/roles/venus/tasks/reconfigure.yml b/ansible/roles/cron/tasks/reconfigure.yml similarity index 100% rename from ansible/roles/venus/tasks/reconfigure.yml rename to ansible/roles/cron/tasks/reconfigure.yml diff --git a/ansible/roles/venus/tasks/stop.yml 
b/ansible/roles/cron/tasks/stop.yml similarity index 66% rename from ansible/roles/venus/tasks/stop.yml rename to ansible/roles/cron/tasks/stop.yml index 5016a76343..ce903cd50f 100644 --- a/ansible/roles/venus/tasks/stop.yml +++ b/ansible/roles/cron/tasks/stop.yml @@ -2,5 +2,5 @@ - import_role: name: service-stop vars: - project_services: "{{ venus_services }}" + project_services: "{{ cron_services }}" service_name: "{{ project_name }}" diff --git a/ansible/roles/venus/tasks/upgrade.yml b/ansible/roles/cron/tasks/upgrade.yml similarity index 100% rename from ansible/roles/venus/tasks/upgrade.yml rename to ansible/roles/cron/tasks/upgrade.yml diff --git a/ansible/roles/common/templates/cron-logrotate-ansible.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-ansible.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-ansible.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-ansible.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-aodh.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-aodh.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-aodh.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-aodh.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-barbican.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-barbican.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-barbican.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-barbican.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-blazar.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-blazar.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-blazar.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-blazar.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-ceilometer.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-ceilometer.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-ceilometer.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-ceilometer.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-cinder.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-cinder.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-cinder.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-cinder.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-cloudkitty.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-cloudkitty.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-cloudkitty.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-cloudkitty.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-collectd.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-collectd.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-collectd.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-collectd.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-cyborg.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-cyborg.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-cyborg.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-cyborg.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-designate.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-designate.conf.j2 similarity index 100% rename 
from ansible/roles/common/templates/cron-logrotate-designate.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-designate.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-etcd.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-etcd.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-etcd.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-etcd.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-fluentd.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-fluentd.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-fluentd.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-fluentd.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-glance-tls-proxy.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-glance-tls-proxy.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-glance-tls-proxy.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-glance-tls-proxy.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-glance.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-glance.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-glance.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-glance.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-global.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-global.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-global.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-global.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-gnocchi.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-gnocchi.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-gnocchi.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-gnocchi.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-grafana.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-grafana.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-grafana.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-grafana.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-hacluster.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-hacluster.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-hacluster.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-hacluster.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-haproxy.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-haproxy.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-haproxy.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-haproxy.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-heat.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-heat.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-heat.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-heat.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-horizon.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-horizon.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-horizon.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-horizon.conf.j2 diff --git 
a/ansible/roles/common/templates/cron-logrotate-influxdb.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-influxdb.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-influxdb.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-influxdb.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-ironic.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-ironic.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-ironic.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-ironic.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-keystone.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-keystone.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-keystone.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-keystone.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-kuryr.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-kuryr.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-kuryr.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-kuryr.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-letsencrypt.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-letsencrypt.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-letsencrypt.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-letsencrypt.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-magnum.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-magnum.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-magnum.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-magnum.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-manila.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-manila.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-manila.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-manila.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-mariadb.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-mariadb.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-mariadb.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-mariadb.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-masakari.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-masakari.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-masakari.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-masakari.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-mistral.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-mistral.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-mistral.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-mistral.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-neutron.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-neutron.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-neutron.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-neutron.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-nova-libvirt.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-nova-libvirt.conf.j2 similarity index 100% rename from 
ansible/roles/common/templates/cron-logrotate-nova-libvirt.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-nova-libvirt.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-nova.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-nova.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-nova.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-nova.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-octavia.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-octavia.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-octavia.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-octavia.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-opensearch.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-opensearch.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-opensearch.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-opensearch.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-openvswitch.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-openvswitch.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-openvswitch.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-openvswitch.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-placement.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-placement.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-placement.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-placement.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-prometheus.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-prometheus.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-prometheus.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-prometheus.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-proxysql.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-proxysql.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-proxysql.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-proxysql.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-rabbitmq.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-rabbitmq.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-rabbitmq.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-rabbitmq.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-redis.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-redis.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-redis.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-redis.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-skyline.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-skyline.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-skyline.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-skyline.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-swift.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-swift.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-swift.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-swift.conf.j2 diff --git 
a/ansible/roles/common/templates/cron-logrotate-tacker.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-tacker.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-tacker.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-tacker.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-trove.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-trove.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-trove.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-trove.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-watcher.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-watcher.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-watcher.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-watcher.conf.j2 diff --git a/ansible/roles/common/templates/cron-logrotate-zun.conf.j2 b/ansible/roles/cron/templates/cron-logrotate-zun.conf.j2 similarity index 100% rename from ansible/roles/common/templates/cron-logrotate-zun.conf.j2 rename to ansible/roles/cron/templates/cron-logrotate-zun.conf.j2 diff --git a/ansible/roles/common/templates/cron.json.j2 b/ansible/roles/cron/templates/cron.json.j2 similarity index 100% rename from ansible/roles/common/templates/cron.json.j2 rename to ansible/roles/cron/templates/cron.json.j2 diff --git a/ansible/roles/cron/vars/main.yml b/ansible/roles/cron/vars/main.yml new file mode 100644 index 0000000000..3c0c0f60d8 --- /dev/null +++ b/ansible/roles/cron/vars/main.yml @@ -0,0 +1,2 @@ +--- +project_name: "cron" diff --git a/ansible/roles/cyborg/tasks/config.yml b/ansible/roles/cyborg/tasks/config.yml index c1fb81c0c2..463b1a127b 100644 --- a/ansible/roles/cyborg/tasks/config.yml +++ b/ansible/roles/cyborg/tasks/config.yml @@ -26,7 +26,7 @@ cyborg_policy_file: "{{ cyborg_policy.results.0.stat.path | basename }}" cyborg_policy_file_path: "{{ cyborg_policy.results.0.stat.path }}" when: - - cyborg_policy.results + - cyborg_policy.results | length > 0 - name: Copying over existing policy file template: diff --git a/ansible/roles/designate/tasks/config.yml b/ansible/roles/designate/tasks/config.yml index d61d2958dd..2b05382488 100644 --- a/ansible/roles/designate/tasks/config.yml +++ b/ansible/roles/designate/tasks/config.yml @@ -26,7 +26,7 @@ designate_policy_file: "{{ designate_policy.results.0.stat.path | basename }}" designate_policy_file_path: "{{ designate_policy.results.0.stat.path }}" when: - - designate_policy.results + - designate_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/fluentd/defaults/main.yml b/ansible/roles/fluentd/defaults/main.yml index f749a5bea5..dfbe2d635d 100644 --- a/ansible/roles/fluentd/defaults/main.yml +++ b/ansible/roles/fluentd/defaults/main.yml @@ -24,12 +24,9 @@ fluentd_elasticsearch_cacert: "{{ openstack_cacert }}" fluentd_elasticsearch_request_timeout: "60s" fluentd_opensearch_path: "" -fluentd_opensearch_scheme: "{{ internal_protocol }}" +fluentd_opensearch_scheme: "http" fluentd_opensearch_user: "" fluentd_opensearch_password: "" -fluentd_opensearch_ssl_version: "TLSv1_2" -fluentd_opensearch_ssl_verify: "true" -fluentd_opensearch_cacert: "{{ openstack_cacert }}" fluentd_opensearch_request_timeout: "60s" #################### diff --git a/ansible/roles/fluentd/tasks/config.yml b/ansible/roles/fluentd/tasks/config.yml index d1fc3ac338..83a5504f3f 100644 --- a/ansible/roles/fluentd/tasks/config.yml +++ 
b/ansible/roles/fluentd/tasks/config.yml @@ -16,6 +16,16 @@ when: - kolla_copy_ca_into_containers | bool +- name: Ensure /var/log/journal exists on EL systems + become: true + file: + path: /var/log/journal + state: directory + owner: root + group: systemd-journal + mode: "2755" + when: kolla_base_distro in ['centos', 'rocky'] + - name: Copying over config.json files for services template: src: "{{ item.key }}.json.j2" diff --git a/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2 b/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2 index c8e35c8598..66a1897a20 100644 --- a/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2 +++ b/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2 @@ -136,11 +136,6 @@ pattern ^(masakari-engine|masakari-api)$ tag openstack_python - - key programname - pattern ^(venus-api|venus-manager)$ - tag openstack_python - key programname pattern ^(skyline)$ diff --git a/ansible/roles/fluentd/templates/conf/output/00-local.conf.j2 b/ansible/roles/fluentd/templates/conf/output/00-local.conf.j2 index 2c82f539fc..15fc270c54 100644 --- a/ansible/roles/fluentd/templates/conf/output/00-local.conf.j2 +++ b/ansible/roles/fluentd/templates/conf/output/00-local.conf.j2 @@ -51,7 +51,7 @@ {% elif log_direct_to_opensearch %} @type opensearch - host {{ opensearch_address }} + hosts {% for host in groups['opensearch'] %}{{ 'api' | kolla_address(host) }}{% if not loop.last %},{% endif %}{% endfor %} port {{ opensearch_port }} scheme {{ fluentd_opensearch_scheme }} {% if fluentd_opensearch_path != '' %} diff --git a/ansible/roles/fluentd/templates/conf/output/03-opensearch.conf.j2 b/ansible/roles/fluentd/templates/conf/output/03-opensearch.conf.j2 index dd60f1e1cc..02d39f037d 100644 --- a/ansible/roles/fluentd/templates/conf/output/03-opensearch.conf.j2 +++ b/ansible/roles/fluentd/templates/conf/output/03-opensearch.conf.j2 @@ -3,39 +3,29 @@ @type copy @type opensearch - host {{ opensearch_address }} + hosts {% for host in groups['opensearch'] %}{{ 'api' | kolla_address(host) }}{% if not loop.last %},{% endif %}{% endfor %} port {{ opensearch_port }} scheme {{ fluentd_opensearch_scheme }} -{% if fluentd_opensearch_path != '' %} - path {{ fluentd_opensearch_path }} -{% endif %} -{% if fluentd_opensearch_scheme == 'https' %} - ssl_version {{ fluentd_opensearch_ssl_version }} - ssl_verify {{ fluentd_opensearch_ssl_verify }} -{% if fluentd_opensearch_cacert | length > 0 %} - ca_file {{ fluentd_opensearch_cacert }} -{% endif %} -{% endif %} -{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != ''%} +{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != '' -%} user {{ fluentd_opensearch_user }} password {{ fluentd_opensearch_password }} -{% endif %} +{%- endif %} logstash_format true logstash_prefix {{ opensearch_log_index_prefix }} reconnect_on_error true -{% if match_pattern != 'retry_os' %} +{% if match_pattern != 'retry_os' -%} retry_tag retry_os -{% endif %} +{%- endif %} request_timeout {{ fluentd_opensearch_request_timeout }} suppress_type_name true bulk_message_request_threshold {{ fluentd_bulk_message_request_threshold }} @type file -{% if match_pattern == 'retry_os' %} +{% if match_pattern == 'retry_os' -%} path /var/lib/fluentd/data/opensearch.buffer/openstack_retry.* {% else %} path /var/lib/fluentd/data/opensearch.buffer/openstack.* -{% endif %} +{%- endif %} flush_interval 15s chunk_limit_size {{ fluentd_buffer_chunk_limit_size }} diff --git a/ansible/roles/glance/tasks/config.yml 
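# Illustrative sketch: the fluentd output templates above key off existing
# variables, so sending logs straight to the OpenSearch cluster members would
# be a globals.yml change along these lines (credentials are placeholders):
log_direct_to_opensearch: "yes"
fluentd_opensearch_user: "fluentd"
fluentd_opensearch_password: "examplepassword"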
b/ansible/roles/glance/tasks/config.yml index d268e92d32..52ee34a973 100644 --- a/ansible/roles/glance/tasks/config.yml +++ b/ansible/roles/glance/tasks/config.yml @@ -30,7 +30,7 @@ glance_policy_file: "{{ glance_policy.results.0.stat.path | basename }}" glance_policy_file_path: "{{ glance_policy.results.0.stat.path }}" when: - - glance_policy.results + - glance_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/gnocchi/defaults/main.yml b/ansible/roles/gnocchi/defaults/main.yml index e0d3f3c6ad..d185fef648 100644 --- a/ansible/roles/gnocchi/defaults/main.yml +++ b/ansible/roles/gnocchi/defaults/main.yml @@ -8,6 +8,7 @@ gnocchi_services: volumes: "{{ gnocchi_api_default_volumes + gnocchi_api_extra_volumes }}" dimensions: "{{ gnocchi_api_dimensions }}" healthcheck: "{{ gnocchi_api_healthcheck }}" + wsgi: "gnocchi.wsgi.api:application" haproxy: gnocchi_api: enabled: "{{ enable_gnocchi }}" @@ -199,3 +200,8 @@ gnocchi_database_enable_tls_internal: "{{ database_enable_tls_internal | bool }} # Copy certificates ################### gnocchi_copy_certs: "{{ kolla_copy_ca_into_containers | bool or gnocchi_database_enable_tls_internal | bool }}" + +#################### +# WSGI +#################### +gnocchi_wsgi_provider: "uwsgi" diff --git a/ansible/roles/gnocchi/tasks/config.yml b/ansible/roles/gnocchi/tasks/config.yml index c8e6897aee..b978044e29 100644 --- a/ansible/roles/gnocchi/tasks/config.yml +++ b/ansible/roles/gnocchi/tasks/config.yml @@ -30,7 +30,7 @@ gnocchi_policy_file: "{{ gnocchi_policy.results.0.stat.path | basename }}" gnocchi_policy_file_path: "{{ gnocchi_policy.results.0.stat.path }}" when: - - gnocchi_policy.results + - gnocchi_policy.results | length > 0 - include_tasks: copy-certs.yml when: @@ -67,10 +67,26 @@ dest: "{{ node_config_directory }}/{{ item }}/wsgi-gnocchi.conf" mode: "0660" become: true - when: service | service_enabled_and_mapped_to_host + when: + - gnocchi_wsgi_provider == "apache" + - service | service_enabled_and_mapped_to_host with_items: - "gnocchi-api" +- name: "Configure uWSGI for Gnocchi" + include_role: + name: service-uwsgi-config + vars: + project_services: "{{ gnocchi_services }}" + service: "{{ gnocchi_services['gnocchi-api'] }}" + service_name: "gnocchi-api" + service_uwsgi_config_http_port: "{{ gnocchi_api_listen_port }}" + service_uwsgi_config_module: "{{ service.wsgi }}" + service_uwsgi_config_uid: "gnocchi" + when: + - gnocchi_wsgi_provider == "uwsgi" + - service | service_enabled_and_mapped_to_host + - name: Copying over existing policy file template: src: "{{ gnocchi_policy_file_path }}" diff --git a/ansible/roles/gnocchi/templates/gnocchi-api.json.j2 b/ansible/roles/gnocchi/templates/gnocchi-api.json.j2 index de8ed12900..2fc22c6469 100644 --- a/ansible/roles/gnocchi/templates/gnocchi-api.json.j2 +++ b/ansible/roles/gnocchi/templates/gnocchi-api.json.j2 @@ -1,20 +1,27 @@ -{% set gnocchi_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} +{% set apache_binary = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} {% set gnocchi_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %} +{% set command = ('/usr/sbin/' + apache_binary + ' -DFOREGROUND') if gnocchi_wsgi_provider == 'apache' else 'uwsgi /etc/gnocchi/gnocchi-api-uwsgi.ini' %} { - "command": "{{ gnocchi_cmd }} -DFOREGROUND", + "command": "{{ command }}", "config_files": [ { "source": "{{ container_config_directory }}/gnocchi.conf", "dest": 
"/etc/gnocchi/gnocchi.conf", "owner": "gnocchi", "perm": "0600" - }, + }{% if gnocchi_wsgi_provider == 'apache' %}, { "source": "{{ container_config_directory }}/wsgi-gnocchi.conf", "dest": "/etc/{{ gnocchi_dir }}/wsgi-gnocchi.conf", "owner": "gnocchi", "perm": "0600" - }{% if gnocchi_policy_file is defined %}, + }{% elif gnocchi_wsgi_provider == 'uwsgi' %}, + { + "source": "{{ container_config_directory }}/gnocchi-api-uwsgi.ini", + "dest": "/etc/gnocchi/gnocchi-api-uwsgi.ini", + "owner": "gnocchi", + "perm": "0600" + }{% endif %}{% if gnocchi_policy_file is defined %}, { "source": "{{ container_config_directory }}/{{ gnocchi_policy_file }}", "dest": "/etc/gnocchi/{{ gnocchi_policy_file }}", diff --git a/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2 b/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2 index e84923d67d..4cc1568849 100644 --- a/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2 +++ b/ansible/roles/hacluster/templates/hacluster-pacemaker-remote.json.j2 @@ -1,5 +1,6 @@ +{% set remoted = '/usr/sbin/pacemaker-remoted' if kolla_base_distro in ['centos', 'rocky'] else '/usr/sbin/pacemaker_remoted' %} { - "command": "/usr/sbin/pacemaker_remoted -l /var/log/kolla/hacluster/pacemaker-remoted.log{% if openstack_logging_debug | bool %} -VV{% endif %} -p {{ hacluster_pacemaker_remote_port }}", + "command": "{{ remoted }} -l /var/log/kolla/hacluster/pacemaker-remoted.log{% if openstack_logging_debug | bool %} -VV{% endif %} -p {{ hacluster_pacemaker_remote_port }}", "config_files": [ { "source": "{{ container_config_directory }}/authkey", diff --git a/ansible/roles/heat/tasks/config.yml b/ansible/roles/heat/tasks/config.yml index 107e72198e..10271bd8d4 100644 --- a/ansible/roles/heat/tasks/config.yml +++ b/ansible/roles/heat/tasks/config.yml @@ -26,7 +26,7 @@ heat_policy_file: "{{ heat_policy.results.0.stat.path | basename }}" heat_policy_file_path: "{{ heat_policy.results.0.stat.path }}" when: - - heat_policy.results + - heat_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2 b/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2 index 27aea0dd07..b692c2d6fa 100644 --- a/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2 +++ b/ansible/roles/heat/templates/wsgi-heat-api-cfn.conf.j2 @@ -34,9 +34,7 @@ CustomLog "{{ heat_log_dir }}/apache-cfn-access.log" common WSGIScriptAlias / {{ binary_path }}/heat-wsgi-api-cfn WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ heat_log_dir }}/heat-api-cfn-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ heat_log_dir }}/heat-api-cfn-access.log" logformat diff --git a/ansible/roles/heat/templates/wsgi-heat-api.conf.j2 b/ansible/roles/heat/templates/wsgi-heat-api.conf.j2 index d197f764a5..1f285dc153 100644 --- a/ansible/roles/heat/templates/wsgi-heat-api.conf.j2 +++ b/ansible/roles/heat/templates/wsgi-heat-api.conf.j2 @@ -34,9 +34,7 @@ CustomLog "{{ heat_log_dir }}/apache-access.log" common WSGIScriptAlias / {{ binary_path }}/heat-wsgi-api WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ heat_log_dir }}/heat-api-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ heat_log_dir 
}}/heat-api-access.log" logformat diff --git a/ansible/roles/horizon/defaults/main.yml b/ansible/roles/horizon/defaults/main.yml index cdda448e16..01a6e3082a 100644 --- a/ansible/roles/horizon/defaults/main.yml +++ b/ansible/roles/horizon/defaults/main.yml @@ -20,13 +20,13 @@ horizon_services: ENABLE_OCTAVIA: "{{ 'yes' if enable_horizon_octavia | bool else 'no' }}" ENABLE_TACKER: "{{ 'yes' if enable_horizon_tacker | bool else 'no' }}" ENABLE_TROVE: "{{ 'yes' if enable_horizon_trove | bool else 'no' }}" - ENABLE_VENUS: "{{ 'yes' if enable_horizon_venus | bool else 'no' }}" ENABLE_WATCHER: "{{ 'yes' if enable_horizon_watcher | bool else 'no' }}" ENABLE_ZUN: "{{ 'yes' if enable_horizon_zun | bool else 'no' }}" FORCE_GENERATE: "{{ 'yes' if horizon_dev_mode | bool else 'no' }}" volumes: "{{ horizon_default_volumes + horizon_extra_volumes }}" dimensions: "{{ horizon_dimensions }}" healthcheck: "{{ horizon_healthcheck }}" + wsgi: "openstack_dashboard.wsgi:application" haproxy: horizon: enabled: "{{ enable_horizon }}" @@ -168,3 +168,8 @@ horizon_use_keystone_public_url: False # Copy certificates ################### horizon_copy_certs: "{{ kolla_copy_ca_into_containers | bool or horizon_enable_tls_backend | bool }}" + +############ +# WSGI +############ +horizon_wsgi_provider: "uwsgi" diff --git a/ansible/roles/horizon/tasks/config.yml b/ansible/roles/horizon/tasks/config.yml index 37d06c2007..7fece46ebd 100644 --- a/ansible/roles/horizon/tasks/config.yml +++ b/ansible/roles/horizon/tasks/config.yml @@ -59,7 +59,26 @@ - "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/horizon.conf" - "{{ node_custom_config }}/horizon/horizon.conf" - "horizon.conf.j2" - when: service | service_enabled_and_mapped_to_host + when: + - service | service_enabled_and_mapped_to_host + - horizon_wsgi_provider == "apache" + +- name: "Configure uWSGI for Horizon" + include_role: + name: service-uwsgi-config + vars: + project_services: "{{ horizon_services }}" + service: "{{ project_services[service_name] }}" + service_name: "horizon" + service_uwsgi_config_http_port: "{{ horizon_listen_port }}" + service_uwsgi_config_module: "{{ service.wsgi }}" + service_uwsgi_config_tls_backend: "{{ horizon_enable_tls_backend | bool }}" + service_uwsgi_config_tls_cert: "/etc/horizon/certs/horizon-cert.pem" + service_uwsgi_config_tls_key: "/etc/horizon/certs/horizon-key.pem" + service_uwsgi_config_uid: "{{ 'horizon' if enable_haproxy | bool else 'root' }}" + when: + - service | service_enabled_and_mapped_to_host + - horizon_wsgi_provider == "uwsgi" - name: Copying over kolla-settings.py become: true diff --git a/ansible/roles/horizon/tasks/policy_item.yml b/ansible/roles/horizon/tasks/policy_item.yml index 708f60e6ef..7b427108a3 100644 --- a/ansible/roles/horizon/tasks/policy_item.yml +++ b/ansible/roles/horizon/tasks/policy_item.yml @@ -22,4 +22,4 @@ set_fact: custom_policy: "{{ custom_policy + [overwritten_files.results.0.stat.path] }}" when: - - overwritten_files.results + - overwritten_files.results | length > 0 diff --git a/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2 b/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2 index 2abbd6acc2..4860f0a887 100644 --- a/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2 +++ b/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2 @@ -19,6 +19,7 @@ DATABASES = { {% elif groups['memcached'] | length > 0 and not horizon_backend_database | bool %} SESSION_ENGINE = 'django.contrib.sessions.backends.cache' CACHES['default']['LOCATION'] = [{% for host in 
groups['memcached'] %}'{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ memcached_port }}'{% if not loop.last %},{% endif %}{% endfor %}] +CACHES['default']['OPTIONS'] = {'ignore_exc': True} {% endif %} {% if kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool %} diff --git a/ansible/roles/horizon/templates/horizon.json.j2 b/ansible/roles/horizon/templates/horizon.json.j2 index 5e070ff493..edbc335f8a 100644 --- a/ansible/roles/horizon/templates/horizon.json.j2 +++ b/ansible/roles/horizon/templates/horizon.json.j2 @@ -1,14 +1,23 @@ {% set apache_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} {% set apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %} {% set apache_file = '000-default.conf' if kolla_base_distro in ['ubuntu', 'debian'] else 'horizon.conf' %} +{% set uwsgi_cmd = 'uwsgi /etc/horizon/horizon-uwsgi.ini' %} +{% set command = uwsgi_cmd if horizon_wsgi_provider == 'uwsgi' else ('/usr/sbin/' + apache_cmd + ' -DFOREGROUND') %} { - "command": "/usr/sbin/{{ apache_cmd }} -DFOREGROUND", + "command": "{{ command }}", "config_files": [ { +{% if horizon_wsgi_provider == 'apache' %} "source": "{{ container_config_directory }}/horizon.conf", "dest": "/etc/{{ apache_dir }}/{{ apache_file }}", "owner": "horizon", "perm": "0600" +{% elif horizon_wsgi_provider == 'uwsgi' %} + "source": "{{ container_config_directory }}/horizon-uwsgi.ini", + "dest": "/etc/horizon/horizon-uwsgi.ini", + "owner": "horizon", + "perm": "0600" +{% endif %} }, {% for path in custom_policy %} { diff --git a/ansible/roles/ironic/defaults/main.yml b/ansible/roles/ironic/defaults/main.yml index fb485375a2..9c80ad6fea 100644 --- a/ansible/roles/ironic/defaults/main.yml +++ b/ansible/roles/ironic/defaults/main.yml @@ -68,6 +68,17 @@ ironic_services: image: "{{ ironic_dnsmasq_image_full }}" volumes: "{{ ironic_dnsmasq_default_volumes + ironic_dnsmasq_extra_volumes }}" dimensions: "{{ ironic_dnsmasq_dimensions }}" + pid_mode: host + ironic-pxe-filter: + container_name: ironic_pxe_filter + group: ironic-dnsmasq + enabled: "{{ enable_ironic_pxe_filter }}" + image: "{{ ironic_pxe_filter_image_full }}" + volumes: "{{ ironic_pxe_filter_default_volumes + ironic_pxe_filter_extra_volumes }}" + dimensions: "{{ ironic_pxe_filter_dimensions }}" + # TODO: --pid container:ironic_dnsmasq but this is more complicated since we need to + # declare dependency in systemd too. 
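# Illustrative sketch: the new ironic-pxe-filter container above is gated on
# enable_ironic_pxe_filter, so (assuming the usual enable_* plumbing) turning
# it on from globals.yml would look like:
enable_ironic_pxe_filter: "yes"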
+ pid_mode: host ironic-prometheus-exporter: container_name: ironic_prometheus_exporter group: ironic-conductor @@ -127,6 +138,10 @@ ironic_dnsmasq_image: "{{ docker_image_url }}dnsmasq" ironic_dnsmasq_tag: "{{ ironic_tag }}" ironic_dnsmasq_image_full: "{{ ironic_dnsmasq_image }}:{{ ironic_dnsmasq_tag }}" +ironic_pxe_filter_image: "{{ docker_image_url }}ironic-pxe-filter" +ironic_pxe_filter_tag: "{{ ironic_tag }}" +ironic_pxe_filter_image_full: "{{ ironic_pxe_filter_image }}:{{ ironic_pxe_filter_tag }}" + ironic_prometheus_exporter_image: "{{ docker_image_url }}ironic-prometheus-exporter" ironic_prometheus_exporter_tag: "{{ ironic_tag }}" ironic_prometheus_exporter_image_full: "{{ ironic_prometheus_exporter_image }}:{{ ironic_prometheus_exporter_tag }}" @@ -136,6 +151,7 @@ ironic_conductor_dimensions: "{{ default_container_dimensions }}" ironic_tftp_dimensions: "{{ default_container_dimensions }}" ironic_http_dimensions: "{{ default_container_dimensions }}" ironic_dnsmasq_dimensions: "{{ default_container_dimensions }}" +ironic_pxe_filter_dimensions: "{{ default_container_dimensions }}" ironic_prometheus_exporter_dimensions: "{{ default_container_dimensions }}" ironic_api_enable_healthchecks: "{{ enable_container_healthchecks }}" @@ -212,8 +228,16 @@ ironic_dnsmasq_default_volumes: - "{{ node_config_directory }}/ironic-dnsmasq/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "kolla_logs:/var/log/kolla" - "ironic_dhcp_hosts:/etc/dnsmasq/dhcp-hostsdir:ro" + - "kolla_logs:/var/log/kolla" + - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}" +ironic_pxe_filter_default_volumes: + - "{{ node_config_directory }}/ironic-pxe-filter/:{{ container_config_directory }}/:ro" + - "/etc/localtime:/etc/localtime:ro" + - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" + - "kolla_logs:/var/log/kolla" + - "ironic_dhcp_hosts:/etc/dnsmasq/dhcp-hostsdir" + - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}" ironic_prometheus_exporter_default_volumes: - "{{ node_config_directory }}/ironic-prometheus-exporter/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" @@ -227,6 +251,7 @@ ironic_conductor_extra_volumes: "{{ ironic_extra_volumes }}" ironic_tftp_extra_volumes: "{{ ironic_extra_volumes }}" ironic_http_extra_volumes: "{{ ironic_extra_volumes }}" ironic_dnsmasq_extra_volumes: "{{ ironic_extra_volumes }}" +ironic_pxe_filter_extra_volumes: "{{ ironic_extra_volumes }}" ironic_prometheus_exporter_extra_volumes: "{{ ironic_extra_volumes }}" #################### diff --git a/ansible/roles/ironic/handlers/main.yml b/ansible/roles/ironic/handlers/main.yml index d7989a5736..417fbd46f5 100644 --- a/ansible/roles/ironic/handlers/main.yml +++ b/ansible/roles/ironic/handlers/main.yml @@ -69,6 +69,21 @@ volumes: "{{ service.volumes }}" dimensions: "{{ service.dimensions }}" cap_add: "{{ service.cap_add }}" + pid_mode: "{{ service.pid_mode }}" + +- name: Restart ironic-pxe-filter container + vars: + service_name: "ironic-pxe-filter" + service: "{{ ironic_services[service_name] }}" + become: true + kolla_container: + action: "recreate_or_restart_container" + common_options: "{{ docker_common_options }}" + name: "{{ service.container_name }}" + image: "{{ service.image }}" + volumes: "{{ service.volumes }}" + dimensions: "{{ service.dimensions }}" + pid_mode: "{{ service.pid_mode }}" - name: Restart 
ironic-prometheus-exporter container vars: diff --git a/ansible/roles/ironic/tasks/config.yml b/ansible/roles/ironic/tasks/config.yml index 98a3e66e2e..9b7627e189 100644 --- a/ansible/roles/ironic/tasks/config.yml +++ b/ansible/roles/ironic/tasks/config.yml @@ -26,7 +26,7 @@ ironic_policy_file: "{{ ironic_policy.results.0.stat.path | basename }}" ironic_policy_file_path: "{{ ironic_policy.results.0.stat.path }}" when: - - ironic_policy.results + - ironic_policy.results | length > 0 - include_tasks: copy-certs.yml when: @@ -54,7 +54,7 @@ mode: "0660" become: true when: - - item.key in [ "ironic-api", "ironic-conductor", "ironic-prometheus-exporter" ] + - item.key in [ "ironic-api", "ironic-conductor", "ironic-prometheus-exporter", "ironic-pxe-filter" ] with_dict: "{{ ironic_services | select_services_enabled_and_mapped_to_host }}" - name: Copying over dnsmasq.conf diff --git a/ansible/roles/ironic/tasks/rolling_upgrade.yml b/ansible/roles/ironic/tasks/rolling_upgrade.yml index 66a86fcf6e..40998142c1 100644 --- a/ansible/roles/ironic/tasks/rolling_upgrade.yml +++ b/ansible/roles/ironic/tasks/rolling_upgrade.yml @@ -5,22 +5,20 @@ # This is only needed when performing a slow rolling upgrade process # where you need to maintain compatibility between different versions # during the upgrade. For direct version jumps, this section can be skipped. -- import_tasks: config.yml - vars: - pin_release_version: "{{ ironic_pin_release_version }}" +- name: Pin release version for rolling upgrades when: ironic_pin_release_version | length > 0 + block: + - import_tasks: config.yml + vars: + pin_release_version: "{{ ironic_pin_release_version }}" -- import_tasks: check-containers.yml - -- import_tasks: bootstrap_service.yml + - import_tasks: check-containers.yml -# TODO(donghm): Flush_handlers to restart ironic services -# should be run in serial nodes to decrease downtime. Update when -# the module ansible strategy for rolling upgrade is finished. 
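# Illustrative sketch: the rolling-upgrade block above only runs when a pin is
# set; a hypothetical globals.yml entry for a slow rolling upgrade would be:
ironic_pin_release_version: "<previous release>"  # placeholder value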
+ - import_tasks: bootstrap_service.yml -# Restart ironic services with pinned release version -- name: Flush handlers - meta: flush_handlers + # Restart ironic services with pinned release version + - name: Flush handlers + meta: flush_handlers # Unpin version - import_tasks: config.yml diff --git a/ansible/roles/ironic/templates/ipa.ipxe.j2 b/ansible/roles/ironic/templates/ipa.ipxe.j2 index 676f885c45..4ae0d25762 100644 --- a/ansible/roles/ironic/templates/ipa.ipxe.j2 +++ b/ansible/roles/ironic/templates/ipa.ipxe.j2 @@ -13,6 +13,6 @@ chain pxelinux.cfg/${mac:hexhyp} || goto ipa :ipa :retry_boot imgfree -kernel --timeout 30000 {{ ironic_http_url }}/ironic-agent.kernel ipa-inspection-callback-url={{ ironic_internal_endpoint }}/v1/continue systemd.journald.forward_to_console=yes BOOTIF=${mac} initrd=ironic-agent.initramfs {{ ironic_kernel_cmdline_extras | join(' ') }} || goto retry_boot +kernel --timeout 30000 {{ ironic_http_url }}/ironic-agent.kernel ipa-inspection-callback-url={{ ironic_internal_endpoint }}/v1/continue_inspection systemd.journald.forward_to_console=yes BOOTIF=${mac} initrd=ironic-agent.initramfs {{ ironic_kernel_cmdline_extras | join(' ') }} || goto retry_boot initrd --timeout 30000 {{ ironic_http_url }}/ironic-agent.initramfs || goto retry_boot boot diff --git a/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2 b/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2 index 7e62ac2c87..ecb7a93daa 100644 --- a/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2 +++ b/ansible/roles/ironic/templates/ironic-api-wsgi.conf.j2 @@ -37,9 +37,7 @@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/ironic-api-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ ironic_log_dir }}/ironic-api-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ ironic_log_dir }}/ironic-api-access.log" logformat diff --git a/ansible/roles/ironic/templates/ironic-pxe-filter.json.j2 b/ansible/roles/ironic/templates/ironic-pxe-filter.json.j2 new file mode 100644 index 0000000000..6bcf7e351b --- /dev/null +++ b/ansible/roles/ironic/templates/ironic-pxe-filter.json.j2 @@ -0,0 +1,23 @@ +{ + "command": "ironic-pxe-filter --config-file /etc/ironic/ironic.conf --log-file /var/log/kolla/ironic/ironic-pxe-filter.log", + "config_files": [ + { + "source": "{{ container_config_directory }}/ironic.conf", + "dest": "/etc/ironic/ironic.conf", + "owner": "ironic", + "perm": "0600" + } + ], + "permissions": [ + { + "path": "/var/log/kolla/ironic", + "owner": "ironic:ironic", + "recurse": true + }, + { + "path": "/var/lib/ironic", + "owner": "ironic:ironic", + "recurse": true + } + ] +} diff --git a/ansible/roles/ironic/templates/ironic.conf.j2 b/ansible/roles/ironic/templates/ironic.conf.j2 index fcf130bf76..66fbdb1f77 100644 --- a/ansible/roles/ironic/templates/ironic.conf.j2 +++ b/ansible/roles/ironic/templates/ironic.conf.j2 @@ -4,7 +4,7 @@ auth_strategy = noauth {% endif %} debug = {{ ironic_logging_debug }} -log_file = /var/log/kolla/ironic/{{ service_name }} +log_file = /var/log/kolla/ironic/{{ service_name }}.log transport_url = {{ rpc_transport_url }} @@ -197,3 +197,6 @@ dhcp_provider = none [oslo_concurrency] lock_path = /var/lib/ironic/tmp + +[pxe_filter] +dhcp_hostsdir = /etc/dnsmasq/dhcp-hostsdir diff --git a/ansible/roles/keystone/defaults/main.yml b/ansible/roles/keystone/defaults/main.yml index 3aad442a2c..3d236dfef7 100644 --- 
a/ansible/roles/keystone/defaults/main.yml +++ b/ansible/roles/keystone/defaults/main.yml @@ -8,6 +8,7 @@ keystone_services: volumes: "{{ keystone_default_volumes + keystone_extra_volumes }}" dimensions: "{{ keystone_dimensions }}" healthcheck: "{{ keystone_healthcheck }}" + wsgi: "keystone.wsgi.api:application" haproxy: keystone_internal: enabled: "{{ enable_keystone }}" @@ -225,12 +226,15 @@ keystone_remote_id_attribute_oidc: "HTTP_OIDC_ISS" keystone_container_federation_oidc_metadata_folder: "{{ '/etc/apache2/metadata' if kolla_base_distro in ['debian', 'ubuntu'] else '/etc/httpd/metadata' }}" keystone_container_federation_oidc_idp_certificate_folder: "{{ '/etc/apache2/cert' if kolla_base_distro in ['debian', 'ubuntu'] else '/etc/httpd/cert' }}" keystone_container_federation_oidc_attribute_mappings_folder: "{{ container_config_directory }}/federation/oidc/attribute_maps" -keystone_host_federation_oidc_metadata_folder: "{{ node_config_directory }}/keystone/federation/oidc/metadata" -keystone_host_federation_oidc_idp_certificate_folder: "{{ node_config_directory }}/keystone/federation/oidc/cert" -keystone_host_federation_oidc_attribute_mappings_folder: "{{ node_config_directory }}/keystone/federation/oidc/attribute_maps" +keystone_host_federation_base_folder: "{{ node_config_directory }}/keystone/federation" +keystone_host_federation_oidc_metadata_folder: "{{ keystone_host_federation_base_folder }}/oidc/metadata" +keystone_host_federation_oidc_idp_certificate_folder: "{{ keystone_host_federation_base_folder }}/oidc/cert" +keystone_host_federation_oidc_attribute_mappings_folder: "{{ keystone_host_federation_base_folder }}/oidc/attribute_maps" keystone_federation_oidc_jwks_uri: "" keystone_federation_oidc_additional_options: {} +keystone_federation_oidc_error_page_retry_login_delay_milliseconds: 5000 + # These variables are used to define multiple trusted Horizon dashboards. 
# keystone_trusted_dashboards: ['', '', ''] horizon_trusted_dashboards: "{{ ['%s://%s/auth/websso/' % (public_protocol, kolla_external_fqdn), '%s/auth/websso/' % (horizon_public_endpoint)] if enable_horizon | bool else [] }}" @@ -257,3 +261,8 @@ keystone_database_enable_tls_internal: "{{ database_enable_tls_internal | bool } # Copy certificates ################### keystone_copy_certs: "{{ kolla_copy_ca_into_containers | bool or keystone_enable_tls_backend | bool or keystone_database_enable_tls_internal | bool }}" + +############ +# WSGI +############ +keystone_wsgi_provider: "uwsgi" diff --git a/ansible/roles/keystone/tasks/config-federation-oidc.yml b/ansible/roles/keystone/tasks/config-federation-oidc.yml index 81384931d0..7a50e8c40a 100644 --- a/ansible/roles/keystone/tasks/config-federation-oidc.yml +++ b/ansible/roles/keystone/tasks/config-federation-oidc.yml @@ -85,3 +85,21 @@ keystone_federation_openid_certificate_key_ids: "{{ certificates_path.files | map(attribute='path') | map('regex_replace', '^.*/(.*)\\.pem$', '\\1#' + keystone_container_federation_oidc_idp_certificate_folder + '/\\1.pem') | list }}" # noqa 204 when: - inventory_hostname in groups[keystone.group] + +- name: Copying modOIDC error page template + vars: + keystone: "{{ keystone_services.keystone }}" + template: + src: "{{ item }}" + dest: "{{ keystone_host_federation_base_folder }}/modoidc-error-page.html" + mode: "0660" + become: true + when: + - inventory_hostname in groups[keystone.group] + - keystone.enabled | bool + - keystone_enable_federation_openid | bool + with_first_found: + - files: + - "{{ node_custom_config }}/keystone/federation/modoidc-error-page.html" + - "modoidc-error-page.html.j2" + skip: true diff --git a/ansible/roles/keystone/tasks/config.yml b/ansible/roles/keystone/tasks/config.yml index d9c54e88e6..df6617dd41 100644 --- a/ansible/roles/keystone/tasks/config.yml +++ b/ansible/roles/keystone/tasks/config.yml @@ -26,7 +26,7 @@ keystone_policy_file: "{{ keystone_policy.results.0.stat.path | basename }}" keystone_policy_file_path: "{{ keystone_policy.results.0.stat.path }}" when: - - keystone_policy.results + - keystone_policy.results | length > 0 - name: Check if Keystone domain-specific config is supplied stat: @@ -132,12 +132,31 @@ dest: "{{ node_config_directory }}/keystone/wsgi-keystone.conf" mode: "0660" become: true - when: service | service_enabled_and_mapped_to_host + when: + - service | service_enabled_and_mapped_to_host + - keystone_wsgi_provider == "apache" with_first_found: - "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/wsgi-keystone.conf" - "{{ node_custom_config }}/keystone/wsgi-keystone.conf" - "wsgi-keystone.conf.j2" +- name: "Configure uWSGI for Keystone" + include_role: + name: service-uwsgi-config + vars: + project_services: "{{ keystone_services }}" + service: "{{ keystone_services['keystone'] }}" + service_name: "keystone" + service_uwsgi_config_http_port: "{{ keystone_listen_port }}" + service_uwsgi_config_module: "{{ service.wsgi }}" + service_uwsgi_config_tls_backend: "{{ keystone_enable_tls_backend | bool }}" + service_uwsgi_config_tls_cert: "/etc/keystone/certs/keystone-cert.pem" + service_uwsgi_config_tls_key: "/etc/keystone/certs/keystone-key.pem" + service_uwsgi_config_uid: "keystone" + when: + - service | service_enabled_and_mapped_to_host + - keystone_wsgi_provider == "uwsgi" + - name: Checking whether keystone-paste.ini file exists vars: service: "{{ keystone_services['keystone'] }}" diff --git 
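# Illustrative sketch: with keystone_wsgi_provider (and the matching gnocchi
# and horizon variables) now defaulting to "uwsgi", reverting to the Apache
# front end is a one-line override in globals.yml:
keystone_wsgi_provider: "apache"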
a/ansible/roles/keystone/templates/keystone-startup.sh.j2 b/ansible/roles/keystone/templates/keystone-startup.sh.j2 index 126ec865df..224e86f5dd 100644 --- a/ansible/roles/keystone/templates/keystone-startup.sh.j2 +++ b/ansible/roles/keystone/templates/keystone-startup.sh.j2 @@ -1,5 +1,7 @@ #!/bin/bash -x -{% set keystone_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} +{% set apache_cmd = '/usr/sbin/apache2' if kolla_base_distro in ['ubuntu', 'debian'] else '/usr/sbin/httpd' %} +{% set uwsgi_cmd = 'uwsgi /etc/keystone/keystone-api-uwsgi.ini' %} +{% set keystone_cmd = uwsgi_cmd if keystone_wsgi_provider == 'uwsgi' else (apache_cmd + ' -DFOREGROUND') %} set -o errexit set -o pipefail @@ -21,4 +23,4 @@ while [ ! -f "${FERNET_KEY_DIR}/0" ]; do fi done -exec /usr/sbin/{{ keystone_cmd }} -DFOREGROUND $@ +exec {{ keystone_cmd }} $@ diff --git a/ansible/roles/keystone/templates/keystone.json.j2 b/ansible/roles/keystone/templates/keystone.json.j2 index c5b567d69f..dda23b82b4 100644 --- a/ansible/roles/keystone/templates/keystone.json.j2 +++ b/ansible/roles/keystone/templates/keystone.json.j2 @@ -34,13 +34,19 @@ "dest": "/etc/keystone/{{ keystone_policy_file }}", "owner": "keystone", "perm": "0600" - }{% endif %}, + }{% endif %}{% if keystone_wsgi_provider == 'apache' %}, { "source": "{{ container_config_directory }}/wsgi-keystone.conf", "dest": "/etc/{{ keystone_dir }}/wsgi-keystone.conf", "owner": "keystone", "perm": "0600" - }{% if keystone_enable_tls_backend | bool %}, + }{% elif keystone_wsgi_provider == 'uwsgi' %}, + { + "source": "{{ container_config_directory }}/keystone-uwsgi.ini", + "dest": "/etc/keystone/keystone-api-uwsgi.ini", + "owner": "keystone", + "perm": "0600" + }{% endif %}{% if keystone_enable_tls_backend | bool %}, { "source": "{{ container_config_directory }}/keystone-cert.pem", "dest": "/etc/keystone/certs/keystone-cert.pem", @@ -52,8 +58,7 @@ "dest": "/etc/keystone/certs/keystone-key.pem", "owner": "keystone", "perm": "0600" - }{% endif %} - {% if keystone_enable_federation_openid | bool %}, + }{% endif %}{% if keystone_enable_federation_openid | bool %}, { "source": "{{ container_config_directory }}/federation/oidc/metadata", "dest": "{{ keystone_container_federation_oidc_metadata_folder }}", @@ -67,7 +72,14 @@ "owner": "{{ apache_user }}:{{ apache_user }}", "perm": "0600", "merge": true - }{% endif %}{% if kolla_copy_ca_into_containers | bool %}, + }, + { + "source": "{{ container_config_directory }}/federation/modoidc-error-page.html", + "dest": "/var/www/html/modoidc-error-page.html", + "owner": "{{ apache_user }}:{{ apache_user }}", + "perm": "0600" + } + {% endif %}{% if kolla_copy_ca_into_containers | bool %}, { "source": "{{ container_config_directory }}/ca-certificates", "dest": "/var/lib/kolla/share/ca-certificates", diff --git a/ansible/roles/keystone/templates/modoidc-error-page.html.j2 b/ansible/roles/keystone/templates/modoidc-error-page.html.j2 new file mode 100644 index 0000000000..1d8db9c077 --- /dev/null +++ b/ansible/roles/keystone/templates/modoidc-error-page.html.j2 @@ -0,0 +1,29 @@ + + + + + +
+It seems that an error happened during the login process.
+You will be redirected again. Wait a few seconds please.
+Redirect me now.
+Error: %s
+%s
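The Keystone portion of this patch introduces a keystone_wsgi_provider toggle (set to "uwsgi" in the role defaults) and keeps the Apache/mod_wsgi path behind keystone_wsgi_provider == "apache"; the error page template above can likewise be replaced by dropping a custom modoidc-error-page.html under {{ node_custom_config }}/keystone/federation/, per the with_first_found lookup in the copy task. A minimal globals.yml sketch for operators who prefer to stay on Apache, assuming the variable name from the role defaults shown earlier:

    # globals.yml (sketch): opt back into the Apache/mod_wsgi front end for
    # Keystone instead of the uWSGI default introduced by this patch.
    keystone_wsgi_provider: "apache"

With "apache", wsgi-keystone.conf is still rendered and keystone-startup.sh execs httpd/apache2 in the foreground; with the uWSGI default, the service-uwsgi-config role renders a uWSGI ini (mounted into the container as /etc/keystone/keystone-api-uwsgi.ini) and the startup script execs uwsgi against it.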
+ + diff --git a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 index 427c36d105..b94e825572 100644 --- a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 +++ b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 @@ -44,9 +44,7 @@ LogLevel info WSGIScriptAlias / {{ binary_path }}/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ keystone_log_dir }}/keystone-apache-public-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ keystone_log_dir }}/keystone-apache-public-access.log" logformat @@ -69,6 +67,7 @@ LogLevel info {% if keystone_federation_oidc_jwks_uri | length > 0 %} OIDCOAuthVerifyJwksUri {{ keystone_federation_oidc_jwks_uri }} {% endif %} + OIDCHTMLErrorTemplate /var/www/html/modoidc-error-page.html {% if keystone_federation_openid_certificate_key_ids | length > 0 %} OIDCOAuthVerifyCertFiles {{ keystone_federation_openid_certificate_key_ids | join(" ") }} {% endif %} diff --git a/ansible/roles/kuryr/tasks/config.yml b/ansible/roles/kuryr/tasks/config.yml index d0d436a87e..10bfff47db 100644 --- a/ansible/roles/kuryr/tasks/config.yml +++ b/ansible/roles/kuryr/tasks/config.yml @@ -26,7 +26,7 @@ kuryr_policy_file: "{{ kuryr_policy.results.0.stat.path | basename }}" kuryr_policy_file_path: "{{ kuryr_policy.results.0.stat.path }}" when: - - kuryr_policy.results + - kuryr_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 b/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 index 7d3492c08a..1cb4d0a21a 100644 --- a/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 +++ b/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 @@ -1,4 +1,8 @@ -#!/bin/bash -x +#!/bin/bash + +set -o errexit +set -o pipefail +set -o xtrace {% if kolla_enable_tls_internal | bool or kolla_enable_tls_external | bool %} {% if kolla_enable_tls_external | bool %} diff --git a/ansible/roles/magnum/tasks/config.yml b/ansible/roles/magnum/tasks/config.yml index 6243153244..5921da31e0 100644 --- a/ansible/roles/magnum/tasks/config.yml +++ b/ansible/roles/magnum/tasks/config.yml @@ -26,7 +26,7 @@ magnum_policy_file: "{{ magnum_policy.results.0.stat.path | basename }}" magnum_policy_file_path: "{{ magnum_policy.results.0.stat.path }}" when: - - magnum_policy.results + - magnum_policy.results | length > 0 - name: Check if kubeconfig file is supplied stat: diff --git a/ansible/roles/manila/tasks/config.yml b/ansible/roles/manila/tasks/config.yml index f334acc9c5..1607316a26 100644 --- a/ansible/roles/manila/tasks/config.yml +++ b/ansible/roles/manila/tasks/config.yml @@ -31,7 +31,7 @@ manila_policy_file: "{{ manila_policy.results.0.stat.path | basename }}" manila_policy_file_path: "{{ manila_policy.results.0.stat.path }}" when: - - manila_policy.results + - manila_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/manila/templates/manila-share.conf.j2 b/ansible/roles/manila/templates/manila-share.conf.j2 index ee1deb3413..cade41e823 100644 --- a/ansible/roles/manila/templates/manila-share.conf.j2 +++ b/ansible/roles/manila/templates/manila-share.conf.j2 @@ -74,8 +74,6 @@ memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_addres share_driver = 
manila.share.drivers.generic.GenericShareDriver {% if neutron_plugin_agent == "openvswitch" %} interface_driver = manila.network.linux.interface.OVSInterfaceDriver -{% elif neutron_plugin_agent == "linuxbridge" %} -interface_driver = manila.network.linux.interface.BridgeInterfaceDriver {% endif %} driver_handles_share_servers = true diff --git a/ansible/roles/mariadb/defaults/main.yml b/ansible/roles/mariadb/defaults/main.yml index 837baaab88..31c5efb2ac 100644 --- a/ansible/roles/mariadb/defaults/main.yml +++ b/ansible/roles/mariadb/defaults/main.yml @@ -8,25 +8,7 @@ mariadb_services: volumes: "{{ mariadb_default_volumes + mariadb_extra_volumes }}" dimensions: "{{ mariadb_dimensions }}" healthcheck: "{{ mariadb_healthcheck }}" - environment: - MYSQL_USERNAME: "{{ mariadb_monitor_user }}" - MYSQL_PASSWORD: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}" - MYSQL_HOST: "{{ api_interface_address }}" - AVAILABLE_WHEN_DONOR: "1" haproxy: - mariadb: - enabled: "{{ enable_mariadb | bool and not enable_external_mariadb_load_balancer | bool }}" - mode: "tcp" - port: "{{ database_port }}" - listen_port: "{{ mariadb_port }}" - frontend_tcp_extra: - - "option clitcpka" - - "timeout client 3600s" - backend_tcp_extra: - - "option srvtcpka" - - "timeout server 3600s" - - "{% if enable_mariadb_clustercheck | bool %}option httpchk{% endif %}" - custom_member_list: "{{ internal_haproxy_members.split(';') }}" mariadb_external_lb: enabled: "{{ enable_external_mariadb_load_balancer | bool }}" mode: "tcp" @@ -39,18 +21,6 @@ mariadb_services: - "option srvtcpka" - "timeout server 3600s" custom_member_list: "{{ external_haproxy_members.split(';') }}" - mariadb-clustercheck: - container_name: mariadb_clustercheck - group: "{{ mariadb_shard_group }}" - enabled: "{{ enable_mariadb_clustercheck | bool }}" - image: "{{ mariadb_clustercheck_image_full }}" - volumes: "{{ mariadb_clustercheck_default_volumes + mariadb_clustercheck_extra_volumes }}" - dimensions: "{{ mariadb_clustercheck_dimensions }}" - environment: - MYSQL_USERNAME: "{{ mariadb_monitor_user }}" - MYSQL_PASSWORD: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}" - MYSQL_HOST: "{{ api_interface_address }}" - AVAILABLE_WHEN_DONOR: "1" #################### # Database @@ -61,8 +31,7 @@ database_max_timeout: 120 #################### # HAProxy #################### -internal_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ hostvars[host].ansible_facts.hostname }} {{ 'api' | kolla_address(host) }}:{{ mariadb_port }} check port {% if enable_mariadb_clustercheck | bool %}{{ mariadb_clustercheck_port }}{% else %}{{ mariadb_port }}{% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}" -external_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ host }} {{ host }}:{{ mariadb_port }} check port {% if enable_mariadb_clustercheck | bool %}{{ mariadb_clustercheck_port }}{% else %}{{ mariadb_port }}{% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}" +external_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ host }} {{ host }}:{{ mariadb_port }} check port {{ mariadb_port }} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}" #################### # Docker @@ -71,12 +40,7 @@ mariadb_image: "{{ docker_image_url }}mariadb-server" mariadb_tag: "{{ openstack_tag }}" mariadb_image_full: "{{ mariadb_image }}:{{ mariadb_tag }}" 
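The MariaDB changes in this hunk drop the clustercheck sidecar and the internal HAProxy listener, switch the container health check to the healthcheck.sh script referenced in the new mariadb_healthcheck_test (driven by the healthcheck.cnf template added later in this patch), and make the Galera SST method configurable with a "mariabackup" default. A hedged globals.yml sketch of the resulting operator-facing knobs; the variable names come from this hunk and the values are illustrative rather than recommendations:

    # globals.yml (sketch): force rsync-based SST permanently instead of the
    # mariabackup default (the upgrade tasks in this patch switch to rsync
    # only for the duration of the upgrade run).
    mariadb_wsrep_sst_method: "rsync"
    # Optionally disable the healthcheck.sh-based container health check;
    # healthcheck.cnf is only templated when this is enabled.
    mariadb_enable_healthchecks: "no"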
-mariadb_clustercheck_image: "{{ docker_image_url }}mariadb-clustercheck" -mariadb_clustercheck_tag: "{{ mariadb_tag }}" -mariadb_clustercheck_image_full: "{{ mariadb_clustercheck_image }}:{{ mariadb_clustercheck_tag }}" - mariadb_dimensions: "{{ default_container_dimensions }}" -mariadb_clustercheck_dimensions: "{{ default_container_dimensions }}" mariadb_default_volumes: - "{{ node_config_directory }}/mariadb/:{{ container_config_directory }}/:ro" @@ -84,20 +48,15 @@ mariadb_default_volumes: - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - "{{ mariadb_datadir_volume }}:/var/lib/mysql" - "kolla_logs:/var/log/kolla/" -mariadb_clustercheck_default_volumes: - - "{{ node_config_directory }}/mariadb-clustercheck/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "kolla_logs:/var/log/kolla/" mariadb_extra_volumes: "{{ default_extra_volumes }}" -mariadb_clustercheck_extra_volumes: "{{ default_extra_volumes }}" mariadb_enable_healthchecks: "{{ enable_container_healthchecks }}" mariadb_healthcheck_interval: "{{ default_container_healthcheck_interval }}" mariadb_healthcheck_retries: "{{ default_container_healthcheck_retries }}" mariadb_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" -mariadb_healthcheck_test: ["CMD-SHELL", "/usr/bin/clustercheck"] +mariadb_healthcheck_test: ["CMD-SHELL", "/usr/bin/healthcheck.sh --defaults-file /etc/{{ 'mysql/' if kolla_base_distro in ['ubuntu', 'debian'] else '' }}healthcheck.cnf --connect --galera_online"] + mariadb_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" mariadb_healthcheck: interval: "{{ mariadb_healthcheck_interval }}" @@ -116,25 +75,21 @@ mariadb_recover_tmp_file_path: "/tmp/kolla_mariadb_recover_inventory_name_{{ mar # WSREP options ############### mariadb_wsrep_extra_provider_options: [] +mariadb_wsrep_sst_method: "mariabackup" #################### # Backups #################### mariadb_backup_host: "{{ groups[mariadb_shard_group][0] }}" mariadb_backup_database_schema: "mysql" -mariadb_backup_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}backup{% else %}{{ mariadb_shard_backup_user_prefix }}{{ mariadb_shard_id | string }}{% endif %}" +mariadb_backup_database_user: "{{ mariadb_shard_backup_user_prefix }}{{ mariadb_shard_id | string }}" mariadb_backup_type: "full" -mariadb_backup_possible: "{{ mariadb_loadbalancer != 'haproxy' or inventory_hostname in mariadb_default_database_shard_hosts }}" - -#################### -# Clustercheck -#################### -enable_mariadb_clustercheck: "{{ 'True' if mariadb_loadbalancer == 'haproxy' else 'False' }}" +mariadb_backup_possible: "{{ inventory_hostname in mariadb_default_database_shard_hosts }}" #################### # Sharding #################### -mariadb_shard_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}{{ database_user }}{% else %}{{ mariadb_shard_root_user_prefix }}{{ mariadb_shard_id | string }}{% endif %}" +mariadb_shard_database_user: "{{ mariadb_shard_root_user_prefix }}{{ mariadb_shard_id | string }}" mariadb_database_shard: "{{ mariadb_shards_info }}" # Database diff --git a/ansible/roles/mariadb/handlers/main.yml b/ansible/roles/mariadb/handlers/main.yml index c5d120615f..5e1b68ae90 100644 --- a/ansible/roles/mariadb/handlers/main.yml +++ b/ansible/roles/mariadb/handlers/main.yml @@ -72,19 +72,3 @@ - groups[mariadb_shard_group + '_port_alive_False'] is defined - 
inventory_hostname in groups[mariadb_shard_group + '_port_alive_False'] listen: Restart mariadb container - -- name: Restart mariadb-clustercheck container - vars: - service_name: "mariadb-clustercheck" - service: "{{ mariadb_services[service_name] }}" - become: true - kolla_container: - action: "recreate_or_restart_container" - common_options: "{{ docker_common_options }}" - image: "{{ service.image }}" - name: "{{ service.container_name }}" - volumes: "{{ service.volumes }}" - dimensions: "{{ service.dimensions }}" - environment: "{{ service.environment }}" - when: - - service | service_enabled_and_mapped_to_host diff --git a/ansible/roles/mariadb/tasks/config.yml b/ansible/roles/mariadb/tasks/config.yml index bf1779be55..405e34ffbd 100644 --- a/ansible/roles/mariadb/tasks/config.yml +++ b/ansible/roles/mariadb/tasks/config.yml @@ -70,6 +70,22 @@ become: true when: service | service_enabled_and_mapped_to_host +- name: Copying over healthcheck.cnf + vars: + service_name: "mariadb" + service: "{{ mariadb_services[service_name] }}" + merge_configs: + sources: + - "{{ role_path }}/templates/healthcheck.cnf.j2" + - "{{ node_custom_config }}/healthcheck.cnf" + - "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/healthcheck.cnf" + dest: "{{ node_config_directory }}/{{ service_name }}/healthcheck.cnf" + mode: "0660" + become: true + when: + - mariadb_enable_healthchecks | bool + - service | service_enabled_and_mapped_to_host + - include_tasks: copy-certs.yml when: - mariadb_copy_certs | bool diff --git a/ansible/roles/mariadb/tasks/loadbalancer.yml b/ansible/roles/mariadb/tasks/loadbalancer.yml index 78cac3fb56..bc7439dfd8 100644 --- a/ansible/roles/mariadb/tasks/loadbalancer.yml +++ b/ansible/roles/mariadb/tasks/loadbalancer.yml @@ -30,7 +30,7 @@ login_user: "{{ database_user }}" login_password: "{{ database_password }}" name: "{{ mariadb_monitor_user }}" - password: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}" + password: "{{ mariadb_monitor_password }}" host: "%" priv: "*.*:USAGE,REPLICATION CLIENT" tags: always diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml index 11e4b8333e..165aa3963b 100644 --- a/ansible/roles/mariadb/tasks/recover_cluster.yml +++ b/ansible/roles/mariadb/tasks/recover_cluster.yml @@ -111,6 +111,22 @@ - bootstrap_host is defined - bootstrap_host == inventory_hostname +- name: Refresh galera.cnf to set first MariaDB container as primary + vars: + service_name: "mariadb" + service: "{{ mariadb_services[service_name] }}" + primary_host_on_recovery: "{{ bootstrap_host == inventory_hostname }}" + merge_configs: + sources: + - "{{ role_path }}/templates/galera.cnf.j2" + - "{{ node_custom_config }}/galera.cnf" + - "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/galera.cnf" + dest: "{{ node_config_directory }}/{{ service_name }}/galera.cnf" + mode: "0660" + become: true + when: + - bootstrap_host is defined + - name: Starting first MariaDB container become: true kolla_container: @@ -144,14 +160,6 @@ - bootstrap_host is defined - bootstrap_host == inventory_hostname -- name: Set first MariaDB container as primary - become: true - shell: "{{ kolla_container_engine }} exec {{ mariadb_service.container_name }} mariadb -uroot -p{{ database_password }} -e \"SET GLOBAL wsrep_provider_options='pc.bootstrap=yes';\"" - no_log: True - when: - - bootstrap_host is defined - - bootstrap_host == inventory_hostname - - name: Wait for MariaDB to become operational become: true 
kolla_toolbox: @@ -203,6 +211,23 @@ - bootstrap_host is defined - bootstrap_host != inventory_hostname +- name: Unset pc.bootstrap for primary MariaDB galera.cnf for next restart + vars: + service_name: "mariadb" + service: "{{ mariadb_services[service_name] }}" + primary_host_on_recovery: false + merge_configs: + sources: + - "{{ role_path }}/templates/galera.cnf.j2" + - "{{ node_custom_config }}/galera.cnf" + - "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/galera.cnf" + dest: "{{ node_config_directory }}/{{ service_name }}/galera.cnf" + mode: "0660" + become: true + when: + - bootstrap_host is defined + - bootstrap_host == inventory_hostname + - name: Restart master MariaDB container(s) become: true kolla_container: diff --git a/ansible/roles/mariadb/tasks/register.yml b/ansible/roles/mariadb/tasks/register.yml index 8c679d9385..1b733afa70 100644 --- a/ansible/roles/mariadb/tasks/register.yml +++ b/ansible/roles/mariadb/tasks/register.yml @@ -29,7 +29,7 @@ login_user: "{{ database_user }}" login_password: "{{ database_password }}" name: "{{ mariadb_monitor_user }}" - password: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}" + password: "{{ mariadb_monitor_password }}" host: "%" priv: "*.*:USAGE,REPLICATION CLIENT" when: diff --git a/ansible/roles/mariadb/tasks/upgrade.yml b/ansible/roles/mariadb/tasks/upgrade.yml index 5b10a7e111..9a54536962 100644 --- a/ansible/roles/mariadb/tasks/upgrade.yml +++ b/ansible/roles/mariadb/tasks/upgrade.yml @@ -1,2 +1,33 @@ --- +- name: Set wsrep_sst_method to rsync for upgrade + become: true + no_log: true + shell: > + {{ kolla_container_engine }} exec {{ mariadb_service.container_name }} + mysql -uroot -p{{ database_password }} + -e "SET GLOBAL wsrep_sst_method='rsync';" + - import_tasks: deploy.yml + vars: + mariadb_wsrep_sst_method: "rsync" + +- name: Set wsrep_sst_method to mariabackup after upgrade + become: true + no_log: true + shell: > + {{ kolla_container_engine }} exec {{ mariadb_service.container_name }} + mysql -uroot -p{{ database_password }} + -e "SET GLOBAL wsrep_sst_method='mariabackup';" + +- import_tasks: deploy.yml + +# TODO(seunghun1ee): Remove this task after 2026.1 +- name: "Stop and remove mariadb_clustercheck containers" + become: true + kolla_container: + action: "stop_and_remove_container" + common_options: "{{ docker_common_options }}" + name: "mariadb_clustercheck" + ignore_missing: true + when: + - "'mariadb_clustercheck' not in skip_stop_containers" diff --git a/ansible/roles/mariadb/templates/galera.cnf.j2 b/ansible/roles/mariadb/templates/galera.cnf.j2 index c7e5916fd5..805669aed1 100644 --- a/ansible/roles/mariadb/templates/galera.cnf.j2 +++ b/ansible/roles/mariadb/templates/galera.cnf.j2 @@ -1,5 +1,4 @@ {%- set wsrep_driver = '/usr/lib/galera/libgalera_smm.so' if kolla_base_distro in ['debian', 'ubuntu'] else '/usr/lib64/galera/libgalera_smm.so' %} -{% set sst_method = 'mariabackup' %} [client] default-character-set=utf8 @@ -32,7 +31,7 @@ datadir=/var/lib/mysql/ wsrep_cluster_address=gcomm://{% if (groups[mariadb_shard_group] | length) > 1 %}{% for host in groups[mariadb_shard_group] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ mariadb_wsrep_port }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %} -wsrep_provider_options=gmcast.listen_addr=tcp://{{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_wsrep_port }};ist.recv_addr={{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_ist_port }};{% for option in 
mariadb_wsrep_extra_provider_options %}{{ option }}{% if not loop.last %};{% endif %}{% endfor %} +wsrep_provider_options={% if primary_host_on_recovery is defined and primary_host_on_recovery %}pc.bootstrap=yes;{% endif %}gmcast.listen_addr=tcp://{{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_wsrep_port }};ist.recv_addr={{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_ist_port }};{% for option in mariadb_wsrep_extra_provider_options %}{{ option }}{% if not loop.last %};{% endif %}{% endfor %} wsrep_node_address={{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_wsrep_port }} @@ -41,7 +40,7 @@ wsrep_sst_receive_address={{ api_interface_address | put_address_in_context('url wsrep_provider={{ wsrep_driver }} wsrep_cluster_name="{{ database_cluster_name }}" wsrep_node_name={{ ansible_facts.hostname }} -wsrep_sst_method={{ sst_method }} +wsrep_sst_method={{ mariadb_wsrep_sst_method }} wsrep_sst_auth={{ database_user }}:{{ database_password }} wsrep_slave_threads=4 wsrep_on = ON @@ -62,7 +61,7 @@ innodb_buffer_pool_size = '8192M' pid-file=/var/lib/mysql/mariadb.pid [sst] -{% if sst_method == 'mariabackup' and api_address_family == 'ipv6' %} +{% if mariadb_wsrep_sst_method == 'mariabackup' and api_address_family == 'ipv6' %} # NOTE(yoctozepto): for IPv6 we need to tweak sockopt for socat (mariabackup sst backend) # see: https://mariadb.com/kb/en/library/xtrabackup-v2-sst-method/#performing-ssts-with-ipv6-addresses # and: https://jira.mariadb.org/browse/MDEV-18797 diff --git a/ansible/roles/mariadb/templates/healthcheck.cnf.j2 b/ansible/roles/mariadb/templates/healthcheck.cnf.j2 new file mode 100644 index 0000000000..670280dd51 --- /dev/null +++ b/ansible/roles/mariadb/templates/healthcheck.cnf.j2 @@ -0,0 +1,3 @@ +[mariadb-client] +user={{ mariadb_monitor_user }} +password={{ mariadb_monitor_password }} diff --git a/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2 b/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2 deleted file mode 100644 index aad07bff6a..0000000000 --- a/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2 +++ /dev/null @@ -1,11 +0,0 @@ -{ - "command": "socat_wrapper {% if network_address_family == 'ipv6' %}-6{% endif %} -d -lf/var/log/kolla/mariadb/mariadb-clustercheck.log tcp-l:{{ mariadb_clustercheck_port }},fork,reuseaddr,bind={{ api_interface_address }} EXEC:clustercheck", - "config_files": [], - "permissions": [ - { - "path": "/var/log/kolla/mariadb", - "owner": "mysql:mysql", - "recurse": true - } - ] -} diff --git a/ansible/roles/mariadb/templates/mariadb.json.j2 b/ansible/roles/mariadb/templates/mariadb.json.j2 index 7910d69293..2ecc9fa5ef 100644 --- a/ansible/roles/mariadb/templates/mariadb.json.j2 +++ b/ansible/roles/mariadb/templates/mariadb.json.j2 @@ -8,6 +8,13 @@ "owner": "mysql", "perm": "0600" } + {% if mariadb_enable_healthchecks | bool %}, + { + "source": "{{ container_config_directory }}/healthcheck.cnf", + "dest": "/etc/{{ mysql_dir }}/healthcheck.cnf", + "owner": "mysql", + "perm": "0600" + }{% endif %} {% if database_enable_tls_backend | bool %}, { "source": "{{ container_config_directory }}/ca-certificates/root.crt", diff --git a/ansible/roles/masakari/tasks/config.yml b/ansible/roles/masakari/tasks/config.yml index 106faf4284..16e14e99f4 100644 --- a/ansible/roles/masakari/tasks/config.yml +++ b/ansible/roles/masakari/tasks/config.yml @@ -26,7 +26,7 @@ masakari_policy_file: "{{ masakari_policy.results.0.stat.path | basename }}" masakari_policy_file_path: 
"{{ masakari_policy.results.0.stat.path }}" when: - - masakari_policy.results + - masakari_policy.results | length > 0 - name: Copying over existing policy file template: diff --git a/ansible/roles/mistral/tasks/config.yml b/ansible/roles/mistral/tasks/config.yml index 4cb4fcdebf..d9ea731db3 100644 --- a/ansible/roles/mistral/tasks/config.yml +++ b/ansible/roles/mistral/tasks/config.yml @@ -26,7 +26,7 @@ mistral_policy_file: "{{ mistral_policy.results.0.stat.path | basename }}" mistral_policy_file_path: "{{ mistral_policy.results.0.stat.path }}" when: - - mistral_policy.results + - mistral_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml index 47d3e05a26..8d3dcdcfd7 100644 --- a/ansible/roles/neutron/defaults/main.yml +++ b/ansible/roles/neutron/defaults/main.yml @@ -33,7 +33,7 @@ neutron_services: neutron-rpc-server: container_name: "neutron_rpc_server" image: "{{ neutron_rpc_server_image_full }}" - enabled: "{{ neutron_plugin_agent in ['linuxbridge', 'openvswitch'] }}" + enabled: true group: "neutron-rpc-server" host_in_groups: "{{ inventory_hostname in groups['neutron-rpc-server'] }}" volumes: "{{ neutron_rpc_server_default_volumes + neutron_rpc_server_extra_volumes }}" @@ -62,8 +62,6 @@ neutron_services: image: "{{ neutron_openvswitch_agent_image_full }}" enabled: "{{ neutron_plugin_agent == 'openvswitch' }}" privileged: True - environment: - KOLLA_LEGACY_IPTABLES: "{{ neutron_legacy_iptables | bool | lower }}" host_in_groups: >- {{ (inventory_hostname in groups['compute'] @@ -81,24 +79,6 @@ neutron_services: volumes: "{{ neutron_openvswitch_agent_default_volumes + neutron_openvswitch_agent_extra_volumes }}" dimensions: "{{ neutron_openvswitch_agent_dimensions }}" healthcheck: "{{ neutron_openvswitch_agent_healthcheck }}" - neutron-linuxbridge-agent: - container_name: "neutron_linuxbridge_agent" - image: "{{ neutron_linuxbridge_agent_image_full }}" - privileged: True - enabled: "{{ neutron_plugin_agent == 'linuxbridge' }}" - environment: - KOLLA_LEGACY_IPTABLES: "{{ neutron_legacy_iptables | bool | lower }}" - host_in_groups: >- - {{ - inventory_hostname in groups['compute'] - or (enable_manila | bool and inventory_hostname in groups['manila-share']) - or inventory_hostname in groups['neutron-dhcp-agent'] - or inventory_hostname in groups['neutron-l3-agent'] - or inventory_hostname in groups['neutron-metadata-agent'] - }} - volumes: "{{ neutron_linuxbridge_agent_default_volumes + neutron_linuxbridge_agent_extra_volumes }}" - dimensions: "{{ neutron_linuxbridge_agent_dimensions }}" - healthcheck: "{{ neutron_linuxbridge_agent_healthcheck }}" neutron-dhcp-agent: cgroupns_mode: "{{ 'host' if neutron_agents_wrappers | bool else 'private' }}" container_name: "neutron_dhcp_agent" @@ -123,7 +103,6 @@ neutron_services: enabled: "{{ neutron_plugin_agent != 'ovn' }}" environment: KOLLA_IMAGE: "{{ neutron_l3_agent_image_full }}" - KOLLA_LEGACY_IPTABLES: "{{ neutron_legacy_iptables | bool | lower }}" KOLLA_NAME: "neutron_l3_agent" KOLLA_NEUTRON_WRAPPERS: "{{ 'true' if neutron_agents_wrappers | bool else 'false' }}" host_in_groups: >- @@ -135,7 +114,6 @@ neutron_services: dimensions: "{{ neutron_l3_agent_dimensions }}" healthcheck: "{{ neutron_l3_agent_healthcheck }}" pid_mode: "{{ 'host' if neutron_agents_wrappers | bool else '' }}" - neutron-sriov-agent: container_name: "neutron_sriov_agent" image: "{{ neutron_sriov_agent_image_full }}" @@ -174,6 +152,7 @@ neutron_services: dimensions: 
"{{ neutron_metadata_agent_dimensions }}" healthcheck: "{{ neutron_metadata_agent_healthcheck }}" neutron-ovn-metadata-agent: + cgroupns_mode: "{{ 'host' if neutron_agents_wrappers | bool else 'private' }}" container_name: "neutron_ovn_metadata_agent" image: "{{ neutron_ovn_metadata_agent_image_full }}" privileged: True @@ -182,6 +161,11 @@ neutron_services: volumes: "{{ neutron_ovn_metadata_agent_default_volumes + neutron_ovn_metadata_agent_extra_volumes }}" dimensions: "{{ neutron_ovn_metadata_agent_dimensions }}" healthcheck: "{{ neutron_ovn_metadata_agent_healthcheck }}" + pid_mode: "{{ 'host' if neutron_agents_wrappers | bool else '' }}" + environment: + KOLLA_IMAGE: "{{ neutron_ovn_metadata_agent_image_full }}" + KOLLA_NAME: "neutron_ovn_metadata_agent" + KOLLA_NEUTRON_WRAPPERS: "{{ 'true' if neutron_agents_wrappers | bool else 'false' }}" neutron-bgp-dragent: container_name: "neutron_bgp_dragent" image: "{{ neutron_bgp_dragent_image_full }}" @@ -254,8 +238,6 @@ neutron_config_validation: config: "/etc/neutron/l3_agent.ini" - generator: "/neutron/etc/oslo-config-generator/dhcp_agent.ini" config: "/etc/neutron/dhcp_agent.ini" - - generator: "/neutron/etc/oslo-config-generator/linuxbridge_agent.ini" - config: "/etc/neutron/plugins/ml2/linuxbridge_agent.ini" #################### # Database @@ -307,10 +289,6 @@ neutron_eswitchd_image: "{{ docker_image_url }}neutron-mlnx-agent" neutron_eswitchd_tag: "{{ neutron_mlnx_agent_tag }}" neutron_eswitchd_image_full: "{{ neutron_eswitchd_image }}:{{ neutron_eswitchd_tag }}" -neutron_linuxbridge_agent_image: "{{ docker_image_url }}neutron-linuxbridge-agent" -neutron_linuxbridge_agent_tag: "{{ neutron_tag }}" -neutron_linuxbridge_agent_image_full: "{{ neutron_linuxbridge_agent_image }}:{{ neutron_linuxbridge_agent_tag }}" - neutron_metadata_agent_image: "{{ docker_image_url }}neutron-metadata-agent" neutron_metadata_agent_tag: "{{ neutron_tag }}" neutron_metadata_agent_image_full: "{{ neutron_metadata_agent_image }}:{{ neutron_metadata_agent_tag }}" @@ -366,7 +344,6 @@ neutron_l3_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_sriov_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_mlnx_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_eswitchd_dimensions: "{{ neutron_agent_dimensions }}" -neutron_linuxbridge_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_metadata_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_ovn_metadata_agent_dimensions: "{{ neutron_agent_dimensions }}" neutron_openvswitch_agent_dimensions: "{{ neutron_agent_dimensions }}" @@ -406,19 +383,6 @@ neutron_l3_agent_healthcheck: test: "{% if neutron_l3_agent_enable_healthchecks | bool %}{{ neutron_l3_agent_healthcheck_test }}{% else %}NONE{% endif %}" timeout: "{{ neutron_l3_agent_healthcheck_timeout }}" -neutron_linuxbridge_agent_enable_healthchecks: "{{ enable_container_healthchecks }}" -neutron_linuxbridge_agent_healthcheck_interval: "{{ default_container_healthcheck_interval }}" -neutron_linuxbridge_agent_healthcheck_retries: "{{ default_container_healthcheck_retries }}" -neutron_linuxbridge_agent_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" -neutron_linuxbridge_agent_healthcheck_test: ["CMD-SHELL", "healthcheck_port neutron-linuxbridge-agent {{ om_rpc_port }}"] -neutron_linuxbridge_agent_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" -neutron_linuxbridge_agent_healthcheck: - interval: "{{ neutron_linuxbridge_agent_healthcheck_interval }}" - retries: "{{ 
neutron_linuxbridge_agent_healthcheck_retries }}" - start_period: "{{ neutron_linuxbridge_agent_healthcheck_start_period }}" - test: "{% if neutron_linuxbridge_agent_enable_healthchecks | bool %}{{ neutron_linuxbridge_agent_healthcheck_test }}{% else %}NONE{% endif %}" - timeout: "{{ neutron_linuxbridge_agent_healthcheck_timeout }}" - neutron_metadata_agent_enable_healthchecks: "no" neutron_metadata_agent_healthcheck_interval: "{{ default_container_healthcheck_interval }}" neutron_metadata_agent_healthcheck_retries: "{{ default_container_healthcheck_retries }}" @@ -571,9 +535,9 @@ neutron_dhcp_agent_default_volumes: - "kolla_logs:/var/log/kolla/" - "{{ '/dev/shm:/dev/shm' }}" - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}" - - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' }}" - - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' }}" - - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' }}" + - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' else '' }}" + - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" + - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" neutron_l3_agent_default_volumes: - "{{ node_config_directory }}/neutron-l3-agent/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" @@ -584,9 +548,9 @@ neutron_l3_agent_default_volumes: - "kolla_logs:/var/log/kolla/" - "{{ '/dev/shm:/dev/shm' }}" - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}" - - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' }}" - - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' }}" - - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' }}" + - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' else '' }}" + - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" + - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" neutron_sriov_agent_default_volumes: - "{{ node_config_directory }}/neutron-sriov-agent/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" @@ -609,14 +573,6 @@ neutron_eswitchd_default_volumes: - "kolla_logs:/var/log/kolla/" - "{{ '/dev/shm:/dev/shm' }}" - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}" -neutron_linuxbridge_agent_default_volumes: - - "{{ node_config_directory }}/neutron-linuxbridge-agent/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "/lib/modules:/lib/modules:ro" - - "kolla_logs:/var/log/kolla/" - - "{{ '/dev/shm:/dev/shm' }}" - - "{{ 
kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}" neutron_metadata_agent_default_volumes: - "{{ node_config_directory }}/neutron-metadata-agent/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" @@ -634,6 +590,9 @@ neutron_ovn_metadata_agent_default_volumes: - "kolla_logs:/var/log/kolla/" - "{{ '/dev/shm:/dev/shm' }}" - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}" + - "{{ '/var/run/docker.sock:/var/run/docker.sock:ro' if neutron_agents_wrappers | bool and kolla_container_engine == 'docker' else '' }}" + - "{{ '/run/podman/podman.sock:/run/podman/podman.sock' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" + - "{{ '/var/lib/containers:/var/lib/containers' if neutron_agents_wrappers | bool and kolla_container_engine == 'podman' else '' }}" neutron_openvswitch_agent_default_volumes: - "{{ node_config_directory }}/neutron-openvswitch-agent/:{{ container_config_directory }}/:ro" - "/etc/localtime:/etc/localtime:ro" @@ -706,7 +665,6 @@ neutron_l3_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_sriov_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_mlnx_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_eswitchd_extra_volumes: "{{ neutron_extra_volumes }}" -neutron_linuxbridge_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_metadata_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_ovn_metadata_agent_extra_volumes: "{{ neutron_extra_volumes }}" neutron_openvswitch_agent_extra_volumes: "{{ neutron_extra_volumes }}" @@ -748,7 +706,7 @@ neutron_l3_agent_host_ipv6_neigh_gc_thresh3: "{{ neutron_l3_agent_host_ipv4_neig neutron_api_workers: "{{ openstack_service_workers }}" neutron_metadata_workers: "{{ openstack_service_workers }}" -neutron_agents_wrappers: "no" +neutron_agents_wrappers: "yes" #################### # Subprojects @@ -769,8 +727,6 @@ neutron_subprojects: # Mechanism drivers #################### mechanism_drivers: - - name: "linuxbridge" - enabled: "{{ neutron_plugin_agent == 'linuxbridge' }}" - name: "openvswitch" enabled: "{{ neutron_plugin_agent == 'openvswitch' }}" - name: "mlnx_infiniband" diff --git a/ansible/roles/neutron/handlers/main.yml b/ansible/roles/neutron/handlers/main.yml index 867880bcbc..454db95d22 100644 --- a/ansible/roles/neutron/handlers/main.yml +++ b/ansible/roles/neutron/handlers/main.yml @@ -69,7 +69,7 @@ common_options: "{{ docker_common_options }}" name: "{{ service.container_name }}" image: "{{ service.image }}" - environment: "{{ service.environment }}" + environment: "{{ service.environment | default(omit) }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" privileged: "{{ service.privileged | default(False) }}" @@ -91,22 +91,6 @@ healthcheck: "{{ service.healthcheck | default(omit) }}" with_sequence: "start=1 end={{ num_nova_fake_per_node }}" -- name: Restart neutron-linuxbridge-agent container - vars: - service_name: "neutron-linuxbridge-agent" - service: "{{ neutron_services[service_name] }}" - become: true - kolla_container: - action: "recreate_or_restart_container" - common_options: "{{ docker_common_options }}" - name: "{{ service.container_name }}" - image: "{{ service.image }}" - environment: "{{ service.environment }}" - volumes: "{{ service.volumes | reject('equalto', '') | list }}" - dimensions: "{{ service.dimensions }}" - privileged: "{{ service.privileged | default(False) }}" - 
healthcheck: "{{ service.healthcheck | default(omit) }}" - - name: Restart neutron-dhcp-agent container vars: service_name: "neutron-dhcp-agent" @@ -150,7 +134,7 @@ common_options: "{{ docker_common_options }}" name: "{{ service.container_name }}" image: "{{ service.image }}" - environment: "{{ service.environment }}" + environment: "{{ service.environment | default(omit) }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" privileged: "{{ service.privileged | default(False) }}" @@ -180,7 +164,7 @@ common_options: "{{ docker_common_options }}" name: "{{ service.container_name }}" image: "{{ service.image }}" - environment: "{{ service.environment }}" + environment: "{{ service.environment | default(omit) }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" privileged: "{{ service.privileged | default(False) }}" @@ -266,6 +250,9 @@ dimensions: "{{ service.dimensions }}" privileged: "{{ service.privileged | default(False) }}" healthcheck: "{{ service.healthcheck | default(omit) }}" + pid_mode: "{{ service.pid_mode | default(omit) }}" + cgroupns_mode: "{{ service.cgroupns_mode | default(omit) }}" + environment: "{{ service.environment | default(omit) }}" - name: Restart neutron-bgp-dragent container vars: diff --git a/ansible/roles/neutron/tasks/config-host.yml b/ansible/roles/neutron/tasks/config-host.yml index a33919b123..8ad3f3d5a1 100644 --- a/ansible/roles/neutron/tasks/config-host.yml +++ b/ansible/roles/neutron/tasks/config-host.yml @@ -8,7 +8,7 @@ neutron_services | select_services_enabled_and_mapped_to_host | list | - intersect(["neutron-l3-agent", "neutron-linuxbridge-agent", "neutron-openvswitch-agent"]) | + intersect(["neutron-l3-agent", "neutron-openvswitch-agent"]) | list | length > 0 diff --git a/ansible/roles/neutron/tasks/config.yml b/ansible/roles/neutron/tasks/config.yml index a8c22e0068..18b8f3991d 100644 --- a/ansible/roles/neutron/tasks/config.yml +++ b/ansible/roles/neutron/tasks/config.yml @@ -38,7 +38,7 @@ neutron_policy_file: "{{ neutron_policy.results.0.stat.path | basename }}" neutron_policy_file_path: "{{ neutron_policy.results.0.stat.path }}" when: - - neutron_policy.results + - neutron_policy.results | length > 0 - name: Copying over existing policy file template: @@ -69,7 +69,6 @@ - "neutron-eswitchd" - "neutron-infoblox-ipam-agent" - "neutron-l3-agent" - - "neutron-linuxbridge-agent" - "neutron-metadata-agent" - "neutron-metering-agent" - "neutron-mlnx-agent" @@ -144,20 +143,6 @@ - item.key in services_need_ml2_conf_ini with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}" -- name: Copying over linuxbridge_agent.ini - become: true - vars: - service_name: "neutron-linuxbridge-agent" - service: "{{ neutron_services[service_name] }}" - merge_configs: - sources: - - "{{ role_path }}/templates/linuxbridge_agent.ini.j2" - - "{{ node_custom_config }}/neutron/linuxbridge_agent.ini" - - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/linuxbridge_agent.ini" - dest: "{{ node_config_directory }}/{{ service_name }}/linuxbridge_agent.ini" - mode: "0660" - when: service | service_enabled_and_mapped_to_host - - name: Copying over openvswitch_agent.ini become: true vars: @@ -372,7 +357,6 @@ vars: service_name: "{{ item.0 }}" services_need_ml2_conf_ini: - - "neutron-linuxbridge-agent" - "neutron-openvswitch-agent" - "neutron-server" template: diff --git a/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2 
b/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2 deleted file mode 100644 index 5b0ae990b8..0000000000 --- a/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2 +++ /dev/null @@ -1,24 +0,0 @@ -[agent] -{% if neutron_agent_extensions %} -extensions = {{ neutron_agent_extensions|map(attribute='name')|join(',') }} -{% endif %} - -[linux_bridge] -{% if inventory_hostname in groups["network"] or (inventory_hostname in groups["compute"] and computes_need_external_bridge | bool ) %} -{# Format: physnet1:br1,physnet2:br2 #} -physical_interface_mappings = {{ neutron_physical_networks.split(',') | zip(neutron_external_interface.split(',')) | map('join', ':') | join(',') }} -{% endif %} - -[securitygroup] -firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver - -[vxlan] -l2_population = true -local_ip = {{ tunnel_interface_address }} - -{% if enable_neutron_sriov | bool %} -[FDB] -# Allows instances using sriov ports to communicate with instances that do not. -# See https://docs.openstack.org/neutron/latest/admin/config-sriov.html -shared_physical_device_mappings = {{ neutron_sriov_physnets }} -{% endif %} diff --git a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 b/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 deleted file mode 100644 index 937abe37c8..0000000000 --- a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 +++ /dev/null @@ -1,44 +0,0 @@ -{ - "command": "neutron-linuxbridge-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/linuxbridge_agent.ini", - "config_files": [ - { - "source": "{{ container_config_directory }}/neutron.conf", - "dest": "/etc/neutron/neutron.conf", - "owner": "neutron", - "perm": "0600" - }, - {% if neutron_policy_file is defined %}{ - "source": "{{ container_config_directory }}/{{ neutron_policy_file }}", - "dest": "/etc/neutron/{{ neutron_policy_file }}", - "owner": "neutron", - "perm": "0600" - },{% endif %} -{% if check_extra_ml2_plugins is defined and check_extra_ml2_plugins.matched > 0 %}{% for plugin in check_extra_ml2_plugins.files %} - { - "source": "{{ container_config_directory }}/{{ plugin.path | basename }}", - "dest": "/etc/neutron/plugins/ml2/{{ plugin.path | basename }}", - "owner": "neutron", - "perm": "0600" - }, -{% endfor %}{% endif %} - { - "source": "{{ container_config_directory }}/linuxbridge_agent.ini", - "dest": "/etc/neutron/plugins/ml2/linuxbridge_agent.ini", - "owner": "neutron", - "perm": "0600" - }{% if kolla_copy_ca_into_containers | bool %}, - { - "source": "{{ container_config_directory }}/ca-certificates", - "dest": "/var/lib/kolla/share/ca-certificates", - "owner": "root", - "perm": "0600" - }{% endif %} - ], - "permissions": [ - { - "path": "/var/log/kolla/neutron", - "owner": "neutron:neutron", - "recurse": true - } - ] -} diff --git a/ansible/roles/neutron/templates/neutron.conf.j2 b/ansible/roles/neutron/templates/neutron.conf.j2 index 648ea97dfb..0c5323d7ee 100644 --- a/ansible/roles/neutron/templates/neutron.conf.j2 +++ b/ansible/roles/neutron/templates/neutron.conf.j2 @@ -25,8 +25,6 @@ state_path = /var/lib/neutron/kolla {% if neutron_plugin_agent == "openvswitch" or (neutron_plugin_agent == "ovn" and neutron_ovn_dhcp_agent | bool) %} interface_driver = openvswitch -{% elif neutron_plugin_agent == "linuxbridge" %} -interface_driver = linuxbridge {% endif %} {% if enable_nova_fake | bool %} diff --git a/ansible/roles/nova-cell/tasks/config.yml b/ansible/roles/nova-cell/tasks/config.yml index 
8ff28b3292..7783d5ee09 100644 --- a/ansible/roles/nova-cell/tasks/config.yml +++ b/ansible/roles/nova-cell/tasks/config.yml @@ -35,7 +35,7 @@ nova_policy_file: "{{ nova_policy.results.0.stat.path | basename }}" nova_policy_file_path: "{{ nova_policy.results.0.stat.path }}" when: - - nova_policy.results + - nova_policy.results | length > 0 - name: Check for vendordata file stat: diff --git a/ansible/roles/nova-cell/tasks/create_cells.yml b/ansible/roles/nova-cell/tasks/create_cells.yml index e4606d88cd..66c7f18759 100644 --- a/ansible/roles/nova-cell/tasks/create_cells.yml +++ b/ansible/roles/nova-cell/tasks/create_cells.yml @@ -26,7 +26,7 @@ - '"already exists" not in nova_cell_create.stdout' when: - inventory_hostname == groups[nova_conductor.group][0] | default(None) - - nova_cell_settings | length == 0 + - not nova_cell_settings | bool - name: Update cell vars: @@ -51,5 +51,5 @@ - nova_cell_updated.rc != 0 when: - inventory_hostname == groups[nova_conductor.group][0] | default(None) - - nova_cell_settings | length > 0 + - nova_cell_settings | bool - nova_cell_settings.cell_message_queue != nova_cell_rpc_transport_url or nova_cell_settings.cell_database != nova_cell_database_url diff --git a/ansible/roles/nova-cell/tasks/external_ceph.yml b/ansible/roles/nova-cell/tasks/external_ceph.yml index de8fc143e6..f2d7ba6967 100644 --- a/ansible/roles/nova-cell/tasks/external_ceph.yml +++ b/ansible/roles/nova-cell/tasks/external_ceph.yml @@ -200,6 +200,6 @@ # reload. This may be due to differences in tested versions of libvirt # (8.0.0 vs 6.0.0). Reload should be low overhead, so do it always. libvirt_restart_handlers: >- - {{ ['Restart nova-libvirt container'] - if enable_nova_libvirt_container | bool else - ['Reload libvirtd'] }} + {{ ['Reload libvirtd'] + if not enable_nova_libvirt_container | bool else + [] }} diff --git a/ansible/roles/nova/tasks/config.yml b/ansible/roles/nova/tasks/config.yml index 92e55f417b..6666d1a3e3 100644 --- a/ansible/roles/nova/tasks/config.yml +++ b/ansible/roles/nova/tasks/config.yml @@ -26,7 +26,7 @@ nova_policy_file: "{{ nova_policy.results.0.stat.path | basename }}" nova_policy_file_path: "{{ nova_policy.results.0.stat.path }}" when: - - nova_policy.results + - nova_policy.results | length > 0 - name: Check for vendordata file stat: @@ -103,6 +103,21 @@ - service | service_enabled_and_mapped_to_host - nova_wsgi_provider == "apache" +- name: Copying over vendordata file for nova services + vars: + service: "{{ nova_services[item] }}" + copy: + src: "{{ vendordata_file_path }}" + dest: "{{ node_config_directory }}/{{ item }}/vendordata.json" + mode: "0660" + become: True + when: + - vendordata_file_path is defined + - service | service_enabled_and_mapped_to_host + loop: + - "nova-metadata" + - "nova-api" + - name: "Configure uWSGI for Nova" include_role: name: service-uwsgi-config @@ -123,15 +138,3 @@ loop: - { name: "nova-api", port: "{{ nova_api_listen_port }}" } - { name: "nova-metadata", port: "{{ nova_metadata_listen_port }}" } - -- name: Copying over vendordata file - vars: - service: "{{ nova_services['nova-api'] }}" - copy: - src: "{{ vendordata_file_path }}" - dest: "{{ node_config_directory }}/nova-api/vendordata.json" - mode: "0660" - become: True - when: - - vendordata_file_path is defined - - service | service_enabled_and_mapped_to_host diff --git a/ansible/roles/nova/tasks/map_cell0.yml b/ansible/roles/nova/tasks/map_cell0.yml index 1fb6c4314b..429b2fb955 100644 --- a/ansible/roles/nova/tasks/map_cell0.yml +++ 
b/ansible/roles/nova/tasks/map_cell0.yml @@ -59,7 +59,7 @@ failed_when: - nova_cell0_updated.rc != 0 when: - - nova_cell_settings | length > 0 + - nova_cell_settings | bool - nova_cell_settings.cell_database != nova_cell0_connection run_once: True delegate_to: "{{ groups[nova_api.group][0] }}" diff --git a/ansible/roles/nova/templates/nova-api-wsgi.conf.j2 b/ansible/roles/nova/templates/nova-api-wsgi.conf.j2 index 7acd59eb8c..aeea3e932f 100644 --- a/ansible/roles/nova/templates/nova-api-wsgi.conf.j2 +++ b/ansible/roles/nova/templates/nova-api-wsgi.conf.j2 @@ -37,9 +37,7 @@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/nova-api-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ nova_log_dir }}/nova-api-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ nova_log_dir }}/nova-api-access.log" logformat diff --git a/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2 b/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2 index 58ab62302f..8519ebf339 100644 --- a/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2 +++ b/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2 @@ -37,9 +37,7 @@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/nova-metadata-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ nova_log_dir }}/nova-metadata-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ nova_log_dir }}/nova-metadata-access.log" logformat diff --git a/ansible/roles/octavia/defaults/main.yml b/ansible/roles/octavia/defaults/main.yml index df3d39ae62..e683e67669 100644 --- a/ansible/roles/octavia/defaults/main.yml +++ b/ansible/roles/octavia/defaults/main.yml @@ -277,6 +277,15 @@ octavia_ks_users: password: "{{ octavia_keystone_password }}" role: "admin" +#################### +# Notification +#################### +octavia_notification_topics: + - name: notifications + enabled: "{{ enable_ceilometer | bool }}" + +octavia_enabled_notification_topics: "{{ octavia_notification_topics | selectattr('enabled', 'equalto', true) | list }}" + #################### # Kolla #################### diff --git a/ansible/roles/octavia/tasks/config.yml b/ansible/roles/octavia/tasks/config.yml index 73990ac6c8..63910990c1 100644 --- a/ansible/roles/octavia/tasks/config.yml +++ b/ansible/roles/octavia/tasks/config.yml @@ -29,7 +29,7 @@ octavia_policy_file: "{{ octavia_policy.results.0.stat.path | basename }}" octavia_policy_file_path: "{{ octavia_policy.results.0.stat.path }}" when: - - octavia_policy.results + - octavia_policy.results | length > 0 - name: Copying over existing policy file template: diff --git a/ansible/roles/octavia/templates/octavia-wsgi.conf.j2 b/ansible/roles/octavia/templates/octavia-wsgi.conf.j2 index e3a3a598c9..f4203c1c8a 100644 --- a/ansible/roles/octavia/templates/octavia-wsgi.conf.j2 +++ b/ansible/roles/octavia/templates/octavia-wsgi.conf.j2 @@ -24,9 +24,7 @@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/octavia-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/kolla/octavia/octavia-api-error.log LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog 
/var/log/kolla/octavia/octavia-api-access.log logformat diff --git a/ansible/roles/octavia/templates/octavia.conf.j2 b/ansible/roles/octavia/templates/octavia.conf.j2 index 552b4cbfec..586957f303 100644 --- a/ansible/roles/octavia/templates/octavia.conf.j2 +++ b/ansible/roles/octavia/templates/octavia.conf.j2 @@ -122,6 +122,12 @@ rpc_thread_pool_size = 2 [oslo_messaging_notifications] transport_url = {{ notify_transport_url }} +{% if octavia_enabled_notification_topics %} +driver = messagingv2 +topics = {{ octavia_enabled_notification_topics | map(attribute='name') | join(',') }} +{% else %} +driver = noop +{% endif %} [oslo_messaging_rabbit] use_queue_manager = true @@ -152,7 +158,7 @@ ca_certificates_file = {{ openstack_cacert }} [neutron] region_name = {{ openstack_region_name }} -endpoint_type = internal +valid_interfaces = internal ca_certificates_file = {{ openstack_cacert }} [nova] diff --git a/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2 b/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2 index ca2f04886e..4555094e02 100644 --- a/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2 +++ b/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2 @@ -2,7 +2,7 @@ opensearchDashboards.defaultAppId: "{{ opensearch_dashboards_default_app_id }}" logging.dest: /var/log/kolla/opensearch-dashboards/opensearch-dashboards.log server.port: {{ opensearch_dashboards_port }} server.host: "{{ api_interface_address }}" -opensearch.hosts: "{{ opensearch_internal_endpoint }}" +opensearch.hosts: [{% for host in groups['opensearch'] %}"http://{{ 'api' | kolla_address(host) }}:{{ opensearch_port }}"{% if not loop.last %},{% endif %}{% endfor %}] opensearch.requestTimeout: {{ opensearch_dashboards_opensearch_request_timeout }} opensearch.shardTimeout: {{ opensearch_dashboards_opensearch_shard_timeout }} opensearch.ssl.verificationMode: "{{ 'full' if opensearch_dashboards_opensearch_ssl_verify | bool else 'none' }}" diff --git a/ansible/roles/ovn-db/defaults/main.yml b/ansible/roles/ovn-db/defaults/main.yml index 18e999e1e6..0bc27949b6 100644 --- a/ansible/roles/ovn-db/defaults/main.yml +++ b/ansible/roles/ovn-db/defaults/main.yml @@ -4,6 +4,9 @@ ovn_db_services: container_name: ovn_northd group: ovn-northd enabled: true + environment: + OVN_NB_DB: "{{ ovn_nb_connection }}" + OVN_SB_DB: "{{ ovn_sb_connection_no_relay }}" image: "{{ ovn_northd_image_full }}" volumes: "{{ ovn_northd_default_volumes + ovn_northd_extra_volumes }}" dimensions: "{{ ovn_northd_dimensions }}" @@ -11,6 +14,8 @@ ovn_db_services: container_name: ovn_nb_db group: ovn-nb-db enabled: true + environment: + OVN_NB_DB: "{{ ovn_nb_connection }}" image: "{{ ovn_nb_db_image_full }}" volumes: "{{ ovn_nb_db_default_volumes + ovn_nb_db_extra_volumes }}" dimensions: "{{ ovn_nb_db_dimensions }}" @@ -18,6 +23,8 @@ ovn_db_services: container_name: ovn_sb_db group: ovn-sb-db enabled: true + environment: + OVN_SB_DB: "{{ ovn_sb_connection_no_relay }}" image: "{{ ovn_sb_db_image_full }}" volumes: "{{ ovn_sb_db_default_volumes + ovn_sb_db_extra_volumes }}" dimensions: "{{ ovn_sb_db_dimensions }}" diff --git a/ansible/roles/ovn-db/handlers/main.yml b/ansible/roles/ovn-db/handlers/main.yml index 76c04399f4..128659c7a5 100644 --- a/ansible/roles/ovn-db/handlers/main.yml +++ b/ansible/roles/ovn-db/handlers/main.yml @@ -7,6 +7,7 @@ kolla_container: action: "recreate_or_restart_container" common_options: "{{ docker_common_options }}" + environment: "{{ service.environment }}" name: "{{ service.container_name 
}}" image: "{{ service.image }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" @@ -20,6 +21,7 @@ kolla_container: action: "recreate_or_restart_container" common_options: "{{ docker_common_options }}" + environment: "{{ service.environment }}" name: "{{ service.container_name }}" image: "{{ service.image }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" @@ -48,6 +50,7 @@ kolla_container: action: "recreate_or_restart_container" common_options: "{{ docker_common_options }}" + environment: "{{ service.environment }}" name: "{{ service.container_name }}" image: "{{ service.image }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" diff --git a/ansible/roles/ovn-db/tasks/bootstrap-db.yml b/ansible/roles/ovn-db/tasks/bootstrap-db.yml index adeec211a0..89282ab98b 100644 --- a/ansible/roles/ovn-db/tasks/bootstrap-db.yml +++ b/ansible/roles/ovn-db/tasks/bootstrap-db.yml @@ -11,7 +11,10 @@ - name: Get OVN_Northbound cluster leader become: true - command: "{{ kolla_container_engine }} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound" + command: >- + {{ kolla_container_engine }} exec ovn_nb_db + ovs-appctl -t /var/run/ovn/ovnnb_db.ctl + cluster/status OVN_Northbound changed_when: False register: ovn_nb_cluster_status @@ -19,12 +22,24 @@ vars: search_string: "Role: leader" become: true - command: "{{ kolla_container_engine }} exec ovn_nb_db ovn-nbctl --inactivity-probe={{ ovn_nb_db_inactivity_probe }} set-connection ptcp:{{ ovn_nb_db_port }}:0.0.0.0" + command: >- + {{ kolla_container_engine }} exec ovn_nb_db + ovn-nbctl + --db unix:/var/run/ovn/ovnnb_db.sock + --inactivity-probe={{ ovn_nb_db_inactivity_probe }} + set-connection ptcp:{{ ovn_nb_db_port }}:0.0.0.0 + register: ovn_nb_set_connection_result + retries: 3 + delay: 5 + until: ovn_nb_set_connection_result.rc == 0 when: ovn_nb_cluster_status is search(search_string) - name: Get OVN_Southbound cluster leader become: true - command: "{{ kolla_container_engine }} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound" + command: >- + {{ kolla_container_engine }} exec ovn_sb_db + ovs-appctl -t /var/run/ovn/ovnsb_db.ctl + cluster/status OVN_Southbound changed_when: False register: ovn_sb_cluster_status @@ -32,7 +47,16 @@ vars: search_string: "Role: leader" become: true - command: "{{ kolla_container_engine }} exec ovn_sb_db ovn-sbctl --inactivity-probe={{ ovn_sb_db_inactivity_probe }} set-connection ptcp:{{ ovn_sb_db_port }}:0.0.0.0" + command: >- + {{ kolla_container_engine }} exec ovn_sb_db + ovn-sbctl + --db unix:/var/run/ovn/ovnsb_db.sock + --inactivity-probe={{ ovn_sb_db_inactivity_probe }} + set-connection ptcp:{{ ovn_sb_db_port }}:0.0.0.0 + register: ovn_sb_set_connection_result + retries: 3 + delay: 5 + until: ovn_sb_set_connection_result.rc == 0 when: ovn_sb_cluster_status is search(search_string) - name: Wait for ovn-nb-db @@ -72,4 +96,4 @@ delay: 6 when: - enable_ovn_sb_db_relay | bool - loop: "{{ range(1, (ovn_sb_db_relay_count | int) +1) }}" + loop: "{{ range(1, (ovn_sb_db_relay_count | int) +1) | list }}" diff --git a/ansible/roles/ovn-db/tasks/bootstrap-initial.yml b/ansible/roles/ovn-db/tasks/bootstrap-initial.yml index 693e2c1ddf..fde7295039 100644 --- a/ansible/roles/ovn-db/tasks/bootstrap-initial.yml +++ b/ansible/roles/ovn-db/tasks/bootstrap-initial.yml @@ -20,7 +20,7 @@ changed_when: false register: ovn_nb_db_cluster_status when: groups['ovn-nb-db_leader'] is defined and inventory_hostname in 
groups.get('ovn-nb-db_had_volume_False', '') - delegate_to: "{{ groups['ovn-nb-db_leader'][0] }}" + delegate_to: "{{ groups['ovn-nb-db_leader'][0] if groups['ovn-nb-db_leader'] is defined else omit }}" - name: Check SB cluster status command: > @@ -30,7 +30,7 @@ changed_when: false register: ovn_sb_db_cluster_status when: groups['ovn-sb-db_leader'] is defined and inventory_hostname in groups.get('ovn-sb-db_had_volume_False', '') - delegate_to: "{{ groups['ovn-sb-db_leader'][0] }}" + delegate_to: "{{ groups['ovn-sb-db_leader'][0] if groups['ovn-sb-db_leader'] is defined else omit }}" - name: Remove an old node with the same ip address as the new node in NB DB vars: @@ -42,7 +42,7 @@ when: - ovn_nb_db_cluster_status.stdout is defined - (ovn_nb_db_cluster_status.stdout is search('at tcp:' + api_interface_address)) and inventory_hostname in groups.get('ovn-nb-db_had_volume_False', '') - delegate_to: "{{ groups['ovn-nb-db_leader'][0] }}" + delegate_to: "{{ groups['ovn-nb-db_leader'][0] if groups['ovn-nb-db_leader'] is defined else omit }}" - name: Remove an old node with the same ip address as the new node in SB DB vars: @@ -54,7 +54,7 @@ when: - ovn_sb_db_cluster_status.stdout is defined - (ovn_sb_db_cluster_status.stdout is search('at tcp:' + api_interface_address)) and inventory_hostname in groups.get('ovn-sb-db_had_volume_False', '') - delegate_to: "{{ groups['ovn-sb-db_leader'][0] }}" + delegate_to: "{{ groups['ovn-sb-db_leader'][0] if groups['ovn-sb-db_leader'] is defined else omit }}" - name: Set bootstrap args fact for NB (new member) set_fact: diff --git a/ansible/roles/ovn-db/tasks/config-relay.yml b/ansible/roles/ovn-db/tasks/config-relay.yml index 71c3828768..f26cd2b48f 100644 --- a/ansible/roles/ovn-db/tasks/config-relay.yml +++ b/ansible/roles/ovn-db/tasks/config-relay.yml @@ -19,8 +19,6 @@ dest: "{{ node_config_directory }}/ovn-sb-db-relay-{{ item }}/config.json" mode: "0660" become: true - notify: - - Restart ovn-sb-db-relay container - name: Generate config files for OVN relay services vars: @@ -31,5 +29,3 @@ dest: "{{ node_config_directory }}/ovn-sb-db-relay-{{ item }}/ovsdb-relay.json" mode: "0660" become: true - notify: - - Restart ovn-sb-db-relay container diff --git a/ansible/roles/placement/tasks/config.yml b/ansible/roles/placement/tasks/config.yml index 9093dc4bdc..8926746825 100644 --- a/ansible/roles/placement/tasks/config.yml +++ b/ansible/roles/placement/tasks/config.yml @@ -26,7 +26,7 @@ placement_policy_file: "{{ placement_policy.results.0.stat.path | basename }}" placement_policy_file_path: "{{ placement_policy.results.0.stat.path }}" when: - - placement_policy.results + - placement_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/placement/templates/placement-api-wsgi.conf.j2 b/ansible/roles/placement/templates/placement-api-wsgi.conf.j2 index aa313b6809..c1809aed13 100644 --- a/ansible/roles/placement/templates/placement-api-wsgi.conf.j2 +++ b/ansible/roles/placement/templates/placement-api-wsgi.conf.j2 @@ -25,9 +25,7 @@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/placement-api WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog "{{ log_dir }}/placement-api-error.log" LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog "{{ log_dir }}/placement-api-access.log" logformat diff --git a/ansible/roles/prechecks/vars/main.yml b/ansible/roles/prechecks/vars/main.yml index 
6b65e8bfcd..1da91bb291 100644 --- a/ansible/roles/prechecks/vars/main.yml +++ b/ansible/roles/prechecks/vars/main.yml @@ -1,8 +1,8 @@ --- docker_version_min: '18.09' docker_py_version_min: '3.4.1' -ansible_version_min: '2.17' -ansible_version_max: '2.18' +ansible_version_min: '2.18' +ansible_version_max: '2.19' # Top level keys should match ansible_facts.distribution. # These map to lists of supported releases (ansible_facts.distribution_release) or @@ -14,6 +14,6 @@ host_os_distributions: Debian: - "bookworm" Rocky: - - "9" + - "10" Ubuntu: - "noble" diff --git a/ansible/roles/prometheus/defaults/main.yml b/ansible/roles/prometheus/defaults/main.yml index 883fdce2fe..27affe4472 100644 --- a/ansible/roles/prometheus/defaults/main.yml +++ b/ansible/roles/prometheus/defaults/main.yml @@ -286,10 +286,6 @@ prometheus_blackbox_exporter_endpoints_default: - "trove:os_endpoint:{{ trove_public_base_endpoint }}" - "{{ ('trove_internal:os_endpoint:' + trove_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}" enabled: "{{ enable_trove | bool }}" - - endpoints: - - "venus:os_endpoint:{{ venus_public_endpoint }}" - - "{{ ('venus_internal:os_endpoint:' + venus_internal_endpoint) if not kolla_same_external_internal_vip | bool }}" - enabled: "{{ enable_venus | bool }}" - endpoints: - "watcher:os_endpoint:{{ watcher_public_endpoint }}" - "{{ ('watcher_internal:os_endpoint:' + watcher_internal_endpoint) if not kolla_same_external_internal_vip | bool }}" diff --git a/ansible/roles/prometheus/tasks/bootstrap.yml b/ansible/roles/prometheus/tasks/bootstrap.yml index 3eda6b1a3e..9cc8e2634c 100644 --- a/ansible/roles/prometheus/tasks/bootstrap.yml +++ b/ansible/roles/prometheus/tasks/bootstrap.yml @@ -3,7 +3,7 @@ become: true vars: shard_id: "{{ item.key }}" - shard_root_user: "{% if mariadb_loadbalancer == 'haproxy' %}{{ database_user }}{% else %}{{ mariadb_shard_root_user_prefix }}{{ shard_id | string }}{% endif %}" + shard_root_user: "{{ mariadb_shard_root_user_prefix }}{{ shard_id | string }}" shard_host: "{{ mariadb_shards_info.shards[shard_id].hosts[0] }}" kolla_toolbox: container_engine: "{{ kolla_container_engine }}" diff --git a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 index 870448243b..a7495b4395 100644 --- a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 +++ b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 @@ -27,8 +27,10 @@ wsgi-file = {{ service_uwsgi_config_wsgi_file }} plugins-dir = {{ '/usr/lib/uwsgi/plugins' if kolla_base_distro in ['ubuntu', 'debian'] else '/usr/lib64/uwsgi' }} plugins = python3 processes = {{ service_uwsgi_config_workers }} +socket-timeout = 30 thunder-lock = true {% if service_uwsgi_config_uid is defined %} uid = {{ service_uwsgi_config_uid }} {% endif %} worker-reload-mercy = {{ service_uwsgi_config_worker_timeout }} +static-map = /static=/var/lib/kolla/venv/lib/python3/site-packages/static diff --git a/ansible/roles/skyline/templates/skyline.yaml.j2 b/ansible/roles/skyline/templates/skyline.yaml.j2 index c48dc4eeb9..ad7fe09ae9 100644 --- a/ansible/roles/skyline/templates/skyline.yaml.j2 +++ b/ansible/roles/skyline/templates/skyline.yaml.j2 @@ -2,7 +2,7 @@ default: access_token_expire: {{ skyline_access_token_expire_seconds }} access_token_renew: {{ skyline_access_token_renew_seconds }} cors_allow_origins: {{ skyline_backend_cors_origins }} - database_url: mysql://{{ skyline_database_user }}:{{ skyline_database_password }}@{{ skyline_database_address }}/{{ 
skyline_database_name }}{{ '?ssl_ca=' ~ openstack_cacert if skyline_database_enable_tls_internal | bool }} + database_url: mysql+pymysql://{{ skyline_database_user }}:{{ skyline_database_password }}@{{ skyline_database_address }}/{{ skyline_database_name }}{{ '?ssl_ca=' ~ openstack_cacert if skyline_database_enable_tls_internal | bool }} debug: {{ skyline_logging_debug }} log_dir: {{ log_dir }} {% if enable_prometheus | bool %} diff --git a/ansible/roles/tacker/tasks/config.yml b/ansible/roles/tacker/tasks/config.yml index 95669e5268..f6473b0d31 100644 --- a/ansible/roles/tacker/tasks/config.yml +++ b/ansible/roles/tacker/tasks/config.yml @@ -26,7 +26,7 @@ tacker_policy_file: "{{ tacker_policy.results.0.stat.path | basename }}" tacker_policy_file_path: "{{ tacker_policy.results.0.stat.path }}" when: - - tacker_policy.results + - tacker_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/trove/tasks/config.yml b/ansible/roles/trove/tasks/config.yml index fa2dc44179..8427d0079b 100644 --- a/ansible/roles/trove/tasks/config.yml +++ b/ansible/roles/trove/tasks/config.yml @@ -26,7 +26,7 @@ trove_policy_file: "{{ trove_policy.results.0.stat.path | basename }}" trove_policy_file_path: "{{ trove_policy.results.0.stat.path }}" when: - - trove_policy.results + - trove_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/trove/templates/trove-wsgi.conf.j2 b/ansible/roles/trove/templates/trove-wsgi.conf.j2 index 26449a5384..3d79d04e7d 100644 --- a/ansible/roles/trove/templates/trove-wsgi.conf.j2 +++ b/ansible/roles/trove/templates/trove-wsgi.conf.j2 @@ -24,9 +24,7 @@ LogLevel info WSGIScriptAlias / {{ wsgi_directory }}/trove-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/kolla/trove/trove-api-error.log LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat CustomLog /var/log/kolla/trove/trove-api-access.log logformat diff --git a/ansible/roles/venus/defaults/main.yml b/ansible/roles/venus/defaults/main.yml deleted file mode 100644 index 2677cb5263..0000000000 --- a/ansible/roles/venus/defaults/main.yml +++ /dev/null @@ -1,172 +0,0 @@ ---- -venus_services: - venus-api: - container_name: venus_api - group: venus-api - enabled: true - image: "{{ venus_api_image_full }}" - volumes: "{{ venus_api_default_volumes + venus_api_extra_volumes }}" - dimensions: "{{ venus_api_dimensions }}" - healthcheck: "{{ venus_api_healthcheck }}" - haproxy: - venus_api: - enabled: "{{ enable_venus }}" - mode: "http" - external: false - port: "{{ venus_api_port }}" - backend_http_extra: - - "option httpchk" - venus_api_external: - enabled: "{{ enable_venus }}" - mode: "http" - external: true - external_fqdn: "{{ venus_external_fqdn }}" - port: "{{ venus_api_public_port }}" - backend_http_extra: - - "option httpchk" - venus-manager: - container_name: venus_manager - group: venus-manager - enabled: true - image: "{{ venus_manager_image_full }}" - volumes: "{{ venus_manager_default_volumes + venus_manager_extra_volumes }}" - dimensions: "{{ venus_manager_dimensions }}" - -#################### -# Config Validate -#################### -venus_config_validation: - - generator: "/venus/tools/config/venus-config-generator.conf" - config: "/etc/venus/venus.conf" - -#################### -# Database -#################### -venus_database_name: "venus" -venus_database_user: "{% if use_preconfigured_databases | 
bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}venus{% endif %}" -venus_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}" - -#################### -# Database sharding -#################### -venus_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ venus_database_shard_id }}{% else %}{{ database_user }}{% endif %}" -venus_database_shard_id: "{{ mariadb_default_database_shard_id | int }}" -venus_database_shard: - users: - - user: "{{ venus_database_user }}" - password: "{{ venus_database_password }}" - shard_id: "{{ venus_database_shard_id }}" - rules: - - schema: "{{ venus_database_name }}" - shard_id: "{{ venus_database_shard_id }}" - - user: "{{ venus_database_user }}" - shard_id: "{{ venus_database_shard_id }}" - - -#################### -# Docker -#################### -venus_tag: "{{ openstack_tag }}" - -venus_api_image: "{{ docker_image_url }}venus-api" -venus_api_tag: "{{ venus_tag }}" -venus_api_image_full: "{{ venus_api_image }}:{{ venus_api_tag }}" - -venus_manager_image: "{{ docker_image_url }}venus-manager" -venus_manager_tag: "{{ venus_tag }}" -venus_manager_image_full: "{{ venus_manager_image }}:{{ venus_manager_tag }}" - -venus_api_dimensions: "{{ default_container_dimensions }}" -venus_manager_dimensions: "{{ default_container_dimensions }}" - -venus_api_enable_healthchecks: "{{ enable_container_healthchecks }}" -venus_api_healthcheck_interval: "{{ default_container_healthcheck_interval }}" -venus_api_healthcheck_retries: "{{ default_container_healthcheck_retries }}" -venus_api_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}" -venus_api_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ venus_api_port }}"] -venus_api_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}" -venus_api_healthcheck: - interval: "{{ venus_api_healthcheck_interval }}" - retries: "{{ venus_api_healthcheck_retries }}" - start_period: "{{ venus_api_healthcheck_start_period }}" - test: "{% if venus_api_enable_healthchecks | bool %}{{ venus_api_healthcheck_test }}{% else %}NONE{% endif %}" - timeout: "{{ venus_api_healthcheck_timeout }}" - -venus_api_default_volumes: - - "{{ node_config_directory }}/venus-api/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "kolla_logs:/var/log/kolla/" - - "{{ '/dev/shm:/dev/shm' }}" - - "{{ kolla_dev_repos_directory ~ '/venus:/dev-mode/venus' if venus_dev_mode | bool else '' }}" - - "venus:/var/lib/venus/" -venus_manager_default_volumes: - - "{{ node_config_directory }}/venus-manager/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}" - - "kolla_logs:/var/log/kolla/" - - "{{ '/dev/shm:/dev/shm' }}" - - "{{ kolla_dev_repos_directory ~ '/venus:/dev-mode/venus' if venus_dev_mode | bool else '' }}" - - "venus:/var/lib/venus/" - -venus_extra_volumes: "{{ default_extra_volumes }}" -venus_api_extra_volumes: "{{ venus_extra_volumes }}" -venus_manager_extra_volumes: "{{ venus_extra_volumes }}" - -#################### -# OpenStack -#################### -venus_logging_debug: "{{ openstack_logging_debug }}" - -venus_keystone_user: "venus" - -openstack_venus_auth: "{{ openstack_auth }}" - - -#################### -# Kolla -#################### 
-venus_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}" -venus_dev_repos_pull: "{{ kolla_dev_repos_pull }}" -venus_dev_mode: "{{ kolla_dev_mode }}" -venus_source_version: "{{ kolla_source_version }}" - -#################### -# logging -#################### -openstack_logging_default_format_string: "%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [- req-None - - - - -] %(instance)s%(message)s" -openstack_logging_context_format_string: "%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s" - -#################### -# Notifications -#################### -venus_notification_topics: - - name: notifications - enabled: "{{ enable_ceilometer | bool }}" - -venus_enabled_notification_topics: "{{ venus_notification_topics | selectattr('enabled', 'equalto', true) | list }}" - -#################### -# Keystone -#################### -venus_ks_services: - - name: "venus" - type: "LMS" - description: "Log Manager Service" - endpoints: - - {'interface': 'internal', 'url': '{{ venus_internal_endpoint }}'} - - {'interface': 'public', 'url': '{{ venus_public_endpoint }}'} - -venus_ks_users: - - project: "service" - user: "{{ venus_keystone_user }}" - password: "{{ venus_keystone_password }}" - role: "admin" - -# Database -venus_database_enable_tls_internal: "{{ database_enable_tls_internal | bool }}" - -################### -# Copy certificates -################### -venus_copy_certs: "{{ kolla_copy_ca_into_containers | bool or venus_database_enable_tls_internal | bool }}" diff --git a/ansible/roles/venus/handlers/main.yml b/ansible/roles/venus/handlers/main.yml deleted file mode 100644 index 1f8b3fdb50..0000000000 --- a/ansible/roles/venus/handlers/main.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: Restart venus-api container - vars: - service_name: "venus-api" - service: "{{ venus_services[service_name] }}" - become: true - kolla_container: - action: "recreate_or_restart_container" - common_options: "{{ docker_common_options }}" - name: "{{ service.container_name }}" - image: "{{ service.image }}" - volumes: "{{ service.volumes | reject('equalto', '') | list }}" - dimensions: "{{ service.dimensions }}" - healthcheck: "{{ service.healthcheck | default(omit) }}" - -- name: Restart venus-manager container - vars: - service_name: "venus-manager" - service: "{{ venus_services[service_name] }}" - become: true - kolla_container: - action: "recreate_or_restart_container" - common_options: "{{ docker_common_options }}" - name: "{{ service.container_name }}" - image: "{{ service.image }}" - volumes: "{{ service.volumes | reject('equalto', '') | list }}" - dimensions: "{{ service.dimensions }}" - healthcheck: "{{ service.healthcheck | default(omit) }}" diff --git a/ansible/roles/venus/tasks/bootstrap.yml b/ansible/roles/venus/tasks/bootstrap.yml deleted file mode 100644 index 57938e60f1..0000000000 --- a/ansible/roles/venus/tasks/bootstrap.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- name: Creating venus database - become: true - kolla_toolbox: - container_engine: "{{ kolla_container_engine }}" - module_name: mysql_db - module_args: - ca_cert: "{{ openstack_cacert if database_enable_tls_internal | bool else omit }}" - login_host: "{{ database_address }}" - login_port: "{{ database_port }}" - login_user: "{{ venus_database_shard_root_user }}" - login_password: "{{ database_password }}" - name: "{{ venus_database_name }}" - run_once: True - delegate_to: "{{ groups['venus-api'][0] }}" - when: - - not 
use_preconfigured_databases | bool - -- name: Creating venus database user and setting permissions - become: true - kolla_toolbox: - container_engine: "{{ kolla_container_engine }}" - module_name: mysql_user - module_args: - ca_cert: "{{ openstack_cacert if database_enable_tls_internal | bool else omit }}" - login_host: "{{ database_address }}" - login_port: "{{ database_port }}" - login_user: "{{ venus_database_shard_root_user }}" - login_password: "{{ database_password }}" - name: "{{ venus_database_user }}" - password: "{{ venus_database_password }}" - host: "%" - priv: "{{ venus_database_name }}.*:ALL" - append_privs: "yes" - run_once: True - delegate_to: "{{ groups['venus-api'][0] }}" - when: - - not use_preconfigured_databases | bool diff --git a/ansible/roles/venus/tasks/clone.yml b/ansible/roles/venus/tasks/clone.yml deleted file mode 100644 index 4d85cc0e80..0000000000 --- a/ansible/roles/venus/tasks/clone.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Cloning venus source repository for development - become: true - git: - repo: "{{ venus_git_repository }}" - dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}" - update: "{{ venus_dev_repos_pull }}" - version: "{{ venus_source_version }}" diff --git a/ansible/roles/venus/tasks/config.yml b/ansible/roles/venus/tasks/config.yml deleted file mode 100644 index 05cfe4de3e..0000000000 --- a/ansible/roles/venus/tasks/config.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item.key }}" - state: "directory" - owner: "{{ config_owner_user }}" - group: "{{ config_owner_group }}" - mode: "0770" - become: true - with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}" - -- name: Check if policies shall be overwritten - stat: - path: "{{ item }}" - run_once: True - delegate_to: localhost - register: venus_policy - with_first_found: - - files: "{{ supported_policy_format_list }}" - paths: - - "{{ node_custom_config }}/venus/" - skip: true - -- name: Set venus policy file - set_fact: - venus_policy_file: "{{ venus_policy.results.0.stat.path | basename }}" - venus_policy_file_path: "{{ venus_policy.results.0.stat.path }}" - when: - - venus_policy.results - -- include_tasks: copy-certs.yml - when: - - venus_copy_certs | bool - -- name: Copying over config.json files for services - template: - src: "{{ item.key }}.json.j2" - dest: "{{ node_config_directory }}/{{ item.key }}/config.json" - mode: "0660" - become: true - with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}" - -- name: Copying over venus.conf - vars: - service_name: "{{ item.key }}" - merge_configs: - sources: - - "{{ role_path }}/templates/venus.conf.j2" - - "{{ node_custom_config }}/global.conf" - - "{{ node_custom_config }}/venus.conf" - - "{{ node_custom_config }}/venus/{{ item.key }}.conf" - - "{{ node_custom_config }}/venus/{{ inventory_hostname }}/venus.conf" - dest: "{{ node_config_directory }}/{{ item.key }}/venus.conf" - mode: "0660" - become: true - with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}" - -- name: Copying over existing policy file - template: - src: "{{ venus_policy_file_path }}" - dest: "{{ node_config_directory }}/{{ item.key }}/{{ venus_policy_file }}" - mode: "0660" - when: - - venus_policy_file is defined - with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}" diff --git a/ansible/roles/venus/tasks/config_validate.yml b/ansible/roles/venus/tasks/config_validate.yml deleted 
file mode 100644 index 57ab862017..0000000000 --- a/ansible/roles/venus/tasks/config_validate.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- import_role: - name: service-config-validate - vars: - service_config_validate_services: "{{ venus_services }}" - service_name: "{{ project_name }}" - service_config_validation: "{{ venus_config_validation }}" diff --git a/ansible/roles/venus/tasks/loadbalancer.yml b/ansible/roles/venus/tasks/loadbalancer.yml deleted file mode 100644 index b692351e63..0000000000 --- a/ansible/roles/venus/tasks/loadbalancer.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: "Configure loadbalancer for {{ project_name }}" - import_role: - name: loadbalancer-config - vars: - project_services: "{{ venus_services }}" - tags: always diff --git a/ansible/roles/venus/tasks/precheck.yml b/ansible/roles/venus/tasks/precheck.yml deleted file mode 100644 index 10408219d1..0000000000 --- a/ansible/roles/venus/tasks/precheck.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- import_role: - name: service-precheck - vars: - service_precheck_services: "{{ venus_services }}" - service_name: "{{ project_name }}" - -- name: Get container facts - become: true - kolla_container_facts: - action: get_containers - container_engine: "{{ kolla_container_engine }}" - name: - - venus_api - check_mode: false - register: container_facts - -- name: Checking free port for Venus API - wait_for: - host: "{{ api_interface_address }}" - port: "{{ venus_api_port }}" - connect_timeout: 1 - timeout: 1 - state: stopped - when: - - container_facts.containers['venus_api'] is not defined - - inventory_hostname in groups['venus-api'] diff --git a/ansible/roles/venus/tasks/register.yml b/ansible/roles/venus/tasks/register.yml deleted file mode 100644 index d61d9a9b0c..0000000000 --- a/ansible/roles/venus/tasks/register.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- import_role: - name: service-ks-register - vars: - service_ks_register_auth: "{{ openstack_venus_auth }}" - service_ks_register_services: "{{ venus_ks_services }}" - service_ks_register_users: "{{ venus_ks_users }}" diff --git a/ansible/roles/venus/templates/venus-api.json.j2 b/ansible/roles/venus/templates/venus-api.json.j2 deleted file mode 100644 index 0a825529d8..0000000000 --- a/ansible/roles/venus/templates/venus-api.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "venus_api --config-file /etc/venus/venus.conf", - "config_files": [ - { - "source": "{{ container_config_directory }}/venus.conf", - "dest": "/etc/venus/venus.conf", - "owner": "venus", - "perm": "0644" - }{% if kolla_copy_ca_into_containers | bool %}, - { - "source": "{{ container_config_directory }}/ca-certificates", - "dest": "/var/lib/kolla/share/ca-certificates", - "owner": "root", - "perm": "0600" - }{% endif %} - ], - "permissions": [ - { - "path":"/var/log/kolla/venus/venus-api.log", - "owner": "venus:venus", - "recurse": true - } - ] -} - diff --git a/ansible/roles/venus/templates/venus-manager.json.j2 b/ansible/roles/venus/templates/venus-manager.json.j2 deleted file mode 100644 index 02f7503cb3..0000000000 --- a/ansible/roles/venus/templates/venus-manager.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "venus_manager --config-file /etc/venus/venus.conf task start", - "config_files": [ - { - "source": "{{ container_config_directory }}/venus.conf", - "dest": "/etc/venus/venus.conf", - "owner": "venus", - "perm": "0644" - }{% if kolla_copy_ca_into_containers | bool %}, - { - "source": "{{ container_config_directory }}/ca-certificates", - "dest": "/var/lib/kolla/share/ca-certificates", 
- "owner": "root", - "perm": "0600" - }{% endif %} - ], - "permissions": [ - { - "path":"/var/log/kolla/venus/venus-manager.log", - "owner": "venus:venus", - "recurse": true - } - ] -} - diff --git a/ansible/roles/venus/templates/venus.conf.j2 b/ansible/roles/venus/templates/venus.conf.j2 deleted file mode 100644 index d4cbd91f40..0000000000 --- a/ansible/roles/venus/templates/venus.conf.j2 +++ /dev/null @@ -1,38 +0,0 @@ -[DEFAULT] -my_ip = {{ api_interface_address }} -periodic_interval = 60 -rootwrap_config = /etc/venus/rootwrap.conf -api_paste_config = /etc/venus/api-paste.ini -log_dir = /var/log/kolla/venus/ -debug = {{ venus_logging_debug }} -auth_strategy = keystone -os_region_name = {{ openstack_region_name }} -osapi_venus_listen = {{ api_interface_address }} -osapi_venus_listen_port = {{ venus_api_port }} - -logging_default_format_string = {{ openstack_logging_default_format_string }} -logging_context_format_string = {{ openstack_logging_context_format_string }} - -transport_url = {{ rpc_transport_url }} - -[database] -connection = mysql+pymysql://{{ venus_database_user }}:{{ venus_database_password }}@{{ venus_database_address }}/{{ venus_database_name }}?charset=utf8{{ '&ssl_ca=' ~ openstack_cacert if venus_database_enable_tls_internal | bool }} - -[keystone_authtoken] -cafile = {{ openstack_cacert }} -project_name = service -password = {{ venus_keystone_password }} -username = {{ venus_keystone_user }} -auth_url = {{ keystone_internal_url }} -project_domain_id = {{ default_project_domain_id }} -user_domain_id = {{ default_user_domain_id }} -auth_type = password -memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %} - -{% if enable_opensearch | bool %} -[elasticsearch] -url = {{ opensearch_internal_endpoint }} -{% endif %} - -[oslo_concurrency] -lock_path = /var/lib/venus/tmp diff --git a/ansible/roles/venus/vars/main.yml b/ansible/roles/venus/vars/main.yml deleted file mode 100644 index 3955d5f95f..0000000000 --- a/ansible/roles/venus/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -project_name: "venus" diff --git a/ansible/roles/watcher/tasks/config.yml b/ansible/roles/watcher/tasks/config.yml index ee1a6c6912..1b21a5202d 100644 --- a/ansible/roles/watcher/tasks/config.yml +++ b/ansible/roles/watcher/tasks/config.yml @@ -26,7 +26,7 @@ watcher_policy_file: "{{ watcher_policy.results.0.stat.path | basename }}" watcher_policy_file_path: "{{ watcher_policy.results.0.stat.path }}" when: - - watcher_policy.results + - watcher_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/roles/zun/tasks/config.yml b/ansible/roles/zun/tasks/config.yml index 7d5100189d..7ef4c7e3b5 100644 --- a/ansible/roles/zun/tasks/config.yml +++ b/ansible/roles/zun/tasks/config.yml @@ -31,7 +31,7 @@ zun_policy_file: "{{ zun_policy.results.0.stat.path | basename }}" zun_policy_file_path: "{{ zun_policy.results.0.stat.path }}" when: - - zun_policy.results + - zun_policy.results | length > 0 - include_tasks: copy-certs.yml when: diff --git a/ansible/site.yml b/ansible/site.yml index 5588746dfb..33855f1d61 100644 --- a/ansible/site.yml +++ b/ansible/site.yml @@ -68,7 +68,6 @@ - enable_tacker_{{ enable_tacker | bool }} - enable_telegraf_{{ enable_telegraf | bool }} - enable_trove_{{ enable_trove | bool }} - - enable_venus_{{ enable_venus | bool }} - enable_watcher_{{ enable_watcher | bool }} - enable_zun_{{ enable_zun | bool }} tags: always 
@@ -87,7 +86,6 @@ - name: Apply role common gather_facts: false hosts: - - cron - kolla-logs - kolla-toolbox serial: '{{ kolla_serial|default("0") }}' @@ -100,6 +98,20 @@ roles: - role: common +- name: Apply role cron + gather_facts: false + hosts: + - cron + serial: '{{ kolla_serial|default("0") }}' + max_fail_percentage: >- + {{ cron_max_fail_percentage | + default(kolla_max_fail_percentage) | + default(100) }} + tags: + - cron + roles: + - role: cron + - name: Apply role fluentd gather_facts: false hosts: @@ -313,11 +325,6 @@ tasks_from: loadbalancer tags: trove when: enable_trove | bool - - include_role: - name: venus - tasks_from: loadbalancer - tags: venus - when: enable_venus | bool - include_role: name: watcher tasks_from: loadbalancer @@ -1053,21 +1060,6 @@ - { role: masakari, tags: masakari } -- name: Apply role venus - gather_facts: false - hosts: - - venus-api - - venus-manager - - '&enable_venus_True' - serial: '{{ kolla_serial|default("0") }}' - max_fail_percentage: >- - {{ venus_max_fail_percentage | - default(kolla_max_fail_percentage) | - default(100) }} - roles: - - { role: venus, - tags: venus } - - name: Apply role skyline gather_facts: false hosts: diff --git a/doc/source/admin/mariadb-backup-and-restore.rst b/doc/source/admin/mariadb-backup-and-restore.rst index 6ebb73d5b5..69fd411fd4 100644 --- a/doc/source/admin/mariadb-backup-and-restore.rst +++ b/doc/source/admin/mariadb-backup-and-restore.rst @@ -83,7 +83,7 @@ following options on the first database node: docker run --rm -it --volumes-from mariadb --name dbrestore \ --volume mariadb_backup:/backup \ - quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \ + quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-10 \ /bin/bash (dbrestore) $ cd /backup (dbrestore) $ rm -rf /backup/restore @@ -105,7 +105,7 @@ place, again on the first node: docker run --rm -it --volumes-from mariadb --name dbrestore \ --volume mariadb_backup:/backup \ - quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \ + quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-10 \ /bin/bash (dbrestore) $ rm -rf /var/lib/mysql/* (dbrestore) $ rm -rf /var/lib/mysql/\.[^\.]* @@ -148,7 +148,7 @@ incremental backup, docker run --rm -it --volumes-from mariadb --name dbrestore \ --volume mariadb_backup:/backup --tmpfs /backup/restore \ - quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \ + quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-10 \ /bin/bash (dbrestore) $ cd /backup (dbrestore) $ rm -rf /backup/restore diff --git a/doc/source/conf.py b/doc/source/conf.py index add1790c4a..785cb15e20 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -142,10 +142,10 @@ TESTED_RUNTIMES_GOVERNANCE_URL =\ 'https://governance.openstack.org/tc/reference/runtimes/{}.html'.format(KOLLA_OPENSTACK_RELEASE) -ANSIBLE_CORE_VERSION_MIN = '2.17' -ANSIBLE_CORE_VERSION_MAX = '2.18' -ANSIBLE_VERSION_MIN = '10' -ANSIBLE_VERSION_MAX = '11' +ANSIBLE_CORE_VERSION_MIN = '2.18' +ANSIBLE_CORE_VERSION_MAX = '2.19' +ANSIBLE_VERSION_MIN = '11' +ANSIBLE_VERSION_MAX = '12' GLOBAL_VARIABLE_MAP = { '|ANSIBLE_CORE_VERSION_MIN|': ANSIBLE_CORE_VERSION_MIN, diff --git a/doc/source/contributor/adding-a-new-service.rst b/doc/source/contributor/adding-a-new-service.rst index 665439112c..967ca139b8 100644 --- a/doc/source/contributor/adding-a-new-service.rst +++ b/doc/source/contributor/adding-a-new-service.rst @@ -42,10 +42,10 @@ which Kolla uses throughout and which 
should be followed. * Log rotation - For OpenStack services there should be a ``cron-logrotate-PROJECT.conf.j2`` - template file in ``ansible/roles/common/templates`` with the following + template file in ``ansible/roles/cron/templates`` with the following content: - .. path ansible/roles/common/templates/cron-logrotate-PROJECT.conf.j2 + .. path ansible/roles/cron/templates/cron-logrotate-PROJECT.conf.j2 .. code-block:: console "/var/log/kolla/PROJECT/*.log" @@ -53,14 +53,14 @@ which Kolla uses throughout and which should be followed. } - For OpenStack services there should be an entry in the ``services`` list - in the ``cron.json.j2`` template file in ``ansible/roles/common/templates``. + in the ``cron.json.j2`` template file in ``ansible/roles/cron/templates``. * Log delivery - For OpenStack services the service should add a new ``rewriterule`` in the ``match`` element in the ``01-rewrite.conf.j2`` template file in - ``ansible/roles/common/templates/conf/filter`` to deliver log messages to - Elasticsearch. + ``ansible/roles/fluentd/templates/conf/filter`` to deliver log messages to + Opensearch. * Documentation diff --git a/doc/source/reference/bare-metal/ironic-guide.rst b/doc/source/reference/bare-metal/ironic-guide.rst index d7a5ee90b7..99e8bbefb2 100644 --- a/doc/source/reference/bare-metal/ironic-guide.rst +++ b/doc/source/reference/bare-metal/ironic-guide.rst @@ -107,6 +107,26 @@ You may optionally pass extra kernel parameters to the inspection kernel using: in ``/etc/kolla/globals.yml``. +PXE filter (optional) +~~~~~~~~~~~~~~~~~~~~~ + +To keep parity with the standalone inspector you can enable the experimental +PXE filter service: + +.. code-block:: yaml + + enable_ironic_pxe_filter: "yes" + +The PXE filter container runs alongside ``ironic-dnsmasq`` and cleans up stale +DHCP entries. It is especially useful when auto discovery is enabled and when +the dnsmasq DHCP range overlaps with a Neutron-served network. For the upstream +details see +https://docs.openstack.org/ironic/latest/admin/inspection/pxe_filter.html. + +.. note:: + + Upstream still classifies this PXE filter implementation as experimental. + Configure conductor's HTTP server port (optional) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The port used for conductor's HTTP server is controlled via diff --git a/doc/source/reference/containers/kuryr-guide.rst b/doc/source/reference/containers/kuryr-guide.rst index fbcbfd4192..f748d16c5a 100644 --- a/doc/source/reference/containers/kuryr-guide.rst +++ b/doc/source/reference/containers/kuryr-guide.rst @@ -26,7 +26,8 @@ The IP address is host running the etcd service. ```2375``` is port that allows Docker daemon to be accessed remotely. ```2379``` is the etcd listening port. -By default etcd and kuryr are disabled in the ``group_vars/all.yml``. +By default etcd and kuryr are disabled in the ``group_vars/all/etcd.yml`` and +``group_vars/all/kuryr.yml`` files. In order to enable them, you need to edit the file globals.yml and set the following variables diff --git a/doc/source/reference/databases/mariadb-guide.rst b/doc/source/reference/databases/mariadb-guide.rst index 71b51f77c4..e34e4bf0a4 100644 --- a/doc/source/reference/databases/mariadb-guide.rst +++ b/doc/source/reference/databases/mariadb-guide.rst @@ -52,9 +52,9 @@ inventory file in the way described below: .. 
note:: If ``mariadb_shard_id`` is not defined for host in inventory file it will be set automatically - to ``mariadb_default_database_shard_id`` (default 0) from ``group_vars/all.yml`` and can be - overwritten in ``/etc/kolla/globals.yml``. Shard which is marked as default is special in case - of backup or loadbalance, as it is described below. + to ``mariadb_default_database_shard_id`` (default 0) from ``group_vars/all/mariadb.yml`` and + can be overwritten in ``/etc/kolla/globals.yml``. Shard which is marked as default is + special in case of backup or loadbalance, as it is described below. Loadbalancer ------------ diff --git a/doc/source/reference/networking/neutron.rst b/doc/source/reference/networking/neutron.rst index 0dae7dcb6d..3994ed886e 100644 --- a/doc/source/reference/networking/neutron.rst +++ b/doc/source/reference/networking/neutron.rst @@ -279,6 +279,13 @@ In order to deploy Neutron OVN Agent you need to set the following: Currently the agent is only needed for QoS for hardware offloaded ports. +When you need to run `ovn-nbctl` or `ovn-sbctl` commands, it is most +convenient to run them from the ``ovn_northd`` container: + +.. code-block:: console + + docker exec ovn_northd ovn-nbctl show + Mellanox Infiniband (ml2/mlnx) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -339,9 +346,10 @@ In this example: Running Neutron agents subprocesses in separate containers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -There is an experimental feature in Kolla-Ansible that allows to overcome -the issue of breaking data plane connectivity and dhcp services when -restarting neutron-l3-agent and neutron-dhcp-agent. +There is a feature in Kolla-Ansible that makes it possible to avoid +breaking data plane connectivity, DHCP and metadata services +when restarting neutron-l3-agent and neutron-dhcp-agent in ml2/ovs or +restarting the neutron-ovn-metadata-agent in ml2/ovn. To enable it, modify the configuration in ``/etc/kolla/globals.yml``: diff --git a/doc/source/reference/networking/octavia.rst b/doc/source/reference/networking/octavia.rst index af08cf9e04..7a29d67fa8 100644 --- a/doc/source/reference/networking/octavia.rst +++ b/doc/source/reference/networking/octavia.rst @@ -340,7 +340,7 @@ Now deploy Octavia: Amphora image ------------- -It is necessary to build an Amphora image. On CentOS / Rocky 9: +It is necessary to build an Amphora image. On CentOS / Rocky 10: .. code-block:: console diff --git a/doc/source/reference/orchestration-and-nfv/tacker-guide.rst b/doc/source/reference/orchestration-and-nfv/tacker-guide.rst index 78fdf4821b..4c99e72e15 100644 --- a/doc/source/reference/orchestration-and-nfv/tacker-guide.rst +++ b/doc/source/reference/orchestration-and-nfv/tacker-guide.rst @@ -30,7 +30,7 @@ Preparation and Deployment ~~~~~~~~~~~~~~~~~~~~~~~~~~ By default tacker and required services are disabled in -the ``group_vars/all.yml`` file.
In order to enable them, you need to edit the file ``/etc/kolla/globals.yml`` and set the following variables: diff --git a/doc/source/reference/shared-services/keystone-guide.rst b/doc/source/reference/shared-services/keystone-guide.rst index d0958a3f92..b92230dcb6 100644 --- a/doc/source/reference/shared-services/keystone-guide.rst +++ b/doc/source/reference/shared-services/keystone-guide.rst @@ -104,6 +104,20 @@ Example for Keycloak shown below: keystone_federation_oidc_additional_options: OIDCTokenBindingPolicy: disabled +When using OIDC, operators can also use the following variable +to customize the delay before retrying authentication in the IdP if the +authentication has timed out: + +``keystone_federation_oidc_error_page_retry_login_delay_milliseconds`` + Default is 5000 milliseconds (5 seconds). + +It is also possible to override ``OIDCHTMLErrorTemplate``, +the custom error page template, via: + +.. code-block:: yaml + + {{ node_custom_config }}/keystone/federation/modoidc-error-page.html + Identity providers configurations ~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/user/multinode.rst b/doc/source/user/multinode.rst index 21bf67fa45..5a0db7cf2a 100644 --- a/doc/source/user/multinode.rst +++ b/doc/source/user/multinode.rst @@ -136,7 +136,7 @@ host or group variables: `__ are quite complex, but it is worth becoming familiar with them if using host and group variables. The playbook group variables in -``ansible/group_vars/all.yml`` define global defaults, and these take +``ansible/group_vars/all/`` define global defaults, and these take precedence over variables defined in an inventory file and inventory ``group_vars/all``, but not over inventory ``group_vars/*``. Variables in 'extra' files (``globals.yml``) have the highest precedence, so any variables diff --git a/doc/source/user/operating-kolla.rst b/doc/source/user/operating-kolla.rst index 0b2d78eca8..a48f3357f1 100644 --- a/doc/source/user/operating-kolla.rst +++ b/doc/source/user/operating-kolla.rst @@ -198,15 +198,42 @@ After this command is complete, the containers will have been recreated from the new images and all database schema upgrades and similar actions performed for you. + +CLI Command Completion +~~~~~~~~~~~~~~~~~~~~~~ + +Kolla Ansible supports shell command completion to make the CLI easier to use. + +To enable Bash completion, generate the completion script: + +.. code-block:: console + + kolla-ansible complete --shell bash > ~/.kolla_ansible_completion.sh + +Then, add the following line to your ``~/.bashrc`` file: + +.. code-block:: console + + source ~/.kolla_ansible_completion.sh + +Finally, reload your shell configuration: + +.. code-block:: console + + source ~/.bashrc + +.. note:: + + If you're using a shell other than Bash, replace ``--shell bash`` with your shell type, + e.g., ``zsh``, and adapt your shell's configuration file accordingly. + + Tips and Tricks ~~~~~~~~~~~~~~~ Kolla Ansible CLI ----------------- -When running the ``kolla-ansible`` CLI, additional arguments may be passed to -``ansible-playbook`` via the ``EXTRA_OPTS`` environment variable. - ``kolla-ansible deploy -i INVENTORY`` is used to deploy and start all Kolla containers. @@ -242,6 +269,10 @@ images on hosts. files for enabled OpenStack services, without then restarting the containers so it is not applied right away. +``kolla-ansible validate-config -i INVENTORY`` is used to validate generated +configuration files of enabled OpenStack services.
By default, the results are +saved to ``/var/log/kolla/config-validate`` when issues are detected. + ``kolla-ansible ... -i INVENTORY1 -i INVENTORY2`` Multiple inventories can be specified by passing the ``--inventory`` or ``-i`` command line option multiple times. This can be useful to share configuration between multiple environments. diff --git a/doc/source/user/quickstart-development.rst b/doc/source/user/quickstart-development.rst index 0dccdb1f18..6e29a2ad51 100644 --- a/doc/source/user/quickstart-development.rst +++ b/doc/source/user/quickstart-development.rst @@ -186,7 +186,7 @@ There are a few options that are required to deploy Kolla Ansible: - Rocky (``rocky``) - Ubuntu (``ubuntu``) - For newcomers, we recommend to use Rocky Linux 9 or Ubuntu 24.04. + For newcomers, we recommend to use Rocky Linux 10 or Ubuntu 24.04. .. code-block:: console diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 4afcab38aa..b4d8553670 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -177,7 +177,7 @@ There are a few options that are required to deploy Kolla Ansible: - Rocky (``rocky``) - Ubuntu (``ubuntu``) - For newcomers, we recommend to use Rocky Linux 9 or Ubuntu 24.04. + For newcomers, we recommend to use Rocky Linux 10 or Ubuntu 24.04. .. code-block:: console diff --git a/doc/source/user/support-matrix.rst b/doc/source/user/support-matrix.rst index 0301775f19..8925a2c021 100644 --- a/doc/source/user/support-matrix.rst +++ b/doc/source/user/support-matrix.rst @@ -9,13 +9,13 @@ Kolla Ansible supports the following host Operating Systems (OS): .. note:: - CentOS Stream 9 is supported as a host OS while Kolla does not publish CS9 + CentOS Stream 10 is supported as a host OS while Kolla does not publish CS10 based images. Users can build them on their own. We recommend using Rocky - Linux 9 images instead. + Linux 10 images instead. -* CentOS Stream 9 +* CentOS Stream 10 * Debian Bookworm (12) -* Rocky Linux 9 +* Rocky Linux 10 * Ubuntu Noble (24.04) Supported container images diff --git a/doc/source/user/troubleshooting.rst b/doc/source/user/troubleshooting.rst index d00a94ca74..76a9ac6047 100644 --- a/doc/source/user/troubleshooting.rst +++ b/doc/source/user/troubleshooting.rst @@ -88,13 +88,13 @@ You can find all kolla logs in there. /var/lib/docker/volumes/kolla_logs/_data When ``enable_central_logging`` is enabled, to view the logs in a web browser -using Kibana, go to -``http://:`` or -``http://:``. Authenticate -using ```` and ````. +using OpenSearch Dashboards, go to +``http://:`` or +``http://:``. Authenticate +using ``opensearch`` and ````. The values ````, ```` -```` and ```` can be found in -``/kolla/ansible/group_vars/all.yml`` or if the default -values are overridden, in ``/etc/kolla/globals.yml``. The value of -```` can be found in ``/etc/kolla/passwords.yml``. +```` can be found in +``/kolla/ansible/group_vars/all/opensearch.yml``. The value +of ```` can be found in +``/etc/kolla/passwords.yml``. diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml index cb0e651d59..8f0973f5a8 100644 --- a/etc/kolla/globals.yml +++ b/etc/kolla/globals.yml @@ -1,7 +1,7 @@ --- # You can use this file to override _any_ variable throughout Kolla. # Additional options can be found in the -# 'kolla-ansible/ansible/group_vars/all.yml' file. Default value of all the +# 'kolla-ansible/ansible/group_vars/all' directory. 
Default value of all the # commented parameters are shown here, To override the default value uncomment # the parameter and change its value. @@ -160,9 +160,7 @@ workaround_ansible_issue_8743: yes # addresses for that reason. #neutron_external_interface: "eth1" -# Valid options are [ openvswitch, ovn, linuxbridge ] -# Do note linuxbridge is *EXPERIMENTAL* in Neutron since Zed and it requires extra tweaks to config to be usable. -# For details, see: https://docs.openstack.org/neutron/latest/admin/config-experimental-framework.html +# Valid options are [ openvswitch, ovn ] #neutron_plugin_agent: "openvswitch" # Valid options are [ internal, infoblox ] @@ -175,6 +173,9 @@ workaround_ansible_issue_8743: yes # Neutron rolling upgrade were enable by default #neutron_enable_rolling_upgrade: "yes" +# Enable wrapper containers to keep Neutron agent restarts isolated from the main service containers +#neutron_agents_wrappers: "yes" + # Configure neutron logging framework to log ingress/egress connections to instances # for security groups rules. More information can be found here: # https://docs.openstack.org/neutron/latest/admin/config-logging.html @@ -378,13 +379,13 @@ workaround_ansible_issue_8743: yes #enable_horizon_octavia: "{{ enable_octavia | bool }}" #enable_horizon_tacker: "{{ enable_tacker | bool }}" #enable_horizon_trove: "{{ enable_trove | bool }}" -#enable_horizon_venus: "{{ enable_venus | bool }}" #enable_horizon_watcher: "{{ enable_watcher | bool }}" #enable_horizon_zun: "{{ enable_zun | bool }}" #enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}" #enable_ironic: "no" #enable_ironic_neutron_agent: "no" #enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}" +#enable_ironic_pxe_filter: "no" #enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}" #enable_kuryr: "no" #enable_magnum: "no" @@ -421,7 +422,7 @@ workaround_ansible_issue_8743: yes #enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'opensearch') }}" #enable_opensearch_dashboards: "{{ enable_opensearch | bool }}" #enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}" -#enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}" +#enable_openvswitch: "{{ enable_neutron }}" #enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}" #enable_ovs_dpdk: "no" #enable_osprofiler: "no" @@ -434,7 +435,6 @@ workaround_ansible_issue_8743: yes #enable_telegraf: "no" #enable_trove: "no" #enable_trove_singletenant: "no" -#enable_venus: "no" #enable_watcher: "no" #enable_zun: "no" diff --git a/etc/kolla/passwords.yml b/etc/kolla/passwords.yml index 8647dd9d29..5705873d68 100644 --- a/etc/kolla/passwords.yml +++ b/etc/kolla/passwords.yml @@ -131,9 +131,6 @@ tacker_keystone_password: zun_database_password: zun_keystone_password: -venus_database_password: -venus_keystone_password: - masakari_database_password: masakari_keystone_password: diff --git a/releasenotes/notes/ansible-2.19-986e55799b72dbf5.yaml b/releasenotes/notes/ansible-2.19-986e55799b72dbf5.yaml new file mode 100644 index 0000000000..33ed5451df --- /dev/null +++ b/releasenotes/notes/ansible-2.19-986e55799b72dbf5.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Minimum supported Ansible version is now ``11`` (ansible-core 2.18) + and maximum supported is ``12`` (ansible-core 2.19). 
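The release note above narrows the supported Ansible range to match the updated prechecks bounds and ``requirements.txt``. As a rough illustration only (this is not the actual kolla-ansible prechecks task; it is a sketch of an equivalent gate), the new bounds amount to requiring ansible-core of at least 2.18 and below 2.20, excluding 2.19.0:

.. code-block:: yaml

   # Sketch of a version gate equivalent to the new bounds; not the real
   # prechecks implementation.
   - name: Assert that ansible-core is within the supported range
     assert:
       that:
         - ansible_version.full is version('2.18', '>=')
         - ansible_version.full is version('2.19.0', '!=')
         - ansible_version.full is version('2.20', '<')
       fail_msg: >-
         ansible-core {{ ansible_version.full }} is outside the supported
         range for this release.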
diff --git a/releasenotes/notes/bug-2106557-6adff0f76b17500e.yaml b/releasenotes/notes/bug-2106557-6adff0f76b17500e.yaml new file mode 100644 index 0000000000..7f558e85a8 --- /dev/null +++ b/releasenotes/notes/bug-2106557-6adff0f76b17500e.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes an issue where Horizon returned HTTP 500 errors when one of the + Memcached nodes was unavailable by setting ``ignore_exc`` to ``True`` in + the cache backend. + `LP#2106557 `__ diff --git a/releasenotes/notes/bug-2111328-c4f57b50eb5bfecf.yaml b/releasenotes/notes/bug-2111328-c4f57b50eb5bfecf.yaml new file mode 100644 index 0000000000..94bf9a694b --- /dev/null +++ b/releasenotes/notes/bug-2111328-c4f57b50eb5bfecf.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue where vendordata.json, if defined, + was not being copied to the nova-metadata directory. + `LP#2111328 `__ diff --git a/releasenotes/notes/bug-2123946-notify-handlers-3d0a6a0788d5dcce.yaml b/releasenotes/notes/bug-2123946-notify-handlers-3d0a6a0788d5dcce.yaml new file mode 100644 index 0000000000..060471f697 --- /dev/null +++ b/releasenotes/notes/bug-2123946-notify-handlers-3d0a6a0788d5dcce.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Handlers to trigger a restart of the nova_libvirt and ovn_sb_db_relay + containers have been removed and restarts of these services + are now under the control of the service-check-containers + role `LP#2123946 `__. diff --git a/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml b/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml new file mode 100644 index 0000000000..7bbb085e5c --- /dev/null +++ b/releasenotes/notes/cron-break-out-role-fa72289cc100ef53.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + A ``cron`` Ansible role has been created and its deployment is not part + of the ``common`` role anymore. diff --git a/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml b/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml new file mode 100644 index 0000000000..e50b469113 --- /dev/null +++ b/releasenotes/notes/custom_modOIDC_error_page-0fe3dd7414310536.yaml @@ -0,0 +1,19 @@ +--- +features: + - | + Enable the configuration of the timeout manager via the + ``OIDCStateTimeout`` variable. We also provide a means to + override the error page for the modOIDC plugin via the + ``{{ node_custom_config }}/keystone/federation/modoidc-error-page.html`` + file. + +upgrade: + - | + A default template was added for the modOIDC plugin, + which will handle authentication errors for federated users. + The default template is found at + "ansible/roles/keystone/templates/modoidc-error-page.html.j2"; + it can also be replaced/overwritten. One can also override just + the timeout, instead of the whole page, via the following variable: + ``keystone_federation_oidc_error_page_retry_login_delay_milliseconds``. + The default timeout for the page redirection is 5 seconds. diff --git a/releasenotes/notes/docs-remove-extra-opts-9f68a9b3dd9f14c1.yaml b/releasenotes/notes/docs-remove-extra-opts-9f68a9b3dd9f14c1.yaml new file mode 100644 index 0000000000..8e044d6687 --- /dev/null +++ b/releasenotes/notes/docs-remove-extra-opts-9f68a9b3dd9f14c1.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Remove reference to EXTRA_OPTS in documentation.
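To make the modOIDC error-page note above concrete, a hedged sketch of the operator-side override in ``/etc/kolla/globals.yml`` follows. The 10000 ms value is purely illustrative (the documented default is 5000 ms); the variable name and custom page path are taken from the note itself:

.. code-block:: yaml

   # Illustrative value only; the default retry delay is 5000 ms (5 seconds).
   keystone_federation_oidc_error_page_retry_login_delay_milliseconds: 10000

A fully custom error page, if needed, is placed at ``{{ node_custom_config }}/keystone/federation/modoidc-error-page.html`` as described in the feature note.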
diff --git a/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml b/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml new file mode 100644 index 0000000000..ab486ca79d --- /dev/null +++ b/releasenotes/notes/drop-clustercheck-and-haproxy-support-for-mariadb-4cbd7c8590a34981.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + ProxySQL is now automatically enabled when MariaDB is enabled. + The MariaDB container healthcheck method was updated, as the Clustercheck + script was replaced with the official MariaDB docker image's + `healthcheck.sh `__ +upgrade: + - | + Database loadbalancing with HAProxy and MariaDB Clustercheck is no longer + supported. For systems that use HAProxy and Clustercheck, upgrading + MariaDB with ``kolla-ansible upgrade`` will deploy ProxySQL containers and + remove MariaDB Clustercheck containers. diff --git a/releasenotes/notes/drop-legacy-iptables-1979f67a924d4da1.yaml b/releasenotes/notes/drop-legacy-iptables-1979f67a924d4da1.yaml new file mode 100644 index 0000000000..0fa17f01d0 --- /dev/null +++ b/releasenotes/notes/drop-legacy-iptables-1979f67a924d4da1.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + ``neutron_legacy_iptables`` and its handling have been dropped. diff --git a/releasenotes/notes/drop-venus-b929071fb79b8026.yaml b/releasenotes/notes/drop-venus-b929071fb79b8026.yaml new file mode 100644 index 0000000000..e33fe99bb0 --- /dev/null +++ b/releasenotes/notes/drop-venus-b929071fb79b8026.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Support for deploying ``Venus`` container images has been dropped. diff --git a/releasenotes/notes/fix-horizon-glance-cors-55e2e83902662c99.yaml b/releasenotes/notes/fix-horizon-glance-cors-55e2e83902662c99.yaml new file mode 100644 index 0000000000..513f2d190f --- /dev/null +++ b/releasenotes/notes/fix-horizon-glance-cors-55e2e83902662c99.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an issue where CORS could be blocked when attempting + to upload an image via the Horizon user interface. diff --git a/releasenotes/notes/fluentd-direct-b37822ae1145355e.yaml b/releasenotes/notes/fluentd-direct-b37822ae1145355e.yaml new file mode 100644 index 0000000000..3f16efb467 --- /dev/null +++ b/releasenotes/notes/fluentd-direct-b37822ae1145355e.yaml @@ -0,0 +1,14 @@ +--- +features: + - | + Fluentd now sends logs directly to OpenSearch node IPs instead of using + a Load Balancer. This change reduces Load Balancer overhead from high + log volumes. The Load Balancer for OpenSearch remains in place, as it + is still used by OpenSearch Dashboards. Fluentd continues to handle node + availability, automatically distributing logs via round-robin to + available nodes, ensuring log delivery even if individual OpenSearch + nodes become unavailable. +fixes: + - | + Fixed the Fluentd configuration template to avoid generating unnecessary + empty lines when optional parameters are not set. diff --git a/releasenotes/notes/horizon-port-584efee771a14fd9.yaml b/releasenotes/notes/horizon-port-584efee771a14fd9.yaml new file mode 100644 index 0000000000..96f2f37737 --- /dev/null +++ b/releasenotes/notes/horizon-port-584efee771a14fd9.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + ``Horizon`` default port (80/443) has been changed to ``8080`` when using + HAProxy, while the old default has been retained for development + environments using ``enable_haproxy`` set to ``no``.
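As a small illustration of the development case called out in the Horizon port note above, such environments keep the old port default simply by disabling HAProxy in ``/etc/kolla/globals.yml`` (a sketch mirroring the wording of the note):

.. code-block:: yaml

   # Development / all-in-one setup without HAProxy; Horizon keeps the old
   # default port, as described in the release note above.
   enable_haproxy: "no"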
diff --git a/releasenotes/notes/ironic-pxe-filter-8376c424cb533bd3.yaml b/releasenotes/notes/ironic-pxe-filter-8376c424cb533bd3.yaml new file mode 100644 index 0000000000..80f38db8d2 --- /dev/null +++ b/releasenotes/notes/ironic-pxe-filter-8376c424cb533bd3.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Adds the optional ``ironic-pxe-filter`` service controlled by + ``enable_ironic_pxe_filter``. This brings parity with the standalone + inspector. Upstream currently classifies the PXE filter as experimental. diff --git a/releasenotes/notes/move-tasks-a8e65bbda50dd2a0.yaml b/releasenotes/notes/move-tasks-a8e65bbda50dd2a0.yaml new file mode 100644 index 0000000000..3c8c90d155 --- /dev/null +++ b/releasenotes/notes/move-tasks-a8e65bbda50dd2a0.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Moved tasks that modify host configuration from the kolla-ansible + common role to a-c-k, as they need to be run only once at the + bootstrap of the host and are not strongly related to the common + services. diff --git a/releasenotes/notes/neutron-agent-wrappers-by-default-c48bc7c00fcca011.yaml b/releasenotes/notes/neutron-agent-wrappers-by-default-c48bc7c00fcca011.yaml new file mode 100644 index 0000000000..1b7464f8ae --- /dev/null +++ b/releasenotes/notes/neutron-agent-wrappers-by-default-c48bc7c00fcca011.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + Neutron agent wrappers are now enabled by default. The wrapper containers + restart DHCP, L3, and related agents without having to respawn the main + service containers, which reduces dataplane disruptions during upgrades and + restarts. Operators who need the previous behaviour can set + ``neutron_agents_wrappers`` to ``"no"`` in ``/etc/kolla/globals.yml``. diff --git a/releasenotes/notes/neutron-ovn-metadata-agent-haproxy-container-2935ea0b03c41900.yaml b/releasenotes/notes/neutron-ovn-metadata-agent-haproxy-container-2935ea0b03c41900.yaml new file mode 100644 index 0000000000..3a13a9ee79 --- /dev/null +++ b/releasenotes/notes/neutron-ovn-metadata-agent-haproxy-container-2935ea0b03c41900.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Implement ``neutron_agents_wrappers`` for the + neutron-ovn-metadata-agent. This allows the haproxy processes which + forward metadata requests in ml2/ovn setups to spawn in separate + containers. diff --git a/releasenotes/notes/octavia-notifications-98a91ab02d9cbee6.yaml b/releasenotes/notes/octavia-notifications-98a91ab02d9cbee6.yaml new file mode 100644 index 0000000000..ca6a5f4ac1 --- /dev/null +++ b/releasenotes/notes/octavia-notifications-98a91ab02d9cbee6.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Adds a missing override for ``octavia_notification_topics`` so that + operators can add their own notification topics for Octavia. By + default it will send notifications to Ceilometer when Ceilometer + is enabled. diff --git a/releasenotes/notes/opensearch-dashboards-direct-to-opensearch-0d9e94c4b6a608c0.yaml b/releasenotes/notes/opensearch-dashboards-direct-to-opensearch-0d9e94c4b6a608c0.yaml new file mode 100644 index 0000000000..1fdd9b0880 --- /dev/null +++ b/releasenotes/notes/opensearch-dashboards-direct-to-opensearch-0d9e94c4b6a608c0.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + OpenSearch Dashboards now connects directly to OpenSearch nodes, rather + than via an HAProxy endpoint. This should have no user-facing impact.
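Tying the wrapper and Octavia notes above together, a hedged ``/etc/kolla/globals.yml`` sketch follows. The ``neutron_agents_wrappers`` value is exactly the opt-out described in the upgrade note, while the ``octavia_notification_topics`` structure is an assumption that mirrors the ``<service>_notification_topics`` defaults used by other roles in this patch (for example the removed Venus defaults):

.. code-block:: yaml

   # Opt out of the new default wrapper containers (see the upgrade note above).
   neutron_agents_wrappers: "no"

   # Assumed list structure; check against the Octavia role defaults before use.
   octavia_notification_topics:
     - name: notifications
       enabled: "{{ enable_ceilometer | bool }}"
     - name: custom_notifications
       enabled: true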
diff --git a/releasenotes/notes/ovn-env-variables-b622b4c53ee275f4.yaml b/releasenotes/notes/ovn-env-variables-b622b4c53ee275f4.yaml new file mode 100644 index 0000000000..69f2665c3b --- /dev/null +++ b/releasenotes/notes/ovn-env-variables-b622b4c53ee275f4.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + The OVN container images (``ovn-nb-db``, ``ovn-northd`` and ``ovn-sb-db``) + have now default environment variables in place that ease running of + ``ovn-nbctl`` and ``ovn-sbctl`` commands for operators. diff --git a/releasenotes/notes/remove-neutron-linuxbridge-b1a2457e848709f7.yaml b/releasenotes/notes/remove-neutron-linuxbridge-b1a2457e848709f7.yaml new file mode 100644 index 0000000000..f5625e7820 --- /dev/null +++ b/releasenotes/notes/remove-neutron-linuxbridge-b1a2457e848709f7.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Support for Linux Bridge mechanism driver has been removed. The driver was + already removed from neutron. diff --git a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml index f3d3cd764d..0c120edd68 100644 --- a/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml +++ b/releasenotes/notes/uwsgi-flamingo-5144740f1a2bb4fb.yaml @@ -9,10 +9,18 @@ features: * - Service - Variable + * - Aodh + - aodh_wsgi_provider + * - Gnocchi + - gnocchi_wsgi_provider * - Heat - heat_wsgi_provider + * - Horizon + - horizon_wsgi_provider * - Ironic - ironic_wsgi_provider + * - Keystone + - keystone_wsgi_provider * - Masakari - masakari_wsgi_provider * - Octavia diff --git a/requirements-core.yml b/requirements-core.yml index a1d367ef2d..2eca2c9f36 100644 --- a/requirements-core.yml +++ b/requirements-core.yml @@ -2,19 +2,19 @@ collections: - name: ansible.netcommon source: https://galaxy.ansible.com - version: <8 + version: <9 - name: ansible.posix source: https://galaxy.ansible.com - version: <2 + version: <3 - name: ansible.utils source: https://galaxy.ansible.com - version: <6 + version: <7 - name: community.crypto source: https://galaxy.ansible.com - version: <3 + version: <4 - name: community.general source: https://galaxy.ansible.com - version: <11 + version: <12 - name: community.docker source: https://galaxy.ansible.com version: <5 diff --git a/requirements.txt b/requirements.txt index 568d7f9b63..bf2591b8e1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,7 @@ hvac>=0.10.1 # Apache-2.0 Jinja2>=3 # BSD License (3 clause) # Ansible and ansible's json_query -ansible-core>=2.17,<2.19 # GPLv3 +ansible-core>=2.18,!=2.19.0,<2.20; python_version >= '3.11' # GPLv3 jmespath>=0.9.3 # MIT # ini parsing diff --git a/roles/cephadm/defaults/main.yml b/roles/cephadm/defaults/main.yml index 6739d491ce..5ab4c04257 100644 --- a/roles/cephadm/defaults/main.yml +++ b/roles/cephadm/defaults/main.yml @@ -1,5 +1,5 @@ --- -cephadm_ceph_release: "reef" +cephadm_ceph_release: "squid" cephadm_ceph_apt_repo: "deb http://download.ceph.com/debian-{{ cephadm_ceph_release }}/ {{ ansible_distribution_release }} main" cephadm_use_package_from_distribution: false diff --git a/roles/cephadm/tasks/pkg_redhat.yml b/roles/cephadm/tasks/pkg_redhat.yml index 85708cef43..af8c747b83 100644 --- a/roles/cephadm/tasks/pkg_redhat.yml +++ b/roles/cephadm/tasks/pkg_redhat.yml @@ -22,8 +22,11 @@ become: True when: not cephadm_use_package_from_distribution -- name: Install cephadm +# NOTE(mnasiadka): cephadm bootstrap failing on jinja2 missing +- name: Install cephadm and jinja2 dnf: - name: "cephadm" + name: + - cephadm + - python3-jinja2 
install_weak_deps: False become: True diff --git a/roles/kolla-ansible-deploy-bifrost/tasks/main.yml b/roles/kolla-ansible-deploy-bifrost/tasks/main.yml new file mode 100644 index 0000000000..5b510da401 --- /dev/null +++ b/roles/kolla-ansible-deploy-bifrost/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: Deploy Bifrost + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible deploy-bifrost + -i /etc/kolla/inventory + >/tmp/logs/ansible/deploy-bifrost 2>&1 + diff --git a/roles/kolla-ansible-deploy/tasks/certificates.yml b/roles/kolla-ansible-deploy/tasks/certificates.yml new file mode 100644 index 0000000000..6296a1dbe2 --- /dev/null +++ b/roles/kolla-ansible-deploy/tasks/certificates.yml @@ -0,0 +1,39 @@ +--- +- name: Generate self-signed certificates for the optional internal TLS tests + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible certificates + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/certificates 2>&1 + +- name: Init pebble when Lets Encrypt is enabled + when: (le_enabled | default(False)) | bool + block: + - name: "Run pebble container" + become: true + community.docker.docker_container: + name: pebble + image: "ghcr.io/letsencrypt/pebble:latest" + env: + PEBBLE_VA_NOSLEEP: "1" + PEBBLE_VA_ALWAYS_VALID: "1" + network_mode: host + + - name: "Wait for pebble to start" + ansible.builtin.wait_for: + port: 15000 + delay: 3 + + - name: "Copy pebble miniCA to /etc/kolla/certificates" + become: true + ansible.builtin.command: + cmd: "docker cp pebble:/test/certs/pebble.minica.pem /etc/kolla/certificates/ca/pebble-root.crt" + + - name: "Fetch pebble.crt and store it in /etc/kolla/certificates/ca/" + become: true + ansible.builtin.get_url: + url: "https://127.0.0.1:15000/roots/0" + dest: "/etc/kolla/certificates/ca/pebble.crt" + validate_certs: false diff --git a/roles/kolla-ansible-deploy/tasks/deploy.yml b/roles/kolla-ansible-deploy/tasks/deploy.yml new file mode 100644 index 0000000000..771006735d --- /dev/null +++ b/roles/kolla-ansible-deploy/tasks/deploy.yml @@ -0,0 +1,55 @@ +--- +- name: Run kolla-ansible prechecks + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible prechecks + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/deploy-prechecks 2>&1 + +- name: Run kolla-ansible pull + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible pull + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/pull 2>&1 + +- name: Run kolla-ansible deploy + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible deploy + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/deploy 2>&1 + +- name: Run kolla-ansible post-deploy + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible post-deploy + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/post-deploy 2>&1 + +- name: Run kolla-ansible validate-config + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible validate-config + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/validate-config 2>&1 + when: not is_upgrade | bool + +- name: Run kolla-ansible check + ansible.builtin.shell: + cmd: > + . 
{{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible check + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/check 2>&1 diff --git a/roles/kolla-ansible-deploy/tasks/main.yml b/roles/kolla-ansible-deploy/tasks/main.yml new file mode 100644 index 0000000000..e02765d99a --- /dev/null +++ b/roles/kolla-ansible-deploy/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Ensure /etc/kolla is writable + become: true + ansible.builtin.file: + path: /etc/kolla + state: directory + mode: "0777" + recurse: true + +- import_tasks: certificates.yml +- import_tasks: deploy.yml diff --git a/roles/kolla-ansible-reconfigure/tasks/main.yml b/roles/kolla-ansible-reconfigure/tasks/main.yml new file mode 100644 index 0000000000..4cb60025a7 --- /dev/null +++ b/roles/kolla-ansible-reconfigure/tasks/main.yml @@ -0,0 +1,63 @@ +--- +- name: Run kolla-ansible prechecks + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible prechecks + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/reconfigure-prechecks 2>&1 + +- name: Remove OVN DB containers and volumes on primary to test recreation (docker) + become: true + when: + - scenario == 'ovn' + - container_engine == 'docker' + vars: + ovn_db_services: + - "ovn_nb_db" + - "ovn_sb_db" + block: + - name: Remove OVN DB containers + community.docker.docker_container: + name: "{{ item }}" + state: absent + loop: "{{ ovn_db_services }}" + + - name: Remove OVN DB volumes + community.docker.docker_volume: + name: "{{ item }}" + state: absent + loop: "{{ ovn_db_services }}" + +- name: Remove OVN DB containers and volumes on primary to test recreation (podman) + become: true + when: + - scenario == 'ovn' + - container_engine == 'podman' + vars: + ovn_db_services: + - "ovn_nb_db" + - "ovn_sb_db" + block: + - name: Remove OVN DB containers + containers.podman.podman_container: + name: "{{ item }}" + state: absent + loop: "{{ ovn_db_services }}" + + - name: Remove OVN DB volumes + containers.podman.podman_volume: + name: "{{ item }}" + state: absent + loop: "{{ ovn_db_services }}" + +- name: Run kolla-ansible reconfigure + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible reconfigure + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/reconfigure 2>&1 + diff --git a/roles/kolla-ansible-setup-disks/README.rst b/roles/kolla-ansible-setup-disks/README.rst new file mode 100644 index 0000000000..90bb4e92c0 --- /dev/null +++ b/roles/kolla-ansible-setup-disks/README.rst @@ -0,0 +1,15 @@ +Prepare disks for Kolla-Ansible CI run. + +**Role Variables** + +.. zuul:rolevar:: kolla_ansible_setup_disks_file_path + + Path to the backing file that is allocated and attached to a loop device + +.. zuul:rolevar:: kolla_ansible_setup_disks_lv_name + + Logical volume name to create (skipped if not set) + +.. 
zuul:rolevar:: kolla_ansible_setup_disks_vg_name + + Volume group name to create diff --git a/roles/kolla-ansible-setup-disks/tasks/main.yml b/roles/kolla-ansible-setup-disks/tasks/main.yml new file mode 100644 index 0000000000..e4e25f736a --- /dev/null +++ b/roles/kolla-ansible-setup-disks/tasks/main.yml @@ -0,0 +1,42 @@ +--- +- name: Check if kolla_ansible_setup_disks_file_path is set + ansible.builtin.assert: + that: kolla_ansible_setup_disks_file_path is defined + +- name: Check if kolla_ansible_setup_disks_vg_name is set + ansible.builtin.assert: + that: kolla_ansible_setup_disks_vg_name is defined + +- name: Allocate file for disk backing + become: true + community.general.filesize: + path: "{{ kolla_ansible_setup_disks_file_path }}" + size: "{{ kolla_ansible_setup_disks_file_size | default('5G') }}" + +- name: Get free loop device + become: true + ansible.builtin.shell: + cmd: "losetup -f" + register: _loop_device + +- name: Mount file on loop device + become: true + ansible.builtin.shell: + cmd: > + losetup {{ _loop_device.stdout }} + {{ kolla_ansible_setup_disks_file_path }} + +- name: Create LVM extents on loop device + become: true + community.general.lvg: + vg: "{{ kolla_ansible_setup_disks_vg_name }}" + pvs: "{{ _loop_device.stdout }}" + +- name: Create LV + become: true + community.general.lvol: + vg: "{{ kolla_ansible_setup_disks_vg_name }}" + lv: "{{ kolla_ansible_setup_disks_lv_name }}" + size: "100%FREE" + when: + - kolla_ansible_setup_disks_lv_name is defined diff --git a/roles/kolla-ansible-tempest/defaults/main.yml b/roles/kolla-ansible-tempest/defaults/main.yml new file mode 100644 index 0000000000..600059789a --- /dev/null +++ b/roles/kolla-ansible-tempest/defaults/main.yml @@ -0,0 +1,11 @@ +--- +kolla_ansible_tempest_packages: + - python-tempestconf + - tempest + +kolla_ansible_tempest_cirros_ver: "0.6.3" +kolla_ansible_tempest_exclude_regex: "" +kolla_ansible_tempest_packages_extra: [] +kolla_ansible_tempest_regex: "" + +post_upgrade: false diff --git a/roles/kolla-ansible-tempest/tasks/main.yml b/roles/kolla-ansible-tempest/tasks/main.yml new file mode 100644 index 0000000000..7c118feff8 --- /dev/null +++ b/roles/kolla-ansible-tempest/tasks/main.yml @@ -0,0 +1,49 @@ +--- +- name: Install required packages + ansible.builtin.pip: + name: "{{ kolla_ansible_tempest_packages + kolla_ansible_tempest_packages_extra }}" + virtualenv: "{{ kolla_ansible_venv_path }}" + virtualenv_command: "python3 -m venv" + +- name: Init tempest workspace + ansible.builtin.shell: + cmd: > + {{ kolla_ansible_venv_path }}/bin/tempest init tempest + >/tmp/logs/ansible/test-init-tempest 2>&1 + creates: "/home/zuul/tempest" + +- name: Discover tempest config + vars: + ver: "{{ kolla_ansible_tempest_cirros_ver }}" + image: "https://download.cirros-cloud.net/{{ ver }}/cirros-{{ ver }}-x86_64-disk.img" + ansible.builtin.shell: + chdir: "/home/zuul/tempest" + cmd: > + {{ kolla_ansible_venv_path }}/bin/discover-tempest-config + --debug + --image {{ image }} + --os-cloud kolla-admin + >/tmp/logs/ansible/test-init-tempest-discover 2>&1 + environment: + OS_CLIENT_CONFIG_FILE: "/etc/kolla/clouds.yaml" + +- name: Run tempest tests + environment: + OS_LOG_CAPTURE: "1" + OS_STDOUT_CAPTURE: "1" + OS_STDERR_CAPTURE: "1" + OS_TEST_TIMEOUT: "3600" + vars: + tempest_log_file: "test-tempest-run{{ '-post-upgrade' if post_upgrade | bool else '' }}" + ansible.builtin.shell: + chdir: "/home/zuul/tempest" + cmd: > + {{ kolla_ansible_venv_path }}/bin/tempest run + --config-file etc/tempest.conf + {% if 
kolla_ansible_tempest_regex | length > 0 %} + --regex '{{ kolla_ansible_tempest_regex }}' + {% endif %} + {% if kolla_ansible_tempest_exclude_regex | length > 0 %} + --exclude-regex '{{ kolla_ansible_tempest_exclude_regex }}' + {% endif %} + >/tmp/logs/ansible/{{ tempest_log_file }} 2>&1 diff --git a/roles/kolla-ansible-test-bifrost/tasks/main.yml b/roles/kolla-ansible-test-bifrost/tasks/main.yml new file mode 100644 index 0000000000..431aed5840 --- /dev/null +++ b/roles/kolla-ansible-test-bifrost/tasks/main.yml @@ -0,0 +1,35 @@ +--- +- name: Check baremetal driver list + become: true + ansible.builtin.command: + cmd: > + {{ container_engine }} exec bifrost_deploy + bash -c 'OS_CLOUD=bifrost baremetal driver list' + register: bdl + until: bdl.rc == 0 + retries: 5 + delay: 10 + +- name: Check baremetal node list + become: true + ansible.builtin.command: + cmd: > + {{ container_engine }} exec bifrost_deploy + bash -c 'OS_CLOUD=bifrost baremetal node list' + +- name: Create baremetal node + become: true + ansible.builtin.command: + cmd: > + {{ container_engine }} exec bifrost_deploy + bash -c 'OS_CLOUD=bifrost baremetal node create + --driver redfish --name test-node' + +- name: Delete baremetal node + become: true + ansible.builtin.command: + cmd: > + {{ container_engine }} exec bifrost_deploy + bash -c 'OS_CLOUD=bifrost baremetal node delete + test-node' + diff --git a/roles/kolla-ansible-test-dashboard/tasks/main.yml b/roles/kolla-ansible-test-dashboard/tasks/main.yml new file mode 100644 index 0000000000..8455b31549 --- /dev/null +++ b/roles/kolla-ansible-test-dashboard/tasks/main.yml @@ -0,0 +1,82 @@ +--- +- name: Get contents of clouds.yaml + ansible.builtin.slurp: + src: /etc/kolla/clouds.yaml + register: clouds_yaml + +- name: Query dashboard and check that the returned page looks like a login page + vars: + clouds: "{{ clouds_yaml['content'] | b64decode | from_yaml }}" + url_scheme: "{{ clouds.clouds['kolla-admin'].auth.auth_url | urlsplit('scheme') }}" + url_host: "{{ kolla_external_vip_address | default(kolla_internal_vip_address) }}" + ansible.builtin.uri: + url: "{{ url_scheme + '://' + url_host }}" + ca_path: "{{ clouds.clouds['kolla-admin'].cacert | default(omit) }}" + follow_redirects: "all" + return_content: true + validate_certs: "{{ 'false' if scenario == 'lets-encrypt' else 'true' }}" + register: dashboard_output + until: dashboard_output.content.find('Login') != -1 + retries: 30 + delay: 10 + +- name: Check if testinfra subdirectory exists + ansible.builtin.stat: + path: "{{ zuul.project.src_dir }}/tests/testinfra" + register: testinfra_dir + +- name: Run testinfra tests + when: testinfra_dir.stat.exists + block: + - name: Ensure testinfra subdirectory exists + ansible.builtin.file: + path: "/home/zuul/testinfra" + state: directory + + - name: Ensure screenshots directory exists + ansible.builtin.file: + path: "/home/zuul/testinfra/screenshots" + state: directory + + - name: Ensure required packages are installed + ansible.builtin.pip: + name: + - pytest-html + - pytest-testinfra + - selenium + virtualenv: "{{ kolla_ansible_venv_path }}" + virtualenv_command: "python3 -m venv" + + - name: Run Selenium Firefox container (Docker) + become: true + when: container_engine == 'docker' + community.docker.docker_container: + name: "selenium" + detach: true + image: "quay.io/opendevmirror/selenium-standalone-firefox:latest" + network_mode: host + + - name: Run Selenium Firefox container (Podman) + become: true + when: container_engine == 'podman' + 
containers.podman.podman_container: + name: "selenium" + detach: true + image: "quay.io/opendevmirror/selenium-standalone-firefox:latest" + network_mode: host + + - name: Wait for port 444 to be up + ansible.builtin.wait_for: + port: 4444 + + - name: Run testinfra tests + environment: + HORIZON_PROTO: "{{ 'https' if tls_enabled | bool else 'http' }}" + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + py.test + --junit-xml /home/zuul/testinfra/testinfra-junit.xml -o junit_family=xunit1 + --html=/home/zuul/testinfra/test-results-testinfra.html --self-contained-html + -v tests/testinfra + chdir: "{{ zuul.project.src_dir }}" diff --git a/roles/kolla-ansible-upgrade-bifrost/tasks/main.yml b/roles/kolla-ansible-upgrade-bifrost/tasks/main.yml new file mode 100644 index 0000000000..ac8d11c1c6 --- /dev/null +++ b/roles/kolla-ansible-upgrade-bifrost/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: Upgrade Bifrost + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible deploy-bifrost + -i /etc/kolla/inventory + >/tmp/logs/ansible/upgrade-bifrost 2>&1 diff --git a/roles/kolla-ansible-upgrade/tasks/main.yml b/roles/kolla-ansible-upgrade/tasks/main.yml new file mode 100644 index 0000000000..c66e06fe20 --- /dev/null +++ b/roles/kolla-ansible-upgrade/tasks/main.yml @@ -0,0 +1,65 @@ +--- +- name: Generate self-signed certificates for the optional internal TLS tests + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible certificates + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-certificates 2>&1 + +# NOTE(mnasiadka): Need to run bootstrap before upgrade +- name: Run kolla-ansible bootstrap-servers + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible bootstrap-servers + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-bootstrap 2>&1 + +- name: Run kolla-ansible prechecks + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible prechecks + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-prechecks 2>&1 + +- name: Run kolla-ansible pull + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible pull + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-pull 2>&1 + +- name: Run kolla-ansible upgrade + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible upgrade + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade 2>&1 + +- name: Run kolla-ansible post-deploy + ansible.builtin.shell: + cmd: > + . {{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible post-deploy + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-post-deploy 2>&1 + +- name: Run kolla-ansible validate-config on upgrades + ansible.builtin.shell: + cmd: > + . 
{{ kolla_ansible_venv_path }}/bin/activate && + kolla-ansible validate-config + -i /etc/kolla/inventory + -vvv + >/tmp/logs/ansible/upgrade-validate-config 2>&1 + diff --git a/roles/openstack-clients/defaults/main.yml b/roles/openstack-clients/defaults/main.yml index d335a3ef1d..6b1e3c6d97 100644 --- a/roles/openstack-clients/defaults/main.yml +++ b/roles/openstack-clients/defaults/main.yml @@ -1,9 +1,13 @@ --- openstack_clients_pip_packages: + - package: aodhclient + enabled: "{{ scenario == 'telemetry' }}" - package: python-barbicanclient - enabled: "{{ scenario == 'scenario_nfv' }}" + enabled: "{{ scenario == 'nfv' }}" - package: python-designateclient enabled: "{{ scenario == 'magnum' }}" + - package: gnocchiclient + enabled: "{{ scenario == 'telemetry' }}" - package: python-heatclient enabled: true - package: python-ironicclient @@ -13,13 +17,13 @@ openstack_clients_pip_packages: - package: python-masakariclient enabled: "{{ scenario == 'masakari' }}" - package: python-mistralclient - enabled: "{{ scenario == 'scenario_nfv' }}" + enabled: "{{ scenario == 'nfv' }}" - package: python-octaviaclient enabled: "{{ scenario in ['octavia', 'ovn'] }}" - package: python-openstackclient enabled: true - package: python-tackerclient - enabled: "{{ scenario == 'scenario_nfv' }}" + enabled: "{{ scenario == 'nfv' }}" - package: python-troveclient enabled: "{{ scenario == 'magnum' }}" - package: python-zunclient diff --git a/test-requirements.txt b/test-requirements.txt index 303f3d5b1e..8451a2b904 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,3 +1,31 @@ +# Password hashing +bcrypt>=3.0.0 # Apache-2.0 + +# password generation +cryptography>=2.1 # BSD/Apache-2.0 + +# Hashicorp Vault +hvac>=0.10.1 # Apache-2.0 + +# templating +Jinja2>=3 # BSD License (3 clause) + +# Ansible and ansible's json_query +ansible-core>=2.18,!=2.19.0,<2.20; python_version >= '3.11' # GPLv3 +jmespath>=0.9.3 # MIT + +# ini parsing +oslo.config>=5.2.0 # Apache-2.0 + +# password generation +oslo.utils>=3.33.0 # Apache-2.0 + +# Password hashing +passlib[bcrypt]>=1.0.0 # BSD + +# CLI +cliff>=4.7.0 # Apache-2.0 + # coverage testing coverage!=4.4,>=4.0 # Apache-2.0 diff --git a/tests/check-logs.sh b/tests/check-logs.sh index d78da1ee17..dac11ccb9b 100755 --- a/tests/check-logs.sh +++ b/tests/check-logs.sh @@ -63,9 +63,6 @@ function check_fluentd_missing_logs { /var/log/kolla/mariadb/mariadb-bootstrap.log) continue ;; - /var/log/kolla/mariadb/mariadb-clustercheck.log) - continue - ;; /var/log/kolla/mariadb/mariadb-upgrade.log) continue ;; @@ -100,9 +97,6 @@ function check_fluentd_missing_logs { /var/log/kolla/tenks/*) continue ;; - /var/log/kolla/venus/*) - continue - ;; /var/log/kolla/zun/*) continue ;; @@ -196,6 +190,22 @@ if sudo test -d /var/log/kolla; then fi done + # NOTE: Check if OpenSearch output plugin has connected in OpenSearch scenarios, otherwise + # check_fluentd_missing_logs will fail because fluentd will only parse files when + # output plugin is working. + retries=0 + retries_max=10 + until [[ $(sudo tail -n 5 /var/log/kolla/fluentd/fluentd.log | grep "Could not communicate to OpenSearch" | wc -l) -eq 0 ]]; do + echo "Found 'Could not communicate to OpenSearch' in last 5 lines of fluentd.log, sleeping 30 seconds" + retries=$((retries + 1)) + if [[ $retries != $retries_max ]]; then + sleep 30 + else + echo "Found 'Could not communicate to OpenSearch' in last 5 lines of fluentd.log after 10 retries." 
| tee -a $fluentd_error_summary_file + break + fi + done + if check_fluentd_missing_logs >/dev/null; then any_critical=1 echo "(critical) Found some missing log files in fluentd logs. Matches in $fluentd_error_summary_file" diff --git a/tests/deploy-bifrost.sh b/tests/deploy-bifrost.sh deleted file mode 100755 index b06a6d769e..0000000000 --- a/tests/deploy-bifrost.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. -export PYTHONUNBUFFERED=1 - - -function deploy_bifrost { - RAW_INVENTORY=/etc/kolla/inventory - - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - # TODO(mgoddard): run prechecks. - # Deploy the bifrost container. - # TODO(mgoddard): add pull action when we have a local registry service in - # CI. - kolla-ansible deploy-bifrost -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/deploy-bifrost -} - - -deploy_bifrost diff --git a/tests/deploy.sh b/tests/deploy.sh deleted file mode 100755 index e81dbf5dcc..0000000000 --- a/tests/deploy.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. -export PYTHONUNBUFFERED=1 - -function init_pebble { - - sudo echo "[i] Pulling letsencrypt/pebble" > /tmp/logs/ansible/certificates - sudo docker pull quay.io/openstack.kolla/pebble:latest &>> /tmp/logs/ansible/certificates - - sudo echo "[i] Force removing old pebble container" &>> /tmp/logs/ansible/certificates - sudo docker rm -f pebble &>> /tmp/logs/ansible/certificates - - sudo echo "[i] Run new pebble container" &>> /tmp/logs/ansible/certificates - sudo docker run --name pebble --rm -d -e "PEBBLE_VA_NOSLEEP=1" -e "PEBBLE_VA_ALWAYS_VALID=1" --net=host quay.io/openstack.kolla/pebble:latest &>> /tmp/logs/ansible/certificates - - sudo echo "[i] Wait for pebble container be up" &>> /tmp/logs/ansible/certificates - # wait until pebble starts - while ! sudo docker logs pebble | grep -q "Listening on"; do - sleep 1 - done - sudo echo "[i] Wait for pebble container done" &>> /tmp/logs/ansible/certificates - - sudo echo "[i] Pebble container logs" &>> /tmp/logs/ansible/certificates - sudo docker logs pebble &>> /tmp/logs/ansible/certificates -} - -function pebble_cacert { - - sudo docker cp pebble:/test/certs/pebble.minica.pem /etc/kolla/certificates/ca/pebble-root.crt - sudo curl -k -s -o /etc/kolla/certificates/ca/pebble.crt -v https://127.0.0.1:15000/roots/0 -} - -function certificates { - - RAW_INVENTORY=/etc/kolla/inventory - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - # generate self-signed certificates for the optional internal TLS tests - if [[ "$TLS_ENABLED" = "True" ]]; then - kolla-ansible certificates -i ${RAW_INVENTORY} -vvv > /tmp/logs/ansible/certificates - fi - if [[ "$LE_ENABLED" = "True" ]]; then - init_pebble - pebble_cacert - fi - - #TODO(inc0): Post-deploy complains that /etc/kolla is not writable. Probably we need to include become there - sudo chmod -R 777 /etc/kolla -} - - -function deploy { - - RAW_INVENTORY=/etc/kolla/inventory - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - #TODO(inc0): Post-deploy complains that /etc/kolla is not writable. 
Probably we need to include become there - sudo chmod -R 777 /etc/kolla - - certificates - - # Actually do the deployment - kolla-ansible prechecks -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/deploy-prechecks - kolla-ansible pull -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/pull - kolla-ansible deploy -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/deploy - kolla-ansible post-deploy -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/post-deploy - - if [[ $HAS_UPGRADE == 'no' ]]; then - kolla-ansible validate-config -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/validate-config - #TODO(r-krcek) check can be moved out of the if statement in the flamingo cycle - kolla-ansible check -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/check - fi -} - - -deploy diff --git a/tests/get_logs.sh b/tests/get_logs.sh index 8633ec1c36..db18de5cdf 100644 --- a/tests/get_logs.sh +++ b/tests/get_logs.sh @@ -15,7 +15,7 @@ copy_logs() { echo "Invalid container engine: ${CONTAINER_ENGINE}" exit 1 fi - + cp -rL /home/zuul/tempest ${LOG_DIR}/ [ -d ${VOLUMES_DIR}/kolla_logs/_data ] && cp -rnL ${VOLUMES_DIR}/kolla_logs/_data/* ${LOG_DIR}/kolla/ [ -d /etc/kolla ] && cp -rnL /etc/kolla/* ${LOG_DIR}/kolla_configs/ # Don't save the IPA images. diff --git a/tests/post.yml b/tests/post.yml index 77dd25b615..46e4370960 100644 --- a/tests/post.yml +++ b/tests/post.yml @@ -2,6 +2,10 @@ - hosts: all vars: logs_dir: "/tmp/logs" + roles: + - role: fetch-subunit-output + zuul_work_dir: '/home/zuul/tempest' + tasks: # TODO(mhiner): Currently only Docker to Podman migration is tested. # If we want to test the other direction we have to rework this. @@ -72,6 +76,26 @@ ara_report_local_dir: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}/ara-report" kolla_ansible_local_src_dir: "{{ zuul.executor.work_root }}/src/{{ zuul.project.canonical_hostname }}/openstack/kolla-ansible" tasks: + - name: Download testinfra to executor + synchronize: + src: "/home/zuul/testinfra" + dest: "{{ zuul.executor.log_root }}/" + mode: pull + # TODO(mnasiadka): Remove in G/2026.1 cycle + ignore_errors: true + + - name: Return artifact to Zuul + zuul_return: + data: + zuul: + artifacts: + - name: "TestInfra Unit Test Report" + url: "testinfra/test-results-testinfra.html" + metadata: + type: unit_test_report + - name: "TestInfra Screenshots" + url: "testinfra/screenshots" + - name: Check for existence of ara sqlite stat: path: "{{ ansible_env.HOME }}/.ara/server/ansible.sqlite" diff --git a/tests/pre.yml b/tests/pre.yml index 804c090d18..1a666e97d1 100644 --- a/tests/pre.yml +++ b/tests/pre.yml @@ -61,6 +61,13 @@ - gawk - python3-pip - python3-setuptools + - python3-requests + + - name: Install stestr + become: true + pip: + break_system_packages: true + name: stestr - name: Install lvm on storage scenarios become: true diff --git a/tests/reconfigure.sh b/tests/reconfigure.sh deleted file mode 100755 index 1824755729..0000000000 --- a/tests/reconfigure.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. 
-export PYTHONUNBUFFERED=1 - - -function reconfigure { - RAW_INVENTORY=/etc/kolla/inventory - - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - # TODO(jeffrey4l): make some configure file change and - # trigger a real reconfigure - # NOTE(mnasiadka): Remove OVN DB containers and volumes on primary to test recreation - if [[ $SCENARIO == "ovn" ]]; then - sudo ${CONTAINER_ENGINE} rm -f ovn_nb_db ovn_sb_db && sudo ${CONTAINER_ENGINE} volume rm ovn_nb_db ovn_sb_db - fi - kolla-ansible prechecks -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/reconfigure-prechecks - kolla-ansible reconfigure -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/reconfigure -} - - -reconfigure diff --git a/tests/run-hashi-vault.yml b/tests/run-openbao.yml similarity index 92% rename from tests/run-hashi-vault.yml rename to tests/run-openbao.yml index f9a014e76e..f8d2fe2596 100644 --- a/tests/run-hashi-vault.yml +++ b/tests/run-openbao.yml @@ -68,10 +68,10 @@ command: "{{ kolla_ansible_venv_path }}/bin/kolla-genpwd" # At this point we have generated all necessary configuration, and are - # ready to test Hashicorp Vault. - - name: Run test-hashicorp-vault-passwords.sh script + # ready to test OpenBao. + - name: Run test-openbao-passwords.sh script script: - cmd: test-hashicorp-vault-passwords.sh + cmd: test-openbao-passwords.sh executable: /bin/bash chdir: "{{ kolla_ansible_src_dir }}" environment: @@ -85,11 +85,11 @@ - name: Read generated file slurp: - src: "/tmp/passwords-hashicorp-vault.yml" + src: "/tmp/passwords-openbao.yml" register: generated_file # This test will load in the original input file and the one that was - # generated by Vault and ensure that the keys are the same in both files. + # generated by OpenBao and ensure that the keys are the same in both files. # This ensures that we are not missing any passwords. - name: Check passwords that were written to Vault are as expected vars: diff --git a/tests/run.yml b/tests/run.yml index 99f110e315..d421c3e7a0 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -10,7 +10,7 @@ - name: Set facts for commonly used variables vars: # NOTE(yoctozepto): needed here to use in other facts too - openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-opensearch', 'venus'] }}" + openstack_core_enabled: "{{ scenario not in ['bifrost', 'mariadb', 'prometheus-opensearch'] }}" set_fact: kolla_inventory_path: "/etc/kolla/inventory" logs_dir: "/tmp/logs" @@ -35,12 +35,9 @@ name: "{{ 'bind-utils' if ansible_os_family == 'RedHat' else 'dnsutils' }}" when: scenario == 'magnum' - - name: Prepare disks for a storage service - script: "setup_disks.sh {{ disk_type }}" + - import_role: + name: kolla-ansible-setup-disks when: scenario in ['cephadm', 'zun'] - become: true - vars: - disk_type: "{{ 'ceph-lvm' if scenario in ['cephadm'] else scenario }}" - name: Update /etc/hosts with internal API FQDN blockinfile: @@ -391,28 +388,14 @@ # Deploy control plane. For upgrade jobs this is the previous release. 
- block: - - name: Run deploy.sh script - script: - cmd: deploy.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - TLS_ENABLED: "{{ tls_enabled }}" - LE_ENABLED: "{{ le_enabled }}" - KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" - HAS_UPGRADE: "{{ is_upgrade | bool | ternary('yes', 'no') }}" + - import_role: + name: kolla-ansible-deploy # NOTE(yoctozepto): this is nice as the first step after the deployment # because it waits for the services to stabilize well enough so that # the dashboard is able to show the login prompt - - name: Run test-dashboard.sh script - script: - cmd: test-dashboard.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - TLS_ENABLED: "{{ tls_enabled }}" - LE_ENABLED: "{{ le_enabled }}" + - import_role: + name: kolla-ansible-test-dashboard when: dashboard_enabled - name: Run init-core-openstack.sh script @@ -430,7 +413,7 @@ EXT_NET_GATEWAY: "{{ neutron_external_network_prefix }}1" EXT_NET_DEMO_ROUTER_ADDR: "{{ neutron_external_network_prefix }}10" SCENARIO: "{{ scenario }}" - when: openstack_core_tested or scenario in ['ironic', 'magnum', 'scenario_nfv', 'zun', 'octavia'] + when: openstack_core_tested or scenario in ['ironic', 'magnum', 'nfv', 'zun', 'octavia'] - name: Run test-ovn.sh script script: @@ -440,17 +423,10 @@ when: scenario == "ovn" environment: CONTAINER_ENGINE: "{{ container_engine }}" + IS_UPGRADE: "{{ is_upgrade | bool | ternary('yes', 'no') }}" - - name: Run test-core-openstack.sh script - script: - cmd: test-core-openstack.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - SCENARIO: "{{ scenario }}" - HAS_UPGRADE: "{{ is_upgrade | bool | ternary('yes', 'no') }}" - PHASE: deploy - IP_VERSION: "{{ 6 if address_family == 'ipv6' else 4 }}" + - import_role: + name: kolla-ansible-tempest when: openstack_core_tested - name: Run test-zun.sh script @@ -468,7 +444,7 @@ cmd: test-scenario-nfv.sh executable: /bin/bash chdir: "{{ kolla_ansible_src_dir }}" - when: scenario == "scenario_nfv" + when: scenario == "nfv" - block: - name: Run deploy-tenks.sh script @@ -550,13 +526,6 @@ when: - scenario == "prometheus-opensearch" - - name: Run test-venus.sh script - script: - cmd: test-venus.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - when: scenario == "venus" - - name: Run test-skyline.sh script script: cmd: test-skyline.sh @@ -571,6 +540,13 @@ chdir: "{{ kolla_ansible_src_dir }}" when: scenario == "skyline-sso" + - name: Run test-telemetry.sh script + script: + cmd: test-telemetry.sh + executable: /bin/bash + chdir: "{{ kolla_ansible_src_dir }}" + when: scenario == "telemetry" + - name: Run test-container-engine-migration.sh script script: cmd: test-container-engine-migration.sh @@ -731,43 +707,17 @@ --final /etc/kolla/passwords.yml # Perform an upgrade to the in-development code. 
- - name: Run upgrade.sh script - shell: - cmd: tests/upgrade.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" - SCENARIO: "{{ scenario }}" + - import_role: + name: kolla-ansible-upgrade - # NOTE(yoctozepto): this is nice as the first step after the upgrade - # because it waits for the services to stabilize well enough so that - # the dashboard is able to show the login prompt - - name: Run test-dashboard.sh script (post upgrade) - shell: - cmd: tests/test-dashboard.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - TLS_ENABLED: "{{ tls_enabled }}" + - import_role: + name: kolla-ansible-test-dashboard when: dashboard_enabled - # NOTE(yoctozepto): We need the script module here to avoid - # a bug in Glance OSC [1][2] which results in a failure when a volume - # is given as a source. The stdin works differently in shell/command - # than script. - # [1] https://opendev.org/openstack/python-openstackclient/src/commit/6810414e45a32dd44263dff47fec161989508ef0/openstackclient/image/v2/image.py#L114-L120 - # [2] https://opendev.org/openstack/python-openstackclient/src/commit/6810414e45a32dd44263dff47fec161989508ef0/openstackclient/image/v2/image.py#L414 - - name: Run test-core-openstack.sh script (post upgrade) - script: - cmd: test-core-openstack.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - SCENARIO: "{{ scenario }}" - HAS_UPGRADE: 'yes' - PHASE: upgrade - IP_VERSION: "{{ 6 if address_family == 'ipv6' else 4 }}" + - import_role: + name: kolla-ansible-tempest + vars: + post_upgrade: true when: openstack_core_tested - name: Run test-prometheus-opensearch.sh script (post-upgrade) @@ -781,31 +731,17 @@ when: is_upgrade # Bifrost testing. 
- - block: - - name: Run deploy-bifrost.sh script - shell: - cmd: tests/deploy-bifrost.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" + - name: Bifrost testing + when: scenario == "bifrost" + block: + - import_role: + name: kolla-ansible-deploy-bifrost - - name: Run test-bifrost.sh script - shell: - cmd: tests/test-bifrost.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - CONTAINER_ENGINE: "{{ container_engine }}" + - import_role: + name: kolla-ansible-test-bifrost - - name: Run upgrade-bifrost.sh script - shell: - cmd: tests/upgrade-bifrost.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" - when: scenario == "bifrost" + - import_role: + name: kolla-ansible-upgrade-bifrost # NOTE(yoctozepto): each host checks itself - hosts: all @@ -841,15 +777,8 @@ - hosts: primary any_errors_fatal: true tasks: - - name: Run reconfigure.sh script - script: - cmd: reconfigure.sh - executable: /bin/bash - chdir: "{{ kolla_ansible_src_dir }}" - environment: - KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" - SCENARIO: "{{ scenario }}" - CONTAINER_ENGINE: "{{ container_engine }}" + - import_role: + name: kolla-ansible-reconfigure when: - not is_upgrade - scenario != "bifrost" diff --git a/tests/setup_disks.sh b/tests/setup_disks.sh deleted file mode 100644 index 6cd03ddc41..0000000000 --- a/tests/setup_disks.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# $1: scenario / ceph store type - -set -o xtrace -set -o errexit - -mkdir -p /opt/data/kolla - -if [ $1 = 'zun' ]; then - # create cinder-volumes volume group for cinder lvm backend - free_device=$(losetup -f) - fallocate -l 5G /var/lib/cinder_data.img - losetup $free_device /var/lib/cinder_data.img - pvcreate $free_device - vgcreate cinder-volumes $free_device -elif [ $1 = 'ceph-lvm' ]; then - free_device=$(losetup -f) - fallocate -l 5G /var/lib/ceph-osd1.img - losetup $free_device /var/lib/ceph-osd1.img - pvcreate $free_device - vgcreate cephvg $free_device - lvcreate -l 100%FREE -n cephlv cephvg -else - echo "Unknown type" >&2 - exit 1 -fi - -partprobe diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2 index fe7950e656..ddcc631dd4 100644 --- a/tests/templates/globals-default.j2 +++ b/tests/templates/globals-default.j2 @@ -45,7 +45,11 @@ mariadb_wsrep_extra_provider_options: - "evs.inactive_timeout=PT30S" - "evs.keepalive_period=PT3S" -mariadb_monitor_connect_timeout: "60000" +mariadb_monitor_connect_interval: "60000" +mariadb_monitor_connect_timeout: "180000" +mariadb_monitor_ping_interval: "60000" +mariadb_monitor_ping_max_failures: "10" +mariadb_monitor_ping_timeout: "10000" nova_compute_virt_type: "{{ virt_type }}" @@ -110,7 +114,7 @@ enable_prometheus: "yes" enable_prometheus_openstack_exporter: "no" {% endif %} -{% if scenario == "scenario_nfv" %} +{% if scenario == "nfv" %} enable_tacker: "yes" enable_neutron_sfc: "yes" enable_mistral: "yes" @@ -123,6 +127,7 @@ enable_aodh: "yes" {% if scenario == "ironic" %} enable_ironic: "yes" +enable_ironic_pxe_filter: "yes" enable_prometheus: "yes" enable_prometheus_openstack_exporter: "no" ironic_dnsmasq_dhcp_ranges: @@ -232,12 +237,6 @@ octavia_network_type: "tenant" enable_redis: "yes" {% endif %} -{% if scenario == "venus" %} -enable_opensearch: "yes" -enable_keystone: "yes" -enable_venus: "yes" -{% endif %} - {% if groups['all'] | length == 1 %} 
keepalived_track_script_enabled: "no" {% endif %} @@ -278,5 +277,11 @@ enable_skyline: "yes" skyline_enable_sso: "yes" {% endif %} +{% if scenario == "telemetry" %} +enable_aodh: "yes" +enable_ceilometer: "yes" +enable_gnocchi: "yes" +{% endif %} + mariadb_monitor_read_only_interval: "30000" mariadb_monitor_galera_healthcheck_timeout: "30000" diff --git a/tests/templates/inventory.j2 b/tests/templates/inventory.j2 index ca98719a89..d0711e93ee 100644 --- a/tests/templates/inventory.j2 +++ b/tests/templates/inventory.j2 @@ -231,9 +231,6 @@ control [blazar:children] control -[venus:children] -monitoring - [letsencrypt:children] loadbalancer @@ -662,12 +659,6 @@ ovn-database [ovn-sb-db-relay:children] ovn-database -[venus-api:children] -venus - -[venus-manager:children] -venus - [letsencrypt-webserver:children] letsencrypt diff --git a/tests/test-bifrost.sh b/tests/test-bifrost.sh deleted file mode 100755 index b8017c026f..0000000000 --- a/tests/test-bifrost.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. -export PYTHONUNBUFFERED=1 - - -function test_bifrost { - container_engine="${1:-docker}" - - # TODO(mgoddard): More testing, deploy bare metal nodes. - # TODO(mgoddard): Use openstackclient when clouds.yaml works. See - # https://bugs.launchpad.net/bifrost/+bug/1754070. - attempts=0 - while [[ $(sudo ${container_engine} exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal driver list -f value" | wc -l) -eq 0 ]]; do - attempts=$((attempts + 1)) - if [[ $attempts -gt 6 ]]; then - echo "Timed out waiting for ironic conductor to become active" - exit 1 - fi - sleep 10 - done - sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal node list" - sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal node create --driver redfish --name test-node" - sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost baremetal node delete test-node" -} - - -test_bifrost $1 diff --git a/tests/test-core-openstack.sh b/tests/test-core-openstack.sh index c2c0ea3ad2..aa1cf74033 100755 --- a/tests/test-core-openstack.sh +++ b/tests/test-core-openstack.sh @@ -514,7 +514,8 @@ function test_openstack_logged { test_smoke test_neutron_modules test_instance_boot - test_internal_dns_integration + # NOTE(mnasiadka): Disable because it started failing in OVN scenario + [[ $SCENARIO != "ovn" ]] && test_internal_dns_integration test_proxysql_prometheus_exporter # Check for x86_64 architecture to run q35 tests diff --git a/tests/test-dashboard.sh b/tests/test-dashboard.sh deleted file mode 100755 index c3cb9a72cc..0000000000 --- a/tests/test-dashboard.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -export PYTHONUNBUFFERED=1 - - -function check_dashboard { - # Query the dashboard, and check that the returned page looks like a login - # page. - DASHBOARD_URL=${OS_AUTH_URL%:*} - output_path=$1 - args=( - --include - --location - --fail - ) - if [[ "$TLS_ENABLED" = "True" ]]; then - args+=(--cacert $OS_CACERT) - fi - if ! curl "${args[@]}" $DASHBOARD_URL > $output_path; then - return 1 - fi - if ! grep Login $output_path >/dev/null; then - return 1 - fi -} - -function test_dashboard_logged { - . /etc/kolla/admin-openrc.sh - - echo "TESTING: Dashboard" - # The dashboard has been known to take some time to become accessible, so - # use retries. - output_path=$(mktemp) - attempt=1 - while ! 
check_dashboard $output_path; do - echo "Dashboard not accessible yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 30 ]]; then - echo "FAILED: Dashboard did not become accessible. Response:" - cat $output_path - return 1 - fi - sleep 10 - done - echo "SUCCESS: Dashboard" -} - -function test_dashboard { - echo "Testing dashboard" - log_file=/tmp/logs/ansible/test-dashboard - if [[ -f $log_file ]]; then - log_file=${log_file}-upgrade - fi - test_dashboard_logged > $log_file 2>&1 - result=$? - if [[ $result != 0 ]]; then - echo "Testing dashboard failed. See ansible/test-dashboard for details" - else - echo "Successfully tested dashboard. See ansible/test-dashboard for details" - fi - return $result -} - - -test_dashboard diff --git a/tests/test-hashicorp-vault-passwords.sh b/tests/test-hashicorp-vault-passwords.sh deleted file mode 100755 index caa9ba6a4c..0000000000 --- a/tests/test-hashicorp-vault-passwords.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -export PYTHONUNBUFFERED=1 - -function install_vault { - if [[ "debian" == $BASE_DISTRO ]]; then - curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - - sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" - sudo apt-get update -y && sudo apt-get install -y vault jq - else - sudo dnf install -y yum-utils - sudo dnf config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo - sudo dnf install -y vault jq - fi -} - -function start_vault { - nohup vault server --dev & - # Give Vault some time to warm up - sleep 10 -} - -function test_vault { - TOKEN=$(vault token create -address 'http://127.0.0.1:8200' -format json | jq '.auth.client_token' --raw-output) - echo "${TOKEN}" | vault login -address 'http://127.0.0.1:8200' - - vault kv put -address 'http://127.0.0.1:8200' secret/foo data=bar -} - -function test_writepwd { - TOKEN=$(vault token create -address 'http://127.0.0.1:8200' -format json | jq '.auth.client_token' --raw-output) - kolla-writepwd \ - --passwords /etc/kolla/passwords.yml \ - --vault-addr 'http://127.0.0.1:8200' \ - --vault-token ${TOKEN} \ - --vault-mount-point secret -} - -function test_readpwd { - TOKEN=$(vault token create -address 'http://127.0.0.1:8200' -format json | jq '.auth.client_token' --raw-output) - cp etc/kolla/passwords.yml /tmp/passwords-hashicorp-vault.yml - kolla-readpwd \ - --passwords /tmp/passwords-hashicorp-vault.yml \ - --vault-addr 'http://127.0.0.1:8200' \ - --vault-token ${TOKEN} \ - --vault-mount-point secret -} - -function teardown { - pkill vault -} - -function test_hashicorp_vault_passwords { - echo "Setting up development Vault server..." - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - install_vault - start_vault - test_vault - echo "Write passwords to Hashicorp Vault..." - test_writepwd - echo "Read passwords from Hashicorp Vault..." - test_readpwd - echo "Cleaning up..." 
- teardown -} - -test_hashicorp_vault_passwords diff --git a/tests/test-openbao-passwords.sh b/tests/test-openbao-passwords.sh new file mode 100755 index 0000000000..5e6b115bad --- /dev/null +++ b/tests/test-openbao-passwords.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +set -o xtrace +set -o errexit + +export PYTHONUNBUFFERED=1 + +function install_openbao { + if [[ $BASE_DISTRO =~ (debian|ubuntu) ]]; then + curl -fsSLO https://github.com/openbao/openbao/releases/download/v2.4.1/bao_2.4.1_linux_amd64.deb + sudo dpkg -i bao_2.4.1_linux_amd64.deb + rm -f bao_2.4.1_linux_amd64.deb + else + sudo dnf install -y https://github.com/openbao/openbao/releases/download/v2.4.1/bao_2.4.1_linux_amd64.rpm + fi +} + +function start_openbao { + nohup bao server --dev & + # Give Vault some time to warm up + sleep 10 +} + +function test_openbao { + TOKEN=$(bao token create -address 'http://127.0.0.1:8200' -field token) + echo "${TOKEN}" | bao login -address 'http://127.0.0.1:8200' - + bao kv put -address 'http://127.0.0.1:8200' secret/foo data=bar +} + +function test_writepwd { + TOKEN=$(bao token create -address 'http://127.0.0.1:8200' -field token) + kolla-writepwd \ + --passwords /etc/kolla/passwords.yml \ + --vault-addr 'http://127.0.0.1:8200' \ + --vault-token ${TOKEN} \ + --vault-mount-point secret +} + +function test_readpwd { + TOKEN=$(bao token create -address 'http://127.0.0.1:8200' -field token) + cp etc/kolla/passwords.yml /tmp/passwords-openbao.yml + kolla-readpwd \ + --passwords /tmp/passwords-openbao.yml \ + --vault-addr 'http://127.0.0.1:8200' \ + --vault-token ${TOKEN} \ + --vault-mount-point secret +} + +function teardown { + pkill bao +} + +function test_openbao_passwords { + echo "Setting up development OpenBao server..." + source $KOLLA_ANSIBLE_VENV_PATH/bin/activate + install_openbao + start_openbao + test_openbao + echo "Write passwords to OpenBao..." + test_writepwd + echo "Read passwords from OpenBao..." + test_readpwd + echo "Cleaning up..." 
+ teardown +} + +test_openbao_passwords diff --git a/tests/test-ovn.sh b/tests/test-ovn.sh index a9d77a95ae..a48fb1161b 100755 --- a/tests/test-ovn.sh +++ b/tests/test-ovn.sh @@ -15,10 +15,20 @@ function test_ovn { # List OVN NB/SB entries echo "OVN NB DB entries:" - sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" show + # TODO(mnasiadka): Remove the first part of conditional in G cycle + if [ $IS_UPGRADE == "yes" ]; then + sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" show + else + sudo ${container_engine} exec ovn_northd ovn-nbctl show + fi echo "OVN SB DB entries:" - sudo ${container_engine} exec ovn_northd ovn-sbctl --db "$ovn_sb_connection" show + # TODO(mnasiadka): Remove the first part of conditional in G cycle + if [ $IS_UPGRADE == "yes" ]; then + sudo ${container_engine} exec ovn_northd ovn-sbctl --db "$ovn_sb_connection" show + else + sudo ${container_engine} exec ovn_northd ovn-sbctl show + fi OVNNB_STATUS=$(sudo ${container_engine} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound) OVNSB_STATUS=$(sudo ${container_engine} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound) @@ -92,9 +102,20 @@ function test_octavia { openstack floating ip set $lb_fip --port $lb_port_id echo "OVN NB entries for LB:" - sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list load_balancer + # TODO(mnasiadka): Remove the first part of conditional in G cycle + if [ $IS_UPGRADE == "yes" ]; then + sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list load_balancer + else + sudo ${container_engine} exec ovn_northd ovn-nbctl list load_balancer + fi + echo "OVN NB entries for NAT:" - sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list nat + # TODO(mnasiadka): Remove the first part of conditional in G cycle + if [ $IS_UPGRADE == "yes" ]; then + sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list nat + else + sudo ${container_engine} exec ovn_northd ovn-nbctl list nat + fi echo "Attempt to access the load balanced HTTP server." attempts=12 diff --git a/tests/test-prometheus-opensearch.sh b/tests/test-prometheus-opensearch.sh index c1f8272c16..cbb687d072 100755 --- a/tests/test-prometheus-opensearch.sh +++ b/tests/test-prometheus-opensearch.sh @@ -170,7 +170,6 @@ function test_prometheus { function test_prometheus_opensearch_logged { . /etc/kolla/admin-openrc.sh - test_opensearch_dashboards test_opensearch test_grafana diff --git a/tests/test-telemetry.sh b/tests/test-telemetry.sh new file mode 100755 index 0000000000..91b5f6c323 --- /dev/null +++ b/tests/test-telemetry.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +set -o xtrace +set -o errexit +set -o pipefail + +# Enable unbuffered output +export PYTHONUNBUFFERED=1 + +function test_aodh { + echo "TESTING: Aodh" + openstack alarm list + echo "SUCCESS: Aodh" +} + +function test_gnocchi { + echo "TESTING: Gnocchi" + openstack metric list + openstack metric resource list + echo "SUCCESS: Gnocchi" +} + +function test_telemetry_scenario_logged { + . /etc/kolla/admin-openrc.sh + . ~/openstackclient-venv/bin/activate + test_aodh + test_gnocchi +} + +function test_telemetry_scenario { + echo "Testing Telemetry" + test_telemetry_scenario_logged > /tmp/logs/ansible/test-telemetry-scenario 2>&1 + result=$? + if [[ $result != 0 ]]; then + echo "Testing Telemetry scenario failed. 
See ansible/test-telemetry-scenario for details" + else + echo "Successfully tested Telemetry scenario. See ansible/test-telemetry-scenario for details" + fi + return $result +} + +test_telemetry_scenario diff --git a/tests/test-venus.sh b/tests/test-venus.sh deleted file mode 100755 index 0039d67749..0000000000 --- a/tests/test-venus.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit -set -o pipefail - -# Enable unbuffered output -export PYTHONUNBUFFERED=1 - -# TODO(yoctozepto): Avoid duplicating this from prometheus-opensearch -function check_opensearch { - # Verify that we see a healthy index created due to Fluentd forwarding logs - local opensearch_url=${OS_AUTH_URL%:*}:9200/_cluster/health - output_path=$1 - args=( - --include - --location - --fail - ) - if ! curl "${args[@]}" $opensearch_url > $output_path; then - return 1 - fi - # NOTE(mgoddard): Status may be yellow because no indices have been - # created. - if ! grep -E '"status":"(green|yellow)"' $output_path >/dev/null; then - return 1 - fi -} - -function check_venus { - local venus_url=${OS_AUTH_URL%:*}:10010/custom_config - output_path=$1 - if ! curl --include --fail $venus_url > $output_path; then - return 1 - fi - if ! grep -E '"status": "SUPPORTED"' $output_path >/dev/null; then - return 1 - fi -} - -function test_opensearch { - echo "TESTING: OpenSearch" - output_path=$(mktemp) - attempt=1 - while ! check_opensearch $output_path; do - echo "OpenSearch not accessible yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 12 ]]; then - echo "FAILED: OpenSearch did not become accessible. Response:" - cat $output_path - return 1 - fi - sleep 10 - done - echo "SUCCESS: OpenSearch" -} - -function test_venus { - echo "TESTING: Venus" - output_path=$(mktemp) - attempt=1 - while ! check_venus $output_path; do - echo "Venus not accessible yet" - attempt=$((attempt+1)) - if [[ $attempt -eq 12 ]]; then - echo "FAILED: Venus did not become accessible. Response:" - cat $output_path - return 1 - fi - sleep 10 - done - echo "SUCCESS: Venus" -} - -function test_venus_scenario_logged { - . /etc/kolla/admin-openrc.sh - - test_opensearch - test_venus -} - -function test_venus_scenario { - echo "Testing Venus and OpenSearch" - test_venus_scenario_logged > /tmp/logs/ansible/test-venus-scenario 2>&1 - result=$? - if [[ $result != 0 ]]; then - echo "Testing Venus scenario failed. See ansible/test-venus-scenario for details" - else - echo "Successfully tested Venus scenario. 
See ansible/test-venus-scenario for details" - fi - return $result -} - -test_venus_scenario diff --git a/tests/test_kolla_container_facts.py b/tests/test_kolla_container_facts.py index e58b058c5f..b6a3d8000b 100644 --- a/tests/test_kolla_container_facts.py +++ b/tests/test_kolla_container_facts.py @@ -109,7 +109,7 @@ def contruct_volume(vol_dict: dict) -> mock.Mock: return volume -def get_containers(override=None): +def get_containers(override=None, all: bool = False): if override: cont_dicts = override else: @@ -117,9 +117,11 @@ def get_containers(override=None): containers = [] for c in cont_dicts: - # Only running containers should be returned by the container APIs - if c['State']['Status'] == 'running': - containers.append(construct_container(c)) + # Without the "all" option, only running containers are returned + # by the container API + if not all and c['State']['Status'] != 'running': + continue + containers.append(construct_container(c)) return containers @@ -152,8 +154,9 @@ def test_get_containers_single(self): self.assertDictEqual( self.fake_data['containers'][0], self.dfw.result['containers']['my_container']) + self.dfw.client.containers.list.assert_called_once_with(all=False) - def test_get_container_multi(self): + def test_get_containers_multi(self): self.dfw = get_DockerFactsWorker( {'name': ['my_container', 'exited_container'], 'action': 'get_containers'}) @@ -165,8 +168,9 @@ def test_get_container_multi(self): self.assertIn('my_container', self.dfw.result['containers']) self.assertNotIn('my_container', self.dfw.result) self.assertNotIn('exited_container', self.dfw.result['containers']) + self.dfw.client.containers.list.assert_called_once_with(all=False) - def test_get_container_all(self): + def test_get_containers_all_running(self): self.dfw = get_DockerFactsWorker({'name': [], 'action': 'get_containers'}) running_containers = get_containers(self.fake_data['containers']) @@ -177,6 +181,21 @@ def test_get_container_all(self): self.assertIn('my_container', self.dfw.result['containers']) self.assertNotIn('my_container', self.dfw.result) self.assertNotIn('exited_container', self.dfw.result['containers']) + self.dfw.client.containers.list.assert_called_once_with(all=False) + + def test_get_containers_all_including_stopped(self): + self.dfw = get_DockerFactsWorker({'name': [], + 'action': 'get_containers', + 'args': { + 'get_all_containers': True}}) + all_containers = get_containers(self.fake_data['containers'], all=True) + self.dfw.client.containers.list.return_value = all_containers + self.dfw.get_containers() + + self.assertFalse(self.dfw.result['changed']) + self.assertIn('my_container', self.dfw.result['containers']) + self.assertIn('exited_container', self.dfw.result['containers']) + self.dfw.client.containers.list.assert_called_once_with(all=True) def test_get_containers_env(self): fake_env = dict(KOLLA_BASE_DISTRO='ubuntu', diff --git a/tests/test_kolla_toolbox.py b/tests/test_kolla_toolbox.py index 112bd403c8..9dca821cc4 100644 --- a/tests/test_kolla_toolbox.py +++ b/tests/test_kolla_toolbox.py @@ -13,6 +13,7 @@ # limitations under the License. import builtins +import contextlib import json import os import sys @@ -20,6 +21,18 @@ from ansible.module_utils import basic from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_bytes +try: + from ansible.module_utils.testing import patch_module_args +except ImportError: + # TODO(dougszu): Remove this exception handler when Python 3.10 support + # is not required. 
Python 3.10 isn't supported by Ansible Core 2.18 which + # provides patch_module_args + @contextlib.contextmanager + def patch_module_args(args): + serialized_args = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args})) + with mock.patch.object(basic, '_ANSIBLE_ARGS', serialized_args): + yield + from importlib.machinery import SourceFileLoader from oslotest import base from unittest import mock @@ -33,13 +46,6 @@ kolla_toolbox_file).load_module() -def set_module_args(args): - """Prepare arguments so they will be picked up during module creation.""" - - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) - basic._ANSIBLE_ARGS = to_bytes(args) - - class AnsibleExitJson(BaseException): """Exception to be raised by module.exit_json and caught by a test case.""" @@ -307,40 +313,40 @@ class TestModuleInteraction(TestKollaToolboxModule): """Class focused on testing user input data from playbook.""" def test_create_ansible_module_missing_required_module_name(self): - set_module_args({ + ansible_module_args = { 'container_engine': 'docker' - }) - - error = self.assertRaises(AnsibleFailJson, - kolla_toolbox.create_ansible_module) + } + with patch_module_args(ansible_module_args): + error = self.assertRaises(AnsibleFailJson, + kolla_toolbox.create_ansible_module) self.assertIn('missing required arguments: module_name', error.result['msg']) def test_create_ansible_module_missing_required_container_engine(self): - set_module_args({ + ansible_module_args = { 'module_name': 'url' - }) - - error = self.assertRaises(AnsibleFailJson, - kolla_toolbox.create_ansible_module) + } + with patch_module_args(ansible_module_args): + error = self.assertRaises(AnsibleFailJson, + kolla_toolbox.create_ansible_module) self.assertIn('missing required arguments: container_engine', error.result['msg']) def test_create_ansible_module_invalid_container_engine(self): - set_module_args({ + ansible_module_args = { 'module_name': 'url', 'container_engine': 'podmano' - }) - - error = self.assertRaises(AnsibleFailJson, - kolla_toolbox.create_ansible_module) + } + with patch_module_args(ansible_module_args): + error = self.assertRaises(AnsibleFailJson, + kolla_toolbox.create_ansible_module) self.assertIn( 'value of container_engine must be one of: podman, docker', error.result['msg'] ) def test_create_ansible_module_success(self): - args = { + ansible_module_args = { 'container_engine': 'docker', 'module_name': 'file', 'module_args': { @@ -357,12 +363,10 @@ def test_create_ansible_module_success(self): 'timeout': 180, 'api_version': '1.5' } - set_module_args(args) - - module = kolla_toolbox.create_ansible_module() - + with patch_module_args(ansible_module_args): + module = kolla_toolbox.create_ansible_module() self.assertIsInstance(module, AnsibleModule) - self.assertEqual(args, module.params) + self.assertEqual(ansible_module_args, module.params) class TestContainerEngineClientIntraction(TestKollaToolboxModule): @@ -381,14 +385,14 @@ def mock_import_error(self, name, globals, locals, fromlist, level): return self.original_import(name, globals, locals, fromlist, level) def test_podman_client_params(self): - set_module_args({ + ansible_module_args = { 'module_name': 'ping', 'container_engine': 'podman', 'api_version': '1.47', 'timeout': 155 - }) - - module = kolla_toolbox.create_ansible_module() + } + with patch_module_args(ansible_module_args): + module = kolla_toolbox.create_ansible_module() mock_podman = mock.MagicMock() mock_podman_errors = mock.MagicMock() import_dict = {'podman': mock_podman, @@ -403,14 +407,14 @@ def 
test_podman_client_params(self): ) def test_docker_client_params(self): - set_module_args({ + ansible_module_args = { 'module_name': 'ping', 'container_engine': 'docker', 'api_version': '1.47', 'timeout': 155 - }) - - module = kolla_toolbox.create_ansible_module() + } + with patch_module_args(ansible_module_args): + module = kolla_toolbox.create_ansible_module() mock_docker = mock.MagicMock() mock_docker_errors = mock.MagicMock() import_dict = {'docker': mock_docker, @@ -425,14 +429,14 @@ def test_docker_client_params(self): ) def test_create_container_client_podman_not_called_with_auto(self): - set_module_args({ + ansible_module_args = { 'module_name': 'ping', 'container_engine': 'podman', 'api_version': 'auto', 'timeout': 90 - }) - - module = kolla_toolbox.create_ansible_module() + } + with patch_module_args(ansible_module_args): + module = kolla_toolbox.create_ansible_module() mock_podman = mock.MagicMock() mock_podman_errors = mock.MagicMock() import_dict = {'podman': mock_podman, @@ -446,12 +450,13 @@ def test_create_container_client_podman_not_called_with_auto(self): ) def test_create_container_client_podman_importerror(self): - set_module_args({ + ansible_module_args = { 'module_name': 'ping', 'container_engine': 'podman' - }) + } self.module_to_mock_import = 'podman' - module = kolla_toolbox.create_ansible_module() + with patch_module_args(ansible_module_args): + module = kolla_toolbox.create_ansible_module() with mock.patch('builtins.__import__', side_effect=self.mock_import_error): @@ -462,13 +467,13 @@ def test_create_container_client_podman_importerror(self): error.result['msg']) def test_create_container_client_docker_importerror(self): - set_module_args({ + ansible_module_args = { 'module_name': 'ping', 'container_engine': 'docker' - }) - + } self.module_to_mock_import = 'docker' - module = kolla_toolbox.create_ansible_module() + with patch_module_args(ansible_module_args): + module = kolla_toolbox.create_ansible_module() with mock.patch('builtins.__import__', side_effect=self.mock_import_error): diff --git a/tests/testinfra/test_horizon.py b/tests/testinfra/test_horizon.py new file mode 100644 index 0000000000..abdb787c68 --- /dev/null +++ b/tests/testinfra/test_horizon.py @@ -0,0 +1,120 @@ +# Copyright 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import time +import yaml + +from pathlib import Path +from selenium.common.exceptions import TimeoutException +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import WebDriverWait + +home = Path.home() +subpath = '/testinfra/screenshots/' +screenshot_path = str(home) + subpath + +with open("/etc/kolla/passwords.yml", 'r') as file: + passwords = yaml.safe_load(file) + admin_password = passwords.get('keystone_admin_password') + + +def test_horizon_screenshot(host): + + firefox_options = webdriver.FirefoxOptions() + + driver = webdriver.Remote( + command_executor='http://localhost:4444/wd/hub', + options=firefox_options) + + horizon_proto = host.environment().get('HORIZON_PROTO') + horizon_url = horizon_proto + "://192.0.2.10" + + try: + driver.get(horizon_url) + WebDriverWait(driver, 30).until( + lambda driver: driver.execute_script( + 'return document.readyState') == 'complete') + + time.sleep(5) + + original_size = driver.get_window_size() + required_width = driver.execute_script( + 'return document.body.parentNode.scrollWidth') + required_height = driver.execute_script( + 'return document.body.parentNode.scrollHeight') + 100 + driver.set_window_size(required_width, required_height) + + driver.find_element(By.TAG_NAME, 'body').\ + screenshot(screenshot_path + "horizon-main.png") # nosec B108 + + driver.set_window_size( + original_size['width'], original_size['height']) + + assert 'Login' in driver.title # nosec B101 + + except TimeoutException as e: + raise e + finally: + driver.quit() + + +def test_horizon_login(host): + + firefox_options = webdriver.FirefoxOptions() + + driver = webdriver.Remote( + command_executor='http://localhost:4444/wd/hub', + options=firefox_options) + + horizon_proto = host.environment().get('HORIZON_PROTO') + horizon_url = horizon_proto + "://192.0.2.10" + logout_url = '/'.join(( + horizon_url, + 'auth', + 'logout')) + + try: + driver.get(logout_url) + user_field = driver.find_element(By.ID, 'id_username') + user_field.send_keys('admin') + pass_field = driver.find_element(By.ID, 'id_password') + pass_field.send_keys(admin_password) + button = driver.find_element(By.CSS_SELECTOR, '.btn-primary') + button.click() + WebDriverWait(driver, 30).until( + lambda driver: driver.execute_script( + 'return document.readyState') == 'complete') + + time.sleep(10) + + original_size = driver.get_window_size() + required_width = driver.execute_script( + 'return document.body.parentNode.scrollWidth') + required_height = driver.execute_script( + 'return document.body.parentNode.scrollHeight') + 100 + driver.set_window_size(required_width, required_height) + + driver.find_element(By.TAG_NAME, 'body').\ + screenshot(screenshot_path + "horizon-logged-in.png") # nosec B108 + + driver.set_window_size( + original_size['width'], original_size['height']) + + assert 'Overview - OpenStack Dashboard' in driver.title # nosec B101 + + except TimeoutException as e: + raise e + finally: + driver.quit() diff --git a/tests/upgrade-bifrost.sh b/tests/upgrade-bifrost.sh deleted file mode 100755 index a5d5c36826..0000000000 --- a/tests/upgrade-bifrost.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. -export PYTHONUNBUFFERED=1 - - -function upgrade_bifrost { - RAW_INVENTORY=/etc/kolla/inventory - - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - # TODO(mgoddard): run prechecks. 
- # TODO(mgoddard): add pull action when we have a local registry service in - # CI. - # TODO(mgoddard): make some configuration file changes and trigger a real - # upgrade. - kolla-ansible deploy-bifrost -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-bifrost -} - - -upgrade_bifrost diff --git a/tests/upgrade.sh b/tests/upgrade.sh deleted file mode 100755 index a53a440e5b..0000000000 --- a/tests/upgrade.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# Enable unbuffered output for Ansible in Jenkins. -export PYTHONUNBUFFERED=1 - - -function upgrade { - RAW_INVENTORY=/etc/kolla/inventory - - source $KOLLA_ANSIBLE_VENV_PATH/bin/activate - - kolla-ansible certificates -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/certificates - # Previous versions had older docker, requests requirements for example - # Therefore we need to run bootstrap again to ensure libraries are in - # proper versions (ansible-collection-kolla is different for new version, potentionally - # also dependencies). - kolla-ansible bootstrap-servers -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-bootstrap - # Skip rabbitmq-ha-precheck before the queues are migrated. - kolla-ansible prechecks -i ${RAW_INVENTORY} --skip-tags rabbitmq-ha-precheck -vvv &> /tmp/logs/ansible/upgrade-prechecks-pre-rabbitmq - - # NOTE(SvenKieske): As om_enable_rabbitmq_transient_quorum_queue now also - # enables quorum_queues for fanout/reply queues in Epoxy, we need - # to perform a migration to durable queues. - # TODO(SvenKieske): Remove these steps in F Cycle. - SERVICE_TAGS="heat,keystone,neutron,nova" - if [[ $SCENARIO == "zun" ]] || [[ $SCENARIO == "cephadm" ]]; then - SERVICE_TAGS+=",cinder" - fi - if [[ $SCENARIO == "scenario_nfv" ]]; then - SERVICE_TAGS+=",barbican" - fi - if [[ $SCENARIO == "ironic" ]]; then - SERVICE_TAGS+=",ironic" - fi - if [[ $SCENARIO == "masakari" ]]; then - SERVICE_TAGS+=",masakari" - fi - if [[ $SCENARIO == "ovn" ]] || [[ $SCENARIO == "octavia" ]]; then - SERVICE_TAGS+=",octavia" - fi - if [[ $SCENARIO == "magnum" ]]; then - SERVICE_TAGS+=",magnum,designate" - fi - kolla-ansible stop -i ${RAW_INVENTORY} -vvv --tags $SERVICE_TAGS --yes-i-really-really-mean-it --ignore-missing &> /tmp/logs/ansible/stop - kolla-ansible genconfig -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/genconfig - kolla-ansible rabbitmq-reset-state -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/rabbitmq-reset-state - # Include rabbitmq-ha-precheck this time to confirm all queues have migrated. - kolla-ansible prechecks -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-prechecks - - kolla-ansible pull -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/pull-upgrade - kolla-ansible upgrade -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade - - kolla-ansible post-deploy -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/upgrade-post-deploy - - kolla-ansible validate-config -i ${RAW_INVENTORY} -vvv &> /tmp/logs/ansible/validate-config -} - - -upgrade diff --git a/tools/init-runonce b/tools/init-runonce index 0c389642b2..900d979f23 100755 --- a/tools/init-runonce +++ b/tools/init-runonce @@ -19,7 +19,7 @@ fi # to be created. 
ARCH=$(uname -m) -CIRROS_RELEASE=${CIRROS_RELEASE:-0.6.2} +CIRROS_RELEASE=${CIRROS_RELEASE:-0.6.3} IMAGE_PATH=/opt/cache/files/ IMAGE_URL=${IMAGE_URL:-"https://github.com/cirros-dev/cirros/releases/download/${CIRROS_RELEASE}/"} IMAGE=cirros-${CIRROS_RELEASE}-${ARCH}-disk.img diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 82daa26e2c..7a9b481cbe 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -1,58 +1,5 @@ --- - job: - name: kolla-ansible-variables - vars: - address_family: 'ipv4' - # Test latest ansible-core version on Ubuntu, minimum supported on others. - # Use SLURP version (two releases back) on SLURP upgrades. - ansible_core_version_constraint: >- - {{ ansible_core_version_slurp if is_slurp else ansible_core_version_min if is_upgrade or ansible_facts.distribution != "Ubuntu" else ansible_core_version_max }} - ansible_core_version_slurp: "==2.16.*" - ansible_core_version_max: "==2.18.*" - ansible_core_version_min: "==2.17.*" - # NOTE(mgoddard): Test the use of interface names with dashes. - api_interface_name: "vxlan-0" - api_network_prefix: "192.0.2." - api_network_prefix_length: "24" - configure_swap_size: 0 - container_engine: "docker" - is_upgrade: false - is_slurp: false - kolla_internal_vip_address: "192.0.2.10" - le_enabled: false - neutron_external_bridge_name: br-0 - neutron_external_interface_name: "veth-{{ neutron_external_bridge_name }}-ext" - neutron_external_network_prefix: "198.51.100." - neutron_external_network_prefix_length: "24" - neutron_external_vxlan_interface_name: "vxlan-1" - neutron_tenant_network_dns_server: "8.8.8.8" - neutron_tenant_network_prefix: "203.0.113." - neutron_tenant_network_prefix_length: "24" - previous_release: "2025.1" - scenario: core - scenario_images_core: - - ^cron - - ^fluentd - - ^glance - - ^haproxy - - ^heat - - ^horizon - - ^keepalived - - ^keystone - - ^kolla-toolbox - - ^mariadb - - ^memcached - - ^neutron - - ^nova- - - ^openvswitch - - ^placement - - ^proxysql - - ^rabbitmq - tls_enabled: false - virt_type: qemu - -- job: - parent: kolla-ansible-variables name: kolla-ansible-base pre-run: tests/pre.yml run: tests/run.yml @@ -64,6 +11,17 @@ - openstack/kolla - openstack/kolla-ansible - openstack/requirements + files: + - ^ansible/group_vars/all/common.yml + - ^requirements-core.yml + - ^roles/kolla-ansible-test-dashboard/ + - ^tests/check-(config|failure|logs).sh + - ^tests/get_logs.sh + - ^tests/init-core-openstack.sh + - ^tests/(run|pre|post).yml + - ^tests/setup_gate.sh + - ^tests/templates/(inventory|globals-default).j2 + - ^tests/upgrade.sh irrelevant-files: - ^.*\.rst$ - ^bindep.txt$ @@ -80,363 +38,60 @@ - ^specs/ - ^kolla_ansible/tests/ - ^tools/validate-.*$ + - ^zuul.d/ roles: - zuul: zuul/zuul-jobs - zuul: openstack/kolla - -- job: - parent: kolla-ansible-base - name: kolla-ansible-scenario-base - files: - - ^ansible/group_vars/all.yml - - ^ansible/roles/common/ - - ^requirements-core.yml - - ^tests/check-logs.sh - - ^tests/get_logs.sh - - ^tests/(pre|run).yml - - ^tests/templates/(inventory|globals-default.j2) - -- job: - name: kolla-ansible-kvm-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(nova-cell)/ - - ^tests/templates/nova-compute-overrides.j2 - vars: - virt_type: kvm - -- job: - name: kolla-ansible-ipv6-base - parent: kolla-ansible-base - voting: false - vars: - api_network_prefix: "fd::" - api_network_prefix_length: "64" - kolla_internal_vip_address: "fd::ff:0" - neutron_external_network_prefix: "fd:1::" - neutron_external_network_prefix_length: "64" 
- neutron_tenant_network_prefix: "fd:f0::" - neutron_tenant_network_prefix_length: "64" - neutron_tenant_network_dns_server: 2001:4860:4860::8888 - address_family: 'ipv6' - scenario: ipv6 - -- job: - name: kolla-ansible-bifrost-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/bifrost/ - - ^tests/test-bifrost.sh - vars: - scenario: bifrost - scenario_images_core: - - ^bifrost - -- job: - name: kolla-ansible-ironic-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(ironic|neutron|nova|nova-cell)/ - - ^tests/deploy-tenks\.sh$ - - ^tests/templates/ironic-overrides\.j2$ - - ^tests/templates/tenks-deploy-config\.yml\.j2$ - - ^tests/test-dashboard\.sh$ - - ^tests/test-ironic\.sh$ - required-projects: - - openstack/tenks - vars: - scenario: ironic - scenario_images_extra: - - ^dnsmasq - - ^ironic - - ^iscsid - - ^prometheus - -- job: - name: kolla-ansible-zun-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(zun|kuryr|etcd|cinder|iscsi)/ - - ^tests/setup_disks.sh - - ^tests/test-core-openstack.sh - - ^tests/test-zun.sh - - ^tests/test-dashboard.sh - vars: - scenario: zun - scenario_images_extra: - - ^zun - - ^kuryr - - ^etcd - - ^cinder - - ^iscsid - - ^tgtd - -- job: - name: kolla-ansible-swift-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(glance|swift)/ - - ^tests/setup_disks.sh - - ^tests/init-swift.sh - - ^tests/test-core-openstack.sh - - ^tests/test-dashboard.sh - - ^tests/test-swift.sh - vars: - scenario: swift - scenario_images_extra: - - ^swift - -- job: - name: kolla-ansible-cephadm-base - parent: kolla-ansible-base - voting: false - vars: - scenario: cephadm - scenario_images_extra: - - ^cinder - - ^redis - -- job: - name: kolla-ansible-magnum-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(designate|magnum|trove)/ - - ^tests/test-dashboard.sh - - ^tests/test-magnum.sh - vars: - scenario: magnum - scenario_images_extra: - - ^designate - - ^magnum - - ^trove - -- job: - name: kolla-ansible-octavia-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(octavia|octavia-certificates)/ - - ^tests/test-dashboard.sh - - ^tests/test-octavia.sh - vars: - scenario: octavia - scenario_images_extra: - - ^redis - - ^octavia - -- job: - name: kolla-ansible-masakari-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/masakari/ - - ^ansible/roles/hacluster/ - - ^tests/test-masakari.sh - - ^tests/test-dashboard.sh - vars: - scenario: masakari - scenario_images_extra: - - ^masakari - - ^hacluster - -- job: - name: kolla-ansible-mariadb-base - parent: kolla-ansible-scenario-base - voting: true - files: !inherit - - ^ansible/roles/(loadbalancer|mariadb|proxysql-config)/ - - ^tests/test-mariadb.sh - vars: - scenario: mariadb - scenario_images_core: - - ^cron - - ^fluentd - - ^haproxy - - ^keepalived - - ^kolla-toolbox - - ^mariadb - - ^proxysql - -- job: - name: kolla-ansible-scenario-nfv-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(aodh|barbican|heat|mistral|redis|tacker)/ - - ^tests/test-scenario-nfv.sh - - ^tests/test-dashboard.sh vars: - scenario: scenario_nfv - scenario_images_extra: - - ^aodh - - ^tacker - - ^mistral - - ^redis - - ^barbican - -- job: - name: kolla-ansible-cells-base - parent: kolla-ansible-scenario-base - voting: 
false - files: !inherit - - ^ansible/roles/nova/ - - ^ansible/roles/nova-cell/ - - ^ansible/roles/loadbalancer/ - - ^tests/test-core-openstack.sh - - ^tests/test-proxysql.sh - vars: - scenario: cells - scenario_images_extra: - - ^proxysql - -- job: - name: kolla-ansible-ovn-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(neutron|octavia|openvswitch|ovn-controller|ovn-db)/ - - ^tests/test-ovn.sh - - ^tests/test-core-openstack.sh - - ^tests/reconfigure.sh - vars: - scenario: ovn - scenario_images_extra: - - ^redis - - ^octavia - - ^ovn - -- job: - name: kolla-ansible-prometheus-opensearch-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/site.yml - - ^ansible/roles/(common|opensearch|grafana|prometheus)/ - - ^tests/test-prometheus-opensearch.sh - vars: - scenario: prometheus-opensearch - scenario_images_core: - - ^cron - - ^fluentd - - ^grafana - - ^haproxy - - ^keepalived - - ^kolla-toolbox - - ^mariadb - - ^memcached - - ^opensearch - - ^prometheus - - ^proxysql - - ^rabbitmq - -- job: - name: kolla-ansible-venus-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/(common|opensearch|venus)/ - - ^tests/test-venus.sh - vars: - scenario: venus + address_family: 'ipv4' + # Test latest ansible-core version on Ubuntu, minimum supported on others. + # Use SLURP version (two releases back) on SLURP upgrades. + ansible_core_version_constraint: >- + {{ ansible_core_version_slurp if is_slurp else ansible_core_version_min if is_upgrade + or ansible_facts.distribution != "Ubuntu" else ansible_core_version_max }} + ansible_core_version_slurp: "==2.17.*" + ansible_core_version_max: "==2.19.*" + ansible_core_version_min: "==2.18.*" + # NOTE(mgoddard): Test the use of interface names with dashes. + api_interface_name: "vxlan-0" + api_network_prefix: "192.0.2." + api_network_prefix_length: "24" + base_distro: "{{ zuul.job.split('-').2 }}" + configure_swap_size: 0 + container_engine: "docker" + is_upgrade: "{{ 'upgrade' in zuul.job }}" + is_slurp: "{{ 'slurp' in zuul.job }}" + kolla_ansible_tempest_regex: "\\[.*\\bsmoke\\b.*\\]" + kolla_internal_vip_address: "192.0.2.10" + le_enabled: false + neutron_external_bridge_name: br-0 + neutron_external_interface_name: >- + veth-{{ neutron_external_bridge_name }}-ext + neutron_external_network_prefix: "198.51.100." + neutron_external_network_prefix_length: "24" + neutron_external_vxlan_interface_name: "vxlan-1" + neutron_tenant_network_dns_server: "8.8.8.8" + neutron_tenant_network_prefix: "203.0.113." 
+ neutron_tenant_network_prefix_length: "24" + previous_release: "{{ '2025.1' if is_slurp else '2025.1' }}" + scenario: core scenario_images_core: - ^cron - - ^opensearch - ^fluentd + - ^glance - ^haproxy + - ^heat + - ^horizon - ^keepalived - ^keystone - ^kolla-toolbox - ^mariadb - ^memcached + - ^neutron + - ^nova- + - ^openvswitch + - ^placement + - ^proxysql - ^rabbitmq - - ^venus - -- job: - name: kolla-ansible-hashi-vault-base - parent: kolla-ansible-variables - run: tests/run-hashi-vault.yml - required-projects: - - openstack/kolla-ansible - - openstack/requirements - voting: false - files: - - ^requirements-core.yml - - ^tests/templates/(inventory|globals-default.j2) - - ^tests/(pre|run).yml - - ^kolla_ansible/ - - ^tests/run-hashi-vault.yml - - ^tests/test-hashicorp-vault-passwords.sh - -- job: - name: kolla-ansible-haproxy-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/haproxy/ - - ^ansible/roles/loadbalancer/ - - ^kolla_ansible/kolla_url.py - vars: - external_api_interface_name: vxlan2 - external_api_network_prefix: "192.0.3." - external_api_network_prefix_length: "24" - kolla_external_vip_address: "192.0.3.10" - scenario: haproxy - -- job: - name: kolla-ansible-lets-encrypt-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/common/templates/conf/input/11-letsencrypt.conf.j2 - - ^ansible/roles/(letsencrypt|loadbalancer)/ - - ^tests/test-core-openstack.sh - - ^tests/test-dashboard.sh - - ^tests/deploy.sh - vars: - scenario: lets-encrypt - scenario_images_extra: - - ^letsencrypt - - ^haproxy tls_enabled: true - le_enabled: true - -- job: - name: kolla-ansible-skyline-base - parent: kolla-ansible-scenario-base - voting: false - files: !inherit - - ^ansible/roles/skyline/ - - ^tests/test-skyline.sh - vars: - scenario: skyline - scenario_images_extra: - - ^skyline - -- job: - name: kolla-ansible-skyline-sso-base - parent: kolla-ansible-scenario-base - voting: false - files: - - ^ansible/roles/skyline/ - - ^tests/test-skyline-sso.sh - vars: - scenario: skyline-sso - scenario_images_extra: - - ^skyline - -- job: - name: kolla-ansible-container-engine-migration-base - parent: kolla-ansible-base - voting: false - files: - - ^ansible/migrate-container-engine.yml - - ^ansible/roles/container-engine-migration/ - - ^tests/test-container-engine-migration.sh - vars: - scenario: container-engine-migration + virt_type: qemu diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml deleted file mode 100644 index e416ce8452..0000000000 --- a/zuul.d/jobs.yaml +++ /dev/null @@ -1,612 +0,0 @@ ---- -- job: - name: kolla-ansible-centos9s - parent: kolla-ansible-base - nodeset: kolla-ansible-centos9s - voting: false - vars: - base_distro: centos - tls_enabled: true - kolla_build_images: true - -- job: - name: kolla-ansible-centos10s - parent: kolla-ansible-base - nodeset: kolla-ansible-centos10s-8GB - voting: false - vars: - base_distro: centos - tls_enabled: true - kolla_build_images: true - -- job: - name: kolla-ansible-centos10s-aarch64 - parent: kolla-ansible-centos10s - nodeset: kolla-ansible-centos10s-aarch64-8GB - -- job: - name: kolla-ansible-debian-aarch64 - parent: kolla-ansible-debian - nodeset: kolla-ansible-debian-bookworm-aarch64-8GB - timeout: 10800 - voting: false - required-projects: - - openstack/kolla - -- job: - name: kolla-ansible-debian-aarch64-podman - parent: kolla-ansible-debian - nodeset: kolla-ansible-debian-bookworm-aarch64-8GB - timeout: 10800 - voting: false - vars: - container_engine: 
podman - required-projects: - - openstack/kolla - -- job: - name: kolla-ansible-debian - parent: kolla-ansible-base - nodeset: kolla-ansible-debian-bookworm-16GB - vars: - base_distro: debian - tls_enabled: true - -- job: - name: kolla-ansible-debian-podman - parent: kolla-ansible-base - nodeset: kolla-ansible-debian-bookworm-16GB - vars: - base_distro: debian - tls_enabled: true - container_engine: podman - -- job: - name: kolla-ansible-rocky9 - parent: kolla-ansible-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - tls_enabled: true - -- job: - name: kolla-ansible-rocky9-podman - parent: kolla-ansible-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - tls_enabled: true - container_engine: podman - -- job: - name: kolla-ansible-ubuntu - parent: kolla-ansible-base - nodeset: kolla-ansible-ubuntu-noble-16GB - vars: - base_distro: ubuntu - tls_enabled: true - -- job: - name: kolla-ansible-ubuntu-podman - parent: kolla-ansible-base - nodeset: kolla-ansible-ubuntu-noble-16GB - vars: - base_distro: ubuntu - tls_enabled: true - container_engine: podman - -- job: - name: kolla-ansible-rocky9-kvm - parent: kolla-ansible-kvm-base - nodeset: kolla-ansible-rocky9-nested-virt - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-kvm - parent: kolla-ansible-kvm-base - nodeset: kolla-ansible-ubuntu-noble-nested-virt - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-multinode-ipv6 - parent: kolla-ansible-ipv6-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-multinode-ipv6 - parent: kolla-ansible-ipv6-base - nodeset: kolla-ansible-ubuntu-noble-multi-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-rocky9-multi - timeout: 10800 - vars: - base_distro: rocky - cephadm_use_package_from_distribution: true - -- job: - name: kolla-ansible-ubuntu-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - timeout: 10800 - vars: - base_distro: ubuntu - cephadm_use_package_from_distribution: true - -- job: - name: kolla-ansible-debian-mariadb - parent: kolla-ansible-mariadb-base - nodeset: kolla-ansible-debian-bookworm-multi-16GB - vars: - base_distro: debian - -- job: - name: kolla-ansible-rocky9-mariadb - parent: kolla-ansible-mariadb-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-mariadb - parent: kolla-ansible-mariadb-base - nodeset: kolla-ansible-ubuntu-noble-multi-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-rocky9 - timeout: 10800 - vars: - base_distro: rocky - is_upgrade: yes - tls_enabled: true - -- job: - name: kolla-ansible-rocky9-slurp-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-rocky9 - timeout: 9000 - vars: - base_distro: rocky - is_upgrade: yes - is_slurp: yes - previous_release: "2024.1" - tls_enabled: true - -- job: - name: kolla-ansible-debian-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-debian-bookworm-16GB - timeout: 10800 - vars: - base_distro: debian - is_upgrade: yes - tls_enabled: true - -- job: - name: kolla-ansible-debian-slurp-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-debian-bookworm-16GB - timeout: 9000 - vars: - base_distro: debian - is_upgrade: yes - is_slurp: yes - previous_release: "2024.1" - tls_enabled: true - -- job: - 
name: kolla-ansible-debian-upgrade-aarch64 - parent: kolla-ansible-debian-upgrade - nodeset: kolla-ansible-debian-bookworm-aarch64-8GB - voting: false - -- job: - name: kolla-ansible-ubuntu-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-ubuntu-noble-16GB - timeout: 10800 - vars: - base_distro: ubuntu - is_upgrade: yes - tls_enabled: true - -- job: - name: kolla-ansible-ubuntu-slurp-upgrade - parent: kolla-ansible-base - nodeset: kolla-ansible-ubuntu-noble-16GB - timeout: 9000 - vars: - base_distro: ubuntu - is_upgrade: yes - is_slurp: yes - previous_release: "2024.1" - tls_enabled: true - -- job: - name: kolla-ansible-rocky9-upgrade-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-rocky9-multi - timeout: 10800 - vars: - base_distro: rocky - is_upgrade: yes - -- job: - name: kolla-ansible-rocky9-slurp-upgrade-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-rocky9-multi - timeout: 9000 - vars: - base_distro: rocky - is_upgrade: yes - is_slurp: yes - previous_release: "2024.1" - -- job: - name: kolla-ansible-ubuntu-upgrade-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - timeout: 10800 - vars: - base_distro: ubuntu - is_upgrade: yes - cephadm_use_package_from_distribution: true - -- job: - name: kolla-ansible-ubuntu-slurp-upgrade-cephadm - parent: kolla-ansible-cephadm-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - timeout: 9000 - vars: - base_distro: ubuntu - is_upgrade: yes - is_slurp: yes - previous_release: "2024.1" - cephadm_use_package_from_distribution: true - -- job: - name: kolla-ansible-rocky9-upgrade-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-rocky9-multi - timeout: 10800 - vars: - base_distro: rocky - is_upgrade: yes - -- job: - name: kolla-ansible-debian-upgrade-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-debian-bookworm-multi-16GB - timeout: 10800 - vars: - base_distro: debian - is_upgrade: yes - -- job: - name: kolla-ansible-ubuntu-upgrade-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - timeout: 10800 - vars: - base_distro: ubuntu - is_upgrade: yes - -- job: - name: kolla-ansible-rocky9-bifrost - parent: kolla-ansible-bifrost-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-bifrost - parent: kolla-ansible-bifrost-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-zun - parent: kolla-ansible-zun-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-zun - parent: kolla-ansible-zun-base - nodeset: kolla-ansible-ubuntu-noble-multi-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-scenario-nfv - parent: kolla-ansible-scenario-nfv-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-ironic - parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-debian-ironic - parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-debian-bookworm-16GB - vars: - base_distro: debian - -- job: - name: kolla-ansible-ubuntu-ironic - parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-ironic-upgrade - parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - 
is_upgrade: true - -- job: - name: kolla-ansible-debian-ironic-upgrade - parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-debian-bookworm-16GB - vars: - base_distro: debian - is_upgrade: true - -- job: - name: kolla-ansible-ubuntu-ironic-upgrade - parent: kolla-ansible-ironic-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - is_upgrade: true - -- job: - name: kolla-ansible-rocky9-magnum - parent: kolla-ansible-magnum-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-magnum - parent: kolla-ansible-magnum-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-octavia - parent: kolla-ansible-octavia-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-octavia - parent: kolla-ansible-octavia-base - nodeset: kolla-ansible-ubuntu-noble-16GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-ubuntu-masakari - parent: kolla-ansible-masakari-base - nodeset: kolla-ansible-ubuntu-noble-masakari-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-masakari - parent: kolla-ansible-masakari-base - nodeset: kolla-ansible-rocky9-masakari - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-cells - parent: kolla-ansible-cells-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-cells - parent: kolla-ansible-cells-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-debian-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-debian-bookworm-multi-16GB - vars: - base_distro: debian - -- job: - name: kolla-ansible-ubuntu-ovn - parent: kolla-ansible-ovn-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-ubuntu-lets-encrypt - parent: kolla-ansible-lets-encrypt-base - nodeset: kolla-ansible-ubuntu-noble-multi-16GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-lets-encrypt - parent: kolla-ansible-lets-encrypt-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-prometheus-opensearch - parent: kolla-ansible-prometheus-opensearch-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-prometheus-opensearch - parent: kolla-ansible-prometheus-opensearch-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-prometheus-opensearch-upgrade - parent: kolla-ansible-prometheus-opensearch-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - is_upgrade: yes - -- job: - name: kolla-ansible-ubuntu-prometheus-opensearch-upgrade - parent: kolla-ansible-prometheus-opensearch-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - is_upgrade: yes - -- job: - name: kolla-ansible-rocky9-venus - parent: kolla-ansible-venus-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-venus - parent: kolla-ansible-venus-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-hashi-vault - parent: kolla-ansible-hashi-vault-base - 
nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-haproxy-fqdn - parent: kolla-ansible-haproxy-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - tls_enabled: true - -- job: - name: kolla-ansible-ubuntu-haproxy-fqdn - parent: kolla-ansible-haproxy-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - tls_enabled: true - -- job: - name: kolla-ansible-ubuntu-skyline - parent: kolla-ansible-skyline-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-skyline - parent: kolla-ansible-skyline-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-skyline-sso - parent: kolla-ansible-skyline-sso-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-rocky9-skyline-sso - parent: kolla-ansible-skyline-sso-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-container-engine-migration - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-rocky9 - vars: - base_distro: rocky - -- job: - name: kolla-ansible-rocky9-container-engine-migration-multinode - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-rocky9-multi - vars: - base_distro: rocky - -- job: - name: kolla-ansible-ubuntu-container-engine-migration - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-ubuntu-noble-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-ubuntu-container-engine-migration-multinode - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-ubuntu-noble-multi-8GB - vars: - base_distro: ubuntu - -- job: - name: kolla-ansible-debian-container-engine-migration - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-debian-bookworm-16GB - vars: - base_distro: debian - -- job: - name: kolla-ansible-debian-container-engine-migration-multinode - parent: kolla-ansible-container-engine-migration-base - nodeset: kolla-ansible-debian-bookworm-multi-16GB - vars: - base_distro: debian diff --git a/zuul.d/nodesets.yaml b/zuul.d/nodesets.yaml index 68565fba63..295df62e61 100644 --- a/zuul.d/nodesets.yaml +++ b/zuul.d/nodesets.yaml @@ -1,12 +1,12 @@ --- - nodeset: - name: kolla-ansible-centos10s-8GB + name: kolla-ansible-centos-10s-8GB nodes: - name: primary label: centos-10-stream-8GB - nodeset: - name: kolla-ansible-centos10s-aarch64-8GB + name: kolla-ansible-centos-10s-aarch64-8GB nodes: - name: primary label: centos-10-stream-arm64-8GB @@ -30,16 +30,16 @@ label: debian-bookworm-arm64-8GB - nodeset: - name: kolla-ansible-ubuntu-noble-8GB + name: kolla-ansible-debian-bookworm-masakari-8GB nodes: - name: primary - label: ubuntu-noble-8GB - -- nodeset: - name: kolla-ansible-ubuntu-noble-16GB - nodes: - - name: primary - label: ubuntu-noble-16GB + label: debian-bookworm-8GB + - name: secondary + label: debian-bookworm-8GB + - name: ternary1 + label: debian-bookworm-8GB + - name: ternary2 + label: debian-bookworm-8GB - nodeset: name: kolla-ansible-debian-bookworm-multi-8GB @@ -62,52 +62,92 @@ label: debian-bookworm-16GB - nodeset: - name: kolla-ansible-rocky9 + name: kolla-ansible-debian-bookworm-nested-virt nodes: - name: primary - label: rockylinux-9 + label: debian-bookworm-nested-virt-8GB - nodeset: - name: kolla-ansible-ubuntu-noble-multi-8GB + name: kolla-ansible-rocky-10-8GB nodes: - 
name: primary - label: ubuntu-noble-8GB + label: rockylinux-10-8GB + +- nodeset: + name: kolla-ansible-rocky-10-16GB + nodes: + - name: primary + label: rockylinux-10-16GB + +- nodeset: + name: kolla-ansible-rocky-10-masakari-8GB + nodes: + - name: primary + label: rockylinux-10-8GB + - name: secondary + label: rockylinux-10-8GB + - name: ternary1 + label: rockylinux-10-8GB + - name: ternary2 + label: rockylinux-10-8GB + +- nodeset: + name: kolla-ansible-rocky-10-multi-8GB + nodes: + - name: primary + label: rockylinux-10-8GB - name: secondary1 - label: ubuntu-noble-8GB + label: rockylinux-10-8GB - name: secondary2 - label: ubuntu-noble-8GB + label: rockylinux-10-8GB - nodeset: - name: kolla-ansible-ubuntu-noble-multi-16GB + name: kolla-ansible-rocky-10-multi-16GB nodes: - name: primary - label: ubuntu-noble-16GB + label: rockylinux-10-16GB - name: secondary1 - label: ubuntu-noble-16GB + label: rockylinux-10-16GB - name: secondary2 + label: rockylinux-10-16GB + +- nodeset: + name: kolla-ansible-ubuntu-noble-8GB + nodes: + - name: primary + label: ubuntu-noble-8GB + +- nodeset: + name: kolla-ansible-ubuntu-noble-16GB + nodes: + - name: primary label: ubuntu-noble-16GB - nodeset: - name: kolla-ansible-rocky9-multi + name: kolla-ansible-ubuntu-noble-multi-8GB nodes: - name: primary - label: rockylinux-9 + label: ubuntu-noble-8GB - name: secondary1 - label: rockylinux-9 + label: ubuntu-noble-8GB - name: secondary2 - label: rockylinux-9 + label: ubuntu-noble-8GB - nodeset: - name: kolla-ansible-ubuntu-noble-nested-virt + name: kolla-ansible-ubuntu-noble-multi-16GB nodes: - name: primary - label: ubuntu-noble-nested-virt-8GB + label: ubuntu-noble-16GB + - name: secondary1 + label: ubuntu-noble-16GB + - name: secondary2 + label: ubuntu-noble-16GB - nodeset: - name: kolla-ansible-rocky9-nested-virt + name: kolla-ansible-ubuntu-noble-nested-virt nodes: - name: primary - label: nested-virt-centos-9-stream + label: ubuntu-noble-nested-virt-8GB - nodeset: name: kolla-ansible-ubuntu-noble-masakari-8GB @@ -120,15 +160,3 @@ label: ubuntu-noble-8GB - name: ternary2 label: ubuntu-noble-8GB - -- nodeset: - name: kolla-ansible-rocky9-masakari - nodes: - - name: primary - label: rockylinux-9 - - name: secondary - label: rockylinux-9 - - name: ternary1 - label: rockylinux-9 - - name: ternary2 - label: rockylinux-9 diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index 7ec21b694f..dffc20c4ed 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -4,66 +4,33 @@ templates: - ansible-role-jobs - check-requirements + - kolla-ansible-scenario-aio + - kolla-ansible-scenario-bifrost + - kolla-ansible-scenario-cells + - kolla-ansible-scenario-cephadm + # NOTE(mnasiadka): Failing since + # https://review.opendev.org/c/openstack/kolla-ansible/+/864780 + # - kolla-ansible-scenario-container-engine-migration + - kolla-ansible-scenario-haproxy-fqdn + - kolla-ansible-scenario-kayobe + - kolla-ansible-scenario-openbao + - kolla-ansible-scenario-kvm + - kolla-ansible-scenario-lets-encrypt + - kolla-ansible-scenario-magnum + - kolla-ansible-scenario-mariadb + - kolla-ansible-scenario-masakari + - kolla-ansible-scenario-nfv + - kolla-ansible-scenario-ironic + # NOTE(mnasiadka): All runs end up with DISK_FULL + #- kolla-ansible-scenario-ipv6 + - kolla-ansible-scenario-octavia + - kolla-ansible-scenario-ovn + - kolla-ansible-scenario-prometheus-opensearch + # NOTE(mnasiadka): SSO and non-SSO tests are failing + #- kolla-ansible-scenario-skyline + - kolla-ansible-scenario-telemetry - openstack-cover-jobs - - 
openstack-python3-jobs + - openstack-python3-jobs-kolla-ansible - periodic-stable-jobs - publish-openstack-docs-pti - release-notes-jobs-python3 - check: - jobs: - - kolla-ansible-centos10s - - kolla-ansible-debian - - kolla-ansible-debian-podman - - kolla-ansible-ubuntu - - kolla-ansible-ubuntu-podman - - kolla-ansible-ubuntu-kvm - - kolla-ansible-ubuntu-multinode-ipv6 - - kolla-ansible-ubuntu-bifrost - - kolla-ansible-ubuntu-magnum - - kolla-ansible-ubuntu-octavia - - kolla-ansible-ubuntu-masakari - - kolla-ansible-debian-ironic - - kolla-ansible-ubuntu-ironic - - kolla-ansible-debian-ironic-upgrade - - kolla-ansible-ubuntu-ironic-upgrade - - kolla-ansible-debian-upgrade - - kolla-ansible-ubuntu-upgrade - - kolla-ansible-ubuntu-cells - - kolla-ansible-debian-mariadb - - kolla-ansible-ubuntu-mariadb - - kolla-ansible-debian-ovn - - kolla-ansible-ubuntu-ovn - - kolla-ansible-debian-upgrade-ovn - - kolla-ansible-ubuntu-upgrade-ovn - - kolla-ansible-debian - - kolla-ansible-ubuntu-prometheus-opensearch - - kolla-ansible-ubuntu-prometheus-opensearch-upgrade - - kolla-ansible-ubuntu-venus - - kolla-ansible-ubuntu-cephadm - - kolla-ansible-ubuntu-upgrade-cephadm - - kolla-ansible-ubuntu-haproxy-fqdn - - kolla-ansible-ubuntu-lets-encrypt - - kolla-ansible-ubuntu-skyline - - kolla-ansible-ubuntu-skyline-sso - - kolla-ansible-ubuntu-container-engine-migration - - kolla-ansible-ubuntu-container-engine-migration-multinode - - kolla-ansible-debian-container-engine-migration - - kolla-ansible-debian-container-engine-migration-multinode - check-arm64: - jobs: - - kolla-ansible-centos10s-aarch64 - - kolla-ansible-debian-aarch64 - - kolla-ansible-debian-aarch64-podman - - kolla-ansible-debian-upgrade-aarch64 - gate: - jobs: - - kolla-ansible-debian - - kolla-ansible-debian-mariadb - - kolla-ansible-debian-upgrade - - kolla-ansible-debian-podman - - kolla-ansible-ubuntu - - kolla-ansible-ubuntu-mariadb - - kolla-ansible-ubuntu-prometheus-opensearch - - kolla-ansible-ubuntu-prometheus-opensearch-upgrade - - kolla-ansible-ubuntu-upgrade - - kolla-ansible-ubuntu-podman diff --git a/zuul.d/python3-jobs.yaml b/zuul.d/python3-jobs.yaml new file mode 100644 index 0000000000..5dc7903ed0 --- /dev/null +++ b/zuul.d/python3-jobs.yaml @@ -0,0 +1,23 @@ +- project-template: + name: openstack-python3-jobs-kolla-ansible + # NOTE(mnasiadka): Local definition to skip py310 jobs on Flamingo + description: | + Runs unit tests for an OpenStack Python project under the CPython + version 3 releases designated for testing the latest release. + check: + jobs: + - openstack-tox-pep8 + # py3.12 testing is added as mandatory from 2025.1 release onwards. + # From 2026.1, we run it as periodic only(assuming py3.10 and py3.13 + # will be enough coverage to run on every change) + - openstack-tox-py312 + gate: + jobs: + - openstack-tox-pep8 + # py3.12 testing is added as mandatory from 2025.1 release onwards. 
+ # From 2026.1, we run it as periodic only(assuming py3.10 and py3.13 + # will be enough coverage to run on every change) + - openstack-tox-py312 + post: + jobs: + - publish-openstack-python-branch-tarball \ No newline at end of file diff --git a/zuul.d/scenarios/aio.yaml b/zuul.d/scenarios/aio.yaml new file mode 100644 index 0000000000..f8e9b06e31 --- /dev/null +++ b/zuul.d/scenarios/aio.yaml @@ -0,0 +1,143 @@ +--- +- job: + parent: kolla-ansible-base + name: kolla-ansible-aio-base + files: !inherit + - ^ansible/group_vars/all/(common|fluentd|glance|haproxy|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml + - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml + - ^ansible/(action_plugins|filter_plugins|library|module_utils)/ + - ^ansible/roles/(common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|nova-cell|openvswitch|placement|proxysql|rabbitmq|service-.*)/ + - ^kolla_ansible/ + - ^roles/kolla-ansible-(deploy|tempest|test-dashboard)/ + - ^tests/testinfra/test_horizon.py + - ^tools/init-runonce + +- job: + name: kolla-ansible-centos-10s + parent: kolla-ansible-aio-base + nodeset: kolla-ansible-centos-10s-8GB + voting: false + vars: + kolla_build_images: true + +- job: + name: kolla-ansible-centos-10s-aarch64 + parent: kolla-ansible-centos-10s + nodeset: kolla-ansible-centos-10s-aarch64-8GB + +- job: + name: kolla-ansible-debian-bookworm + parent: kolla-ansible-aio-base + nodeset: kolla-ansible-debian-bookworm-16GB + +- job: + name: kolla-ansible-debian-bookworm-aarch64 + parent: kolla-ansible-debian-bookworm + nodeset: kolla-ansible-debian-bookworm-aarch64-8GB + timeout: 10800 + vars: + kolla_build_images: true + voting: false + +- job: + name: kolla-ansible-debian-bookworm-aarch64-podman + parent: kolla-ansible-debian-bookworm-aarch64 + nodeset: kolla-ansible-debian-bookworm-aarch64-8GB + timeout: 10800 + vars: + container_engine: podman + kolla_build_images: true + voting: false + +- job: + name: kolla-ansible-debian-bookworm-podman + parent: kolla-ansible-debian-bookworm + nodeset: kolla-ansible-debian-bookworm-16GB + vars: + container_engine: podman + +- job: + name: kolla-ansible-debian-bookworm-upgrade + parent: kolla-ansible-aio-base + nodeset: kolla-ansible-debian-bookworm-16GB + timeout: 10800 + +- job: + name: kolla-ansible-debian-bookworm-aarch64-upgrade + parent: kolla-ansible-debian-bookworm-upgrade + nodeset: kolla-ansible-debian-bookworm-aarch64-8GB + voting: false + +- job: + name: kolla-ansible-debian-bookworm-upgrade-slurp + parent: kolla-ansible-debian-bookworm-upgrade + nodeset: kolla-ansible-debian-bookworm-16GB + timeout: 9000 + +- job: + name: kolla-ansible-rocky-10 + parent: kolla-ansible-aio-base + nodeset: kolla-ansible-rocky-10-16GB + +- job: + name: kolla-ansible-rocky-10-podman + parent: kolla-ansible-rocky-10 + nodeset: kolla-ansible-rocky-10-16GB + vars: + container_engine: podman + +- job: + name: kolla-ansible-ubuntu-noble + parent: kolla-ansible-aio-base + nodeset: kolla-ansible-ubuntu-noble-16GB + +- job: + name: kolla-ansible-ubuntu-noble-podman + parent: kolla-ansible-ubuntu-noble + nodeset: kolla-ansible-ubuntu-noble-16GB + vars: + container_engine: podman + +- job: + name: kolla-ansible-ubuntu-noble-upgrade + parent: kolla-ansible-aio-base + nodeset: kolla-ansible-ubuntu-noble-16GB + timeout: 10800 + +- job: + name: kolla-ansible-ubuntu-noble-upgrade-slurp + parent: kolla-ansible-aio-base + nodeset: kolla-ansible-ubuntu-noble-16GB + 
timeout: 9000 + +- project-template: + name: kolla-ansible-scenario-aio + description: | + Runs Kolla-Ansible AIO scenario jobs. + check: + jobs: + - kolla-ansible-centos-10s + - kolla-ansible-debian-bookworm + - kolla-ansible-debian-bookworm-podman + - kolla-ansible-debian-bookworm-upgrade + - kolla-ansible-rocky-10 + - kolla-ansible-rocky-10-podman + - kolla-ansible-ubuntu-noble + - kolla-ansible-ubuntu-noble-podman + - kolla-ansible-ubuntu-noble-upgrade + check-arm64: + jobs: + - kolla-ansible-centos-10s-aarch64 + - kolla-ansible-debian-bookworm-aarch64 + - kolla-ansible-debian-bookworm-aarch64-podman + - kolla-ansible-debian-bookworm-aarch64-upgrade + gate: + jobs: + - kolla-ansible-debian-bookworm + - kolla-ansible-debian-bookworm-podman + - kolla-ansible-debian-bookworm-upgrade + - kolla-ansible-rocky-10 + - kolla-ansible-rocky-10-podman + - kolla-ansible-ubuntu-noble + - kolla-ansible-ubuntu-noble-upgrade + - kolla-ansible-ubuntu-noble-podman diff --git a/zuul.d/scenarios/bifrost.yaml b/zuul.d/scenarios/bifrost.yaml new file mode 100644 index 0000000000..958c5a69c7 --- /dev/null +++ b/zuul.d/scenarios/bifrost.yaml @@ -0,0 +1,38 @@ +--- +- job: + name: kolla-ansible-bifrost-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/bifrost.yml + - ^ansible/roles/bifrost/ + - ^roles/kolla-ansible-(deploy|test|upgrade)-bifrost/ + vars: + scenario: bifrost + scenario_images_core: + - ^bifrost + +- job: + name: kolla-ansible-debian-bookworm-bifrost + parent: kolla-ansible-bifrost-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-rocky-10-bifrost + parent: kolla-ansible-bifrost-base + nodeset: kolla-ansible-rocky-10-8GB + +- job: + name: kolla-ansible-ubuntu-noble-bifrost + parent: kolla-ansible-bifrost-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-bifrost + description: | + Runs Kolla-Ansible Bifrost scenario jobs. + check: + jobs: + - kolla-ansible-debian-bookworm-bifrost + - kolla-ansible-rocky-10-bifrost + - kolla-ansible-ubuntu-noble-bifrost diff --git a/zuul.d/scenarios/cells.yaml b/zuul.d/scenarios/cells.yaml new file mode 100644 index 0000000000..b94bbe93a0 --- /dev/null +++ b/zuul.d/scenarios/cells.yaml @@ -0,0 +1,40 @@ +--- +- job: + name: kolla-ansible-cells-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/(baremetal|common|fluentd|glance|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq|sysctl).yml + - ^ansible/roles/nova/ + - ^ansible/roles/nova-cell/ + - ^ansible/roles/loadbalancer/ + - ^tests/test-(core-openstack|proxysql).sh + vars: + scenario: cells + scenario_images_extra: + - ^proxysql + +- job: + name: kolla-ansible-debian-bookworm-cells + parent: kolla-ansible-cells-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + +- job: + name: kolla-ansible-rocky-10-cells + parent: kolla-ansible-cells-base + nodeset: kolla-ansible-rocky-10-multi-16GB + +- job: + name: kolla-ansible-ubuntu-noble-cells + parent: kolla-ansible-cells-base + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + +- project-template: + name: kolla-ansible-scenario-cells + description: | + Runs Kolla-Ansible Nova Cells scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-cells + - kolla-ansible-rocky-10-cells + - kolla-ansible-ubuntu-noble-cells diff --git a/zuul.d/scenarios/cephadm.yaml b/zuul.d/scenarios/cephadm.yaml new file mode 100644 index 0000000000..28ac2128b9 --- /dev/null +++ b/zuul.d/scenarios/cephadm.yaml @@ -0,0 +1,68 @@ +--- +- job: + name: kolla-ansible-cephadm-base + parent: kolla-ansible-base + voting: false + files: + - ^ansible/group_vars/all/(ceph|ceph-rgw|common|fluentd|glance|heat|horizon|keepalived|keystone|loadbalancer|neutron|nova|openvswitch|placement|proxysql|rabbitmq).yml + - ^ansible/group_vars/baremetal/ansible-python-interpreter.yml + - ^ansible/roles/(ceph-rgw|common|fluentd|glance|haproxy-config|heat|horizon|keystone|loadbalancer|loadbalancer-config|neutron|nova|openvswitch|placement|proxysql|rabbitmq)/ + - ^roles/cephadm/ + vars: + kolla_ansible_setup_disks_file_path: "/var/lib/ceph-osd.img" + kolla_ansible_setup_disks_vg_name: "cephvg" + kolla_ansible_setup_disks_lv_name: "cephlv" + kolla_ansible_tempest_exclude_regex: "^tempest.api.object_storage" + scenario: cephadm + scenario_images_extra: + - ^cinder + - ^redis + +- job: + name: kolla-ansible-debian-bookworm-cephadm + parent: kolla-ansible-cephadm-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + timeout: 10800 + +- job: + name: kolla-ansible-debian-bookworm-cephadm-upgrade + parent: kolla-ansible-debian-bookworm-cephadm + nodeset: kolla-ansible-debian-bookworm-multi-16GB + timeout: 10800 + +- job: + name: kolla-ansible-debian-bookworm-cephadm-upgrade-slurp + parent: kolla-ansible-debian-bookworm-cephadm-upgrade + nodeset: kolla-ansible-debian-bookworm-multi-16GB + timeout: 9000 + +- job: + name: kolla-ansible-ubuntu-noble-cephadm + parent: kolla-ansible-cephadm-base + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + timeout: 10800 + vars: + cephadm_use_package_from_distribution: true + +- job: + name: kolla-ansible-ubuntu-noble-cephadm-upgrade + parent: kolla-ansible-ubuntu-noble-cephadm + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + timeout: 10800 + +- job: + name: kolla-ansible-ubuntu-noble-cephadm-upgrade-slurp + parent: kolla-ansible-ubuntu-noble-cephadm-upgrade + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + timeout: 9000 + +- project-template: + name: kolla-ansible-scenario-cephadm + description: | + Runs Kolla-Ansible CephAdm scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-cephadm + - kolla-ansible-debian-bookworm-cephadm-upgrade + - kolla-ansible-ubuntu-noble-cephadm + - kolla-ansible-ubuntu-noble-cephadm-upgrade diff --git a/zuul.d/scenarios/container-engine-migration.yaml b/zuul.d/scenarios/container-engine-migration.yaml new file mode 100644 index 0000000000..c57aaa23a5 --- /dev/null +++ b/zuul.d/scenarios/container-engine-migration.yaml @@ -0,0 +1,42 @@ +--- +- job: + name: kolla-ansible-container-engine-migration-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/migrate-container-engine.yml + - ^ansible/roles/container-engine-migration/ + - ^tests/test-container-engine-migration.sh + vars: + scenario: container-engine-migration + +- job: + name: kolla-ansible-debian-container-engine-migration + parent: kolla-ansible-container-engine-migration-base + nodeset: kolla-ansible-debian-bookworm-16GB + +- job: + name: kolla-ansible-debian-container-engine-migration-multinode + parent: kolla-ansible-container-engine-migration-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + +- job: + name: kolla-ansible-ubuntu-container-engine-migration + parent: kolla-ansible-container-engine-migration-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- job: + name: kolla-ansible-ubuntu-container-engine-migration-multinode + parent: kolla-ansible-container-engine-migration-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + +- project-template: + name: kolla-ansible-scenario-container-engine-migration + description: | + Runs Kolla-Ansible container engine migration scenario jobs. + check: + jobs: + - kolla-ansible-debian-container-engine-migration + - kolla-ansible-debian-container-engine-migration-multinode + - kolla-ansible-ubuntu-container-engine-migration + - kolla-ansible-ubuntu-container-engine-migration-multinode diff --git a/zuul.d/scenarios/haproxy-fqdn.yaml b/zuul.d/scenarios/haproxy-fqdn.yaml new file mode 100644 index 0000000000..51ecfcdc0a --- /dev/null +++ b/zuul.d/scenarios/haproxy-fqdn.yaml @@ -0,0 +1,40 @@ +--- +- job: + name: kolla-ansible-haproxy-fqdn-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/haproxy.yml + - ^ansible/roles/(haproxy|haproxy-config|loadbalancer|loadbalancer-config)/ + - ^kolla_ansible/kolla_url.py + vars: + external_api_interface_name: vxlan2 + external_api_network_prefix: "192.0.3." + external_api_network_prefix_length: "24" + kolla_external_vip_address: "192.0.3.10" + scenario: haproxy + +- job: + name: kolla-ansible-debian-bookworm-haproxy-fqdn + parent: kolla-ansible-haproxy-fqdn-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-rocky-10-haproxy-fqdn + parent: kolla-ansible-haproxy-fqdn-base + nodeset: kolla-ansible-rocky-10-8GB + +- job: + name: kolla-ansible-ubuntu-noble-haproxy-fqdn + parent: kolla-ansible-haproxy-fqdn-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-haproxy-fqdn + description: | + Runs Kolla-Ansible HAProxy FQDN scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-haproxy-fqdn + - kolla-ansible-rocky-10-haproxy-fqdn + - kolla-ansible-ubuntu-noble-haproxy-fqdn diff --git a/zuul.d/scenarios/ipv6.yaml b/zuul.d/scenarios/ipv6.yaml new file mode 100644 index 0000000000..f465b966a5 --- /dev/null +++ b/zuul.d/scenarios/ipv6.yaml @@ -0,0 +1,44 @@ +--- +- job: + name: kolla-ansible-ipv6-base + parent: kolla-ansible-aio-base + voting: false + vars: + address_family: 'ipv6' + api_network_prefix: "fd::" + api_network_prefix_length: "64" + kolla_internal_vip_address: "fd::ff:0" + neutron_external_network_prefix: "fd:1::" + neutron_external_network_prefix_length: "64" + neutron_tenant_network_prefix: "fd:f0::" + neutron_tenant_network_prefix_length: "64" + neutron_tenant_network_dns_server: 2001:4860:4860::8888 + scenario: ipv6 + scenario_images_extra: + - ^prometheus + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-ipv6 + parent: kolla-ansible-ipv6-base + nodeset: kolla-ansible-debian-bookworm-multi-8GB + +- job: + name: kolla-ansible-rocky-10-ipv6 + parent: kolla-ansible-ipv6-base + nodeset: kolla-ansible-rocky-10-multi-8GB + +- job: + name: kolla-ansible-ubuntu-noble-ipv6 + parent: kolla-ansible-ipv6-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + +- project-template: + name: kolla-ansible-scenario-ipv6 + description: | + Runs Kolla-Ansible ipv6 scenario jobs. + check: + jobs: + - kolla-ansible-debian-bookworm-ipv6 + - kolla-ansible-rocky-10-ipv6 + - kolla-ansible-ubuntu-noble-ipv6 diff --git a/zuul.d/scenarios/ironic.yaml b/zuul.d/scenarios/ironic.yaml new file mode 100644 index 0000000000..b1fa449060 --- /dev/null +++ b/zuul.d/scenarios/ironic.yaml @@ -0,0 +1,60 @@ +--- +- job: + name: kolla-ansible-ironic-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/(nova|ironic).yml + - ^ansible/roles/(nova|nova-cell|ironic)/ + - ^tests/deploy-tenks\.sh$ + - ^tests/templates/ironic-overrides\.j2$ + - ^tests/templates/tenks-deploy-config\.yml\.j2$ + - ^tests/test-dashboard\.sh$ + - ^tests/test-ironic\.sh$ + required-projects: + - openstack/tenks + vars: + scenario: ironic + scenario_images_extra: + - ^dnsmasq + - ^ironic + - ^iscsid + - ^prometheus + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-ironic + parent: kolla-ansible-ironic-base + nodeset: kolla-ansible-debian-bookworm-16GB + +- job: + name: kolla-ansible-debian-bookworm-ironic-upgrade + parent: kolla-ansible-debian-bookworm-ironic + nodeset: kolla-ansible-debian-bookworm-16GB + +- job: + name: kolla-ansible-rocky-10-ironic + parent: kolla-ansible-ironic-base + nodeset: kolla-ansible-rocky-10-16GB + +- job: + name: kolla-ansible-ubuntu-noble-ironic + parent: kolla-ansible-ironic-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- job: + name: kolla-ansible-ubuntu-noble-ironic-upgrade + parent: kolla-ansible-ubuntu-noble-ironic + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-ironic + description: | + Runs Kolla-Ansible Ironic scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-ironic + - kolla-ansible-debian-bookworm-ironic-upgrade + - kolla-ansible-rocky-10-ironic + - kolla-ansible-ubuntu-noble-ironic + - kolla-ansible-ubuntu-noble-ironic-upgrade diff --git a/zuul.d/scenarios/kayobe.yaml b/zuul.d/scenarios/kayobe.yaml new file mode 100644 index 0000000000..abc0d173a8 --- /dev/null +++ b/zuul.d/scenarios/kayobe.yaml @@ -0,0 +1,11 @@ +--- +- project-template: + name: kolla-ansible-scenario-kayobe + description: | + Runs a subset of Kayobe jobs in Kolla Ansible CI to catch regressions. + check: + jobs: + - kayobe-overcloud-rocky10: + voting: false + - kayobe-overcloud-ubuntu-noble: + voting: false diff --git a/zuul.d/scenarios/kvm.yaml b/zuul.d/scenarios/kvm.yaml new file mode 100644 index 0000000000..ed7d3219cd --- /dev/null +++ b/zuul.d/scenarios/kvm.yaml @@ -0,0 +1,35 @@ +--- +- job: + name: kolla-ansible-kvm-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/roles/nova-cell/ + - ^tests/templates/nova-compute-overrides.j2 + vars: + virt_type: kvm + +- job: + name: kolla-ansible-debian-bookworm-kvm + parent: kolla-ansible-kvm-base + nodeset: kolla-ansible-debian-bookworm-nested-virt + +- job: + name: kolla-ansible-rocky-10-kvm + parent: kolla-ansible-kvm-base + nodeset: kolla-ansible-rocky-10-8GB + +- job: + name: kolla-ansible-ubuntu-noble-kvm + parent: kolla-ansible-kvm-base + nodeset: kolla-ansible-ubuntu-noble-nested-virt + +- project-template: + name: kolla-ansible-scenario-kvm + description: | + Runs Kolla-Ansible KVM scenario jobs. + check: + jobs: + - kolla-ansible-debian-bookworm-kvm + - kolla-ansible-rocky-10-kvm + - kolla-ansible-ubuntu-noble-kvm diff --git a/zuul.d/scenarios/lets-encrypt.yaml b/zuul.d/scenarios/lets-encrypt.yaml new file mode 100644 index 0000000000..fc059a79c0 --- /dev/null +++ b/zuul.d/scenarios/lets-encrypt.yaml @@ -0,0 +1,38 @@ +--- +- job: + name: kolla-ansible-lets-encrypt-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/lets-encrypt.yml + - ^ansible/roles/fluentd/templates/conf/input/11-letsencrypt.conf.j2 + - ^ansible/roles/(haproxy-config|letsencrypt|loadbalancer|loadbalancer-config)/ + - ^roles/kolla-ansible-deploy/tasks/certificates.yml + - ^tests/test-core-openstack.sh + - ^tests/test-dashboard.sh + vars: + scenario: lets-encrypt + scenario_images_extra: + - ^letsencrypt + - ^haproxy + tls_enabled: true + le_enabled: true + +- job: + name: kolla-ansible-debian-bookworm-lets-encrypt + parent: kolla-ansible-lets-encrypt-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + +- job: + name: kolla-ansible-ubuntu-noble-lets-encrypt + parent: kolla-ansible-lets-encrypt-base + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + +- project-template: + name: kolla-ansible-scenario-lets-encrypt + description: | + Runs Kolla-Ansible Let's Encrypt scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-lets-encrypt + - kolla-ansible-ubuntu-noble-lets-encrypt diff --git a/zuul.d/scenarios/magnum.yaml b/zuul.d/scenarios/magnum.yaml new file mode 100644 index 0000000000..9a5552150a --- /dev/null +++ b/zuul.d/scenarios/magnum.yaml @@ -0,0 +1,43 @@ +--- +- job: + name: kolla-ansible-magnum-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/(designate|magnum|trove).yml + - ^ansible/roles/(designate|magnum|trove)/ + - ^tests/test-dashboard.sh + - ^tests/test-magnum.sh + vars: + scenario: magnum + scenario_images_extra: + - ^designate + - ^magnum + - ^trove + # TODO: Remove after adding TLS support for Trove + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-magnum + parent: kolla-ansible-magnum-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-rocky-10-magnum + parent: kolla-ansible-magnum-base + nodeset: kolla-ansible-rocky-10-8GB + +- job: + name: kolla-ansible-ubuntu-noble-magnum + parent: kolla-ansible-magnum-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-magnum + description: | + Runs Kolla-Ansible Magnum scenario jobs. + check: + jobs: + - kolla-ansible-debian-bookworm-magnum + - kolla-ansible-rocky-10-magnum + - kolla-ansible-ubuntu-noble-magnum diff --git a/zuul.d/scenarios/mariadb.yaml b/zuul.d/scenarios/mariadb.yaml new file mode 100644 index 0000000000..3e540e93a1 --- /dev/null +++ b/zuul.d/scenarios/mariadb.yaml @@ -0,0 +1,67 @@ +--- +- job: + name: kolla-ansible-mariadb-base + parent: kolla-ansible-base + voting: true + files: !inherit + - ^ansible/group_vars/all/mariadb.yml + - ^ansible/roles/(loadbalancer|loadbalancer-config|mariadb|proxysql-config)/ + - ^tests/test-mariadb.sh + vars: + scenario: mariadb + scenario_images_core: + - ^cron + - ^fluentd + - ^haproxy + - ^keepalived + - ^kolla-toolbox + - ^mariadb + - ^proxysql + +- job: + name: kolla-ansible-debian-bookworm-mariadb + parent: kolla-ansible-mariadb-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + +- job: + name: kolla-ansible-debian-bookworm-mariadb-upgrade + parent: kolla-ansible-mariadb-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + vars: + is_upgrade: true + +- job: + name: kolla-ansible-rocky-10-mariadb + parent: kolla-ansible-mariadb-base + nodeset: kolla-ansible-rocky-10-multi-16GB + +- job: + name: kolla-ansible-ubuntu-noble-mariadb + parent: kolla-ansible-mariadb-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + +- job: + name: kolla-ansible-ubuntu-noble-mariadb-upgrade + parent: kolla-ansible-mariadb-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + vars: + is_upgrade: true + +- project-template: + name: kolla-ansible-scenario-mariadb + description: | + Runs Kolla-Ansible MariaDB scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-mariadb + - kolla-ansible-debian-bookworm-mariadb-upgrade + - kolla-ansible-rocky-10-mariadb + - kolla-ansible-ubuntu-noble-mariadb + - kolla-ansible-ubuntu-noble-mariadb-upgrade + gate: + jobs: + - kolla-ansible-debian-bookworm-mariadb + - kolla-ansible-debian-bookworm-mariadb-upgrade + - kolla-ansible-rocky-10-mariadb + - kolla-ansible-ubuntu-noble-mariadb + - kolla-ansible-ubuntu-noble-mariadb-upgrade diff --git a/zuul.d/scenarios/masakari.yaml b/zuul.d/scenarios/masakari.yaml new file mode 100644 index 0000000000..4593f0a673 --- /dev/null +++ b/zuul.d/scenarios/masakari.yaml @@ -0,0 +1,41 @@ +--- +- job: + name: kolla-ansible-masakari-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/(hacluster|masakari).yml + - ^ansible/roles/(hacluster|masakari)/ + - ^tests/test-masakari.sh + vars: + scenario: masakari + scenario_images_extra: + - ^masakari + - ^hacluster + # TODO: Remove once Masakari has TLS support + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-masakari + parent: kolla-ansible-masakari-base + nodeset: kolla-ansible-debian-bookworm-masakari-8GB + +- job: + name: kolla-ansible-rocky-10-masakari + parent: kolla-ansible-masakari-base + nodeset: kolla-ansible-rocky-10-masakari-8GB + +- job: + name: kolla-ansible-ubuntu-noble-masakari + parent: kolla-ansible-masakari-base + nodeset: kolla-ansible-ubuntu-noble-masakari-8GB + +- project-template: + name: kolla-ansible-scenario-masakari + description: | + Runs Kolla-Ansible Masakari scenario jobs. + check: + jobs: + - kolla-ansible-debian-bookworm-masakari + - kolla-ansible-rocky-10-masakari + - kolla-ansible-ubuntu-noble-masakari diff --git a/zuul.d/scenarios/nfv.yaml b/zuul.d/scenarios/nfv.yaml new file mode 100644 index 0000000000..60e44f7eea --- /dev/null +++ b/zuul.d/scenarios/nfv.yaml @@ -0,0 +1,37 @@ +--- +- job: + name: kolla-ansible-scenario-nfv-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/(aodh|barbican|heat|mistral|redis|tacker).yml + - ^ansible/roles/(aodh|barbican|heat|mistral|redis|tacker)/ + - ^tests/test-scenario-nfv.sh + vars: + scenario: nfv + scenario_images_extra: + - ^aodh + - ^tacker + - ^mistral + - ^redis + - ^barbican + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-nfv + parent: kolla-ansible-scenario-nfv-base + nodeset: kolla-ansible-debian-bookworm-multi-8GB + +- job: + name: kolla-ansible-ubuntu-noble-nfv + parent: kolla-ansible-scenario-nfv-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + +- project-template: + name: kolla-ansible-scenario-nfv + description: | + Runs Kolla-Ansible NFV scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-nfv + - kolla-ansible-ubuntu-noble-nfv diff --git a/zuul.d/scenarios/octavia.yaml b/zuul.d/scenarios/octavia.yaml new file mode 100644 index 0000000000..d7fdcb2080 --- /dev/null +++ b/zuul.d/scenarios/octavia.yaml @@ -0,0 +1,34 @@ +--- +- job: + name: kolla-ansible-octavia-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/octavia.yml + - ^ansible/roles/(octavia|octavia-certificates)/ + - ^tests/test-octavia.sh + vars: + scenario: octavia + scenario_images_extra: + - ^redis + - ^octavia + tls_enabled: false + +- job: + name: kolla-ansible-debian-bookworm-octavia + parent: kolla-ansible-octavia-base + nodeset: kolla-ansible-debian-bookworm-16GB + +- job: + name: kolla-ansible-ubuntu-noble-octavia + parent: kolla-ansible-octavia-base + nodeset: kolla-ansible-ubuntu-noble-16GB + +- project-template: + name: kolla-ansible-scenario-octavia + description: | + Runs Kolla-Ansible Octavia scenario jobs. + check: + jobs: + - kolla-ansible-debian-bookworm-octavia + - kolla-ansible-ubuntu-noble-octavia diff --git a/zuul.d/scenarios/openbao.yaml b/zuul.d/scenarios/openbao.yaml new file mode 100644 index 0000000000..1cca53ec3a --- /dev/null +++ b/zuul.d/scenarios/openbao.yaml @@ -0,0 +1,40 @@ +--- +- job: + name: kolla-ansible-openbao-base + parent: kolla-ansible-base + run: tests/run-openbao.yml + required-projects: + - openstack/kolla-ansible + - openstack/requirements + voting: false + files: !override + - ^kolla_ansible/ + - ^requirements-core.yml + - ^tests/(pre|run|run-openbao).yml + - ^tests/templates/(inventory|globals-default).j2 + - ^tests/test-openbao-passwords.sh + +- job: + name: kolla-ansible-debian-bookworm-openbao + parent: kolla-ansible-openbao-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-rocky-10-openbao + parent: kolla-ansible-openbao-base + nodeset: kolla-ansible-rocky-10-8GB + +- job: + name: kolla-ansible-ubuntu-noble-openbao + parent: kolla-ansible-openbao-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-openbao + description: | + Runs Kolla-Ansible OpenBao scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-openbao + - kolla-ansible-rocky-10-openbao + - kolla-ansible-ubuntu-noble-openbao diff --git a/zuul.d/scenarios/ovn.yaml b/zuul.d/scenarios/ovn.yaml new file mode 100644 index 0000000000..cbeca20058 --- /dev/null +++ b/zuul.d/scenarios/ovn.yaml @@ -0,0 +1,50 @@ +--- +- job: + name: kolla-ansible-ovn-base + parent: kolla-ansible-base + files: !inherit + - ^ansible/group_vars/all/(neutron|octavia|openvswitch|ovn).yml + - ^ansible/roles/(neutron|octavia|openvswitch|ovn-controller|ovn-db)/ + - ^tests/test-ovn.sh + voting: false + vars: + scenario: ovn + scenario_images_extra: + - ^redis + - ^octavia + - ^ovn + +- job: + name: kolla-ansible-debian-bookworm-ovn + parent: kolla-ansible-ovn-base + nodeset: kolla-ansible-debian-bookworm-multi-16GB + # NOTE(mnasiadka): Often cirros boot is crashing on cannot open root device + voting: false + +- job: + name: kolla-ansible-debian-bookworm-ovn-upgrade + parent: kolla-ansible-debian-bookworm-ovn + nodeset: kolla-ansible-debian-bookworm-multi-16GB + timeout: 10800 + +- job: + name: kolla-ansible-ubuntu-noble-ovn + parent: kolla-ansible-ovn-base + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + +- job: + name: kolla-ansible-ubuntu-noble-ovn-upgrade + parent: kolla-ansible-ubuntu-noble-ovn + nodeset: kolla-ansible-ubuntu-noble-multi-16GB + timeout: 10800 + +- project-template: + name: kolla-ansible-scenario-ovn + description: | + Runs Kolla-Ansible OVN scenario jobs. + check: + jobs: + - kolla-ansible-debian-bookworm-ovn + - kolla-ansible-debian-bookworm-ovn-upgrade + - kolla-ansible-ubuntu-noble-ovn + - kolla-ansible-ubuntu-noble-ovn-upgrade diff --git a/zuul.d/scenarios/prometheus-opensearch.yaml b/zuul.d/scenarios/prometheus-opensearch.yaml new file mode 100644 index 0000000000..f17d34f7de --- /dev/null +++ b/zuul.d/scenarios/prometheus-opensearch.yaml @@ -0,0 +1,61 @@ +--- +- job: + name: kolla-ansible-prometheus-opensearch-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/(common|fluentd|grafana|opensearch|prometheus).yml + - ^ansible/roles/(common|fluentd|grafana|opensearch|prometheus)/ + - ^tests/test-prometheus-opensearch.sh + vars: + scenario: prometheus-opensearch + scenario_images_core: + - ^cron + - ^fluentd + - ^grafana + - ^haproxy + - ^keepalived + - ^kolla-toolbox + - ^mariadb + - ^memcached + - ^opensearch + - ^prometheus + - ^proxysql + - ^rabbitmq + +- job: + name: kolla-ansible-debian-bookworm-prometheus-opensearch + parent: kolla-ansible-prometheus-opensearch-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-debian-bookworm-prometheus-opensearch-upgrade + parent: kolla-ansible-debian-bookworm-prometheus-opensearch + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-rocky-10-prometheus-opensearch + parent: kolla-ansible-prometheus-opensearch-base + nodeset: kolla-ansible-rocky-10-8GB + +- job: + name: kolla-ansible-ubuntu-noble-prometheus-opensearch + parent: kolla-ansible-prometheus-opensearch-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- job: + name: kolla-ansible-ubuntu-noble-prometheus-opensearch-upgrade + parent: kolla-ansible-ubuntu-noble-prometheus-opensearch + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-prometheus-opensearch + description: | + Runs Kolla-Ansible Prometheus OpenSearch scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-prometheus-opensearch + - kolla-ansible-debian-bookworm-prometheus-opensearch-upgrade + - kolla-ansible-rocky-10-prometheus-opensearch + - kolla-ansible-ubuntu-noble-prometheus-opensearch + - kolla-ansible-ubuntu-noble-prometheus-opensearch-upgrade diff --git a/zuul.d/scenarios/skyline.yaml b/zuul.d/scenarios/skyline.yaml new file mode 100644 index 0000000000..5ff97810be --- /dev/null +++ b/zuul.d/scenarios/skyline.yaml @@ -0,0 +1,64 @@ +--- +- job: + name: kolla-ansible-skyline-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/skyline.yml + - ^ansible/roles/skyline/ + - ^tests/test-skyline.sh + vars: + scenario: skyline + scenario_images_extra: + - ^skyline + +- job: + name: kolla-ansible-skyline-sso-base + parent: kolla-ansible-skyline-base + files: !inherit + - ^tests/test-skyline-sso.sh + vars: + scenario: skyline-sso + +- job: + name: kolla-ansible-debian-bookworm-skyline + parent: kolla-ansible-skyline-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-debian-bookworm-skyline-sso + parent: kolla-ansible-skyline-sso-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-rocky-10-skyline + parent: kolla-ansible-skyline-base + nodeset: kolla-ansible-rocky-10-8GB + +- job: + name: kolla-ansible-rocky-10-skyline-sso + parent: kolla-ansible-skyline-sso-base + nodeset: kolla-ansible-rocky-10-8GB + +- job: + name: kolla-ansible-ubuntu-noble-skyline + parent: kolla-ansible-skyline-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- job: + name: kolla-ansible-ubuntu-noble-skyline-sso + parent: kolla-ansible-skyline-sso-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-skyline + description: | + Runs Kolla-Ansible Skyline scenario jobs. + check: + jobs: + - kolla-ansible-debian-bookworm-skyline + - kolla-ansible-debian-bookworm-skyline-sso + - kolla-ansible-rocky-10-skyline + - kolla-ansible-rocky-10-skyline-sso + - kolla-ansible-ubuntu-noble-skyline + - kolla-ansible-ubuntu-noble-skyline-sso diff --git a/zuul.d/scenarios/telemetry.yaml b/zuul.d/scenarios/telemetry.yaml new file mode 100644 index 0000000000..08d01d890c --- /dev/null +++ b/zuul.d/scenarios/telemetry.yaml @@ -0,0 +1,40 @@ +--- +- job: + name: kolla-ansible-telemetry-base + parent: kolla-ansible-base + voting: false + files: + - ^ansible/group_vars/all/(aodh|ceilometer|gnocchi).yml + - ^ansible/roles/(aodh|ceilometer|gnocchi)/ + - ^tests/test-telemetry.sh + vars: + scenario: telemetry + scenario_images_extra: + - ^aodh + - ^ceilometer + - ^gnocchi + +- job: + name: kolla-ansible-debian-bookworm-telemetry + parent: kolla-ansible-telemetry-base + nodeset: kolla-ansible-debian-bookworm-8GB + +- job: + name: kolla-ansible-rocky-10-telemetry + parent: kolla-ansible-telemetry-base + nodeset: kolla-ansible-rocky-10-8GB + +- job: + name: kolla-ansible-ubuntu-noble-telemetry + parent: kolla-ansible-telemetry-base + nodeset: kolla-ansible-ubuntu-noble-8GB + +- project-template: + name: kolla-ansible-scenario-telemetry + description: | + Runs Kolla-Ansible Telemetry scenario jobs. 
+ check: + jobs: + - kolla-ansible-debian-bookworm-telemetry + - kolla-ansible-rocky-10-telemetry + - kolla-ansible-ubuntu-noble-telemetry diff --git a/zuul.d/scenarios/zun.yaml b/zuul.d/scenarios/zun.yaml new file mode 100644 index 0000000000..25397725dc --- /dev/null +++ b/zuul.d/scenarios/zun.yaml @@ -0,0 +1,42 @@ +--- +- job: + name: kolla-ansible-zun-base + parent: kolla-ansible-base + voting: false + files: !inherit + - ^ansible/group_vars/all/(zun|kuryr|etcd|cinder|iscsi).yml + - ^ansible/roles/(zun|kuryr|etcd|cinder|iscsi)/ + - ^tests/setup_disks.sh + - ^tests/test-core-openstack.sh + - ^tests/test-zun.sh + - ^tests/test-dashboard.sh + vars: + kolla_ansible_setup_disks_file_path: "/var/lib/cinder_data.img" + kolla_ansible_setup_disks_vg_name: "cinder-volumes" + scenario: zun + scenario_images_extra: + - ^zun + - ^kuryr + - ^etcd + - ^cinder + - ^iscsid + - ^tgtd + +- job: + name: kolla-ansible-debian-zun + parent: kolla-ansible-zun-base + nodeset: kolla-ansible-debian-bookworm-multi-8GB + +- job: + name: kolla-ansible-ubuntu-zun + parent: kolla-ansible-zun-base + nodeset: kolla-ansible-ubuntu-noble-multi-8GB + +- project-template: + name: kolla-ansible-scenario-zun + description: | + Runs Kolla-Ansible Zun scenario jobs. + check: + jobs: + - kolla-ansible-debian-zun + - kolla-ansible-ubuntu-zun
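For context, the project-templates defined above are meant to be referenced from a project's Zuul configuration rather than run directly. A minimal sketch of how a consuming project might opt into two of these scenario templates is shown below; the file path and the particular templates chosen are illustrative only and are not part of this patch:

    # zuul.d/project.yaml (illustrative sketch, not part of this change)
    - project:
        templates:
          # Pulls in the MariaDB scenario jobs (check and gate, per the
          # template definition above).
          - kolla-ansible-scenario-mariadb
          # Pulls in the OVN scenario jobs (check only).
          - kolla-ansible-scenario-ovn

Zuul merges each listed template's pipeline job lists into the project's own pipelines, so enabling or disabling a scenario for a project becomes a one-line change in its template list.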