diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index e33d2d4244..9338d7f784 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -285,6 +285,12 @@ barbican_api_listen_port: "{{ barbican_api_port }}"
blazar_api_port: "1234"
+caso_tcp_output_port: "24224"
+
+ceph_rgw_internal_fqdn: "{{ kolla_internal_fqdn }}"
+ceph_rgw_external_fqdn: "{{ kolla_external_fqdn }}"
+ceph_rgw_port: "6780"
+
cinder_internal_fqdn: "{{ kolla_internal_fqdn }}"
cinder_external_fqdn: "{{ kolla_external_fqdn }}"
cinder_api_port: "8776"
@@ -447,6 +453,7 @@ placement_api_port: "8780"
placement_api_listen_port: "{{ placement_api_port }}"
prometheus_port: "9091"
+prometheus_libvirt_exporter_port: "9177"
prometheus_node_exporter_port: "9100"
prometheus_mysqld_exporter_port: "9104"
prometheus_haproxy_exporter_port: "9101"
@@ -584,6 +591,7 @@ enable_glance: "{{ enable_openstack_core | bool }}"
enable_haproxy: "yes"
enable_keepalived: "{{ enable_haproxy | bool }}"
enable_keystone: "{{ enable_openstack_core | bool }}"
+enable_keystone_federation: "{{ (keystone_identity_providers | length > 0) and (keystone_identity_mappings | length > 0) }}"
enable_mariadb: "yes"
enable_memcached: "yes"
enable_neutron: "{{ enable_openstack_core | bool }}"
@@ -600,10 +608,13 @@ enable_haproxy_memcached: "no"
enable_aodh: "no"
enable_barbican: "no"
enable_blazar: "no"
+enable_caso: "no"
enable_ceilometer: "no"
enable_ceilometer_ipmi: "no"
enable_cells: "no"
enable_central_logging: "no"
+enable_ceph_rgw: "no"
+enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}"
enable_chrony: "yes"
enable_cinder: "no"
enable_cinder_backup: "yes"
@@ -1053,6 +1064,7 @@ enable_nova_horizon_policy_file: "{{ enable_nova }}"
horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
horizon_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port }}"
+horizon_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_external | bool else horizon_port }}"
#################
# Qinling options
@@ -1112,6 +1124,7 @@ use_common_mariadb_user: "no"
# Prometheus
############
enable_prometheus_server: "{{ enable_prometheus | bool }}"
+enable_prometheus_libvirt_exporter: "no"
enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}"
enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}"
enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}"
@@ -1125,6 +1138,7 @@ enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}"
prometheus_alertmanager_user: "admin"
+prometheus_libvirt_exporter_interval: "60s"
prometheus_openstack_exporter_interval: "60s"
prometheus_openstack_exporter_timeout: "10s"
prometheus_elasticsearch_exporter_interval: "60s"
@@ -1207,3 +1221,45 @@ swift_public_endpoint: "{{ public_protocol }}://{{ swift_external_fqdn | put_add
octavia_admin_endpoint: "{{ admin_protocol }}://{{ octavia_internal_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
octavia_internal_endpoint: "{{ internal_protocol }}://{{ octavia_internal_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
octavia_public_endpoint: "{{ public_protocol }}://{{ octavia_external_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
+
+###################################
+# Identity federation configuration
+###################################
+# Here we configure the metadata of each IdP that is required to implement identity federation with OpenStack Keystone.
+# The administrator must provide the following metadata for every IdP:
+# * name (the internal name of the IdP in Keystone);
+# * openstack_domain (the Keystone domain that the IdP belongs to);
+# * protocol (the federation protocol used by the IdP, e.g. openid or saml);
+# * identifier (the IdP identifier, e.g. https://accounts.google.com);
+# * public_name (the public name shown to users in Horizon);
+# * attribute_mapping (the attribute mapping to be used for this IdP; the mapping itself is configured via "keystone_identity_mappings" below);
+# * metadata_folder (a folder containing all of the identity provider metadata as JSON files named after the identifier, with the protocol
+#   prefix removed and '/' escaped as %2F, followed by '.provider', '.client' or '.conf'; e.g. accounts.google.com.provider. All three JSON
+#   files must be present in the folder; if you do not override anything in the '.conf' JSON, leave it as an empty JSON object '{}');
+# * certificate_file (the path to the identity provider certificate file, which must be named '<certificate-key-id>.pem';
+#   e.g. LRVweuT51StjMdsna59jKfB3xw0r8Iz1d1J1HeAbmlw.pem; the key ID can be found as the "kid" in the identity provider's '.well-known/openid-configuration' jwks_uri);
+#
+# The IdP metadata is presented to Kolla-Ansible as in the following example:
+# keystone_identity_providers:
+# - name: "myidp1"
+# openstack_domain: "my-domain"
+# protocol: "openid"
+# identifier: "https://accounts.google.com"
+# public_name: "Authenticate via myidp1"
+# attribute_mapping: "mappingId1"
+# metadata_folder: "path/to/metadata/folder"
+# certificate_file: "path/to/certificate/file.pem"
+#
+# We also need to configure the attribute mappings used by the IdPs.
+# The configuration of attribute mappings is a list of objects, where each
+# object must have a 'name' (which maps to the 'attribute_mapping' of the IdP
+# object in the IdPs list), and a 'file' with the fully qualified path to a mapping file.
+# keystone_identity_mappings:
+# - name: "mappingId1"
+# file: "/full/qualified/path/to/mapping/json/file/to/mappingId1"
+# - name: "mappingId2"
+# file: "/full/qualified/path/to/mapping/json/file/to/mappingId2"
+# - name: "mappingId3"
+# file: "/full/qualified/path/to/mapping/json/file/to/mappingId3"
+keystone_identity_providers: []
+keystone_identity_mappings: []
diff --git a/ansible/inventory/all-in-one b/ansible/inventory/all-in-one
index f713d1c1ce..0bda5b52bb 100644
--- a/ansible/inventory/all-in-one
+++ b/ansible/inventory/all-in-one
@@ -18,6 +18,10 @@ localhost ansible_connection=local
[deployment]
localhost ansible_connection=local
+# Caso
+[caso:children]
+monitoring
+
# You can explicitly specify which hosts run each project by updating the
# groups in the sections below. Common services are grouped together.
@@ -728,6 +732,9 @@ compute
network
storage
+[prometheus-libvirt-exporter:children]
+compute
+
[prometheus-mysqld-exporter:children]
mariadb
diff --git a/ansible/inventory/multinode b/ansible/inventory/multinode
index c534669d82..424a3e1f8c 100644
--- a/ansible/inventory/multinode
+++ b/ansible/inventory/multinode
@@ -42,6 +42,10 @@ monitoring
[tls-backend:children]
control
+# Caso
+[caso:children]
+monitoring
+
# You can explicitly specify which hosts run each project by updating the
# groups in the sections below. Common services are grouped together.
@@ -746,6 +750,9 @@ compute
network
storage
+[prometheus-libvirt-exporter:children]
+compute
+
[prometheus-mysqld-exporter:children]
mariadb
diff --git a/ansible/roles/caso/defaults/main.yml b/ansible/roles/caso/defaults/main.yml
new file mode 100644
index 0000000000..462c9f5b36
--- /dev/null
+++ b/ansible/roles/caso/defaults/main.yml
@@ -0,0 +1,40 @@
+---
+caso_services:
+ caso:
+ container_name: caso
+ group: caso
+ enabled: true
+ image: "{{ caso_image_full }}"
+ volumes:
+ - "{{ node_config_directory }}/caso/:{{ container_config_directory }}/"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "caso_spool:/var/lib/caso"
+ - "caso_ssm_outgoing:/var/spool/apel/outgoing/openstack"
+ - "kolla_logs:/var/log/kolla/"
+ dimensions: "{{ caso_dimensions }}"
+
+####################
+# caso
+####################
+caso_site_name: "kolla_caso"
+caso_projects: []
+caso_logging_debug: "{{ openstack_logging_debug }}"
+caso_log_dir: "/var/log/kolla/caso"
+caso_cron_table: "10 * * * *"
+caso_messengers:
+ - caso.messenger.logstash.LogstashMessenger
+
+####################
+# OpenStack
+####################
+caso_openstack_auth: "{{ openstack_auth }}"
+caso_keystone_user: "caso"
+
+####################
+# Docker
+####################
+caso_install_type: "{{ kolla_install_type }}"
+caso_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ caso_install_type }}-caso"
+caso_tag: "{{ openstack_release }}"
+caso_image_full: "{{ caso_image }}:{{ caso_tag }}"
+caso_dimensions: "{{ default_container_dimensions }}"
diff --git a/ansible/roles/caso/handlers/main.yml b/ansible/roles/caso/handlers/main.yml
new file mode 100644
index 0000000000..07cd0f24d4
--- /dev/null
+++ b/ansible/roles/caso/handlers/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Restart caso container
+ vars:
+ service_name: "caso"
+ service: "{{ caso_services[service_name] }}"
+ config_json: "{{ caso_config_jsons.results|selectattr('item.key', 'equalto', service_name)|first }}"
+ caso_container: "{{ check_caso_containers.results|selectattr('item.key', 'equalto', service_name)|first }}"
+ become: true
+ kolla_docker:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+ when:
+ - kolla_action != "config"
+ - inventory_hostname in groups[service.group]
+ - service.enabled | bool
+ - config_json.changed | bool
+ or caso_conf.changed | bool
+ or caso_vom_conf.changed | bool
+ or caso_crontab.changed | bool
+ or caso_container.changed | bool
diff --git a/ansible/roles/caso/meta/main.yml b/ansible/roles/caso/meta/main.yml
new file mode 100644
index 0000000000..6b4fff8fef
--- /dev/null
+++ b/ansible/roles/caso/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - { role: common }
diff --git a/ansible/roles/caso/tasks/check.yml b/ansible/roles/caso/tasks/check.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/caso/tasks/check.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/caso/tasks/config.yml b/ansible/roles/caso/tasks/config.yml
new file mode 100644
index 0000000000..7e4d7eec3a
--- /dev/null
+++ b/ansible/roles/caso/tasks/config.yml
@@ -0,0 +1,90 @@
+---
+- name: Ensuring config directories exist
+ file:
+ path: "{{ node_config_directory }}/{{ item.key }}"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+
+- name: Copying over config.json files for services
+ template:
+ src: "{{ item.key }}.json.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
+ mode: "0660"
+ become: true
+ register: caso_config_jsons
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+ notify:
+ - Restart caso container
+
+- name: Copying over caso config
+ merge_configs:
+ sources:
+ - "{{ role_path }}/templates/caso.conf.j2"
+ - "{{ node_custom_config }}//caso/caso.conf"
+ - "{{ node_custom_config }}/{{ item.key }}/{{ inventory_hostname }}/caso.conf"
+ dest: "{{ node_config_directory }}/{{ item.key }}/caso.conf"
+ mode: "0660"
+ become: true
+ register: caso_conf
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+ notify:
+ - Restart caso container
+
+- name: Copying over caso crontab
+ template:
+ src: "{{ role_path }}/templates/caso.crontab.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/caso.crontab"
+ mode: "0660"
+ become: true
+ register: caso_crontab
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+ notify:
+ - Restart caso container
+
+- name: Copying over caso voms file
+ template:
+ src: "{{ role_path }}/templates/voms.json.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/voms.json"
+ mode: "0660"
+ become: true
+ register: caso_vom_conf
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+ notify:
+ - Restart caso container
+
+- name: Check caso containers
+ become: true
+ kolla_docker:
+ action: "compare_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ item.value.container_name }}"
+ image: "{{ item.value.image }}"
+ volumes: "{{ item.value.volumes }}"
+ dimensions: "{{ item.value.dimensions }}"
+ register: check_caso_containers
+ when:
+ - kolla_action != "config"
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+ notify:
+ - Restart caso container
diff --git a/ansible/roles/caso/tasks/deploy.yml b/ansible/roles/caso/tasks/deploy.yml
new file mode 100644
index 0000000000..27c275b7a4
--- /dev/null
+++ b/ansible/roles/caso/tasks/deploy.yml
@@ -0,0 +1,12 @@
+---
+- include_tasks: register.yml
+ when: inventory_hostname in groups['caso']
+
+- include_tasks: config.yml
+ when: inventory_hostname in groups['caso']
+
+- name: Flush handlers
+ meta: flush_handlers
+
+- include_tasks: check.yml
+ when: inventory_hostname in groups['caso']
diff --git a/ansible/roles/caso/tasks/main.yml b/ansible/roles/caso/tasks/main.yml
new file mode 100644
index 0000000000..bc5d1e6257
--- /dev/null
+++ b/ansible/roles/caso/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/caso/tasks/precheck.yml b/ansible/roles/caso/tasks/precheck.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/caso/tasks/precheck.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/caso/tasks/pull.yml b/ansible/roles/caso/tasks/pull.yml
new file mode 100644
index 0000000000..5b08cc879a
--- /dev/null
+++ b/ansible/roles/caso/tasks/pull.yml
@@ -0,0 +1,11 @@
+---
+- name: Pulling caso images
+ become: true
+ kolla_docker:
+ action: "pull_image"
+ common_options: "{{ docker_common_options }}"
+ image: "{{ item.value.image }}"
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
diff --git a/ansible/roles/caso/tasks/reconfigure.yml b/ansible/roles/caso/tasks/reconfigure.yml
new file mode 100644
index 0000000000..f670a5b78d
--- /dev/null
+++ b/ansible/roles/caso/tasks/reconfigure.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: deploy.yml
diff --git a/ansible/roles/caso/tasks/register.yml b/ansible/roles/caso/tasks/register.yml
new file mode 100644
index 0000000000..b0b2049be9
--- /dev/null
+++ b/ansible/roles/caso/tasks/register.yml
@@ -0,0 +1,14 @@
+---
+- name: Configure cASO user
+ kolla_toolbox:
+ module_name: "kolla_keystone_user"
+ module_args:
+ project: "{{ item }}"
+ user: "{{ caso_keystone_user }}"
+ password: "{{ caso_keystone_password }}"
+ region_name: "{{ openstack_region_name }}"
+ role: admin
+ auth: "{{ caso_openstack_auth }}"
+ endpoint_type: "{{ openstack_interface }}"
+ with_items: "{{ caso_projects }}"
+ run_once: True
diff --git a/ansible/roles/caso/tasks/upgrade.yml b/ansible/roles/caso/tasks/upgrade.yml
new file mode 100644
index 0000000000..375dcad19b
--- /dev/null
+++ b/ansible/roles/caso/tasks/upgrade.yml
@@ -0,0 +1,5 @@
+---
+- include_tasks: config.yml
+
+- name: Flush handlers
+ meta: flush_handlers
diff --git a/ansible/roles/caso/templates/caso.conf.j2 b/ansible/roles/caso/templates/caso.conf.j2
new file mode 100644
index 0000000000..81502116df
--- /dev/null
+++ b/ansible/roles/caso/templates/caso.conf.j2
@@ -0,0 +1,23 @@
+[DEFAULT]
+messengers = {{ caso_messengers|join(', ') }}
+site_name = {{ caso_site_name }}
+projects = {{ caso_projects|join(', ') }}
+debug = {{ caso_logging_debug }}
+log_file = caso.log
+log_dir = {{ caso_log_dir }}
+log_rotation_type = none
+spooldir = /var/lib/caso
+
+[keystone_auth]
+auth_type = password
+auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
+project_domain_id = {{ default_project_domain_id }}
+username = {{ caso_keystone_user }}
+user_domain_id = {{ default_user_domain_id }}
+password = {{ caso_keystone_password }}
+
+[logstash]
+port = {{ caso_tcp_output_port }}
+
+[ssm]
+output_path = /var/spool/apel/outgoing/openstack
diff --git a/ansible/roles/caso/templates/caso.crontab.j2 b/ansible/roles/caso/templates/caso.crontab.j2
new file mode 100644
index 0000000000..f406d808eb
--- /dev/null
+++ b/ansible/roles/caso/templates/caso.crontab.j2
@@ -0,0 +1,3 @@
+{{ caso_cron_table }} caso-extract --config-file /etc/caso/caso.conf
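+# With the default caso_cron_table ("10 * * * *") the line above renders as:
+# 10 * * * * caso-extract --config-file /etc/caso/caso.conf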
diff --git a/ansible/roles/caso/templates/caso.json.j2 b/ansible/roles/caso/templates/caso.json.j2
new file mode 100644
index 0000000000..949c4ca022
--- /dev/null
+++ b/ansible/roles/caso/templates/caso.json.j2
@@ -0,0 +1,41 @@
+{% set cron_cmd = 'cron -f' if kolla_base_distro in ['ubuntu', 'debian'] else 'crond -s -n' %}
+{
+ "command": "{{ cron_cmd }}",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/caso.crontab",
+ "dest": "/var/spool/cron/caso",
+ "owner": "caso",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/caso.conf",
+ "dest": "/etc/caso/caso.conf",
+ "owner": "caso",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/voms.json",
+ "dest": "/etc/caso/voms.json",
+ "owner": "caso",
+ "perm": "0600"
+ }
+ ],
+ "permissions": [
+ {
+ "path": "/var/log/kolla/caso",
+ "owner": "caso:caso",
+ "recurse": true
+ },
+ {
+ "path": "/var/spool/apel/outgoing/openstack",
+ "owner": "caso:caso",
+ "recurse": true
+ },
+ {
+ "path": "/var/lib/caso",
+ "owner": "caso:caso",
+ "recurse": true
+ }
+ ]
+}
diff --git a/ansible/roles/caso/templates/voms.json.j2 b/ansible/roles/caso/templates/voms.json.j2
new file mode 100644
index 0000000000..559eccb765
--- /dev/null
+++ b/ansible/roles/caso/templates/voms.json.j2
@@ -0,0 +1,9 @@
+{
+ "VO FQAN": {
+ "projects": ["local tenant 1", "local tenant 2"]
+ },
+ "VO NAME": {
+ "projects": ["local tenant 3"]
+ }
+}
+
diff --git a/ansible/roles/ceph-rgw/defaults/main.yml b/ansible/roles/ceph-rgw/defaults/main.yml
new file mode 100644
index 0000000000..f997dd6ae4
--- /dev/null
+++ b/ansible/roles/ceph-rgw/defaults/main.yml
@@ -0,0 +1,85 @@
+---
+project_name: "ceph-rgw"
+
+ceph_rgw_services:
+ # NOTE(mgoddard): There is no container deployment, this is used for load
+ # balancer configuration.
+ ceph-rgw:
+ group: "all"
+ enabled: "{{ enable_ceph_rgw | bool }}"
+ haproxy:
+ radosgw:
+ enabled: "{{ enable_ceph_rgw_loadbalancer | bool }}"
+ mode: "http"
+ external: false
+ port: "{{ ceph_rgw_port }}"
+ custom_member_list: "{{ ceph_rgw_haproxy_members }}"
+ radosgw_external:
+ enabled: "{{ enable_ceph_rgw_loadbalancer | bool }}"
+ mode: "http"
+ external: true
+ port: "{{ ceph_rgw_port }}"
+ custom_member_list: "{{ ceph_rgw_haproxy_members }}"
+
+####################
+# Load balancer
+####################
+
+# List of Ceph RadosGW hostname:port to use as HAProxy backends.
+ceph_rgw_hosts: []
+ceph_rgw_haproxy_members: "{{ ceph_rgw_hosts | map('regex_replace', '(.*)', 'server \\1 \\1 ' + ceph_rgw_haproxy_healthcheck) | list }}"
+ceph_rgw_haproxy_healthcheck: "check inter 2000 rise 2 fall 5"
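+# For example (hypothetical hosts), setting:
+#   ceph_rgw_hosts: ['rgw0.example.com:8080', 'rgw1.example.com:8080']
+# produces one HAProxy backend entry per host, of the form:
+#   server rgw0.example.com:8080 rgw0.example.com:8080 check inter 2000 rise 2 fall 5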
+
+
+####################
+# OpenStack
+####################
+
+# Whether to register Ceph RadosGW swift-compatible endpoints in Keystone.
+enable_ceph_rgw_keystone: "{{ enable_ceph_rgw | bool }}"
+
+# Enable/disable ceph-rgw compatibility with OpenStack Swift.
+ceph_rgw_compatibility: false
+
+# Enable/disable including the account (project) in the endpoint URL. This
+# allows for cross-project and public object access.
+ceph_rgw_account_in_url: false
+
+ceph_rgw_endpoint_path: "{{ '/' if ceph_rgw_compatibility | bool else '/swift/' }}v1{% if ceph_rgw_account_in_url | bool %}/AUTH_%(project_id)s{% endif %}"
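+# With the defaults above the path renders as '/swift/v1'; enabling
+# ceph_rgw_compatibility changes it to '/v1', and enabling
+# ceph_rgw_account_in_url appends '/AUTH_%(project_id)s' to it.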
+
+ceph_rgw_admin_endpoint: "{{ admin_protocol }}://{{ ceph_rgw_internal_fqdn | put_address_in_context('url') }}:{{ ceph_rgw_port }}{{ ceph_rgw_endpoint_path }}"
+ceph_rgw_internal_endpoint: "{{ internal_protocol }}://{{ ceph_rgw_internal_fqdn | put_address_in_context('url') }}:{{ ceph_rgw_port }}{{ ceph_rgw_endpoint_path }}"
+ceph_rgw_public_endpoint: "{{ public_protocol }}://{{ ceph_rgw_external_fqdn | put_address_in_context('url') }}:{{ ceph_rgw_port }}{{ ceph_rgw_endpoint_path }}"
+
+ceph_rgw_keystone_user: "ceph_rgw"
+
+openstack_ceph_rgw_auth: "{{ openstack_auth }}"
+
+
+####################
+# Keystone
+####################
+ceph_rgw_ks_services:
+ - name: "swift"
+ type: "object-store"
+ description: "Openstack Object Storage"
+ endpoints:
+ - {'interface': 'admin', 'url': '{{ ceph_rgw_admin_endpoint }}'}
+ - {'interface': 'internal', 'url': '{{ ceph_rgw_internal_endpoint }}'}
+ - {'interface': 'public', 'url': '{{ ceph_rgw_public_endpoint }}'}
+
+ceph_rgw_ks_users:
+ - project: "service"
+ user: "{{ ceph_rgw_keystone_user }}"
+ password: "{{ ceph_rgw_keystone_password }}"
+ role: "admin"
+
+ceph_rgw_ks_roles:
+ - "ResellerAdmin"
diff --git a/ansible/roles/ceph-rgw/tasks/check.yml b/ansible/roles/ceph-rgw/tasks/check.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/check.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/ceph-rgw/tasks/config.yml b/ansible/roles/ceph-rgw/tasks/config.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/config.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/ceph-rgw/tasks/deploy-containers.yml b/ansible/roles/ceph-rgw/tasks/deploy-containers.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/deploy-containers.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/ceph-rgw/tasks/deploy.yml b/ansible/roles/ceph-rgw/tasks/deploy.yml
new file mode 100644
index 0000000000..40daddd63b
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/deploy.yml
@@ -0,0 +1,2 @@
+---
+- import_tasks: register.yml
diff --git a/ansible/roles/ceph-rgw/tasks/loadbalancer.yml b/ansible/roles/ceph-rgw/tasks/loadbalancer.yml
new file mode 100644
index 0000000000..d29f3e56d1
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/loadbalancer.yml
@@ -0,0 +1,7 @@
+---
+- name: "Configure haproxy for {{ project_name }}"
+ import_role:
+ role: haproxy-config
+ vars:
+ project_services: "{{ ceph_rgw_services }}"
+ tags: always
diff --git a/ansible/roles/ceph-rgw/tasks/main.yml b/ansible/roles/ceph-rgw/tasks/main.yml
new file mode 100644
index 0000000000..bc5d1e6257
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/ceph-rgw/tasks/precheck.yml b/ansible/roles/ceph-rgw/tasks/precheck.yml
new file mode 100644
index 0000000000..5430f4837a
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/precheck.yml
@@ -0,0 +1,10 @@
+---
+- name: Fail if load balancer members not set
+ fail:
+ msg: >-
+ Ceph RadosGW load balancer configuration is enabled
+ (enable_ceph_rgw_loadbalancer) but no HAProxy members are configured.
+ Have you set ceph_rgw_hosts?
+ when:
+ - enable_ceph_rgw_loadbalancer | bool
+ - ceph_rgw_haproxy_members | length == 0
diff --git a/ansible/roles/ceph-rgw/tasks/pull.yml b/ansible/roles/ceph-rgw/tasks/pull.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/pull.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/ceph-rgw/tasks/reconfigure.yml b/ansible/roles/ceph-rgw/tasks/reconfigure.yml
new file mode 100644
index 0000000000..5b10a7e111
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/reconfigure.yml
@@ -0,0 +1,2 @@
+---
+- import_tasks: deploy.yml
diff --git a/ansible/roles/ceph-rgw/tasks/register.yml b/ansible/roles/ceph-rgw/tasks/register.yml
new file mode 100644
index 0000000000..c33683163c
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/register.yml
@@ -0,0 +1,9 @@
+---
+- import_role:
+ name: service-ks-register
+ vars:
+ service_ks_register_auth: "{{ openstack_ceph_rgw_auth }}"
+ service_ks_register_services: "{{ ceph_rgw_ks_services }}"
+ service_ks_register_users: "{{ ceph_rgw_ks_users }}"
+ service_ks_register_roles: "{{ ceph_rgw_ks_roles }}"
+ when: enable_ceph_rgw_keystone | bool
diff --git a/ansible/roles/ceph-rgw/tasks/stop.yml b/ansible/roles/ceph-rgw/tasks/stop.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/stop.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/ceph-rgw/tasks/upgrade.yml b/ansible/roles/ceph-rgw/tasks/upgrade.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/ceph-rgw/tasks/upgrade.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/common/tasks/config.yml b/ansible/roles/common/tasks/config.yml
index d8a6fde79f..73651d478d 100644
--- a/ansible/roles/common/tasks/config.yml
+++ b/ansible/roles/common/tasks/config.yml
@@ -116,6 +116,7 @@
# Inputs
fluentd_input_files: "{{ default_input_files | customise_fluentd(customised_input_files) }}"
default_input_files:
+ - "conf/input/99-caso.conf.j2"
- "conf/input/00-global.conf.j2"
- "conf/input/01-syslog.conf.j2"
- "conf/input/02-mariadb.conf.j2"
@@ -173,6 +174,7 @@
- { name: "barbican", enabled: "{{ enable_barbican | bool }}" }
- { name: "blazar", enabled: "{{ enable_blazar | bool }}" }
- { name: "ceilometer", enabled: "{{ enable_ceilometer | bool }}" }
+ - { name: "caso", enabled: "{{ enable_caso | bool }}" }
- { name: "chrony", enabled: "{{ enable_chrony | bool }}" }
- { name: "cinder", enabled: "{{ enable_cinder | bool }}" }
- { name: "cloudkitty", enabled: "{{ enable_cloudkitty | bool }}" }
diff --git a/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2 b/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2
index 2d5fef5bbd..874c274d14 100644
--- a/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2
+++ b/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2
@@ -92,3 +92,12 @@
{% endif %}
+
+{% if enable_caso | bool and inventory_hostname in groups['caso'] %}
+<filter apel.events>
+    @type parser
+    format json
+    key_name Payload
+    reserve_data true
+</filter>
+{% endif %}
diff --git a/ansible/roles/common/templates/conf/input/99-caso.conf.j2 b/ansible/roles/common/templates/conf/input/99-caso.conf.j2
new file mode 100644
index 0000000000..5c577de410
--- /dev/null
+++ b/ansible/roles/common/templates/conf/input/99-caso.conf.j2
@@ -0,0 +1,8 @@
+<source>
+    @type tcp
+    tag apel.events
+    port {{ caso_tcp_output_port }}
+    bind 127.0.0.1
+    format /^(?<Payload>.*)$/
+    emit_unmatched_lines true
+</source>
diff --git a/ansible/roles/common/templates/conf/output/01-es.conf.j2 b/ansible/roles/common/templates/conf/output/01-es.conf.j2
index 956d30156b..f35803efda 100644
--- a/ansible/roles/common/templates/conf/output/01-es.conf.j2
+++ b/ansible/roles/common/templates/conf/output/01-es.conf.j2
@@ -1,3 +1,17 @@
+{% if enable_caso | bool and inventory_hostname in groups['caso'] %}
+<match apel.events>
+    @type copy
+    <store>
+        @type elasticsearch
+        host {{ elasticsearch_address }}
+        port {{ elasticsearch_port }}
+        logstash_format true
+        logstash_prefix apel
+        flush_interval 15s
+    </store>
+</match>
+{% endif %}
+
 <match **>
     @type copy
     <store>
diff --git a/ansible/roles/common/templates/cron-logrotate-caso.conf.j2 b/ansible/roles/common/templates/cron-logrotate-caso.conf.j2
new file mode 100644
index 0000000000..2d4642e4b5
--- /dev/null
+++ b/ansible/roles/common/templates/cron-logrotate-caso.conf.j2
@@ -0,0 +1,3 @@
+"/var/log/kolla/caso/*.log"
+{
+}
diff --git a/ansible/roles/haproxy-config/defaults/main.yml b/ansible/roles/haproxy-config/defaults/main.yml
index 1fc226dac9..e8f6aa1181 100644
--- a/ansible/roles/haproxy-config/defaults/main.yml
+++ b/ansible/roles/haproxy-config/defaults/main.yml
@@ -13,3 +13,5 @@ haproxy_backend_tcp_extra: []
haproxy_health_check: "check inter 2000 rise 2 fall 5"
haproxy_health_check_ssl: "check check-ssl inter 2000 rise 2 fall 5"
+
+haproxy_enable_federation_openid: "{{ keystone_identity_providers | selectattr('protocol','equalto','openid') | list | count > 0 }}"
diff --git a/ansible/roles/haproxy/tasks/precheck.yml b/ansible/roles/haproxy/tasks/precheck.yml
index 86ca0b79b6..94e0b20edc 100644
--- a/ansible/roles/haproxy/tasks/precheck.yml
+++ b/ansible/roles/haproxy/tasks/precheck.yml
@@ -202,6 +202,20 @@
- haproxy_stat.find('blazar_api') == -1
- haproxy_vip_prechecks
+- name: Checking free port for Ceph RadosGW HAProxy
+ wait_for:
+ host: "{{ kolla_internal_vip_address }}"
+ port: "{{ ceph_rgw_port }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - enable_ceph_rgw | bool
+ - enable_ceph_rgw_loadbalancer | bool
+ - inventory_hostname in groups['haproxy']
+ - haproxy_stat.find('radosgw') == -1
+ - haproxy_vip_prechecks
+
- name: Checking free port for Cinder API HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
diff --git a/ansible/roles/horizon/defaults/main.yml b/ansible/roles/horizon/defaults/main.yml
index b8bc5a68e7..ea60351747 100644
--- a/ansible/roles/horizon/defaults/main.yml
+++ b/ansible/roles/horizon/defaults/main.yml
@@ -124,7 +124,7 @@ horizon_extra_volumes: "{{ default_extra_volumes }}"
# OpenStack
####################
horizon_logging_debug: "{{ openstack_logging_debug }}"
-horizon_keystone_url: "{{ keystone_internal_url }}/v3"
+horizon_keystone_url: "{{ keystone_public_url if horizon_use_keystone_public_url | bool else keystone_internal_url }}/v3"
####################
@@ -145,3 +145,9 @@ horizon_dev_mode: "{{ kolla_dev_mode }}"
horizon_murano_dev_mode: "{{ kolla_dev_mode }}"
horizon_source_version: "{{ kolla_source_version }}"
horizon_murano_source_version: "{{ kolla_source_version }}"
+
+# This variable defines which of Keystone's URLs Horizon is configured with.
+# In some cases, such as when using OIDC, Horizon must be configured with Keystone's public URL.
+# Rather than overriding the whole "horizon_keystone_url", this flag allows an easier integration,
+# because the Keystone public URL is already defined by the variable "keystone_public_url".
+horizon_use_keystone_public_url: False
diff --git a/ansible/roles/horizon/templates/local_settings.j2 b/ansible/roles/horizon/templates/local_settings.j2
index 136741b8cf..ecaba31d2b 100644
--- a/ansible/roles/horizon/templates/local_settings.j2
+++ b/ansible/roles/horizon/templates/local_settings.j2
@@ -209,8 +209,9 @@ OPENSTACK_HOST = "{{ kolla_internal_fqdn }}"
OPENSTACK_KEYSTONE_URL = "{{ horizon_keystone_url }}"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ keystone_default_user_role }}"
+{% if enable_keystone_federation | bool %}
# Enables keystone web single-sign-on if set to True.
-#WEBSSO_ENABLED = False
+WEBSSO_ENABLED = True
# Determines which authentication choice to show as default.
#WEBSSO_INITIAL_CHOICE = "credentials"
@@ -223,13 +224,13 @@ OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ keystone_default_user_role }}"
# Do not remove the mandatory credentials mechanism.
# Note: The last two tuples are sample mapping keys to a identity provider
# and federation protocol combination (WEBSSO_IDP_MAPPING).
-#WEBSSO_CHOICES = (
-# ("credentials", _("Keystone Credentials")),
-# ("oidc", _("OpenID Connect")),
-# ("saml2", _("Security Assertion Markup Language")),
-# ("acme_oidc", "ACME - OpenID Connect"),
-# ("acme_saml2", "ACME - SAML2"),
-#)
+WEBSSO_KEYSTONE_URL = "{{ keystone_public_url }}/v3"
+WEBSSO_CHOICES = (
+ ("credentials", _("Keystone Credentials")),
+ {% for idp in keystone_identity_providers %}
+ ("{{ idp.name }}_{{ idp.protocol }}", "{{ idp.public_name }}"),
+ {% endfor %}
+)
# A dictionary of specific identity provider and federation protocol
# combinations. From the selected authentication mechanism, the value
@@ -238,10 +239,12 @@ OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ keystone_default_user_role }}"
# specific WebSSO endpoint in keystone, otherwise it will use the value
# as the protocol_id when redirecting to the WebSSO by protocol endpoint.
# NOTE: The value is expected to be a tuple formatted as: (<idp_id>, <protocol_id>).
-#WEBSSO_IDP_MAPPING = {
-# "acme_oidc": ("acme", "oidc"),
-# "acme_saml2": ("acme", "saml2"),
-#}
+WEBSSO_IDP_MAPPING = {
+{% for idp in keystone_identity_providers %}
+ "{{ idp.name }}_{{ idp.protocol }}": ("{{ idp.name }}", "{{ idp.protocol }}"),
+{% endfor %}
+}
+{% endif %}
# Disable SSL certificate checks (useful for self-signed certificates):
#OPENSTACK_SSL_NO_VERIFY = True
diff --git a/ansible/roles/keystone/defaults/main.yml b/ansible/roles/keystone/defaults/main.yml
index 75df4f76ab..9eb6a0836b 100644
--- a/ansible/roles/keystone/defaults/main.yml
+++ b/ansible/roles/keystone/defaults/main.yml
@@ -16,6 +16,7 @@ keystone_services:
tls_backend: "{{ keystone_enable_tls_backend }}"
port: "{{ keystone_public_port }}"
listen_port: "{{ keystone_public_listen_port }}"
+ backend_http_extra: "{{ ['balance source'] if enable_keystone_federation | bool else [] }}"
keystone_external:
enabled: "{{ enable_keystone }}"
mode: "http"
@@ -23,6 +24,7 @@ keystone_services:
tls_backend: "{{ keystone_enable_tls_backend }}"
port: "{{ keystone_public_port }}"
listen_port: "{{ keystone_public_listen_port }}"
+ backend_http_extra: "{{ ['balance source'] if enable_keystone_federation | bool else [] }}"
keystone_admin:
enabled: "{{ enable_keystone }}"
mode: "http"
@@ -177,3 +179,26 @@ keystone_ks_services:
# TLS
####################
keystone_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
+
+###############################
+# OpenStack identity federation
+###############################
+# Default OpenID Connect remote attribute key
+keystone_remote_id_attribute_oidc: "HTTP_OIDC_ISS"
+keystone_container_federation_oidc_metadata_folder: "{{ '/etc/apache2/metadata' if kolla_base_distro in ['debian', 'ubuntu'] else '/etc/httpd/metadata' }}"
+keystone_container_federation_oidc_idp_certificate_folder: "{{ '/etc/apache2/cert' if kolla_base_distro in ['debian', 'ubuntu'] else '/etc/httpd/cert' }}"
+keystone_container_federation_oidc_attribute_mappings_folder: "{{ container_config_directory }}/federation/oidc/attribute_maps"
+keystone_host_federation_oidc_metadata_folder: "{{ node_config_directory }}/keystone/federation/oidc/metadata"
+keystone_host_federation_oidc_idp_certificate_folder: "{{ node_config_directory }}/keystone/federation/oidc/cert"
+keystone_host_federation_oidc_attribute_mappings_folder: "{{ node_config_directory }}/keystone/federation/oidc/attribute_maps"
+
+# This variable is used to define the list of trusted Horizon dashboards.
+# keystone_trusted_dashboards: ['<dashboard_url_1>', '<dashboard_url_2>', '<dashboard_url_3>']
+keystone_trusted_dashboards: "{{ ['%s://%s/auth/websso/' % (public_protocol, kolla_external_fqdn), '%s/auth/websso/' % (horizon_public_endpoint)] if enable_horizon | bool else [] }}"
+keystone_enable_federation_openid: "{{ enable_keystone_federation | bool and keystone_identity_providers | selectattr('protocol','equalto','openid') | list | count > 0 }}"
+keystone_should_remove_attribute_mappings: False
+keystone_should_remove_identity_providers: False
+keystone_federation_oidc_scopes: "openid email profile"
diff --git a/ansible/roles/keystone/tasks/config-federation-oidc.yml b/ansible/roles/keystone/tasks/config-federation-oidc.yml
new file mode 100644
index 0000000000..4171283273
--- /dev/null
+++ b/ansible/roles/keystone/tasks/config-federation-oidc.yml
@@ -0,0 +1,89 @@
+---
+- name: Remove OpenID certificate and metadata files
+ become: true
+ vars:
+ keystone: "{{ keystone_services['keystone'] }}"
+ file:
+ state: absent
+ path: "{{ item }}"
+ when:
+ - inventory_hostname in groups[keystone.group]
+ with_items:
+ - "{{ keystone_host_federation_oidc_metadata_folder }}"
+ - "{{ keystone_host_federation_oidc_idp_certificate_folder }}"
+ - "{{ keystone_host_federation_oidc_attribute_mappings_folder }}"
+
+- name: Create OpenID configuration directories
+ vars:
+ keystone: "{{ keystone_services['keystone'] }}"
+ file:
+ dest: "{{ item }}"
+ state: "directory"
+ mode: "0770"
+ become: true
+ with_items:
+ - "{{ keystone_host_federation_oidc_metadata_folder }}"
+ - "{{ keystone_host_federation_oidc_idp_certificate_folder }}"
+ - "{{ keystone_host_federation_oidc_attribute_mappings_folder }}"
+ when:
+ - inventory_hostname in groups[keystone.group]
+
+- name: Copying OpenID Identity Providers metadata
+ vars:
+ keystone: "{{ keystone_services['keystone'] }}"
+ become: true
+ copy:
+ src: "{{ item.metadata_folder }}/"
+ dest: "{{ keystone_host_federation_oidc_metadata_folder }}"
+ mode: "0660"
+ with_items: "{{ keystone_identity_providers }}"
+ when:
+ - item.protocol == 'openid'
+ - inventory_hostname in groups[keystone.group]
+
+- name: Copying OpenID Identity Providers certificate
+ vars:
+ keystone: "{{ keystone_services['keystone'] }}"
+ become: true
+ copy:
+ src: "{{ item.certificate_file }}"
+ dest: "{{ keystone_host_federation_oidc_idp_certificate_folder }}"
+ mode: "0660"
+ with_items: "{{ keystone_identity_providers }}"
+ when:
+ - item.protocol == 'openid'
+ - inventory_hostname in groups[keystone.group]
+
+- name: Copying OpenStack Identity Providers attribute mappings
+ vars:
+ keystone: "{{ keystone_services['keystone'] }}"
+ become: true
+ copy:
+ src: "{{ item.file }}"
+ dest: "{{ keystone_host_federation_oidc_attribute_mappings_folder }}/{{ item.file | basename }}"
+ mode: "0660"
+ with_items: "{{ keystone_identity_mappings }}"
+ when:
+ - inventory_hostname in groups[keystone.group]
+
+- name: Finding OpenID Identity Provider certificate files
+ become: true
+ vars:
+ keystone: "{{ keystone_services['keystone'] }}"
+ find:
+ path: "{{ keystone_host_federation_oidc_idp_certificate_folder }}"
+ pattern: "*.pem"
+ register: certificates_path
+ when:
+ - inventory_hostname in groups[keystone.group]
+
+- name: Setting the certificate key IDs variable
+ vars:
+ keystone: "{{ keystone_services['keystone'] }}"
+ set_fact:
+ keystone_federation_openid_certificate_key_ids: "{{ certificates_path.files | map(attribute='path') | map('regex_replace', '^.*/(.*)\\.pem$', '\\1#' + keystone_container_federation_oidc_idp_certificate_folder + '/\\1.pem') | list }}" # noqa 204
+ when:
+ - inventory_hostname in groups[keystone.group]
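+# NOTE: for a certificate file '<kid>.pem' found above, the generated entry is
+# '<kid>#<container cert folder>/<kid>.pem'; these "key ID # file" pairs are what
+# the OIDCOAuthVerifyCertFiles directive in wsgi-keystone.conf.j2 consumes.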
diff --git a/ansible/roles/keystone/tasks/config.yml b/ansible/roles/keystone/tasks/config.yml
index 06ecea3a7c..bec1350a34 100644
--- a/ansible/roles/keystone/tasks/config.yml
+++ b/ansible/roles/keystone/tasks/config.yml
@@ -144,6 +144,10 @@
notify:
- Restart {{ item.key }} container
+- include_tasks: config-federation-oidc.yml
+ when:
+ - keystone_enable_federation_openid | bool
+
- name: Copying over wsgi-keystone.conf
vars:
keystone: "{{ keystone_services.keystone }}"
diff --git a/ansible/roles/keystone/tasks/deploy.yml b/ansible/roles/keystone/tasks/deploy.yml
index 656e44e312..a6ff99b0e9 100644
--- a/ansible/roles/keystone/tasks/deploy.yml
+++ b/ansible/roles/keystone/tasks/deploy.yml
@@ -19,3 +19,7 @@
- import_tasks: register.yml
- import_tasks: check.yml
+
+- include_tasks: register_identity_providers.yml
+ when:
+ - enable_keystone_federation | bool
diff --git a/ansible/roles/keystone/tasks/register_identity_providers.yml b/ansible/roles/keystone/tasks/register_identity_providers.yml
new file mode 100644
index 0000000000..befcf41d3f
--- /dev/null
+++ b/ansible/roles/keystone/tasks/register_identity_providers.yml
@@ -0,0 +1,238 @@
+---
+- name: List configured attribute mappings (that can be used by IdPs)
+ command: >
+ docker exec -t keystone openstack
+ --os-auth-url={{ openstack_auth.auth_url }}
+ --os-password={{ openstack_auth.password }}
+ --os-username={{ openstack_auth.username }}
+ --os-project-name={{ openstack_auth.project_name }}
+ --os-identity-api-version=3
+ --os-interface {{ openstack_interface }}
+ --os-project-domain-name {{ openstack_auth.domain_name }}
+ --os-user-domain-name {{ openstack_auth.domain_name }}
+ --os-region-name {{ openstack_region_name }}
+ {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }} {% endif %}
+ mapping list -c ID --format value
+ run_once: True
+ become: True
+ register: existing_mappings_register
+
+- name: Register existing mappings
+ set_fact:
+ existing_mappings: "{{ existing_mappings_register.stdout_lines | map('trim') | list }}"
+
+- name: Remove unmanaged attribute mappings
+ command: >
+ docker exec -t keystone openstack
+ --os-auth-url={{ openstack_auth.auth_url }}
+ --os-password={{ openstack_auth.password }}
+ --os-username={{ openstack_auth.username }}
+ --os-project-name={{ openstack_auth.project_name }}
+ --os-identity-api-version=3
+ --os-interface {{ openstack_interface }}
+ --os-project-domain-name {{ openstack_auth.domain_name }}
+ --os-user-domain-name {{ openstack_auth.domain_name }}
+ --os-region-name {{ openstack_region_name }}
+ {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }} {% endif %}
+ mapping delete {{ item }}
+ run_once: True
+ become: true
+ with_items: "{{ existing_mappings }}"
+ when:
+ - item not in (keystone_identity_mappings | map(attribute='name') | list)
+ - keystone_should_remove_attribute_mappings
+
+- name: Create missing domains for the IdPs
+ become: true
+ kolla_toolbox:
+ module_name: "os_keystone_domain"
+ module_args:
+ name: "{{ item.openstack_domain }}"
+ auth: "{{ openstack_auth }}"
+ endpoint_type: "{{ openstack_interface }}"
+ cacert: "{{ openstack_cacert }}"
+ region_name: "{{ openstack_region_name }}"
+ run_once: True
+ with_items: "{{ keystone_identity_providers }}"
+
+- name: Register attribute mappings in OpenStack
+ become: true
+ command: >
+ docker exec -t keystone openstack
+ --os-auth-url={{ openstack_auth.auth_url }}
+ --os-password={{ openstack_auth.password }}
+ --os-username={{ openstack_auth.username }}
+ --os-project-name={{ openstack_auth.project_name }}
+ --os-identity-api-version=3
+ --os-interface {{ openstack_interface }}
+ --os-project-domain-name {{ openstack_auth.domain_name }}
+ --os-user-domain-name {{ openstack_auth.domain_name }}
+ --os-region-name {{ openstack_region_name }}
+ {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }} {% endif %}
+ mapping create
+ --rules "{{ keystone_container_federation_oidc_attribute_mappings_folder }}/{{ item.file | basename }}"
+ {{ item.name }}
+ run_once: True
+ when:
+ - item.name not in existing_mappings
+ with_items: "{{ keystone_identity_mappings }}"
+
+- name: Update existing attribute mappings in OpenStack
+ become: true
+ command: >
+ docker exec -t keystone openstack
+ --os-auth-url={{ openstack_auth.auth_url }}
+ --os-password={{ openstack_auth.password }}
+ --os-username={{ openstack_auth.username }}
+ --os-project-name={{ openstack_auth.project_name }}
+ --os-identity-api-version=3
+ --os-interface {{ openstack_interface }}
+ --os-project-domain-name {{ openstack_auth.domain_name }}
+ --os-user-domain-name {{ openstack_auth.domain_name }}
+ --os-region-name {{ openstack_region_name }}
+ {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }} {% endif %}
+ mapping set
+ --rules "{{ keystone_container_federation_oidc_attribute_mappings_folder }}/{{ item.file | basename }}"
+ {{ item.name }}
+ run_once: True
+ when:
+ - item.name in existing_mappings
+ with_items: "{{ keystone_identity_mappings }}"
+
+- name: List configured IdPs
+ become: true
+ command: >
+ docker exec -t keystone openstack
+ --os-auth-url={{ openstack_auth.auth_url }}
+ --os-password={{ openstack_auth.password }}
+ --os-username={{ openstack_auth.username }}
+ --os-project-name={{ openstack_auth.project_name }}
+ --os-identity-api-version=3
+ --os-interface {{ openstack_interface }}
+ --os-project-domain-name {{ openstack_auth.domain_name }}
+ --os-user-domain-name {{ openstack_auth.domain_name }}
+ --os-region-name {{ openstack_region_name }}
+ {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }} {% endif %}
+ identity provider list -c ID --format value
+ run_once: True
+ register: existing_idps_register
+
+- name: Register existing IdPs
+  set_fact:
+    existing_idps: "{{ existing_idps_register.stdout_lines | map('trim') | list }}"
+
+- name: Remove unmanaged identity providers
+ become: true
+ command: >
+ docker exec -t keystone openstack
+ --os-auth-url={{ openstack_auth.auth_url }}
+ --os-password={{ openstack_auth.password }}
+ --os-username={{ openstack_auth.username }}
+ --os-project-name={{ openstack_auth.project_name }}
+ --os-identity-api-version=3
+ --os-interface {{ openstack_interface }}
+ --os-project-domain-name {{ openstack_auth.domain_name }}
+ --os-user-domain-name {{ openstack_auth.domain_name }}
+ --os-region-name {{ openstack_region_name }}
+ {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }} {% endif %}
+ identity provider delete {{ item }}
+ run_once: True
+ with_items: "{{ existing_idps }}"
+ when:
+ - item not in (keystone_identity_providers | map(attribute='name') | list)
+ - keystone_should_remove_identity_providers
+
+- name: Register Identity Providers in OpenStack
+ become: true
+ command: >
+ docker exec -t keystone openstack
+ --os-auth-url={{ openstack_auth.auth_url }}
+ --os-password={{ openstack_auth.password }}
+ --os-username={{ openstack_auth.username }}
+ --os-project-name={{ openstack_auth.project_name }}
+ --os-identity-api-version=3
+ --os-interface {{ openstack_interface }}
+ --os-project-domain-name {{ openstack_auth.domain_name }}
+ --os-user-domain-name {{ openstack_auth.domain_name }}
+ --os-region-name {{ openstack_region_name }}
+ {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }} {% endif %}
+ identity provider create
+ --description "{{ item.public_name }}"
+ --remote-id "{{ item.identifier }}"
+ --domain "{{ item.openstack_domain }}"
+ {{ item.name }}
+ run_once: True
+ when:
+ - item.name not in existing_idps
+ with_items: "{{ keystone_identity_providers }}"
+
+- name: Update Identity Providers in OpenStack according to the Kolla-Ansible configuration
+ become: true
+ command: >
+ docker exec -t keystone openstack
+ --os-auth-url={{ openstack_auth.auth_url }}
+ --os-password={{ openstack_auth.password }}
+ --os-username={{ openstack_auth.username }}
+ --os-project-name={{ openstack_auth.project_name }}
+ --os-identity-api-version=3
+ --os-interface {{ openstack_interface }}
+ --os-project-domain-name {{ openstack_auth.domain_name }}
+ --os-user-domain-name {{ openstack_auth.domain_name }}
+ --os-region-name {{ openstack_region_name }}
+ {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }} {% endif %}
+ identity provider set
+ --description "{{ item.public_name }}"
+ --remote-id "{{ item.identifier }}"
+ "{{ item.name }}"
+ run_once: True
+ when:
+ - item.name in existing_idps
+ with_items: "{{ keystone_identity_providers }}"
+
+- name: Create federation protocols linking each Identity Provider to its attribute mapping (the mappings themselves are configured by the operator)
+ become: true
+ command: >
+ docker exec -t keystone openstack
+ --os-auth-url={{ openstack_auth.auth_url }}
+ --os-password={{ openstack_auth.password }}
+ --os-username={{ openstack_auth.username }}
+ --os-project-name={{ openstack_auth.project_name }}
+ --os-identity-api-version=3
+ --os-interface {{ openstack_interface }}
+ --os-project-domain-name {{ openstack_auth.domain_name }}
+ --os-user-domain-name {{ openstack_auth.domain_name }}
+ --os-region-name {{ openstack_region_name }}
+ {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }} {% endif %}
+ federation protocol create
+ --mapping {{ item.attribute_mapping }}
+ --identity-provider {{ item.name }}
+ {{ item.protocol }}
+ run_once: True
+ when:
+ - item.name not in existing_idps
+ with_items: "{{ keystone_identity_providers }}"
+
+- name: Update federation protocols linking each Identity Provider to its attribute mapping
+ become: true
+ command: >
+ docker exec -t keystone openstack
+ --os-auth-url={{ openstack_auth.auth_url }}
+ --os-password={{ openstack_auth.password }}
+ --os-username={{ openstack_auth.username }}
+ --os-project-name={{ openstack_auth.project_name }}
+ --os-identity-api-version=3
+ --os-interface {{ openstack_interface }}
+ --os-project-domain-name {{ openstack_auth.domain_name }}
+ --os-user-domain-name {{ openstack_auth.domain_name }}
+ --os-region-name {{ openstack_region_name }}
+ {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }} {% endif %}
+ federation protocol set
+ --identity-provider {{ item.name }}
+ --mapping {{ item.attribute_mapping }}
+ {{ item.protocol }}
+ run_once: True
+ register: result
+  failed_when: result.rc not in [0, 1]  # This command returns rc 1 even on success, so accept it to avoid spurious failures.
+ when:
+ - item.name in existing_idps
+ with_items: "{{ keystone_identity_providers }}"
diff --git a/ansible/roles/keystone/templates/keystone.conf.j2 b/ansible/roles/keystone/templates/keystone.conf.j2
index 730107eaca..f1e787b6f5 100644
--- a/ansible/roles/keystone/templates/keystone.conf.j2
+++ b/ansible/roles/keystone/templates/keystone.conf.j2
@@ -77,3 +77,18 @@ connection_string = {{ osprofiler_backend_connection_string }}
[cors]
allowed_origin = {{ grafana_public_endpoint }}
{% endif %}
+
+{% if enable_keystone_federation | bool %}
+[federation]
+{% for dashboard in keystone_trusted_dashboards %}
+trusted_dashboard = {{ dashboard }}
+{% endfor %}
+
+sso_callback_template = /etc/keystone/sso_callback_template.html
+
+[openid]
+remote_id_attribute = {{ keystone_remote_id_attribute_oidc }}
+
+[auth]
+methods = password,token,openid,application_credential
+{% endif %}
diff --git a/ansible/roles/keystone/templates/keystone.json.j2 b/ansible/roles/keystone/templates/keystone.json.j2
index e5c676190b..2dee915eb7 100644
--- a/ansible/roles/keystone/templates/keystone.json.j2
+++ b/ansible/roles/keystone/templates/keystone.json.j2
@@ -1,4 +1,5 @@
{% set keystone_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
+{% set apache_user = 'www-data' if kolla_base_distro in ['ubuntu', 'debian'] else 'apache' %}
{
"command": "/usr/bin/keystone-startup.sh",
"config_files": [
@@ -52,6 +53,22 @@
"owner": "keystone",
"perm": "0600"
}{% endif %}
+    {% if keystone_enable_federation_openid | bool %},
+ {
+ "source": "{{ container_config_directory }}/federation/oidc/metadata",
+ "dest": "{{ keystone_container_federation_oidc_metadata_folder }}",
+ "owner": "{{ apache_user }}:{{ apache_user }}",
+ "perm": "0600",
+ "merge": true
+ },
+ {
+ "source": "{{ container_config_directory }}/federation/oidc/cert",
+ "dest": "{{ keystone_container_federation_oidc_idp_certificate_folder }}",
+ "owner": "{{ apache_user }}:{{ apache_user }}",
+ "perm": "0600",
+ "merge": true
+ }
+ {% endif %}
],
"permissions": [
{
@@ -61,7 +78,17 @@
{
"path": "/var/log/kolla/keystone/keystone.log",
"owner": "keystone:keystone"
+    },{% if keystone_enable_federation_openid | bool %}
+ {
+ "path": "{{ keystone_container_federation_oidc_metadata_folder }}",
+ "owner": "{{ apache_user }}:{{ apache_user }}",
+ "perm": "0700"
},
+ {
+ "path": "{{ keystone_container_federation_oidc_idp_certificate_folder }}",
+ "owner": "{{ apache_user }}:{{ apache_user }}",
+ "perm": "0700"
+ },{% endif %}
{
"path": "/etc/keystone/fernet-keys",
"owner": "keystone:keystone",
diff --git a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
index 1d62274659..6c6e96450c 100644
--- a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
+++ b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
@@ -51,6 +51,52 @@ LogLevel info
SSLCertificateFile /etc/keystone/certs/keystone-cert.pem
SSLCertificateKeyFile /etc/keystone/certs/keystone-key.pem
{% endif %}
+
+{% if keystone_enable_federation_openid | bool %}
+    OIDCClaimPrefix "OIDC-"
+    OIDCClaimDelimiter ";"
+    OIDCResponseType "id_token"
+    OIDCScope "{{ keystone_federation_oidc_scopes }}"
+    OIDCMetadataDir {{ keystone_container_federation_oidc_metadata_folder }}
+{% if keystone_federation_openid_certificate_key_ids | length > 0 %}
+    OIDCOAuthVerifyCertFiles {{ keystone_federation_openid_certificate_key_ids | join(" ") }}
+{% endif %}
+    OIDCCryptoPassphrase {{ keystone_federation_openid_crypto_password }}
+    OIDCRedirectURI {{ keystone_public_url }}/redirect_uri
+
+    <Location /redirect_uri>
+      Require valid-user
+      AuthType openid-connect
+    </Location>
+
+    {# WebSSO authentication endpoint -#}
+    <Location /v3/auth/OS-FEDERATION/websso/openid>
+      Require valid-user
+      AuthType openid-connect
+    </Location>
+
+{% for idp in keystone_identity_providers %}
+{% if idp.protocol == 'openid' %}
+    <Location /v3/auth/OS-FEDERATION/identity_providers/{{ idp.name }}/protocols/{{ idp.protocol }}/websso>
+      OIDCDiscoverURL {{ keystone_public_url }}/redirect_uri?iss={{ idp.identifier | urlencode }}
+      Require valid-user
+      AuthType openid-connect
+    </Location>
+{% endif %}
+{% endfor %}
+
+    {# CLI / API authentication endpoint -#}
+{% for idp in keystone_identity_providers %}
+{% if idp.protocol == 'openid' %}
+    <Location /v3/OS-FEDERATION/identity_providers/{{ idp.name }}/protocols/{{ idp.protocol }}/auth>
+      Require valid-user
+      {# NOTE(jasonanderson): `auth-openidc` is a special auth type that can -#}
+      {# additionally handle verifying bearer tokens -#}
+      AuthType auth-openidc
+    </Location>
+{% endif %}
+{% endfor %}
+{% endif %}
diff --git a/ansible/roles/nova-cell/defaults/main.yml b/ansible/roles/nova-cell/defaults/main.yml
index 80eba05ed7..64d1b308c3 100644
--- a/ansible/roles/nova-cell/defaults/main.yml
+++ b/ansible/roles/nova-cell/defaults/main.yml
@@ -328,6 +328,9 @@ nova_libvirt_default_volumes:
- "/sys/fs/cgroup:/sys/fs/cgroup"
- "kolla_logs:/var/log/kolla/"
- "libvirtd:/var/lib/libvirt"
+ - "{% if enable_prometheus_libvirt_exporter | bool %}libvirtd_run:/var/run/libvirt{% endif %}"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
- "nova_libvirt_qemu:/etc/libvirt/qemu"
diff --git a/ansible/roles/prometheus/defaults/main.yml b/ansible/roles/prometheus/defaults/main.yml
index 568a774a4d..d1845c9596 100644
--- a/ansible/roles/prometheus/defaults/main.yml
+++ b/ansible/roles/prometheus/defaults/main.yml
@@ -13,6 +13,7 @@ prometheus_services:
mode: "http"
external: false
port: "{{ prometheus_port }}"
+ custom_member_list: "{{ prometheus_haproxy_members.split(';') }}"
prometheus-node-exporter:
container_name: prometheus_node_exporter
group: prometheus-node-exporter
@@ -21,6 +22,13 @@ prometheus_services:
pid_mode: "host"
volumes: "{{ prometheus_node_exporter_default_volumes + prometheus_node_exporter_extra_volumes }}"
dimensions: "{{ prometheus_node_exporter_dimensions }}"
+ prometheus-libvirt-exporter:
+ container_name: "prometheus_libvirt_exporter"
+ group: "prometheus-libvirt-exporter"
+ enabled: "{{ enable_prometheus_libvirt_exporter | bool }}"
+ image: "{{ prometheus_libvirt_exporter_image_full }}"
+ volumes: "{{ prometheus_libvirt_exporter_default_volumes + prometheus_libvirt_exporter_extra_volumes }}"
+ dimensions: "{{ prometheus_libvirt_exporter_dimensions }}"
prometheus-mysqld-exporter:
container_name: prometheus_mysqld_exporter
group: prometheus-mysqld-exporter
@@ -98,6 +106,14 @@ prometheus_services:
####################
prometheus_mysql_exporter_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}prometheus{% endif %}"
+####################
+# HAProxy
+####################
+prometheus_haproxy_members: "{% for host in groups['prometheus'] %}server {{ hostvars[host]['ansible_hostname'] }} {{ 'api' | kolla_address(host) }}:{{ prometheus_port }} check inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}"
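+# e.g. for hypothetical hosts ctl0 (192.0.2.10) and ctl1 (192.0.2.11) this renders as:
+# "server ctl0 192.0.2.10:9091 check inter 2000 rise 2 fall 5;server ctl1 192.0.2.11:9091 check inter 2000 rise 2 fall 5 backup;"
+# i.e. only the first Prometheus server is active; the others are backups.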
+
####################
# Blackbox
####################
@@ -121,6 +134,10 @@ prometheus_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}
prometheus_server_tag: "{{ prometheus_tag }}"
prometheus_server_image_full: "{{ prometheus_server_image }}:{{ prometheus_server_tag }}"
+prometheus_libvirt_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ prometheus_install_type }}-prometheus-libvirt-exporter"
+prometheus_libvirt_exporter_tag: "{{ prometheus_tag }}"
+prometheus_libvirt_exporter_image_full: "{{ prometheus_libvirt_exporter_image }}:{{ prometheus_libvirt_exporter_tag }}"
+
prometheus_haproxy_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ prometheus_install_type }}-prometheus-haproxy-exporter"
prometheus_haproxy_exporter_tag: "{{ prometheus_tag }}"
prometheus_haproxy_exporter_image_full: "{{ prometheus_haproxy_exporter_image }}:{{ prometheus_haproxy_exporter_tag }}"
@@ -159,6 +176,7 @@ prometheus_blackbox_exporter_tag: "{{ prometheus_tag }}"
prometheus_blackbox_exporter_image_full: "{{ prometheus_blackbox_exporter_image }}:{{ prometheus_blackbox_exporter_tag }}"
prometheus_server_dimensions: "{{ default_container_dimensions }}"
+prometheus_libvirt_exporter_dimensions: "{{ default_container_dimensions }}"
prometheus_haproxy_exporter_dimensions: "{{ default_container_dimensions }}"
prometheus_mysqld_exporter_dimensions: "{{ default_container_dimensions }}"
prometheus_node_exporter_dimensions: "{{ default_container_dimensions }}"
@@ -175,6 +193,10 @@ prometheus_server_default_volumes:
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "prometheus:/var/lib/prometheus"
- "kolla_logs:/var/log/kolla/"
+prometheus_libvirt_exporter_default_volumes:
+ - "{{ node_config_directory }}/prometheus-libvirt-exporter/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "libvirtd_run:/var/run/libvirt:ro"
prometheus_haproxy_exporter_default_volumes:
- "{{ node_config_directory }}/prometheus-haproxy-exporter/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -231,6 +253,7 @@ prometheus_blackbox_exporter_default_volumes:
- "kolla_logs:/var/log/kolla/"
prometheus_extra_volumes: "{{ default_extra_volumes }}"
+prometheus_libvirt_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
prometheus_server_extra_volumes: "{{ prometheus_extra_volumes }}"
prometheus_haproxy_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
prometheus_mysqld_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
diff --git a/ansible/roles/prometheus/handlers/main.yml b/ansible/roles/prometheus/handlers/main.yml
index 42820e6365..d155cf825c 100644
--- a/ansible/roles/prometheus/handlers/main.yml
+++ b/ansible/roles/prometheus/handlers/main.yml
@@ -149,3 +149,18 @@
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
+
+- name: Restart prometheus-libvirt-exporter container
+ vars:
+ service_name: "prometheus-libvirt-exporter"
+ service: "{{ prometheus_services[service_name] }}"
+ become: true
+ kolla_docker:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+ when:
+ - kolla_action != "config"
diff --git a/ansible/roles/prometheus/tasks/precheck.yml b/ansible/roles/prometheus/tasks/precheck.yml
index c3aa2eadde..ec6fb3b6e9 100644
--- a/ansible/roles/prometheus/tasks/precheck.yml
+++ b/ansible/roles/prometheus/tasks/precheck.yml
@@ -19,6 +19,7 @@
- prometheus_openstack_exporter
- prometheus_elasticsearch_exporter
- prometheus_blackbox_exporter
+ - prometheus_libvirt_exporter
register: container_facts
- name: Checking free port for Prometheus server
@@ -149,3 +150,17 @@
- enable_prometheus_blackbox_exporter | bool
with_items:
- "{{ prometheus_blackbox_exporter_port }}"
+
+- name: Checking free ports for Prometheus libvirt-exporter
+ wait_for:
+ host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
+ port: "{{ item }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - container_facts['prometheus_libvirt_exporter'] is not defined
+ - inventory_hostname in groups['prometheus-libvirt-exporter']
+ - enable_prometheus_libvirt_exporter | bool
+ with_items:
+ - "{{ prometheus_libvirt_exporter_port }}"
diff --git a/ansible/roles/prometheus/templates/prometheus-libvirt-exporter.json.j2 b/ansible/roles/prometheus/templates/prometheus-libvirt-exporter.json.j2
new file mode 100644
index 0000000000..6154beef5f
--- /dev/null
+++ b/ansible/roles/prometheus/templates/prometheus-libvirt-exporter.json.j2
@@ -0,0 +1,4 @@
+{
+ "command": "/opt/libvirt_exporter --web.listen-address={{ api_interface_address }}:{{ prometheus_libvirt_exporter_port }}",
+ "config_files": []
+}
diff --git a/ansible/roles/prometheus/templates/prometheus.yml.j2 b/ansible/roles/prometheus/templates/prometheus.yml.j2
index bc494b3ff6..5f41f7a4f5 100644
--- a/ansible/roles/prometheus/templates/prometheus.yml.j2
+++ b/ansible/roles/prometheus/templates/prometheus.yml.j2
@@ -174,6 +174,17 @@ scrape_configs:
replacement: '{{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_blackbox_exporter_port }}'
{% endif %}
+
+{% if enable_prometheus_libvirt_exporter | bool %}
+ - job_name: libvirt_exporter
+ scrape_interval: {{ prometheus_libvirt_exporter_interval }}
+ static_configs:
+ - targets:
+{% for host in groups["prometheus-libvirt-exporter"] %}
+ - '{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ hostvars[host]['prometheus_libvirt_exporter_port'] }}'
+{% endfor %}
+{% endif %}
+
{% if enable_prometheus_alertmanager | bool %}
- job_name: alertmanager
static_configs:
diff --git a/ansible/site.yml b/ansible/site.yml
index fbeb40db5e..f71f8acf4f 100644
--- a/ansible/site.yml
+++ b/ansible/site.yml
@@ -27,6 +27,7 @@
- enable_barbican_{{ enable_barbican | bool }}
- enable_blazar_{{ enable_blazar | bool }}
- enable_ceilometer_{{ enable_ceilometer | bool }}
+ - enable_ceph_rgw_{{ enable_ceph_rgw | bool }}
- enable_chrony_{{ enable_chrony | bool }}
- enable_cinder_{{ enable_cinder | bool }}
- enable_cloudkitty_{{ enable_cloudkitty | bool }}
@@ -162,7 +163,12 @@
tags: blazar
when: enable_blazar | bool
- include_role:
- name: cinder
+        name: ceph-rgw
+ tasks_from: loadbalancer
+ tags: ceph-rgw
+ when: enable_ceph_rgw | bool
+ - include_role:
+        name: cinder
tasks_from: loadbalancer
tags: cinder
when: enable_cinder | bool
@@ -656,6 +662,19 @@
tags: swift,
when: enable_swift | bool }
+- name: Apply role ceph-rgw
+ gather_facts: false
+ hosts:
+ # NOTE(mgoddard): This is only used to register Keystone services, and
+ # could run on any host running kolla-toolbox.
+ - kolla-toolbox
+ - '&enable_ceph_rgw_True'
+ serial: '{{ kolla_serial|default("0") }}'
+ roles:
+ - { role: ceph-rgw,
+ tags: ceph-rgw,
+ when: enable_ceph_rgw | bool }
+
- name: Apply role glance
gather_facts: false
hosts:
@@ -1224,3 +1243,13 @@
- { role: masakari,
tags: masakari,
when: enable_masakari | bool }
+
+- name: Apply role caso
+ gather_facts: false
+ hosts:
+ - caso
+ serial: '{{ kolla_serial|default("0") }}'
+ roles:
+ - { role: caso,
+ tags: caso,
+ when: enable_caso | bool }
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
index 9681dc966a..fb61fccfd6 100644
--- a/doc/source/contributor/index.rst
+++ b/doc/source/contributor/index.rst
@@ -21,3 +21,4 @@ We welcome everyone to join our project!
bug-triage
ptl-guide
release-management
+ setup-identity-provider
diff --git a/doc/source/contributor/setup-identity-provider.rst b/doc/source/contributor/setup-identity-provider.rst
new file mode 100644
index 0000000000..99e5ae7807
--- /dev/null
+++ b/doc/source/contributor/setup-identity-provider.rst
@@ -0,0 +1,193 @@
+.. _setup-identity-provider:
+
+============================
+Test Identity Provider setup
+============================
+
+This guide shows how to create an Identity Provider that handles the OpenID
+Connect protocol to authenticate users when
+:keystone-doc:`using Federation with OpenStack
+<admin/federation/federated_identity.html>` (these configurations must not
+be used in a production environment).
+
+Keycloak
+========
+
+Keycloak is a Java application that implements an Identity Provider handling
+both OpenID Connect and SAML protocols.
+
+Setting up a Keycloak instance for testing is straightforward with Docker.
+
+Creating the Docker Keycloak instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Run the following docker command:
+
+.. code-block:: console
+
+ docker run -p 8080:8080 -p 8443:8443 -e KEYCLOAK_USER=admin -e KEYCLOAK_PASSWORD=admin quay.io/keycloak/keycloak:latest
+
+This will create a Keycloak instance that has the admin credentials as
+admin/admin and is listening on port 8080.
+
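+As a quick check that the instance is up (assuming it is running locally as
+above), the master realm's discovery document should be reachable:
+
+.. code-block:: console
+
+   curl http://localhost:8080/auth/realms/master/.well-known/openid-configuration
+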
+After creating the instance, you will need to log in to Keycloak as
+administrator and set up the first Identity Provider.
+
+Creating an Identity Provider with Keycloak
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following guide assumes that the steps are executed from the same machine
+(localhost), but you can change the hostname if you want to run it from
+elsewhere.
+
+In this guide, we will use 'new_realm' as the realm name in Keycloak. If you
+want to use any other realm name, replace 'new_realm' in the URIs used
+throughout this guide with the realm name that you are using.
+
+- Access the admin console at http://localhost:8080/auth/ via the Administration Console option.
+- Authenticate using the credentials defined in the creation step.
+- Create a new realm in the http://localhost:8080/auth/admin/master/console/#/create/realm page.
+- After creating a realm, you will need to create a client to be used by Keystone; to do it, just access http://localhost:8080/auth/admin/master/console/#/create/client/new_realm.
+- To create a client, you will need to set the client_id (any name will do),
+  the protocol (must be openid-connect) and the Root URL (you can leave it
+  blank)
+- After creating the client, you will need to update some client's attributes
+ like:
+
+  - Enable the Implicit flow (this allows you to use the OpenStack CLI with
+    the oidcv3 plugin)
+  - Set Access Type to confidential
+  - Add the Horizon and Keystone URIs to the Valid Redirect URIs. The Keystone URI should include the '/redirect_uri' path, for example: https://horizon.com/ and https://keystone.com/redirect_uri
+ - Save the changes
+ - Access the client's Mappers tab to add the user's attributes that will be
+ shared with the client (Keystone):
+
+ - In this guide, we will need the following attribute mappers in Keycloak:
+
+ ==================================== ==============
+ name/user attribute/token claim name mapper type
+ ==================================== ==============
+ openstack-user-domain user attribute
+ openstack-default-project user attribute
+ ==================================== ==============
+
+- After creating the client, you will need to create a user in that realm to
+  log in to OpenStack via identity federation
+- To create a user, access http://localhost:8080/auth/admin/master/console/#/create/user/new_realm and fill in the form with the user's data
+- After creating the user, you can access the tab "Credentials" to set the
+ user's password
+- Then, in the tab "Attributes", you must set the authorization attributes to
+  be used by Keystone; these attributes are defined in the :ref:`attribute
+  mapping <attribute_mapping>` in Keystone
+
+After you create the Identity Provider, you will need to gather some data from
+it to configure Kolla Ansible.
+
+Configuring Kolla Ansible to use the Identity Provider
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to obtain the data needed in
+:ref:`Setup OIDC via Kolla Ansible <setup-oidc-kolla-ansible>`.
+
+- name: The realm name, in this case it will be "new_realm"
+- identifier: http://localhost:8080/auth/realms/new_realm/ (again, the "new_realm" is the name of the realm)
+- certificate_file: This one can be downloaded from http://localhost:8080/auth/admin/master/console/#/realms/new_realm/keys
+- metadata_folder:
+
+ - localhost%3A8080%2Fauth%2Frealms%2Fnew_realm.client:
+
+ - client_id: Access http://localhost:8080/auth/admin/master/console/#/realms/new_realm/clients , and access the client you created for Keystone, copy the Client ID displayed in the page
+ - client_secret: In the same page you got the client_id, access the tab
+ "Credentials" and copy the secret value
+  - localhost%3A8080%2Fauth%2Frealms%2Fnew_realm.provider: Copy the JSON from http://localhost:8080/auth/realms/new_realm/.well-known/openid-configuration (the "new_realm" is the realm name)
+  - localhost%3A8080%2Fauth%2Frealms%2Fnew_realm.conf: You can leave this file
+    as an empty JSON object "{}"
+
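+A minimal sketch of assembling this folder from the command line (the paths,
+realm name and placeholder credentials are examples from this guide, not
+fixed values):
+
+.. code-block:: console
+
+   mkdir -p /root/inDev/meta-idp
+   cd /root/inDev/meta-idp
+   # The .provider file is the realm's OpenID discovery document
+   curl -o 'localhost%3A8080%2Fauth%2Frealms%2Fnew_realm.provider' \
+       http://localhost:8080/auth/realms/new_realm/.well-known/openid-configuration
+   # No OpenID Connect options are overridden here
+   echo '{}' > 'localhost%3A8080%2Fauth%2Frealms%2Fnew_realm.conf'
+   # Use the client_id and secret copied from the Keycloak client page
+   echo '{"client_id": "<client_id>", "client_secret": "<client_secret>"}' \
+       > 'localhost%3A8080%2Fauth%2Frealms%2Fnew_realm.client'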
+
+After you have finished configuring the Identity Provider, your main
+configuration should look something like the following:
+
+.. code-block:: yaml
+
+ keystone_identity_providers:
+ - name: "new_realm"
+ openstack_domain: "new_domain"
+ protocol: "openid"
+ identifier: "http://localhost:8080/auth/realms/new_realm"
+ public_name: "Authenticate via new_realm"
+ attribute_mapping: "attribute_mapping_keycloak_new_realm"
+ metadata_folder: "/root/inDev/meta-idp"
+ certificate_file: "/root/inDev/certs/LRVweuT51StjMdsna59jKfB3xw0r8Iz1d1J1HeAbmlw.pem"
+ keystone_identity_mappings:
+ - name: "attribute_mapping_keycloak_new_realm"
+ file: "/root/inDev/attr_map/attribute_mapping.json"
+
+Then, after deploying OpenStack, you should be able to log in to Horizon by
+selecting "Authenticate using" -> "Authenticate via new_realm" and entering
+"new_realm.com" in the "E-mail or domain name" field. After that, you will be
+redirected to a new page to choose the Identity Provider in Keystone. Click the
+link "localhost:8080/auth/realms/new_realm"; this will redirect you to Keycloak
+(the IdP), where you will need to log in with the user that you created. If the
+user's attributes in Keycloak are correct, the user will be created in
+OpenStack and you will be able to log in to Horizon.
+
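+You can also try authenticating from the OpenStack CLI using one of
+keystoneauth's OpenID Connect plugins. The sketch below uses the
+``v3oidcpassword`` plugin and assumes the corresponding grant is enabled for
+the client; every value shown is a placeholder taken from this guide:
+
+.. code-block:: console
+
+   openstack --os-auth-type v3oidcpassword \
+       --os-auth-url https://keystone.com:5000/v3 \
+       --os-identity-provider new_realm \
+       --os-protocol openid \
+       --os-client-id <client_id> \
+       --os-client-secret <client_secret> \
+       --os-discovery-endpoint http://localhost:8080/auth/realms/new_realm/.well-known/openid-configuration \
+       --os-username <user> --os-password <password> \
+       --os-project-name <project> --os-project-domain-name new_domain \
+       token issue
+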
+.. _attribute_mapping:
+
+Attribute mapping
+~~~~~~~~~~~~~~~~~
+
+This section shows how to create the attribute mapping to map an Identity
+Provider user to a Keystone user (ephemeral).
+
+The 'OIDC-' prefix in the remote types is defined by the 'OIDCClaimPrefix'
+configuration in the wsgi-keystone.conf file; this prefix must appear in the
+attribute mapping, as the OpenID Connect Apache module (mod_auth_openidc) adds
+the prefix to the user's attributes before sending them to Keystone. The
+attribute 'openstack-user-domain' defines the user's domain in OpenStack, and
+the attribute 'openstack-default-project' defines the user's project (the user
+will be assigned the role 'member' in that project).
+
+.. code-block:: json
+
+ [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}",
+ "email": "{1}",
+ "domain": {
+ "name": "{2}"
+ }
+ },
+ "domain": {
+ "name": "{2}"
+ },
+ "projects": [
+ {
+ "name": "{3}",
+ "roles": [
+ {
+ "name": "member"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "remote": [
+ {
+ "type": "OIDC-preferred_username"
+ },
+ {
+ "type": "OIDC-email"
+ },
+ {
+ "type": "OIDC-openstack-user-domain"
+ },
+ {
+ "type": "OIDC-openstack-default-project"
+ }
+ ]
+ }
+ ]
diff --git a/doc/source/reference/shared-services/keystone-guide.rst b/doc/source/reference/shared-services/keystone-guide.rst
index 2012b868f6..126e53c3d9 100644
--- a/doc/source/reference/shared-services/keystone-guide.rst
+++ b/doc/source/reference/shared-services/keystone-guide.rst
@@ -40,3 +40,241 @@ be configured in Keystone as necessary.
Further information on Fernet tokens is available in the :keystone-doc:`Keystone
documentation <admin/fernet-token-faq.html>`.
+
+Federated identity
+------------------
+
+Keystone allows users to be authenticated via identity federation. This means
+integrating OpenStack Keystone with an identity provider. The use of identity
+federation allows users to access OpenStack services without the necessity of
+an account in the OpenStack environment per se. The authentication is then
+off-loaded to the identity provider of the federation.
+
+Enabling identity federation requires configuration in multiple OpenStack
+systems, so it is easier for operators to let Kolla Ansible carry out this
+process.
+
+For the upstream documentation, please see
+:keystone-doc:`Configuring Keystone for Federation
+<admin/federation/configure_federation.html>`.
+
+Supported protocols
+~~~~~~~~~~~~~~~~~~~
+
+OpenStack supports both the OpenID Connect and SAML protocols for federated
+identity, but for now, Kolla Ansible supports only OpenID Connect.
+Therefore, if you want to use SAML in your environment, you will need
+to set it up manually or extend Kolla Ansible to support it.
+
+.. _setup-oidc-kolla-ansible:
+
+Setting up OpenID Connect via Kolla Ansible
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+First, you will need to register OpenStack (Keystone) as a Service Provider in
+your Identity Provider.
+
+After registering Keystone, you will need to add the Identity Provider
+configurations in your kolla-ansible globals configuration as the example
+below:
+
+.. code-block:: yaml
+
+ keystone_identity_providers:
+ - name: "myidp1"
+ openstack_domain: "my-domain"
+ protocol: "openid"
+ identifier: "https://accounts.google.com"
+ public_name: "Authenticate via myidp1"
+ attribute_mapping: "mappingId1"
+ metadata_folder: "path/to/metadata/folder"
+ certificate_file: "path/to/certificate/file.pem"
+
+ keystone_identity_mappings:
+ - name: "mappingId1"
+ file: "/full/qualified/path/to/mapping/json/file/to/mappingId1"
+
+Identity providers configurations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+name
+****
+
+The internal name of the Identity provider in OpenStack.
+
+openstack_domain
+****************
+
+The OpenStack domain that the Identity Provider belongs to.
+
+protocol
+********
+
+The federated protocol used by the IdP; e.g. openid or saml. Only OpenID
+Connect is currently supported.
+
+identifier
+**********
+
+The Identity provider URL; e.g. https://accounts.google.com .
+
+public_name
+***********
+
+The Identity provider public name that will be shown for users in the Horizon
+login page.
+
+attribute_mapping
+*****************
+
+The attribute mapping to be used for the Identity Provider. This mapping is
+expected to already exist in OpenStack or be configured in the
+`keystone_identity_mappings` property.
+
+metadata_folder
+***************
+
+Path to the folder containing all of the identity provider metadata as JSON
+files.
+
+The metadata folder must contain all of your Identity Provider configurations;
+each file is named after the Issuer (including its path), such as:
+
+.. code-block::
+
+ metadata_folder
+  |
+  - keycloak.example.org%2Fauth%2Frealms%2Fidp.client
+  |
+  - keycloak.example.org%2Fauth%2Frealms%2Fidp.conf
+  |
+  - keycloak.example.org%2Fauth%2Frealms%2Fidp.provider
+
+.. note::
+
+ The name of the file must be URL-encoded if needed. For example, if you have
+ an Issuer with ``/`` in the URL, then you need to escape it to ``%2F`` by
+ applying a URL escape in the file name.
+
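+For instance, one way to URL-encode an Issuer name (a sketch; any URL-encoding
+tool will do):
+
+.. code-block:: console
+
+   $ python3 -c "import urllib.parse; print(urllib.parse.quote('keycloak.example.org/auth/realms/idp', safe=''))"
+   keycloak.example.org%2Fauth%2Frealms%2Fidp
+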
+The content of each of these files must be JSON, as described below.
+
+``client``:
+
+The ``.client`` file handles the Service Provider credentials in the Issuer.
+
+During the first step, when you registered OpenStack as a
+Service Provider in the Identity Provider, you submitted a ``client_id`` and
+generated a ``client_secret``, so these are the values you must use in this
+JSON file.
+
+.. code-block:: json
+
+ {
+ "client_id":"",
+ "client_secret":""
+ }
+
+``conf``:
+
+This file contains JSON that overrides some of the OpenID Connect options. The
+options that can be overridden are listed in the
+`OpenID Connect Apache2 plugin documentation`_.
+
+.. _`OpenID Connect Apache2 plugin documentation`: https://github.com/zmartzone/mod_auth_openidc/wiki/Multiple-Providers#opclient-configuration
+
+If you do not want to override the config values, you can leave this file as
+an empty JSON file such as ``{}``.
+
+``provider``:
+
+This file contains all of the specifications of the Identity Provider. To
+simplify, you can just use the JSON returned by the Identity Provider's
+``.well-known`` endpoint:
+
+.. code-block:: json
+
+ {
+ "issuer": "https://accounts.google.com",
+ "authorization_endpoint": "https://accounts.google.com/o/oauth2/v2/auth",
+ "token_endpoint": "https://oauth2.googleapis.com/token",
+ "userinfo_endpoint": "https://openidconnect.googleapis.com/v1/userinfo",
+ "revocation_endpoint": "https://oauth2.googleapis.com/revoke",
+ "jwks_uri": "https://www.googleapis.com/oauth2/v3/certs",
+ "response_types_supported": [
+ "code",
+ "token",
+ "id_token",
+ "code token",
+ "code id_token",
+ "token id_token",
+ "code token id_token",
+ "none"
+ ],
+ "subject_types_supported": [
+ "public"
+ ],
+ "id_token_signing_alg_values_supported": [
+ "RS256"
+ ],
+ "scopes_supported": [
+ "openid",
+ "email",
+ "profile"
+ ],
+ "token_endpoint_auth_methods_supported": [
+ "client_secret_post",
+ "client_secret_basic"
+ ],
+ "claims_supported": [
+ "aud",
+ "email",
+ "email_verified",
+ "exp",
+ "family_name",
+ "given_name",
+ "iat",
+ "iss",
+ "locale",
+ "name",
+ "picture",
+ "sub"
+ ],
+ "code_challenge_methods_supported": [
+ "plain",
+ "S256"
+ ]
+ }
+
+certificate_file
+****************
+
+Path to the Identity Provider certificate file; the file must be named after
+the certificate key ID, for example:
+
+.. code-block::
+
+ - fb8ca5b7d8d9a5c6c6788071e866c6c40f3fc1f9.pem
+
+You can find the key ID in the ``jwks_uri`` document referenced by the
+Identity Provider's `.well-known/openid-configuration`, for example in
+`https://www.googleapis.com/oauth2/v3/certs`:
+
+.. code-block:: json
+
+ {
+ "keys": [
+ {
+ "e": "AQAB",
+ "use": "sig",
+ "n": "zK8PHf_6V3G5rU-viUOL1HvAYn7q--dxMoU...",
+ "kty": "RSA",
+ "kid": "fb8ca5b7d8d9a5c6c6788071e866c6c40f3fc1f9",
+ "alg": "RS256"
+ }
+ ]
+ }
+
+.. note::
+
+   The public key is different from the certificate; the file in this
+   configuration must be the Identity Provider's certificate and not the
+   Identity Provider's public key.
diff --git a/doc/source/reference/storage/external-ceph-guide.rst b/doc/source/reference/storage/external-ceph-guide.rst
index d119716c02..4a787459ea 100644
--- a/doc/source/reference/storage/external-ceph-guide.rst
+++ b/doc/source/reference/storage/external-ceph-guide.rst
@@ -212,3 +212,68 @@ type ``default_share_type``, please see :doc:`Manila in Kolla `.
For more details on the CephFS Native driver, please see
:manila-doc:`CephFS Native driver <admin/cephfs_driver.html>`.
+
+RadosGW
+-------
+
+As of the Wallaby 12.0.0 release, Kolla Ansible supports integration with Ceph
+RadosGW. This includes:
+
+* Registration of Swift-compatible endpoints in Keystone
+* Load balancing across RadosGW API servers using HAProxy
+
+See the `Ceph documentation
+`__ for further information,
+including changes that must be applied to the Ceph cluster configuration.
+
+Enable Ceph RadosGW integration:
+
+.. code-block:: yaml
+
+ enable_ceph_rgw: true
+
+Keystone integration
+====================
+
+A Keystone user and endpoints are registered by default; however, this may be
+avoided by setting ``enable_ceph_rgw_keystone`` to ``false``. If registration
+is enabled, the username is defined via ``ceph_rgw_keystone_user``, and this
+defaults to ``ceph_rgw``. The hostnames used by the endpoints default to
+``ceph_rgw_external_fqdn`` and ``ceph_rgw_internal_fqdn`` for the public and
+internal endpoints respectively. These default to ``kolla_external_fqdn`` and
+``kolla_internal_fqdn`` respectively. The port used by the endpoints is defined
+via ``ceph_rgw_port``, and defaults to 6780.
+
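+For example, to keep registration enabled while overriding the user and port
+(the values shown are the defaults noted above):
+
+.. code-block:: yaml
+
+   enable_ceph_rgw_keystone: true
+   ceph_rgw_keystone_user: "ceph_rgw"
+   ceph_rgw_port: "6780"
+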
+By default, RadosGW supports both the Swift and S3 APIs, but it is not
+completely compatible with the Swift API. The ``ceph_rgw_compatibility``
+option can enable or disable complete RadosGW compatibility with the Swift
+API. After changing the value, run ``kolla-ansible deploy`` for the change to
+take effect.
+
+By default, the RadosGW endpoint URL does not include the project (account) ID.
+This prevents cross-project and public object access. This can be resolved by
+setting ``ceph_rgw_account_in_url`` to ``true``.
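+
+Both options can be set in ``globals.yml``; for example (values shown are
+illustrative):
+
+.. code-block:: yaml
+
+   ceph_rgw_compatibility: false
+   ceph_rgw_account_in_url: true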
+
+Load balancing
+==============
+
+.. note::
+
+ Users of Ceph RadosGW can generate very high volumes of traffic. It is
+ advisable to use a separate load balancer for RadosGW for anything other
+ than small or lightly utilised RadosGW deployments.
+
+Load balancing is enabled by default; however, this may be avoided by setting
+``enable_ceph_rgw_loadbalancer`` to ``false``. If using load balancing, the
+RadosGW hosts and ports must be configured. For example:
+
+.. code-block:: yaml
+
+ ceph_rgw_hosts:
+ - rgw-host-1:6780
+     - rgw-host-2:6780
+
+If using hostnames, these should be resolvable from the host running HAProxy.
+Alternatively IP addresses may be used.
+
+The HAProxy frontend port is defined via ``ceph_rgw_port``, and defaults to
+6780.
diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml
index 70780ad00f..091ebd01df 100644
--- a/etc/kolla/globals.yml
+++ b/etc/kolla/globals.yml
@@ -289,6 +289,7 @@
#enable_ceilometer_ipmi: "no"
#enable_cells: "no"
#enable_central_logging: "no"
+#enable_ceph_rgw: "no"
#enable_chrony: "yes"
#enable_cinder: "no"
#enable_cinder_backup: "yes"
@@ -674,6 +675,7 @@
# Prometheus
############
#enable_prometheus_server: "{{ enable_prometheus | bool }}"
+#enable_prometheus_libvirt_exporter: "no"
#enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}"
#enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}"
#enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}"
diff --git a/etc/kolla/passwords.yml b/etc/kolla/passwords.yml
index d1319f2ae9..033cd41e50 100644
--- a/etc/kolla/passwords.yml
+++ b/etc/kolla/passwords.yml
@@ -9,6 +9,11 @@
rbd_secret_uuid:
cinder_rbd_secret_uuid:
+############
+# cASO
+############
+caso_keystone_password:
+
###################
# Database options
####################
@@ -256,3 +261,13 @@ redis_master_password:
####################
prometheus_mysql_exporter_database_password:
prometheus_alertmanager_password:
+
+####################
+# Ceph RadosGW options
+####################
+ceph_rgw_keystone_password:
+
+###############################
+# OpenStack identity federation
+###############################
+keystone_federation_openid_crypto_password:
diff --git a/releasenotes/notes/add-keystone-support-to-openid-connect-859b12492f8347fe.yaml b/releasenotes/notes/add-keystone-support-to-openid-connect-859b12492f8347fe.yaml
new file mode 100644
index 0000000000..956c3cb5cc
--- /dev/null
+++ b/releasenotes/notes/add-keystone-support-to-openid-connect-859b12492f8347fe.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+    Adds support for the OpenID Connect authentication protocol in Keystone
+    and enables both ID and access token authentication flows.
diff --git a/releasenotes/notes/add-prometheus-libvirt-exporter-b05a3a9c08db517c.yaml b/releasenotes/notes/add-prometheus-libvirt-exporter-b05a3a9c08db517c.yaml
new file mode 100644
index 0000000000..39e0174b7f
--- /dev/null
+++ b/releasenotes/notes/add-prometheus-libvirt-exporter-b05a3a9c08db517c.yaml
@@ -0,0 +1,12 @@
+---
+features:
+ - |
+    Deploys and configures the prometheus-libvirt-exporter image
+    (https://github.com/kumina/libvirt_exporter) as part of the
+ Prometheus monitoring stack.
+upgrade:
+ - |
+ The libvirt sockets in the Nova libvirt container have been moved to a
+ docker volume to support monitoring of libvirt. As part of this upgrade
+ the Nova libvirt containers will be restarted. This will affect users
+ of instances running in those containers.
diff --git a/releasenotes/notes/ceph-rgw-062e0544a004f7b1.yaml b/releasenotes/notes/ceph-rgw-062e0544a004f7b1.yaml
new file mode 100644
index 0000000000..b17de1205a
--- /dev/null
+++ b/releasenotes/notes/ceph-rgw-062e0544a004f7b1.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Adds support for integration with Ceph RadosGW.
diff --git a/releasenotes/notes/prometheus-active-passive-be4fb033493e958f.yaml b/releasenotes/notes/prometheus-active-passive-be4fb033493e958f.yaml
new file mode 100644
index 0000000000..9af437157f
--- /dev/null
+++ b/releasenotes/notes/prometheus-active-passive-be4fb033493e958f.yaml
@@ -0,0 +1,10 @@
+---
+upgrade:
+ - |
+    Prometheus has been switched to active/passive mode. See bug `1928193
+    <https://launchpad.net/bugs/1928193>`__.
+fixes:
+ - |
+    Fixes an issue with misaligned data points in Grafana when load balancing
+    over multiple Prometheus server instances. See bug `1928193
+    <https://launchpad.net/bugs/1928193>`__.
diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2
index bccee85739..14047689d6 100644
--- a/tests/templates/globals-default.j2
+++ b/tests/templates/globals-default.j2
@@ -124,6 +124,11 @@ enable_cinder: "yes"
glance_backend_ceph: "yes"
cinder_backend_ceph: "yes"
nova_backend_ceph: "yes"
+enable_ceph_rgw: {{ not is_upgrade or previous_release != 'victoria' }}
+ceph_rgw_hosts:
+{% for host in hostvars %}
+ - {{ hostvars[host]['ansible_host'] }}:6780
+{% endfor %}
{% endif %}
{% if tls_enabled %}