diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c48a4e795..8b265aaf0 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -110,7 +110,7 @@ jobs: ./foremanctl deploy --add-feature foreman_azure_rm --add-feature foreman_google - name: Run tests run: | - ./forge test --pytest-args="--certificate-source=${{ matrix.certificate_source }} --database-mode=${{ matrix.database }}" + ./forge test --pytest-args="--certificate-source=${{ matrix.certificate_source }} --database-mode=${{ matrix.database }} --user=foremanctl" - name: Run smoker run: | ./forge smoker @@ -219,7 +219,7 @@ jobs: ./foremanctl deploy --add-feature foreman_azure_rm --add-feature foreman_google - name: Stop services run: - vagrant ssh quadlet -- sudo systemctl stop foreman.target + vagrant ssh quadlet -- sudo systemctl --machine=foremanctl@ --user stop foreman.target - name: Configure upgrade version run: | sed -i '/container_tag_stream:/ s/:.*/: "${{ matrix.upgrade_to }}"/' src/vars/images.yml @@ -231,7 +231,7 @@ jobs: ./foremanctl deploy - name: Run tests run: | - ./forge test + ./forge test --pytest-args="--user=foremanctl" - name: Generate sos reports if: ${{ always() }} run: ./forge sos diff --git a/development/playbooks/deploy-dev/deploy-dev.yaml b/development/playbooks/deploy-dev/deploy-dev.yaml index 738381cd4..402924770 100644 --- a/development/playbooks/deploy-dev/deploy-dev.yaml +++ b/development/playbooks/deploy-dev/deploy-dev.yaml @@ -1,4 +1,14 @@ --- +- name: Setup rootless user environment + hosts: "{{ target_host if target_host is defined and target_host != '' else 'quadlet' }}" + become: true + roles: + - role: rootless_user + tasks: + - name: Map rootless_user_xdg_runtime_dir to foremanctl namespace + ansible.builtin.set_fact: + foremanctl_xdg_runtime_dir: "{{ rootless_user_xdg_runtime_dir }}" + - name: Deploy Foreman Development Environment hosts: "{{ target_host if target_host is defined and target_host != '' else 'quadlet' }}" become: 
true diff --git a/development/playbooks/remote-database/remote-database.yaml b/development/playbooks/remote-database/remote-database.yaml index bcb489eb5..77857e8f5 100644 --- a/development/playbooks/remote-database/remote-database.yaml +++ b/development/playbooks/remote-database/remote-database.yaml @@ -1,10 +1,22 @@ --- +- name: Setup rootless user environment + hosts: + - database + become: true + roles: + - role: rootless_user + tasks: + - name: Map rootless_user_xdg_runtime_dir to foremanctl namespace + ansible.builtin.set_fact: + foremanctl_xdg_runtime_dir: "{{ rootless_user_xdg_runtime_dir }}" + - name: Setup remote database hosts: - database become: true vars_files: - "../../../src/vars/database.yml" + - "../../../src/vars/base.yaml" roles: - role: pre_install - role: postgresql diff --git a/docs/certificates.md b/docs/certificates.md index 6f189f9b3..63f0ab2b8 100644 --- a/docs/certificates.md +++ b/docs/certificates.md @@ -42,15 +42,19 @@ foremanctl deploy --certificate-source=installer After deployment, certificates are available at: **Default Source:** -- CA Certificate: `/root/certificates/certs/ca.crt` -- Server Certificate: `/root/certificates/certs/.crt` -- Client Certificate: `/root/certificates/certs/-client.crt` +- CA Certificate: `/var/lib/foremanctl/certificates/certs/ca.crt` +- Server Certificate: `/var/lib/foremanctl/certificates/certs/.crt` +- Client Certificate: `/var/lib/foremanctl/certificates/certs/-client.crt` **Installer Source:** - CA Certificate: `/root/ssl-build/katello-default-ca.crt` - Server Certificate: `/root/ssl-build//-apache.crt` - Client Certificate: `/root/ssl-build//-foreman-client.crt` +**Note for Rootless Deployments:** +- Default certificates are owned by `foremanctl:foremanctl` user and group +- Installer certificates remain in `/root/ssl-build/` with group ownership and permissions automatically configured during deployment to allow the `foremanctl` user to read them + ### Current Limitations - Only supports single 
hostname (no multiple DNS names) @@ -99,6 +103,7 @@ Certificate paths are defined in source-specific variable files: **Default Source (`src/vars/default_certificates.yml`):** ```yaml +certificates_ca_directory: /var/lib/foremanctl/certificates ca_certificate: "{{ certificates_ca_directory }}/certs/ca.crt" server_certificate: "{{ certificates_ca_directory }}/certs/{{ ansible_facts['fqdn'] }}.crt" client_certificate: "{{ certificates_ca_directory }}/certs/{{ ansible_facts['fqdn'] }}-client.crt" @@ -106,9 +111,10 @@ client_certificate: "{{ certificates_ca_directory }}/certs/{{ ansible_facts['fqd **Installer Source (`src/vars/installer_certificates.yml`):** ```yaml -ca_certificate: "/root/ssl-build/katello-default-ca.crt" -server_certificate: "/root/ssl-build/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-apache.crt" -client_certificate: "/root/ssl-build/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-foreman-client.crt" +certificates_ca_directory: /root/ssl-build +ca_certificate: "{{ certificates_ca_directory }}/katello-default-ca.crt" +server_certificate: "{{ certificates_ca_directory }}/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-apache.crt" +client_certificate: "{{ certificates_ca_directory }}/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-foreman-client.crt" ``` #### Integration with Deployment @@ -138,12 +144,14 @@ The `certificate_checks` role uses `foreman-certificate-check` binary to validat **Directory Structure:** ``` -/root/certificates/ +/var/lib/foremanctl/certificates/ ├── certs/ # Public certificates ├── private/ # Private keys and passwords └── requests/ # Certificate signing requests ``` +All certificate files and directories are owned by `foremanctl:foremanctl` to support rootless Podman deployments. 
+ **OpenSSL Configuration:** - Custom configuration template supports SAN extensions - Single DNS entry per certificate: `subjectAltName = DNS:{{ certificates_hostname }}` diff --git a/src/playbooks/deploy/deploy.yaml b/src/playbooks/deploy/deploy.yaml index 89180f1bc..683728e01 100644 --- a/src/playbooks/deploy/deploy.yaml +++ b/src/playbooks/deploy/deploy.yaml @@ -1,5 +1,16 @@ --- -- name: Setup quadlet demo machine +- name: Setup rootless user environment + hosts: + - quadlet + become: true + roles: + - role: rootless_user + tasks: + - name: Map rootless_user_xdg_runtime_dir to foremanctl namespace + ansible.builtin.set_fact: + foremanctl_xdg_runtime_dir: "{{ rootless_user_xdg_runtime_dir }}" + +- name: Deploy Foreman services hosts: - quadlet become: true diff --git a/src/playbooks/migrate-to-rootless/metadata.obsah.yaml b/src/playbooks/migrate-to-rootless/metadata.obsah.yaml new file mode 100644 index 000000000..110dbdf2c --- /dev/null +++ b/src/playbooks/migrate-to-rootless/metadata.obsah.yaml @@ -0,0 +1,15 @@ +--- +help: | + Migrate existing rootful Foreman deployment to rootless + + WARNING: This is a destructive operation that will: + - Stop all running services + - Transfer ownership of data volumes + - Remove system-scoped systemd units + - Recreate everything in user scope + + PREREQUISITES: + - Backup all data before running this migration + - Ensure no active users or operations are running + - Test this in a non-production environment first +... diff --git a/src/playbooks/migrate-to-rootless/migrate-to-rootless.yaml b/src/playbooks/migrate-to-rootless/migrate-to-rootless.yaml new file mode 100644 index 000000000..856ded710 --- /dev/null +++ b/src/playbooks/migrate-to-rootless/migrate-to-rootless.yaml @@ -0,0 +1,387 @@ +--- +# Migration Playbook: Rootful to Rootless Podman Deployment +# +# This playbook migrates an existing rootful Foreman Quadlet deployment to rootless. 
+# +# WARNING: This is a destructive operation that will: +# - Stop all running services +# - Transfer ownership of data volumes +# - Remove system-scoped systemd units +# - Recreate everything in user scope +# +# PREREQUISITES: +# - Backup all data before running this migration +# - Ensure no active users or operations are running +# - Test this in a non-production environment first +# +# USAGE: +# ansible-playbook -i inventory migrate-to-rootless.yaml +# +- name: Migrate Foreman from rootful to rootless deployment + hosts: + - quadlet + become: true + vars_files: + - "../../vars/defaults.yml" + - "../../vars/base.yaml" + - "../../roles/postgresql/defaults/main.yml" + - "../../roles/redis/defaults/main.yml" + - "../../roles/pulp/defaults/main.yaml" + vars: + migration_backup_dir: "/var/backups/foreman-migration-{{ ansible_date_time.iso8601_basic_short }}" + # Storage directories with their container-specific UIDs/GIDs + # UIDs are from the container images, not the host + migration_data_volumes: + - path: "{{ postgresql_data_dir }}" + uid: "{{ postgresql_container_uid }}" + gid: "{{ postgresql_container_gid }}" + - path: "{{ redis_data_dir }}" + uid: "{{ redis_container_uid }}" + gid: "{{ redis_container_gid }}" + - path: /var/lib/pulp + uid: "{{ pulp_container_uid }}" + gid: "{{ pulp_container_gid }}" + # Legacy variable for backwards compatibility + migration_data_paths: "{{ migration_data_volumes | map(attribute='path') | list }}" + + tasks: + - name: Verify this is a rootful deployment + ansible.builtin.stat: + path: /etc/containers/systemd/foreman.container + register: migration_rootful_check + failed_when: not migration_rootful_check.stat.exists + + - name: Display migration warning + ansible.builtin.pause: + prompt: | + + ================================================================ + WARNING: DESTRUCTIVE MIGRATION IN PROGRESS + ================================================================ + + This will migrate your Foreman deployment from rootful to 
+ rootless Podman containers. + + ALL SERVICES WILL BE STOPPED during migration. + + Backup directory: {{ migration_backup_dir }} + + Press Ctrl+C to abort, or Enter to continue... + ================================================================ + + - name: Create backup directory + ansible.builtin.file: + path: "{{ migration_backup_dir }}" + state: directory + mode: '0700' + + # ============================================================ + # Phase 1: Stop and backup rootful deployment + # ============================================================ + + - name: Stop foreman.target (rootful) + ansible.builtin.systemd: + name: foreman.target + state: stopped + failed_when: false + register: migration_stop_target + + - name: Stop all Foreman-related services (rootful) + ansible.builtin.systemd: + name: "{{ item }}" + state: stopped + loop: + - foreman + - candlepin + - pulp-api + - pulp-content + - pulp-worker.target + - postgresql + - redis + - foreman-proxy + failed_when: false + + - name: Backup rootful quadlet files + ansible.builtin.copy: + src: /etc/containers/systemd/ + dest: "{{ migration_backup_dir }}/quadlets/" + remote_src: true + mode: '0600' + + - name: Backup rootful systemd units + ansible.builtin.shell: | + mkdir -p {{ migration_backup_dir }}/systemd/ + cp -a /etc/systemd/system/foreman* {{ migration_backup_dir }}/systemd/ 2>/dev/null || true + cp -a /etc/systemd/system/pulp* {{ migration_backup_dir }}/systemd/ 2>/dev/null || true + cp -a /etc/systemd/system/dynflow* {{ migration_backup_dir }}/systemd/ 2>/dev/null || true + args: + executable: /bin/bash + changed_when: true + + - name: List rootful Podman secrets + ansible.builtin.command: podman secret ls --format json + register: migration_rootful_secrets + changed_when: false + + - name: Save secret list to backup + ansible.builtin.copy: + content: "{{ migration_rootful_secrets.stdout }}" + dest: "{{ migration_backup_dir }}/secrets.json" + mode: '0600' + + # 
============================================================ + # Phase 2: Create rootless user and setup + # ============================================================ + + - name: Setup rootless user environment (creates user/group with auto-allocated matching UID/GID) + ansible.builtin.include_role: + name: rootless_user + + - name: Map rootless_user_xdg_runtime_dir to foremanctl namespace + ansible.builtin.set_fact: + foremanctl_xdg_runtime_dir: "{{ rootless_user_xdg_runtime_dir }}" + + # ============================================================ + # Phase 3: Migrate data volumes + # ============================================================ + + - name: Get current ownership of data directories + ansible.builtin.stat: + path: "{{ item }}" + loop: "{{ migration_data_paths }}" + register: migration_data_stat + failed_when: false + + - name: Display volume migration plan + ansible.builtin.debug: + msg: | + Migrating volumes with podman unshare (mapping to container UIDs): + {% for volume in migration_data_volumes %} + - {{ volume.path }}: will be owned by container UID {{ volume.uid }}:{{ volume.gid }} + {% endfor %} + + - name: Change ownership of data directories using podman unshare + ansible.builtin.shell: | + cd /tmp + sudo -u {{ foremanctl_user }} XDG_RUNTIME_DIR={{ foremanctl_xdg_runtime_dir }} \ + podman unshare chown -R {{ item.uid }}:{{ item.gid }} {{ item.path }} + args: + executable: /bin/bash + loop: "{{ migration_data_volumes }}" + when: migration_data_stat.results | selectattr('stat.exists') | list | length > 0 + changed_when: true + + - name: Update directory ownership to rootless user + ansible.builtin.file: + path: "{{ item }}" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0755' + recurse: false + state: directory + loop: "{{ migration_data_paths }}" + + # ============================================================ + # Phase 4: Remove rootful configuration + # 
============================================================ + + - name: Disable rootful services + ansible.builtin.systemd: + name: "{{ item }}" + enabled: false + loop: + - foreman + - candlepin + - pulp-api + - pulp-content + - pulp-worker.target + - postgresql + - redis + - foreman-proxy + - foreman.target + failed_when: false + + - name: Remove rootful quadlet files + ansible.builtin.file: + path: "/etc/containers/systemd/{{ item }}" + state: absent + loop: + - foreman.container + - candlepin.container + - pulp-api.container + - pulp-content.container + - pulp-worker@.container + - postgresql.container + - redis.container + - foreman-proxy.container + - dynflow-sidekiq@.container + - foreman-recurring@*.container + + - name: Remove rootful systemd units + ansible.builtin.file: + path: "/etc/systemd/system/{{ item }}" + state: absent + loop: + - foreman.target + - pulp-worker.target + - foreman-recurring@*.timer + - foreman-recurring@*.service + - dynflow-sidekiq@*.service + + - name: Remove rootful Podman secrets + ansible.builtin.shell: | + for secret in $(podman secret ls --format {% raw %}'{{.Name}}'{% endraw %}); do + podman secret rm "$secret" 2>/dev/null || true + done + args: + executable: /bin/bash + changed_when: true + + - name: Reload systemd daemon (system scope) + ansible.builtin.systemd: + daemon_reload: true + + # ============================================================ + # Phase 5: Deploy rootless configuration + # ============================================================ + + - name: Run rootless deployment + ansible.builtin.include_role: + name: "{{ item }}" + loop: + - certificates + - postgresql + - redis + - candlepin + - pulp + - foreman + - systemd_target + - foreman_proxy + + # ============================================================ + # Phase 6: Verification + # ============================================================ + + - name: Wait for services to stabilize + ansible.builtin.pause: + seconds: 10 + + - name: Gather user 
systemd service facts + become: true + become_user: "{{ foremanctl_user }}" + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + ansible.builtin.service_facts: + + - name: Verify rootless services are running + ansible.builtin.assert: + that: + - "'{{ item }}.service' in ansible_facts.services" + - "ansible_facts.services['{{ item }}.service'].state == 'running'" + fail_msg: "Service {{ item }} is not running" + success_msg: "Service {{ item }} is active" + loop: + - foreman + - postgresql + - redis + - candlepin + - pulp-api + - pulp-content + + - name: Get user info for migration summary + ansible.builtin.getent: + database: passwd + key: "{{ foremanctl_user }}" + + - name: Set UID/GID facts for display + ansible.builtin.set_fact: + foremanctl_uid: "{{ ansible_facts['getent_passwd'][foremanctl_user][1] }}" + foremanctl_gid: "{{ ansible_facts['getent_passwd'][foremanctl_user][2] }}" + + - name: Display migration summary + ansible.builtin.debug: + msg: | + + ================================================================ + MIGRATION COMPLETED SUCCESSFULLY + ================================================================ + + Foreman is now running in rootless mode. 
+ + Service user: {{ foremanctl_user }} (UID {{ foremanctl_uid }}) + Quadlets: {{ foremanctl_quadlet_dir }} + Systemd units: {{ foremanctl_systemd_user_dir }} + + Backup directory: {{ migration_backup_dir }} + + Verify services: + systemctl --machine={{ foremanctl_user }}@ --user status foreman.target + + View logs: + journalctl --machine={{ foremanctl_user }}@ --user -u foreman -f + + ================================================================ + + - name: Save migration report + ansible.builtin.copy: + content: | + Foreman Rootful to Rootless Migration Report + ============================================= + + Migration Date: {{ ansible_date_time.iso8601 }} + Hostname: {{ ansible_facts['fqdn'] }} + + Service User: {{ foremanctl_user }} (UID {{ foremanctl_uid }}) + Service Group: {{ foremanctl_group }} (GID {{ foremanctl_gid }}) + + Quadlet Directory: {{ foremanctl_quadlet_dir }} + Systemd User Directory: {{ foremanctl_systemd_user_dir }} + XDG_RUNTIME_DIR: {{ foremanctl_xdg_runtime_dir }} + + Migrated Data Volumes: + {% for path in migration_data_paths %} + - {{ path }} + {% endfor %} + + Backup Location: {{ migration_backup_dir }} + + Verified Services (see verification phase): + {% for svc in ['foreman', 'postgresql', 'redis', 'candlepin', 'pulp-api', 'pulp-content'] %} + - {{ svc }}: running + {% endfor %} + + Verification Commands: + ---------------------- + + # Check service status + systemctl --machine={{ foremanctl_user }}@ --user status foreman.target + + # List containers + sudo -u {{ foremanctl_user }} podman ps + + # View logs + journalctl --user --machine={{ foremanctl_user }}@ -u foreman -f + + # Check linger status + loginctl show-user {{ foremanctl_user }} + + Rollback Instructions: + ---------------------- + + If you need to rollback to rootful deployment: + + 1. Stop rootless services: + systemctl --machine={{ foremanctl_user }}@ --user stop foreman.target + + 2. 
Restore rootful quadlets: + sudo cp -a {{ migration_backup_dir }}/quadlets/* /etc/containers/systemd/ + sudo cp -a {{ migration_backup_dir }}/systemd/* /etc/systemd/system/ + + 3. Reload and start: + sudo systemctl daemon-reload + sudo systemctl start foreman.target + + IMPORTANT: Keep the backup directory until you've verified the migration is stable. + dest: "{{ migration_backup_dir }}/MIGRATION_REPORT.txt" + mode: '0600' +... diff --git a/src/playbooks/pull-images/pull-images.yaml b/src/playbooks/pull-images/pull-images.yaml index fff77f6e7..2daee2002 100644 --- a/src/playbooks/pull-images/pull-images.yaml +++ b/src/playbooks/pull-images/pull-images.yaml @@ -1,4 +1,15 @@ --- +- name: Setup rootless user environment + hosts: + - quadlet + become: true + roles: + - role: rootless_user + tasks: + - name: Map rootless_user_xdg_runtime_dir to foremanctl namespace + ansible.builtin.set_fact: + foremanctl_xdg_runtime_dir: "{{ rootless_user_xdg_runtime_dir }}" + - name: Pull images hosts: - quadlet @@ -7,19 +18,27 @@ - "../../vars/flavors/{{ flavor }}.yml" - "../../vars/images.yml" - "../../vars/base.yaml" - become: true tasks: - name: Install podman + become: true ansible.builtin.package: name: - podman - name: Pull an image + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" containers.podman.podman_image: name: "{{ item }}" loop: "{{ images }}" - name: Pull foreman_proxy images + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" containers.podman.podman_image: name: "{{ item }}" loop: "{{ foreman_proxy_images }}" @@ -27,6 +46,10 @@ - "'foreman-proxy' in enabled_features" - name: Pull database images + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" containers.podman.podman_image: name: "{{ item }}" loop: "{{ database_images }}" diff --git 
a/src/roles/candlepin/handlers/main.yml b/src/roles/candlepin/handlers/main.yml index c416304a6..2b952f219 100644 --- a/src/roles/candlepin/handlers/main.yml +++ b/src/roles/candlepin/handlers/main.yml @@ -1,5 +1,10 @@ --- - name: Restart candlepin + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" ansible.builtin.systemd: name: candlepin state: restarted + scope: user diff --git a/src/roles/candlepin/tasks/artemis.yml b/src/roles/candlepin/tasks/artemis.yml index 75cb91e40..ea22d680d 100644 --- a/src/roles/candlepin/tasks/artemis.yml +++ b/src/roles/candlepin/tasks/artemis.yml @@ -8,57 +8,63 @@ ansible.builtin.set_fact: candlepin_artemis_client_dn: "{{ candlepin_openssl_response.stdout | replace('subject=', '') }}" -- name: Create Candlepin broker.xml - containers.podman.podman_secret: - state: present - name: candlepin-artemis-broker-xml - data: "{{ lookup('ansible.builtin.template', 'broker.xml.j2') }}" - labels: - filename: broker.xml - app: artemis - notify: - - Restart candlepin +- name: Configure Artemis secrets as rootless user + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" + block: + - name: Create Candlepin broker.xml + containers.podman.podman_secret: + state: present + name: candlepin-artemis-broker-xml + data: "{{ lookup('ansible.builtin.template', 'broker.xml.j2') }}" + labels: + filename: broker.xml + app: artemis + notify: + - Restart candlepin -- name: Create Tomcat login config - containers.podman.podman_secret: - state: present - name: candlepin-artemis-login-config - data: "{{ lookup('ansible.builtin.template', 'login.config') }}" - labels: - filename: login.config - app: artemis - notify: - - Restart candlepin + - name: Create Tomcat login config + containers.podman.podman_secret: + state: present + name: candlepin-artemis-login-config + data: "{{ lookup('ansible.builtin.template', 'login.config') 
}}" + labels: + filename: login.config + app: artemis + notify: + - Restart candlepin -- name: Create Tomcat jaas.conf - containers.podman.podman_secret: - state: present - name: candlepin-artemis-jaas-conf - data: "{{ lookup('ansible.builtin.template', 'jaas.conf') }}" - labels: - filename: jaas.conf - app: artemis - notify: - - Restart candlepin + - name: Create Tomcat jaas.conf + containers.podman.podman_secret: + state: present + name: candlepin-artemis-jaas-conf + data: "{{ lookup('ansible.builtin.template', 'jaas.conf') }}" + labels: + filename: jaas.conf + app: artemis + notify: + - Restart candlepin -- name: Create Tomcat cert-roles.properties - containers.podman.podman_secret: - state: present - name: candlepin-artemis-cert-roles-properties - data: "{{ lookup('ansible.builtin.template', 'cert-roles.properties') }}" - labels: - filename: cert-roles.properties - app: artemis - notify: - - Restart candlepin + - name: Create Tomcat cert-roles.properties + containers.podman.podman_secret: + state: present + name: candlepin-artemis-cert-roles-properties + data: "{{ lookup('ansible.builtin.template', 'cert-roles.properties') }}" + labels: + filename: cert-roles.properties + app: artemis + notify: + - Restart candlepin -- name: Create Tomcat cert-users.properties - containers.podman.podman_secret: - state: present - name: candlepin-artemis-cert-users-properties - data: "{{ lookup('ansible.builtin.template', 'cert-users.properties.j2') }}" - labels: - filename: cert-users.properties - app: artemis - notify: - - Restart candlepin + - name: Create Tomcat cert-users.properties + containers.podman.podman_secret: + state: present + name: candlepin-artemis-cert-users-properties + data: "{{ lookup('ansible.builtin.template', 'cert-users.properties.j2') }}" + labels: + filename: cert-users.properties + app: artemis + notify: + - Restart candlepin diff --git a/src/roles/candlepin/tasks/certs.yml b/src/roles/candlepin/tasks/certs.yml index c90468c49..a6af7ca9d 100644 --- 
a/src/roles/candlepin/tasks/certs.yml +++ b/src/roles/candlepin/tasks/certs.yml @@ -3,7 +3,10 @@ community.crypto.openssl_pkcs12: action: export passphrase: "{{ candlepin_keystore_password }}" - path: "/root/candlepin.keystore" + path: "{{ candlepin_keystore_path }}" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0600' friendly_name: 'tomcat' privatekey_path: "{{ candlepin_tomcat_key }}" certificate_path: "{{ candlepin_tomcat_certificate }}" @@ -14,66 +17,75 @@ community.crypto.openssl_pkcs12: action: export passphrase: "{{ candlepin_keystore_password }}" - path: "/root/candlepin.truststore" + path: "{{ candlepin_truststore_path }}" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0600' friendly_name: 'artemis-client' privatekey_path: "{{ candlepin_client_key }}" certificate_path: "{{ candlepin_client_certificate }}" other_certificates: "{{ candlepin_ca_certificate }}" state: present -- name: Create the podman secret for Candlepin CA certificate - containers.podman.podman_secret: - state: present - name: candlepin-ca-cert - path: "{{ candlepin_ca_certificate }}" - labels: - app: candlepin - notify: - - Restart candlepin - - name: Decrypt Candlepin CA key ansible.builtin.command: openssl pkey -in "{{ candlepin_ca_key }}" -passin "file:{{ candlepin_ca_key_password }}" register: _candlepin_ca_key changed_when: false -- name: Create the podman secret for Candlepin CA key - containers.podman.podman_secret: - state: present - name: candlepin-ca-key - data: "{{ _candlepin_ca_key.stdout }}" - labels: - app: candlepin - notify: - - Restart candlepin +- name: Configure Candlepin certificate secrets as rootless user + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" + block: + - name: Create the podman secret for Candlepin CA certificate + containers.podman.podman_secret: + state: present + name: candlepin-ca-cert + path: "{{ candlepin_ca_certificate 
}}" + labels: + app: candlepin + notify: + - Restart candlepin -- name: Create the podman secret for Tomcat keystore - containers.podman.podman_secret: - state: present - name: candlepin-tomcat-keystore - path: "/root/candlepin.keystore" - labels: - filename: candlepin.keystore - app: tomcat - notify: - - Restart candlepin + - name: Create the podman secret for Candlepin CA key + containers.podman.podman_secret: + state: present + name: candlepin-ca-key + data: "{{ _candlepin_ca_key.stdout }}" + labels: + app: candlepin + notify: + - Restart candlepin -- name: Create the podman secret for Tomcat truststore - containers.podman.podman_secret: - state: present - name: candlepin-tomcat-truststore - path: "/root/candlepin.truststore" - labels: - filename: candlepin.truststore - app: tomcat - notify: - - Restart candlepin + - name: Create the podman secret for Tomcat keystore + containers.podman.podman_secret: + state: present + name: candlepin-tomcat-keystore + path: "{{ candlepin_keystore_path }}" + labels: + filename: candlepin.keystore + app: tomcat + notify: + - Restart candlepin -- name: Create the podman secret for the keystore password - containers.podman.podman_secret: - state: present - name: candlepin-tomcat-keystore-password - data: "{{ candlepin_keystore_password }}" - labels: - app: tomcat - notify: - - Restart candlepin + - name: Create the podman secret for Tomcat truststore + containers.podman.podman_secret: + state: present + name: candlepin-tomcat-truststore + path: "{{ candlepin_truststore_path }}" + labels: + filename: candlepin.truststore + app: tomcat + notify: + - Restart candlepin + + - name: Create the podman secret for the keystore password + containers.podman.podman_secret: + state: present + name: candlepin-tomcat-keystore-password + data: "{{ candlepin_keystore_password }}" + labels: + app: tomcat + notify: + - Restart candlepin diff --git a/src/roles/candlepin/tasks/main.yml b/src/roles/candlepin/tasks/main.yml index a3c1f88f0..4dd449b0d 
100644 --- a/src/roles/candlepin/tasks/main.yml +++ b/src/roles/candlepin/tasks/main.yml @@ -3,9 +3,11 @@ ansible.builtin.file: path: "{{ item }}" state: directory - owner: root - group: root + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: '0755' + # XXX: Check if it's needed + # setype: container_var_lib_t loop: - /var/log/candlepin - /var/log/tomcat @@ -14,92 +16,100 @@ ansible.builtin.include_tasks: file: certs.yml -- name: Create Candlepin configuration - containers.podman.podman_secret: - state: present - name: candlepin-candlepin-conf - data: "{{ lookup('ansible.builtin.template', 'candlepin.conf.j2') }}" - labels: - filename: candlepin.conf - app: candlepin - notify: - - Restart candlepin +- name: Configure Candlepin container as rootless user + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" + block: + - name: Create Candlepin configuration + containers.podman.podman_secret: + state: present + name: candlepin-candlepin-conf + data: "{{ lookup('ansible.builtin.template', 'candlepin.conf.j2') }}" + labels: + filename: candlepin.conf + app: candlepin + notify: + - Restart candlepin -- name: Create Tomcat server.xml - containers.podman.podman_secret: - state: present - name: candlepin-tomcat-server-xml - data: "{{ lookup('ansible.builtin.template', 'server.xml.j2') }}" - labels: - filename: server.xml - app: tomcat - notify: - - Restart candlepin + - name: Create Tomcat server.xml + containers.podman.podman_secret: + state: present + name: candlepin-tomcat-server-xml + data: "{{ lookup('ansible.builtin.template', 'server.xml.j2') }}" + labels: + filename: server.xml + app: tomcat + notify: + - Restart candlepin -- name: Create Tomcat server configuration - containers.podman.podman_secret: - state: present - name: candlepin-tomcat-conf - data: "{{ lookup('ansible.builtin.template', 'tomcat.conf') }}" - labels: - filename: tomcat.conf - app: tomcat - notify: - - 
Restart candlepin + - name: Create Tomcat server configuration + containers.podman.podman_secret: + state: present + name: candlepin-tomcat-conf + data: "{{ lookup('ansible.builtin.template', 'tomcat.conf') }}" + labels: + filename: tomcat.conf + app: tomcat + notify: + - Restart candlepin -- name: Setup artemis - ansible.builtin.include_tasks: - file: artemis.yml + - name: Setup artemis + ansible.builtin.include_tasks: + file: artemis.yml -- name: Pull the Candlepin container image - containers.podman.podman_image: - name: "{{ candlepin_container_image }}:{{ candlepin_container_tag }}" - state: present + - name: Pull the Candlepin container image + containers.podman.podman_image: + name: "{{ candlepin_container_image }}:{{ candlepin_container_tag }}" + state: present -- name: Deploy Candlepin quadlet - containers.podman.podman_container: - name: "candlepin" - image: "{{ candlepin_container_image }}:{{ candlepin_container_tag }}" - state: quadlet - network: host - hostname: "{{ ansible_facts['fqdn'] }}" - secrets: - - 'candlepin-ca-cert,target=/etc/candlepin/certs/candlepin-ca.crt,mode=0440,type=mount' - - 'candlepin-ca-key,target=/etc/candlepin/certs/candlepin-ca.key,mode=0440,type=mount' - - 'candlepin-tomcat-keystore,target=/etc/candlepin/certs/keystore,mode=0440,type=mount' - - 'candlepin-tomcat-truststore,target=/etc/candlepin/certs/truststore,mode=0440,type=mount' - - 'candlepin-candlepin-conf,target=/etc/candlepin/candlepin.conf,mode=0440,type=mount' - - 'candlepin-artemis-broker-xml,target=/etc/candlepin/broker.xml,mode=440,type=mount' - - 'candlepin-tomcat-server-xml,target=/etc/tomcat/server.xml,mode=440,type=mount' - - 'candlepin-tomcat-conf,target=/etc/tomcat/tomcat.conf,mode=440,type=mount' - - 'candlepin-artemis-login-config,target=/etc/tomcat/login.config,mode=440,type=mount' - - 'candlepin-artemis-cert-roles-properties,target=/etc/tomcat/cert-roles.properties,mode=440,type=mount' - - 
'candlepin-artemis-cert-users-properties,target=/etc/tomcat/cert-users.properties,mode=440,type=mount' - - 'candlepin-artemis-jaas-conf,target=/etc/tomcat/conf.d/jaas.conf,mode=440,type=mount' - volumes: - - /var/log/candlepin:/var/log/candlepin:Z - - /var/log/tomcat:/var/log/tomcat:Z - quadlet_options: - - | - [Install] - WantedBy=default.target foreman.target - [Unit] - PartOf=foreman.target - Wants=redis.service postgresql.service - After=redis.service postgresql.service - [Service] - TimeoutStartSec=300 - healthcheck: curl --fail --insecure https://localhost:23443/candlepin/status - sdnotify: healthy + - name: Deploy Candlepin quadlet + containers.podman.podman_container: + name: "candlepin" + image: "{{ candlepin_container_image }}:{{ candlepin_container_tag }}" + state: quadlet + network: host + hostname: "{{ ansible_facts['fqdn'] }}" + secrets: + - 'candlepin-ca-cert,target=/etc/candlepin/certs/candlepin-ca.crt,mode=0440,type=mount' + - 'candlepin-ca-key,target=/etc/candlepin/certs/candlepin-ca.key,mode=0440,type=mount' + - 'candlepin-tomcat-keystore,target=/etc/candlepin/certs/keystore,mode=0440,type=mount' + - 'candlepin-tomcat-truststore,target=/etc/candlepin/certs/truststore,mode=0440,type=mount' + - 'candlepin-candlepin-conf,target=/etc/candlepin/candlepin.conf,mode=0440,type=mount' + - 'candlepin-artemis-broker-xml,target=/etc/candlepin/broker.xml,mode=440,type=mount' + - 'candlepin-tomcat-server-xml,target=/etc/tomcat/server.xml,mode=440,type=mount' + - 'candlepin-tomcat-conf,target=/etc/tomcat/tomcat.conf,mode=440,type=mount' + - 'candlepin-artemis-login-config,target=/etc/tomcat/login.config,mode=440,type=mount' + - 'candlepin-artemis-cert-roles-properties,target=/etc/tomcat/cert-roles.properties,mode=440,type=mount' + - 'candlepin-artemis-cert-users-properties,target=/etc/tomcat/cert-users.properties,mode=440,type=mount' + - 'candlepin-artemis-jaas-conf,target=/etc/tomcat/conf.d/jaas.conf,mode=440,type=mount' + volumes: + - 
/var/log/candlepin:/var/log/candlepin:Z + - /var/log/tomcat:/var/log/tomcat:Z + quadlet_options: + - | + [Install] + WantedBy=default.target foreman.target + [Unit] + PartOf=foreman.target + Wants=redis.service postgresql.service + After=redis.service postgresql.service + [Service] + TimeoutStartSec=300 + healthcheck: curl --fail --insecure https://localhost:23443/candlepin/status + sdnotify: healthy -- name: Run daemon reload to make Quadlet create the service files - ansible.builtin.systemd: - daemon_reload: true + - name: Run daemon reload to make Quadlet create the service files + ansible.builtin.systemd: + daemon_reload: true + scope: user -- name: Flush handlers to restart services - ansible.builtin.meta: flush_handlers + - name: Flush handlers to restart services + ansible.builtin.meta: flush_handlers -- name: Start the Candlepin Service - ansible.builtin.systemd: - name: candlepin - state: started + - name: Start the Candlepin Service + ansible.builtin.systemd: + name: candlepin + state: started + scope: user diff --git a/src/roles/certificates/defaults/main.yml b/src/roles/certificates/defaults/main.yml index c217f72f0..69d21810c 100644 --- a/src/roles/certificates/defaults/main.yml +++ b/src/roles/certificates/defaults/main.yml @@ -1,6 +1,6 @@ --- certificates_ca: true -certificates_ca_directory: /root/certificates # Change this to /var/lib? 
+certificates_ca_directory: /root/certificates certificates_ca_directory_keys: "{{ certificates_ca_directory }}/private" certificates_ca_directory_certs: "{{ certificates_ca_directory }}/certs" certificates_ca_directory_requests: "{{ certificates_ca_directory }}/requests" diff --git a/src/roles/certificates/tasks/ca.yml b/src/roles/certificates/tasks/ca.yml index f426dd735..ac9969658 100644 --- a/src/roles/certificates/tasks/ca.yml +++ b/src/roles/certificates/tasks/ca.yml @@ -4,38 +4,52 @@ name: openssl state: present +- name: 'Create base certificates directory' + ansible.builtin.file: + path: "{{ certificates_ca_directory }}" + state: directory + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0755' + - name: 'Create certs directory' ansible.builtin.file: path: "{{ certificates_ca_directory_certs }}" state: directory + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: '0755' - name: 'Create keys directory' ansible.builtin.file: path: "{{ certificates_ca_directory_keys }}" state: directory - mode: '0755' + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0700' - name: 'Create requests directory' ansible.builtin.file: path: "{{ certificates_ca_directory_requests }}" state: directory + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: '0755' - name: 'Deploy configuration file' ansible.builtin.template: src: openssl.cnf.j2 dest: "{{ certificates_ca_directory }}/openssl.cnf" - owner: root - group: root + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: '0644' - name: 'Create index file' ansible.builtin.file: path: "{{ certificates_ca_directory }}/index.txt" state: touch - owner: root - group: root + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: '0644' - name: 'Ensure serial starting number' @@ -43,16 +57,16 @@ src: serial.j2 dest: "{{ certificates_ca_directory }}/serial" force: false - owner: root - group: root + owner: "{{ 
foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: '0644' - name: 'Create CA key password file' ansible.builtin.copy: content: "{{ certificates_ca_password }}" dest: "{{ certificates_ca_directory_keys }}/ca.pwd" - owner: root - group: root + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: '0600' no_log: true @@ -70,3 +84,17 @@ -passout "file:{{ certificates_ca_directory_keys }}/ca.pwd" args: creates: "{{ certificates_ca_directory_certs }}/ca.crt" + +- name: 'Set CA key ownership and permissions' + ansible.builtin.file: + path: "{{ certificates_ca_directory_keys }}/ca.key" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0600' + +- name: 'Set CA certificate ownership and permissions' + ansible.builtin.file: + path: "{{ certificates_ca_directory_certs }}/ca.crt" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0644' diff --git a/src/roles/certificates/tasks/issue.yml b/src/roles/certificates/tasks/issue.yml index f532c7d93..3a24b8534 100644 --- a/src/roles/certificates/tasks/issue.yml +++ b/src/roles/certificates/tasks/issue.yml @@ -7,6 +7,13 @@ args: creates: "{{ certificates_ca_directory_keys }}/{{ certificates_hostname }}.key" +- name: 'Set server key ownership and permissions' + ansible.builtin.file: + path: "{{ certificates_ca_directory_keys }}/{{ certificates_hostname }}.key" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0600' + - name: 'Creating server signing request' ansible.builtin.command: > openssl req @@ -19,6 +26,13 @@ args: creates: "{{ certificates_ca_directory_requests }}/{{ certificates_hostname }}.csr" +- name: 'Set server CSR ownership and permissions' + ansible.builtin.file: + path: "{{ certificates_ca_directory_requests }}/{{ certificates_hostname }}.csr" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0644' + - name: 'Sign server signing request' ansible.builtin.command: > openssl ca @@ -32,6 +46,13 @@ 
args: creates: "{{ certificates_ca_directory_certs }}/{{ certificates_hostname }}.crt" +- name: 'Set server certificate ownership and permissions' + ansible.builtin.file: + path: "{{ certificates_ca_directory_certs }}/{{ certificates_hostname }}.crt" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0644' + - name: 'Create client key' ansible.builtin.command: > openssl genrsa @@ -40,6 +61,13 @@ args: creates: "{{ certificates_ca_directory_keys }}/{{ certificates_hostname }}-client.key" +- name: 'Set client key ownership and permissions' + ansible.builtin.file: + path: "{{ certificates_ca_directory_keys }}/{{ certificates_hostname }}-client.key" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0600' + - name: 'Creating client signing request' ansible.builtin.command: > openssl req @@ -52,6 +80,13 @@ args: creates: "{{ certificates_ca_directory_requests }}/{{ certificates_hostname }}-client.csr" +- name: 'Set client CSR ownership and permissions' + ansible.builtin.file: + path: "{{ certificates_ca_directory_requests }}/{{ certificates_hostname }}-client.csr" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0644' + - name: 'Sign client signing request' ansible.builtin.command: > openssl ca @@ -64,3 +99,10 @@ -out "{{ certificates_ca_directory_certs }}/{{ certificates_hostname }}-client.crt" args: creates: "{{ certificates_ca_directory_certs }}/{{ certificates_hostname }}-client.crt" + +- name: 'Set client certificate ownership and permissions' + ansible.builtin.file: + path: "{{ certificates_ca_directory_certs }}/{{ certificates_hostname }}-client.crt" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0644' diff --git a/src/roles/check_subuid_subgid/tasks/main.yaml b/src/roles/check_subuid_subgid/tasks/main.yaml index 6b77787ee..1bd983f21 100644 --- a/src/roles/check_subuid_subgid/tasks/main.yaml +++ b/src/roles/check_subuid_subgid/tasks/main.yaml @@ -1,19 +1,17 
@@ --- -- name: Check /etc/subuid for current user - ansible.builtin.command: grep "^{{ ansible_facts['user_id'] }}:" /etc/subuid - register: check_subuid_subgid_subuid - ignore_errors: true - changed_when: false +- name: Read /etc/subuid file + ansible.builtin.slurp: + src: /etc/subuid + register: check_subuid_subgid_subuid_file -- name: Check /etc/subgid for current user - ansible.builtin.command: grep "^{{ ansible_facts['user_id'] }}:" /etc/subgid - register: check_subuid_subgid_subgid - ignore_errors: true - changed_when: false +- name: Read /etc/subgid file + ansible.builtin.slurp: + src: /etc/subgid + register: check_subuid_subgid_subgid_file -- name: Assert /etc/subuid and /etc/subgid have entries for {{ ansible_facts['user_id'] }} +- name: Assert /etc/subuid and /etc/subgid have entries for {{ foremanctl_user }} ansible.builtin.assert: that: - - check_subuid_subgid_subuid is success - - check_subuid_subgid_subgid is success - fail_msg: "Entries for user {{ ansible_facts['user_id'] }} are missing in /etc/subuid or /etc/subgid" + - "(check_subuid_subgid_subuid_file.content | b64decode) is regex('^' ~ foremanctl_user ~ ':', multiline=True)" + - "(check_subuid_subgid_subgid_file.content | b64decode) is regex('^' ~ foremanctl_user ~ ':', multiline=True)" + fail_msg: "Entries for user {{ foremanctl_user }} are missing in /etc/subuid or /etc/subgid" diff --git a/src/roles/checks/tasks/main.yml b/src/roles/checks/tasks/main.yml index cc1f44040..e7146344f 100644 --- a/src/roles/checks/tasks/main.yml +++ b/src/roles/checks/tasks/main.yml @@ -1,4 +1,24 @@ --- +- name: Make installer certificates readable by foremanctl user + when: certificate_source == 'installer' + block: + - name: Add execute permission to /root for foremanctl group traversal + ansible.builtin.file: + path: /root + mode: o+x + + - name: Change group ownership of /root/ssl-build to foremanctl + ansible.builtin.file: + path: /root/ssl-build + group: "{{ foremanctl_group }}" + recurse: true + + - name: Add group read/execute permissions to /root/ssl-build + 
ansible.builtin.file: + path: /root/ssl-build + mode: g+rX + recurse: true + - name: Execute checks ansible.builtin.include_tasks: execute_check.yml loop: @@ -6,8 +26,46 @@ - check_database_connection - check_system_requirements +- name: Check if linger is enabled for rootless user + ansible.builtin.command: loginctl show-user {{ foremanctl_user }} + register: checks_linger_status + changed_when: false + failed_when: false + +- name: Verify linger is enabled + ansible.builtin.assert: + that: + - "'Linger=yes' in checks_linger_status.stdout" + fail_msg: "Linger is not enabled for {{ foremanctl_user }} user. Run: loginctl enable-linger {{ foremanctl_user }}" + success_msg: "Linger is enabled for {{ foremanctl_user }} user" + +- name: Read unprivileged port start setting + ansible.builtin.slurp: + src: /proc/sys/net/ipv4/ip_unprivileged_port_start + register: checks_sysctl_ports + +- name: Verify unprivileged port configuration + ansible.builtin.assert: + that: + - (checks_sysctl_ports.content | b64decode | trim | int) <= 80 + fail_msg: "Unprivileged port start is too high: {{ checks_sysctl_ports.content | b64decode | trim }}" + success_msg: "Unprivileged port start is configured correctly" + +- name: Verify XDG_RUNTIME_DIR exists + ansible.builtin.stat: + path: "{{ foremanctl_xdg_runtime_dir }}" + register: checks_xdg_stat + +- name: Assert XDG_RUNTIME_DIR exists + ansible.builtin.assert: + that: + - checks_xdg_stat.stat.exists + - checks_xdg_stat.stat.isdir + fail_msg: "XDG_RUNTIME_DIR {{ foremanctl_xdg_runtime_dir }} does not exist" + success_msg: "XDG_RUNTIME_DIR exists" + - name: Report status of checks ansible.builtin.fail: msg: "{{ checks_results }}" when: - - checks_results|default([])|length > 0 + - checks_results | default([]) | length > 0 diff --git a/src/roles/foreman/handlers/main.yml b/src/roles/foreman/handlers/main.yml index 4bfc00a01..67472c6ba 100644 --- a/src/roles/foreman/handlers/main.yml +++ b/src/roles/foreman/handlers/main.yml @@ -1,14 +1,24 @@ 
--- - name: Restart foreman + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" ansible.builtin.systemd: name: foreman state: restarted + scope: user - name: Restart dynflow-sidekiq@ - ansible.builtin.systemd: - name: "dynflow-sidekiq@{{ item }}" - state: restarted + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" loop: - orchestrator - worker - worker-hosts-queue + ansible.builtin.systemd: + name: "dynflow-sidekiq@{{ item }}" + state: restarted + scope: user diff --git a/src/roles/foreman/tasks/main.yaml b/src/roles/foreman/tasks/main.yaml index a3bfdd9a8..ec77d63e2 100644 --- a/src/roles/foreman/tasks/main.yaml +++ b/src/roles/foreman/tasks/main.yaml @@ -1,251 +1,275 @@ --- -- name: Pull the Foreman container image - containers.podman.podman_image: - name: "{{ foreman_container_image }}:{{ foreman_container_tag }}" - state: present +- name: Configure Foreman containers and services as rootless user + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" + block: + - name: Pull the Foreman container image + containers.podman.podman_image: + name: "{{ foreman_container_image }}:{{ foreman_container_tag }}" + state: present -- name: Create secret for DATABASE_URL - containers.podman.podman_secret: - state: present - name: foreman-database-url - data: "postgresql://{{ foreman_database_user }}:{{ foreman_database_password }}@{{ foreman_database_host }}:{{ foreman_database_port }}/{{ foreman_database_name }}?pool={{ foreman_database_pool }}&sslmode={{ foreman_database_ssl_mode }}{% if foreman_database_ssl_ca is defined %}&sslrootcert={{ foreman_database_ssl_ca }}{% endif %}" # yamllint disable-line rule:line-length - notify: - - Restart foreman - - Restart dynflow-sidekiq@ + - name: Create secret for DATABASE_URL + containers.podman.podman_secret: + state: 
present + name: foreman-database-url + data: "postgresql://{{ foreman_database_user }}:{{ foreman_database_password }}@{{ foreman_database_host }}:{{ foreman_database_port }}/{{ foreman_database_name }}?pool={{ foreman_database_pool }}&sslmode={{ foreman_database_ssl_mode }}{% if foreman_database_ssl_ca is defined %}&sslrootcert={{ foreman_database_ssl_ca }}{% endif %}" # yamllint disable-line rule:line-length + notify: + - Restart foreman + - Restart dynflow-sidekiq@ -- name: Create secret for SEED_ADMIN_USER - containers.podman.podman_secret: - state: present - name: foreman-seed-admin-user - data: "{{ foreman_initial_admin_username }}" - notify: - - Restart foreman - - Restart dynflow-sidekiq@ + - name: Create secret for SEED_ADMIN_USER + containers.podman.podman_secret: + state: present + name: foreman-seed-admin-user + data: "{{ foreman_initial_admin_username }}" + notify: + - Restart foreman + - Restart dynflow-sidekiq@ -- name: Create secret for SEED_ADMIN_PASSWORD - containers.podman.podman_secret: - state: present - name: foreman-seed-admin-password - data: "{{ foreman_initial_admin_password }}" - notify: - - Restart foreman - - Restart dynflow-sidekiq@ + - name: Create secret for SEED_ADMIN_PASSWORD + containers.podman.podman_secret: + state: present + name: foreman-seed-admin-password + data: "{{ foreman_initial_admin_password }}" + notify: + - Restart foreman + - Restart dynflow-sidekiq@ -- name: Create settings config secret - containers.podman.podman_secret: - state: present - name: foreman-settings-yaml - data: "{{ lookup('ansible.builtin.template', 'settings.yaml.j2') }}" - notify: - - Restart foreman - - Restart dynflow-sidekiq@ + - name: Create settings config secret + containers.podman.podman_secret: + state: present + name: foreman-settings-yaml + data: "{{ lookup('ansible.builtin.template', 'settings.yaml.j2') }}" + notify: + - Restart foreman + - Restart dynflow-sidekiq@ -- name: Create katello config secret - containers.podman.podman_secret: 
- state: present - name: foreman-katello-yaml - data: "{{ lookup('ansible.builtin.template', 'katello.yaml.j2') }}" - notify: - - Restart foreman - - Restart dynflow-sidekiq@ + - name: Create katello config secret + containers.podman.podman_secret: + state: present + name: foreman-katello-yaml + data: "{{ lookup('ansible.builtin.template', 'katello.yaml.j2') }}" + notify: + - Restart foreman + - Restart dynflow-sidekiq@ -- name: Create dynflow hosts_queue worker config secret - containers.podman.podman_secret: - state: present - name: foreman-dynflow-worker-hosts-queue-yaml - data: "{{ lookup('ansible.builtin.template', 'dynflow-worker-hosts-queue.yml') }}" - notify: - - Restart dynflow-sidekiq@ + - name: Create dynflow hosts_queue worker config secret + containers.podman.podman_secret: + state: present + name: foreman-dynflow-worker-hosts-queue-yaml + data: "{{ lookup('ansible.builtin.template', 'dynflow-worker-hosts-queue.yml') }}" + notify: + - Restart dynflow-sidekiq@ -- name: Create the podman secret for Foreman CA certificate - containers.podman.podman_secret: - name: foreman-ca-cert - path: "{{ foreman_ca_certificate }}" - state: present - notify: - - Restart foreman - - Restart dynflow-sidekiq@ + - name: Create the podman secret for Foreman CA certificate + containers.podman.podman_secret: + name: foreman-ca-cert + path: "{{ foreman_ca_certificate }}" + state: present + notify: + - Restart foreman + - Restart dynflow-sidekiq@ -- name: Create the podman secret for Foreman client certificate - containers.podman.podman_secret: - state: present - name: foreman-client-cert - path: "{{ foreman_client_certificate }}" - notify: - - Restart foreman - - Restart dynflow-sidekiq@ + - name: Create the podman secret for Foreman client certificate + containers.podman.podman_secret: + state: present + name: foreman-client-cert + path: "{{ foreman_client_certificate }}" + notify: + - Restart foreman + - Restart dynflow-sidekiq@ -- name: Create the podman secret for Foreman 
client key - containers.podman.podman_secret: - state: present - name: foreman-client-key - path: "{{ foreman_client_key }}" - notify: - - Restart foreman - - Restart dynflow-sidekiq@ + - name: Create the podman secret for Foreman client key + containers.podman.podman_secret: + state: present + name: foreman-client-key + path: "{{ foreman_client_key }}" + notify: + - Restart foreman + - Restart dynflow-sidekiq@ -- name: Deploy Foreman Container - containers.podman.podman_container: - name: "foreman" - image: "{{ foreman_container_image }}:{{ foreman_container_tag }}" - state: quadlet - sdnotify: true - network: host - hostname: "{{ ansible_facts['fqdn'] }}" - volume: - - 'foreman-data-run:/var/run/foreman:z' - secrets: - - 'foreman-database-url,type=env,target=DATABASE_URL' - - 'foreman-seed-admin-user,type=env,target=SEED_ADMIN_USER' - - 'foreman-seed-admin-password,type=env,target=SEED_ADMIN_PASSWORD' - - 'foreman-settings-yaml,type=mount,target=/etc/foreman/settings.yaml' - - 'foreman-katello-yaml,type=mount,target=/etc/foreman/plugins/katello.yaml' - - 'foreman-ca-cert,type=mount,target=/etc/foreman/katello-default-ca.crt' - - 'foreman-client-cert,type=mount,target=/etc/foreman/client_cert.pem' - - 'foreman-client-key,type=mount,target=/etc/foreman/client_key.pem' - env: - FOREMAN_PUMA_THREADS_MIN: "{{ foreman_puma_threads_min }}" - FOREMAN_PUMA_THREADS_MAX: "{{ foreman_puma_threads_max }}" - FOREMAN_PUMA_WORKERS: "{{ foreman_puma_workers }}" - FOREMAN_ENABLED_PLUGINS: "{{ foreman_plugins | join(' ') }}" - quadlet_options: - - | - [Install] - WantedBy=default.target foreman.target - [Unit] - PartOf=foreman.target - notify: Restart foreman + - name: Deploy Foreman Container + containers.podman.podman_container: + name: "foreman" + image: "{{ foreman_container_image }}:{{ foreman_container_tag }}" + state: quadlet + sdnotify: true + network: host + hostname: "{{ ansible_facts['fqdn'] }}" + volume: + - 'foreman-data-run:/var/run/foreman:z' + secrets: + - 
'foreman-database-url,type=env,target=DATABASE_URL' + - 'foreman-seed-admin-user,type=env,target=SEED_ADMIN_USER' + - 'foreman-seed-admin-password,type=env,target=SEED_ADMIN_PASSWORD' + - 'foreman-settings-yaml,type=mount,target=/etc/foreman/settings.yaml' + - 'foreman-katello-yaml,type=mount,target=/etc/foreman/plugins/katello.yaml' + - 'foreman-ca-cert,type=mount,target=/etc/foreman/katello-default-ca.crt' + - 'foreman-client-cert,type=mount,target=/etc/foreman/client_cert.pem' + - 'foreman-client-key,type=mount,target=/etc/foreman/client_key.pem' + env: + FOREMAN_PUMA_THREADS_MIN: "{{ foreman_puma_threads_min }}" + FOREMAN_PUMA_THREADS_MAX: "{{ foreman_puma_threads_max }}" + FOREMAN_PUMA_WORKERS: "{{ foreman_puma_workers }}" + FOREMAN_ENABLED_PLUGINS: "{{ foreman_plugins | join(' ') }}" + quadlet_options: + - | + [Install] + WantedBy=default.target foreman.target + [Unit] + PartOf=foreman.target + notify: Restart foreman -- name: Deploy Dynflow Container - containers.podman.podman_container: - name: "dynflow-sidekiq-%i" - quadlet_filename: "dynflow-sidekiq@" - image: "{{ foreman_container_image }}:{{ foreman_container_tag }}" - state: quadlet - sdnotify: true - network: host - hostname: "{{ ansible_facts['fqdn'] }}" - volume: - - 'foreman-data-run:/var/run/foreman:z' - secrets: - - 'foreman-database-url,type=env,target=DATABASE_URL' - - 'foreman-settings-yaml,type=mount,target=/etc/foreman/settings.yaml' - - 'foreman-katello-yaml,type=mount,target=/etc/foreman/plugins/katello.yaml' - - 'foreman-ca-cert,type=mount,target=/etc/foreman/katello-default-ca.crt' - - 'foreman-client-cert,type=mount,target=/etc/foreman/client_cert.pem' - - 'foreman-client-key,type=mount,target=/etc/foreman/client_key.pem' - - 'foreman-dynflow-worker-hosts-queue-yaml,type=mount,target=/etc/foreman/dynflow/worker-hosts-queue.yml' - env: - DYNFLOW_REDIS_URL: "redis://localhost:6379/6" - REDIS_PROVIDER: "DYNFLOW_REDIS_URL" - FOREMAN_ENABLED_PLUGINS: "{{ foreman_plugins | join(' ') }}" - 
command: "/usr/libexec/foreman/sidekiq-selinux -e production -r ./extras/dynflow-sidekiq.rb -C /etc/foreman/dynflow/%i.yml" - quadlet_options: - - | - [Install] - WantedBy=default.target foreman.target - [Unit] - PartOf=foreman.target - Wants=redis.service postgresql.service - After=redis.service postgresql.service - - | - [Service] - Restart=on-failure - RestartSec=1 + - name: Deploy Dynflow Container + containers.podman.podman_container: + name: "dynflow-sidekiq-%i" + quadlet_filename: "dynflow-sidekiq@" + image: "{{ foreman_container_image }}:{{ foreman_container_tag }}" + state: quadlet + sdnotify: true + network: host + hostname: "{{ ansible_facts['fqdn'] }}" + volume: + - 'foreman-data-run:/var/run/foreman:z' + secrets: + - 'foreman-database-url,type=env,target=DATABASE_URL' + - 'foreman-settings-yaml,type=mount,target=/etc/foreman/settings.yaml' + - 'foreman-katello-yaml,type=mount,target=/etc/foreman/plugins/katello.yaml' + - 'foreman-ca-cert,type=mount,target=/etc/foreman/katello-default-ca.crt' + - 'foreman-client-cert,type=mount,target=/etc/foreman/client_cert.pem' + - 'foreman-client-key,type=mount,target=/etc/foreman/client_key.pem' + - 'foreman-dynflow-worker-hosts-queue-yaml,type=mount,target=/etc/foreman/dynflow/worker-hosts-queue.yml' + env: + DYNFLOW_REDIS_URL: "redis://localhost:6379/6" + REDIS_PROVIDER: "DYNFLOW_REDIS_URL" + FOREMAN_ENABLED_PLUGINS: "{{ foreman_plugins | join(' ') }}" + command: "/usr/libexec/foreman/sidekiq-selinux -e production -r ./extras/dynflow-sidekiq.rb -C /etc/foreman/dynflow/%i.yml" + quadlet_options: + - | + [Install] + WantedBy=default.target foreman.target + [Unit] + PartOf=foreman.target + Wants=redis.service postgresql.service + After=redis.service postgresql.service + - | + [Service] + Restart=on-failure + RestartSec=1 -- name: Create Dynflow Container instances - ansible.builtin.file: - state: link - src: "/etc/containers/systemd/dynflow-sidekiq@.container" - dest: "/etc/containers/systemd/dynflow-sidekiq@{{ item 
}}.container" - loop: - - orchestrator - - worker - - worker-hosts-queue + - name: Create Dynflow Container instances + ansible.builtin.file: + state: link + src: "{{ foremanctl_quadlet_dir }}/dynflow-sidekiq@.container" + dest: "{{ foremanctl_quadlet_dir }}/dynflow-sidekiq@{{ item }}.container" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + loop: + - orchestrator + - worker + - worker-hosts-queue -- name: Define templated Quadlet for recurring Foreman rake tasks - when: foreman_recurring_tasks_enabled - loop: "{{ foreman_recurring_tasks }}" - loop_control: - label: "{{ item.instance }}" - containers.podman.podman_container: - name: "foreman-recurring-{{ item.instance }}" - quadlet_filename: "foreman-recurring@{{ item.instance }}" - state: quadlet - image: "{{ foreman_container_image }}:{{ foreman_container_tag }}" - sdnotify: false - network: host - hostname: "{{ ansible_facts['fqdn'] }}" - command: "foreman-rake {{ item.rake }}" - volume: - - 'foreman-data-run:/var/run/foreman:z' - secrets: - - 'foreman-database-url,type=env,target=DATABASE_URL' - - 'foreman-seed-admin-user,type=env,target=SEED_ADMIN_USER' - - 'foreman-seed-admin-password,type=env,target=SEED_ADMIN_PASSWORD' - - 'foreman-settings-yaml,type=mount,target=/etc/foreman/settings.yaml' - - 'foreman-katello-yaml,type=mount,target=/etc/foreman/plugins/katello.yaml' - - 'foreman-ca-cert,type=mount,target=/etc/foreman/katello-default-ca.crt' - - 'foreman-client-cert,type=mount,target=/etc/foreman/client_cert.pem' - - 'foreman-client-key,type=mount,target=/etc/foreman/client_key.pem' - quadlet_options: - - | - [Unit] - PartOf=foreman.target - StartLimitIntervalSec=0 - - | - [Service] - TimeoutStartSec=30m - TimeoutStopSec=2m - KillMode=mixed - SyslogIdentifier=foreman-recurring-%i + - name: Configure recurring Foreman rake tasks + when: foreman_recurring_tasks_enabled + block: + - name: Define templated Quadlet for recurring Foreman rake tasks + containers.podman.podman_container: + 
name: "foreman-recurring-{{ item.instance }}" + quadlet_filename: "foreman-recurring@{{ item.instance }}" + state: quadlet + image: "{{ foreman_container_image }}:{{ foreman_container_tag }}" + sdnotify: false + network: host + hostname: "{{ ansible_facts['fqdn'] }}" + command: "foreman-rake {{ item.rake }}" + volume: + - 'foreman-data-run:/var/run/foreman:z' + secrets: + - 'foreman-database-url,type=env,target=DATABASE_URL' + - 'foreman-seed-admin-user,type=env,target=SEED_ADMIN_USER' + - 'foreman-seed-admin-password,type=env,target=SEED_ADMIN_PASSWORD' + - 'foreman-settings-yaml,type=mount,target=/etc/foreman/settings.yaml' + - 'foreman-katello-yaml,type=mount,target=/etc/foreman/plugins/katello.yaml' + - 'foreman-ca-cert,type=mount,target=/etc/foreman/katello-default-ca.crt' + - 'foreman-client-cert,type=mount,target=/etc/foreman/client_cert.pem' + - 'foreman-client-key,type=mount,target=/etc/foreman/client_key.pem' + quadlet_options: + - | + [Unit] + PartOf=foreman.target + StartLimitIntervalSec=0 + - | + [Service] + TimeoutStartSec=30m + TimeoutStopSec=2m + KillMode=mixed + SyslogIdentifier=foreman-recurring-%i + loop: "{{ foreman_recurring_tasks }}" + loop_control: + label: "{{ item.instance }}" -- name: Render timers for recurring tasks - when: foreman_recurring_tasks_enabled - ansible.builtin.template: - src: foreman-recurring@.timer.j2 - dest: "/etc/systemd/system/foreman-recurring@{{ item.instance }}.timer" - mode: "0644" - loop: "{{ foreman_recurring_tasks }}" - loop_control: - label: "{{ item.instance }}" + - name: Render timers for recurring tasks + ansible.builtin.template: + src: foreman-recurring@.timer.j2 + dest: "{{ foremanctl_systemd_user_dir }}/foreman-recurring@{{ item.instance }}.timer" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: "0644" + loop: "{{ foreman_recurring_tasks }}" + loop_control: + label: "{{ item.instance }}" -- name: Run daemon reload to make Quadlet create the service files - 
ansible.builtin.systemd: - daemon_reload: true + - name: Run daemon reload to make Quadlet create the service files + ansible.builtin.systemd: + daemon_reload: true + scope: user -- name: Migrate and seed the Foreman database - containers.podman.podman_container: - name: foreman-db-migrate - image: "{{ foreman_container_image }}:{{ foreman_container_tag }}" - command: - - bash - - -c - - bin/rails db:migrate && bin/rails db:seed - detach: false - network: host - env: - FOREMAN_ENABLED_PLUGINS: "{{ foreman_plugins | join(' ') }}" - secrets: - - 'foreman-database-url,type=env,target=DATABASE_URL' - - 'foreman-seed-admin-user,type=env,target=SEED_ADMIN_USER' - - 'foreman-seed-admin-password,type=env,target=SEED_ADMIN_PASSWORD' - - 'foreman-settings-yaml,type=mount,target=/etc/foreman/settings.yaml' + - name: Migrate and seed the Foreman database + containers.podman.podman_container: + name: foreman-db-migrate + image: "{{ foreman_container_image }}:{{ foreman_container_tag }}" + command: + - bash + - -c + - bin/rails db:migrate && bin/rails db:seed + detach: false + network: host + env: + FOREMAN_ENABLED_PLUGINS: "{{ foreman_plugins | join(' ') }}" + secrets: + - 'foreman-database-url,type=env,target=DATABASE_URL' + - 'foreman-seed-admin-user,type=env,target=SEED_ADMIN_USER' + - 'foreman-seed-admin-password,type=env,target=SEED_ADMIN_PASSWORD' + - 'foreman-settings-yaml,type=mount,target=/etc/foreman/settings.yaml' -- name: Flush handlers to restart services - ansible.builtin.meta: flush_handlers + - name: Flush handlers to restart services + ansible.builtin.meta: flush_handlers -- name: Start services - ansible.builtin.systemd: - name: "{{ item }}" - state: started - async: 60 - poll: 0 - loop: - - dynflow-sidekiq@orchestrator - - dynflow-sidekiq@worker - - dynflow-sidekiq@worker-hosts-queue - - foreman + - name: Start services + ansible.builtin.systemd: + name: "{{ item }}" + state: started + scope: user + async: 60 + poll: 0 + loop: + - dynflow-sidekiq@orchestrator 
+ - dynflow-sidekiq@worker + - dynflow-sidekiq@worker-hosts-queue + - foreman + + - name: Enable & start recurring timers + when: foreman_recurring_tasks_enabled + ansible.builtin.systemd: + name: "foreman-recurring@{{ item.instance }}.timer" + enabled: true + state: started + scope: user + loop: "{{ foreman_recurring_tasks }}" + loop_control: + label: "{{ item.instance }}" - name: Wait for Foreman service to be accessible ansible.builtin.uri: @@ -256,16 +280,6 @@ delay: 5 register: foreman_status -- name: Enable & start recurring timers - when: foreman_recurring_tasks_enabled - ansible.builtin.systemd: - name: "foreman-recurring@{{ item.instance }}.timer" - enabled: true - state: started - loop: "{{ foreman_recurring_tasks }}" - loop_control: - label: "{{ item.instance }}" - - name: Wait for Foreman tasks to be ready ansible.builtin.uri: url: '{{ foreman_url }}/api/v2/ping' diff --git a/src/roles/foreman_proxy/handlers/main.yml b/src/roles/foreman_proxy/handlers/main.yml index 6cea8a881..3a2f927f2 100644 --- a/src/roles/foreman_proxy/handlers/main.yml +++ b/src/roles/foreman_proxy/handlers/main.yml @@ -1,5 +1,10 @@ --- - name: Restart Foreman Proxy + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" ansible.builtin.systemd: name: foreman-proxy state: restarted + scope: user diff --git a/src/roles/foreman_proxy/tasks/certs.yaml b/src/roles/foreman_proxy/tasks/certs.yaml index fbb504ecd..0f0139ccc 100644 --- a/src/roles/foreman_proxy/tasks/certs.yaml +++ b/src/roles/foreman_proxy/tasks/certs.yaml @@ -1,48 +1,54 @@ --- -- name: Create the podman secret for Foreman Proxy CA certificate - containers.podman.podman_secret: - name: foreman-proxy-ssl-ca - path: "{{ server_ca_certificate }}" - state: present - notify: - - Restart Foreman Proxy +- name: Configure Foreman Proxy certificate secrets as rootless user + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + 
become_user: "{{ foremanctl_user }}" + block: + - name: Create the podman secret for Foreman Proxy CA certificate + containers.podman.podman_secret: + name: foreman-proxy-ssl-ca + path: "{{ server_ca_certificate }}" + state: present + notify: + - Restart Foreman Proxy -- name: Create the podman secret for foreman-proxy Proxy server certificate (for HTTPS) - containers.podman.podman_secret: - state: present - name: foreman-proxy-ssl-cert - path: "{{ server_certificate }}" - notify: - - Restart Foreman Proxy + - name: Create the podman secret for foreman-proxy Proxy server certificate (for HTTPS) + containers.podman.podman_secret: + state: present + name: foreman-proxy-ssl-cert + path: "{{ server_certificate }}" + notify: + - Restart Foreman Proxy -- name: Create the podman secret for Foreman Proxy server key (for HTTPS) - containers.podman.podman_secret: - state: present - name: foreman-proxy-ssl-key - path: "{{ server_key }}" - notify: - - Restart Foreman Proxy + - name: Create the podman secret for Foreman Proxy server key (for HTTPS) + containers.podman.podman_secret: + state: present + name: foreman-proxy-ssl-key + path: "{{ server_key }}" + notify: + - Restart Foreman Proxy -- name: Create the podman secret for Foreman Proxy Foreman CA - containers.podman.podman_secret: - state: present - name: foreman-proxy-foreman-ssl-ca - path: "{{ server_ca_certificate }}" - notify: - - Restart Foreman Proxy + - name: Create the podman secret for Foreman Proxy Foreman CA + containers.podman.podman_secret: + state: present + name: foreman-proxy-foreman-ssl-ca + path: "{{ server_ca_certificate }}" + notify: + - Restart Foreman Proxy -- name: Create the podman secret for Foreman Proxy Foreman client certificate - containers.podman.podman_secret: - state: present - name: foreman-proxy-foreman-ssl-cert - path: "{{ client_certificate }}" - notify: - - Restart Foreman Proxy + - name: Create the podman secret for Foreman Proxy Foreman client certificate + 
containers.podman.podman_secret: + state: present + name: foreman-proxy-foreman-ssl-cert + path: "{{ client_certificate }}" + notify: + - Restart Foreman Proxy -- name: Create the podman secret for Foreman Proxy Foreman client key - containers.podman.podman_secret: - state: present - name: foreman-proxy-foreman-ssl-key - path: "{{ client_key }}" - notify: - - Restart Foreman Proxy + - name: Create the podman secret for Foreman Proxy Foreman client key + containers.podman.podman_secret: + state: present + name: foreman-proxy-foreman-ssl-key + path: "{{ client_key }}" + notify: + - Restart Foreman Proxy diff --git a/src/roles/foreman_proxy/tasks/configs.yaml b/src/roles/foreman_proxy/tasks/configs.yaml index 568cfd789..51da91da9 100644 --- a/src/roles/foreman_proxy/tasks/configs.yaml +++ b/src/roles/foreman_proxy/tasks/configs.yaml @@ -1,16 +1,22 @@ --- -- name: Create settings config secret - containers.podman.podman_secret: - state: present - name: foreman-proxy-settings-yml - data: "{{ lookup('ansible.builtin.template', 'settings.yml.j2') }}" - notify: - - Restart Foreman Proxy +- name: Configure Foreman Proxy config secrets as rootless user + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" + block: + - name: Create settings config secret + containers.podman.podman_secret: + state: present + name: foreman-proxy-settings-yml + data: "{{ lookup('ansible.builtin.template', 'settings.yml.j2') }}" + notify: + - Restart Foreman Proxy -- name: Create logs config secret - containers.podman.podman_secret: - state: present - name: foreman-proxy-logs-yml - data: "{{ lookup('ansible.builtin.template', 'settings.d/logs.yml.j2') }}" - notify: - - Restart Foreman Proxy + - name: Create logs config secret + containers.podman.podman_secret: + state: present + name: foreman-proxy-logs-yml + data: "{{ lookup('ansible.builtin.template', 'settings.d/logs.yml.j2') }}" + notify: + - Restart Foreman Proxy diff --git 
a/src/roles/foreman_proxy/tasks/main.yaml b/src/roles/foreman_proxy/tasks/main.yaml index 47eaff731..70586303e 100644 --- a/src/roles/foreman_proxy/tasks/main.yaml +++ b/src/roles/foreman_proxy/tasks/main.yaml @@ -1,51 +1,59 @@ --- -- name: Pull the Foreman Proxy container image - containers.podman.podman_image: - name: "{{ foreman_proxy_container_image }}:{{ foreman_proxy_container_tag }}" - state: present +- name: Configure Foreman Proxy container as rootless user + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" + block: + - name: Pull the Foreman Proxy container image + containers.podman.podman_image: + name: "{{ foreman_proxy_container_image }}:{{ foreman_proxy_container_tag }}" + state: present -- name: Create config secrets - ansible.builtin.include_tasks: configs.yaml + - name: Create config secrets + ansible.builtin.include_tasks: configs.yaml -- name: Create certs secrets - ansible.builtin.include_tasks: certs.yaml + - name: Create certs secrets + ansible.builtin.include_tasks: certs.yaml -- name: Deploy Foreman Proxy Container - containers.podman.podman_container: - name: "foreman-proxy" - image: "{{ foreman_proxy_container_image }}:{{ foreman_proxy_container_tag }}" - state: quadlet - sdnotify: true - network: host - hostname: "{{ ansible_facts['fqdn'] }}" - secrets: - - 'foreman-proxy-settings-yml,type=mount,target=/etc/foreman-proxy/settings.yml' - - 'foreman-proxy-logs-yml,type=mount,target=/etc/foreman-proxy/settings.d/logs.yml' - - 'foreman-proxy-ssl-ca,type=mount,target=/etc/foreman-proxy/ssl_ca.pem' - - 'foreman-proxy-ssl-cert,type=mount,target=/etc/foreman-proxy/ssl_cert.pem' - - 'foreman-proxy-ssl-key,type=mount,target=/etc/foreman-proxy/ssl_key.pem' - - 'foreman-proxy-foreman-ssl-ca,type=mount,target=/etc/foreman-proxy/foreman_ssl_ca.pem' - - 'foreman-proxy-foreman-ssl-cert,type=mount,target=/etc/foreman-proxy/foreman_ssl_cert.pem' - - 
'foreman-proxy-foreman-ssl-key,type=mount,target=/etc/foreman-proxy/foreman_ssl_key.pem' - quadlet_options: - - | - [Install] - WantedBy=default.target foreman.target - [Unit] - PartOf=foreman.target - notify: Restart Foreman Proxy + - name: Deploy Foreman Proxy Container + containers.podman.podman_container: + name: "foreman-proxy" + image: "{{ foreman_proxy_container_image }}:{{ foreman_proxy_container_tag }}" + state: quadlet + sdnotify: true + network: host + hostname: "{{ ansible_facts['fqdn'] }}" + secrets: + - 'foreman-proxy-settings-yml,type=mount,target=/etc/foreman-proxy/settings.yml' + - 'foreman-proxy-logs-yml,type=mount,target=/etc/foreman-proxy/settings.d/logs.yml' + - 'foreman-proxy-ssl-ca,type=mount,target=/etc/foreman-proxy/ssl_ca.pem' + - 'foreman-proxy-ssl-cert,type=mount,target=/etc/foreman-proxy/ssl_cert.pem' + - 'foreman-proxy-ssl-key,type=mount,target=/etc/foreman-proxy/ssl_key.pem' + - 'foreman-proxy-foreman-ssl-ca,type=mount,target=/etc/foreman-proxy/foreman_ssl_ca.pem' + - 'foreman-proxy-foreman-ssl-cert,type=mount,target=/etc/foreman-proxy/foreman_ssl_cert.pem' + - 'foreman-proxy-foreman-ssl-key,type=mount,target=/etc/foreman-proxy/foreman_ssl_key.pem' + quadlet_options: + - | + [Install] + WantedBy=default.target foreman.target + [Unit] + PartOf=foreman.target + notify: Restart Foreman Proxy -- name: Run daemon reload to make Quadlet create the service files - ansible.builtin.systemd: - daemon_reload: true + - name: Run daemon reload to make Quadlet create the service files + ansible.builtin.systemd: + daemon_reload: true + scope: user -- name: Flush handlers to restart services - ansible.builtin.meta: flush_handlers + - name: Flush handlers to restart services + ansible.builtin.meta: flush_handlers -- name: Start the Foreman Proxy Service - ansible.builtin.systemd: - name: foreman-proxy - state: started + - name: Start the Foreman Proxy Service + ansible.builtin.systemd: + name: foreman-proxy + state: started + scope: user - name: 
Register Foreman Proxy to Foreman theforeman.foreman.smart_proxy: diff --git a/src/roles/postgresql/defaults/main.yml b/src/roles/postgresql/defaults/main.yml index 0530ec787..4510b189a 100644 --- a/src/roles/postgresql/defaults/main.yml +++ b/src/roles/postgresql/defaults/main.yml @@ -7,6 +7,10 @@ postgresql_restart_policy: always postgresql_data_dir: /var/lib/pgsql/data +# Container user namespace UID/GID (postgres user in sclorg/postgresql-13-c9s) +postgresql_container_uid: 26 +postgresql_container_gid: 26 + postgresql_admin_password: "CHANGEME" postgresql_max_connections: 500 diff --git a/src/roles/postgresql/handlers/main.yml b/src/roles/postgresql/handlers/main.yml index d45bb9df9..b14543c12 100644 --- a/src/roles/postgresql/handlers/main.yml +++ b/src/roles/postgresql/handlers/main.yml @@ -1,5 +1,10 @@ --- - name: Restart postgresql + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" ansible.builtin.systemd: name: "{{ postgresql_container_name }}" state: restarted + scope: user diff --git a/src/roles/postgresql/tasks/main.yml b/src/roles/postgresql/tasks/main.yml index baf3d5537..05731755e 100644 --- a/src/roles/postgresql/tasks/main.yml +++ b/src/roles/postgresql/tasks/main.yml @@ -1,58 +1,76 @@ --- -- name: Pull PostgreSQL container image - containers.podman.podman_image: - name: "{{ postgresql_container_image }}:{{ postgresql_container_tag }}" - state: present - - name: Create PostgreSQL storage directory ansible.builtin.file: path: "{{ postgresql_data_dir }}" state: directory + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: "0700" - owner: 26 - group: 26 + setype: container_var_lib_t -- name: Create Podman secret for PostgreSQL admin password - containers.podman.podman_secret: - name: postgresql-admin-password - data: "{{ postgresql_admin_password }}" - notify: - - Restart postgresql +- name: Set PostgreSQL directory ownership for container UID/GID + 
ansible.builtin.shell: | + cd /tmp + sudo -u {{ foremanctl_user }} XDG_RUNTIME_DIR={{ foremanctl_xdg_runtime_dir }} \ + podman unshare chown -R {{ postgresql_container_uid }}:{{ postgresql_container_gid }} {{ postgresql_data_dir }} + args: + executable: /bin/bash + changed_when: true + +- name: Configure PostgreSQL container as rootless user + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" + block: + - name: Pull PostgreSQL container image + containers.podman.podman_image: + name: "{{ postgresql_container_image }}:{{ postgresql_container_tag }}" + state: present + + - name: Create Podman secret for PostgreSQL admin password + containers.podman.podman_secret: + name: postgresql-admin-password + data: "{{ postgresql_admin_password }}" + notify: + - Restart postgresql -- name: Deploy PostgreSQL container - containers.podman.podman_container: - name: "{{ postgresql_container_name }}" - image: "{{ postgresql_container_image }}:{{ postgresql_container_tag }}" - state: quadlet - healthcheck: pg_isready - sdnotify: healthy - network: host - volumes: - - "{{ postgresql_data_dir }}:/var/lib/pgsql/data:Z" - secrets: - - 'postgresql-admin-password,target=POSTGRESQL_ADMIN_PASSWORD,type=env' - env: - POSTGRESQL_MAX_CONNECTIONS: "{{ postgresql_max_connections }}" - POSTGRESQL_SHARED_BUFFERS: "{{ postgresql_shared_buffers }}" - POSTGRESQL_EFFECTIVE_CACHE_SIZE: "{{ postgresql_effective_cache_size }}" - quadlet_options: - - | - [Install] - WantedBy=default.target foreman.target - [Unit] - PartOf=foreman.target + - name: Deploy PostgreSQL container + containers.podman.podman_container: + name: "{{ postgresql_container_name }}" + image: "{{ postgresql_container_image }}:{{ postgresql_container_tag }}" + state: quadlet + healthcheck: pg_isready + sdnotify: healthy + network: host + volumes: + - "{{ postgresql_data_dir }}:/var/lib/pgsql/data:Z" + secrets: + - 
'postgresql-admin-password,target=POSTGRESQL_ADMIN_PASSWORD,type=env' + env: + POSTGRESQL_MAX_CONNECTIONS: "{{ postgresql_max_connections }}" + POSTGRESQL_SHARED_BUFFERS: "{{ postgresql_shared_buffers }}" + POSTGRESQL_EFFECTIVE_CACHE_SIZE: "{{ postgresql_effective_cache_size }}" + quadlet_options: + - | + [Install] + WantedBy=default.target foreman.target + [Unit] + PartOf=foreman.target -- name: Run daemon reload - ansible.builtin.systemd: - daemon_reload: true + - name: Run daemon reload + ansible.builtin.systemd: + daemon_reload: true + scope: user -- name: Flush handlers to restart services - ansible.builtin.meta: flush_handlers + - name: Flush handlers to restart services + ansible.builtin.meta: flush_handlers -- name: Start the PostgreSQL Service - ansible.builtin.systemd: - name: "{{ postgresql_container_name }}" - state: started + - name: Start the PostgreSQL Service + ansible.builtin.systemd: + name: "{{ postgresql_container_name }}" + state: started + scope: user # SCRAM-SHA-256 is default for PostgreSQL 14+, # after the upgrade, we can drop this task. 
diff --git a/src/roles/pulp/defaults/main.yaml b/src/roles/pulp/defaults/main.yaml index 72db8f6ce..a82f47696 100644 --- a/src/roles/pulp/defaults/main.yaml +++ b/src/roles/pulp/defaults/main.yaml @@ -5,6 +5,10 @@ pulp_api_image: "{{ pulp_container_image }}:{{ pulp_container_tag }}" pulp_content_image: "{{ pulp_container_image }}:{{ pulp_container_tag }}" pulp_worker_image: "{{ pulp_container_image }}:{{ pulp_container_tag }}" +# Container user namespace UID/GID (pulp user in pulp-oci-images) +pulp_container_uid: 700 +pulp_container_gid: 700 + pulp_worker_count: "{{ [8, ansible_facts['processor_nproc']] | min }}" pulp_content_service_worker_count: "{{ (2 * ([8, ansible_facts['processor_nproc']] | min)) + 1 }}" pulp_api_service_worker_count: "{{ ([4, ansible_facts['processor_nproc']] | min) + 1 }}" diff --git a/src/roles/pulp/handlers/main.yml b/src/roles/pulp/handlers/main.yml index ac363b9d5..7a23e9cc0 100644 --- a/src/roles/pulp/handlers/main.yml +++ b/src/roles/pulp/handlers/main.yml @@ -1,15 +1,30 @@ --- - name: Restart pulp-api + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" ansible.builtin.systemd: name: pulp-api state: restarted + scope: user - name: Restart pulp-content + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" ansible.builtin.systemd: name: pulp-content state: restarted + scope: user - name: Restart pulp-worker + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" ansible.builtin.systemd: name: pulp-worker.target state: restarted + scope: user diff --git a/src/roles/pulp/tasks/main.yaml b/src/roles/pulp/tasks/main.yaml index 79b5d563d..7fe1a14de 100644 --- a/src/roles/pulp/tasks/main.yaml +++ b/src/roles/pulp/tasks/main.yaml @@ -1,271 +1,301 @@ -- name: Pull the Pulp API container image - containers.podman.podman_image: - name: "{{ 
pulp_api_image }}" - state: present - -- name: Pull the Pulp Content container image - containers.podman.podman_image: - name: "{{ pulp_content_image }}" - state: present - -- name: Pull the Pulp Worker container image - containers.podman.podman_image: - name: "{{ pulp_worker_image }}" - state: present - -- name: Create Pulp storage +--- +- name: Create Pulp storage directories ansible.builtin.file: path: "{{ item | split(':') | first }}" state: directory + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: "0755" + setype: container_var_lib_t loop: "{{ pulp_volumes }}" - name: Create Pulp storage subdirs ansible.builtin.file: path: "/var/lib/pulp/{{ item }}" state: directory + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: "0755" + setype: container_var_lib_t loop: - tmp - assets - media -- name: Create DB password secret - containers.podman.podman_secret: - state: present - name: pulp-db-password - data: "{{ pulp_database_password }}" - notify: - - Restart pulp-api - - Restart pulp-content - - Restart pulp-worker - - name: Generate Django secret key - ansible.builtin.command: "bash -c 'openssl rand -base64 50 | tr -d \"\\n\" | tr \"+/\" \"-_\" > /var/lib/pulp/django_secret_key'" - args: - creates: /var/lib/pulp/django_secret_key - -- name: Set secret key file permissions - ansible.builtin.file: - path: /var/lib/pulp/django_secret_key - owner: root - group: root + ansible.builtin.copy: + content: "{{ lookup('pipe', 'openssl rand -base64 50 | tr -d \"\\n\" | tr \"+/\" \"-_\"') }}" + dest: /var/lib/pulp/django_secret_key + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: '0600' - -- name: Create django pulp secret key secret - containers.podman.podman_secret: - state: present - name: pulp-django-secret-key - path: /var/lib/pulp/django_secret_key - notify: - - Restart pulp-api - - Restart pulp-content - - Restart pulp-worker + setype: container_var_lib_t + force: false - name: Generate database 
symmetric key - ansible.builtin.command: "bash -c 'openssl rand -base64 32 | tr \"+/\" \"-_\" > /var/lib/pulp/database_fields.symmetric.key'" + ansible.builtin.copy: + content: "{{ lookup('pipe', 'openssl rand -base64 32 | tr \"+/\" \"-_\"') }}" + dest: /var/lib/pulp/database_fields.symmetric.key + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0600' + setype: container_var_lib_t + force: false + +- name: Set Pulp directory ownership for container UID/GID + ansible.builtin.shell: | + cd /tmp + sudo -u {{ foremanctl_user }} XDG_RUNTIME_DIR={{ foremanctl_xdg_runtime_dir }} \ + podman unshare chown -R {{ pulp_container_uid }}:{{ pulp_container_gid }} /var/lib/pulp args: - creates: /var/lib/pulp/database_fields.symmetric.key + executable: /bin/bash + changed_when: true -- name: Create database symmetric key secret - containers.podman.podman_secret: - state: present - name: pulp-symmetric-key - path: /var/lib/pulp/database_fields.symmetric.key - notify: - - Restart pulp-api - - Restart pulp-content - - Restart pulp-worker +- name: Configure Pulp containers and services as rootless user + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" + block: + - name: Pull the Pulp API container image + containers.podman.podman_image: + name: "{{ pulp_api_image }}" + state: present -- name: Deploy Pulp API Container - containers.podman.podman_container: - name: "{{ pulp_api_container_name }}" - image: "{{ pulp_api_image }}" - state: quadlet - sdnotify: true - command: pulp-api - network: host - hostname: "pulp-api.{{ ansible_facts['fqdn'] }}" - volumes: "{{ pulp_volumes }}" - security_opt: - - "label=disable" - secrets: - - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' - - 'pulp-db-password,type=env,target=PULP_DATABASES__default__PASSWORD' - - 'pulp-django-secret-key,type=env,target=PULP_SECRET_KEY' - env: "{{ pulp_settings_env }}" - 
quadlet_options: - - | - [Install] - WantedBy=default.target foreman.target - [Unit] - PartOf=foreman.target - Wants=redis.service postgresql.service - After=redis.service postgresql.service - [Service] - Restart=always - RestartSec=3 - notify: Restart pulp-api + - name: Pull the Pulp Content container image + containers.podman.podman_image: + name: "{{ pulp_content_image }}" + state: present -- name: Deploy Pulp Content Container - containers.podman.podman_container: - name: "{{ pulp_content_container_name }}" - image: "{{ pulp_content_image }}" - state: quadlet - sdnotify: true - command: pulp-content - network: host - hostname: "pulp-content.{{ ansible_facts['fqdn'] }}" - volumes: "{{ pulp_volumes }}" - security_opt: - - "label=disable" - secrets: - - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' - - 'pulp-db-password,type=env,target=PULP_DATABASES__default__PASSWORD' - - 'pulp-django-secret-key,type=env,target=PULP_SECRET_KEY' - env: "{{ pulp_settings_env }}" - quadlet_options: - - | - [Install] - WantedBy=default.target foreman.target - [Unit] - PartOf=foreman.target - Wants=redis.service postgresql.service - After=redis.service postgresql.service - [Service] - Restart=always - RestartSec=3 - notify: Restart pulp-content + - name: Pull the Pulp Worker container image + containers.podman.podman_image: + name: "{{ pulp_worker_image }}" + state: present -- name: Deploy Pulp Worker Template - containers.podman.podman_container: - name: "{{ pulp_worker_container_name }}-%i" - quadlet_filename: "{{ pulp_worker_container_name }}@" - image: "{{ pulp_worker_image }}" - state: quadlet - command: pulp-worker - network: host - hostname: "pulp-worker-%i.{{ ansible_facts['fqdn'] }}" - volumes: "{{ pulp_volumes }}" - security_opt: - - "label=disable" - secrets: - - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' - - 'pulp-db-password,type=env,target=PULP_DATABASES__default__PASSWORD' - - 
'pulp-django-secret-key,type=env,target=PULP_SECRET_KEY' - env: "{{ pulp_settings_env }}" - quadlet_options: - - | - [Install] - WantedBy=foreman.target pulp-worker.target - [Unit] - PartOf=pulp-worker.target foreman.target - Wants=redis.service postgresql.service - After=redis.service postgresql.service - [Service] - Restart=always - RestartSec=3 - SyslogIdentifier={{ pulp_worker_container_name }}@%i - notify: Restart pulp-worker + - name: Create DB password secret + containers.podman.podman_secret: + state: present + name: pulp-db-password + data: "{{ pulp_database_password }}" + notify: + - Restart pulp-api + - Restart pulp-content + - Restart pulp-worker -- name: Create Pulp Worker Container instances - ansible.builtin.file: - state: link - src: "/etc/containers/systemd/{{ pulp_worker_container_name }}@.container" - dest: "/etc/containers/systemd/{{ item }}.container" - loop: "{{ pulp_worker_services }}" + - name: Create django pulp secret key secret + containers.podman.podman_secret: + state: present + name: pulp-django-secret-key + path: /var/lib/pulp/django_secret_key + notify: + - Restart pulp-api + - Restart pulp-content + - Restart pulp-worker -- name: Create pulp-worker.target - ansible.builtin.copy: - dest: /etc/systemd/system/pulp-worker.target - owner: root - group: root - mode: '0644' - content: | - [Unit] - Description=Pulp Worker Services - [Install] - WantedBy=foreman.target + - name: Create database symmetric key secret + containers.podman.podman_secret: + state: present + name: pulp-symmetric-key + path: /var/lib/pulp/database_fields.symmetric.key + notify: + - Restart pulp-api + - Restart pulp-content + - Restart pulp-worker -- name: Run daemon reload to load service files - ansible.builtin.systemd: - daemon_reload: true + - name: Deploy Pulp API Container + containers.podman.podman_container: + name: "{{ pulp_api_container_name }}" + image: "{{ pulp_api_image }}" + state: quadlet + sdnotify: true + command: pulp-api + network: host + hostname: 
"pulp-api.{{ ansible_facts['fqdn'] }}" + volumes: "{{ pulp_volumes }}" + security_opt: + - "label=disable" + secrets: + - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' + - 'pulp-db-password,type=env,target=PULP_DATABASES__default__PASSWORD' + - 'pulp-django-secret-key,type=env,target=PULP_SECRET_KEY' + env: "{{ pulp_settings_env }}" + quadlet_options: + - | + [Install] + WantedBy=default.target foreman.target + [Unit] + PartOf=foreman.target + Wants=redis.service postgresql.service + After=redis.service postgresql.service + [Service] + Restart=always + RestartSec=3 + notify: Restart pulp-api -- name: Migrate the Pulp database - containers.podman.podman_container: - name: pulpcore-manager-migrate - image: "{{ pulp_api_image }}" - command: pulpcore-manager migrate --noinput - detach: false - network: host - volumes: "{{ pulp_volumes }}" - secrets: - - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' - - 'pulp-db-password,type=env,target=PULP_DATABASES__default__PASSWORD' - env: "{{ pulp_settings_database_env }}" + - name: Deploy Pulp Content Container + containers.podman.podman_container: + name: "{{ pulp_content_container_name }}" + image: "{{ pulp_content_image }}" + state: quadlet + sdnotify: true + command: pulp-content + network: host + hostname: "pulp-content.{{ ansible_facts['fqdn'] }}" + volumes: "{{ pulp_volumes }}" + security_opt: + - "label=disable" + secrets: + - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' + - 'pulp-db-password,type=env,target=PULP_DATABASES__default__PASSWORD' + - 'pulp-django-secret-key,type=env,target=PULP_SECRET_KEY' + env: "{{ pulp_settings_env }}" + quadlet_options: + - | + [Install] + WantedBy=default.target foreman.target + [Unit] + PartOf=foreman.target + Wants=redis.service postgresql.service + After=redis.service postgresql.service + [Service] + Restart=always + RestartSec=3 + notify: Restart pulp-content -- name: 
Ensure Pulp admin user exists - containers.podman.podman_container: - name: pulpcore-manager-admin-password - image: "{{ pulp_api_image }}" - command: pulpcore-manager reset-admin-password --random - detach: false - network: host - volumes: "{{ pulp_volumes }}" - secrets: - - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' - - 'pulp-db-password,type=env,target=PULP_DATABASES__default__PASSWORD' - env: "{{ pulp_settings_database_env }}" + - name: Deploy Pulp Worker Template + containers.podman.podman_container: + name: "{{ pulp_worker_container_name }}-%i" + quadlet_filename: "{{ pulp_worker_container_name }}@" + image: "{{ pulp_worker_image }}" + state: quadlet + command: pulp-worker + network: host + hostname: "pulp-worker-%i.{{ ansible_facts['fqdn'] }}" + volumes: "{{ pulp_volumes }}" + security_opt: + - "label=disable" + secrets: + - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' + - 'pulp-db-password,type=env,target=PULP_DATABASES__default__PASSWORD' + - 'pulp-django-secret-key,type=env,target=PULP_SECRET_KEY' + env: "{{ pulp_settings_env }}" + quadlet_options: + - | + [Install] + WantedBy=foreman.target pulp-worker.target + [Unit] + PartOf=pulp-worker.target foreman.target + Wants=redis.service postgresql.service + After=redis.service postgresql.service + [Service] + Restart=always + RestartSec=3 + SyslogIdentifier={{ pulp_worker_container_name }}@%i + notify: Restart pulp-worker -- name: Flush handlers to restart services - ansible.builtin.meta: flush_handlers + - name: Create Pulp Worker Container instances + ansible.builtin.file: + state: link + src: "{{ foremanctl_quadlet_dir }}/{{ pulp_worker_container_name }}@.container" + dest: "{{ foremanctl_quadlet_dir }}/{{ item }}.container" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + loop: "{{ pulp_worker_services }}" -- name: Start Pulp services - ansible.builtin.systemd: - name: "{{ item }}" - state: started - 
async: 60 - poll: 0 - loop: "{{ pulp_all_services }}" - register: pulp_services + - name: Create pulp-worker.target + ansible.builtin.copy: + dest: "{{ foremanctl_systemd_user_dir }}/pulp-worker.target" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0644' + content: | + [Unit] + Description=Pulp Worker Services + [Install] + WantedBy=foreman.target -- name: Wait for Pulp services - ansible.builtin.async_status: - jid: "{{ item.ansible_job_id }}" - register: _pulp_job_result - until: _pulp_job_result is finished - retries: 100 - delay: 1 - loop: "{{ pulp_services.results }}" + - name: Run daemon reload to load service files + ansible.builtin.systemd: + daemon_reload: true + scope: user -- name: Enable and start pulp-worker.target - ansible.builtin.systemd: - name: pulp-worker.target - enabled: true - state: started + - name: Migrate the Pulp database + containers.podman.podman_container: + name: pulpcore-manager-migrate + image: "{{ pulp_api_image }}" + command: pulpcore-manager migrate --noinput + detach: false + network: host + volumes: "{{ pulp_volumes }}" + secrets: + - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' + - 'pulp-db-password,type=env,target=PULP_DATABASES__default__PASSWORD' + env: "{{ pulp_settings_database_env }}" -- name: Gather service facts to find existing pulp-worker instances - ansible.builtin.service_facts: + - name: Ensure Pulp admin user exists + containers.podman.podman_container: + name: pulpcore-manager-admin-password + image: "{{ pulp_api_image }}" + command: pulpcore-manager reset-admin-password --random + detach: false + network: host + volumes: "{{ pulp_volumes }}" + secrets: + - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' + - 'pulp-db-password,type=env,target=PULP_DATABASES__default__PASSWORD' + env: "{{ pulp_settings_database_env }}" -- name: Build list of existing pulp-worker services - ansible.builtin.set_fact: - 
pulp_existing_workers: "{{ ansible_facts.services.keys() | select('match', '^' + pulp_worker_container_name + '@\\d+\\.service$') | list }}" + - name: Flush handlers to restart services + ansible.builtin.meta: flush_handlers -- name: Stop and disable old pulp-worker instances - ansible.builtin.systemd: - name: "{{ item }}" - enabled: false - state: stopped - loop: "{{ pulp_existing_workers }}" - when: - - pulp_existing_workers | length > 0 - - (item | regex_replace('^' + pulp_worker_container_name + '@(\\d+)\\.service$', '\\1') | int) > (pulp_worker_count | int) + - name: Start Pulp services + ansible.builtin.systemd: + name: "{{ item }}" + state: started + scope: user + async: 60 + poll: 0 + loop: "{{ pulp_all_services }}" + register: pulp_services -- name: Remove container symlinks for old pulp-worker instances - ansible.builtin.file: - path: "/etc/containers/systemd/{{ item | regex_replace('\\.service$', '.container') }}" - state: absent - loop: "{{ pulp_existing_workers }}" - when: - - pulp_existing_workers | length > 0 - - (item | regex_replace('^' + pulp_worker_container_name + '@(\\d+)\\.service$', '\\1') | int) > (pulp_worker_count | int) + - name: Wait for Pulp services + ansible.builtin.async_status: + jid: "{{ item.ansible_job_id }}" + register: _pulp_job_result + until: _pulp_job_result is finished + retries: 100 + delay: 1 + loop: "{{ pulp_services.results }}" + + - name: Gather service facts to find existing pulp-worker instances + ansible.builtin.service_facts: + + - name: Build list of existing pulp-worker services + ansible.builtin.set_fact: + pulp_existing_workers: "{{ ansible_facts.services.keys() | select('match', '^' + pulp_worker_container_name + '@\\d+\\.service$') | list }}" + + - name: Enable and start pulp-worker.target + ansible.builtin.systemd: + name: pulp-worker.target + enabled: true + state: started + scope: user + + - name: Clean up old pulp-worker instances + when: + - pulp_existing_workers | length > 0 + - (item | 
regex_replace('^' + pulp_worker_container_name + '@(\\d+)\\.service$', '\\1') | int) > (pulp_worker_count | int) + block: + - name: Stop and disable old pulp-worker instances + ansible.builtin.systemd: + name: "{{ item }}" + enabled: false + state: stopped + scope: user + loop: "{{ pulp_existing_workers }}" + + - name: Remove container symlinks for old pulp-worker instances + ansible.builtin.file: + path: "{{ foremanctl_quadlet_dir }}/{{ item | regex_replace('\\.service$', '.container') }}" + state: absent + loop: "{{ pulp_existing_workers }}" diff --git a/src/roles/redis/defaults/main.yml b/src/roles/redis/defaults/main.yml index 5c0c3e140..a1d96f8ea 100644 --- a/src/roles/redis/defaults/main.yml +++ b/src/roles/redis/defaults/main.yml @@ -1,3 +1,9 @@ --- redis_container_image: quay.io/sclorg/redis-6-c9s redis_container_tag: "latest" + +# Container user namespace UID/GID (redis user in sclorg/redis-6-c9s) +redis_container_uid: 1001 +redis_container_gid: 1001 + +redis_data_dir: /var/lib/redis diff --git a/src/roles/redis/tasks/main.yaml b/src/roles/redis/tasks/main.yaml index 441691e15..96fce75fa 100644 --- a/src/roles/redis/tasks/main.yaml +++ b/src/roles/redis/tasks/main.yaml @@ -1,40 +1,58 @@ --- -- name: Pull Redis container image - containers.podman.podman_image: - name: "{{ redis_container_image }}:{{ redis_container_tag }}" - state: present - - name: Create directory for Redis data ansible.builtin.file: - path: /var/lib/redis + path: "{{ redis_data_dir }}" state: directory - owner: 1001 - group: 1001 + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" mode: '0755' + setype: container_var_lib_t + +- name: Set Redis directory ownership for container UID/GID + ansible.builtin.shell: | + cd /tmp + sudo -u {{ foremanctl_user }} XDG_RUNTIME_DIR={{ foremanctl_xdg_runtime_dir }} \ + podman unshare chown -R {{ redis_container_uid }}:{{ redis_container_gid }} {{ redis_data_dir }} + args: + executable: /bin/bash + changed_when: true + +- name: Configure 
Redis container as rootless user + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" + block: + - name: Pull Redis container image + containers.podman.podman_image: + name: "{{ redis_container_image }}:{{ redis_container_tag }}" + state: present -- name: Run Redis as a container - containers.podman.podman_container: - name: redis - image: "{{ redis_container_image }}:{{ redis_container_tag }}" - state: quadlet - sdnotify: true - command: ["run-redis", "--supervised", "systemd"] - volumes: - - /var/lib/redis:/data:Z - ports: - - "6379:6379" - quadlet_options: - - | - [Install] - WantedBy=default.target foreman.target - [Unit] - PartOf=foreman.target + - name: Run Redis as a container + containers.podman.podman_container: + name: redis + image: "{{ redis_container_image }}:{{ redis_container_tag }}" + state: quadlet + sdnotify: true + command: ["run-redis", "--supervised", "systemd"] + volumes: + - "{{ redis_data_dir }}:/data:Z" + ports: + - "6379:6379" + quadlet_options: + - | + [Install] + WantedBy=default.target foreman.target + [Unit] + PartOf=foreman.target -- name: Run daemon reload - ansible.builtin.systemd: - daemon_reload: true + - name: Run daemon reload + ansible.builtin.systemd: + daemon_reload: true + scope: user -- name: Start the Redis Service - ansible.builtin.systemd: - name: redis - state: started + - name: Start the Redis Service + ansible.builtin.systemd: + name: redis + state: started + scope: user diff --git a/src/roles/rootless_user/defaults/main.yaml b/src/roles/rootless_user/defaults/main.yaml new file mode 100644 index 000000000..544fc0dd5 --- /dev/null +++ b/src/roles/rootless_user/defaults/main.yaml @@ -0,0 +1,21 @@ +--- +# Dedicated service user for rootless Podman deployment +rootless_user_name: foremanctl +rootless_user_group: foremanctl +rootless_user_home: "/var/lib/{{ rootless_user_name }}" +rootless_user_shell: /sbin/nologin +rootless_user_comment: "Foremanctl 
Service User" + +# Rootless Podman directories +rootless_user_quadlet_dir: "{{ rootless_user_home }}/.config/containers/systemd" +rootless_user_systemd_user_dir: "{{ rootless_user_home }}/.config/systemd/user" + +# Subuid/subgid ranges for user namespaces +rootless_user_subuid_start: 100000 +rootless_user_subuid_count: 65536 +rootless_user_subgid_start: 100000 +rootless_user_subgid_count: 65536 + +# Unprivileged port binding configuration +rootless_user_unprivileged_port_start: 80 +... diff --git a/src/roles/rootless_user/tasks/main.yaml b/src/roles/rootless_user/tasks/main.yaml new file mode 100644 index 000000000..922ab9315 --- /dev/null +++ b/src/roles/rootless_user/tasks/main.yaml @@ -0,0 +1,80 @@ +--- +- name: "Create system group: {{ rootless_user_group }}" + ansible.builtin.group: + name: "{{ rootless_user_group }}" + system: true + state: present + +- name: "Create system user: {{ rootless_user_name }}" + ansible.builtin.user: + name: "{{ rootless_user_name }}" + group: "{{ rootless_user_group }}" + home: "{{ rootless_user_home }}" + shell: "{{ rootless_user_shell }}" + comment: "{{ rootless_user_comment }}" + system: true + create_home: true + state: present + +- name: Get user info to determine UID + ansible.builtin.getent: + database: passwd + key: "{{ rootless_user_name }}" + +- name: Set rootless_user_xdg_runtime_dir based on actual UID + ansible.builtin.set_fact: + rootless_user_xdg_runtime_dir: "/run/user/{{ ansible_facts['getent_passwd'][rootless_user_name][1] }}" + +- name: "Add subuid entry for user: {{ rootless_user_name }}" + ansible.builtin.lineinfile: + path: /etc/subuid + regexp: "^{{ rootless_user_name }}:" + line: "{{ rootless_user_name }}:{{ rootless_user_subuid_start }}:{{ rootless_user_subuid_count }}" + create: true + mode: '0644' + +- name: "Add subgid entry for user: {{ rootless_user_name }}" + ansible.builtin.lineinfile: + path: /etc/subgid + regexp: "^{{ rootless_user_group }}:" + line: "{{ rootless_user_group }}:{{ 
rootless_user_subgid_start }}:{{ rootless_user_subgid_count }}" + create: true + mode: '0644' + +- name: Configure unprivileged port binding + ansible.posix.sysctl: + name: net.ipv4.ip_unprivileged_port_start + value: "{{ rootless_user_unprivileged_port_start }}" + state: present + sysctl_set: true + reload: true + +- name: "Enable lingering for user: {{ rootless_user_name }}" + ansible.builtin.command: loginctl enable-linger {{ rootless_user_name }} + register: rootless_user_linger_result + changed_when: rootless_user_linger_result is succeeded + failed_when: rootless_user_linger_result is failed + +- name: Verify XDG_RUNTIME_DIR exists + ansible.builtin.stat: + path: "{{ rootless_user_xdg_runtime_dir }}" + register: rootless_user_xdg_stat + failed_when: not rootless_user_xdg_stat.stat.exists + +- name: Create user config directories + become: true + become_user: "{{ rootless_user_name }}" + ansible.builtin.file: + path: "{{ item }}" + state: directory + # XXX: Check this + # owner: "{{ rootless_user_name }}" + # group: "{{ rootless_user_group }}" + mode: '0755' + loop: + # - "{{ rootless_user_home }}/.config" + # - "{{ rootless_user_home }}/.config/containers" + - "{{ rootless_user_home }}/.config/containers/systemd" + # - "{{ rootless_user_home }}/.config/systemd" + - "{{ rootless_user_home }}/.config/systemd/user" +... 
diff --git a/src/roles/systemd_target/tasks/main.yml b/src/roles/systemd_target/tasks/main.yml index 189fec8d9..90d0fc314 100644 --- a/src/roles/systemd_target/tasks/main.yml +++ b/src/roles/systemd_target/tasks/main.yml @@ -1,22 +1,30 @@ --- -- name: Define foreman.target - ansible.builtin.copy: - dest: /etc/systemd/system/foreman.target - owner: root - group: root - mode: '0644' - content: | - [Unit] - Description=Foreman services - [Install] - WantedBy=default.target +- name: Configure Foreman systemd target as rootless user + environment: + XDG_RUNTIME_DIR: "{{ foremanctl_xdg_runtime_dir }}" + become: true + become_user: "{{ foremanctl_user }}" + block: + - name: Define foreman.target + ansible.builtin.copy: + dest: "{{ foremanctl_systemd_user_dir }}/foreman.target" + owner: "{{ foremanctl_user }}" + group: "{{ foremanctl_group }}" + mode: '0644' + content: | + [Unit] + Description=Foreman services + [Install] + WantedBy=default.target -- name: Run daemon reload to load new target - ansible.builtin.systemd: - daemon_reload: true + - name: Run daemon reload to load new target + ansible.builtin.systemd: + daemon_reload: true + scope: user -- name: Start foreman.target - ansible.builtin.systemd_service: - name: foreman.target - state: started - enabled: true + - name: Start foreman.target + ansible.builtin.systemd_service: + name: foreman.target + state: started + enabled: true + scope: user diff --git a/src/vars/base.yaml b/src/vars/base.yaml index 32c6f2c3f..850e6536c 100644 --- a/src/vars/base.yaml +++ b/src/vars/base.yaml @@ -1,4 +1,12 @@ --- +# Rootless Podman deployment configuration +foremanctl_user: foremanctl +foremanctl_group: foremanctl +foremanctl_home: "/var/lib/{{ foremanctl_user }}" +foremanctl_quadlet_dir: "{{ foremanctl_home }}/.config/containers/systemd" +foremanctl_systemd_user_dir: "{{ foremanctl_home }}/.config/systemd/user" +# Note: foremanctl_xdg_runtime_dir is set dynamically by rootless_user role after user creation + 
certificates_hostnames: - "{{ ansible_facts['fqdn'] }}" - localhost @@ -7,6 +15,8 @@ certificates_ca_password: "CHANGEME" candlepin_keystore_password: "CHANGEME" candlepin_oauth_secret: "CHANGEME" +candlepin_keystore_path: "{{ foremanctl_home }}/candlepin.keystore" +candlepin_truststore_path: "{{ foremanctl_home }}/candlepin.truststore" candlepin_ca_key_password: "{{ ca_key_password }}" candlepin_ca_key: "{{ ca_key }}" candlepin_ca_certificate: "{{ ca_certificate }}" diff --git a/src/vars/default_certificates.yml b/src/vars/default_certificates.yml index 09f47c5c9..7a179f514 100644 --- a/src/vars/default_certificates.yml +++ b/src/vars/default_certificates.yml @@ -1,5 +1,8 @@ --- -certificates_ca_directory: /root/certificates +# Hardcoded path instead of using {{ foremanctl_home }}/certificates +# because base.yaml (which defines foremanctl_home) loads after this file +# and base.yaml contains variables that reference certificate paths from this file +certificates_ca_directory: /var/lib/foremanctl/certificates ca_key_password: "{{ certificates_ca_directory }}/private/ca.pwd" ca_certificate: "{{ certificates_ca_directory }}/certs/ca.crt" ca_key: "{{ certificates_ca_directory }}/private/ca.key" diff --git a/src/vars/installer_certificates.yml b/src/vars/installer_certificates.yml index c6ab83af3..0449adcb9 100644 --- a/src/vars/installer_certificates.yml +++ b/src/vars/installer_certificates.yml @@ -1,12 +1,16 @@ --- -ca_key_password: "/root/ssl-build/katello-default-ca.pwd" -ca_certificate: "/root/ssl-build/katello-default-ca.crt" -ca_key: "/root/ssl-build/katello-default-ca.key" -server_certificate: "/root/ssl-build/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-apache.crt" -server_key: "/root/ssl-build/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-apache.key" -server_ca_certificate: "/root/ssl-build/katello-server-ca.crt" -client_certificate: "/root/ssl-build/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-foreman-client.crt" 
-client_key: "/root/ssl-build/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-foreman-client.key" +# Standard foreman-installer certificate location +# Note: For rootless deployments, permissions are automatically configured +# during deployment to allow the foremanctl user to read these certificates +certificates_ca_directory: /root/ssl-build +ca_key_password: "{{ certificates_ca_directory }}/katello-default-ca.pwd" +ca_certificate: "{{ certificates_ca_directory }}/katello-default-ca.crt" +ca_key: "{{ certificates_ca_directory }}/katello-default-ca.key" +server_certificate: "{{ certificates_ca_directory }}/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-apache.crt" +server_key: "{{ certificates_ca_directory }}/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-apache.key" +server_ca_certificate: "{{ certificates_ca_directory }}/katello-server-ca.crt" +client_certificate: "{{ certificates_ca_directory }}/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-foreman-client.crt" +client_key: "{{ certificates_ca_directory }}/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-foreman-client.key" client_ca_certificate: "{{ ca_certificate }}" -localhost_key: "/root/ssl-build/localhost/localhost-tomcat.key" -localhost_certificate: "/root/ssl-build/localhost/localhost-tomcat.crt" +localhost_key: "{{ certificates_ca_directory }}/localhost/localhost-tomcat.key" +localhost_certificate: "{{ certificates_ca_directory }}/localhost/localhost-tomcat.crt" diff --git a/tests/candlepin_test.py b/tests/candlepin_test.py index dc78faa8e..c990324ad 100644 --- a/tests/candlepin_test.py +++ b/tests/candlepin_test.py @@ -1,14 +1,18 @@ import re +from conftest import get_service -def assert_secret_content(server, secret_name, secret_value): - secret = server.run(f'podman secret inspect --format {"{{.SecretData}}"} --showsecret {secret_name}') +def assert_secret_content(server, secret_name, secret_value, user): + if user: + secret = server.run(f'cd /tmp && sudo -u 
{user} podman secret inspect --format {{"{{.SecretData}}"}} --showsecret {secret_name}') + else: + secret = server.run(f'podman secret inspect --format {"{{.SecretData}}"} --showsecret {secret_name}') assert secret.succeeded assert secret.stdout.strip() == secret_value -def test_candlepin_service(server): - candlepin = server.service("candlepin") +def test_candlepin_service(server, user): + candlepin = get_service(server, "candlepin", user) assert candlepin.is_running @@ -33,10 +37,10 @@ def test_artemis_auth(server, certificates): assert cmd.succeeded, f"exit: {cmd.rc}\n\nstdout:\n{cmd.stdout}\n\nstderr:\n{cmd.stderr}" -def test_certs_users_file(server, certificates): +def test_certs_users_file(server, certificates, user): cmd = server.run(f'openssl x509 -noout -subject -in {certificates["client_certificate"]} -nameopt rfc2253,sep_comma_plus_space') subject = cmd.stdout.replace("subject=", "").rstrip() - assert_secret_content(server, 'candlepin-artemis-cert-users-properties', f'katelloUser={subject}') + assert_secret_content(server, 'candlepin-artemis-cert-users-properties', f'katelloUser={subject}', user) def test_tls(server): @@ -57,5 +61,5 @@ def test_tls(server): assert "least strength: A" in result -def test_cert_roles(server): - assert_secret_content(server, 'candlepin-artemis-cert-roles-properties', 'candlepinEventsConsumer=katelloUser') +def test_cert_roles(server, user): + assert_secret_content(server, 'candlepin-artemis-cert-roles-properties', 'candlepinEventsConsumer=katelloUser', user) diff --git a/tests/conftest.py b/tests/conftest.py index d9ed914cc..62fd82c3e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -13,9 +13,59 @@ SSH_CONFIG='./.tmp/ssh-config' +class GenericService: + """Generic service checker that handles both user services and container services""" + def __init__(self, host, service_name, user=None): + self.host = host + self.service_name = service_name + self.user = user # If set, use user systemd; otherwise check podman 
container + + @property + def is_running(self): + """Check if service/container is running""" + if self.user: + # User systemd service + cmd = self.host.run( + f"systemctl --machine={self.user}@ --user is-active {self.service_name}" + ) + return cmd.stdout.strip() == "active" + else: + # Podman container + cmd = self.host.run(f"podman inspect -f '{{{{.State.Running}}}}' {self.service_name}") + return cmd.succeeded and cmd.stdout.strip() == "true" + + @property + def is_enabled(self): + """Check if service is enabled""" + if self.user: + # User systemd service + cmd = self.host.run( + f"systemctl --machine={self.user}@ --user is-enabled {self.service_name}" + ) + return cmd.stdout.strip() in ("enabled", "static") + else: + # Containers don't have enabled state, just return is_running + return self.is_running + + @property + def exists(self): + """Check if service/container exists""" + if self.user: + # User systemd service + cmd = self.host.run( + f"systemctl --machine={self.user}@ --user list-unit-files {self.service_name}" + ) + return self.service_name in cmd.stdout + else: + # Podman container + cmd = self.host.run(f"podman ps -a --filter name={self.service_name} --format '{{{{.Names}}}}'") + return self.service_name in cmd.stdout + + def pytest_addoption(parser): parser.addoption("--certificate-source", action="store", default="default", choices=('default', 'installer'), help="Where to obtain certificates from") parser.addoption("--database-mode", action="store", default="internal", choices=('internal', 'external'), help="Whether the database is internal or external") + parser.addoption("--user", action="store", default="none", help="User for rootless services (use 'none' for rootful mode)") @pytest.fixture(scope="module") @@ -47,8 +97,13 @@ def client_fqdn(client_hostname): def certificates(pytestconfig, server_fqdn): source = pytestconfig.getoption("certificate_source") env = Environment(loader=FileSystemLoader("."), autoescape=select_autoescape()) + + # 
First pass: render to get the certificates_ca_directory value template = env.get_template(f"./src/vars/{source}_certificates.yml") - context = {'certificates_ca_directory': '/root/certificates', + first_pass = yaml.safe_load(template.render({'ansible_facts': {'fqdn': server_fqdn}})) + + # Second pass: render with the certificates_ca_directory from the template itself + context = {'certificates_ca_directory': first_pass['certificates_ca_directory'], 'ansible_facts': {'fqdn': server_fqdn}} return yaml.safe_load(template.render(context)) @@ -67,6 +122,36 @@ def client(client_hostname): yield testinfra.get_host(f'paramiko://{client_hostname}', sudo=True, ssh_config=SSH_CONFIG) +@pytest.fixture(scope="module") +def user(pytestconfig): + """User for rootless services (None for rootful mode)""" + user_value = pytestconfig.getoption("user") + return None if user_value == "none" else user_value + + +@pytest.fixture(scope="module") +def user_uid(server, user): + """Get the UID of the user""" + cmd = server.run(f"id -u {user}") + return cmd.stdout.strip() + + +@pytest.fixture(scope="module") +def user_service(server, user): + """Factory fixture for user service checking""" + def _user_service(service_name): + return GenericService(server, service_name, user=user) + return _user_service + + +def get_service(host, service_name, user=None): + """Generic helper to get either rootful or rootless service based on user parameter""" + if user: + return GenericService(host, service_name, user=user) + else: + return host.service(service_name) + + @pytest.fixture(scope="module") def database(database_mode, server): if database_mode == 'external': @@ -74,6 +159,13 @@ def database(database_mode, server): else: yield server +@pytest.fixture(scope="module") +def database_user_service(database_mode, database, user): + """Factory fixture for database service checking""" + def _service(service_name): + # Both internal and external databases run as user services + return 
GenericService(database, service_name, user=user) + return _service @pytest.fixture(scope="module") def ssh_config(server_hostname): diff --git a/tests/foreman_proxy_test.py b/tests/foreman_proxy_test.py index 840372ad9..9046ae3ab 100644 --- a/tests/foreman_proxy_test.py +++ b/tests/foreman_proxy_test.py @@ -1,4 +1,5 @@ import json +from conftest import get_service FOREMAN_PROXY_PORT = 8443 @@ -8,8 +9,8 @@ def test_foreman_proxy_features(server, certificates, server_fqdn): features = json.loads(cmd.stdout) assert "logs" in features -def test_foreman_proxy_service(server): - foreman_proxy = server.service("foreman-proxy") +def test_foreman_proxy_service(server, user): + foreman_proxy = get_service(server, "foreman-proxy", user) assert foreman_proxy.is_running def test_foreman_proxy_port(server): diff --git a/tests/foreman_target_test.py b/tests/foreman_target_test.py index f6244b70e..ba11c99d0 100644 --- a/tests/foreman_target_test.py +++ b/tests/foreman_target_test.py @@ -1,4 +1,7 @@ -def test_foreman_target(server): - foreman_target = server.service("foreman.target") +from conftest import get_service + + +def test_foreman_target(server, user): + foreman_target = get_service(server, "foreman.target", user) assert foreman_target.is_running assert foreman_target.is_enabled diff --git a/tests/foreman_test.py b/tests/foreman_test.py index f3cb51a70..8d252c368 100644 --- a/tests/foreman_test.py +++ b/tests/foreman_test.py @@ -1,6 +1,7 @@ import json import pytest +from conftest import get_service FOREMAN_HOST = 'localhost' FOREMAN_PORT = 3000 @@ -26,8 +27,8 @@ def foreman_status(foreman_status_curl): return json.loads(foreman_status_curl.stdout) -def test_foreman_service(server): - foreman = server.service("foreman") +def test_foreman_service(server, user): + foreman = get_service(server, "foreman", user) assert foreman.is_running @@ -56,26 +57,29 @@ def test_katello_services_status(foreman_status, katello_service): @pytest.mark.parametrize("dynflow_instance", 
['orchestrator', 'worker', 'worker-hosts-queue']) -def test_foreman_dynflow_container_instances(server, dynflow_instance): - file = server.file(f"/etc/containers/systemd/dynflow-sidekiq@{dynflow_instance}.container") +def test_foreman_dynflow_container_instances(server, dynflow_instance, user): + if user: + file = server.file(f"/var/lib/{user}/.config/containers/systemd/dynflow-sidekiq@{dynflow_instance}.container") + else: + file = server.file(f"/etc/containers/systemd/dynflow-sidekiq@{dynflow_instance}.container") assert file.exists assert file.is_symlink @pytest.mark.parametrize("dynflow_instance", ['orchestrator', 'worker', 'worker-hosts-queue']) -def test_foreman_dynflow_service_instances(server, dynflow_instance): - service = server.service(f"dynflow-sidekiq@{dynflow_instance}") +def test_foreman_dynflow_service_instances(server, dynflow_instance, user): + service = get_service(server, f"dynflow-sidekiq@{dynflow_instance}", user) assert service.is_running @pytest.mark.parametrize("instance", RECURRING_INSTANCES) -def test_foreman_recurring_timers_enabled_and_running(server, instance): - timer = server.service(f"foreman-recurring@{instance}.timer") +def test_foreman_recurring_timers_enabled_and_running(server, instance, user): + timer = get_service(server, f"foreman-recurring@{instance}.timer", user) assert timer.is_enabled assert timer.is_running @pytest.mark.parametrize("instance", RECURRING_INSTANCES) -def test_foreman_recurring_services_exist(server, instance): - service = server.service(f"foreman-recurring@{instance}.service") +def test_foreman_recurring_services_exist(server, instance, user): + service = get_service(server, f"foreman-recurring@{instance}.service", user) assert service.exists diff --git a/tests/postgresql_test.py b/tests/postgresql_test.py index fa84f6d0b..4237e05f0 100644 --- a/tests/postgresql_test.py +++ b/tests/postgresql_test.py @@ -1,9 +1,10 @@ import csv import pytest +from conftest import get_service -def 
test_postgresql_service(database): - postgresql = database.service("postgresql") +def test_postgresql_service(database, user): + postgresql = get_service(database, "postgresql", user) assert postgresql.is_running @@ -12,33 +13,46 @@ def test_postgresql_port(database): assert postgresql.port("5432").is_reachable -def test_postgresql_databases(database): - result = database.run("podman exec postgresql psql -U postgres -c '\\l'") +def test_postgresql_databases(database, user): + if user: + result = database.run(f"cd /tmp && sudo -u {user} podman exec postgresql psql -U postgres -c '\\l'") + else: + result = database.run("podman exec postgresql psql -U postgres -c '\\l'") assert "foreman" in result.stdout assert "candlepin" in result.stdout assert "pulp" in result.stdout -def test_postgresql_users(database): - result = database.run("podman exec postgresql psql -U postgres -c '\\du'") +def test_postgresql_users(database, user): + if user: + result = database.run(f"cd /tmp && sudo -u {user} podman exec postgresql psql -U postgres -c '\\du'") + else: + result = database.run("podman exec postgresql psql -U postgres -c '\\du'") assert "foreman" in result.stdout assert "candlepin" in result.stdout assert "pulp" in result.stdout -def test_postgresql_password_encryption(database): - result = database.run("podman exec postgresql psql -U postgres -c 'SHOW password_encryption'") +def test_postgresql_password_encryption(database, user): + if user: + result = database.run(f"cd /tmp && sudo -u {user} podman exec postgresql psql -U postgres -c 'SHOW password_encryption'") + else: + result = database.run("podman exec postgresql psql -U postgres -c 'SHOW password_encryption'") assert "scram-sha-256" in result.stdout - result = database.run("echo 'COPY (select * from pg_shadow) TO STDOUT (FORMAT CSV);' | podman exec -i postgresql psql -U postgres") + if user: + result = database.run(f"cd /tmp && echo 'COPY (select * from pg_shadow) TO STDOUT (FORMAT CSV);' | sudo -u {user} podman exec 
-i postgresql psql -U postgres") + else: + result = database.run("echo 'COPY (select * from pg_shadow) TO STDOUT (FORMAT CSV);' | podman exec -i postgresql psql -U postgres") reader = csv.reader(result.stdout.splitlines()) for row in reader: assert ("SCRAM-SHA-256" in row[6]) -def test_postgresql_missing_with_external(server, database_mode): +def test_postgresql_missing_with_external(server, database_mode, user): if database_mode == 'internal': pytest.skip("Test only applies if database_mode=external") else: - assert not server.service("postgresql").exists + postgresql = get_service(server, "postgresql", user) + assert not postgresql.exists diff --git a/tests/pulp_test.py b/tests/pulp_test.py index eb96b5d59..013bb1b2a 100644 --- a/tests/pulp_test.py +++ b/tests/pulp_test.py @@ -1,5 +1,6 @@ import json import pytest +from conftest import GenericService, get_service PULP_HOST = 'localhost' PULP_API_PORT = 24817 @@ -13,21 +14,25 @@ def pulp_status_curl(server): def pulp_status(pulp_status_curl): return json.loads(pulp_status_curl.stdout) -def test_pulp_api_service(server): - pulp_api = server.service("pulp-api") +def test_pulp_api_service(server, user): + pulp_api = get_service(server, "pulp-api", user) assert pulp_api.is_running -def test_pulp_content_service(server): - pulp_content = server.service("pulp-content") + +def test_pulp_content_service(server, user): + pulp_content = get_service(server, "pulp-content", user) assert pulp_content.is_running -def test_pulp_worker_services(server): - result = server.run("systemctl list-units --all --type=service --no-legend 'pulp-worker@*.service' | awk '{print $1}'") +def test_pulp_worker_services(server, user): + if user: + result = server.run(f"systemctl --machine={user}@ --user list-units --all --type=service --no-legend 'pulp-worker@*.service' | awk '{{print $1}}'") + else: + result = server.run("systemctl list-units --all --type=service --no-legend 'pulp-worker@*.service' | awk '{print $1}'") worker_services = 
[s.strip() for s in result.stdout.split('\n') if s.strip()] assert len(worker_services) > 0 for worker_service in worker_services: - worker = server.service(worker_service) + worker = get_service(server, worker_service, user) assert worker.is_running def test_pulp_api_port(server): @@ -65,11 +70,15 @@ def test_pulp_status_workers(pulp_status): def test_pulp_volumes(server): assert server.file("/var/lib/pulp").is_directory -def test_pulp_worker_target(server): - pulp_worker_target = server.service("pulp-worker.target") +def test_pulp_worker_target(server, user): + pulp_worker_target = get_service(server, "pulp-worker.target", user) assert pulp_worker_target.is_running assert pulp_worker_target.is_enabled -def test_pulp_manager_check(server): - result = server.run("podman exec -ti pulp-api pulpcore-manager check --deploy") + +def test_pulp_manager_check(server, user): + if user: + result = server.run(f"cd /tmp && sudo -u {user} podman exec -ti pulp-api pulpcore-manager check --deploy") + else: + result = server.run("podman exec -ti pulp-api pulpcore-manager check --deploy") assert result.succeeded diff --git a/tests/redis_test.py b/tests/redis_test.py index c612ba035..9c8ebf3ff 100644 --- a/tests/redis_test.py +++ b/tests/redis_test.py @@ -1,12 +1,13 @@ import pytest +from conftest import get_service REDIS_HOST = 'localhost' REDIS_PORT = 6379 -def test_redis_service(server): - redis = server.service("redis") +def test_redis_service(server, user): + redis = get_service(server, "redis", user) assert redis.is_running