Commit abfd482

Author: Matt Pryor
Run zenith-client init with a command to get foreground behaviour
1 parent: 538bfed

4 files changed: +23 -15 lines


group_vars/openstack.yml

Lines changed: 0 additions & 2 deletions
@@ -47,5 +47,3 @@ cluster_groups_validation:
 cluster_groups_zenith:
   # Any hosts in the grafana group should go in the zenith group
   zenith: [grafana]
-  # Any hosts in the zenith group should have podman installed
-  podman: [zenith]
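
For context on how this mapping is consumed (see the slurm-infra.yml change further down): judging by the comment and the `add_host` task that pipes `group_names` through `terraform_infra_expand_groups`, each key names a target group and the list names the source groups whose hosts should be added to it. The explicit `podman: [zenith]` entry is dropped because the playbook now installs podman on `zenith,!podman` hosts directly. A rough sketch of the assumed expansion, with a hypothetical host name:

# Assumed semantics only -- inferred from the comment above and the add_host
# task in slurm-infra.yml; the host name is hypothetical.
#
#   hostvars['mycluster-grafana-0'].group_names                 -> [cluster, grafana]
#   ... | terraform_infra_expand_groups(cluster_groups_zenith)  -> [cluster, grafana, zenith]
#
cluster_groups_zenith:
  # target group: [source groups whose hosts are added to it]
  zenith: [grafana]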

roles/zenith_proxy/tasks/main.yml

Lines changed: 10 additions & 9 deletions
@@ -44,15 +44,16 @@
     become_user: "{{ zenith_proxy_podman_user }}"

 - name: Initialise Zenith client
-  containers.podman.podman_container:
-    name: "{{ zenith_proxy_service_name }}-init"
-    image: "{{ zenith_proxy_client_image }}"
-    command: [zenith-client, init]
-    detach: no
-    tty: yes
-    volumes:
-      - /etc/zenith/{{ zenith_proxy_service_name }}:/etc/zenith:ro
-      - "{{ zenith_proxy_service_name }}-ssh:/home/zenith/.ssh"
+  # Use a foreground command rather than the podman_container module as I could not
+  # work out the combination of parameters that produced the desired behaviour :-(
+  command: >-
+    podman run
+      --name {{ zenith_proxy_service_name }}-init
+      --replace
+      --volume /etc/zenith/{{ zenith_proxy_service_name }}:/etc/zenith:ro
+      --volume {{ zenith_proxy_service_name }}-ssh:/home/zenith/.ssh
+      {{ zenith_proxy_client_image }}
+      zenith-client init
   become: true
   become_user: "{{ zenith_proxy_podman_user }}"
   register: zenith_proxy_client_init
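
The reasoning in the commit message and the inline comment comes down to two podman/Ansible behaviours: `podman run` without `--detach` stays attached, so the `command` task only returns once the container's main process (`zenith-client init`) has exited, and `--replace` removes any leftover container of the same name from an earlier run. A minimal, self-contained sketch of the same pattern, using hypothetical names and a stock Alpine image rather than the Zenith client image:

# sketch-foreground-init.yml -- illustration only; task name, container name and
# image are hypothetical, not part of this commit.
- hosts: localhost
  gather_facts: false
  tasks:
    # Without --detach, podman run stays in the foreground, so this task blocks
    # until the container's process exits -- the behaviour the commit is after.
    # --replace cleans up a previous container with the same name, making reruns safe.
    - name: Run a one-shot container in the foreground
      command: >-
        podman run
          --name example-init
          --replace
          docker.io/library/alpine:3.18
          echo initialised
      register: example_init

    - debug:
        msg: "{{ example_init.stdout }}"   # -> initialised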

slurm-infra.yml

Lines changed: 12 additions & 3 deletions
@@ -58,6 +58,13 @@
         groups: "{{ hostvars[item].group_names | terraform_infra_expand_groups(cluster_groups_validation) }}"
       loop: "{{ groups.get('cluster', []) }}"
       when: cluster_run_validation | default(false) | bool
+    # If Zenith is enabled, add the zenith groups to hosts
+    - name: Add cluster hosts to Zenith groups
+      add_host:
+        name: "{{ item }}"
+        groups: "{{ hostvars[item].group_names | terraform_infra_expand_groups(cluster_groups_zenith) }}"
+      loop: "{{ groups.get('cluster', []) }}"
+      when: zenith_subdomain_monitoring is defined

 # Ensure that the hosts in the cluster can all refer to each other by their hostname
 - hosts: cluster
@@ -115,16 +122,18 @@

 # Deploy the monitoring stack first
 - import_playbook: vendor/stackhpc/ansible-slurm-appliance/ansible/monitoring.yml
-# Ensure that podman is installed on the host running grafana
-- hosts: grafana,!podman
+
+# Configure the Zenith clients that are required
+# First, ensure that podman is installed on all hosts that will run Zenith clients
+- hosts: zenith,!podman
   tasks:
     - import_role:
         name: podman
         tasks_from: prereqs.yml
     - import_role:
         name: podman
         tasks_from: config.yml
-# Then deploy the Zenith client onto the Grafana hosts to talk to it
+# Deploy the Zenith client for Grafana
 - hosts: grafana
   tasks:
     - include_role:
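
Two Ansible mechanics carry this playbook change: the new `add_host` task only runs when `zenith_subdomain_monitoring` is defined, so the Zenith groups are only populated when Zenith is actually enabled, and the host pattern `zenith,!podman` limits the prerequisite play to hosts that are in the `zenith` group but not already in the `podman` group. A small sketch of that pattern with a hypothetical inventory:

# Hypothetical inventory for illustration (not part of this commit):
#   zenith:  [mycluster-grafana-0, mycluster-monitoring-0]
#   podman:  [mycluster-monitoring-0]
#
# `zenith,!podman` selects the zenith hosts minus the podman hosts, so the play
# below would run only on mycluster-grafana-0.
- hosts: zenith,!podman
  gather_facts: false
  tasks:
    - debug:
        msg: "podman prerequisites would be installed on {{ inventory_hostname }}"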

ui-meta/slurm-infra.yml

Lines changed: 1 addition & 1 deletion
@@ -72,7 +72,7 @@ usage_template: |-
   Access to the cluster is via the external IP using SSH as the `rocky` user:

   ```
-  $ ssh rocky@{{ cluster.outputs.cluster_access_ip | default('<cluster ip>') }}
+  $ ssh rocky@{{ cluster.outputs.cluster_access_ip | default('[cluster ip]') }}
   [rocky@{{ cluster.name }}-login-0 ~]$ sinfo
   PARTITION AVAIL TIMELIMIT NODES STATE NODELIST
   compute* up 60-00:00:0 {{ "%3s" | format(cluster.parameter_values.compute_count) }} idle {{ cluster.name }}-compute-[0-{{ cluster.parameter_values.compute_count - 1 }}]
