|
30 | 30 | tuned_started: true
|
31 | 31 |
|
32 | 32 | nfs_client_mnt_point: "/mnt"
|
33 |
| - nfs_client_mnt_options: |
| 33 | + nfs_client_mnt_options: "defaults,nosuid,nodev" |
34 | 34 | nfs_client_mnt_state: mounted
|
35 | 35 | nfs_configurations:
|
36 | 36 | nfs_enable:
|
|
48 | 48 | - _netdev # prevents mount blocking early boot before networking available
|
49 | 49 | - rw
|
50 | 50 |
|
51 |
| - basic_users_groups: [] |
52 |
| - basic_users_manage_homedir: false # homedir must already exist on shared filesystem |
53 | 51 | basic_users_userdefaults:
|
54 | 52 | state: present
|
55 |
| - create_home: "{{ basic_users_manage_homedir }}" |
56 |
| - generate_ssh_key: "{{ basic_users_manage_homedir }}" |
| 53 | + generate_ssh_key: true |
57 | 54 | ssh_key_comment: "{{ item.name }}"
|
58 | 55 | basic_users_users: []
|
| 56 | + basic_users_groups: [] |
59 | 57 |
|
60 | 58 | tasks:
|
61 | 59 | - block:
|
|
96 | 94 | when: _mount_mnt_cluster.failed
|
97 | 95 |
|
98 | 96 | - name: Check if hostvars exist
|
| 97 | + become_user: slurm |
99 | 98 | stat:
|
100 | 99 | path: "/mnt/cluster/hostvars/{{ ansible_hostname }}/hostvars.yml"
|
101 | 100 | register: hostvars_stat
|
|
109 | 108 | - meta: end_play
|
110 | 109 | when: not hostvars_stat.stat.exists
|
111 | 110 |
|
112 |
| - - name: Load hostvars from NFS |
| 111 | + - name: Sync /mnt/cluster to /tmp |
| 112 | + become_user: slurm |
| 113 | + synchronize: |
| 114 | + src: "/mnt/cluster/" |
| 115 | + dest: "/tmp/cluster/" |
| 116 | + archive: yes |
| 117 | + recursive: yes |
| 118 | + |
| 119 | + - name: Unmount /mnt/cluster after sync |
| 120 | + become_user: slurm |
| 121 | + mount: |
| 122 | + path: /mnt/cluster |
| 123 | + state: unmounted |
| 124 | + |
| 125 | + - name: Load hostvars |
113 | 126 | # these take higher precedence than the vars block, matching normal Ansible hostvars behavior
|
114 | 127 | include_vars:
|
115 |
| - file: "/mnt/cluster/hostvars/{{ ansible_hostname }}/hostvars.yml" # can't use inventory_hostname |
116 |
| - |
117 |
| - # TODO: should /mnt/cluster now be UNMOUNTED to avoid future hang-ups? |
| 128 | + file: "/tmp/cluster/hostvars/{{ ansible_hostname }}/hostvars.yml" |
118 | 129 |
|
119 | 130 | - name: Run chrony role
|
120 | 131 | ansible.builtin.include_role:
|
121 | 132 | name: mrlesmithjr.chrony
|
122 |
| - when: enable_chrony | bool |
| 133 | + tasks_from: config_chrony.yml |
| 134 | + vars: |
| 135 | + # workaround for set_facts.yml: |
| 136 | + chrony_config: /etc/chrony.conf |
| 137 | + chrony_service: chronyd |
| 138 | + when: enable_chrony |
123 | 139 |
|
124 | 140 | - name: Configure resolv.conf
|
125 | 141 | block:
|
|
149 | 165 |
|
150 | 166 | - name: Copy cluster /etc/hosts
|
151 | 167 | copy:
|
152 |
| - src: /mnt/cluster/hosts |
| 168 | + src: /tmp/cluster/hosts |
153 | 169 | dest: /etc/hosts
|
154 | 170 | owner: root
|
155 | 171 | group: root
|
|
160 | 176 | ansible.builtin.include_role:
|
161 | 177 | name: cacerts
|
162 | 178 | vars:
|
163 |
| - cacerts_cert_dir: "/mnt/cluster/cacerts" |
| 179 | + cacerts_cert_dir: "/tmp/cluster/cacerts" |
164 | 180 | when: enable_cacerts
|
165 | 181 |
|
166 | 182 | - name: Configure sshd
|
167 | 183 | ansible.builtin.include_role:
|
168 | 184 | name: sshd
|
169 | 185 | vars:
|
170 |
| - sshd_conf_src: "/mnt/cluster/hostconfig/{{ ansible_hostname }}/sshd.conf" |
| 186 | + sshd_conf_src: "/tmp/cluster/hostconfig/{{ ansible_hostname }}/sshd.conf" |
171 | 187 | when: enable_sshd
|
172 | 188 |
|
173 | 189 | - name: Configure tuned
|
|
179 | 195 | name: sssd
|
180 | 196 | tasks_from: configure.yml
|
181 | 197 | vars:
|
182 |
| - sssd_conf_src: "/mnt/cluster/hostconfig/{{ ansible_hostname }}/sssd.conf" |
| 198 | + sssd_conf_src: "/tmp/cluster/hostconfig/{{ ansible_hostname }}/sssd.conf" |
183 | 199 | when: enable_sssd
|
184 | 200 |
|
185 | 201 | # NFS client mount
|
|
194 | 210 | block:
|
195 | 211 | - name: Read manila share info from nfs file
|
196 | 212 | include_vars:
|
197 |
| - file: /mnt/cluster/manila_share_info.yml |
| 213 | + file: /tmp/cluster/manila_share_info.yml |
198 | 214 | no_log: true # contains secrets
|
199 | 215 |
|
200 | 216 | - name: Ensure Ceph configuration directory exists
|
|
275 | 291 | loop: "{{ basic_users_groups }}"
|
276 | 292 |
|
277 | 293 | - name: Create users
|
278 |
| - user: "{{ basic_users_userdefaults | combine(item) | filter_user_params() }}" |
| 294 | + user: "{{ basic_users_userdefaults | combine(item) | filter_user_params() | combine(_disable_homedir) }}" |
279 | 295 | loop: "{{ basic_users_users }}"
|
280 | 296 | loop_control:
|
281 |
| - label: "{{ item.name }} [{{ item.state | default('present') }}]" |
282 |
| - register: basic_users_info |
| 297 | + label: "{{ item.name }}" |
| 298 | + vars: |
| 299 | + _disable_homedir: # ensure this task doesn't touch $HOME |
| 300 | + create_home: false |
| 301 | + generate_ssh_key: false |
283 | 302 |
|
284 | 303 | - name: Write sudo rules
|
285 | 304 | blockinfile:
|
286 |
| - path: /etc/sudoers.d/80-{{ item.name}}-user |
| 305 | + path: /etc/sudoers.d/80-{{ item.name }}-user |
287 | 306 | block: "{{ item.sudo }}"
|
288 | 307 | create: true
|
289 | 308 | loop: "{{ basic_users_users }}"
|
290 | 309 | loop_control:
|
291 | 310 | label: "{{ item.name }}"
|
292 |
| - when: "'sudo' in item" |
| 311 | + when: |
| 312 | + - item.state | default('present') == 'present' |
| 313 | + - "'sudo' in item" |
293 | 314 | when: enable_basic_users
|
294 | 315 |
|
295 | 316 | - name: EESSI
|
296 | 317 | block:
|
297 | 318 | - name: Copy cvmfs config
|
298 | 319 | copy:
|
299 |
| - src: /mnt/cluster/cvmfs/default.local |
| 320 | + src: /tmp/cluster/cvmfs/default.local |
300 | 321 | dest: /etc/cvmfs/default.local
|
301 | 322 | owner: root
|
302 | 323 | group: root
|
|
0 commit comments