
Commit dc094c8

hollysilk authored and Bharat Kunwar committed

given parameters openhpc prefix

1 parent 26cd4e3 commit dc094c8

File tree

6 files changed: +31 -31 lines changed

defaults/main.yml

Lines changed: 5 additions & 5 deletions
@@ -1,9 +1,9 @@
 ---
-slurm_service_enabled: true
-slurm_service: slurmd
-slurm_control_host: # Example: "{{ groups['cluster_login'] | first }}"
-slurm_partitions: []
-slurm_cluster_name:
+openhpc_slurm_service_enabled: true
+openhpc_slurm_service: slurmd
+openhpc_slurm_control_host: # Example: "{{ groups['cluster_login'] | first }}"
+openhpc_slurm_partitions: []
+openhpc_cluster_name:
 openhpc_packages: []
 openhpc_enable:
   control: false
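
After this rename, playbooks and inventories consuming the role must use the openhpc_-prefixed names. A minimal group_vars sketch for a consuming cluster, assuming an inventory group named cluster_login as in the example comment above (the group name and values below are illustrative, not part of this commit):

    openhpc_slurm_service_enabled: true
    openhpc_slurm_control_host: "{{ groups['cluster_login'] | first }}"
    openhpc_cluster_name: demo
    openhpc_slurm_partitions:
      - name: compute
        num_nodes: 4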

handlers/main.yml

Lines changed: 4 additions & 4 deletions
@@ -1,12 +1,12 @@
----
+--
 - name: Restart SLURM service
   service:
-    name: "{{slurm_service}}"
+    name: "{{openhpc_slurm_service}}"
     state: reloaded
-  when: slurm_service_enabled | bool
+  when: openhpc_slurm_service_enabled | bool

 - name: Restart Munge service
   service:
     name: "munge"
     state: restarted
-  when: slurm_service_enabled | bool
+  when: openhpc_slurm_service_enabled | bool

tasks/compute.yml

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@

 - name: Select the SLURM service to control
   set_fact:
-    slurm_service: slurmd
+    openhpc_slurm_service: slurmd

 - name: Install OpenHPC LMOD
   yum:

tasks/control.yml

Lines changed: 1 addition & 1 deletion
@@ -9,4 +9,4 @@

 - name: Select the SLURM service to control
   set_fact:
-    slurm_service: slurmctld
+    openhpc_slurm_service: slurmctld

tasks/runtime.yml

Lines changed: 13 additions & 13 deletions
@@ -1,11 +1,11 @@
 ---
-- name: Fail if slurm_control_host or slurm_cluster_name or slurm_partitions are undefined
+- name: Fail if openhpc_slurm_control_host or openhpc_cluster_name or openhpc_slurm_partitions are undefined
   fail:
-    msg: "Undefined slurm_control_host or slurm_cluster_name or slurm_partitions."
+    msg: "Undefined openhpc_slurm_control_host or openhpc_cluster_name or openhpc_slurm_partitions."
   when:
-    slurm_control_host == none or
-    slurm_cluster_name == none or
-    slurm_partitions | length == 0
+    openhpc_slurm_control_host == none or
+    openhpc_cluster_name == none or
+    openhpc_slurm_partitions | length == 0

 - name: Install OpenHPC runtime Slurm packages
   yum:
@@ -27,25 +27,25 @@
 - name: Ensure the Munge service is enabled
   service:
     name: munge
-    enabled: "{{ slurm_service_enabled | bool }}"
+    enabled: "{{ openhpc_slurm_service_enabled | bool }}"
   notify:
     - Restart Munge service

 - name: Generate a Munge key for the platform
   command: "dd if=/dev/urandom of=/etc/munge/munge.key bs=1 count=1024"
   args:
     creates: "/etc/munge/munge.key"
-  when: inventory_hostname == slurm_control_host
+  when: inventory_hostname == openhpc_slurm_control_host

 - name: Retrieve Munge key from Slurm control host
   slurp:
     src: "/etc/munge/munge.key"
   register: slurm_munge_key
-  when: inventory_hostname == slurm_control_host
+  when: inventory_hostname == openhpc_slurm_control_host

 - name: Write Munge key
   copy:
-    content: "{{ hostvars[slurm_control_host]['slurm_munge_key']['content'] | b64decode }}"
+    content: "{{ hostvars[openhpc_slurm_control_host]['slurm_munge_key']['content'] | b64decode }}"
     dest: "/etc/munge/munge.key"
     owner: munge
     group: munge
@@ -55,8 +55,8 @@

 - name: Ensure SLURM services are enabled
   service:
-    name: "{{ slurm_service }}"
-    enabled: "{{ slurm_service_enabled | bool }}"
+    name: "{{ openhpc_slurm_service }}"
+    enabled: "{{ openhpc_slurm_service_enabled | bool }}"
   notify:
     - Restart SLURM service
@@ -77,8 +77,8 @@
 # the handler, ensure it's running here.
 - name: Ensure SLURM services are running
   service:
-    name: "{{ slurm_service }}"
-    state: "{{ 'started' if slurm_service_enabled | bool else 'stopped' }}"
+    name: "{{ openhpc_slurm_service }}"
+    state: "{{ 'started' if openhpc_slurm_service_enabled | bool else 'stopped' }}"

 # Install OpenHPC runtime
 - name: Ensure selected OpenHPC packages are installed
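
The Munge tasks above use a common Ansible fan-out pattern: the key is generated only on the control host (guarded by when: inventory_hostname == openhpc_slurm_control_host), slurped there into the registered variable slurm_munge_key, and then written to every host by reading the control host's copy through hostvars. The same pattern in generic form, as a sketch with hypothetical names (secret_source_host, /etc/app/secret.key):

    - name: Read secret from the source host
      slurp:
        src: /etc/app/secret.key
      register: app_secret
      when: inventory_hostname == secret_source_host

    - name: Distribute secret to every host
      copy:
        content: "{{ hostvars[secret_source_host]['app_secret']['content'] | b64decode }}"
        dest: /etc/app/secret.key
        mode: "0400"

Note that slurp returns the file base64-encoded, hence the b64decode filter on the consuming side.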

templates/slurm.conf.j2

Lines changed: 7 additions & 7 deletions
@@ -8,8 +8,8 @@
 #
 # See the slurm.conf man page for more information.
 #
-ClusterName={{ slurm_cluster_name }}
-ControlMachine={{ slurm_control_host }}
+ClusterName={{ openhpc_cluster_name }}
+ControlMachine={{ openhpc_slurm_control_host }}
 #ControlAddr=
 #BackupController=
 #BackupAddr=
@@ -102,13 +102,13 @@ PropagateResourceLimitsExcept=MEMLOCK
 #SlurmctldLogFile=/var/log/slurmctld.log
 AccountingStorageType=accounting_storage/filetxt
 Epilog=/etc/slurm/slurm.epilog.clean
-{% for part in slurm_partitions %}
+{% for part in openhpc_slurm_partitions %}
 {% for group in part.get('groups', [part]) %}
-NodeName={{group.cluster_name|default(slurm_cluster_name)}}-{{group.name}}-[0-{{group.num_nodes-1}}] \
+NodeName={{group.cluster_name|default(openhpc_cluster_name)}}-{{group.name}}-[0-{{group.num_nodes-1}}] \
 {% if 'ram_mb' in group %}
 RealMemory={{group.ram_mb}} \
 {% endif %}
-{% set group_name = group.cluster_name|default(slurm_cluster_name) ~ '_' ~ group.name %}
+{% set group_name = group.cluster_name|default(openhpc_cluster_name) ~ '_' ~ group.name %}
 {# If using --limit, the first host in each group may not have facts available. Find one that does. #}
 {% set group_hosts = groups[group_name] | intersect(play_hosts) %}
 {% if group_hosts | length > 0 %}
@@ -120,8 +120,8 @@ NodeName={{group.cluster_name|default(slurm_cluster_name)}}-{{group.name}}-[0-{{
 State=UNKNOWN
 {% endfor %}
 {% endfor %}
-{% for part in slurm_partitions %}
-PartitionName={{part.name}} Nodes={% for group in part.get('groups', [part]) %}{{group.cluster_name|default(slurm_cluster_name)}}-{{group.name}}-[0-{{group.num_nodes-1}}]{% if not loop.last %},{% endif %}{% endfor %} Default=YES MaxTime=24:00:00 State=UP
+{% for part in openhpc_slurm_partitions %}
+PartitionName={{part.name}} Nodes={% for group in part.get('groups', [part]) %}{{group.cluster_name|default(openhpc_cluster_name)}}-{{group.name}}-[0-{{group.num_nodes-1}}]{% if not loop.last %},{% endif %}{% endfor %} Default=YES MaxTime=24:00:00 State=UP
 {% endfor %}
 # Want nodes that drop out of SLURM's configuration to be automatically
 # returned to service when they come back.
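
To make the template loops concrete: with openhpc_cluster_name: demo and a single partition definition such as

    openhpc_slurm_partitions:
      - name: compute
        num_nodes: 4
        ram_mb: 63000

the template would render roughly the following fragment (a sketch; the facts-derived node attributes between RealMemory and State are elided, and exact whitespace depends on Jinja trimming):

    NodeName=demo-compute-[0-3] \
    RealMemory=63000 \
    State=UNKNOWN
    PartitionName=compute Nodes=demo-compute-[0-3] Default=YES MaxTime=24:00:00 State=UP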
