@@ -151,35 +151,34 @@ def update_nodes_in_queue(strategy, queues)
151151 end
152152end
153153
154- ruby_block "Update slurm topology" do
155- block do
156- # Update slurm_parallelcluster_topology to add/remove Block Topology plugin
157- template "#{node['cluster']['slurm']['install_dir']}/etc/slurm_parallelcluster_topology.conf" do
158- source 'slurm/slurm_parallelcluster_topology.conf.erb'
159- owner 'root'
160- group 'root'
161- mode '0644'
162- end
154+ # Update slurm_parallelcluster_topology to add/remove Block Topology plugin
155+ template "#{node['cluster']['slurm']['install_dir']}/etc/slurm_parallelcluster_topology.conf" do
156+ source 'slurm/slurm_parallelcluster_topology.conf.erb'
157+ owner 'root'
158+ group 'root'
159+ mode '0644'
160+ not_if { is_amazon_linux_2? }
161+ end
163162
164- if node['cluster']['p6egb200_block_sizes'].nil? && are_queues_updated? && ::File.exist?("#{node['cluster']['slurm']['install_dir']}/etc/topology.conf")
165- # If topology.conf exist and Capacity Block is removed, we cleanup
166- topology_generator_command_args = " --cleanup"
167- elsif node['cluster']['p6egb200_block_sizes'].nil? && !are_queues_updated?
168- # We do nothing if p6e-gb200 is not used and queues are not updated
169- topology_generator_command_args = nil
170- else
171- topology_generator_command_args = " --block-sizes #{node['cluster']['p6egb200_block_sizes']}"
172- end
173- # Update Slurm topology.conf file
174- execute "update or cleanup topology.conf" do
175- command "#{cookbook_virtualenv_path}/bin/python #{node['cluster']['scripts_dir']}/slurm/pcluster_topology_generator.py" \
176- " --output-file #{node['cluster']['slurm']['install_dir']}/etc/topology.conf" \
177- " --input-file #{node['cluster']['cluster_config_path']}" \
178- "#{topology_generator_command_args}"
179- not_if { ::File.exist?(node['cluster']['previous_cluster_config_path']) && topology_generator_command_args.nil? }
180- end
163+ def topology_generator_command_args
164+ if node['cluster']['p6egb200_block_sizes'].nil? && are_queues_updated? && ::File.exist?("#{node['cluster']['slurm']['install_dir']}/etc/topology.conf")
165+ # If topology.conf exist and Capacity Block is removed, we cleanup
166+ " --cleanup"
167+ elsif node['cluster']['p6egb200_block_sizes'].nil? && !are_queues_updated?
168+ # We do nothing if p6e-gb200 is not used and queues are not updated
169+ nil
170+ else
171+ " --block-sizes #{node['cluster']['p6egb200_block_sizes']}"
181172 end
182- not_if { platform?('amazon') && node['platform_version'] == "2" }
173+ end
174+
175+ # Update Slurm topology.conf file
176+ execute "update or cleanup topology.conf" do
177+ command "#{cookbook_virtualenv_path}/bin/python #{node['cluster']['scripts_dir']}/slurm/pcluster_topology_generator.py" \
178+ " --output-file #{node['cluster']['slurm']['install_dir']}/etc/topology.conf" \
179+ " --input-file #{node['cluster']['cluster_config_path']}" \
180+ "#{topology_generator_command_args}"
181+ not_if { ::File.exist?(node['cluster']['previous_cluster_config_path']) && topology_generator_command_args.nil? || is_amazon_linux_2? }
183182end
184183
185184execute "generate_pcluster_slurm_configs" do
0 commit comments