Skip to content

Commit 9c6e344

Browse files
Enrico Usai (enrico-usai)
authored and committed
Remove unnecessary version test for Slurm and PMIx
The same checks are performed in the Kitchen tests; we don't need to execute them again. Signed-off-by: Enrico Usai <[email protected]>
1 parent 50bc782 commit 9c6e344

File tree

1 file changed

+4
-10
lines changed

1 file changed

+4
-10
lines changed

tests/integration-tests/tests/schedulers/test_slurm.py

Lines changed: 4 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,6 @@ def test_slurm(
9696
remote_command_executor = RemoteCommandExecutor(cluster, use_login_node=use_login_node)
9797
slurm_root_path = _retrieve_slurm_root_path(remote_command_executor)
9898
slurm_commands = scheduler_commands_factory(remote_command_executor)
99-
_test_slurm_version(remote_command_executor)
10099

101100
if supports_impi:
102101
_test_mpi_job_termination(remote_command_executor, test_datadir, slurm_commands, region, cluster)
@@ -229,18 +228,19 @@ def test_slurm_pmix(pcluster_config_reader, scheduler, clusters_factory, use_log
229228
remote_command_executor = RemoteCommandExecutor(cluster, use_login_node=use_login_node)
230229

231230
# Ensure the expected PMIx version is listed when running `srun --mpi=list`.
232-
# Since we're installing PMIx v4.2.6, we expect to see pmix and pmix_v4 in the output.
233231
# Sample output:
234232
# [ec2-user@ip-172-31-33-187 ~]$ srun 2>&1 --mpi=list
235233
# srun: MPI types are...
236234
# srun: none
237235
# srun: openmpi
238236
# srun: pmi2
239237
# srun: pmix
240-
# srun: pmix_v4
238+
# srun: pmix_vX
239+
#
240+
# _vX is the Major number of the PMIx version installed and used to compile slurm.
241+
# We check this in the cookbook, so we do not repeat the check here
241242
mpi_list_output = remote_command_executor.run_remote_command("srun 2>&1 --mpi=list").stdout
242243
assert_that(mpi_list_output).matches(r"\s+pmix($|\s+)")
243-
assert_that(mpi_list_output).matches(r"\s+pmix_v4($|\s+)")
244244

245245
# Compile and run an MPI program interactively
246246
mpi_module = "openmpi"
@@ -1708,12 +1708,6 @@ def _gpu_resource_check(slurm_commands, partition, instance_type, instance_type_
17081708
assert_that(job_info).contains(f"TresPerNode=gres:gpu:{gpus_per_instance}", f"CpusPerTres=gres:gpu:{cpus_per_gpu}")
17091709

17101710

1711-
def _test_slurm_version(remote_command_executor):
1712-
logging.info("Testing Slurm Version")
1713-
version = remote_command_executor.run_remote_command("sinfo -V").stdout
1714-
assert_that(version).is_equal_to("slurm 23.02.5")
1715-
1716-
17171711
def _test_job_dependencies(slurm_commands, region, stack_name, scaledown_idletime):
17181712
logging.info("Testing cluster doesn't scale when job dependencies are not satisfied")
17191713
job_id = slurm_commands.submit_command_and_assert_job_accepted(

0 commit comments

Comments (0)