@@ -65,7 +65,7 @@ def test_slurm(region, pcluster_config_reader, clusters_factory, test_datadir, a
    _test_slurm_version(remote_command_executor)

    if supports_impi:
-        _test_mpi_job_termination(remote_command_executor, test_datadir)
+        _test_mpi_job_termination(remote_command_executor, test_datadir, instance_type="c5.xlarge")

    _assert_no_node_in_cluster(region, cluster.cfn_name, slurm_commands)
    _test_job_dependencies(slurm_commands, region, cluster.cfn_name, scaledown_idletime)
@@ -655,7 +655,7 @@ def _terminate_nodes_manually(instance_ids, region):
    logging.info("Terminated nodes: {}".format(instance_ids))


-def _test_mpi_job_termination(remote_command_executor, test_datadir):
+def _test_mpi_job_termination(remote_command_executor, test_datadir, instance_type):
    """
    Test canceling mpirun job will not leave stray processes.

@@ -669,7 +669,7 @@ def _test_mpi_job_termination(remote_command_executor, test_datadir):

    # Submit mpi_job, which runs Intel MPI benchmarks with intelmpi
    # Leaving 1 vcpu on each node idle so that the process check job can run while mpi_job is running
-    result = slurm_commands.submit_script(str(test_datadir / "mpi_job.sh"))
+    result = slurm_commands.submit_script(str(test_datadir / "mpi_job.sh"), constraint=instance_type)
    job_id = slurm_commands.assert_job_submitted(result.stdout)

    # Wait for compute node to start and check that mpi processes are started
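
For context, the new constraint keyword presumably maps onto Slurm's sbatch --constraint (-C) option, which restricts a job to nodes whose advertised features match the given value, so the MPI job only lands on nodes of the requested instance type. The sketch below is an assumption about how a submit helper could forward that argument; submit_script_sketch is hypothetical and is not the framework's SlurmCommands.submit_script implementation, whose real signature may differ.

# Hypothetical sketch, not the framework's SlurmCommands.submit_script.
# Assumes compute nodes advertise their instance type as a Slurm node feature.
import subprocess


def submit_script_sketch(script_path, constraint=None):
    """Submit a batch script with sbatch, optionally restricted to nodes matching a feature constraint."""
    command = ["sbatch"]
    if constraint:
        # e.g. --constraint c5.xlarge; equivalent to running `sbatch --constraint c5.xlarge mpi_job.sh`
        command += ["--constraint", constraint]
    command.append(script_path)
    # sbatch prints "Submitted batch job <id>" on stdout, which the caller parses for the job id.
    return subprocess.run(command, capture_output=True, text=True, check=True)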