Commit c0367cc

Merge pull request #146 from MFlowCode/fix-143
2 parents: dae6563 + 26c4dc1

5 files changed (+47, −34 lines)

mfc.sh

Lines changed: 1 addition & 1 deletion
@@ -132,7 +132,7 @@ if [ "$1" == "load" ]; then
         MODULES=("${MODULES[@]}" "python/3.8.5")
     elif [ "$u_computer" == "p" ]; then # Phoenix
         if [ "$u_cg" == "c" ]; then
-            MODULES=("gcc/10.3.0-o57x6h" "mvapich2/2.3.6-ouywal")
+            MODULES=("gcc/10.3.0-o57x6h" "openmpi/4.1.4")
         elif [ "$u_cg" == "g" ]; then
             MODULES=("cuda/11.7.0-7sdye3" "nvhpc/22.11")
         fi

toolchain/mfc/args.py

Lines changed: 1 addition & 0 deletions
@@ -95,6 +95,7 @@ def add_common_arguments(p, mask = None):
     run.add_argument("--dry-run",           action="store_true", default=False, help="(Batch) Run without submitting batch file.")
     run.add_argument("--case-optimization", action="store_true", default=False, help="(GPU Optimization) Compile MFC targets with some case parameters hard-coded.")
     run.add_argument("--no-build",          action="store_true", default=False, help="(Testing) Do not rebuild MFC.")
+    run.add_argument("--wait",              action="store_true", default=False, help="(Batch) Wait for the job to finish.")

     # === BENCH ===
     add_common_arguments(bench, "t")
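The new flag is a standard argparse boolean switch, added so that queues.py can later read it back through ARG("wait"). A minimal, standalone sketch of the pattern (the parser and prog string below are illustrative stand-ins, not MFC's actual "run" sub-parser):

    import argparse

    # Stand-in for the "run" sub-parser shown in the diff above.
    parser = argparse.ArgumentParser(prog="mfc run")
    parser.add_argument("--wait", action="store_true", default=False,
                        help="(Batch) Wait for the job to finish.")

    # store_true leaves the value False unless the flag is present.
    assert parser.parse_args([]).wait is False
    assert parser.parse_args(["--wait"]).wait is True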

toolchain/mfc/run/queues.py

Lines changed: 16 additions & 3 deletions
@@ -1,7 +1,7 @@
 import os, typing, dataclasses

 from .. import common
-
+from ..state import ARG

 @dataclasses.dataclass
 class QueueSystem:
@@ -27,6 +27,9 @@ def is_active(self) -> bool:
         return common.does_command_exist("qsub")

     def gen_submit_cmd(self, filename: str) -> typing.List[str]:
+        if ARG("wait"):
+            raise common.MFCException("PBS Queue: Sorry, --wait is unimplemented.")
+
         return ["qsub", filename]


@@ -38,7 +41,12 @@ def is_active(self) -> bool:
         return common.does_command_exist("bsub") and common.does_command_exist("bqueues")

     def gen_submit_cmd(self, filename: str) -> None:
-        return ["bsub", filename]
+        cmd = ["bsub"]
+
+        if ARG("wait"):
+            cmd += ["--wait"]
+
+        return cmd + [filename]


 class SLURMSystem(QueueSystem):
@@ -49,7 +57,12 @@ def is_active(self) -> bool:
         return common.does_command_exist("sbatch")

     def gen_submit_cmd(self, filename: str) -> None:
-        return ["sbatch", filename]
+        cmd = ["sbatch"]
+
+        if ARG("wait"):
+            cmd += ["--wait"]
+
+        return cmd + [filename]


 QUEUE_SYSTEMS = [ LSFSystem(), SLURMSystem(), PBSSystem() ]
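Each scheduler builds its submit command the same way: start from the base submitter, append the scheduler's blocking flag when --wait was requested, then append the batch script. A self-contained sketch of the SLURM case (shutil.which and a plain boolean are stand-ins for MFC's common.does_command_exist and ARG("wait")):

    import shutil
    import typing

    def gen_sbatch_cmd(filename: str, wait: bool) -> typing.List[str]:
        # Stand-in for SLURMSystem.is_active(): check that sbatch is on PATH.
        if shutil.which("sbatch") is None:
            raise RuntimeError("sbatch not found; SLURM queue system is not active.")

        cmd = ["sbatch"]
        if wait:
            cmd += ["--wait"]  # sbatch --wait blocks until the submitted job terminates

        return cmd + [filename]

    # e.g. gen_sbatch_cmd("job.sh", wait=True) -> ["sbatch", "--wait", "job.sh"]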

toolchain/templates/pbs.sh

Lines changed: 15 additions & 13 deletions
@@ -65,19 +65,21 @@ for binpath in {MFC::BINARIES}; do

     echo -e ":) Running $binpath:"

-    srun \
-        --nodes={nodes} \
-        --ntasks-per-node {tasks_per_node} \
-        {MFC::PROFILER} "$binpath"
-
-    #>
-    #> srun --mpi=pmix \
-    #>      {MFC::PROFILER} "$binpath"
-    #>
-    #> mpirun \
-    #>     -np {tasks_per_node*nodes} \
-    #>     {MFC::PROFILER} "$binpath"
-    #>
+    if command -v srun > /dev/null 2>&1; then
+        srun \
+            --nodes {nodes} \
+            --ntasks-per-node {tasks_per_node} \
+            {MFC::PROFILER} "$binpath"
+
+        #>
+        #> srun --mpi=pmix \
+        #>      {MFC::PROFILER} "$binpath"
+    else
+        mpirun \
+            -np {tasks_per_node*nodes} \
+            {MFC::PROFILER} "$binpath"
+
+    fi

 done
toolchain/templates/slurm.sh

Lines changed: 14 additions & 17 deletions
@@ -74,24 +74,21 @@ for binpath in {MFC::BINARIES}; do

     echo -e ":) Running $binpath:"

-    #>
-    #> Note: This MPI executable might not be well supported
-    #>       on your system - if at all. {MFC::BIN} refers to
-    #>       the path the MFC executable.
-    #>
-    #> srun \
-    #>     --nodes={nodes} \
-    #>     --ntasks-per-node {cpus_per_node} \
-    #>     --mpi=pmi2 \
-    #>     {MFC::PROFILER} "{MFC::BIN}"
-    #>
-    #> srun --mpi=pmix \
-    #>      {MFC::PROFILER} "{MFC::BIN}"
-    #>
+    if command -v srun > /dev/null 2>&1; then
+        srun \
+            --nodes {nodes} \
+            --ntasks-per-node {tasks_per_node} \
+            {MFC::PROFILER} "$binpath"

-    mpirun \
-        -np {nodes*tasks_per_node} \
-        {MFC::PROFILER} "$binpath"
+        #>
+        #> srun --mpi=pmix \
+        #>      {MFC::PROFILER} "$binpath"
+        #>
+    else
+        mpirun \
+            -np {nodes*tasks_per_node} \
+            {MFC::PROFILER} "$binpath"
+    fi

 done
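Both templates now pick the launcher at run time: use srun when it is available, otherwise fall back to mpirun with an explicit rank count. The shell templates do this with `command -v srun`; the sketch below is a Python illustration of the same selection logic, with placeholder node and task counts rather than MFC's template variables:

    import shutil
    import typing

    def launch_cmd(binpath: str, nodes: int, tasks_per_node: int) -> typing.List[str]:
        # Prefer srun if the SLURM launcher is on PATH...
        if shutil.which("srun") is not None:
            return ["srun", "--nodes", str(nodes),
                    "--ntasks-per-node", str(tasks_per_node), binpath]
        # ...otherwise fall back to plain mpirun with an explicit rank count.
        return ["mpirun", "-np", str(nodes * tasks_per_node), binpath]

    # e.g. launch_cmd("./simulation", nodes=2, tasks_per_node=4)
    #      -> srun variant, or ["mpirun", "-np", "8", "./simulation"]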
