using MPI

-comm, comm_size, rank = MPI.init_mpi()
+MPI.Init()
+rank = MPI.Comm_rank(MPI.COMM_WORLD)
+size = MPI.Comm_size(MPI.COMM_WORLD)

include("01-hello-impl.jl")
include("02-broadcast-impl.jl")
include("03-reduce-impl.jl")
include("04-sendrecv-impl.jl")

if length(ARGS) == 0
-    print("Please specify a transport option to use [MPI|TCP]\n")
-    exit()
+    println("Please specify a transport option to use [MPI|TCP]")
+    MPI.Finalize()
+    exit(1)
elseif ARGS[1] == "TCP"
-    manager = MPI.start(TCP_TRANSPORT_ALL) # does not return on worker
+    manager = MPI.start_main_loop(TCP_TRANSPORT_ALL) # does not return on worker
elseif ARGS[1] == "MPI"
-    manager = MPI.start(MPI_TRANSPORT_ALL) # does not return on worker
+    manager = MPI.start_main_loop(MPI_TRANSPORT_ALL) # does not return on worker
else
-    print("Valid transport options are [MPI|TCP]\n")
-    exit()
+    println("Valid transport options are [MPI|TCP]")
+    MPI.Finalize()
+    exit(1)
end

-if rank == 0
-    nloops = 10^2
-    function foo(n)
-        a=ones(n)
-        remotecall_fetch(x->x, 2, a);
+# Check whether a worker accidentally returned
+@assert rank == 0

-        @elapsed for i in 1:nloops
-            remotecall_fetch(x->x, 2, a)
-        end
-    end
-
-    n=10^3
-    foo(1)
-    t=foo(n)
-    println("$t seconds for $nloops loops of send-recv of array size $n")
-
-    n=10^6
-    foo(1)
-    t=foo(n)
-    println("$t seconds for $nloops loops of send-recv of array size $n")
-
-
-    print("EXAMPLE: HELLO\n")
-    @mpi_do manager do_hello()
-    print("EXAMPLE: BROADCAST\n")
-    @mpi_do manager do_broadcast()
-    print("EXAMPLE: REDUCE\n")
-    @mpi_do manager do_reduce()
-    print("EXAMPLE: SENDRECV\n")
-    @mpi_do manager do_sendrecv()
-
-    # Abscence of a MPI Finalize causes the cluster to hang - don't yet know why
-    if ARGS[1] == "TCP"
-        @mpi_do manager MPI.Finalize()
-    elseif ARGS[1] == "MPI"
-        @everywhere (MPI.Finalize(); exit())
+nloops = 10^2
+function foo(n)
+    a=ones(n)
+    remotecall_fetch(x->x, mod1(2, size), a);
+    @elapsed for i in 1:nloops
+        remotecall_fetch(x->x, mod1(2, size), a)
    end
end
+
+n=10^3
+foo(1)
+t=foo(n)
+println("$t seconds for $nloops loops of send-recv of array size $n")
+
+n=10^6
+foo(1)
+t=foo(n)
+println("$t seconds for $nloops loops of send-recv of array size $n")
+
+# We cannot run these examples since they use MPI.Barrier and other blocking
+# communication, disabling our event loop
+# print("EXAMPLE: HELLO\n")
+# @mpi_do manager do_hello()
+# print("EXAMPLE: BROADCAST\n")
+# @mpi_do manager do_broadcast()
+# print("EXAMPLE: REDUCE\n")
+# @mpi_do manager do_reduce()
+# print("EXAMPLE: SENDRECV\n")
+# @mpi_do manager do_sendrecv()
+
+MPI.stop_main_loop(manager)
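For context, here is a minimal usage sketch, not part of this commit, of how the reworked script is typically driven under the MPI transport. It assumes an MPI launcher such as mpirun, the manager API used above (MPI.start_main_loop, MPI.stop_main_loop, remotecall_fetch), and an illustrative file name and process count; on Julia versions where remotecall_fetch lives in the Distributed standard library, a `using Distributed` would also be needed.

    # Illustrative launch command (file name and process count are placeholders):
    #   mpirun -np 4 julia 05-juliacman.jl MPI
    using MPI

    MPI.Init()
    rank = MPI.Comm_rank(MPI.COMM_WORLD)
    size = MPI.Comm_size(MPI.COMM_WORLD)

    # Every rank calls start_main_loop; only rank 0 gets the manager back,
    # the other ranks block inside the worker event loop and do not return.
    manager = MPI.start_main_loop(MPI_TRANSPORT_ALL)
    @assert rank == 0    # from here on we are the Distributed master

    # Drive a worker with Distributed primitives, mirroring foo() above.
    t = @elapsed remotecall_fetch(x->x, mod1(2, size), ones(10^3))
    println("one round trip of a 1000-element array took $t seconds")

    # Shut the worker event loops down again.
    MPI.stop_main_loop(manager)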