Commit 33e4253

Deprecate cluster manager (#296)
Functionality has been moved to https://github.com/JuliaParallel/MPIClusterManagers.jl
1 parent ba6ff68 commit 33e4253
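
For reference, the cluster-manager workflow removed here (described in the deleted portions of `docs/src/usage.md` below) continues in MPIClusterManagers.jl. A minimal sketch of the equivalent usage, assuming that package keeps the same `MPIManager`/`@mpi_do` interface that this commit removes:

```julia
# Sketch only: assumes MPIClusterManagers.jl provides the MPIManager/@mpi_do
# interface that this commit removes from MPI.jl.
using MPIClusterManagers
using Distributed   # addprocs() comes from Distributed

# Start 4 MPI worker processes and add them as Julia workers.
manager = MPIManager(np=4)
addprocs(manager)

# Run MPI code on every worker that belongs to `manager`.
@mpi_do manager begin
    comm = MPI.COMM_WORLD
    println("Hello world, I am $(MPI.Comm_rank(comm)) of $(MPI.Comm_size(comm))")
end
```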

25 files changed, +142 -556 lines

.travis.yml

Lines changed: 2 additions & 3 deletions
@@ -1,6 +1,5 @@
 language: julia
-sudo: required
-dist: trusty
+dist: xenial
 
 os:
 - linux
@@ -66,8 +65,8 @@ jobs:
       script:
         - export DOCUMENTER_DEBUG="true"
         - julia --color=yes --project=docs/ -e 'using Pkg;
-            Pkg.instantiate();
             Pkg.develop(PackageSpec(path=pwd()));
+            Pkg.instantiate();
             Pkg.build()'
        - julia --color=yes --project=docs/ docs/make.jl
    - stage: "Coverage"

docs/.gitignore

Lines changed: 1 addition & 0 deletions
@@ -1,2 +1,3 @@
 build/
 site/
+src/examples/

docs/examples/01-hello.jl

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+using MPI
+MPI.Init()
+
+comm = MPI.COMM_WORLD
+print("Hello world, I am rank $(MPI.Comm_rank(comm)) of $(MPI.Comm_size(comm))\n")
+MPI.Barrier(comm)

docs/examples/02-broadcast.jl

Lines changed: 40 additions & 0 deletions
@@ -0,0 +1,40 @@
+import MPI
+
+MPI.Init()
+
+comm = MPI.COMM_WORLD
+N = 5
+root = 0
+
+if MPI.Comm_rank(comm) == root
+    println(" Running on $(MPI.Comm_size(comm)) processes")
+end
+MPI.Barrier(comm)
+
+if MPI.Comm_rank(comm) == root
+    A = [i*(1.0 + im*2.0) for i = 1:N]
+else
+    A = Array{ComplexF64}(undef, N)
+end
+
+MPI.Bcast!(A, root, comm)
+
+println("rank = $(MPI.Comm_rank(comm)), A = $A")
+
+if MPI.Comm_rank(comm) == root
+    B = Dict("foo" => "bar")
+else
+    B = nothing
+end
+
+B = MPI.bcast(B, root, comm)
+println("rank = $(MPI.Comm_rank(comm)), B = $B")
+
+if MPI.Comm_rank(comm) == root
+    f = x -> x^2 + 2x - 1
+else
+    f = nothing
+end
+
+f = MPI.bcast(f, root, comm)
+println("rank = $(MPI.Comm_rank(comm)), f(3) = $(f(3))")

docs/examples/03-reduce.jl

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
+using MPI
+
+MPI.Init()
+
+comm = MPI.COMM_WORLD
+root = 0
+
+r = MPI.Comm_rank(comm)
+
+sr = MPI.Reduce(r, +, root, comm)
+
+if MPI.Comm_rank(comm) == root
+    println("sum of ranks = $sr")
+end
+

docs/examples/04-sendrecv.jl

Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
+using MPI
+
+MPI.Init()
+
+comm = MPI.COMM_WORLD
+rank = MPI.Comm_rank(comm)
+size = MPI.Comm_size(comm)
+
+dst = mod(rank+1, size)
+src = mod(rank-1, size)
+
+N = 4
+
+send_mesg = Array{Float64}(undef, N)
+recv_mesg = Array{Float64}(undef, N)
+
+fill!(send_mesg, Float64(rank))
+
+rreq = MPI.Irecv!(recv_mesg, src, src+32, comm)
+
+println("$rank: Sending $rank -> $dst = $send_mesg")
+sreq = MPI.Isend(send_mesg, dst, rank+32, comm)
+
+stats = MPI.Waitall!([rreq, sreq])
+
+println("$rank: Received $src -> $rank = $recv_mesg")
+
+MPI.Barrier(comm)

docs/make.jl

Lines changed: 33 additions & 0 deletions
@@ -1,6 +1,38 @@
 using Documenter
 using MPI
 
+# generate example markdown
+EXAMPLES = [
+    "Hello world" => "examples/01-hello.md",
+    "Broadcast" => "examples/02-broadcast.md",
+    "Reduce" => "examples/03-reduce.md",
+    "Send/receive" => "examples/04-sendrecv.md",
+]
+
+examples_md_dir = joinpath(@__DIR__,"src/examples")
+isdir(examples_md_dir) || mkdir(examples_md_dir)
+
+for (example_title, example_md) in EXAMPLES
+    example_jl = example_md[1:end-2]*"jl"
+    open(joinpath(@__DIR__, "src", example_md), "w") do mdfile
+        println(mdfile, "# $example_title")
+        println(mdfile)
+        println(mdfile, "`$example_jl`")
+        println(mdfile, "```julia")
+        write(mdfile, read(joinpath(@__DIR__,example_jl)))
+        println(mdfile, "```")
+        println(mdfile)
+
+        println(mdfile, "```")
+        println(mdfile, "> mpiexec -n 3 julia $example_jl")
+        cd(@__DIR__) do
+            write(mdfile, read(`$(MPI.mpiexec) -n 3 $(joinpath(Sys.BINDIR, Base.julia_exename())) --project $example_jl`))
+        end
+        println(mdfile, "```")
+    end
+end
+
+
 makedocs(
     sitename = "MPI.jl",
     format = Documenter.HTML(
@@ -11,6 +43,7 @@ makedocs(
         "index.md",
         "installing.md",
         "usage.md",
+        "Examples" => EXAMPLES,
        "functions.md",
     ]
 )

docs/src/usage.md

Lines changed: 12 additions & 101 deletions
@@ -1,115 +1,26 @@
 # Usage
 
-## MPI-only mode
+MPI is based on a [single program, multiple data (SPMD)](https://en.wikipedia.org/wiki/SPMD) model, where multiple processes are launched running independent programs, which then communicate as necessary via messages.
 
-To run a Julia script with MPI, first make sure that `using MPI` or
-`import MPI` is included at the top of your script. You should then be
-able to run the MPI job as expected, e.g. to run [`examples/01-hello.jl`](https://github.com/JuliaParallel/MPI.jl/blob/master/examples/01-hello.jl),
-
-```
-mpirun -np 3 julia 01-hello.jl
-```
-
-## MPI and Julia parallel constructs together
-
-In order for MPI calls to be made from a Julia cluster, it requires the use of
-`MPIManager`, a cluster manager that will start the julia workers using `mpirun`
-
-It has three modes of operation
-
-- Only worker processes execute MPI code. The Julia master process executes outside of and
-  is not part of the MPI cluster. Free bi-directional TCP/IP connectivity is required
-  between all processes
-
-- All processes (including Julia master) are part of both the MPI as well as Julia cluster.
-  Free bi-directional TCP/IP connectivity is required between all processes.
-
-- All processes are part of both the MPI as well as Julia cluster. MPI is used as the transport
-  for julia messages. This is useful on environments which do not allow TCP/IP connectivity
-  between worker processes
-
-### MPIManager: only workers execute MPI code
-
-An example is provided in `examples/05-juliacman.jl`.
-The julia master process is NOT part of the MPI cluster. The main script should be
-launched directly, `MPIManager` internally calls `mpirun` to launch julia/MPI workers.
-All the workers started via `MPIManager` will be part of the MPI cluster.
-
-```
-MPIManager(;np=Sys.CPU_THREADS, mpi_cmd=false, launch_timeout=60.0)
-```
-
-If not specified, `mpi_cmd` defaults to `mpirun -np $np`
-`stdout` from the launched workers is redirected back to the julia session calling `addprocs` via a TCP connection.
-Thus the workers must be able to freely connect via TCP to the host session.
-The following lines will be typically required on the julia master process to support both julia and MPI:
+A script should include `using MPI` and [`MPI.Init()`](@ref) statements, for example
 
 ```julia
-# to import MPIManager
+# examples/01-hello.jl
 using MPI
+MPI.Init()
 
-# need to also import Distributed to use addprocs()
-using Distributed
-
-# specify, number of mpi workers, launch cmd, etc.
-manager=MPIManager(np=4)
-
-# start mpi workers and add them as julia workers too.
-addprocs(manager)
-```
-
-To execute code with MPI calls on all workers, use `@mpi_do`.
-
-`@mpi_do manager expr` executes `expr` on all processes that are part of `manager`.
-
-For example:
-```
-@mpi_do manager begin
-  comm=MPI.COMM_WORLD
-  println("Hello world, I am $(MPI.Comm_rank(comm)) of $(MPI.Comm_size(comm))"))
-end
-```
-executes on all MPI workers belonging to `manager` only
-
-[`examples/05-juliacman.jl`](https://github.com/JuliaParallel/MPI.jl/blob/master/examples/05-juliacman.jl) is a simple example of calling MPI functions on all workers interspersed with Julia parallel methods.
-
-This should be run _without_ `mpirun`:
-```
-julia 05-juliacman.jl
-```
-
-A single instation of `MPIManager` can be used only once to launch MPI workers (via `addprocs`).
-To create multiple sets of MPI clusters, use separate, distinct `MPIManager` objects.
-
-`procs(manager::MPIManager)` returns a list of julia pids belonging to `manager`
-`mpiprocs(manager::MPIManager)` returns a list of MPI ranks belonging to `manager`
-
-Fields `j2mpi` and `mpi2j` of `MPIManager` are associative collections mapping julia pids to MPI ranks and vice-versa.
-
-### MPIManager: TCP/IP transport - all processes execute MPI code
-
-Useful on environments which do not allow TCP connections outside of the cluster
-
-An example is in [`examples/06-cman-transport.jl`](https://github.com/JuliaParallel/MPI.jl/blob/master/examples/06-cman-transport.jl):
-```
-mpirun -np 5 julia 06-cman-transport.jl TCP
+comm = MPI.COMM_WORLD
+println("Hello world, I am $(MPI.Comm_rank(comm)) of $(MPI.Comm_size(comm))")
+MPI.Barrier(comm)
 ```
 
-This launches a total of 5 processes, mpi rank 0 is the julia pid 1. mpi rank 1 is julia pid 2 and so on.
-
-The program must call `MPI.start(TCP_TRANSPORT_ALL)` with argument `TCP_TRANSPORT_ALL`.
-On mpi rank 0, it returns a `manager` which can be used with `@mpi_do`
-On other processes (i.e., the workers) the function does not return
-
-
-### MPIManager: MPI transport - all processes execute MPI code
-
-`MPI.start` must be called with option `MPI_TRANSPORT_ALL` to use MPI as transport.
+The program can then be launched via an MPI launch command (typically `mpiexec`, `mpirun` or `srun`), e.g.
 ```
-mpirun -np 5 julia 06-cman-transport.jl MPI
+$ mpiexec -n 3 julia --project examples/01-hello.jl
+Hello world, I am rank 0 of 3
+Hello world, I am rank 2 of 3
+Hello world, I am rank 1 of 3
 ```
-will run the example using MPI as transport.
-
 
 ## Finalizers
 

examples/01-hello-impl.jl

Lines changed: 0 additions & 5 deletions
This file was deleted.

examples/01-hello.jl

Lines changed: 0 additions & 12 deletions
This file was deleted.
