Skip to content

Commit 9a1c0ce

Browse files
committed
Rename Monte Carlo implementation back to montecarlo.jl
Modify communication:
- Cannot use MPI_Isend without holding on to the communication buffer; use MPI_Send instead.
- Instead of polling, use MPI_Recv with MPI_ANY_SOURCE.
- Call MPI_Init and MPI_Finalize from the main program.
- Ensure that the batch size divides the amount of work per worker.
1 parent 4e6ceb9 commit 9a1c0ce

File tree

3 files changed

+65
-68
lines changed

3 files changed

+65
-68
lines changed

examples/07-pi-impl.jl

Lines changed: 0 additions & 63 deletions
This file was deleted.

examples/07-pi.jl renamed to examples/07-pi-montecarlo.jl

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ like to use, subject to X-1 being an even divisor
77
of 1e6, e.g., set X=5.
88
=#
99

10-
include("07-pi-impl.jl")
10+
include("montecarlo.jl")
1111

1212
function pi_wrapper()
1313
4.0 * (norm(rand(2)) < 1)
@@ -24,10 +24,13 @@ end
2424

2525
# do the monte carlo: 10^6 reps of single draws
2626
# Entry point: bring MPI up, run 10^7 Monte Carlo draws of pi_wrapper in
# batches of 10^5, then shut MPI down. montecarlo gathers batches on rank 0
# and reports progress through pi_monitor.
function main()
    MPI.Init()
    reps = 10^7          # desired number of MC reps
    returns_per_rep = 1  # pi_wrapper returns a single value
    batch = 10^5         # rows per worker-to-manager message
    montecarlo(pi_wrapper, pi_monitor, MPI.COMM_WORLD,
               reps, returns_per_rep, batch)
    MPI.Finalize()
end

main()

examples/montecarlo.jl

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
import MPI

"""
    montecarlo(mc_eval, mc_monitor, comm, n_evals, n_returns, batchsize=1)

Distribute `n_evals` Monte Carlo evaluations over the workers of `comm`
(ranks 1..commsize-1) and gather the results on the manager (rank 0).

# Arguments
- `mc_eval`: zero-argument function producing one draw (`n_returns` values;
  a scalar when `n_returns == 1`).
- `mc_monitor`: called on the manager as `mc_monitor(done, results)` after each
  received batch, where `done` is the number of result rows filled so far.
- `comm`: MPI communicator. The caller is responsible for `MPI.Init` and
  `MPI.Finalize` (except on the error paths below, which finalize and exit).
- `n_evals`: total evaluations; must be divisible by `commsize - 1`.
- `n_returns`: number of return values per evaluation.
- `batchsize`: rows per worker-to-manager message; reduced automatically if it
  does not divide the per-worker workload.
"""
function montecarlo(mc_eval::Function, mc_monitor::Function,
                    comm::MPI.Comm,        # MPI communicator
                    n_evals::Integer,      # total number of evaluations
                    n_returns::Integer,    # return values per evaluation
                    batchsize::Integer=1)  # transmission size
    rank = MPI.Comm_rank(comm)
    commsize = MPI.Comm_size(comm)

    # bookkeeping: the work must split evenly over the commsize-1 workers
    if mod(n_evals, commsize - 1) != 0
        if rank == 0
            println("Error (montecarlo):")
            println(" n_evals=$n_evals, commsize=$commsize")
            println("Choose commsize so that n_evals/(commsize-1) is integer")
        end
        MPI.Finalize()
        exit(1)
    end
    n_pernode = div(n_evals, commsize - 1)  # evaluations per worker

    # Ensure batchsize divides the per-worker workload. gcd always divides
    # n_pernode, and equals batchsize whenever batchsize already divides it,
    # so previously valid inputs behave identically. (The earlier fallback,
    # div(n_pernode, commsize-1), did not guarantee divisibility.)
    if mod(n_pernode, batchsize) != 0
        batchsize = gcd(batchsize, n_pernode)
    end
    if mod(n_pernode, batchsize) != 0  # defensive; unreachable after gcd
        if rank == 0
            println("Error (montecarlo):")
            println(" n_pernode=$n_pernode, batchsize=$batchsize")
            println("Choose batchsize so that n_pernode/batchsize is integer")
        end
        MPI.Finalize()
        exit(1)
    end

    if rank > 0
        # Workers: fill a batch, then block in MPI.Send until the manager has
        # taken it. The buffer is reused across iterations, so a blocking send
        # is required (a nonblocking Isend would need the buffer kept alive).
        contrib = zeros(batchsize, n_returns)
        @inbounds for i = 1:div(n_pernode, batchsize)
            for j = 1:batchsize
                # .= broadcasts the draw into the row, so mc_eval may return a
                # scalar or a length-n_returns vector; plain `=` rejects
                # scalar-to-slice assignment in Julia >= 1.0.
                contrib[j, :] .= mc_eval()
            end
            MPI.Send(contrib, 0, 0, comm)
        end
    else
        # Manager: receive batches from whichever worker finishes first and
        # append them in arrival order; report progress after every batch.
        results = zeros(n_evals, n_returns)
        contrib = zeros(batchsize, n_returns)
        for nresults = 1:div(n_evals, batchsize)
            MPI.Recv!(contrib, MPI.ANY_SOURCE, 0, comm)
            rows = (nresults - 1) * batchsize + 1 : nresults * batchsize
            results[rows, :] = contrib
            mc_monitor(nresults * batchsize, results)
        end
    end
end

0 commit comments

Comments
 (0)