"""
-     Bcast!(buf, root::Integer, comm::Comm)
+     Bcast!(buf, comm::Comm; root::Integer=0)
Broadcast the buffer `buf` from `root` to all processes in `comm`.
@@ -47,6 +47,9 @@ Broadcast the buffer `buf` from `root` to all processes in `comm`.
# External links
$(_doc_external("MPI_Bcast"))
"""
+ Bcast!(buf, comm::Comm; root::Integer=Cint(0)) =
+     Bcast!(buf, root, comm)
+
function Bcast!(buf::Buffer, root::Integer, comm::Comm)
    # int MPI_Bcast(void* buffer, int count, MPI_Datatype datatype, int root,
    #               MPI_Comm comm)
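A minimal usage sketch of the new keyword form added here (assumes an initialized MPI session; the buffer contents are illustrative):

```
using MPI
MPI.Init()
comm = MPI.COMM_WORLD
# every rank allocates the buffer; only the root's contents matter before the call
buf = MPI.Comm_rank(comm) == 0 ? [1.0, 2.0, 3.0] : zeros(3)
MPI.Bcast!(buf, comm; root=0)   # root defaults to 0, written out here for clarity
# after the call every rank holds [1.0, 2.0, 3.0]
```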
@@ -60,14 +63,17 @@ function Bcast!(data, root::Integer, comm::Comm)
end
"""
-     bcast(obj, root::Integer, comm::Comm)
+     bcast(obj, comm::Comm; root::Integer=0)
- Broadcast the object `obj` from rank `root` to all processes on `comm`. This is able to handle arbitrary data.
+ Broadcast the object `obj` from rank `root` to all processes on `comm`. This is
+ able to handle arbitrary data.
# See also
- [`Bcast!`](@ref)
"""
+ bcast(obj, comm::Comm; root::Integer=Cint(0)) =
+     bcast(obj, root, comm)
function bcast(obj, root::Integer, comm::Comm)
    isroot = Comm_rank(comm) == root
    count = Ref{Cint}()
@@ -87,7 +93,8 @@ function bcast(obj, root::Integer, comm::Comm)
end
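A short sketch of `bcast` with the keyword `root`, broadcasting an arbitrary object; the dictionary is purely illustrative:

```
using MPI
MPI.Init()
comm = MPI.COMM_WORLD
# only the root needs to hold the object before the call
obj = MPI.Comm_rank(comm) == 0 ? Dict(:nsteps => 10, :dt => 0.5) : nothing
obj = MPI.bcast(obj, comm; root=0)
# every rank now holds its own copy of the dictionary
```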
"""
-     Scatter!(sendbuf::Union{UBuffer,Nothing}, recvbuf, root::Integer, comm::Comm)
+     Scatter!(sendbuf::Union{UBuffer,Nothing}, recvbuf, comm::Comm;
+              root::Integer=0)
Splits the buffer `sendbuf` in the `root` process into `Comm_size(comm)` chunks,
sending the `j`-th chunk to the process of rank `j-1` into the `recvbuf` buffer.
@@ -101,9 +108,9 @@ defined. On the root process, it can also be [`MPI.IN_PLACE`](@ref), in which ca
unmodified. For example:
```
if root == MPI.Comm_rank(comm)
-     MPI.Scatter!(UBuffer(buf, count), MPI.IN_PLACE, root, comm)
+     MPI.Scatter!(UBuffer(buf, count), MPI.IN_PLACE, comm; root=root)
else
-     MPI.Scatter!(nothing, buf, root, comm)
+     MPI.Scatter!(nothing, buf, comm; root=root)
end
```
# External links
$(_doc_external("MPI_Scatter"))
"""
+ Scatter!(sendbuf, recvbuf, comm::Comm; root::Integer=Cint(0)) =
+     Scatter!(sendbuf, recvbuf, root, comm)
function Scatter!(sendbuf::UBuffer, recvbuf::Buffer, root::Integer, comm::Comm)
    if sendbuf.nchunks !== nothing && Comm_rank(comm) == root
        @assert sendbuf.nchunks >= Comm_size(comm)
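A sketch of the keyword form for the common (non-in-place) case; the chunk length of 2 is an illustrative assumption:

```
using MPI
MPI.Init()
comm = MPI.COMM_WORLD
nranks = MPI.Comm_size(comm)
# the root supplies one chunk of length 2 per rank; other ranks pass nothing
sendbuf = MPI.Comm_rank(comm) == 0 ? MPI.UBuffer(collect(1:2nranks), 2) : nothing
recvbuf = zeros(Int, 2)
MPI.Scatter!(sendbuf, recvbuf, comm; root=0)
# rank r now holds [2r + 1, 2r + 2]
```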
@@ -134,24 +143,24 @@ Scatter!(sendbuf::Nothing, recvbuf, root::Integer, comm::Comm) =
# determine UBuffer count from recvbuf
Scatter!(sendbuf::AbstractArray{T}, recvbuf::Union{Ref{T},AbstractArray{T}}, root::Integer, comm::Comm) where {T} =
-     Scatter!(UBuffer(sendbuf,length(recvbuf)), recvbuf, root, comm)
+     Scatter!(UBuffer(sendbuf,length(recvbuf)), recvbuf, root, comm)
"""
-     Scatter(sendbuf, T, root::Integer, comm::Comm)
+     Scatter(sendbuf, T, comm::Comm; root::Integer=0)
Splits the buffer `sendbuf` in the `root` process into `Comm_size(comm)` chunks,
sending the `j`-th chunk to the process of rank `j-1` as an object of type `T`.
# See also
- [`Scatter!`](@ref)
"""
- function Scatter(sendbuf, ::Type{T}, root::Integer, comm::Comm) where {T}
+ Scatter(sendbuf, T, comm; root::Integer=Cint(0)) =
+     Scatter(sendbuf, T, root, comm)
+ Scatter(sendbuf, ::Type{T}, root::Integer, comm::Comm) where {T} =
    Scatter!(sendbuf, Ref{T}(), root, comm)[]
- end
-
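A minimal sketch of the allocating `Scatter` with the keyword `root`, assuming each rank should receive a single `Int`; the send buffer is only significant on the root:

```
using MPI
MPI.Init()
comm = MPI.COMM_WORLD
# the root supplies one value per rank; other ranks pass nothing
sendbuf = MPI.Comm_rank(comm) == 0 ? collect(1:MPI.Comm_size(comm)) : nothing
x = MPI.Scatter(sendbuf, Int, comm; root=0)
# rank r receives the value r + 1
```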
"""
-     Scatterv!(sendbuf, recvbuf, root, comm)
+     Scatterv!(sendbuf, recvbuf, comm::Comm; root::Integer=0)
Splits the buffer `sendbuf` in the `root` process into `Comm_size(comm)` chunks and sends
the `j`th chunk to the process of rank `j-1` into the `recvbuf` buffer.
@@ -164,9 +173,9 @@ defined. On the root process, it can also be [`MPI.IN_PLACE`](@ref), in which ca
unmodified. For example:
```
if root == MPI.Comm_rank(comm)
-     MPI.Scatterv!(VBuffer(buf, counts), MPI.IN_PLACE, root, comm)
+     MPI.Scatterv!(VBuffer(buf, counts), MPI.IN_PLACE, comm; root=root)
else
-     MPI.Scatterv!(nothing, buf, root, comm)
+     MPI.Scatterv!(nothing, buf, comm; root=root)
end
```
# External links
$(_doc_external("MPI_Scatterv"))
"""
+ Scatterv!(sendbuf, recvbuf, comm::Comm; root::Integer=Cint(0)) =
+     Scatterv!(sendbuf, recvbuf, root, comm)
function Scatterv!(sendbuf::VBuffer, recvbuf::Buffer, root::Integer, comm::Comm)
    if Comm_rank(comm) == root
        @assert length(sendbuf.counts) >= Comm_size(comm)
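A sketch of the keyword form with per-rank chunk sizes; the choice that rank `r` receives `r + 1` elements is purely illustrative:

```
using MPI
MPI.Init()
comm = MPI.COMM_WORLD
rank, nranks = MPI.Comm_rank(comm), MPI.Comm_size(comm)
counts = collect(1:nranks)            # rank r receives r + 1 elements
recvbuf = zeros(Int, counts[rank + 1])
sendbuf = rank == 0 ? MPI.VBuffer(collect(1:sum(counts)), counts) : nothing
MPI.Scatterv!(sendbuf, recvbuf, comm; root=0)
```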
@@ -198,7 +209,7 @@ Scatterv!(sendbuf::Nothing, recvbuf, root::Integer, comm::Comm) =
"""
-     Gather!(sendbuf, recvbuf::Union{UBuffer,Nothing}, root::Integer, comm::Comm)
+     Gather!(sendbuf, recvbuf, comm::Comm; root::Integer=0)
Each process sends the contents of the buffer `sendbuf` to the `root` process. The `root`
process stores elements in rank order in the buffer `recvbuf`.
@@ -212,9 +223,9 @@ case the corresponding entries in `recvbuf` are assumed to be already in place (
corresponds to the behaviour of `MPI_IN_PLACE` in `MPI_Gather`). For example:
```
if root == MPI.Comm_rank(comm)
-     MPI.Gather!(MPI.IN_PLACE, UBuffer(buf, count), root, comm)
+     MPI.Gather!(MPI.IN_PLACE, UBuffer(buf, count), comm; root=root)
else
-     MPI.Gather!(buf, nothing, root, comm)
+     MPI.Gather!(buf, nothing, comm; root=root)
end
```
@@ -230,6 +241,8 @@ can be `nothing`.
# External links
$(_doc_external("MPI_Gather"))
"""
+ Gather!(sendbuf, recvbuf, comm::Comm; root::Integer=Cint(0)) =
+     Gather!(sendbuf, recvbuf, root, comm)
function Gather!(sendbuf::Buffer, recvbuf::UBuffer, root::Integer, comm::Comm)
    if recvbuf.nchunks !== nothing && Comm_rank(comm) == root
        @assert recvbuf.nchunks >= Comm_size(comm)
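A sketch of `Gather!` into a preallocated `UBuffer` on the root using the keyword form; the chunk length of 2 is illustrative:

```
using MPI
MPI.Init()
comm = MPI.COMM_WORLD
rank, nranks = MPI.Comm_rank(comm), MPI.Comm_size(comm)
sendbuf = [2rank + 1, 2rank + 2]
# only the root needs a receive buffer; every other rank passes nothing
recvbuf = rank == 0 ? MPI.UBuffer(zeros(Int, 2nranks), 2) : nothing
MPI.Gather!(sendbuf, recvbuf, comm; root=0)
# on rank 0, recvbuf.data == collect(1:2nranks)
```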
@@ -255,7 +268,7 @@ Gather!(sendbuf::Nothing, recvbuf, root::Integer, comm::Comm) =
"""
-     Gather(sendbuf, root, comm::Comm)
+     Gather(sendbuf, comm::Comm; root=0)
Each process sends the contents of the buffer `sendbuf` to the `root` process. The `root`
allocates the output buffer and stores elements in rank order.
@@ -271,13 +284,15 @@ processes.
# External links
$(_doc_external("MPI_Gather"))
"""
+ Gather(sendbuf, comm::Comm; root::Integer=Cint(0)) =
+     Gather(sendbuf, root, comm)
Gather(sendbuf::AbstractArray, root::Integer, comm::Comm) =
    Gather!(sendbuf, Comm_rank(comm) == root ? similar(sendbuf, Comm_size(comm) * length(sendbuf)) : nothing, root, comm)
Gather(object::T, root::Integer, comm::Comm) where {T} =
    Gather!(Ref(object), Comm_rank(comm) == root ? Array{T}(undef, Comm_size(comm)) : nothing, root, comm)
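A minimal sketch of the allocating `Gather` keyword form, gathering one value per rank (an illustrative choice):

```
using MPI
MPI.Init()
comm = MPI.COMM_WORLD
vals = MPI.Gather(MPI.Comm_rank(comm), comm; root=0)
# on rank 0, vals == collect(0:MPI.Comm_size(comm) - 1)
```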
"""
-     Gatherv!(sendbuf, recvbuf::Union{VBuffer,Nothing}, root, comm)
+     Gatherv!(sendbuf, recvbuf, comm::Comm; root::Integer=0)
Each process sends the contents of the buffer `sendbuf` to the `root` process. The `root`
stores elements in rank order in the buffer `recvbuf`.
@@ -289,9 +304,9 @@ On the root process, `sendbuf` can be [`MPI.IN_PLACE`](@ref), in which case the
corresponding entries in `recvbuf` are assumed to be already in place. For example
```
if root == MPI.Comm_rank(comm)
-     Gatherv!(MPI.IN_PLACE, VBuffer(buf, counts), root, comm)
+     Gatherv!(MPI.IN_PLACE, VBuffer(buf, counts), comm; root=root)
else
-     Gatherv!(buf, nothing, root, comm)
+     Gatherv!(buf, nothing, comm; root=root)
end
```
@@ -307,6 +322,8 @@ can be `nothing`.
# External links
$(_doc_external("MPI_Gatherv"))
"""
+ Gatherv!(sendbuf, recvbuf, comm::Comm; root::Integer=Cint(0)) =
+     Gatherv!(sendbuf, recvbuf, root, comm)
function Gatherv!(sendbuf::Buffer, recvbuf::VBuffer, root::Integer, comm::Comm)
    if Comm_rank(comm) == root
        @assert length(recvbuf.counts) >= Comm_size(comm)
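A sketch of `Gatherv!` with per-rank contribution sizes; having rank `r` send `r + 1` copies of its rank is purely illustrative:

```
using MPI
MPI.Init()
comm = MPI.COMM_WORLD
rank, nranks = MPI.Comm_rank(comm), MPI.Comm_size(comm)
counts = collect(1:nranks)            # rank r contributes r + 1 elements
sendbuf = fill(rank, counts[rank + 1])
# only the root supplies a VBuffer; every other rank passes nothing
recvbuf = rank == 0 ? MPI.VBuffer(zeros(Int, sum(counts)), counts) : nothing
MPI.Gatherv!(sendbuf, recvbuf, comm; root=0)
```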
@@ -518,7 +535,7 @@ Alltoall(sendbuf::UBuffer, comm::Comm) =
"""
    Alltoallv!(sendbuf::VBuffer, recvbuf::VBuffer, comm::Comm)
- Similar to [`Alltoall!`](@ref), except with different size chunks per process.
+ Similar to [`Alltoall!`](@ref), except with different size chunks per process.
# See also
- [`VBuffer`](@ref)
@@ -542,7 +559,7 @@ function Alltoallv!(sendbuf::VBuffer, recvbuf::VBuffer, comm::Comm)
        sendbuf.data, sendbuf.counts, sendbuf.displs, sendbuf.datatype,
        recvbuf.data, recvbuf.counts, recvbuf.displs, recvbuf.datatype,
        comm)
-
+
    return recvbuf.data
end
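A sketch of `Alltoallv!` where chunk sizes differ per destination; the scheme below (every rank sends `j + 1` elements to rank `j`) is an illustrative assumption:

```
using MPI
MPI.Init()
comm = MPI.COMM_WORLD
rank, nranks = MPI.Comm_rank(comm), MPI.Comm_size(comm)
# every rank sends j + 1 elements to rank j, so rank r receives r + 1 from everyone
sendcounts = collect(1:nranks)
recvcounts = fill(rank + 1, nranks)
sendbuf = MPI.VBuffer(fill(rank, sum(sendcounts)), sendcounts)
recvbuf = MPI.VBuffer(zeros(Int, sum(recvcounts)), recvcounts)
MPI.Alltoallv!(sendbuf, recvbuf, comm)
```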
@@ -553,13 +570,13 @@ end
# mutating
"""
-     Reduce!(sendbuf, recvbuf, op, root::Integer, comm::Comm)
-     Reduce!(sendrecvbuf, op, root::Integer, comm::Comm)
+     Reduce!(sendbuf, recvbuf, op, comm::Comm; root::Integer=0)
+     Reduce!(sendrecvbuf, op, comm::Comm; root::Integer=0)
Performs elementwise reduction using the operator `op` on the buffer `sendbuf` and stores
the result in `recvbuf` on the process of rank `root`.
- On non-root processes `recvbuf` is ignored, and can be `nothing`.
+ On non-root processes `recvbuf` is ignored, and can be `nothing`.
To perform the reduction in place, provide a single buffer `sendrecvbuf`.
@@ -571,6 +588,11 @@ To perform the reduction in place, provide a single buffer `sendrecvbuf`.
# External links
$(_doc_external("MPI_Reduce"))
"""
+ Reduce!(sendrecvbuf, op, comm::Comm; root::Integer=Cint(0)) =
+     Reduce!(sendrecvbuf, op, root, comm)
+ Reduce!(sendbuf, recvbuf, op, comm::Comm; root::Integer=Cint(0)) =
+     Reduce!(sendbuf, recvbuf, op, root, comm)
+
function Reduce!(rbuf::RBuffer, op::Union{Op,MPI_Op}, root::Integer, comm::Comm)
    # int MPI_Reduce(const void* sendbuf, void* recvbuf, int count,
    #                MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
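A sketch of the mutating keyword form, summing an array onto the root; `MPI.SUM` and the array length are illustrative choices:

```
using MPI
MPI.Init()
comm = MPI.COMM_WORLD
sendbuf = fill(Float64(MPI.Comm_rank(comm)), 4)
# only the root needs a receive buffer; every other rank passes nothing
recvbuf = MPI.Comm_rank(comm) == 0 ? similar(sendbuf) : nothing
MPI.Reduce!(sendbuf, recvbuf, MPI.SUM, comm; root=0)
# on rank 0 each entry of recvbuf equals 0 + 1 + ... + (nranks - 1)
```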
# allocating
"""
-     recvbuf = Reduce(sendbuf, op, root::Integer, comm::Comm)
+     recvbuf = Reduce(sendbuf, op, comm::Comm; root::Integer=0)
Performs elementwise reduction using the operator `op` on the buffer `sendbuf`, returning
the result `recvbuf` on the process of rank `root`, and `nothing` on non-root processes.
@@ -612,8 +634,10 @@ the result `recvbuf` on the process of rank `root`, and `nothing` on non-root pr
# External links
$(_doc_external("MPI_Reduce"))
"""
+ Reduce(sendbuf, op, comm::Comm; root::Integer=Cint(0)) =
+     Reduce(sendbuf, op, root, comm)
function Reduce(sendbuf::AbstractArray, op, root::Integer, comm::Comm)
-     if Comm_rank(comm) == root
+     if Comm_rank(comm) == root
        Reduce!(sendbuf, similar(sendbuf), op, root, comm)
    else
        Reduce!(sendbuf, nothing, op, root, comm)
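Finally, a minimal sketch of the allocating `Reduce` keyword form; using `MPI.MAX` over a small array is an illustrative choice:

```
using MPI
MPI.Init()
comm = MPI.COMM_WORLD
result = MPI.Reduce(fill(MPI.Comm_rank(comm), 3), MPI.MAX, comm; root=0)
# on rank 0, result == fill(MPI.Comm_size(comm) - 1, 3); non-root ranks get nothing
```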