@@ -130,6 +130,7 @@ int ompi_io_ompio_gatherv_array (void *sbuf,
     int err = OMPI_SUCCESS;
     char *ptmp;
     OPAL_PTRDIFF_TYPE extent, lb;
+    ompi_request_t **reqs = NULL;

     rank = ompi_comm_rank (comm);

@@ -153,7 +154,10 @@ int ompi_io_ompio_gatherv_array (void *sbuf,
     if (OMPI_SUCCESS != err) {
         return OMPI_ERROR;
     }
-
+    reqs = (ompi_request_t **) malloc (procs_per_group * sizeof(ompi_request_t *));
+    if (NULL == reqs) {
+        return OMPI_ERR_OUT_OF_RESOURCE;
+    }
     for (i = 0; i < procs_per_group; i++) {
         ptmp = ((char *) rbuf) + (extent * disps[i]);

@@ -168,26 +172,34 @@ int ompi_io_ompio_gatherv_array (void *sbuf,
                                             rcounts[i],
                                             rdtype);
             }
+            reqs[i] = MPI_REQUEST_NULL;
         }
         else {
             /* Only receive if there is something to receive */
             if (rcounts[i] > 0) {
-                err = MCA_PML_CALL(recv(ptmp,
-                                        rcounts[i],
-                                        rdtype,
-                                        procs_in_group[i],
-                                        OMPIO_TAG_GATHERV,
-                                        comm,
-                                        MPI_STATUS_IGNORE));
+                err = MCA_PML_CALL(irecv(ptmp,
+                                         rcounts[i],
+                                         rdtype,
+                                         procs_in_group[i],
+                                         OMPIO_TAG_GATHERV,
+                                         comm,
+                                         &reqs[i]));
             }
+            else {
+                reqs[i] = MPI_REQUEST_NULL;
+            }
         }

         if (OMPI_SUCCESS != err) {
+            free (reqs);
             return err;
         }
     }
     /* All done */
-
+    err = ompi_request_wait_all (procs_per_group, reqs, MPI_STATUSES_IGNORE);
+    if (NULL != reqs) {
+        free (reqs);
+    }
     return err;
 }

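The hunks above change ompi_io_ompio_gatherv_array from blocking receives, issued one remote process at a time, into non-blocking receives that are completed together by a single ompi_request_wait_all; slots that carry no message are parked on MPI_REQUEST_NULL so the wait can still run over the whole request array. The same allocate / post / wait / free pattern is applied to the scatterv, gather and bcast variants below. As a rough illustration only, here is a minimal sketch of the receive side written against the public MPI API rather than the internal MCA_PML_CALL macro; the function name gatherv_root_sketch and its parameters are assumptions made for this example, not part of the patch.

/* Sketch only: approximates the non-blocking receive pattern above with
 * plain MPI calls.  Not the ompio implementation. */
#include <stdlib.h>
#include <mpi.h>

static int gatherv_root_sketch (char *rbuf, const int *rcounts,
                                const int *disps, MPI_Datatype rdtype,
                                const int *procs_in_group, int procs_per_group,
                                int rank, int tag, MPI_Comm comm)
{
    MPI_Aint lb, extent;
    MPI_Request *reqs;
    int i, err;

    MPI_Type_get_extent (rdtype, &lb, &extent);
    (void) lb;   /* lower bound not needed for this sketch */

    reqs = (MPI_Request *) malloc (procs_per_group * sizeof(MPI_Request));
    if (NULL == reqs) {
        return MPI_ERR_NO_MEM;
    }

    /* Post one non-blocking receive per remote group member; empty slots
     * stay MPI_REQUEST_NULL so MPI_Waitall can walk the whole array. */
    for (i = 0; i < procs_per_group; i++) {
        reqs[i] = MPI_REQUEST_NULL;
        if (procs_in_group[i] == rank || 0 == rcounts[i]) {
            continue;   /* local contribution or nothing to receive */
        }
        err = MPI_Irecv (rbuf + extent * disps[i], rcounts[i], rdtype,
                         procs_in_group[i], tag, comm, &reqs[i]);
        if (MPI_SUCCESS != err) {
            free (reqs);
            return err;
        }
    }

    /* Complete all receives at once instead of serializing them. */
    err = MPI_Waitall (procs_per_group, reqs, MPI_STATUSES_IGNORE);
    free (reqs);
    return err;
}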
@@ -207,6 +219,7 @@ int ompi_io_ompio_scatterv_array (void *sbuf,
     int err = OMPI_SUCCESS;
     char *ptmp;
     OPAL_PTRDIFF_TYPE extent, lb;
+    ompi_request_t **reqs = NULL;

     rank = ompi_comm_rank (comm);

@@ -230,7 +243,11 @@ int ompi_io_ompio_scatterv_array (void *sbuf,
     if (OMPI_SUCCESS != err) {
         return OMPI_ERROR;
     }
-
+    reqs = (ompi_request_t **) malloc (procs_per_group * sizeof(ompi_request_t *));
+    if (NULL == reqs) {
+        return OMPI_ERR_OUT_OF_RESOURCE;
+    }
+
     for (i = 0; i < procs_per_group; ++i) {
         ptmp = ((char *) sbuf) + (extent * disps[i]);

@@ -245,25 +262,34 @@ int ompi_io_ompio_scatterv_array (void *sbuf,
                                             rcount,
                                             rdtype);
             }
+            reqs[i] = MPI_REQUEST_NULL;
         }
         else {
             /* Only receive if there is something to receive */
             if (scounts[i] > 0) {
-                err = MCA_PML_CALL(send(ptmp,
-                                        scounts[i],
-                                        sdtype,
-                                        procs_in_group[i],
-                                        OMPIO_TAG_SCATTERV,
-                                        MCA_PML_BASE_SEND_STANDARD,
-                                        comm));
+                err = MCA_PML_CALL(isend(ptmp,
+                                         scounts[i],
+                                         sdtype,
+                                         procs_in_group[i],
+                                         OMPIO_TAG_SCATTERV,
+                                         MCA_PML_BASE_SEND_STANDARD,
+                                         comm,
+                                         &reqs[i]));
+            }
+            else {
+                reqs[i] = MPI_REQUEST_NULL;
            }
         }
-        if (OMPI_SUCCESS != err) {
+        if (OMPI_SUCCESS != err) {
+            free (reqs);
             return err;
         }
     }
     /* All done */
-
+    err = ompi_request_wait_all (procs_per_group, reqs, MPI_STATUSES_IGNORE);
+    if (NULL != reqs) {
+        free (reqs);
+    }
     return err;
 }

@@ -337,7 +363,8 @@ int ompi_io_ompio_gather_array (void *sbuf,
     OPAL_PTRDIFF_TYPE incr;
     OPAL_PTRDIFF_TYPE extent, lb;
     int err = OMPI_SUCCESS;
-
+    ompi_request_t **reqs = NULL;
+
     rank = ompi_comm_rank (comm);

     /* Everyone but the writers sends data and returns. */
@@ -356,8 +383,13 @@ int ompi_io_ompio_gather_array (void *sbuf,
     opal_datatype_get_extent (&rdtype->super, &lb, &extent);
     incr = extent * rcount;

-    for (i = 0, ptmp = (char *) rbuf;
-         i < procs_per_group;
+    reqs = (ompi_request_t **) malloc (procs_per_group * sizeof(ompi_request_t *));
+    if (NULL == reqs) {
+        return OMPI_ERR_OUT_OF_RESOURCE;
+    }
+
+    for (i = 0, ptmp = (char *) rbuf;
+         i < procs_per_group;
          ++i, ptmp += incr) {
         if (procs_in_group[i] == rank) {
             if (MPI_IN_PLACE != sbuf) {
@@ -371,15 +403,16 @@ int ompi_io_ompio_gather_array (void *sbuf,
             else {
                 err = OMPI_SUCCESS;
             }
+            reqs[i] = MPI_REQUEST_NULL;
         }
         else {
-            err = MCA_PML_CALL(recv(ptmp,
-                                    rcount,
-                                    rdtype,
-                                    procs_in_group[i],
-                                    OMPIO_TAG_GATHER,
-                                    comm,
-                                    MPI_STATUS_IGNORE));
+            err = MCA_PML_CALL(irecv(ptmp,
+                                     rcount,
+                                     rdtype,
+                                     procs_in_group[i],
+                                     OMPIO_TAG_GATHER,
+                                     comm,
+                                     &reqs[i]));
            /*
            for (k=0 ; k<4 ; k++)
                printf ("RECV %p %d \n",
@@ -389,11 +422,16 @@ int ompi_io_ompio_gather_array (void *sbuf,
         }

         if (OMPI_SUCCESS != err) {
+            free (reqs);
             return err;
         }
     }

     /* All done */
+    err = ompi_request_wait_all (procs_per_group, reqs, MPI_STATUSES_IGNORE);
+    if (NULL != reqs) {
+        free (reqs);
+    }

     return err;
 }
@@ -408,7 +446,8 @@ int ompi_io_ompio_bcast_array (void *buff,
 {
     int i, rank;
     int err = OMPI_SUCCESS;
-
+    ompi_request_t **reqs = NULL;
+
     rank = ompi_comm_rank (comm);

     /* Non-writers receive the data. */
@@ -424,24 +463,33 @@ int ompi_io_ompio_bcast_array (void *buff,
     }

     /* Writers sends data to all others. */
-
+    reqs = (ompi_request_t **) malloc (procs_per_group * sizeof(ompi_request_t *));
+    if (NULL == reqs) {
+        return OMPI_ERR_OUT_OF_RESOURCE;
+    }

     for (i = 0; i < procs_per_group; i++) {
         if (procs_in_group[i] == rank) {
+            reqs[i] = MPI_REQUEST_NULL;
             continue;
         }
-
-        err = MCA_PML_CALL(send(buff,
-                                count,
-                                datatype,
-                                procs_in_group[i],
-                                OMPIO_TAG_BCAST,
-                                MCA_PML_BASE_SEND_STANDARD,
-                                comm));
+        err = MCA_PML_CALL(isend(buff,
+                                 count,
+                                 datatype,
+                                 procs_in_group[i],
+                                 OMPIO_TAG_BCAST,
+                                 MCA_PML_BASE_SEND_STANDARD,
+                                 comm,
+                                 &reqs[i]));
         if (OMPI_SUCCESS != err) {
+            free (reqs);
             return err;
         }
     }
-
+    err = ompi_request_wait_all (procs_per_group, reqs, MPI_STATUSES_IGNORE);
+    if (NULL != reqs) {
+        free (reqs);
+    }
+
     return err;
 }
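The scatterv and bcast variants apply the mirror image of the same pattern on the sending side: the writer posts one non-blocking send per remote group member and then waits on the whole request array. A companion sketch in plain MPI, again illustrative only; bcast_root_sketch and its parameters are invented for the example, not part of the patch.

/* Sketch only: sender-side counterpart of the pattern above.
 * Not the ompio implementation. */
#include <stdlib.h>
#include <mpi.h>

static int bcast_root_sketch (void *buff, int count, MPI_Datatype datatype,
                              const int *procs_in_group, int procs_per_group,
                              int rank, int tag, MPI_Comm comm)
{
    MPI_Request *reqs;
    int i, err;

    reqs = (MPI_Request *) malloc (procs_per_group * sizeof(MPI_Request));
    if (NULL == reqs) {
        return MPI_ERR_NO_MEM;
    }

    for (i = 0; i < procs_per_group; i++) {
        reqs[i] = MPI_REQUEST_NULL;   /* nothing to send to ourselves */
        if (procs_in_group[i] == rank) {
            continue;
        }
        err = MPI_Isend (buff, count, datatype, procs_in_group[i],
                         tag, comm, &reqs[i]);
        if (MPI_SUCCESS != err) {
            free (reqs);
            return err;
        }
    }

    /* One wait covers every outstanding send. */
    err = MPI_Waitall (procs_per_group, reqs, MPI_STATUSES_IGNORE);
    free (reqs);
    return err;
}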