Commit 25a3465

Merge pull request #3712 from gpaulsen/rel_v3.0.x_pr3500_pr3635
Rel v3.0.x pr3500 pr3635
2 parents dcf6080 + 9f412c3

1 file changed

ompi/mpi/c/sendrecv_replace.c

Lines changed: 62 additions & 61 deletions

@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2010 The University of Tennessee and The University
+ * Copyright (c) 2004-2017 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
@@ -12,6 +12,7 @@
  * Copyright (c) 2010-2012 Oracle and/or its affiliates.  All rights reserved.
  * Copyright (c) 2015      Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
+ * Copyright (c) 2017      IBM Corporation. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -48,10 +49,10 @@ int MPI_Sendrecv_replace(void * buf, int count, MPI_Datatype datatype,
     int rc = MPI_SUCCESS;
 
     MEMCHECKER(
-                memchecker_datatype(datatype);
-                memchecker_call(&opal_memchecker_base_isdefined, buf, count, datatype);
-                memchecker_comm(comm);
-                );
+        memchecker_datatype(datatype);
+        memchecker_call(&opal_memchecker_base_isdefined, buf, count, datatype);
+        memchecker_comm(comm);
+    );
 
     if ( MPI_PARAM_CHECK ) {
         rc = MPI_SUCCESS;
@@ -76,68 +77,68 @@ int MPI_Sendrecv_replace(void * buf, int count, MPI_Datatype datatype,
 
     /* simple case */
     if ( source == MPI_PROC_NULL || dest == MPI_PROC_NULL || count == 0 ) {
-        rc = PMPI_Sendrecv(buf,count,datatype,dest,sendtag,buf,count,datatype,source,recvtag,comm,status);
+        rc = PMPI_Sendrecv(buf, count, datatype, dest, sendtag, buf, count, datatype, source, recvtag, comm, status);
 
         OPAL_CR_EXIT_LIBRARY();
         return rc;
-    } else {
-
-        opal_convertor_t convertor;
-        struct iovec iov;
-        unsigned char recv_data[2048];
-        size_t packed_size, max_data;
-        uint32_t iov_count;
-        ompi_status_public_t recv_status;
-        ompi_proc_t* proc = ompi_comm_peer_lookup(comm,source);
-        if(proc == NULL) {
-            rc = MPI_ERR_RANK;
-            OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
-        }
-
-        /* initialize convertor to unpack recv buffer */
-        OBJ_CONSTRUCT(&convertor, opal_convertor_t);
-        opal_convertor_copy_and_prepare_for_recv( proc->super.proc_convertor, &(datatype->super),
-                                                  count, buf, 0, &convertor );
-
-        /* setup a buffer for recv */
-        opal_convertor_get_packed_size( &convertor, &packed_size );
-        if( packed_size > sizeof(recv_data) ) {
-            rc = PMPI_Alloc_mem(packed_size, MPI_INFO_NULL, &iov.iov_base);
-            if(OMPI_SUCCESS != rc) {
-                OMPI_ERRHANDLER_RETURN(OMPI_ERR_OUT_OF_RESOURCE, comm, MPI_ERR_BUFFER, FUNC_NAME);
-            }
-        } else {
-            iov.iov_base = (caddr_t)recv_data;
-        }
-
-        /* recv into temporary buffer */
-        rc = PMPI_Sendrecv( buf, count, datatype, dest, sendtag, iov.iov_base, packed_size,
-                            MPI_BYTE, source, recvtag, comm, &recv_status );
-        if (rc != MPI_SUCCESS) {
-            if(packed_size > sizeof(recv_data))
-                PMPI_Free_mem(iov.iov_base);
-            OBJ_DESTRUCT(&convertor);
-            OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
-        }
-
-        /* unpack into users buffer */
-        iov.iov_len = recv_status._ucount;
-        iov_count = 1;
-        max_data = recv_status._ucount;
-        opal_convertor_unpack(&convertor, &iov, &iov_count, &max_data );
+    }
 
-        /* return status to user */
-        if(status != MPI_STATUS_IGNORE) {
-            *status = recv_status;
-        }
+    /**
+     * If we look for an optimal solution, then we should receive the data into a temporary buffer
+     * and once the send completes we would unpack back into the original buffer. However, if the
+     * sender is unknown, this approach can only be implemented by receiving with the recv datatype
+     * (potentially non-contiguous) and thus the allocated memory will be larger than the size of the
+     * datatype. A simpler, but potentially less efficient approach is to work on the data we have
+     * control of, aka the sent data, and pack it into a contiguous buffer before posting the receive.
+     * Once the send completes, we free it.
+     */
+    opal_convertor_t convertor;
+    unsigned char packed_data[2048];
+    struct iovec iov = { .iov_base = packed_data, .iov_len = sizeof(packed_data) };
+    size_t packed_size, max_data;
+    uint32_t iov_count;
+    ompi_status_public_t recv_status;
+    ompi_proc_t* proc = ompi_comm_peer_lookup(comm, dest);
+    if(proc == NULL) {
+        rc = MPI_ERR_RANK;
+        OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
+    }
 
-        /* release resources */
-        if(packed_size > sizeof(recv_data)) {
-            PMPI_Free_mem(iov.iov_base);
+    /* initialize convertor to pack the send buffer */
+    OBJ_CONSTRUCT(&convertor, opal_convertor_t);
+    opal_convertor_copy_and_prepare_for_send( proc->super.proc_convertor, &(datatype->super),
+                                              count, buf, 0, &convertor );
+
+    /* setup a temporary buffer for the packed send data */
+    opal_convertor_get_packed_size( &convertor, &packed_size );
+    if( packed_size > sizeof(packed_data) ) {
+        rc = PMPI_Alloc_mem(packed_size, MPI_INFO_NULL, &iov.iov_base);
+        if(OMPI_SUCCESS != rc) {
+            rc = OMPI_ERR_OUT_OF_RESOURCE;
+            goto cleanup_and_return;
         }
-        OBJ_DESTRUCT(&convertor);
+        iov.iov_len = packed_size;
+    }
+    max_data = packed_size;
+    iov_count = 1;
+    rc = opal_convertor_pack(&convertor, &iov, &iov_count, &max_data);
+
+    /* send from the packed temporary buffer, recv directly into the user buffer */
+    rc = PMPI_Sendrecv( iov.iov_base, packed_size, MPI_PACKED, dest, sendtag, buf, count,
+                        datatype, source, recvtag, comm, &recv_status );
+
+ cleanup_and_return:
+    /* return status to user */
+    if(status != MPI_STATUS_IGNORE) {
+        *status = recv_status;
+    }
 
-        OPAL_CR_EXIT_LIBRARY();
-        return MPI_SUCCESS;
+    /* release resources */
+    if(packed_size > sizeof(packed_data)) {
+        PMPI_Free_mem(iov.iov_base);
     }
+    OBJ_DESTRUCT(&convertor);
+
+    OPAL_CR_EXIT_LIBRARY();
+    OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
 }
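
The same pack-then-exchange strategy can be expressed with the public MPI API alone: pack the send data into a contiguous scratch buffer, then let a single MPI_Sendrecv send the packed bytes while receiving directly into the (possibly non-contiguous) user buffer. A minimal sketch of that pattern follows; the helper name emulate_sendrecv_replace and its error handling are illustrative assumptions, not part of this commit, which uses Open MPI's internal convertor rather than MPI_Pack.

#include <mpi.h>
#include <stdlib.h>

/* Illustrative helper (not part of the commit): emulates MPI_Sendrecv_replace
 * with the pack-then-exchange strategy the commit adopts internally. */
static int emulate_sendrecv_replace(void *buf, int count, MPI_Datatype datatype,
                                    int dest, int sendtag, int source, int recvtag,
                                    MPI_Comm comm, MPI_Status *status)
{
    int packed_size, position = 0, rc;
    void *packed;

    /* upper bound on the packed size of count elements of datatype */
    rc = MPI_Pack_size(count, datatype, comm, &packed_size);
    if (MPI_SUCCESS != rc) return rc;

    packed = malloc(packed_size > 0 ? packed_size : 1);
    if (NULL == packed) return MPI_ERR_NO_MEM;

    /* pack the data we control (the send side) into contiguous storage */
    rc = MPI_Pack(buf, count, datatype, packed, packed_size, &position, comm);
    if (MPI_SUCCESS == rc) {
        /* send the packed bytes; the receive lands directly in the user buffer */
        rc = MPI_Sendrecv(packed, position, MPI_PACKED, dest, sendtag,
                          buf, count, datatype, source, recvtag, comm, status);
    }
    free(packed);
    return rc;
}

Packing the send side keeps the scratch buffer at exactly the packed size, which is the point of the committed change: receiving into a temporary with the receive datatype would require the full, potentially larger, extent for non-contiguous layouts.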
