|
| 1 | +.\" -*- nroff -*- |
| 2 | +.\" Copyright 2015 Los Alamos National Security, LLC. All rights reserved. |
| 3 | +.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. |
| 4 | +.\" Copyright 2007-2008 Sun Microsystems, Inc. |
| 5 | +.\" Copyright (c) 1996 Thinking Machines Corporation |
| 6 | +.\" $COPYRIGHT$ |
| 7 | +.TH MPI_Win_allocate_shared 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" |
| 8 | +.SH NAME |
| 9 | +\fBMPI_Win_allocate_shared\fP \- One-sided MPI call that allocates |
| 10 | +shared memory and returns a window object for RMA operations. |
| 11 | + |
| 12 | +.SH SYNTAX |
| 13 | +.ft R |
| 14 | +.SH C Syntax |
| 15 | +.nf |
| 16 | +#include <mpi.h> |
| 17 | +int MPI_Win_allocate_shared (MPI_Aint \fIsize\fP, int \fIdisp_unit\fP, MPI_Info \fIinfo\fP, |
| 18 | + MPI_Comm \fIcomm\fP, void *\fIbaseptr\fP, MPI_Win *\fIwin\fP) |
| 19 | + |
| 20 | +.fi |
| 21 | +.SH Fortran Syntax |
| 22 | +.nf |
| 23 | +INCLUDE 'mpif.h' |
| 24 | +MPI_WIN_ALLOCATE_SHARED(\fISIZE, DISP_UNIT, INFO, COMM, BASEPTR, WIN, IERROR\fP)
| 25 | + INTEGER(KIND=MPI_ADDRESS_KIND) \fISIZE, BASEPTR\fP |
| 26 | + INTEGER \fIDISP_UNIT, INFO, COMM, WIN, IERROR\fP |
| 27 | + |
| 28 | +.fi |
| 29 | +.SH INPUT PARAMETERS |
| 30 | +.ft R |
| 31 | +.TP 1i |
| 32 | +size |
| 33 | +Size of window in bytes (nonnegative integer). |
| 34 | +.TP 1i |
| 35 | +disp_unit |
| 36 | +Local unit size for displacements, in bytes (positive integer). |
| 37 | +.TP 1i |
| 38 | +info |
| 39 | +Info argument (handle). |
| 40 | +.TP 1i |
| 41 | +comm |
| 42 | +Communicator (handle). |
| 43 | + |
| 44 | +.SH OUTPUT PARAMETERS |
| 45 | +.ft R |
| 46 | +.TP 1i |
| 47 | +baseptr |
| 48 | +Initial address of window. |
| 49 | +.TP 1i |
| 50 | +win |
| 51 | +Window object returned by the call (handle). |
| 52 | +.TP 1i |
| 53 | +IERROR |
| 54 | +Fortran only: Error status (integer). |
| 55 | + |
| 56 | +.SH DESCRIPTION |
| 57 | +.ft R |
| 58 | +\fBMPI_Win_allocate_shared\fP is a collective call executed by all |
| 59 | +processes in the group of \fIcomm\fP. On each process, it allocates |
| 60 | +memory of at least \fIsize\fP bytes that is shared among all processes |
| 61 | +in \fIcomm\fP, and returns a pointer to the locally allocated segment |
| 62 | +in \fIbaseptr\fP that can be used for load/store accesses on the |
| 63 | +calling process. The locally allocated memory can be the target of |
| 64 | +load/store accesses by remote processes; the base pointers for other |
| 65 | +processes can be queried using the function |
| 66 | +\fBMPI_Win_shared_query\fP. The call also returns a window object that |
| 67 | +can be used by all processes in \fIcomm\fP to perform RMA |
| 68 | +operations. The \fIsize\fP argument may be different at each process |
| 69 | +and \fIsize\fP = 0 is valid. It is the user's responsibility to ensure |
| 70 | +that the communicator \fIcomm\fP represents a group of processes that |
| 71 | +can create a shared memory segment that can be accessed by all |
| 72 | +processes in the group. The discussions of rationales for |
| 73 | +\fBMPI_Alloc_mem\fP and \fBMPI_Free_mem\fP in MPI-3.1 \[char167] 8.2 |
| 74 | +also apply to \fBMPI_Win_allocate_shared\fP; in particular, see the |
| 75 | +rationale in MPI-3.1 \[char167] 8.2 for an explanation of the type |
| 76 | +used for \fIbaseptr\fP. The allocated memory is contiguous across |
| 77 | +process ranks unless the info key \fIalloc_shared_noncontig\fP is |
| 78 | +specified. Contiguous across process ranks means that the first |
| 79 | +address in the memory segment of process i is consecutive with the |
| 80 | +last address in the memory segment of process i - 1. This may enable |
| 81 | +the user to calculate remote address offsets with local information |
| 82 | +only. |
| 83 | +.sp |
| 84 | +The following info keys are supported: |
| 85 | +.ft R |
| 86 | +.TP 1i |
| 87 | +alloc_shared_noncontig |
| 88 | +If not set to \fItrue\fP, the allocation strategy is to allocate |
| 89 | +contiguous memory across process ranks. This may limit the performance |
| 90 | +on some architectures because it does not allow the implementation to |
| 91 | +modify the data layout (e.g., padding to reduce access latency). |
| 92 | +.sp |
| 93 | +.TP 1i |
| 94 | +blocking_fence |
| 95 | +If set to \fItrue\fP, the osc/sm component will use \fBMPI_Barrier\fP
| 96 | +for \fBMPI_Win_fence\fP. If set to \fIfalse\fP a condition variable |
| 97 | +and counter will be used instead. The default value is |
| 98 | +\fIfalse\fP. This info key is Open MPI specific. |
| 99 | +.sp |
| 100 | +.PP
| 101 | +For additional supported info keys see \fBMPI_Win_create\fP.
| 102 | +.sp |
| 103 | + |
| 104 | +.SH NOTES |
| 105 | +Common choices for \fIdisp_unit\fP are 1 (no scaling), and (in C |
| 106 | +syntax) \fIsizeof(type)\fP, for a window that consists of an array of |
| 107 | +elements of type \fItype\fP. The latter choice will allow one to use
| 108 | +array indices in RMA calls, and have those scaled correctly to byte |
| 109 | +displacements, even in a heterogeneous environment. |
| 110 | +.sp |
| 111 | + |
| 112 | +.SH ERRORS |
| 113 | +Almost all MPI routines return an error value; C routines as the value |
| 114 | +of the function and Fortran routines in the last argument. |
| 115 | +.sp |
| 116 | +Before the error value is returned, the current MPI error handler is |
| 117 | +called. By default, this error handler aborts the MPI job, except for |
| 118 | +I/O function errors. The error handler may be changed with |
| 119 | +MPI_Comm_set_errhandler; the predefined error handler |
| 120 | +MPI_ERRORS_RETURN may be used to cause error values to be |
| 121 | +returned. Note that MPI does not guarantee that an MPI program can |
| 122 | +continue past an error. |
| 123 | + |
| 124 | +.SH SEE ALSO |
| 125 | +.ft R |
| 126 | +.sp |
| 127 | +MPI_Alloc_mem |
| 128 | +MPI_Free_mem |
| 129 | +MPI_Win_allocate |
| 130 | +MPI_Win_create |
0 commit comments