
Commit 1e77226

Merge pull request #205 from esmf-org/feature/pio_with_mpiuni
Enable PIO with mpiuni

This PR enables building and running with the internal PIO when using mpiuni. This is especially relevant for people using ESMPy, since ESMPy is sometimes built without MPI, and this is apparently needed on many HPC systems (see also conda-forge/esmpy-feedstock#70). This resolves #131.
2 parents 03d0eb5 + 4478342 commit 1e77226

File tree

10 files changed: +187 −45 lines

build/common.mk

Lines changed: 0 additions & 6 deletions
@@ -1699,12 +1699,6 @@ export ESMF_PIO = $(ESMF_PIODEFAULT)
 endif

 ifeq ($(ESMF_PIO),internal)
-ifeq ($(ESMF_COMM),mpiuni)
-#TODO: This turns PIO off if it was set to internal from a default setting.
-#TODO: We need to do this while our internal PIO does not support mpiuni mode,
-#TODO: but want to allow external PIO or explicit ESMF_PIO setting for developm. #TODO: Eventually this should become unnecessary.
-ESMF_PIO = OFF
-endif
 ifndef ESMF_NETCDF
 # PIO, starting with version 2, depends on NetCDF. Defaulting to internal needs
 # be turned off if there is no NetCDF available. Externally set PIO will be let

src/Infrastructure/IO/PIO/makefile

Lines changed: 8 additions & 0 deletions
@@ -17,6 +17,14 @@ else
 PIO_CMAKE_OPTS += -DPIO_ENABLE_LOGGING=OFF -DCMAKE_BUILD_TYPE=release
 endif

+ifeq ($(ESMF_COMM),mpiuni)
+# Use ESMF's mpiuni as a stand-in for the mpi-serial library that PIO expects
+PIO_CMAKE_OPTS += -DPIO_USE_MPISERIAL=ON -DMPISERIAL_PATH=$(ESMF_DIR)/src/Infrastructure/stubs/mpiuni
+
+# There are problems building PIO's tests with mpiuni; for now, just disable this internal testing
+PIO_CMAKE_OPTS += -DPIO_ENABLE_TESTS=OFF
+endif
+
 ifdef ESMF_NETCDF_INCLUDE
 ifneq ("$(wildcard $(ESMF_NETCDF_LIBPATH)/libnetcdf.a)","")
 PIO_CMAKE_OPTS += -DNetCDF_C_INCLUDE_DIR=$(ESMF_NETCDF_INCLUDE) -DNetCDF_C_LIBRARY=$(ESMF_NETCDF_LIBPATH)/libnetcdf.a

src/Infrastructure/IO/doc/IO_rest.tex

Lines changed: 0 additions & 5 deletions
@@ -2,11 +2,6 @@

 \begin{enumerate}

-% See https://github.com/esmf-org/esmf/issues/131
-\item {\bf I/O of NetCDF files requires a real MPI library.}
-Currently I/O of NetCDF files (which uses PIO) requires a real MPI
-library: it cannot be done with ESMF\_COMM set to "mpiuni".
-
 \item {\bf Limited data formats supported.}
 Currently a small fraction of the anticipated data formats is implemented by
 ESMF. The data I/O uses NetCDF format, and ESMF Info
Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+This is a dummy file needed to satisfy PIO's FindMPISERIAL.cmake.
+
+This is needed because, when building with mpiuni, we tell PIO that this
+mpiuni directory is the location of mpi-serial (since we use mpiuni for
+the same purpose as mpi-serial). But for FindMPISERIAL.cmake to succeed,
+it needs to find a libmpi-serial.a in the mpi-serial directory; hence,
+we put this file here to trick it into thinking that this is truly an
+mpi-serial installation.

src/Infrastructure/stubs/mpiuni/mpi.c

Lines changed: 100 additions & 2 deletions
@@ -55,8 +55,28 @@ static int num_attr = 1,mpi_tag_ub = 100000000;

 /*
 To avoid problems with prototypes to the system memcpy() it is duplicated here
+
+This version also supports checking for MPI_IN_PLACE
 */
-int MPIUNI_Memcpy(void *a,const void* b,int n) {
+int MPIUNI_Memcpy(void *a,const void* b,int n,enum CheckForMPIInPlace_Flag check_flag) {
+  switch(check_flag) {
+  case CHECK_FOR_MPI_IN_PLACE_NONE:
+    // No pre-check in this case; proceed to the actual memcpy
+    break;
+  case CHECK_FOR_MPI_IN_PLACE_SOURCE:
+    if (b == MPI_IN_PLACE) {
+      // If the source is MPI_IN_PLACE, do nothing
+      return 0;
+    }
+    break;
+  case CHECK_FOR_MPI_IN_PLACE_DEST:
+    if (a == MPI_IN_PLACE) {
+      // If the dest is MPI_IN_PLACE, do nothing
+      return 0;
+    }
+    break;
+  }
+
   int i;
   char *aa= (char*)a;
   char *bb= (char*)b;
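As a rough illustration of how the new check_flag argument is meant to be used: presumably callers (such as PIO) may pass the MPI_IN_PLACE sentinel as a buffer argument, and the copy must then be skipped rather than dereferencing the sentinel. The driver below is not part of the commit; it is a minimal sketch assuming a file compiled against the mpiuni stub mpi.h shown in this PR, with the mpiuni directory on the include path.

#include <stdio.h>
#include "mpi.h"   /* mpiuni's stub header */

int main(void) {
  double send[4] = {1.0, 2.0, 3.0, 4.0};
  double recv[4] = {0.0, 0.0, 0.0, 0.0};

  /* Ordinary copy: the source is a real buffer, so all bytes are copied
     (recall that MPI_DOUBLE is defined as sizeof(double) in mpiuni). */
  MPIUNI_Memcpy(recv, send, 4*MPI_DOUBLE, CHECK_FOR_MPI_IN_PLACE_SOURCE);

  /* MPI_IN_PLACE as the source: the copy is skipped instead of
     dereferencing the sentinel pointer, so recv is left untouched. */
  MPIUNI_Memcpy(recv, MPI_IN_PLACE, 4*MPI_DOUBLE, CHECK_FOR_MPI_IN_PLACE_SOURCE);

  printf("recv[0] = %g\n", recv[0]);  /* still 1.0 from the first copy */
  return 0;
}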
@@ -179,6 +199,84 @@ int Petsc_MPI_Finalize(void)
   return 0;
 }

+int ESMC_MPI_Alltoallw(void *sendbuf, int *sendcounts, int *sdispls,
+                       MPI_Datatype *sendtypes, void *recvbuf, int *recvcounts,
+                       int *rdispls, MPI_Datatype *recvtypes, MPI_Comm comm)
+{
+  // Since we are only implementing this for the single-processor case, the counts, displs
+  // and types arguments should all have length 1. We assume that's the case in this
+  // implementation.
+
+  // Displacements are not implemented so return an error code if they are non-zero
+  if (sdispls[0] != 0 || rdispls[0] != 0) {
+    return MPI_ERR_INTERN;
+  }
+
+  MPIUNI_Memcpy(recvbuf, sendbuf, sendcounts[0]*sendtypes[0], CHECK_FOR_MPI_IN_PLACE_SOURCE);
+  return MPI_SUCCESS;
+}
+
+int ESMC_MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
+                      MPI_Datatype sendtype, void *recvbuf, int recvcount,
+                      MPI_Datatype recvtype, int root, MPI_Comm comm)
+{
+  // Since we are only implementing this for the single-processor case, the sendcounts and
+  // displs arguments should have length 1. We assume that's the case in this
+  // implementation.
+
+  // Displacements are not implemented so return an error code if they are non-zero
+  if (displs[0] != 0) {
+    return MPI_ERR_INTERN;
+  }
+
+  MPIUNI_Memcpy(recvbuf, sendbuf, sendcounts[0]*sendtype, CHECK_FOR_MPI_IN_PLACE_DEST);
+  return MPI_SUCCESS;
+}
+
+int ESMC_MPI_Type_create_hvector(int count, int blocklength, MPI_Aint stride,
+                                 MPI_Datatype oldtype, MPI_Datatype *newtype)
+{
+  // Note mpiuni's definition of each datatype as sizeof(raw-type).
+  //
+  // From some experimentation with a real MPI library, the MPI_Type_size of newtype is
+  // independent of the value of stride. Since the MPI_Datatype in mpiuni is just the size
+  // of the datatype, we ignore the possible complexity of stride in this implementation.
+  *newtype = count*blocklength*oldtype;
+  return MPI_SUCCESS;
+}
+
+int ESMC_MPI_Type_create_indexed_block(int count, int blocklength,
+                                       const int array_of_displacements[],
+                                       MPI_Datatype oldtype,
+                                       MPI_Datatype *newtype)
+{
+  // Note mpiuni's definition of each datatype as sizeof(raw-type).
+  //
+  // From some experimentation with a real MPI library, the MPI_Type_size of newtype is
+  // independent of the values in array_of_displacements. Since the MPI_Datatype in mpiuni
+  // is just the size of the datatype, we ignore the possible complexity of
+  // array_of_displacements in this implementation.
+  *newtype = count*blocklength*oldtype;
+  return MPI_SUCCESS;
+}
+
+int ESMC_MPI_Type_hvector(int count, int blocklength, MPI_Aint stride,
+                          MPI_Datatype oldtype, MPI_Datatype *newtype)
+{
+  // MPI_Type_hvector is a deprecated version of MPI_Type_create_hvector; the only
+  // difference is in how stride is specified (bytes vs. elements); since we ignore stride
+  // in our implementation of MPI_Type_create_hvector, we can use the same implementation
+  // for both.
+  return ESMC_MPI_Type_create_hvector(count, blocklength, stride, oldtype, newtype);
+}
+
+int ESMC_MPI_Type_size(MPI_Datatype datatype, int *size)
+{
+  // Note that, conveniently, mpiuni defines each datatype as sizeof(raw-type)
+  *size = datatype;
+  return MPI_SUCCESS;
+}
+
 #if !defined (ESMF_OS_MinGW)
 // POSIX version
 double ESMC_MPI_Wtime(void)
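The small driver below (illustrative only, not part of the commit) sketches how these single-processor implementations behave under mpiuni's datatype-as-size convention, again assuming a file compiled against the stub mpi.h; the variable names are arbitrary.

#include <stdio.h>
#include "mpi.h"   /* mpiuni's stub header */

int main(void) {
  /* MPI_Type_size now just echoes the handle back, since an mpiuni
     datatype is its size in bytes. */
  int size = 0;
  MPI_Type_size(MPI_DOUBLE, &size);          /* size == sizeof(double) */

  /* Single-rank Scatterv: sendcounts/displs have length 1 and the data is
     memcpy'd straight from sendbuf to recvbuf; a non-zero displacement
     would return MPI_ERR_INTERN. */
  double send[3] = {1.0, 2.0, 3.0};
  double recv[3] = {0.0, 0.0, 0.0};
  int sendcounts[1] = {3};
  int displs[1] = {0};
  int rc = MPI_Scatterv(send, sendcounts, displs, MPI_DOUBLE,
                        recv, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD);

  /* rc == MPI_SUCCESS, size == sizeof(double), recv[2] == 3.0 */
  printf("rc=%d size=%d recv[2]=%g\n", rc, size, recv[2]);
  return 0;
}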
@@ -403,7 +501,7 @@ void MPIUNI_STDCALL mpi_allreduce(void *sendbuf,void *recvbuf,int *count,int *da
     *ierr = MPI_ERR_OP;
     return;
   }
-  MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
+  MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype],CHECK_FOR_MPI_IN_PLACE_SOURCE);
   *ierr = MPI_SUCCESS;
 }
 void MPIUNI_STDCALL mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)

src/Infrastructure/stubs/mpiuni/mpi.h

Lines changed: 45 additions & 26 deletions
@@ -82,12 +82,16 @@ extern void *MPIUNI_TMP;
 #define MPI_COMM_WORLD 1
 #define MPI_COMM_SELF MPI_COMM_WORLD
 #define MPI_COMM_NULL 0
+#define MPI_GROUP_EMPTY (-1)
+#define MPI_GROUP_NULL 0
 #define MPI_SUCCESS 0
 #define MPI_IDENT 0
 #define MPI_CONGRUENT 0
 #define MPI_SIMILAR 0
 #define MPI_UNEQUAL 3
 #define MPI_ANY_SOURCE (-2)
+#define MPI_PROC_NULL (-3)
+#define MPI_ROOT (-4)
 #define MPI_KEYVAL_INVALID 0
 #define MPI_ERR_UNKNOWN 18
 #define MPI_ERR_INTERN 21
@@ -109,13 +113,19 @@ typedef int MPI_Info; /* handle */

 #define MPI_INFO_NULL (0)

+#define MPI_IN_PLACE (void *)(-1)
+enum CheckForMPIInPlace_Flag {
+  CHECK_FOR_MPI_IN_PLACE_NONE,
+  CHECK_FOR_MPI_IN_PLACE_SOURCE,
+  CHECK_FOR_MPI_IN_PLACE_DEST
+};

-
-extern int MPIUNI_Memcpy(void*,const void*,int);
+extern int MPIUNI_Memcpy(void*,const void*,int,enum CheckForMPIInPlace_Flag);

 /* In order to handle datatypes, we make them into "sizeof(raw-type)";
    this allows us to do the MPIUNI_Memcpy's easily */
 #define MPI_Datatype int
+#define MPI_DATATYPE_NULL 0
 #define MPI_FLOAT sizeof(float)
 #define MPI_DOUBLE sizeof(double)
 #define MPI_LONG_DOUBLE sizeof(long double)
@@ -140,6 +150,7 @@ extern int MPIUNI_Memcpy(void*,const void*,int);
 #define MPI_2INTEGER (2*sizeof(int))
 #define MPI_UNSIGNED_CHAR sizeof(unsigned char)
 #define MPI_UNSIGNED_LONG sizeof(unsigned long)
+#define MPI_OFFSET sizeof(MPI_Offset)
 #define MPIU_PETSCLOGDOUBLE sizeof(PetscLogDouble)
 #define MPI_REQUEST_NULL ((MPI_Request)0)

@@ -197,6 +208,14 @@ extern int Petsc_MPI_Initialized(int *);
 extern int Petsc_MPI_Comm_dup(MPI_Comm,MPI_Comm *);
 extern int Petsc_MPI_Finalize(void);
 extern int Petsc_MPI_Finalized(int *);
+extern int ESMC_MPI_Alltoallw(void *,int *,int *,MPI_Datatype *,
+                              void *,int *,int *,MPI_Datatype *,MPI_Comm);
+extern int ESMC_MPI_Scatterv(void *,int *,int *,MPI_Datatype,
+                             void *,int,MPI_Datatype,int,MPI_Comm);
+extern int ESMC_MPI_Type_create_hvector(int,int,MPI_Aint,MPI_Datatype,MPI_Datatype *);
+extern int ESMC_MPI_Type_create_indexed_block(int,int,const int[],MPI_Datatype,MPI_Datatype *);
+extern int ESMC_MPI_Type_hvector(int,int,MPI_Aint,MPI_Datatype,MPI_Datatype *);
+extern int ESMC_MPI_Type_size(MPI_Datatype,int *);
 extern double ESMC_MPI_Wtime(void);

 #define MPI_Abort Petsc_MPI_Abort
@@ -210,6 +229,12 @@ extern double ESMC_MPI_Wtime(void);
 #define MPI_Comm_dup Petsc_MPI_Comm_dup
 #define MPI_Finalize Petsc_MPI_Finalize
 #define MPI_Finalized Petsc_MPI_Finalized
+#define MPI_Alltoallw ESMC_MPI_Alltoallw
+#define MPI_Scatterv ESMC_MPI_Scatterv
+#define MPI_Type_create_hvector ESMC_MPI_Type_create_hvector
+#define MPI_Type_create_indexed_block ESMC_MPI_Type_create_indexed_block
+#define MPI_Type_hvector ESMC_MPI_Type_hvector
+#define MPI_Type_size ESMC_MPI_Type_size
 #define MPI_Wtime ESMC_MPI_Wtime

 /*
@@ -458,13 +483,12 @@ extern double ESMC_MPI_Wtime(void);
                          dest,sendtag,recvbuf,recvcount,\
                          recvtype,source,recvtag,\
                          comm,status) \
-  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount) * (sendtype))
+  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount) * (sendtype),CHECK_FOR_MPI_IN_PLACE_NONE)
 #define MPI_Sendrecv_replace(buf,count, datatype,dest,sendtag,\
                              source,recvtag,comm,status) MPI_SUCCESS
 #define MPI_Type_contiguous(count, oldtype,newtype) \
   (*(newtype) = (count)*(oldtype),MPI_SUCCESS)
 #define MPI_Type_vector(count,blocklength,stride,oldtype, newtype) MPI_SUCCESS
-#define MPI_Type_hvector(count,blocklength,stride,oldtype, newtype) MPI_SUCCESS
 #define MPI_Type_indexed(count,array_of_blocklengths,\
                          array_of_displacements, oldtype,\
                          newtype) MPI_SUCCESS
@@ -478,8 +502,6 @@ extern double ESMC_MPI_Wtime(void);
   (*(address) = (long)(char *)(location),MPI_SUCCESS)
 #define MPI_Type_extent(datatype,extent) \
   MPI_Abort(MPI_COMM_WORLD,0)
-#define MPI_Type_size(datatype,size) \
-  MPI_Abort(MPI_COMM_WORLD,0)
 #define MPI_Type_lb(datatype,displacement) \
   MPI_Abort(MPI_COMM_WORLD,0)
 #define MPI_Type_ub(datatype,displacement) \
@@ -513,7 +535,7 @@ extern double ESMC_MPI_Wtime(void);
   MPIUNI_TMP = (void*)(long) (root),\
   MPIUNI_TMP = (void*)(long) (recvtype),\
   MPIUNI_TMP = (void*)(long) (comm),\
-  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype)),\
+  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype),CHECK_FOR_MPI_IN_PLACE_SOURCE), \
   MPI_SUCCESS)
 #define MPI_Gatherv(sendbuf,sendcount, sendtype,\
                     recvbuf,recvcounts,displs,\
@@ -523,7 +545,7 @@ extern double ESMC_MPI_Wtime(void);
   MPIUNI_TMP = (void*)(long) (recvtype),\
   MPIUNI_TMP = (void*)(long) (root),\
   MPIUNI_TMP = (void*)(long) (comm),\
-  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype)),\
+  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype),CHECK_FOR_MPI_IN_PLACE_SOURCE), \
   MPI_SUCCESS)
 #define MPI_Scatter(sendbuf,sendcount, sendtype,\
                     recvbuf,recvcount, recvtype,\
@@ -536,32 +558,20 @@ extern double ESMC_MPI_Wtime(void);
   MPIUNI_TMP = (void*)(long) (recvtype),\
   MPIUNI_TMP = (void*)(long) (root),\
   MPIUNI_TMP = (void*)(long) (comm),MPI_Abort(MPI_COMM_WORLD,0))
-#define MPI_Scatterv(sendbuf,sendcounts,displs,\
-                     sendtype, recvbuf,recvcount,\
-                     recvtype,root,comm) \
-  (MPIUNI_TMP = (void*)(long) (sendbuf),\
-  MPIUNI_TMP = (void*)(long) (sendcounts),\
-  MPIUNI_TMP = (void*)(long) (displs),\
-  MPIUNI_TMP = (void*)(long) (sendtype),\
-  MPIUNI_TMP = (void*)(long) (recvbuf),\
-  MPIUNI_TMP = (void*)(long) (recvcount),\
-  MPIUNI_TMP = (void*)(long) (recvtype),\
-  MPIUNI_TMP = (void*)(long) (root),\
-  MPIUNI_TMP = (void*)(long) (comm),MPI_Abort(MPI_COMM_WORLD,0))
 #define MPI_Allgather(sendbuf,sendcount, sendtype,\
                       recvbuf,recvcount, recvtype,comm) \
   (MPIUNI_TMP = (void*)(long) (recvcount),\
   MPIUNI_TMP = (void*)(long) (recvtype),\
   MPIUNI_TMP = (void*)(long) (comm),\
-  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype)),\
+  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype),CHECK_FOR_MPI_IN_PLACE_SOURCE), \
   MPI_SUCCESS)
 #define MPI_Allgatherv(sendbuf,sendcount, sendtype,\
                        recvbuf,recvcounts,displs,recvtype,comm) \
   (MPIUNI_TMP = (void*)(long) (recvcounts),\
   MPIUNI_TMP = (void*)(long) (displs),\
   MPIUNI_TMP = (void*)(long) (recvtype),\
   MPIUNI_TMP = (void*)(long) (comm),\
-  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype)),\
+  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype),CHECK_FOR_MPI_IN_PLACE_SOURCE), \
   MPI_SUCCESS)
 #define MPI_Alltoall(sendbuf,sendcount, sendtype,\
                      recvbuf,recvcount, recvtype,\
@@ -571,13 +581,13 @@ extern double ESMC_MPI_Wtime(void);
                       rdispls, recvtype,comm) MPI_Abort(MPI_COMM_WORLD,0)
 #define MPI_Reduce(sendbuf, recvbuf,count,\
                    datatype,op,root,comm) \
-  (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
+  (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype),CHECK_FOR_MPI_IN_PLACE_SOURCE), \
   MPIUNI_TMP = (void*)(long) (comm),MPI_SUCCESS)
 #define MPI_Allreduce(sendbuf, recvbuf,count,datatype,op,comm) \
-  (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
+  (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype),CHECK_FOR_MPI_IN_PLACE_SOURCE), \
   MPIUNI_TMP = (void*)(long) (comm),MPI_SUCCESS)
 #define MPI_Scan(sendbuf, recvbuf,count,datatype,op,comm) \
-  (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
+  (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype),CHECK_FOR_MPI_IN_PLACE_SOURCE), \
   MPIUNI_TMP = (void*)(long) (comm),MPI_SUCCESS)
 #define MPI_Reduce_scatter(sendbuf, recvbuf,recvcounts,\
                            datatype,op,comm) \
@@ -626,6 +636,15 @@ extern double ESMC_MPI_Wtime(void);
                            remote_leader,tag,newintercomm) MPI_SUCCESS
 #define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS

+#define MPI_Info_create(info) \
+  (MPIUNI_TMP = (void*)(long) (info),\
+  MPI_SUCCESS)
+#define MPI_Info_set(info,key,value) \
+  (MPIUNI_TMP = (void*)(long) (info),\
+  MPIUNI_TMP = (void*)(long) (key),\
+  MPIUNI_TMP = (void*)(long) (value),\
+  MPI_SUCCESS)
+
 #define MPI_Topo_test(comm,status) MPI_SUCCESS
 #define MPI_Cart_create(comm_old,ndims,dims,periods,\
                         reorder,comm_cart) MPI_SUCCESS
@@ -649,7 +668,7 @@ extern double ESMC_MPI_Wtime(void);
 #define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPI_Abort(MPI_COMM_WORLD,0)
 #define MPI_Graph_map(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
 #define MPI_Get_processor_name(name,result_len) \
-  (MPIUNI_Memcpy(name,"localhost",9*sizeof(char)),name[10] = 0,*(result_len) = 10)
+  (MPIUNI_Memcpy(name,"localhost",9*sizeof(char),CHECK_FOR_MPI_IN_PLACE_NONE),name[10] = 0,*(result_len) = 10)
 #define MPI_Errhandler_create(function,errhandler) \
   (MPIUNI_TMP = (void*)(long) (errhandler),\
   MPI_SUCCESS)
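For orientation, here is a small illustrative translation unit (not part of the commit) showing how standard MPI calls of the kind PIO-style code makes resolve under the updated header; the driver, variable names, and hint key/value strings are arbitrary examples.

#include "mpi.h"   /* mpiuni's stub header */

int main(void) {
  /* MPI_Info_create/MPI_Info_set now expand to no-ops that evaluate their
     arguments and return MPI_SUCCESS, so hint-setting code compiles and
     runs unchanged with mpiuni. */
  MPI_Info info = MPI_INFO_NULL;
  MPI_Info_create(&info);
  MPI_Info_set(info, "cb_buffer_size", "1048576");

  /* The derived-type helpers follow the datatype-as-size convention: the
     new handle is simply count*blocklength*oldtype bytes. */
  MPI_Datatype newtype = MPI_DATATYPE_NULL;
  MPI_Type_create_hvector(2, 3, 0, MPI_DOUBLE, &newtype);
  /* newtype == 6*sizeof(double) */

  return 0;
}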

src/Infrastructure/stubs/mpiuni/mpirun

Lines changed: 6 additions & 3 deletions
@@ -33,11 +33,14 @@ do
 done

 if [ $# -gt 0 ]; then
-  # If relative path is used prepend a ./
-  progname=`dirname $1`/`basename $1`
+  progname=$1
   shift
+  # If the given command isn't in PATH, assume relative path is used, so prepend a ./
+  if ! command -v $progname &> /dev/null; then
+    progname=`dirname $progname`/`basename $progname`
+  fi

-  # Execute the program
+  # Execute the program
   $progname $*
   exit $?
 fi

src/addon/esmpy/doc/install.rst

Lines changed: 0 additions & 1 deletion
@@ -17,7 +17,6 @@ The following packages are *optional*:
 * ESMF installation with NetCDF - required to create :class:`Grids <esmpy.api.grid.Grid>`, :class:`Meshes <esmpy.api.mesh.Mesh>` and :class:`Fields <esmpy.api.field.Field>` from file, and to write regridding weights to file
   - NetCDF must be built as a shared library for ESMPy installation to succeed
 * ESMF installation with PIO (the Parallel IO library) - required to create :class:`Meshes <esmpy.api.mesh.Mesh>` and :class:`Fields <esmpy.api.field.Field>` from file, and to write regridding weights to file
-  - Note that building ESMF with PIO requires building with a real MPI library (not mpiuni)
 * `mpi4py <https://mpi4py.readthedocs.io/en/stable/>`_- python bindings to MPI, needed to run some of the parallel regridding examples
 * `pytest <https://docs.pytest.org/en/7.1.x/>`_ - for testing

