
Commit c468fb7

FieldsIO implementation for 0D to 2D cartesian grid fields, with MPI (#512)
* TL: added fieldsIO implementation to helpers
* TL: fixed the fieldsIO when mpi4py not available
* TL: SO IMPORTANT CHANGE THANKS BLACK !!!
* TL: cleaning and docstrings
* TL: added a small comment
* TL: tentative to solve static typing issue with no mpi4py
* TL: forgot black, ofc
* TL: introducing modern python tactics in ci 🤓
* TL: attempt to solve the fenics tests
* TL: pip is a little trickster 😅
* TL: satisfying thomas's requests
* TL: seems like ruff is my new best friend
* TL: renaming grid[...] to coord[...]
* TL: last fixes
1 parent b1657fd commit c468fb7

File tree

5 files changed: +1269 additions, 0 deletions


.github/workflows/ci_pipeline.yml

Lines changed: 3 additions & 0 deletions
@@ -55,6 +55,9 @@ jobs:
       - name: Install additional packages as needed
         run: |
           micromamba install -y --file etc/environment-tests.yml --freeze-installed
+      - name: Install pySDC as a package in the current environment
+        run: |
+          pip install --no-deps -e .
       - name: Run pytest for CPU stuff
         run: |
           echo "print('Loading sitecustomize.py...')

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -10,6 +10,7 @@ step_*.png
 *.swp
 *_data.json
 !_dataRef.json
+*.pysdc

 # Created by https://www.gitignore.io


pySDC/helpers/blocks.py

Lines changed: 138 additions & 0 deletions
@@ -0,0 +1,138 @@
class BlockDecomposition(object):
    """
    Class decomposing a cartesian space domain (1D to 3D) into a given number of processors.

    Parameters
    ----------
    nProcs : int
        Total number of processors for space block decomposition.
    gridSizes : list[int]
        Number of grid points in each dimension.
    algo : str, optional
        Algorithm used for the block decomposition:

        - Hybrid : approach minimizing interface communication, inspired by
          the `[Hybrid CFD solver] <https://web.stanford.edu/group/ctr/ResBriefs07/5_larsson1_pp47_58.pdf>`_.
        - ChatGPT : quickly generated using `[ChatGPT] <https://chatgpt.com>`_.

        The default is "Hybrid".
    gRank : int, optional
        If provided, the global rank that will determine the local block distribution. Default is None.
    order : str, optional
        The order used when computing the rank block distribution. Default is `C`.
    """

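    # Illustrative example (expected behaviour, not checked at runtime): for
    # nProcs=4 and gridSizes=[256, 64], algo="ChatGPT" should return
    # nBlocks=[2, 2] (plain prime factorization), while the default
    # algo="Hybrid" should return nBlocks=[4, 1], splitting only the larger
    # dimension so that the interfaces between neighbouring blocks stay small.
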
    def __init__(self, nProcs, gridSizes, algo="Hybrid", gRank=None, order="C"):
        dim = len(gridSizes)
        assert dim in [1, 2, 3], "block decomposition only works for 1D, 2D or 3D domains"

        if algo == "ChatGPT":

            # Factorize nProcs, always multiplying the currently smallest block count
            nBlocks = [1] * dim
            for i in range(2, int(nProcs**0.5) + 1):
                while nProcs % i == 0:
                    nBlocks[0] *= i
                    nProcs //= i
                    nBlocks.sort()

            if nProcs > 1:
                nBlocks[0] *= nProcs

            nBlocks.sort()

            while len(nBlocks) < dim:
                smallest = nBlocks.pop(0)
                nBlocks += [1, smallest]
                nBlocks.sort()

            while len(nBlocks) > dim:
                smallest = nBlocks.pop(0)
                next_smallest = nBlocks.pop(0)
                nBlocks.append(smallest * next_smallest)
                nBlocks.sort()

        elif algo == "Hybrid":
            # Decompose nProcs into prime factors 2 (and 3 in 3D);
            # any remainder becomes one additional factor
            rest = nProcs
            facs = {
                1: [1],
                2: [2, 1],
                3: [2, 3, 1],
            }[dim]
            exps = [0] * dim
            for n in range(dim - 1):
                while (rest % facs[n]) == 0:
                    exps[n] = exps[n] + 1
                    rest = rest // facs[n]
            if rest > 1:
                facs[dim - 1] = rest
                exps[dim - 1] = 1

            # Greedily assign each factor to the dimension with the largest local block
            nBlocks = [1] * dim
            for n in range(dim - 1, -1, -1):
                while exps[n] > 0:
                    dummymax = -1
                    dmax = 0
                    for d, nPts in enumerate(gridSizes):
                        dummy = (nPts + nBlocks[d] - 1) // nBlocks[d]
                        if dummy >= dummymax:
                            dummymax = dummy
                            dmax = d
                    nBlocks[dmax] = nBlocks[dmax] * facs[n]
                    exps[n] = exps[n] - 1

        else:
            raise NotImplementedError(f"algo={algo}")

        # Store attributes
        self.dim = dim
        self.nBlocks = nBlocks
        self.gridSizes = gridSizes

        # Used for rank block distribution
        self.gRank = gRank
        self.order = order

    @property
    def ranks(self):
        gRank, order = self.gRank, self.order
        assert gRank is not None, "gRank attribute needs to be set"
        dim, nBlocks = self.dim, self.nBlocks
        if dim == 1:
            return (gRank,)
        elif dim == 2:
            div = nBlocks[-1] if order == "C" else nBlocks[0]
            return (gRank // div, gRank % div)
        else:
            raise NotImplementedError(f"dim={dim}")

    @property
    def localBounds(self):
        iLocList, nLocList = [], []
        for rank, nPoints, nBlocks in zip(self.ranks, self.gridSizes, self.nBlocks):
            # Distribute nPoints as evenly as possible:
            # the first nRest ranks get one extra point
            n0 = nPoints // nBlocks
            nRest = nPoints - nBlocks * n0
            nLoc = n0 + 1 * (rank < nRest)
            iLoc = rank * n0 + nRest * (rank >= nRest) + rank * (rank < nRest)

            iLocList.append(iLoc)
            nLocList.append(nLoc)
        return iLocList, nLocList


if __name__ == "__main__":
    # Base usage of this module for a 2D decomposition
    from mpi4py import MPI
    from time import sleep

    comm: MPI.Intracomm = MPI.COMM_WORLD
    MPI_SIZE = comm.Get_size()
    MPI_RANK = comm.Get_rank()

    blocks = BlockDecomposition(MPI_SIZE, [256, 64], gRank=MPI_RANK)
    if MPI_RANK == 0:
        print(f"nBlocks : {blocks.nBlocks}")

    ranks = blocks.ranks
    bounds = blocks.localBounds

    comm.Barrier()
    sleep(0.01 * MPI_RANK)
    print(f"[Rank {MPI_RANK}] pRankX={ranks}, bounds={bounds}")
