Commit c317c79

TL: added fieldsIO implementation to helpers
1 parent b1657fd commit c317c79

File tree

4 files changed: +919 -0 lines changed


.gitignore

Lines changed: 1 addition & 0 deletions

@@ -10,6 +10,7 @@ step_*.png
 *.swp
 *_data.json
 !_dataRef.json
+*.pysdc

 # Created by https://www.gitignore.io

pySDC/helpers/blocks.py

Lines changed: 137 additions & 0 deletions

@@ -0,0 +1,137 @@
class BlockDecomposition(object):
    """
    Class decomposing a Cartesian space domain (1D to 3D) into a given number of processors.

    Parameters
    ----------
    nProcs : int
        Total number of processors for space block decomposition.
    gridSizes : list[int]
        Number of grid points in each dimension.
    algo : str, optional
        Algorithm used for the block decomposition:

        - Hybrid : approach minimizing interface communication, inspired from
          the `[Hybrid CFD solver] <https://web.stanford.edu/group/ctr/ResBriefs07/5_larsson1_pp47_58.pdf>`_.
        - ChatGPT : quickly generated using `[ChatGPT] <https://chatgpt.com>`_.

        The default is "Hybrid".
    gRank : int, optional
        If provided, the global rank that will determine the local block distribution. Default is None.
    order : str, optional
        The order used when computing the rank block distribution. Default is `C`.
    """

    def __init__(self, nProcs, gridSizes, algo="Hybrid", gRank=None, order="C"):
        dim = len(gridSizes)
        assert dim in [1, 2, 3], "block decomposition only works for 1D, 2D or 3D domains"

        if algo == "ChatGPT":

            nBlocks = [1] * dim
            for i in range(2, int(nProcs**0.5) + 1):
                while nProcs % i == 0:
                    nBlocks[0] *= i
                    nProcs //= i
                    nBlocks.sort()

            if nProcs > 1:
                nBlocks[0] *= nProcs

            nBlocks.sort()
            while len(nBlocks) < dim:
                smallest = nBlocks.pop(0)
                nBlocks += [1, smallest]
                nBlocks.sort()

            while len(nBlocks) > dim:
                smallest = nBlocks.pop(0)
                next_smallest = nBlocks.pop(0)
                nBlocks.append(smallest * next_smallest)
                nBlocks.sort()

        elif algo == "Hybrid":
            rest = nProcs
            facs = {
                1: [1],
                2: [2, 1],
                3: [2, 3, 1],
            }[dim]
            exps = [0] * dim
            for n in range(dim - 1):
                while (rest % facs[n]) == 0:
                    exps[n] = exps[n] + 1
                    rest = rest // facs[n]
            if rest > 1:
                facs[dim - 1] = rest
                exps[dim - 1] = 1

            nBlocks = [1] * dim
            for n in range(dim - 1, -1, -1):
                while exps[n] > 0:
                    dummymax = -1
                    dmax = 0
                    for d, nPts in enumerate(gridSizes):
                        dummy = (nPts + nBlocks[d] - 1) // nBlocks[d]
                        if dummy >= dummymax:
                            dummymax = dummy
                            dmax = d
                    nBlocks[dmax] = nBlocks[dmax] * facs[n]
                    exps[n] = exps[n] - 1

        else:
            raise NotImplementedError(f"algo={algo}")

        # Store attributes
        self.dim = dim
        self.nBlocks = nBlocks
        self.gridSizes = gridSizes

        # Used for rank block distribution
        self.gRank = gRank
        self.order = order

    @property
    def ranks(self):
        gRank, order = self.gRank, self.order
        assert gRank is not None, "gRank attribute needs to be set"
        dim, nBlocks = self.dim, self.nBlocks
        if dim == 1:
            return (gRank,)
        elif dim == 2:
            div = nBlocks[-1] if order == "C" else nBlocks[0]
            return (gRank // div, gRank % div)
        else:
            raise NotImplementedError(f"dim={dim}")

    @property
    def localBounds(self):
        iLocList, nLocList = [], []
        for rank, nPoints, nBlocks in zip(self.ranks, self.gridSizes, self.nBlocks):
            n0 = nPoints // nBlocks
            nRest = nPoints - nBlocks * n0
            nLoc = n0 + 1 * (rank < nRest)
            iLoc = rank * n0 + nRest * (rank >= nRest) + rank * (rank < nRest)

            iLocList.append(iLoc)
            nLocList.append(nLoc)
        return iLocList, nLocList


if __name__ == "__main__":
    from mpi4py import MPI
    from time import sleep

    comm: MPI.Intracomm = MPI.COMM_WORLD
    MPI_SIZE = comm.Get_size()
    MPI_RANK = comm.Get_rank()

    blocks = BlockDecomposition(MPI_SIZE, [256, 64], gRank=MPI_RANK)
    if MPI_RANK == 0:
        print(f"nBlocks : {blocks.nBlocks}")

    ranks = blocks.ranks
    bounds = blocks.localBounds

    comm.Barrier()
    sleep(0.01 * MPI_RANK)
    print(f"[Rank {MPI_RANK}] pRankX={ranks}, bounds={bounds}")

0 commit comments
