Commit 5491e18

Implemented MPI-parallel multilevel diagonal SDC (#427)
* Implemented MPI-parallel multilevel diagonal SDC
* Minor cleanup
* Fixed tau correction in `base_transfer_MPI`
* Removed duplicate implementation in project
1 parent a549854 commit 5491e18

File tree

8 files changed: +387 -280 lines changed

pySDC/core/BaseTransfer.py

Lines changed: 0 additions & 1 deletion
@@ -45,7 +45,6 @@ def __init__(self, fine_level, coarse_level, base_transfer_params, space_transfe
         # set up logger
         self.logger = logging.getLogger('transfer')

-        # just copy by object
         self.fine = fine_level
         self.coarse = coarse_level

pySDC/implementations/sweeper_classes/generic_implicit_MPI.py

Lines changed: 9 additions & 1 deletion
@@ -148,6 +148,13 @@ def predict(self):
         L.status.unlocked = True
         L.status.updated = True

+    def communicate_tau_correction_for_full_interval(self):
+        L = self.level
+        P = L.prob
+        if self.rank < self.comm.size - 1:
+            L.tau[-1] = P.u_init
+        self.comm.Bcast(L.tau[-1], root=self.comm.size - 1)
+

 class generic_implicit_MPI(SweeperMPI, generic_implicit):
     """

@@ -250,6 +257,7 @@ def compute_end_point(self):
             L.uend += L.u[0]

             # add up tau correction of the full interval (last entry)
-            if L.tau[-1] is not None:
+            if L.tau[self.rank] is not None:
+                self.communicate_tau_correction_for_full_interval()
                 L.uend += L.tau[-1]
         return None

pySDC/implementations/sweeper_classes/imex_1st_order_MPI.py

Lines changed: 2 additions & 1 deletion
@@ -107,6 +107,7 @@ def compute_end_point(self):
             L.uend += L.u[0]

             # add up tau correction of the full interval (last entry)
-            if L.tau[-1] is not None:
+            if L.tau[self.rank] is not None:
+                self.communicate_tau_correction_for_full_interval()
                 L.uend += L.tau[-1]
         return None
pySDC/implementations/transfer_classes/BaseTransferMPI.py (new file)

Lines changed: 176 additions & 0 deletions

from mpi4py import MPI

from pySDC.core.Errors import UnlockError
from pySDC.core.BaseTransfer import base_transfer


class base_transfer_MPI(base_transfer):
    """
    Standard base_transfer class

    Attributes:
        logger: custom logger for sweeper-related logging
        params(__Pars): parameter object containing the custom parameters passed by the user
        fine (pySDC.Level.level): reference to the fine level
        coarse (pySDC.Level.level): reference to the coarse level
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.comm_fine = self.fine.sweep.comm
        self.comm_coarse = self.coarse.sweep.comm

        if (
            self.comm_fine.size != self.fine.sweep.coll.num_nodes
            or self.comm_coarse.size != self.coarse.sweep.coll.num_nodes
        ):
            raise NotImplementedError(
                f'{type(self).__name__} only works when each rank administers one collocation node so far!'
            )

    def restrict(self):
        """
        Space-time restriction routine

        The routine applies the spatial restriction operator to the fine values on the fine nodes, then reevaluates f
        on the coarse level. This is used for the first part of the FAS correction tau via integration. The second part
        is the integral over the fine values, restricted to the coarse level. Finally, possible tau corrections on the
        fine level are restricted as well.
        """

        F, G = self.fine, self.coarse
        CF, CG = self.comm_fine, self.comm_coarse
        SG = G.sweep
        PG = G.prob

        # only if the level is unlocked at least by prediction
        if not F.status.unlocked:
            raise UnlockError('fine level is still locked, cannot use data from there')

        # restrict fine values in space
        tmp_u = self.space_transfer.restrict(F.u[CF.rank + 1])

        # restrict collocation values
        G.u[0] = self.space_transfer.restrict(F.u[0])
        recvBuf = [None for _ in range(SG.coll.num_nodes)]
        recvBuf[CG.rank] = PG.u_init
        for n in range(SG.coll.num_nodes):
            CF.Reduce(self.Rcoll[n, CF.rank] * tmp_u, recvBuf[CG.rank], root=n, op=MPI.SUM)
        G.u[CG.rank + 1] = recvBuf[CG.rank]

        # re-evaluate f on coarse level
        G.f[0] = PG.eval_f(G.u[0], G.time)
        G.f[CG.rank + 1] = PG.eval_f(G.u[CG.rank + 1], G.time + G.dt * SG.coll.nodes[CG.rank])

        # build coarse level tau correction part
        tauG = G.sweep.integrate()

        # build fine level tau correction part
        tauF = F.sweep.integrate()

        # restrict fine level tau correction part in space
        tmp_tau = self.space_transfer.restrict(tauF)

        # restrict fine level tau correction part in collocation
        tauFG = tmp_tau.copy()
        for n in range(SG.coll.num_nodes):
            recvBuf = tauFG if n == CG.rank else None
            CF.Reduce(self.Rcoll[n, CF.rank] * tmp_tau, recvBuf, root=n, op=MPI.SUM)

        # build tau correction
        G.tau[CG.rank] = tauFG - tauG

        if F.tau[CF.rank] is not None:
            tmp_tau = self.space_transfer.restrict(F.tau[CF.rank])

            # restrict possible tau correction from fine in collocation
            recvBuf = [None for _ in range(SG.coll.num_nodes)]
            recvBuf[CG.rank] = PG.u_init
            for n in range(SG.coll.num_nodes):
                CF.Reduce(self.Rcoll[n, CF.rank] * tmp_tau, recvBuf[CG.rank], root=n, op=MPI.SUM)
            G.tau[CG.rank] += recvBuf[CG.rank]
        else:
            pass

        # save u and rhs evaluations for interpolation
        G.uold[CG.rank + 1] = PG.dtype_u(G.u[CG.rank + 1])
        G.fold[CG.rank + 1] = PG.dtype_f(G.f[CG.rank + 1])

        # works as a predictor
        G.status.unlocked = True

        return None

    def prolong(self):
        """
        Space-time prolongation routine

        This routine applies the spatial prolongation routine to the difference between the computed and the restricted
        values on the coarse level and then adds this difference to the fine values as coarse correction.
        """

        # get data for easier access
        F, G = self.fine, self.coarse
        CF, CG = self.comm_fine, self.comm_coarse
        SF = F.sweep
        PF = F.prob

        # only of the level is unlocked at least by prediction or restriction
        if not G.status.unlocked:
            raise UnlockError('coarse level is still locked, cannot use data from there')

        # build coarse correction

        # interpolate values in space first
        tmp_u = self.space_transfer.prolong(G.u[CF.rank + 1] - G.uold[CF.rank + 1])

        # interpolate values in collocation
        recvBuf = [None for _ in range(SF.coll.num_nodes)]
        recvBuf[CF.rank] = F.u[CF.rank + 1].copy()
        for n in range(SF.coll.num_nodes):

            CG.Reduce(self.Pcoll[n, CG.rank] * tmp_u, recvBuf[n], root=n, op=MPI.SUM)
        F.u[CF.rank + 1] += recvBuf[CF.rank]

        # re-evaluate f on fine level
        F.f[CF.rank + 1] = PF.eval_f(F.u[CF.rank + 1], F.time + F.dt * SF.coll.nodes[CF.rank])

        return None

    def prolong_f(self):
        """
        Space-time prolongation routine w.r.t. the rhs f

        This routine applies the spatial prolongation routine to the difference between the computed and the restricted
        values on the coarse level and then adds this difference to the fine values as coarse correction.
        """

        # get data for easier access
        F, G = self.fine, self.coarse
        CF, CG = self.comm_fine, self.comm_coarse
        SF = F.sweep

        # only of the level is unlocked at least by prediction or restriction
        if not G.status.unlocked:
            raise UnlockError('coarse level is still locked, cannot use data from there')

        # build coarse correction

        # interpolate values in space first
        tmp_u = self.space_transfer.prolong(G.u[CF.rank + 1] - G.uold[CF.rank + 1])
        tmp_f = self.space_transfer.prolong(G.f[CF.rank + 1] - G.fold[CF.rank + 1])

        # interpolate values in collocation
        recvBuf_u = [None for _ in range(SF.coll.num_nodes)]
        recvBuf_f = [None for _ in range(SF.coll.num_nodes)]
        recvBuf_u[CF.rank] = F.u[CF.rank + 1].copy()
        recvBuf_f[CF.rank] = F.f[CF.rank + 1].copy()
        for n in range(SF.coll.num_nodes):

            CG.Reduce(self.Pcoll[n, CG.rank] * tmp_u, recvBuf_u[CF.rank], root=n, op=MPI.SUM)
            CG.Reduce(self.Pcoll[n, CG.rank] * tmp_f, recvBuf_f[CF.rank], root=n, op=MPI.SUM)

        F.u[CF.rank + 1] += recvBuf_u[CF.rank]
        F.f[CF.rank + 1] += recvBuf_f[CF.rank]

        return None
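All three routines above rely on the same node-parallel communication pattern: rank j owns the value at collocation node j, and entry m of a collocation transfer matrix (Rcoll or Pcoll) applied to the node values is assembled on rank m by a sum-reduction to which every rank contributes its weighted local value. A standalone mpi4py sketch of that pattern (not pySDC code; the matrix and node values are random placeholders):

# Each of the M ranks holds one node value u_local; after the loop, rank m
# holds result = sum_j R[m, j] * u_j, i.e. row m of the matrix-vector product.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
M = comm.size                            # one rank per collocation node
rng = np.random.default_rng(0)           # same seed on every rank -> same matrix

R = rng.random((M, M))                   # stand-in for the restriction matrix Rcoll
u_local = np.full(4, float(comm.rank))   # stand-in for the node value owned by this rank

result = np.zeros(4)                     # only meaningful on the root of each Reduce
for m in range(M):
    comm.Reduce(R[m, comm.rank] * u_local, result if m == comm.rank else None, root=m, op=MPI.SUM)

# on rank m, result now equals the restricted value at coarse node m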

pySDC/projects/parallelSDC/AllenCahn_parallel.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
 from pySDC.implementations.transfer_classes.TransferMesh_FFT2D import mesh_to_mesh_fft2d
 from pySDC.playgrounds.Allen_Cahn.AllenCahn_monitor import monitor
-from pySDC.projects.parallelSDC.BaseTransfer_MPI import base_transfer_MPI
+from pySDC.implementations.transfer_classes.BaseTransferMPI import base_transfer_MPI
 from pySDC.implementations.sweeper_classes.generic_implicit_MPI import generic_implicit_MPI


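The import change above is where the relocated class gets used in the project script. For orientation, here is a hedged sketch of how the MPI sweeper and the node-parallel base transfer might be wired into a pySDC controller description; everything marked as a placeholder (problem, spatial transfer, parameter values) is not part of this commit, and passing 'comm' explicitly is assumed to be optional (MPI.COMM_WORLD as default):

# Hypothetical wiring sketch, not taken from this commit.
from mpi4py import MPI

from pySDC.implementations.sweeper_classes.generic_implicit_MPI import generic_implicit_MPI
from pySDC.implementations.transfer_classes.BaseTransferMPI import base_transfer_MPI

num_nodes = MPI.COMM_WORLD.size  # base_transfer_MPI requires one rank per collocation node

description = {
    # 'problem_class': ...,             # placeholder: any spatially distributed problem
    # 'problem_params': {...},          # placeholder
    # 'space_transfer_class': ...,      # placeholder: spatial transfer between levels
    'sweeper_class': generic_implicit_MPI,
    'sweeper_params': {'quad_type': 'RADAU-RIGHT', 'num_nodes': num_nodes, 'comm': MPI.COMM_WORLD},
    'base_transfer_class': base_transfer_MPI,  # node-parallel restriction/prolongation
    'level_params': {'dt': 1e-3},              # placeholder value
    'step_params': {'maxiter': 10},            # placeholder value
}

# launched with one rank per collocation node, e.g.: mpiexec -n <num_nodes> python run_script.py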