Commit ed5aeef

added parallel tests for the compression playground
1 parent 07f5727

3 files changed: +426 -0 lines changed
Lines changed: 153 additions & 0 deletions (run_parallel_AC_MPIFFT.py)
@@ -0,0 +1,153 @@
from argparse import ArgumentParser
import numpy as np
from mpi4py import MPI

from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.problem_classes.AllenCahn_MPIFFT import allencahn_imex, allencahn_imex_timeforcing
from pySDC.implementations.transfer_classes.TransferMesh_MPIFFT import fft_to_fft
from pySDC.projects.AllenCahn_Bayreuth.AllenCahn_dump import dump


def run_simulation(name=None, nprocs_space=None):
    """
    A simple test program to do PFASST runs for the AC equation
    """

    # set MPI communicator
    comm = MPI.COMM_WORLD

    world_rank = comm.Get_rank()
    world_size = comm.Get_size()

    # split world communicator to create space-communicators
    if nprocs_space is not None:
        color = int(world_rank / nprocs_space)
    else:
        color = int(world_rank / 1)
    space_comm = comm.Split(color=color)
    space_comm.Set_name('Space-Comm')
    space_size = space_comm.Get_size()
    space_rank = space_comm.Get_rank()

    # split world communicator to create time-communicators
    if nprocs_space is not None:
        color = int(world_rank % nprocs_space)
    else:
        color = int(world_rank / world_size)
    time_comm = comm.Split(color=color)
    time_comm.Set_name('Time-Comm')
    time_size = time_comm.Get_size()
    time_rank = time_comm.Get_rank()

    # print(time_size, space_size, world_size)

    # initialize level parameters
    level_params = dict()
    level_params['restol'] = 1E-08
    level_params['dt'] = 1E-03
    level_params['nsweeps'] = [3, 1]

    # initialize sweeper parameters
    sweeper_params = dict()
    sweeper_params['collocation_class'] = CollGaussRadau_Right
    sweeper_params['num_nodes'] = [3]
    sweeper_params['QI'] = ['LU']  # For the IMEX sweeper, the LU-trick can be activated for the implicit part
    sweeper_params['initial_guess'] = 'zero'

    # initialize problem parameters
    problem_params = dict()
    # This defines the number of 'patches' for the simulation per dimension in 2D. L=4 means: 4x4 patches
    problem_params['L'] = 4.0
    # problem_params['L'] = 16.0
    # This defines the number of nodes in space, ideally with about 144 nodes per patch (48 * 12 / 4)
    problem_params['nvars'] = [(48 * 12, 48 * 12), (8 * 12, 8 * 12)]
    # problem_params['nvars'] = [(48 * 48, 48 * 48), (8 * 48, 8 * 48)]
    problem_params['eps'] = [0.04]
    problem_params['radius'] = 0.25
    problem_params['comm'] = space_comm
    problem_params['name'] = name
    problem_params['init_type'] = 'circle_rand'
    problem_params['spectral'] = False

    if name == 'AC-bench-constforce':
        problem_params['dw'] = [-23.59]

    # initialize step parameters
    step_params = dict()
    step_params['maxiter'] = 50

    # initialize controller parameters
    controller_params = dict()
    controller_params['logger_level'] = 20 if space_rank == 0 else 99  # set level depending on rank
    controller_params['predict_type'] = 'fine_only'
    # controller_params['hook_class'] = dump  # activate to get data output at each step

    # fill description dictionary for easy step instantiation
    description = dict()
    description['problem_params'] = problem_params  # pass problem parameters
    description['sweeper_class'] = imex_1st_order
    description['sweeper_params'] = sweeper_params  # pass sweeper parameters
    description['level_params'] = level_params  # pass level parameters
    description['step_params'] = step_params  # pass step parameters
    description['space_transfer_class'] = fft_to_fft

    if name == 'AC-bench-noforce' or name == 'AC-bench-constforce':
        description['problem_class'] = allencahn_imex
    elif name == 'AC-bench-timeforce':
        description['problem_class'] = allencahn_imex_timeforcing
    else:
        raise NotImplementedError(f'{name} is not implemented')

    # set time parameters
    t0 = 0.0
    Tend = 4 * 0.001

    if space_rank == 0 and time_rank == 0:
        out = f'---------> Running {name} with {time_size} process(es) in time and {space_size} process(es) in space...'
        print(out)

    # instantiate controller
    controller = controller_MPI(controller_params=controller_params, description=description, comm=time_comm)

    # get initial values on finest level
    P = controller.S.levels[0].prob
    uinit = P.u_exact(t0)

    # call main function to get things done...
    uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)

    timing = sort_stats(filter_stats(stats, type='timing_setup'), sortby='time')
    max_timing_setup = time_comm.allreduce(timing[0][1], MPI.MAX)
    timing = sort_stats(filter_stats(stats, type='timing_run'), sortby='time')
    max_timing = time_comm.allreduce(timing[0][1], MPI.MAX)

    if space_rank == 0 and time_rank == time_size - 1:
        print()

        out = f'Setup time: {max_timing_setup:.4f} sec.'
        print(out)

        out = f'Time to solution: {max_timing:.4f} sec.'
        print(out)

        iter_counts = sort_stats(filter_stats(stats, type='niter'), sortby='time')
        niters = np.array([item[1] for item in iter_counts])
        out = f'Mean number of iterations: {np.mean(niters):.4f}'
        print(out)


if __name__ == "__main__":
    # Add parser to get number of processors in space and setup (have to do this here to enable automatic testing)
    # Run this file via `mpirun -np N python run_parallel_AC_MPIFFT.py -n P`,
    # where N is the overall number of processors and P is the number of processors used for spatial parallelization.
    parser = ArgumentParser()
    parser.add_argument("-s", "--setup", help='Specifies the setup', type=str, default='AC-bench-noforce',
                        choices=['AC-bench-noforce', 'AC-bench-constforce', 'AC-bench-timeforce'])
    parser.add_argument("-n", "--nprocs_space", help='Specifies the number of processors in space', type=int)
    args = parser.parse_args()

    run_simulation(name=args.setup, nprocs_space=args.nprocs_space)
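For orientation, the two `comm.Split` calls above carve `MPI.COMM_WORLD` into a 2D process grid: ranks with the same integer-division result share a space communicator, and ranks with the same remainder share a time communicator. The following minimal, self-contained sketch (not part of the commit; values and the file name `split_demo.py` are illustrative) shows the same pattern and can be run via `mpirun -np 6 python split_demo.py`:

from mpi4py import MPI

nprocs_space = 2  # illustrative; the script above takes this via the -n flag

comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()

# same color -> same sub-communicator; with 6 ranks and nprocs_space=2,
# space groups are {0,1}, {2,3}, {4,5} and time groups are {0,2,4}, {1,3,5}
space_comm = comm.Split(color=world_rank // nprocs_space)
time_comm = comm.Split(color=world_rank % nprocs_space)

print(f'world rank {world_rank}: space rank {space_comm.Get_rank()}, time rank {time_comm.Get_rank()}')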
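The timing output at the end of the run uses an allreduce with MPI.MAX so that the reported setup and run times reflect the slowest rank in the time communicator, which is what determines the actual wall-clock cost. A minimal sketch of that reduction pattern (stand-in numbers, not part of the commit):

from mpi4py import MPI

comm = MPI.COMM_WORLD

# stand-in for timing[0][1], i.e. the per-rank timing extracted from the stats
local_timing = 1.0 + 0.1 * comm.Get_rank()

# every rank receives the maximum over all ranks
max_timing = comm.allreduce(local_timing, MPI.MAX)

if comm.Get_rank() == 0:
    print(f'Time to solution: {max_timing:.4f} sec.')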
Lines changed: 123 additions & 0 deletions
@@ -0,0 +1,123 @@
from mpi4py import MPI

from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.problem_classes.HeatEquation_1D_FD import heat1d
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh


def set_parameters_ml():
    """
    Helper routine to set parameters for the following multi-level runs

    Returns:
        dict: dictionary containing the simulation parameters
        dict: dictionary containing the controller parameters
        float: starting time
        float: end time
    """
    # initialize level parameters
    level_params = dict()
    level_params['restol'] = 5E-10
    level_params['dt'] = 0.125

    # initialize sweeper parameters
    sweeper_params = dict()
    sweeper_params['collocation_class'] = CollGaussRadau_Right
    sweeper_params['QI'] = 'LU'
    sweeper_params['num_nodes'] = [3]

    # initialize problem parameters
    problem_params = dict()
    problem_params['nu'] = 0.1  # diffusion coefficient
    problem_params['freq'] = 2  # frequency for the test value
    problem_params['nvars'] = [63, 31]  # number of degrees of freedom for each level

    # initialize step parameters
    step_params = dict()
    step_params['maxiter'] = 50
    step_params['errtol'] = 1E-05

    # initialize space transfer parameters
    space_transfer_params = dict()
    space_transfer_params['rorder'] = 2
    space_transfer_params['iorder'] = 6

    # initialize controller parameters
    controller_params = dict()
    controller_params['logger_level'] = 30
    controller_params['all_to_done'] = True  # can ask the controller to keep iterating all steps until the end
    controller_params['use_iteration_estimator'] = False  # set to True to activate the iteration estimator

    # fill description dictionary for easy step instantiation
    description = dict()
    description['problem_class'] = heat1d  # pass problem class
    description['problem_params'] = problem_params  # pass problem parameters
    description['sweeper_class'] = generic_implicit  # pass sweeper
    description['sweeper_params'] = sweeper_params  # pass sweeper parameters
    description['level_params'] = level_params  # pass level parameters
    description['step_params'] = step_params  # pass step parameters
    description['space_transfer_class'] = mesh_to_mesh  # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer

    # set time parameters
    t0 = 0.0
    Tend = 1.0

    return description, controller_params, t0, Tend


if __name__ == "__main__":
    """
    A simple test program to do MPI-parallel PFASST runs
    """

    # set MPI communicator
    comm = MPI.COMM_WORLD

    # get parameters from the helper routine above
    description, controller_params, t0, Tend = set_parameters_ml()

    # instantiate controller
    controller = controller_MPI(controller_params=controller_params, description=description, comm=comm)
    # get initial values on finest level
    P = controller.S.levels[0].prob
    uinit = P.u_exact(t0)

    # call main function to get things done...
    uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)

    # filter statistics by type (number of iterations)
    filtered_stats = filter_stats(stats, type='niter')

    # convert filtered statistics to list of iteration counts, sorted by process
    iter_counts = sort_stats(filtered_stats, sortby='time')

    # gather the per-rank lists of statistics on rank 0
    iter_counts_list = comm.gather(iter_counts, root=0)

    rank = comm.Get_rank()
    size = comm.Get_size()

    if rank == 0:

        out = 'Working with %2i processes...' % size
        print(out)

        # compute exact solution and compare with the final result
        uex = P.u_exact(Tend)
        err = abs(uex - uend)

        out = 'Error vs. exact solution: %12.8e' % err
        print(out)

        # build one flat list of statistics instead of a list of lists, then sort by time
        iter_counts_gather = [item for sublist in iter_counts_list for item in sublist]
        iter_counts = sorted(iter_counts_gather, key=lambda tup: tup[0])

        # compute and print statistics
        for item in iter_counts:
            out = 'Number of iterations for time %4.2f: %1i ' % (item[0], item[1])
            print(out)
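The post-processing above gathers one list of (time, niter) tuples per rank and flattens them on the root rank before sorting by simulation time. A minimal standalone sketch of that gather-and-flatten pattern (stand-in data, not part of the commit):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# stand-in for the per-rank iter_counts list produced by sort_stats
local_stats = [(0.125 * (rank + 1), 7)]

# gather returns a list of lists on root (and None elsewhere)
gathered = comm.gather(local_stats, root=0)

if rank == 0:
    flat = [item for sublist in gathered for item in sublist]
    flat.sort(key=lambda tup: tup[0])  # sort by time, as in the script above
    for t, niter in flat:
        print('Number of iterations for time %4.2f: %1i' % (t, niter))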
