Skip to content

Commit 0cc658c

Browse files
Merge pull request #569 from lkotipal/eVlasiator-dev
eVlasiator update to AMR / 6D runs
2 parents 97d05df + f951e98 commit 0cc658c

File tree

21 files changed

+1178
-700
lines changed

21 files changed

+1178
-700
lines changed
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
# module load gnu9/9.3.0
2+
# module load openmpi4/4.0.5
3+
# module load pmix/3.1.4
4+
CMP = mpicxx
5+
LNK = mpicxx
6+
#======== Vectorization ==========
7+
#Set vector backend type for vlasov solvers, sets precision and length.
8+
#Options:
9+
# AVX: VEC4D_AGNER, VEC4F_AGNER, VEC8F_AGNER
10+
# AVX512: VEC8D_AGNER, VEC16F_AGNER
11+
# Fallback: VEC4D_FALLBACK, VEC4F_FALLBACK, VEC8F_FALLBACK
12+
ifeq ($(DISTRIBUTION_FP_PRECISION),SPF)
13+
#Single-precision
14+
VECTORCLASS = VEC8F_AGNER
15+
else
16+
#Double-precision
17+
VECTORCLASS = VEC4D_AGNER
18+
endif
19+
#======= Compiler and compilation flags =========
20+
# NOTES on compiler flags:
21+
# CXXFLAGS is for compiler flags, they are always used
22+
# MATHFLAGS are for special math etc. flags, these are only applied on solver functions
23+
# LDFLAGS flags for linker
24+
#-DNO_WRITE_AT_ALL: Define to disable write at all to
25+
# avoid memleak (much slower IO)
26+
#-DMPICH_IGNORE_CXX_SEEK: Ignores some multiple definition
27+
# errors that come up when using
28+
# mpi.h in c++ on Cray
29+
#
30+
# CXXFLAGS = -DMPICH_IGNORE_CXX_SEEK
31+
FLAGS =
32+
#GNU flags:
33+
CC_BRAND = gcc
34+
CC_BRAND_VERSION = 9.3.0
35+
CXXFLAGS += -g -O3 -fopenmp -funroll-loops -std=c++17 -W -Wall -Wno-unused -m64 -march=znver2 #-flto
36+
testpackage: CXXFLAGS = -g -O2 -fopenmp -funroll-loops -std=c++17 -m64 -march=znver2
37+
MATHFLAGS = -ffast-math
38+
LDFLAGS = -lrt -lgfortran -std=c++17 -lgomp
39+
LIB_MPI = -lgomp -lmpi
40+
#======== PAPI ==========
41+
#Add PAPI_MEM define to use papi to report memory consumption?
42+
CXXFLAGS += -DPAPI_MEM
43+
#======== Allocator =========
44+
#Use jemalloc instead of system malloc to reduce memory fragmentation? https://github.com/jemalloc/jemalloc
45+
#Configure jemalloc with --with-jemalloc-prefix=je_ when installing it
46+
CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE
47+
# BOOST_VERSION = current trilinos version
48+
# ZOLTAN_VERSION = current trilinos verson
49+
#
50+
#======== Libraries ===========
51+
MPT_BRAND = OpenMPI
52+
MPT_VERSION = 4.0.5
53+
JEMALLOC_VERSION = 5.2.1
54+
LIBRARY_PREFIX_B = /proj/ykempf/libraries
55+
LIBRARY_PREFIX = /proj/group/spacephysics/libraries
56+
#compiled libraries mostly in modules
57+
LIB_PROFILE = -L$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/phiprof/lib -lphiprof -Wl,-rpath=$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/phiprof/lib
58+
INC_PROFILE = -I$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/phiprof/include
59+
LIB_VLSV = -L$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/vlsv -lvlsv -Wl,-rpath=$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/vlsv
60+
INC_VLSV = -I$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/vlsv
61+
LIB_JEMALLOC = -L$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/jemalloc/lib -ljemalloc -Wl,-rpath=$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/jemalloc/lib
62+
INC_JEMALLOC = -I$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/jemalloc/include
63+
LIB_BOOST = -L$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/boost/lib -lboost_program_options -Wl,-rpath=$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/boost/lib
64+
INC_BOOST = -I$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/boost/include
65+
LIB_ZOLTAN = -L$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/zoltan/lib -lzoltan -Wl,-rpath=$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/zoltan/lib
66+
INC_ZOLTAN = -I$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/zoltan/include
67+
LIB_PAPI = -L$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/papi/lib -lpapi -Wl,-rpath=$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/papi/lib
68+
INC_PAPI = -I$(LIBRARY_PREFIX_B)/$(CC_BRAND)/$(CC_BRAND_VERSION)/$(MPT_BRAND)/$(MPT_VERSION)/carrington/papi/include
69+
#header libraries
70+
INC_EIGEN = -I$(LIBRARY_PREFIX)/ -I$(LIBRARY_PREFIX)/Eigen/
71+
INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid/
72+
INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg/
73+
INC_VECTORCLASS = -I/proj/ykempf/vectorclass/version1

Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ OBJS = version.o memoryallocation.o backgroundfield.o quadr.o dipole.o linedipo
196196
IPShock.o object_wrapper.o\
197197
verificationLarmor.o Shocktest.o grid.o ioread.o iowrite.o vlasiator.o logger.o\
198198
common.o parameters.o readparameters.o spatial_cell.o mesh_data_container.o\
199-
vlasovmover.o $(FIELDSOLVER).o fs_common.o fs_limiters.o gridGlue.o
199+
vlasovmover.o $(FIELDSOLVER).o fs_common.o fs_limiters.o gridGlue.o vlsvreaderinterface.o
200200

201201
# Add Vlasov solver objects (depend on mesh: AMR or non-AMR)
202202
ifeq ($(MESH),AMR)

fieldsolver/gridGlue.cpp

Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -867,3 +867,77 @@ void getdBvolFieldsFromFsGrid(
867867
MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE);
868868

869869
}
870+
871+
/*! Communicate the sysBoundaryFlag of each dccrg cell into the corresponding
 * fsgrid technical-grid cells.
 *
 * Uses computeCoupling to determine which remote processes exchange data, then
 * performs a non-blocking send/receive of one int (the boundary flag) per dccrg
 * cell and copies each received flag into every fsgrid cell mapped to that
 * dccrg cell.
 *
 * \param mpiGrid       The dccrg grid carrying fields on the simulation grid
 * \param cells         Local dccrg cells to couple
 * \param technicalGrid Fsgrid technical grid receiving the boundary flags
 */
void feedBoundaryIntoFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
                            const std::vector<CellID>& cells,
                            FsGrid< fsgrids::technical, 2> & technicalGrid) {

   // Sorted list of dccrg cells. cells is typically already sorted, but just to make sure.
   std::vector<CellID> dccrgCells = cells;
   std::sort(dccrgCells.begin(), dccrgCells.end());

   // Data structures for coupling
   std::map<int, std::set<CellID> > onDccrgMapRemoteProcess;
   std::map<int, std::set<CellID> > onFsgridMapRemoteProcess;
   std::map<CellID, std::vector<int64_t> > onFsgridMapCells;

   // map sending process => receive buffer
   std::map<int, std::vector<int> > receivedData;

   // map target process => send buffer
   std::map<int, std::vector<int> > sendData;

   // Lists of pending requests
   std::vector<MPI_Request> sendRequests;
   std::vector<MPI_Request> receiveRequests;

   // Determine which processes we exchange data with, and which fsgrid cells
   // each dccrg cell maps onto.
   computeCoupling(mpiGrid, cells, technicalGrid, onDccrgMapRemoteProcess, onFsgridMapRemoteProcess, onFsgridMapCells);

   // Post receives: one int per dccrg cell expected from each remote process.
   receiveRequests.resize(onFsgridMapRemoteProcess.size());
   int ii = 0;
   for (auto const& receives : onFsgridMapRemoteProcess) {
      const int process = receives.first;
      const int count = receives.second.size();
      receivedData[process].resize(count);
      MPI_Irecv(receivedData[process].data(), count * sizeof(int),
                MPI_BYTE, process, 1, MPI_COMM_WORLD, &(receiveRequests[ii++]));
   }

   // Launch sends: collect the boundary flag of every dccrg cell destined for
   // each target process, then send the whole buffer at once.
   ii = 0;
   sendRequests.resize(onDccrgMapRemoteProcess.size());
   for (auto const& snd : onDccrgMapRemoteProcess) {
      const int targetProc = snd.first;
      auto& sendBuffer = sendData[targetProc];
      for (CellID sendCell : snd.second) {
         // Collect data to send for this dccrg cell
         sendBuffer.push_back(mpiGrid[sendCell]->sysBoundaryFlag);
      }
      // Note: unlike the per-process receive count above, sendBuffer.size()
      // covers all elements to be sent to targetProc.
      MPI_Isend(sendBuffer.data(), sendBuffer.size() * sizeof(int),
                MPI_BYTE, targetProc, 1, MPI_COMM_WORLD, &(sendRequests[ii++]));
   }

   MPI_Waitall(receiveRequests.size(), receiveRequests.data(), MPI_STATUSES_IGNORE);

   // Unpack: one received int per dccrg cell, copied into all fsgrid cells
   // mapped to that dccrg cell.
   for (auto const& receives : onFsgridMapRemoteProcess) {
      const int process = receives.first;             // data received from this process
      const int* receiveBuffer = receivedData[process].data();
      // This part heavily relies on both sender and receiver having cellids
      // sorted (std::set iterates in sorted order on both sides).
      for (auto const& cell : receives.second) {      // loop over dccrg cellids received
         for (auto lid : onFsgridMapCells[cell]) {
            technicalGrid.get(lid)->sysBoundaryFlag = *receiveBuffer;
         }
         receiveBuffer++;
      }
   }

   MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE);
}

fieldsolver/gridGlue.hpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,10 @@ void getDerivativesFromFsGrid(
9797
int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
9898
const std::vector<CellID>& cells);
9999

100+
void feedBoundaryIntoFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
101+
const std::vector<CellID>& cells,
102+
FsGrid< fsgrids::technical, 2> & technicalGrid);
103+
100104

101105
/*! Transfer field data from an FsGrid back into the appropriate CellParams slot in DCCRG
102106
* \param sourceGrid Fieldsolver grid for these quantities

grid.cpp

Lines changed: 25 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -63,10 +63,6 @@ int globalflags::AMRstencilWidth = VLASOV_STENCIL_WIDTH;
6363

6464
extern Logger logFile, diagnostic;
6565

66-
void initVelocityGridGeometry(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid);
67-
void initSpatialCellCoordinates(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid);
68-
void initializeStencils(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid);
69-
7066
void writeVelMesh(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid) {
7167
const vector<CellID>& cells = getLocalCells();
7268

@@ -157,7 +153,10 @@ void initializeGrids(
157153

158154

159155
phiprof::start("Refine spatial cells");
160-
if(P::amrMaxSpatialRefLevel > 0 && project.refineSpatialCells(mpiGrid)) {
156+
recalculateLocalCellsCache();
157+
// refineSpatialCells should be a nop if amrMaxSpatialRefLevel is 0. Make this better later
158+
if(project.refineSpatialCells(mpiGrid)) {
159+
mpiGrid.balance_load();
161160
recalculateLocalCellsCache();
162161
}
163162
phiprof::stop("Refine spatial cells");
@@ -184,6 +183,9 @@ void initializeGrids(
184183
phiprof::start("Set spatial cell coordinates");
185184
initSpatialCellCoordinates(mpiGrid);
186185
phiprof::stop("Set spatial cell coordinates");
186+
187+
SpatialCell::set_mpi_transfer_type(Transfer::CELL_DIMENSIONS);
188+
mpiGrid.update_copies_of_remote_neighbors(SYSBOUNDARIES_NEIGHBORHOOD_ID);
187189

188190
phiprof::start("Initialize system boundary conditions");
189191
if(sysBoundaries.initSysBoundaries(project, P::t_min) == false) {
@@ -204,8 +206,8 @@ void initializeGrids(
204206
// Check refined cells do not touch boundary cells
205207
phiprof::start("Check boundary refinement");
206208
if(!sysBoundaries.checkRefinement(mpiGrid)) {
207-
cerr << "(MAIN) ERROR: Boundary cells must have identical refinement level " << endl;
208-
exit(1);
209+
cerr << "(MAIN) WARNING: Boundary cells don't have identical refinement level " << endl;
210+
//exit(1);
209211
}
210212
phiprof::stop("Check boundary refinement");
211213

@@ -251,7 +253,8 @@ void initializeGrids(
251253

252254
// Update technicalGrid
253255
technicalGrid.updateGhostCells(); // This needs to be done at some point
254-
256+
257+
bool needCurl = false;
255258
if (!P::isRestart) {
256259
//Initial state based on project, background field in all cells
257260
//and other initial values in non-sysboundary cells
@@ -262,12 +265,23 @@ void initializeGrids(
262265
// Each initialization has to be independent to avoid threading problems
263266

264267
// Allow the project to set up data structures for it's setCell calls
265-
bool needCurl=false;
266268
project.setupBeforeSetCell(cells, mpiGrid, needCurl);
267-
if (needCurl==true) {
268-
// Communicate the perturbed B-fields and E-fields read from the start file over to FSgrid
269+
if (needCurl) {
269270
feedPerBIntoFsGrid(mpiGrid, cells, perBGrid);
270271
perBGrid.updateGhostCells();
272+
}
273+
}
274+
275+
phiprof::start("setProjectBField");
276+
project.setProjectBField(perBGrid, BgBGrid, technicalGrid);
277+
// Set E field here as well?
278+
perBGrid.updateGhostCells();
279+
BgBGrid.updateGhostCells();
280+
EGrid.updateGhostCells();
281+
phiprof::stop("setProjectBField");
282+
283+
if (!P::isRestart) {
284+
if (needCurl) {
271285
// E is needed only because both volumetric fields are calculated in 1 call
272286
feedEIntoFsGrid(mpiGrid, cells, EGrid);
273287
EGrid.updateGhostCells();
@@ -367,13 +381,6 @@ void initializeGrids(
367381
phiprof::stop("Init moments");
368382
}
369383

370-
phiprof::start("setProjectBField");
371-
project.setProjectBField(perBGrid, BgBGrid, technicalGrid);
372-
perBGrid.updateGhostCells();
373-
BgBGrid.updateGhostCells();
374-
EGrid.updateGhostCells();
375-
phiprof::stop("setProjectBField");
376-
377384
phiprof::start("Finish fsgrid setup");
378385
feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,technicalGrid, false);
379386
if(!P::isRestart) {

grid.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,5 +115,8 @@ void shrink_to_fit_grid_data(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>
115115
bool validateMesh(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,const uint popID);
116116

117117
void setFaceNeighborRanks( dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid );
118+
void initVelocityGridGeometry(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid);
119+
void initSpatialCellCoordinates(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid);
120+
void initializeStencils(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid);
118121

119122
#endif

0 commit comments

Comments
 (0)