
Commit 7a29e1b

Merge pull request #884 from BlueBrain/jblanco/memory_report_init
Improve memory usage in report initialization (#882)
2 parents 8bea2f0 + c0fef44 commit 7a29e1b

21 files changed: +248 −185 lines

.github/workflows/coreneuron-ci.yml

Lines changed: 2 additions & 2 deletions

@@ -36,9 +36,9 @@ jobs:
           - {cmake_option: "-DCORENRN_ENABLE_MPI_DYNAMIC=ON", flag_warnings: ON}
           - {cmake_option: "-DCORENRN_ENABLE_MPI_DYNAMIC=ON -DCORENRN_ENABLE_SHARED=OFF"}
           - {cmake_option: "-DCORENRN_ENABLE_MPI=OFF"}
-          - {use_nmodl: ON, py_version: 3.6.7}
+          - {use_nmodl: ON, py_version: 3.7}
           - {use_nmodl: ON}
-          - {use_ispc: ON, py_version: 3.6.7}
+          - {use_ispc: ON, py_version: 3.7}
         include:
           - os: ubuntu-20.04
             config:

.github/workflows/test-as-submodule.yml

Lines changed: 6 additions & 4 deletions

@@ -28,6 +28,8 @@ jobs:
       fail-fast: false
     env:
       CMAKE_BUILD_PARALLEL_LEVEL: ${{matrix.cores}}
+      SDK_ROOT: $(xcrun --sdk macosx --show-sdk-path)
+
     steps:

       - name: Install homebrew packages
@@ -36,16 +38,16 @@
           brew install bison coreutils flex ninja openmpi
           python3 -m pip install --upgrade numpy pytest pytest-cov
           echo /usr/local/opt/flex/bin:/usr/local/opt/bison/bin >> $GITHUB_PATH
-          echo "CC=clang" >> $GITHUB_ENV
-          echo "CXX=clang++" >> $GITHUB_ENV
+          echo "CC=gcc" >> $GITHUB_ENV
+          echo "CXX=g++" >> $GITHUB_ENV

       - name: Install apt packages
         if: startsWith(matrix.os, 'ubuntu')
         run: |
           sudo apt-get update
           sudo apt-get install bison cython3 flex libfl-dev libopenmpi-dev \
-            ninja-build openmpi-bin python3-dev python3-numpy python3-pytest \
-            python3-pytest-cov
+            ninja-build openmpi-bin python3-dev
+          python3 -m pip install --upgrade numpy pytest pytest-cov
           echo "CC=gcc" >> $GITHUB_ENV
           echo "CXX=g++" >> $GITHUB_ENV


CMake/packages/Findnmodl.cmake

Lines changed: 1 addition & 1 deletion

@@ -32,7 +32,7 @@
 find_program(nmodl_BINARY NAMES nmodl${CMAKE_EXECUTABLE_SUFFIX}
              HINTS "${CORENRN_NMODL_DIR}/bin" QUIET)

-find_path(nmodl_INCLUDE "nmodl/fast_math.ispc" HINTS "${CORENRN_NMODL_DIR}/include")
+find_path(nmodl_INCLUDE "nmodl/fast_math.hpp" HINTS "${CORENRN_NMODL_DIR}/include")
 find_path(nmodl_PYTHONPATH "nmodl/__init__.py" HINTS "${CORENRN_NMODL_DIR}/lib")

 # Checks 'REQUIRED', 'QUIET' and versions.

README.md

Lines changed: 1 addition & 1 deletion

@@ -395,4 +395,4 @@ You can see current [contributors here](https://github.com/BlueBrain/CoreNeuron/

 CoreNEURON is developed in a joint collaboration between the Blue Brain Project and Yale University. This work is supported by funding to the Blue Brain Project, a research center of the École polytechnique fédérale de Lausanne (EPFL), from the Swiss government’s ETH Board of the Swiss Federal Institutes of Technology, NIH grant number R01NS11613 (Yale University), the European Union Seventh Framework Program (FP7/2007-2013) under grant agreement n° 604102 (HBP) and the European Union’s Horizon 2020 Framework Programme for Research and Innovation under Specific Grant Agreement n° 720270 (Human Brain Project SGA1), n° 785907 (Human Brain Project SGA2) and n° 945539 (Human Brain Project SGA3).

-Copyright (c) 2016 - 2021 Blue Brain Project/EPFL
+Copyright (c) 2016 - 2022 Blue Brain Project/EPFL

coreneuron/apps/corenrn_parameters.cpp

Lines changed: 1 addition & 1 deletion

@@ -40,7 +40,7 @@ corenrn_parameters::corenrn_parameters() {
     app.add_set(
         "--verbose",
         this->verbose,
-        {verbose_level::NONE, verbose_level::ERROR, verbose_level::INFO, verbose_level::DEBUG},
+        {verbose_level::NONE, verbose_level::ERROR, verbose_level::INFO, verbose_level::DEBUG_INFO},
        "Verbose level: 0 = NONE, 1 = ERROR, 2 = INFO, 3 = DEBUG. Default is INFO");
     app.add_flag("--model-stats",
                  this->model_stats,

coreneuron/apps/corenrn_parameters.hpp

Lines changed: 7 additions & 1 deletion

@@ -35,7 +35,13 @@
 namespace coreneuron {

 struct corenrn_parameters_data {
-    enum verbose_level : std::uint32_t { NONE = 0, ERROR = 1, INFO = 2, DEBUG = 3, DEFAULT = INFO };
+    enum verbose_level : std::uint32_t {
+        NONE = 0,
+        ERROR = 1,
+        INFO = 2,
+        DEBUG_INFO = 3,
+        DEFAULT = INFO
+    };

     static constexpr int report_buff_size_default = 4;

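The enumerator rename from DEBUG to DEBUG_INFO (mirrored in the --verbose option set in corenrn_parameters.cpp above) most plausibly avoids a clash with a DEBUG preprocessor macro that build systems commonly define; note that the CLI help text still calls level 3 "DEBUG". A minimal sketch of the failure mode this sidesteps, using an illustrative macro and enum rather than CoreNEURON's actual build flags:

#include <cstdint>

// Illustrative only: assume the build injects -DDEBUG=1 for debug configurations.
#define DEBUG 1

enum verbose_level : std::uint32_t {
    NONE = 0,
    ERROR = 1,
    INFO = 2,
    // An enumerator literally named DEBUG would be macro-expanded to "1 = 3",
    // which no longer compiles; DEBUG_INFO is immune to that expansion.
    DEBUG_INFO = 3,
    DEFAULT = INFO
};

int main() {
    return verbose_level::DEBUG_INFO == 3 ? 0 : 1;
}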

coreneuron/apps/main1.cpp

Lines changed: 4 additions & 4 deletions

@@ -428,13 +428,13 @@ static void trajectory_return() {
     }
 }

-std::unique_ptr<ReportHandler> create_report_handler(ReportConfiguration& config,
+std::unique_ptr<ReportHandler> create_report_handler(const ReportConfiguration& config,
                                                      const SpikesInfo& spikes_info) {
     std::unique_ptr<ReportHandler> report_handler;
     if (config.format == "Bin") {
-        report_handler = std::make_unique<BinaryReportHandler>(config);
+        report_handler = std::make_unique<BinaryReportHandler>();
     } else if (config.format == "SONATA") {
-        report_handler = std::make_unique<SonataReportHandler>(config, spikes_info);
+        report_handler = std::make_unique<SonataReportHandler>(spikes_info);
     } else {
         if (nrnmpi_myid == 0) {
             printf(" WARNING : Report name '%s' has unknown format: '%s'.\n",
@@ -595,7 +595,7 @@ extern "C" int run_solve_core(int argc, char** argv) {
         std::unique_ptr<ReportHandler> report_handler = create_report_handler(configs[i],
                                                                               spikes_info);
         if (report_handler) {
-            report_handler->create_report(dt, tstop, delay);
+            report_handler->create_report(configs[i], dt, tstop, delay);
             report_handlers.push_back(std::move(report_handler));
         }
         if (configs[i].report_dt < min_report_dt) {
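The shape of this change: BinaryReportHandler and SonataReportHandler no longer receive and store the ReportConfiguration at construction; the configuration is instead passed by const reference only when create_report is called. A simplified before/after sketch with stand-in types (not the real CoreNEURON classes), illustrating why this trims per-handler state that would otherwise stay alive after initialization:

#include <string>
#include <utility>

struct ReportConfiguration {      // stand-in for the real configuration struct
    std::string format;
    double report_dt;
};

// Before: each handler copied the configuration and kept it for the whole run.
struct OldStyleHandler {
    explicit OldStyleHandler(ReportConfiguration config) : config_(std::move(config)) {}
    void create_report(double /*dt*/, double /*tstop*/, double /*delay*/) { /* reads config_ */ }
    ReportConfiguration config_;  // duplicated per report handler
};

// After: the handler only borrows the configuration while creating the report,
// so no configuration-sized state outlives initialization inside the handler.
struct NewStyleHandler {
    void create_report(const ReportConfiguration& /*config*/, double, double, double) {}
};

int main() {
    ReportConfiguration cfg;
    cfg.format = "SONATA";
    cfg.report_dt = 0.1;

    OldStyleHandler old_style(cfg);                  // copies cfg into the handler
    old_style.create_report(0.025, 100.0, 0.0);

    NewStyleHandler new_style;                       // holds no configuration
    new_style.create_report(cfg, 0.025, 100.0, 0.0);
    return 0;
}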

coreneuron/io/mech_report.cpp

Lines changed: 19 additions & 5 deletions

@@ -10,6 +10,7 @@
 #include <vector>

 #include "coreneuron/coreneuron.hpp"
+#include "coreneuron/io/nrn_setup.hpp"
 #include "coreneuron/mpi/nrnmpi.h"
 #include "coreneuron/apps/corenrn_parameters.hpp"

@@ -19,6 +20,7 @@ void write_mech_report() {
     /// mechanim count across all gids, local to rank
     const auto n_memb_func = corenrn.get_memb_funcs().size();
     std::vector<long> local_mech_count(n_memb_func, 0);
+    std::vector<long> local_mech_size(n_memb_func, 0);

     /// each gid record goes on separate row, only check non-empty threads
     for (int i = 0; i < nrn_nthread; i++) {
@@ -27,10 +29,12 @@ void write_mech_report() {
             const int type = tml->index;
             const auto& ml = tml->ml;
             local_mech_count[type] += ml->nodecount;
+            local_mech_size[type] = memb_list_size(tml, true);
         }
     }

     std::vector<long> total_mech_count(n_memb_func);
+    std::vector<long> total_mech_size(n_memb_func);

 #if NRNMPI
     if (corenrn_param.mpi_enable) {
@@ -39,21 +43,31 @@ void write_mech_report() {
                                   &total_mech_count[0],
                                   local_mech_count.size(),
                                   1);
-
+        nrnmpi_long_allreduce_vec(&local_mech_size[0],
+                                  &total_mech_size[0],
+                                  local_mech_size.size(),
+                                  1);
     } else
 #endif
     {
         total_mech_count = local_mech_count;
+        total_mech_size = local_mech_size;
     }

     /// print global stats to stdout
     if (nrnmpi_myid == 0) {
-        printf("\n================ MECHANISMS COUNT BY TYPE ==================\n");
-        printf("%4s %20s %10s\n", "Id", "Name", "Count");
+        printf("\n============== MECHANISMS COUNT AND SIZE BY TYPE =============\n");
+        printf("%4s %20s %10s %25s\n", "Id", "Name", "Count", "Total memory size (KiB)");
         for (size_t i = 0; i < total_mech_count.size(); i++) {
-            printf("%4lu %20s %10ld\n", i, nrn_get_mechname(i), total_mech_count[i]);
+            if (total_mech_count[i] > 0) {
+                printf("%4lu %20s %10ld %25.2lf\n",
+                       i,
+                       nrn_get_mechname(i),
+                       total_mech_count[i],
+                       static_cast<double>(total_mech_size[i]) / 1024);
+            }
         }
-        printf("=============================================================\n");
+        printf("==============================================================\n");
     }
 }

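For context, nrnmpi_long_allreduce_vec is CoreNEURON's MPI wrapper; here it combines every rank's local_mech_count and local_mech_size vectors into the global totals printed by rank 0 (presumably an element-wise sum, given that the non-MPI branch simply copies the local vectors). A rough standalone equivalent written against plain MPI, as a sketch rather than the wrapper's actual implementation:

#include <mpi.h>
#include <vector>

// Element-wise sum of per-rank counters into global totals, visible on every rank.
void allreduce_long_sum(const std::vector<long>& local, std::vector<long>& total) {
    total.resize(local.size());
    MPI_Allreduce(local.data(),
                  total.data(),
                  static_cast<int>(local.size()),
                  MPI_LONG,
                  MPI_SUM,
                  MPI_COMM_WORLD);
}

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    std::vector<long> local{3, 0, 7};   // e.g. this rank's per-type mechanism counts
    std::vector<long> total;
    allreduce_long_sum(local, total);   // every rank now holds the summed totals
    MPI_Finalize();
    return 0;
}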

coreneuron/io/nrn_setup.cpp

Lines changed: 32 additions & 4 deletions

@@ -966,9 +966,37 @@ void read_phase3(NrnThread& nt, UserParams& userParams) {
     nt.summation_report_handler_ = std::make_unique<SummationReportMapping>();
 }

-static size_t memb_list_size(NrnThreadMembList* tml) {
+/* Returns the size of the dynamically allocated memory for NrnThreadMembList
+ * Includes:
+ * - Size of NrnThreadMembList
+ * - Size of Memb_list
+ * - Size of nodeindices
+ * - Size of _permute
+ * - Size of _thread
+ * - Size of NetReceive and NetSend Buffers
+ * - Size of int variables
+ * - Size of double variables (If include_data is enabled. Those variables are already counted
+ *   since they point to nt->_data.)
+ */
+size_t memb_list_size(NrnThreadMembList* tml, bool include_data) {
     size_t nbyte = sizeof(NrnThreadMembList) + sizeof(Memb_list);
     nbyte += tml->ml->nodecount * sizeof(int);
+    if (tml->ml->_permute) {
+        nbyte += tml->ml->nodecount * sizeof(int);
+    }
+    if (tml->ml->_thread) {
+        Memb_func& mf = corenrn.get_memb_func(tml->index);
+        nbyte += mf.thread_size_ * sizeof(ThreadDatum);
+    }
+    if (tml->ml->_net_receive_buffer) {
+        nbyte += sizeof(NetReceiveBuffer_t) + tml->ml->_net_receive_buffer->size_of_object();
+    }
+    if (tml->ml->_net_send_buffer) {
+        nbyte += sizeof(NetSendBuffer_t) + tml->ml->_net_send_buffer->size_of_object();
+    }
+    if (include_data) {
+        nbyte += corenrn.get_prop_param_size()[tml->index] * tml->ml->nodecount * sizeof(double);
+    }
     nbyte += corenrn.get_prop_dparam_size()[tml->index] * tml->ml->nodecount * sizeof(Datum);
 #ifdef DEBUG
     int i = tml->index;
@@ -991,7 +1019,7 @@ size_t output_presyn_size(void) {
     size_t nbyte = sizeof(gid2out) + sizeof(int) * gid2out.size() +
                    sizeof(PreSyn*) * gid2out.size();
 #ifdef DEBUG
-    printf(" gid2out table bytes=~%ld size=%d\n", nbyte, gid2out.size());
+    printf(" gid2out table bytes=~%ld size=%ld\n", nbyte, gid2out.size());
 #endif
     return nbyte;
 }
@@ -1003,7 +1031,7 @@ size_t input_presyn_size(void) {
     size_t nbyte = sizeof(gid2in) + sizeof(int) * gid2in.size() +
                    sizeof(InputPreSyn*) * gid2in.size();
 #ifdef DEBUG
-    printf(" gid2in table bytes=~%ld size=%ld\n", nbyte, gid2in.size());
+    printf(" gid2in table bytes=~%ld size=%ld\n", nbyte, gid2in.size());
 #endif
     return nbyte;
 }
@@ -1031,7 +1059,7 @@ size_t model_size(bool detailed_report) {
         // Memb_list size
         int nmech = 0;
         for (auto tml = nt.tml; tml; tml = tml->next) {
-            nb_nt += memb_list_size(tml);
+            nb_nt += memb_list_size(tml, false);
            ++nmech;
         }

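memb_list_size now follows a simple accounting pattern: start from the sizeof of the fixed structs, add each optional dynamically allocated member only when its pointer is non-null, and add the double parameter data only when include_data is set (true in write_mech_report above, false in model_size, where nt->_data is measured separately). A self-contained sketch of that pattern with made-up types, not the CoreNEURON structs:

#include <cstddef>
#include <cstdio>

struct Buffer {                       // made-up stand-in for a NetReceive/NetSend buffer
    int capacity;
    size_t size_of_object() const { return capacity * sizeof(double); }
};

struct MechInstance {                 // made-up stand-in for the Memb_list bookkeeping
    int nodecount;
    int* permute;                     // optional
    Buffer* net_receive_buffer;       // optional
    int params_per_node;
};

size_t instance_footprint(const MechInstance& m, bool include_data) {
    size_t nbyte = sizeof(MechInstance);
    nbyte += m.nodecount * sizeof(int);                       // node indices
    if (m.permute) {
        nbyte += m.nodecount * sizeof(int);                   // permutation array
    }
    if (m.net_receive_buffer) {
        nbyte += sizeof(Buffer) + m.net_receive_buffer->size_of_object();
    }
    if (include_data) {
        nbyte += static_cast<size_t>(m.params_per_node) * m.nodecount * sizeof(double);
    }
    return nbyte;
}

int main() {
    Buffer buf{16};
    MechInstance m{1000, nullptr, &buf, 8};
    std::printf("report footprint: %zu bytes, model_size contribution: %zu bytes\n",
                instance_footprint(m, true), instance_footprint(m, false));
    return 0;
}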

coreneuron/io/nrn_setup.hpp

Lines changed: 2 additions & 0 deletions

@@ -42,6 +42,8 @@ extern void nrn_setup_cleanup();

 extern int nrn_i_layout(int i, int cnt, int j, int size, int layout);

+size_t memb_list_size(NrnThreadMembList* tml, bool include_data);
+
 size_t model_size(bool detailed_report);

 namespace coreneuron {
