-
Notifications
You must be signed in to change notification settings - Fork 109
Expand file tree
/
Copy pathmain.cc
More file actions
1273 lines (1059 loc) · 48.6 KB
/
main.cc
File metadata and controls
1273 lines (1059 loc) · 48.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright 2009-2025 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2025, NTESS
// All rights reserved.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.
#include "sst_config.h"
#include "sst/core/warnmacros.h"
DISABLE_WARN_DEPRECATED_REGISTER
// The Python header already defines this and should override one from the
// command line.
#ifdef _XOPEN_SOURCE
#undef _XOPEN_SOURCE
#endif
#include <Python.h>
REENABLE_WARNING
#ifdef SST_CONFIG_HAVE_MPI
DISABLE_WARN_MISSING_OVERRIDE
#include <mpi.h>
REENABLE_WARNING
#endif
#include "sst/core/activity.h"
#include "sst/core/checkpointAction.h"
#include "sst/core/config.h"
#include "sst/core/configGraph.h"
#include "sst/core/cputimer.h"
#include "sst/core/exit.h"
#include "sst/core/factory.h"
#include "sst/core/iouse.h"
#include "sst/core/link.h"
#include "sst/core/mempool.h"
#include "sst/core/mempoolAccessor.h"
#include "sst/core/memuse.h"
#include "sst/core/model/sstmodel.h"
#include "sst/core/objectComms.h"
#include "sst/core/rankInfo.h"
#include "sst/core/realtime.h"
#include "sst/core/simulation_impl.h"
#include "sst/core/statapi/statengine.h"
#include "sst/core/stringize.h"
#include "sst/core/threadsafe.h"
#include "sst/core/timeLord.h"
#include "sst/core/timeVortex.h"
#include <cinttypes>
#include <exception>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <signal.h>
#include <sys/resource.h>
#include <time.h>
// Configuration Graph Generation Options
#include "sst/core/cfgoutput/dotConfigOutput.h"
#include "sst/core/cfgoutput/jsonConfigOutput.h"
#include "sst/core/cfgoutput/pythonConfigOutput.h"
#include "sst/core/configGraphOutput.h"
#include "sst/core/eli/elementinfo.h"
using namespace SST::Core;
using namespace SST::Partition;
using namespace SST;
static SST::Output g_output;
// Functions to force initialization stages of simulation to execute
// one rank at a time. Put force_rank_sequential_start() before the
// serialized section and force_rank_sequential_stop() after. These
// calls must be used in matching pairs. It should also be followed
// by a barrier if there are multiple threads running at the time of
// the call.
static void
force_rank_sequential_start(bool enable, const RankInfo& myRank, const RankInfo& world_size)
{
if ( !enable || world_size.rank == 1 || myRank.thread != 0 ) return;
#ifdef SST_CONFIG_HAVE_MPI
// Start off all ranks with a barrier so none enter the serialized
// region until they are all there
MPI_Barrier(MPI_COMM_WORLD);
// Rank 0 will proceed immediately. All others will wait
if ( myRank.rank == 0 ) return;
// Ranks will wait for notice from previous rank before proceeding
int32_t buf = 0;
MPI_Recv(&buf, 1, MPI_INT32_T, myRank.rank - 1, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
#endif
}
// Functions to force initialization stages of simulation to execute
// one rank at a time. Put force_rank_sequential_start() before the
// serialized section and force_rank_sequential_stop() after. These
// calls must be used in matching pairs.
static void
force_rank_sequential_stop(bool enable, const RankInfo& myRank, const RankInfo& world_size)
{
if ( !enable || world_size.rank == 1 || myRank.thread != 0 ) return;
#ifdef SST_CONFIG_HAVE_MPI
// After I'm through the serialized region, notify the next
// sender, then barrier. The last rank does not need to do a
// send.
if ( myRank.rank != world_size.rank - 1 ) {
uint32_t buf = 0;
MPI_Send(&buf, 1, MPI_INT32_T, myRank.rank + 1, 0, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
#endif
}
static void
dump_partition(Config& cfg, ConfigGraph* graph, const RankInfo& size)
{
///////////////////////////////////////////////////////////////////////
// If the user asks us to dump the partitioned graph.
if ( cfg.component_partition_file() != "" ) {
if ( cfg.verbose() ) {
g_output.verbose(
CALL_INFO, 1, 0, "# Dumping partitioned component graph to %s\n",
cfg.component_partition_file().c_str());
}
std::ofstream graph_file(cfg.component_partition_file().c_str());
ConfigComponentMap_t& component_map = graph->getComponentMap();
for ( uint32_t i = 0; i < size.rank; i++ ) {
for ( uint32_t t = 0; t < size.thread; t++ ) {
graph_file << "Rank: " << i << "." << t << " Component List:" << std::endl;
RankInfo r(i, t);
for ( ConfigComponentMap_t::const_iterator j = component_map.begin(); j != component_map.end(); ++j ) {
auto c = *j;
if ( c->rank == r ) {
graph_file << " " << c->name << " (ID=" << c->id << ")" << std::endl;
graph_file << " -> type " << c->type << std::endl;
graph_file << " -> weight " << c->weight << std::endl;
graph_file << " -> linkcount " << c->links.size() << std::endl;
graph_file << " -> rank " << c->rank.rank << std::endl;
graph_file << " -> thread " << c->rank.thread << std::endl;
}
}
}
}
graph_file.close();
if ( cfg.verbose() ) { g_output.verbose(CALL_INFO, 2, 0, "# Dump of partition graph is complete.\n"); }
}
}
static void
do_graph_wireup(ConfigGraph* graph, SST::Simulation_impl* sim, const RankInfo& myRank, SimTime_t min_part)
{
if ( !graph->containsComponentInRank(myRank) ) {
g_output.output("WARNING: No components are assigned to rank: %u.%u\n", myRank.rank, myRank.thread);
}
sim->performWireUp(*graph, myRank, min_part);
}
// Functions to do shared (static) initialization and notificaion for
// stats engines. Right now, the StatGroups are per MPI rank and
// everything else in StatEngine is per partition.
static void
do_statengine_static_initialization(ConfigGraph* graph, const RankInfo& myRank)
{
if ( myRank.thread != 0 ) return;
StatisticProcessingEngine::static_setup(graph);
}
static void
do_statoutput_start_simulation(const RankInfo& myRank)
{
if ( myRank.thread != 0 ) return;
StatisticProcessingEngine::stat_outputs_simulation_start();
}
static void
do_statoutput_end_simulation(const RankInfo& myRank)
{
if ( myRank.thread != 0 ) return;
StatisticProcessingEngine::stat_outputs_simulation_end();
}
// Function to initialize the StatEngines in each partition (Simulation_impl object)
static void
do_statengine_initialization(ConfigGraph* graph, SST::Simulation_impl* sim, const RankInfo& UNUSED(myRank))
{
sim->initializeStatisticEngine(*graph);
}
static void
do_link_preparation(ConfigGraph* graph, SST::Simulation_impl* sim, const RankInfo& myRank, SimTime_t min_part)
{
sim->prepareLinks(*graph, myRank, min_part);
}
// Returns the extension, or an empty string if there was no extension
static std::string
addRankToFileName(std::string& file_name, int rank)
{
auto index = file_name.find_last_of(".");
std::string base;
std::string ext;
// If there is an extension, add it before the extension
if ( index != std::string::npos ) {
base = file_name.substr(0, index);
ext = file_name.substr(index);
}
else {
base = file_name;
}
file_name = base + std::to_string(rank) + ext;
return ext;
}
static void
doSerialOnlyGraphOutput(SST::Config* cfg, ConfigGraph* graph)
{
// See if user asked us to dump the config graph in dot graph format
if ( cfg->output_dot() != "" ) {
DotConfigGraphOutput out(cfg->output_dot().c_str());
out.generate(cfg, graph);
}
}
// This should only be called once in main(). Either before or after
// graph broadcast depending on if parallel_load is turned on or not.
// If on, call it after graph broadcast, if off, call it before.
static void
doParallelCapableGraphOutput(SST::Config* cfg, ConfigGraph* graph, const RankInfo& myRank, const RankInfo& world_size)
{
// User asked us to dump the config graph to a file in Python
if ( cfg->output_config_graph() != "" ) {
// See if we are doing parallel output
std::string file_name(cfg->output_config_graph());
if ( cfg->parallel_output() && world_size.rank != 1 ) {
// Append rank number to base filename
std::string ext = addRankToFileName(file_name, myRank.rank);
if ( ext != ".py" ) {
g_output.fatal(CALL_INFO, 1, "--output-config requires a filename with a .py extension\n");
}
}
PythonConfigGraphOutput out(file_name.c_str());
out.generate(cfg, graph);
}
// User asked us to dump the config graph in JSON format
if ( cfg->output_json() != "" ) {
std::string file_name(cfg->output_json());
if ( cfg->parallel_output() ) {
// Append rank number to base filename
std::string ext = addRankToFileName(file_name, myRank.rank);
if ( ext != ".json" ) {
g_output.fatal(CALL_INFO, 1, "--output-json requires a filename with a .json extension\n");
}
}
JSONConfigGraphOutput out(file_name.c_str());
out.generate(cfg, graph);
}
}
static double
start_graph_creation(
ConfigGraph*& graph, Config& cfg, Factory* factory, const RankInfo& world_size, const RankInfo& myRank)
{
// Get a list of all the available SSTModelDescriptions
std::vector<std::string> models = ELI::InfoDatabase::getRegisteredElementNames<SSTModelDescription>();
// Create a map of extensions to the model that supports them
std::map<std::string, std::string> extension_map;
for ( auto x : models ) {
// auto extensions = factory->getSimpleInfo<SSTModelDescription, 1, std::vector<std::string>>(x);
auto extensions = SSTModelDescription::getElementSupportedExtensions(x);
for ( auto y : extensions ) {
extension_map[y] = x;
}
}
// Create the model generator
std::unique_ptr<SSTModelDescription> modelGen;
force_rank_sequential_start(cfg.rank_seq_startup(), myRank, world_size);
if ( cfg.configFile() != "NONE" ) {
// Get the file extension by finding the last .
std::string extension = cfg.configFile().substr(cfg.configFile().find_last_of("."));
std::string model_name;
try {
model_name = extension_map.at(extension);
}
catch ( std::exception& e ) {
std::cerr << "Unsupported SDL file type: \"" << extension << "\"" << std::endl;
return -1;
}
// If doing parallel load, make sure this model is parallel capable
if ( cfg.parallel_load() && !SSTModelDescription::isElementParallelCapable(model_name) ) {
std::cerr << "Model type for extension: \"" << extension << "\" does not support parallel loading."
<< std::endl;
return -1;
}
if ( myRank.rank == 0 || cfg.parallel_load() ) {
modelGen.reset(factory->Create<SSTModelDescription>(
model_name, cfg.configFile(), cfg.verbose(), &cfg, sst_get_cpu_time()));
}
}
double start_graph_gen = sst_get_cpu_time();
// Only rank 0 will populate the graph, unless we are using
// parallel load. In this case, all ranks will load the graph
if ( myRank.rank == 0 || cfg.parallel_load() ) {
try {
graph = modelGen->createConfigGraph();
}
catch ( std::exception& e ) {
g_output.fatal(CALL_INFO, -1, "Error encountered during config-graph generation: %s\n", e.what());
}
}
else {
graph = new ConfigGraph();
}
force_rank_sequential_stop(cfg.rank_seq_startup(), myRank, world_size);
#ifdef SST_CONFIG_HAVE_MPI
// Config is done - broadcast it, unless we are parallel loading
if ( world_size.rank > 1 && !cfg.parallel_load() ) {
try {
Comms::broadcast(cfg, 0);
}
catch ( std::exception& e ) {
g_output.fatal(CALL_INFO, -1, "Error encountered broadcasting configuration object: %s\n", e.what());
}
}
#endif
return start_graph_gen;
}
static double
start_partitioning(
Config& cfg, const RankInfo& world_size, const RankInfo& myRank, Factory* factory, ConfigGraph* graph)
{
////// Start Partitioning //////
double start_part = sst_get_cpu_time();
if ( !cfg.parallel_load() ) {
// Normal partitioning
// // If this is a serial job, just use the single partitioner,
// // but the same code path
// if ( world_size.rank == 1 && world_size.thread == 1 ) cfg.partitioner_ = "sst.single";
// Get the partitioner. Built in partitioners are in the "sst" library.
SSTPartitioner* partitioner = factory->CreatePartitioner(cfg.partitioner(), world_size, myRank, cfg.verbose());
try {
if ( partitioner->requiresConfigGraph() ) { partitioner->performPartition(graph); }
else {
PartitionGraph* pgraph;
if ( myRank.rank == 0 ) { pgraph = graph->getCollapsedPartitionGraph(); }
else {
pgraph = new PartitionGraph();
}
if ( myRank.rank == 0 || partitioner->spawnOnAllRanks() ) {
partitioner->performPartition(pgraph);
if ( myRank.rank == 0 ) graph->annotateRanks(pgraph);
}
delete pgraph;
}
}
catch ( std::exception& e ) {
g_output.fatal(CALL_INFO, -1, "Error encountered during graph partitioning phase: %s\n", e.what());
}
delete partitioner;
}
// Check the partitioning to make sure it is sane
if ( myRank.rank == 0 || cfg.parallel_load() ) {
if ( !graph->checkRanks(world_size) ) {
g_output.fatal(CALL_INFO, 1, "ERROR: Bad partitioning; partition included unknown ranks.\n");
}
}
return sst_get_cpu_time() - start_part;
}
struct SimThreadInfo_t
{
RankInfo myRank;
RankInfo world_size;
Config* config;
ConfigGraph* graph;
SimTime_t min_part;
// Time / stats information
double build_time;
double run_time;
UnitAlgebra simulated_time;
uint64_t max_tv_depth;
uint64_t current_tv_depth;
uint64_t sync_data_size;
};
static void
start_simulation(uint32_t tid, SimThreadInfo_t& info, Core::ThreadSafe::Barrier& barrier)
{
// Setup Mempools
Core::MemPoolAccessor::initializeLocalData(tid);
info.myRank.thread = tid;
bool restart = info.config->load_from_checkpoint();
////// Create Simulation Objects //////
SST::Simulation_impl* sim = Simulation_impl::createSimulation(info.config, info.myRank, info.world_size, restart);
double start_run = 0.0;
// Setup the real time actions (all of these have to be defined on
// the command-line or SDL file, they will not be checkpointed and
// restored
sim->setupSimActions(info.config);
// Thread zero needs to initialize the checkpoint data structures
// if any checkpointing options were turned on. This will return
// an empty string if checkpointing was not enabled.
if ( tid == 0 ) {
sim->checkpoint_directory_ = Checkpointing::initializeCheckpointInfrastructure(
info.config, sim->real_time_->canInitiateCheckpoint(), info.myRank.rank);
if ( sim->checkpoint_directory_ != "" ) {
// Write out any data structures needed for all checkpoints
}
}
// Wait for all checkpointing files to be initialzed
barrier.wait();
if ( !restart ) {
double start_build = sst_get_cpu_time();
barrier.wait();
sim->processGraphInfo(*info.graph, info.myRank, info.min_part);
barrier.wait();
force_rank_sequential_start(info.config->rank_seq_startup(), info.myRank, info.world_size);
barrier.wait();
// Perform the wireup.
if ( tid == 0 ) { do_statengine_static_initialization(info.graph, info.myRank); }
barrier.wait();
do_statengine_initialization(info.graph, sim, info.myRank);
barrier.wait();
// Prepare the links, which creates the ComponentInfo objects and
// Link and puts the links in the LinkMap for each ComponentInfo.
#ifdef SST_COMPILE_MACOSX
// Some versions of clang on mac have an issue with deleting links
// that were created interleaved between threads, so force
// serialization of threads during link creation. This has been
// confirmed on both Intel and ARM for Xcode 14 and 15. We should
// revisit this in the future. This is easy to see when running
// sst-benchmark with 1024 components and multiple threads. At
// time of adding this code, the difference in delete times was
// 3-5 minutes versuses less than a second.
for ( uint32_t i = 0; i < info.world_size.thread; ++i ) {
if ( i == info.myRank.thread ) { do_link_preparation(info.graph, sim, info.myRank, info.min_part); }
barrier.wait();
}
#else
do_link_preparation(info.graph, sim, info.myRank, info.min_part);
#endif
barrier.wait();
// Create all the simulation components
do_graph_wireup(info.graph, sim, info.myRank, info.min_part);
barrier.wait();
if ( tid == 0 ) { delete info.graph; }
force_rank_sequential_stop(info.config->rank_seq_startup(), info.myRank, info.world_size);
barrier.wait();
if ( info.myRank.thread == 0 ) { sim->exchangeLinkInfo(); }
barrier.wait();
start_run = sst_get_cpu_time();
info.build_time = start_run - start_build;
#ifdef SST_CONFIG_HAVE_MPI
if ( tid == 0 && info.world_size.rank > 1 ) { MPI_Barrier(MPI_COMM_WORLD); }
#endif
barrier.wait();
if ( info.config->runMode() == SimulationRunMode::RUN || info.config->runMode() == SimulationRunMode::BOTH ) {
if ( info.config->verbose() && 0 == tid ) {
g_output.verbose(CALL_INFO, 1, 0, "# Starting main event loop\n");
time_t the_time = time(nullptr);
struct tm* now = localtime(&the_time);
g_output.verbose(
CALL_INFO, 1, 0, "# Start time: %04u/%02u/%02u at: %02u:%02u:%02u\n", (now->tm_year + 1900),
(now->tm_mon + 1), now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
}
if ( tid == 0 && info.world_size.rank > 1 ) {
// If we are a MPI_parallel job, need to makes sure that all used
// libraries are loaded on all ranks.
#ifdef SST_CONFIG_HAVE_MPI
std::set<std::string> lib_names;
std::set<std::string> other_lib_names;
Factory::getFactory()->getLoadedLibraryNames(lib_names);
// Send my lib_names to the next lowest rank
if ( info.myRank.rank == (info.world_size.rank - 1) ) {
Comms::send(info.myRank.rank - 1, 0, lib_names);
lib_names.clear();
}
else {
Comms::recv(info.myRank.rank + 1, 0, other_lib_names);
for ( auto iter = other_lib_names.begin(); iter != other_lib_names.end(); ++iter ) {
lib_names.insert(*iter);
}
if ( info.myRank.rank != 0 ) {
Comms::send(info.myRank.rank - 1, 0, lib_names);
lib_names.clear();
}
}
Comms::broadcast(lib_names, 0);
Factory::getFactory()->loadUnloadedLibraries(lib_names);
#endif
}
barrier.wait();
sim->initialize();
barrier.wait();
/* Run Set */
sim->setup();
barrier.wait();
/* Finalize all the stat outputs */
do_statoutput_start_simulation(info.myRank);
barrier.wait();
sim->prepare_for_run();
}
} // end if !restart
else {
double start_build = sst_get_cpu_time();
// Finish parsing checkpoint for restart
sim->restart(info.config);
barrier.wait();
if ( info.myRank.thread == 0 ) { sim->exchangeLinkInfo(); }
barrier.wait();
start_run = sst_get_cpu_time();
info.build_time = start_run - start_build;
}
/* Run Simulation */
if ( info.config->runMode() == SimulationRunMode::RUN || info.config->runMode() == SimulationRunMode::BOTH ) {
sim->run();
barrier.wait();
/* Adjust clocks at simulation end to
* reflect actual simulation end if that
* differs from detected simulation end
*/
sim->adjustTimeAtSimEnd();
barrier.wait();
sim->complete();
barrier.wait();
sim->finish();
barrier.wait();
/* Tell stat outputs simulation is done */
do_statoutput_end_simulation(info.myRank);
barrier.wait();
}
info.simulated_time = sim->getEndSimTime();
// g_output.output(CALL_INFO,"Simulation time = %s\n",info.simulated_time.toStringBestSI().c_str());
double end_time = sst_get_cpu_time();
info.run_time = end_time - start_run;
info.max_tv_depth = sim->getTimeVortexMaxDepth();
info.current_tv_depth = sim->getTimeVortexCurrentDepth();
// Print the profiling info. For threads, we will serialize
// writing and for ranks we will use different files, unless we
// are writing to console, in which case we will serialize the
// output as well.
FILE* fp = nullptr;
std::string file = info.config->profilingOutput();
if ( file == "stdout" ) {
// Output to the console, so we will force both rank and
// thread output to be sequential
force_rank_sequential_start(info.world_size.rank > 1, info.myRank, info.world_size);
for ( uint32_t i = 0; i < info.world_size.thread; ++i ) {
if ( i == info.myRank.thread ) { sim->printProfilingInfo(stdout); }
barrier.wait();
}
force_rank_sequential_stop(info.world_size.rank > 1, info.myRank, info.world_size);
barrier.wait();
}
else {
// Output to file
if ( info.world_size.rank > 1 ) { addRankToFileName(file, info.myRank.rank); }
// First thread will open a new file
std::string mode;
// Thread 0 will open a new file, all others will append
if ( info.myRank.thread == 0 )
mode = "w";
else
mode = "a";
for ( uint32_t i = 0; i < info.world_size.thread; ++i ) {
if ( i == info.myRank.thread ) {
fp = fopen(file.c_str(), mode.c_str());
sim->printProfilingInfo(fp);
fclose(fp);
}
barrier.wait();
}
}
// Put in info about sync memory usage
info.sync_data_size = sim->getSyncQueueDataSize();
delete sim;
}
int
main(int argc, char* argv[])
{
#ifdef SST_CONFIG_HAVE_MPI
// Initialize MPI
MPI_Init(&argc, &argv);
int myrank = 0;
int mysize = 0;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &mysize);
RankInfo world_size(mysize, 1);
RankInfo myRank(myrank, 0);
#else
int myrank = 0;
RankInfo world_size(1, 1);
RankInfo myRank(0, 0);
#endif
Config cfg(world_size.rank, myrank == 0);
// All ranks parse the command line
auto ret_value = cfg.parseCmdLine(argc, argv);
if ( ret_value == -1 ) {
// Error in command line arguments
return -1;
}
else if ( ret_value == 1 ) {
// Just asked for info, clean exit
return 0;
}
// Check to see if we are doing a restart from a checkpoint
bool restart = cfg.load_from_checkpoint();
// If restarting, update config from checkpoint
uint32_t cpt_num_threads, cpt_num_ranks;
if ( restart ) {
// Need to open the registry file
if ( cfg.checkConfigFile() == false ) { return -1; /* checkConfigFile provides error message */ }
std::ifstream fs(cfg.configFile());
if ( !fs.is_open() ) {
if ( fs.bad() ) {
fprintf(stderr, "Unable to open checkpoint file [%s]: badbit set\n", cfg.configFile().c_str());
return -1;
}
if ( fs.fail() ) {
fprintf(stderr, "Unable to open checkpoint file [%s]: %s\n", cfg.configFile().c_str(), strerror(errno));
return -1;
}
fprintf(stderr, "Unable to open checkpoint file [%s]: unknown error\n", cfg.configFile().c_str());
return -1;
}
std::string line;
// Look for the line that has the global data file
std::string globals_filename;
std::string search_str("** (globals): ");
while ( std::getline(fs, line) ) {
// Look for lines starting with "** (globals):", then get the filename.
size_t pos = line.find(search_str);
if ( pos == 0 ) {
// Get the file name
globals_filename = line.substr(search_str.length());
break;
}
}
fs.close();
// Need to open the globals file
std::ifstream fs_globals(globals_filename);
if ( !fs_globals.is_open() ) {
if ( fs_globals.bad() ) {
fprintf(stderr, "Unable to open checkpoint globals file [%s]: badbit set\n", globals_filename.c_str());
return -1;
}
if ( fs_globals.fail() ) {
fprintf(
stderr, "Unable to open checkpoint globals file [%s]: %s\n", globals_filename.c_str(),
strerror(errno));
return -1;
}
fprintf(stderr, "Unable to open checkpoint globals file [%s]: unknown error\n", globals_filename.c_str());
return -1;
}
size_t size;
char* buffer;
SST::Core::Serialization::serializer ser;
ser.enable_pointer_tracking();
fs_globals.read(reinterpret_cast<char*>(&size), sizeof(size));
buffer = new char[size];
fs_globals.read(buffer, size);
std::string cpt_lib_path;
std::string cpt_timebase;
std::string cpt_output_directory;
std::string cpt_output_core_prefix;
std::string cpt_debug_file;
std::string cpt_prefix;
int cpt_output_verbose = 0;
std::map<std::string, uint32_t> cpt_params_key_map;
std::vector<std::string> cpt_params_key_map_reverse;
uint32_t cpt_params_next_key_id;
ser.start_unpacking(buffer, size);
ser& cpt_num_ranks;
ser& cpt_num_threads;
ser& cpt_lib_path;
ser& cpt_timebase;
ser& cpt_output_directory;
ser& cpt_output_core_prefix;
ser& cpt_output_verbose;
ser& cpt_debug_file;
ser& cpt_prefix;
ser& cpt_params_key_map;
ser& cpt_params_key_map_reverse;
ser& cpt_params_next_key_id;
fs_globals.close();
delete[] buffer;
// Error check that ranks & threads match after output is created
cfg.libpath_ = cpt_lib_path;
cfg.timeBase_ = cpt_timebase;
if ( !cfg.wasOptionSetOnCmdLine("output-directory") ) cfg.output_directory_ = cpt_output_directory;
if ( !cfg.wasOptionSetOnCmdLine("output-prefix-core") ) cfg.output_core_prefix_ = cpt_output_core_prefix;
if ( !cfg.wasOptionSetOnCmdLine("verbose") ) cfg.verbose_ = cpt_output_verbose;
if ( !cfg.wasOptionSetOnCmdLine("debug-file") ) cfg.debugFile_ = cpt_debug_file;
if ( !cfg.wasOptionSetOnCmdLine("checkpoint-prefix") ) cfg.checkpoint_prefix_ = cpt_prefix;
////// Initialize global data //////
// These are initialized after graph creation in the non-restart path
world_size.thread = cfg.num_threads();
Output::setFileName(cfg.debugFile() != "/dev/null" ? cfg.debugFile() : "sst_output");
Output::setWorldSize(world_size.rank, world_size.thread, myrank);
g_output = Output::setDefaultObject(cfg.output_core_prefix(), cfg.verbose(), 0, Output::STDOUT);
Simulation_impl::getTimeLord()->init(cfg.timeBase());
Params::keyMap = cpt_params_key_map;
Params::keyMapReverse = cpt_params_key_map_reverse;
Params::nextKeyID = cpt_params_next_key_id;
}
// If we are doing a parallel load with a file per rank, add the
// rank number to the file name before the extension
if ( cfg.parallel_load() && cfg.parallel_load_mode_multi() && world_size.rank != 1 ) {
addRankToFileName(cfg.configFile_, myRank.rank);
}
// Check to see if the config file exists
if ( cfg.checkConfigFile() == false ) { return -1; /* checkConfigFile provides error message */ }
// Create the factory. This may be needed to load an external model definition
Factory* factory = new Factory(cfg.getLibPath());
if ( restart && (cfg.num_ranks() != cpt_num_ranks || cfg.num_threads() != cpt_num_threads) ) {
g_output.fatal(
CALL_INFO, 1,
"Rank or thread counts do not match checkpoint. "
"Checkpoint requires %" PRIu32 " ranks and %" PRIu32 " threads\n",
cpt_num_ranks, cpt_num_threads);
}
////// Start ConfigGraph Creation //////
double start = sst_get_cpu_time();
ConfigGraph* graph = nullptr;
SimTime_t min_part = 0xffffffffffffffffl;
// If we aren't restarting, need to create the graph
if ( !restart ) {
// Get the memory before we create the graph
const uint64_t pre_graph_create_rss = maxGlobalMemSize();
double start_graph_gen = start_graph_creation(graph, cfg, factory, world_size, myRank);
////// Initialize global data //////
// Config is updated from SDL, initialize globals
// Set the number of threads
world_size.thread = cfg.num_threads();
// Create global output object
Output::setFileName(cfg.debugFile() != "/dev/null" ? cfg.debugFile() : "sst_output");
Output::setWorldSize(world_size.rank, world_size.thread, myrank);
g_output = Output::setDefaultObject(cfg.output_core_prefix(), cfg.verbose(), 0, Output::STDOUT);
g_output.verbose(
CALL_INFO, 1, 0, "#main() My rank is (%u.%u), on %u/%u nodes/threads\n", myRank.rank, myRank.thread,
world_size.rank, world_size.thread);
// TimeLord must be initialized prior to postCreationCleanup() call
Simulation_impl::getTimeLord()->init(cfg.timeBase());
// Cleanup after graph creation
if ( myRank.rank == 0 || cfg.parallel_load() ) {
graph->postCreationCleanup();
// Check config graph to see if there are structural errors.
if ( graph->checkForStructuralErrors() ) {
g_output.fatal(CALL_INFO, 1, "Structure errors found in the ConfigGraph.\n");
}
}
double graph_gen_time = sst_get_cpu_time() - start_graph_gen;
// If verbose level is high enough, compute the total number
// components in the simulation. NOTE: if parallel-load is
// enabled, then the parittioning won't actually happen and all
// ranks already have their parts of the graph.
uint64_t comp_count = 0;
if ( cfg.verbose() >= 1 ) {
if ( !cfg.parallel_load() && myRank.rank == 0 ) { comp_count = graph->getNumComponents(); }
#ifdef SST_CONFIG_HAVE_MPI
else if ( cfg.parallel_load() ) {
uint64_t my_count = graph->getNumComponentsInMPIRank(myRank.rank);
MPI_Allreduce(&my_count, &comp_count, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
}
#endif
}
if ( myRank.rank == 0 ) {
g_output.verbose(CALL_INFO, 1, 0, "# ------------------------------------------------------------\n");
g_output.verbose(CALL_INFO, 1, 0, "# Graph construction took %f seconds.\n", graph_gen_time);
g_output.verbose(CALL_INFO, 1, 0, "# Graph contains %" PRIu64 " components\n", comp_count);
}
////// End ConfigGraph Creation //////
#ifdef SST_CONFIG_HAVE_MPI
// If we did a parallel load, check to make sure that all the
// ranks have the same thread count set (the python can change the
// thread count if not specified on the command line
if ( cfg.parallel_load() ) {
uint32_t max_thread_count = 0;
uint32_t my_thread_count = cfg.num_threads();
MPI_Allreduce(&my_thread_count, &max_thread_count, 1, MPI_UINT32_T, MPI_MAX, MPI_COMM_WORLD);
if ( my_thread_count != max_thread_count ) {
g_output.fatal(
CALL_INFO, 1, "Thread counts do no match across ranks for configuration using parallel loading\n");
}
}
#endif
// If this is a serial job, just use the single partitioner,
// but the same code path
if ( world_size.rank == 1 && world_size.thread == 1 ) cfg.partitioner_ = "sst.single";
// Run the partitioner
double partitioning_time = start_partitioning(cfg, world_size, myRank, factory, graph);
const uint64_t post_graph_create_rss = maxGlobalMemSize();
if ( myRank.rank == 0 ) {
if ( !cfg.parallel_load() )
g_output.verbose(CALL_INFO, 1, 0, "# Graph partitioning took %lg seconds.\n", partitioning_time);
g_output.verbose(
CALL_INFO, 1, 0, "# Graph construction and partition raised RSS by %" PRIu64 " KB\n",
(post_graph_create_rss - pre_graph_create_rss));
g_output.verbose(CALL_INFO, 1, 0, "# ------------------------------------------------------------\n");
// Output the partition information if user requests it
dump_partition(cfg, graph, world_size);
}
////// End Partitioning //////
////// Calculate Minimum Partitioning //////
SimTime_t local_min_part = 0xffffffffffffffffl;
if ( world_size.rank > 1 ) {
// Check the graph for the minimum latency crossing a partition boundary
if ( myRank.rank == 0 || cfg.parallel_load() ) {
ConfigComponentMap_t& comps = graph->getComponentMap();
ConfigLinkMap_t& links = graph->getLinkMap();
// Find the minimum latency across a partition
for ( ConfigLinkMap_t::iterator iter = links.begin(); iter != links.end(); ++iter ) {
ConfigLink* clink = *iter;
RankInfo rank[2];
rank[0] = comps[COMPONENT_ID_MASK(clink->component[0])]->rank;
rank[1] = comps[COMPONENT_ID_MASK(clink->component[1])]->rank;
if ( rank[0].rank == rank[1].rank ) continue;
if ( clink->getMinLatency() < local_min_part ) { local_min_part = clink->getMinLatency(); }
}
}
#ifdef SST_CONFIG_HAVE_MPI
// Fix for case that probably doesn't matter in practice, but
// does come up during some specific testing. If there are no
// links that cross the boundary and we're a multi-rank job,
// we need to put in a sync interval to look for the exit
// conditions being met.
// if ( min_part == MAX_SIMTIME_T ) {
// // std::cout << "No links cross rank boundary" << std::endl;
// min_part = Simulation_impl::getTimeLord()->getSimCycles("1us","");
// }
MPI_Allreduce(&local_min_part, &min_part, 1, MPI_UINT64_T, MPI_MIN, MPI_COMM_WORLD);
// Comms::broadcast(min_part, 0);
#endif
}
////// End Calculate Minimum Partitioning //////
////// Write out the graph, if requested //////
if ( myRank.rank == 0 ) {
doSerialOnlyGraphOutput(&cfg, graph);
if ( !cfg.parallel_output() ) { doParallelCapableGraphOutput(&cfg, graph, myRank, world_size); }
}
////// Broadcast Graph //////
#ifdef SST_CONFIG_HAVE_MPI
if ( world_size.rank > 1 && !cfg.parallel_load() ) {
try {
Comms::broadcast(Params::keyMap, 0);
Comms::broadcast(Params::keyMapReverse, 0);
Comms::broadcast(Params::nextKeyID, 0);
Comms::broadcast(Params::global_params, 0);