Changes from all commits (65 commits)
09dcd19
openmp test
rvanvenetie Feb 11, 2020
33f2d6f
openmp
rvanvenetie Feb 11, 2020
c839be9
improve test
rvanvenetie Feb 11, 2020
1d094cb
Merge branch 'cpp/master' into cpp/spacetime-omp
rvanvenetie Feb 11, 2020
1bb3048
compute fibers opt
rvanvenetie Feb 11, 2020
68b5cc9
merged
rvanvenetie Jul 9, 2020
254a279
more merge conflicts resolved
rvanvenetie Jul 10, 2020
895275c
more merge conflicts resolved
rvanvenetie Jul 10, 2020
6d8fdb2
seems to work
rvanvenetie Jul 10, 2020
dce5bc7
Merge branch 'cpp/master' into cpp/spacetime-omp
rvanvenetie Jul 10, 2020
5d0f6dc
fix one more test
rvanvenetie Jul 10, 2020
c6337db
pragma _omp_ critical
rvanvenetie Jul 10, 2020
dbfa058
copy variable
bla-zz Jul 10, 2020
c9adcf4
pass the local variable
bla-zz Jul 10, 2020
091d973
set number of threads
bla-zz Jul 10, 2020
401dfc2
write number of threads in log
rvanvenetie Jul 13, 2020
03560f5
Merge branch 'cpp/spacetime-omp' of github.com:rvanvenetie/spacetime …
rvanvenetie Jul 13, 2020
6af1f2e
Merge branch 'cpp/master' into cpp/spacetime-omp
rvanvenetie Jul 13, 2020
ee59b5d
better balancing
bla-zz Jul 13, 2020
327e57f
is this faster
rvanvenetie Jul 14, 2020
f948ba7
maybe this compiles
rvanvenetie Jul 14, 2020
87ade0c
test poolvector
rvanvenetie Jul 14, 2020
7fa3cfb
revert
rvanvenetie Jul 14, 2020
4427e04
use arrays instead of vectors for vroem
bla-zz Jul 14, 2020
dcb6161
include array
rvanvenetie Jul 15, 2020
5a77a92
last version
bla-zz Jul 15, 2020
b347e01
Merge branch 'cpp/spacetime-omp' of github.com:rvanvenetie/spacetime …
bla-zz Jul 15, 2020
c5aec39
do uniform by default
bla-zz Jul 15, 2020
5d9ae59
remove unused boost thingy
rvanvenetie Jul 20, 2020
e0fe415
merged
rvanvenetie Nov 5, 2020
32f88e0
works?
rvanvenetie Nov 5, 2020
c41447c
Merge branch 'cpp/master' into cpp/spacetime-omp
Nov 6, 2020
3fe76f6
merge conflict fix
wat1444 Nov 16, 2020
052f9aa
Merge branch 'cpp/print-max-gradedness' into cpp/spacetime-omp
wat1444 Nov 16, 2020
6240905
merge
wat1444 Nov 18, 2020
db2f299
Merge branch 'cpp/compute-fibers' into cpp/spacetime-omp
wat1444 Nov 18, 2020
ce70d51
Merge branch 'cpp/master' into cpp/spacetime-omp
wat1444 Nov 19, 2020
2a6ea97
updates
wat1444 Nov 24, 2020
5db84a5
more info
rvanvenetie Nov 25, 2020
92a7211
correct interpolation
wat1444 Nov 26, 2020
6752730
default 1 PY cycle
rvanvenetie Nov 26, 2020
46b04a3
Merge branch 'cpp/spacetime-omp' of github.com:rvanvenetie/spacetime …
rvanvenetie Nov 26, 2020
4c478b0
merge
wat1444 Nov 30, 2020
e3c283a
fix
wat1444 Dec 10, 2020
f7bd96c
Merge branch 'cpp/master' into cpp/spacetime-omp
wat1444 Dec 10, 2020
5c35269
Merge branch 'cpp/master' into cpp/spacetime-omp
rvanvenetie Dec 14, 2020
6776b41
Merge branch 'cpp/master' into cpp/spacetime-omp
rvanvenetie Dec 14, 2020
44f5aeb
merge
rvanvenetie Jan 20, 2021
c294a6c
timings
rvanvenetie Jan 20, 2021
5829fbc
more timings
rvanvenetie Jan 20, 2021
2688cce
also for uniform
rvanvenetie Jan 20, 2021
f077ebb
auto
rvanvenetie Jan 20, 2021
50f5e9a
better timing
rvanvenetie Jan 20, 2021
fd86001
sort the work in blockdiag
rvanvenetie Jan 21, 2021
7457d1e
dynamic, 1 for space
rvanvenetie Jan 21, 2021
ae4c984
guided scheduling time
rvanvenetie Jan 21, 2021
867f524
this
rvanvenetie Jan 21, 2021
5aa0b84
more parallel more better
rvanvenetie Jan 21, 2021
3088cf4
try out this openmp stuff
rvanvenetie Jan 21, 2021
dda567f
now with initialization
rvanvenetie Jan 21, 2021
7939200
final attempt
rvanvenetie Jan 21, 2021
f698456
merge conflicts
rvanvenetie Mar 15, 2021
ee730fb
removed unused include
rvanvenetie Mar 15, 2021
83e47f6
thread_local variable
rvanvenetie Mar 15, 2021
9997827
Merge branch 'master' into cpp/spacetime-omp
rvanvenetie Mar 15, 2021
7 changes: 7 additions & 0 deletions src/CMakeLists.txt
@@ -22,6 +22,13 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PROGRAM}")
# endif()

# Set OpenMP
find_package(OpenMP)
include(ProcessorCount)
ProcessorCount(MAX_NUMBER_THREADS)
message("Setting maximum number of threads to ${MAX_NUMBER_THREADS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMAX_NUMBER_THREADS=${MAX_NUMBER_THREADS}")

### Download GoogleTest
include(FetchContent)
FetchContent_Declare(
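For context: the `MAX_NUMBER_THREADS` define added here is what later sizes the fixed per-thread storage in `src/datastructures/tree.hpp`, and the applications assert that the requested thread count stays below it before calling `omp_set_num_threads`. A minimal, self-contained sketch of that relationship (only `MAX_NUMBER_THREADS` and the OpenMP calls come from the PR; the struct and variable names are illustrative):

```cpp
#include <omp.h>

#include <algorithm>
#include <array>

#ifndef MAX_NUMBER_THREADS
#define MAX_NUMBER_THREADS 1  // Fallback when the build does not pass -DMAX_NUMBER_THREADS.
#endif

// Hypothetical per-thread scratch storage: one slot per possible OpenMP thread,
// sized by the compile-time bound that CMake derives from ProcessorCount.
struct PerThreadScratch {
  std::array<double, MAX_NUMBER_THREADS> slot{};
};

int main() {
  // Never run with more threads than there are slots.
  int num_threads = std::min(omp_get_max_threads(), MAX_NUMBER_THREADS);
  omp_set_num_threads(num_threads);

  PerThreadScratch scratch;
#pragma omp parallel
  {
    // Each thread touches only its own slot, so no synchronization is needed.
    scratch.slot[omp_get_thread_num()] += 1.0;
  }
  return 0;
}
```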
53 changes: 39 additions & 14 deletions src/applications/adaptive.cpp
@@ -114,10 +114,12 @@ int main(int argc, char* argv[]) {
std::string problem, domain;
size_t initial_refines = 0;
size_t max_dofs = 0;
size_t num_threads = 1;
bool calculate_condition_numbers = false;
bool print_centers = false;
bool print_sampling = false;
bool print_time_apply = false;
bool print_time_apply = true;
bool print_bilforms = false;
std::vector<double> print_time_slices;
boost::program_options::options_description problem_optdesc(
"Problem options");
@@ -133,7 +135,9 @@ int main(int argc, char* argv[]) {
"print_sampling", po::value<bool>(&print_sampling))(
"print_time_slices",
po::value<std::vector<double>>(&print_time_slices)->multitoken())(
"print_time_apply", po::value<bool>(&print_time_apply));
"print_time_apply", po::value<bool>(&print_time_apply))(
"print_bilforms", po::value<bool>(&print_bilforms))(
"num_threads", po::value<size_t>(&num_threads));

std::sort(print_time_slices.begin(), print_time_slices.end());

@@ -165,10 +169,20 @@ int main(int argc, char* argv[]) {
po::store(po::command_line_parser(argc, argv).options(cmdline_options).run(),
vm);
po::notify(vm);
assert(num_threads > 0);
if (num_threads > 1 && adapt_opts.use_cache) {
std::cout << "Multithreading is only enabled for no-cache." << std::endl;
return 1;
}
assert(num_threads <= omp_get_max_threads());
assert(num_threads <= MAX_NUMBER_THREADS);
omp_set_num_threads(num_threads);

std::cout << "Problem options:" << std::endl;
std::cout << "\tProblem: " << problem << std::endl;
std::cout << "\tDomain: " << domain
<< "; initial-refines: " << initial_refines << std::endl;
std::cout << "\tNumber-threads: " << num_threads << std::endl;
std::cout << std::endl;
std::cout << adapt_opts << std::endl;

@@ -215,9 +229,13 @@ int main(int argc, char* argv[]) {

// A slight overestimate.
ndof_Xd = vec_Xd->Bfs().size();
size_t ndof_Xd_time = vec_Xd->Project_0()->Bfs().size();
size_t ndof_Xd_space = vec_Xd->Project_1()->Bfs().size();
size_t ndof_Xdd = heat_eq.vec_Xdd()->Bfs().size();
size_t ndof_Ydd = heat_eq.vec_Ydd()->Bfs().size();
std::cout << "iter: " << ++iter << "\n\tXDelta-size: " << ndof_Xd
<< "\n\tXDelta-space-size: " << ndof_Xd_space
<< "\n\tXDelta-time-size: " << ndof_Xd_time
<< "\n\tXDelta-Gradedness: "
<< vec_Xd->Gradedness(&max_gradedness)
<< "\n\tXDeltaDelta-size: " << ndof_Xdd
@@ -317,18 +335,25 @@ int main(int argc, char* argv[]) {

if (print_time_apply) {
auto heat_d_dd = heat_eq.heat_d_dd();
std::cout << "\n\tA-time-per-apply: " << heat_d_dd->A()->TimePerApply()
<< "\n\tB-time-per-apply: " << heat_d_dd->B()->TimePerApply()
<< "\n\tBT-time-per-apply: " << heat_d_dd->BT()->TimePerApply()
<< "\n\tG-time-per-apply: " << heat_d_dd->G()->TimePerApply()
<< "\n\tP_Y-time-per-apply: "
<< heat_d_dd->P_Y()->TimePerApply()
<< "\n\tP_X-time-per-apply: "
<< heat_d_dd->P_X()->TimePerApply()
<< "\n\tS-time-per-apply: " << heat_d_dd->S()->TimePerApply()
<< "\n\ttotal-time-apply: " << heat_d_dd->TotalTimeApply()
<< "\n\ttotal-time-construct: "
<< heat_d_dd->TotalTimeConstruct() << std::flush;
std::cout
<< "\n\tA-time-per-apply: " << heat_d_dd->A()->TimePerApply()
<< "\n\tB-time-per-apply: " << heat_d_dd->B()->TimePerApply()
<< "\n\tB-A-time-per-apply: " << heat_d_dd->B()->A()->TimePerApply()
<< "\n\tB-B-time-per-apply: " << heat_d_dd->B()->B()->TimePerApply()
<< "\n\tBT-time-per-apply: " << heat_d_dd->BT()->TimePerApply()
<< "\n\tG-time-per-apply: " << heat_d_dd->G()->TimePerApply()
<< "\n\tP_Y-time-per-apply: " << heat_d_dd->P_Y()->TimePerApply()
<< "\n\tP_X-time-per-apply: " << heat_d_dd->P_X()->TimePerApply()
<< "\n\tS-time-per-apply: " << heat_d_dd->S()->TimePerApply()
<< "\n\ttotal-time-apply: " << heat_d_dd->TotalTimeApply()
<< "\n\ttotal-time-construct: " << heat_d_dd->TotalTimeConstruct()
<< std::flush;
}
if (print_bilforms) {
auto heat_d_dd = heat_eq.heat_d_dd();
std::cout << "\n\tB-A-bilforms: " << heat_d_dd->B()->A()->Information()
<< "\n\tP_Y-bilforms: " << heat_d_dd->P_Y()->Information()
<< std::flush;
}

if (print_centers) {
33 changes: 20 additions & 13 deletions src/applications/uniform.cpp
@@ -53,10 +53,11 @@ int main(int argc, char* argv[]) {
size_t initial_refines = 0;
size_t max_level = 0;
size_t max_dofs = 0;
size_t num_threads = 1;
std::string refine;
bool calculate_condition_PY = false;
bool calculate_condition_PX = false;
bool print_time_apply = false;
bool print_time_apply = true;
bool print_centers = false;
double solve_rtol = 1e-5;
boost::program_options::options_description problem_optdesc(
@@ -70,6 +71,7 @@ int main(int argc, char* argv[]) {
->default_value(std::numeric_limits<std::size_t>::max()))(
"max_dofs", po::value<size_t>(&max_dofs)->default_value(
std::numeric_limits<std::size_t>::max()))(
"num_threads", po::value<size_t>(&num_threads))(
"refine", po::value<std::string>(&refine)->default_value("sparse"))(
"print_centers", po::value<bool>(&print_centers))(
"print_time_apply", po::value<bool>(&print_time_apply))(
@@ -118,6 +120,10 @@ int main(int argc, char* argv[]) {
std::cout << adapt_opts << "\tsolve-rtol: " << solve_rtol << std::endl
<< std::endl;

assert(num_threads > 0 && num_threads <= omp_get_max_threads() &&
num_threads <= MAX_NUMBER_THREADS);
omp_set_num_threads(num_threads);

auto T = InitialTriangulation(domain, initial_refines);
auto B = Time::Bases();
auto vec_Xd = std::make_shared<
@@ -229,18 +235,19 @@

if (print_time_apply) {
auto heat_d_dd = heat_eq.heat_d_dd();
std::cout << "\n\tA-time-per-apply: " << heat_d_dd->A()->TimePerApply()
<< "\n\tB-time-per-apply: " << heat_d_dd->B()->TimePerApply()
<< "\n\tBT-time-per-apply: " << heat_d_dd->BT()->TimePerApply()
<< "\n\tG-time-per-apply: " << heat_d_dd->G()->TimePerApply()
<< "\n\tP_Y-time-per-apply: "
<< heat_d_dd->P_Y()->TimePerApply()
<< "\n\tP_X-time-per-apply: "
<< heat_d_dd->P_X()->TimePerApply()
<< "\n\tS-time-per-apply: " << heat_d_dd->S()->TimePerApply()
<< "\n\ttotal-time-apply: " << heat_d_dd->TotalTimeApply()
<< "\n\ttotal-time-construct: "
<< heat_d_dd->TotalTimeConstruct() << std::flush;
std::cout
<< "\n\tA-time-per-apply: " << heat_d_dd->A()->TimePerApply()
<< "\n\tB-time-per-apply: " << heat_d_dd->B()->TimePerApply()
<< "\n\tB-A-time-per-apply: " << heat_d_dd->B()->A()->TimePerApply()
<< "\n\tB-B-time-per-apply: " << heat_d_dd->B()->B()->TimePerApply()
<< "\n\tBT-time-per-apply: " << heat_d_dd->BT()->TimePerApply()
<< "\n\tG-time-per-apply: " << heat_d_dd->G()->TimePerApply()
<< "\n\tP_Y-time-per-apply: " << heat_d_dd->P_Y()->TimePerApply()
<< "\n\tP_X-time-per-apply: " << heat_d_dd->P_X()->TimePerApply()
<< "\n\tS-time-per-apply: " << heat_d_dd->S()->TimePerApply()
<< "\n\ttotal-time-apply: " << heat_d_dd->TotalTimeApply()
<< "\n\ttotal-time-construct: " << heat_d_dd->TotalTimeConstruct()
<< std::flush;
}

if (print_centers) {
1 change: 1 addition & 0 deletions src/datastructures/boost.hpp
@@ -3,6 +3,7 @@
#include <boost/container/options.hpp>
#include <boost/container/small_vector.hpp>
#include <boost/container/static_vector.hpp>
#include <vector>
template <typename I, size_t N>
using SmallVector = boost::container::small_vector<I, N>;

5 changes: 3 additions & 2 deletions src/datastructures/multi_tree_view.ipp
@@ -87,13 +87,14 @@ std::vector<I*> MultiNodeViewInterface<I, T...>::Union(
// Now do the union magic in all dimensions.
static_for<dim>([&queue, &my_node, &other_node, &call_filter](auto i) {
// Get a list of all children of the other_node in axis `i`.
static std::vector<I_other*> filtered_children;
static thread_local std::vector<I_other*> filtered_children;
filtered_children.clear();
for (const auto& other_child_i : other_node->children(i))
if (call_filter(other_child_i))
filtered_children.emplace_back(other_child_i);

static std::vector<std::tuple_element_t<i, TupleNodes>> other_children_i;
static thread_local std::vector<std::tuple_element_t<i, TupleNodes>>
other_children_i;
other_children_i.clear();
for (const auto& other_child_i : filtered_children)
other_children_i.emplace_back(std::get<i>(other_child_i->nodes()));
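The `static` to `static thread_local` change above keeps the allocation-reuse optimization (the vectors keep their capacity across calls) while making `Union` safe to call from several OpenMP threads at once; a shared `static` buffer would be a data race. A small illustration of the same pattern, with a hypothetical helper function:

```cpp
#include <omp.h>

#include <cstdio>
#include <vector>

// Hypothetical helper that reuses a per-thread buffer across calls instead of
// reallocating it on every invocation.
const std::vector<int>& CollectMultiples(int of, int below) {
  static thread_local std::vector<int> buffer;  // one buffer per thread
  buffer.clear();                               // keep capacity, drop contents
  for (int i = of; i < below; i += of) buffer.push_back(i);
  return buffer;
}

int main() {
#pragma omp parallel for
  for (int n = 1; n <= 16; ++n) {
    // Safe: every thread clears and fills only its own thread_local buffer.
    const auto& result = CollectMultiples(n, 100);
    std::printf("n=%d count=%zu\n", n, result.size());
  }
  return 0;
}
```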
32 changes: 20 additions & 12 deletions src/datastructures/tree.hpp
@@ -1,5 +1,8 @@
#pragma once
#include <omp.h>

#include <algorithm>
#include <array>
#include <memory>
#include <queue>
#include <utility>
@@ -17,6 +20,12 @@ using T_func_noop = decltype(func_noop);
using T_func_true = decltype(func_true);
using T_func_false = decltype(func_false);

// Global variable holding the current thread number.
#ifndef MAX_NUMBER_THREADS
#define MAX_NUMBER_THREADS 1
#endif
static thread_local int thread_number = omp_get_thread_num();

template <typename I>
struct NodeTrait; // This should define N_children and N_parents.

@@ -38,8 +47,8 @@ class Node {
}

int level() const { return level_; }
bool marked() const { return marked_; }
void set_marked(bool value) { marked_ = value; }
bool marked() const { return marked_[thread_number]; }
void set_marked(bool value) { marked_[thread_number] = value; }
bool is_leaf() const { return children_.size() == 0; }
inline bool is_metaroot() const { return (level_ == -1); }
const auto &parents() const { return parents_; }
@@ -48,20 +57,19 @@ class Node {
// General data field for universal storage.
template <typename T>
T *data() {
assert(data_ != nullptr);
return static_cast<T *>(data_);
assert(data_[thread_number] != nullptr);
return static_cast<T *>(data_[thread_number]);
}

template <typename T>
void set_data(T *value) {
assert(data_ == nullptr);
data_ = static_cast<void *>(value);
assert(data_[thread_number] == nullptr);
data_[thread_number] = static_cast<void *>(value);
}
void reset_data() {
assert(data_ != nullptr);
data_ = nullptr;
assert(data_[thread_number] != nullptr);
data_[thread_number] = nullptr;
}
bool has_data() { return data_ != nullptr; }
bool has_data() { return data_[thread_number] != nullptr; }

template <typename Func = T_func_noop>
std::vector<I *> Bfs(bool include_metaroot = false,
@@ -93,9 +101,9 @@ class Node {
}

protected:
bool marked_ = false;
int level_;
void *data_ = nullptr;
std::array<unsigned short, MAX_NUMBER_THREADS> marked_{0};
std::array<void *, MAX_NUMBER_THREADS> data_{nullptr};

// Store children/parents as raw pointers.
SmallVector<I *, NodeTrait<I>::N_children> children_;
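With the change above, a node no longer carries a single `marked_` flag and `data_` pointer but one slot per thread, indexed through the cached `thread_number`, so concurrent traversals can mark nodes and attach scratch data without locks. A stripped-down sketch of that idea (the `ToyNode` class is illustrative, not the PR's `Node`):

```cpp
#include <omp.h>

#include <array>
#include <cassert>

#ifndef MAX_NUMBER_THREADS
#define MAX_NUMBER_THREADS 1  // The real bound comes from CMake, as in tree.hpp.
#endif

// Cached once per thread, mirroring the thread_number variable in tree.hpp.
static thread_local int thread_number = omp_get_thread_num();

// Hypothetical node with one marker slot per thread instead of a single bool.
struct ToyNode {
  std::array<unsigned short, MAX_NUMBER_THREADS> marked{};

  bool is_marked() const { return marked[thread_number]; }
  void set_marked(bool value) { marked[thread_number] = value; }
};

int main() {
  ToyNode node;
#pragma omp parallel num_threads(MAX_NUMBER_THREADS)
  {
    // Each thread reads and writes only its own slot, so traversals running on
    // different threads cannot observe each other's markings.
    assert(!node.is_marked());
    node.set_marked(true);
    assert(node.is_marked());
  }
  return 0;
}
```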
1 change: 1 addition & 0 deletions src/space/CMakeLists.txt
@@ -1,4 +1,5 @@
add_library(space STATIC triangulation.cpp initial_triangulation.cpp basis.cpp operators.cpp triangulation_view.cpp linear_form.cpp integration.cpp)
target_link_libraries(space PUBLIC OpenMP::OpenMP_CXX)

# Executables
add_executable(space_adaptive adaptive.cpp)
12 changes: 6 additions & 6 deletions src/space/operators.cpp
@@ -213,13 +213,13 @@ void CGInverse<ForwardOp>::ApplySingleScale(Eigen::VectorXd &vec_SS) const {

// Define the class variables.
template <typename ForwardOp>
std::vector<std::vector<std::pair<uint, double>>>
thread_local std::vector<std::vector<std::pair<uint, double>>>
MultigridPreconditioner<ForwardOp>::row_mat;
template <typename ForwardOp>
std::vector<std::vector<Element2D *>>
thread_local std::vector<std::vector<Element2D *>>
MultigridPreconditioner<ForwardOp>::patches;
template <typename ForwardOp>
std::vector<std::vector<uint>>
thread_local std::vector<std::vector<uint>>
MultigridPreconditioner<ForwardOp>::vertices_relaxation;

template <typename ForwardOp>
@@ -346,12 +346,12 @@ void MultigridPreconditioner<ForwardOp>::ApplySingleScale(
// Shortcut.
const uint V = triang_.V;

// Reuse a static variable for storing the corrections.
static std::vector<double> e;
// Reuse a static variable for storing the row of a matrix.
static thread_local std::vector<double> e;
e.reserve(V * 3);

// Reuse a static variable for storing the residual in the downward cycle.
static std::vector<double> r_down;
static thread_local std::vector<double> r_down;
r_down.reserve(V * 3);

// Initialize the multigrid matrix (row_mat).
6 changes: 3 additions & 3 deletions src/space/operators.hpp
@@ -221,9 +221,9 @@ class MultigridPreconditioner : public BackwardOperator {
DirectInverse<ForwardOp> initial_triang_solver_;

// (Static) variables reused for calculation of the multigrid matrix.
static std::vector<std::vector<std::pair<uint, double>>> row_mat;
static std::vector<std::vector<Element2D *>> patches;
static std::vector<std::vector<uint>> vertices_relaxation;
static thread_local std::vector<std::vector<std::pair<uint, double>>> row_mat;
static thread_local std::vector<std::vector<Element2D *>> patches;
static thread_local std::vector<std::vector<uint>> vertices_relaxation;
};

template <template <typename> class InverseOp>
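One C++ detail behind the paired changes in operators.hpp and operators.cpp: when a static data member is declared `thread_local`, its out-of-class definition must repeat `thread_local`, which is why the header and the source file change together. A minimal sketch with a hypothetical class:

```cpp
#include <vector>

// Hypothetical class that reuses a per-thread scratch vector across calls,
// analogous to MultigridPreconditioner's row_mat / patches / vertices_relaxation.
class Smoother {
 public:
  static void Relax(int n) {
    scratch_.assign(n, 0.0);  // per-thread storage, so concurrent calls are safe
    // ... work on scratch_ ...
  }

 private:
  // Declaration in the header: thread_local here ...
  static thread_local std::vector<double> scratch_;
};

// ... and thread_local again on the definition, otherwise the program is
// ill-formed (the specifier must appear on every declaration of the member).
thread_local std::vector<double> Smoother::scratch_;

int main() {
  Smoother::Relax(16);
  return 0;
}
```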
16 changes: 16 additions & 0 deletions src/spacetime/bilinear_form.hpp
@@ -57,6 +57,8 @@ class BilinearForm
auto sigma() { return sigma_; }
auto theta() { return theta_; }

std::string Information() final;

protected:
// References to in/out vectors.
DblVecIn *vec_in_;
@@ -72,6 +74,7 @@ class BilinearForm
// Debug information.
using BilinearFormBase<DblVecIn, DblVecOut>::time_construct_;
using BilinearFormBase<DblVecIn, DblVecOut>::time_apply_;
using BilinearFormBase<DblVecIn, DblVecOut>::time_apply_split_;
using BilinearFormBase<DblVecIn, DblVecOut>::num_apply_;

// Define frozen templates, useful for storing the bil forms.
@@ -87,6 +90,14 @@ class BilinearForm
std::vector<Time::BilinearForm<OperatorTime, FI<0>, FO<0>>> bil_time_low_;
std::vector<Time::BilinearForm<OperatorTime, FI<0>, FO<0>>> bil_time_upp_;
std::vector<space::BilinearForm<OperatorSpace, FO<1>, FO<1>>> bil_space_upp_;

// Store ordering for spatial parallelism.
std::vector<FI<0> *> sigma_proj_0_;
std::vector<FO<1> *> theta_proj_1_;
std::vector<FO<0> *> vec_out_proj_0_;
std::vector<FO<1> *> vec_out_proj_1_;
std::vector<size_t> ordering_sigma_;
std::vector<size_t> ordering_vec_out_;
};

// Helper functions.
@@ -146,6 +157,7 @@ class BlockDiagonalBilinearForm
Eigen::VectorXd Apply(const Eigen::VectorXd &v) final;
DblVecIn *vec_in() const final { return vec_in_; }
DblVecOut *vec_out() const final { return vec_out_; }
std::string Information() final;

protected:
bool use_cache_;
@@ -157,13 +169,17 @@
// Debug information.
using BilinearFormBase<DblVecIn, DblVecOut>::time_construct_;
using BilinearFormBase<DblVecIn, DblVecOut>::time_apply_;
using BilinearFormBase<DblVecIn, DblVecOut>::time_apply_split_;
using BilinearFormBase<DblVecIn, DblVecOut>::num_apply_;

// The (cached) bilinear forms.
template <size_t i>
using FI = datastructures::FrozenDoubleNode<
datastructures::DoubleNodeVector<BasisTimeIn, BasisSpace>, i>;
std::vector<space::BilinearForm<OperatorSpace, FI<1>, FI<1>>> space_bilforms_;

std::vector<FI<0> *> vec_out_proj_0_;
std::vector<size_t> ordering_;
};

template <typename OpSpace, typename BTimeIn, typename BTimeOut>
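The new `ordering_*` members cache a precomputed work order for the spatial loops, in line with the commits about sorting the work in the block-diagonal part and experimenting with dynamic/guided schedules. A sketch of the general load-balancing idea under those assumptions (the cost model, item sizes, and `ApplyOne` are hypothetical, not the PR's code):

```cpp
#include <omp.h>

#include <algorithm>
#include <cstddef>
#include <numeric>
#include <vector>

// Stand-in for applying one spatial bilinear form; cost grows with the size.
double ApplyOne(int size) { return 0.5 * size; }

int main() {
  // Hypothetical per-item work sizes (e.g. fiber lengths); real costs vary a lot.
  std::vector<int> sizes = {3, 40, 7, 120, 1, 64, 9, 18};

  // Precompute an ordering from largest to smallest item, analogous to the
  // cached ordering_* members, so the expensive items are handed out first.
  std::vector<std::size_t> ordering(sizes.size());
  std::iota(ordering.begin(), ordering.end(), std::size_t{0});
  std::sort(ordering.begin(), ordering.end(),
            [&](std::size_t a, std::size_t b) { return sizes[a] > sizes[b]; });

  double total = 0.0;
  // schedule(dynamic, 1): each thread grabs the next-largest remaining item,
  // which keeps all threads busy even when the item costs differ wildly.
#pragma omp parallel for schedule(dynamic, 1) reduction(+ : total)
  for (std::size_t k = 0; k < ordering.size(); ++k)
    total += ApplyOne(sizes[ordering[k]]);

  return total > 0 ? 0 : 1;  // use the result so it is not optimized away
}
```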