Skip to content

Commit a211c68

Browse files
committed
introduced size_of
1 parent f678ad7 commit a211c68

File tree

6 files changed

+142
-0
lines changed

6 files changed

+142
-0
lines changed

src/TiledArray/dense_shape.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,10 @@
2727
#define TILEDARRAY_DENSE_SHAPE_H__INCLUDED
2828

2929
#include <TiledArray/config.h>
30+
31+
#include <TiledArray/platform.h>
3032
#include <TiledArray/type_traits.h>
33+
3134
#include <cstdint>
3235

3336
namespace madness {
@@ -391,6 +394,11 @@ class DenseShape {
391394
std::numeric_limits<value_type>::epsilon();
392395
}; // class DenseShape
393396

397+
template <MemorySpace S>
398+
std::size_t size_of(const DenseShape& shape) {
399+
return sizeof(shape);
400+
}
401+
394402
constexpr inline bool operator==(const DenseShape& a, const DenseShape& b) {
395403
return true;
396404
}

src/TiledArray/dist_array.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1743,6 +1743,19 @@ class DistArray : public madness::archive::ParallelSerializableObject {
17431743

17441744
}; // class DistArray
17451745

1746+
/// \return the number of bytes used by \p t in this rank's memory space
1747+
/// `S`
1748+
template <MemorySpace S, typename Tile, typename Policy>
1749+
std::size_t size_of(const DistArray<Tile, Policy>& da) {
1750+
std::size_t result = 0;
1751+
result += size_of<S>(da.shape());
1752+
// add up local tile's contributions
1753+
for (const auto& tile_ref : da) {
1754+
result += size_of<S>(tile_ref.get());
1755+
}
1756+
return result;
1757+
}
1758+
17461759
#ifndef TILEDARRAY_HEADER_ONLY
17471760

17481761
extern template class DistArray<Tensor<double>, DensePolicy>;

src/TiledArray/sparse_shape.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828

2929
#include <TiledArray/fwd.h>
3030

31+
#include <TiledArray/platform.h>
3132
#include <TiledArray/tensor.h>
3233
#include <TiledArray/tensor/shift_wrapper.h>
3334
#include <TiledArray/tensor/tensor_interface.h>
@@ -1721,13 +1722,21 @@ class SparseShape {
17211722
return cast_abs_factor;
17221723
}
17231724

1725+
template <MemorySpace S, typename T_>
1726+
friend std::size_t size_of(const SparseShape<T_>& shape);
1727+
17241728
}; // class SparseShape
17251729

17261730
// Static member initialization
17271731
template <typename T>
17281732
typename SparseShape<T>::value_type SparseShape<T>::threshold_ =
17291733
std::numeric_limits<T>::epsilon();
17301734

1735+
template <MemorySpace S, typename T>
1736+
std::size_t size_of(const SparseShape<T>& shape) {
1737+
return size_of<S>(shape.tile_norms_);
1738+
}
1739+
17311740
/// Add the shape to an output stream
17321741

17331742
/// \tparam T the numeric type supporting the type of \c shape

src/TiledArray/tensor/tensor.h

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424

2525
#include "TiledArray/external/umpire.h"
2626
#include "TiledArray/host/env.h"
27+
#include "TiledArray/platform.h"
2728

2829
#include "TiledArray/math/blas.h"
2930
#include "TiledArray/math/gemm_helper.h"
@@ -2776,6 +2777,30 @@ class Tensor {
27762777

27772778
}; // class Tensor
27782779

2780+
/// \return the number of bytes used by \p t in memory space
2781+
/// `S`
2782+
/// \warning footprint of Range is approximated, will not be exact for
2783+
/// tensor orders highers than `TA_MAX_SOO_RANK_METADATA`
2784+
template <MemorySpace S, typename T, typename A>
2785+
std::size_t size_of(const Tensor<T, A>& t) {
2786+
std::size_t result = 0;
2787+
if constexpr (S == MemorySpace::Host) {
2788+
result += sizeof(t);
2789+
}
2790+
if (allocates_memory_space<S>(A{})) {
2791+
if (!t.empty()) {
2792+
if constexpr (is_constexpr_size_of_v<S, Tensor<T, A>>) {
2793+
result += t.size() * sizeof(T);
2794+
} else {
2795+
result += std::accumulate(
2796+
t.begin(), t.end(), std::size_t{0},
2797+
[](const std::size_t s, const T& t) { return s + size_of<S>(t); });
2798+
}
2799+
}
2800+
}
2801+
return result;
2802+
}
2803+
27792804
#ifdef TA_TENSOR_MEM_TRACE
27802805
template <typename T, typename A>
27812806
std::size_t Tensor<T, A>::trace_if_larger_than_ =

tests/dist_array.cpp

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -944,4 +944,63 @@ BOOST_AUTO_TEST_CASE(reduction) {
944944
BOOST_REQUIRE(array_norm = std::sqrt(TA::dot(array, array)));
945945
}
946946

947+
// Verifies size_of<MemorySpace::Host> on a sparse tensor-of-tensor
// DistArray: builds a 3x4-tile ToT array with zero 'diagonal' tiles,
// sums per-rank footprints, and checks the global byte count.
BOOST_AUTO_TEST_CASE(size_of) {
  using Numeric = double;
  using T = Tensor<Numeric>;
  using ToT = Tensor<T>;
  using Policy = SparsePolicy;
  using ArrayToT = DistArray<ToT, Policy>;

  // builds an inner tensor filled with 1s over range rng
  auto unit_T = [](Range const& rng) { return T(rng, Numeric{1}); };

  // builds an outer tile over rngo whose every element is unit_T(rngi)
  auto unit_ToT = [unit_T](Range const& rngo, Range const& rngi) {
    return ToT(rngo, unit_T(rngi));
  };

  size_t constexpr nrows = 3;
  size_t constexpr ncols = 4;
  TiledRange const trange({{0, 2, 5, 7}, {0, 5, 7, 10, 12}});
  // the rows/cols lookup tables below are indexed modulo nrows/ncols
  TA_ASSERT(trange.tiles_range().extent().at(0) == nrows &&
                trange.tiles_range().extent().at(1) == ncols,
            "Following code depends on this condition.");

  // this Range is used to construct all inner tensors of the tile with
  // tile index @c tix.
  auto inner_dims = [nrows, ncols](Range::index_type const& tix) -> Range {
    static std::array<size_t, nrows> const rows{7, 8, 9};
    static std::array<size_t, ncols> const cols{7, 8, 9, 10};

    TA_ASSERT(tix.size() == 2, "Only rank-2 tensor expected.");
    return Range({rows[tix.at(0) % nrows], cols[tix.at(1) % ncols]});
  };

  // let's make all 'diagonal' tiles zero
  auto zero_tile = [](Range::index_type const& tix) -> bool {
    return tix.at(0) == tix.at(1);
  };

  // tile generator for make_array: returns 0 norm for zero tiles so the
  // sparse policy drops them; otherwise fills the tile and returns its norm
  auto make_tile = [inner_dims,  //
                    zero_tile,   //
                    &trange,     //
                    unit_ToT](auto& tile, auto const& rng) {
    auto&& tix = trange.element_to_tile(rng.lobound());
    if (zero_tile(tix))
      return 0.;
    else {
      tile = unit_ToT(rng, inner_dims(tix));
      return tile.norm();
    }
  };

  auto& world = get_default_world();

  // all non-zero inner tensors of this ToT array are unit (ie all
  // inner tensors' elements are 1.)
  auto array = make_array<ArrayToT>(world, trange, make_tile);

  // sum per-rank local footprints into the global host-memory footprint
  auto sz0 = TiledArray::size_of<MemorySpace::Host>(array);
  world.gop.sum(sz0);
  // NOTE(review): 56688 bakes in sizeof(Tensor)/layout on the build
  // platform — presumably stable for supported toolchains; confirm
  BOOST_REQUIRE(sz0 == 56688);
}
9471006
BOOST_AUTO_TEST_SUITE_END()

tests/tensor.cpp

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -783,4 +783,32 @@ BOOST_AUTO_TEST_CASE(print) {
783783
// std::cout << tb;
784784
}
785785

786+
// Verifies size_of for plain and nested Tensors across memory spaces:
// empty tensors cost only their object footprint (host) or nothing
// (device spaces); non-empty tensors add their data buffers, and
// tensor-of-tensor recurses into non-empty inner tensors.
BOOST_AUTO_TEST_CASE(size_of) {
  // an empty tensor occupies only the object itself in host memory
  const auto empty_host = TiledArray::size_of<TiledArray::MemorySpace::Host>(TensorN{});
  BOOST_REQUIRE(empty_host == sizeof(TensorN));

  // ... and nothing at all in device or unified memory
  const auto empty_device = TiledArray::size_of<TiledArray::MemorySpace::Device>(TensorN{});
  BOOST_REQUIRE(empty_device == 0);

  const auto empty_um =
      TiledArray::size_of<TiledArray::MemorySpace::Device_UM>(TensorN{});
  BOOST_REQUIRE(empty_um == 0);

  // non-empty flat tensor: object + count * sizeof(element)
  const auto flat_bytes = TiledArray::size_of<TiledArray::MemorySpace::Host>(
      TensorZ(Range(2, 3, 4)));
  BOOST_REQUIRE(flat_bytes ==
                sizeof(TensorZ) + 2 * 3 * 4 * sizeof(TensorZ::value_type));

  // tensor-of-tensor with all-empty inner tensors: outer object + the
  // inner Tensor objects stored in the outer buffer, no inner data
  using TTD = Tensor<Tensor<double>>;
  const auto nested_empty_bytes =
      TiledArray::size_of<TiledArray::MemorySpace::Host>(TTD(Range(2, 3, 4)));
  BOOST_REQUIRE(nested_empty_bytes == sizeof(TTD) + 2 * 3 * 4 * sizeof(TTD::value_type));

  // populate one inner tensor: its 5x6 data buffer now contributes too
  TTD nested(Range(2, 3, 4));
  nested(0, 0, 0) = TensorD(Range(5, 6));
  const auto nested_bytes = TiledArray::size_of<TiledArray::MemorySpace::Host>(nested);
  BOOST_REQUIRE(nested_bytes == sizeof(TTD) + 2 * 3 * 4 * sizeof(TTD::value_type) +
                                    5 * 6 * sizeof(TTD::value_type::value_type));
}
786814
BOOST_AUTO_TEST_SUITE_END()

0 commit comments

Comments
 (0)