Skip to content

Commit 8d09a41

Browse files
authored
Merge pull request #125 from marty1885/apichange
another batch of needed additional features
2 parents b5915db + 8fb958c commit 8d09a41

File tree

9 files changed

+161
-9
lines changed

9 files changed

+161
-9
lines changed

Etaler/Backends/CPUBackend.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -682,6 +682,11 @@ void CPUBackend::decaySynapses(TensorImpl* connections, TensorImpl* permeances,
682682
});
683683
}
684684

685+
std::shared_ptr<TensorImpl> CPUBackend::abs(const TensorImpl* x)
686+
{
687+
return uniaryOp(x, [](auto v){return std::abs(v);});
688+
}
689+
685690
std::shared_ptr<TensorImpl> CPUBackend::exp(const TensorImpl* x)
686691
{
687692
return uniaryOp(x, [](auto v){return std::exp(v);});

Etaler/Backends/CPUBackend.hpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,7 @@ struct ETALER_EXPORT CPUBackend : public Backend
7676
virtual std::shared_ptr<TensorImpl> sum(const TensorImpl* x, size_t chunk_size, DType dtype=DType::Unknown) override;
7777

7878
//Unary Operations
79+
virtual std::shared_ptr<TensorImpl> abs(const TensorImpl* x) override;
7980
virtual std::shared_ptr<TensorImpl> exp(const TensorImpl* x) override;
8081
virtual std::shared_ptr<TensorImpl> negate(const TensorImpl* x) override;
8182
virtual std::shared_ptr<TensorImpl> inverse(const TensorImpl* x) override;

Etaler/Backends/OpenCLBackend.cpp

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -991,6 +991,18 @@ std::shared_ptr<TensorImpl> OpenCLBackend::applyBinaryOp(const TensorImpl* x1, c
991991
return res;
992992
}
993993

994+
std::shared_ptr<TensorImpl> OpenCLBackend::abs(const TensorImpl* x)
995+
{
996+
DType result_type = [&x](){
997+
auto dtype = x->dtype();
998+
if(dtype == DType::Bool)
999+
return DType::Int32;
1000+
return dtype;
1001+
}();
1002+
const char* func = (result_type == DType::Float || result_type == DType::Half ? "#define f(x) (fabs((ResType)x))" : "#define f(x) (abs((ResType)x))");
1003+
return applyUnaryOp(x, func, result_type);
1004+
}
1005+
9941006
std::shared_ptr<TensorImpl> OpenCLBackend::exp(const TensorImpl* x)
9951007
{
9961008
DType result_type = x->dtype() == DType::Half ? DType::Half : DType::Float;

Etaler/Backends/OpenCLBackend.hpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,7 @@ struct ETALER_EXPORT OpenCLBackend : public Backend
123123
virtual void assign(TensorImpl* dest, const TensorImpl* src) override;
124124
virtual std::shared_ptr<TensorImpl> sum(const TensorImpl* x, size_t chunk_size, DType dtype=DType::Unknown) override;
125125

126+
virtual std::shared_ptr<TensorImpl> abs(const TensorImpl* x) override;
126127
virtual std::shared_ptr<TensorImpl> exp(const TensorImpl* x) override;
127128
virtual std::shared_ptr<TensorImpl> negate(const TensorImpl* x) override;
128129
virtual std::shared_ptr<TensorImpl> inverse(const TensorImpl* x) override;

Etaler/Core/Backend.hpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ struct ETALER_EXPORT Backend : public std::enable_shared_from_this<Backend>
4646
virtual std::shared_ptr<TensorImpl> sum(const TensorImpl* x, size_t chunk_size, DType dtype=DType::Unknown) { throw notImplemented("sum");}
4747

4848
//Unary operations
49+
virtual std::shared_ptr<TensorImpl> abs(const TensorImpl* x) { throw notImplemented("abs");}
4950
virtual std::shared_ptr<TensorImpl> exp(const TensorImpl* x) { throw notImplemented("exp");}
5051
virtual std::shared_ptr<TensorImpl> negate(const TensorImpl* x) { throw notImplemented("negate");}
5152
virtual std::shared_ptr<TensorImpl> inverse(const TensorImpl* x) { throw notImplemented("inverse");}

Etaler/Core/Tensor.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,7 @@ Tensor Tensor::view(svector<Range> ranges) const
174174
throw EtError("Cannot view a tensor of " + std::to_string(dimentions()) + " with " + std::to_string(ranges.size()) + " dimentions");
175175

176176
while(ranges.size() != dimentions())
177-
ranges.push_back(all());
177+
ranges.push_back(et::all());
178178

179179
auto resolve_index = [](intmax_t idx, intmax_t size) -> intmax_t {
180180
if(idx < 0)
@@ -339,7 +339,7 @@ Tensor et::cat(const svector<Tensor>& tensors, intmax_t dim)
339339
intmax_t pos = 0;
340340
svector<Range> ranges;
341341
for(size_t i=0;i<res_shape.size();i++)
342-
ranges.push_back(all());
342+
ranges.push_back(et::all());
343343

344344
for(const auto& t : tensors) {
345345
ranges[dim] = Range(pos, pos+t.shape()[dim]);

Etaler/Core/Tensor.hpp

Lines changed: 27 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ template <typename T>
2222
struct ETALER_EXPORT TensorIterator
2323
{
2424
// Iterator properties
25-
using iterator_category = std::bidirectional_iterator_tag;
25+
using iterator_category = std::random_access_iterator_tag;
2626
using value_type = T;
2727
using raw_value_type = std::remove_const_t<value_type>; // extra
2828
using difference_type = intmax_t;
@@ -36,12 +36,20 @@ struct ETALER_EXPORT TensorIterator
3636
value_type operator*() { return t_.view({offset_}); }
3737
// Unfortunately, returning a pointer is not doable
3838
pointer operator->() { return std::make_unique<raw_value_type>(this->operator*()); }
39-
bool operator==(ThisIterator rhs) const { return offset_ == rhs.offset_ && t_.pimpl() == rhs.t_.pimpl(); }
40-
bool operator!=(ThisIterator rhs) const { return !(*this == rhs); }
39+
bool operator==(const ThisIterator& rhs) const { return offset_ == rhs.offset_ && t_.pimpl() == rhs.t_.pimpl(); }
40+
bool operator!=(const ThisIterator& rhs) const { return !(*this == rhs); }
4141
ThisIterator& operator++() {offset_ += 1; return *this;}
4242
ThisIterator operator++(int) {ThisIterator retval = *this; ++(*this); return retval;}
4343
ThisIterator& operator--() {offset_ -= 1; return *this;}
4444
ThisIterator operator--(int) {ThisIterator retval = *this; --(*this); return retval;}
45+
difference_type operator- (const ThisIterator& rhs) const { return offset_ - rhs.offset_; }
46+
ThisIterator operator+(intmax_t n) {return ThisIterator(t_,offset_+n);}
47+
ThisIterator operator-(intmax_t n) {return ThisIterator(t_,offset_-n);}
48+
value_type operator[](intmax_t n) { return *operator+(n); }
49+
bool operator< (const ThisIterator& rhs) const { return offset_ < rhs.offset_; }
50+
bool operator> (const ThisIterator& rhs) const { return offset_ > rhs.offset_; }
51+
bool operator<= (const ThisIterator& rhs) const { return offset_ <= rhs.offset_; }
52+
bool operator>= (const ThisIterator& rhs) const { return offset_ >= rhs.offset_; }
4553
value_type t_;
4654
intmax_t offset_ = 0;
4755
};
@@ -69,6 +77,10 @@ struct ETALER_EXPORT Tensor
6977
pimpl_ = backend->createTensor(s, dtype, data);
7078
}
7179

80+
template<typename T>
81+
Tensor(const std::vector<T>& vec, Backend* backend=defaultBackend())
82+
: Tensor(Shape{intmax_t(vec.size())}, vec.data()) {}
83+
7284
Tensor(int v) : Tensor({1}, &v) {}
7385
Tensor(float v) : Tensor({1}, &v) {}
7486
Tensor(bool v) : Tensor({1}, &v) {}
@@ -93,6 +105,7 @@ struct ETALER_EXPORT Tensor
93105
size_t dimentions() const {return pimpl_->dimentions();}
94106
void resize(Shape s) {pimpl()->resize(s);}
95107
bool iscontiguous() const {return pimpl()->iscontiguous();}
108+
bool isplain() const {return pimpl()->isplain();}
96109
Shape stride() const {return pimpl()->stride();}
97110

98111
Backend* backend() const {return pimpl()->backend();}
@@ -116,7 +129,7 @@ struct ETALER_EXPORT Tensor
116129
return realize().toHost<T>();
117130
if(dtype() != typeToDType<T>()) {
118131
throw EtError("toHost() failed. Requested type and dtype mismatch. " + demangle(typeid(T).name())
119-
+ " requested but " + to_ctype_string(dtype()) + "is stored.");
132+
+ " requested but " + to_ctype_string(dtype()) + " is stored.");
120133
}
121134
std::vector<T> res(size());
122135
backend()->copyToHost(pimpl(), res.data());
@@ -204,6 +217,9 @@ struct ETALER_EXPORT Tensor
204217
Tensor logical_and(const Tensor& other) const { auto [a, b] = brodcast(other); return backend()->logical_and(a(), b()); }
205218
Tensor logical_or(const Tensor& other) const { auto [a, b] = brodcast(other); return backend()->logical_or(a(), b()); }
206219

220+
inline bool any() const { return cast(DType::Bool).sum(std::nullopt, DType::Bool).item<uint8_t>(); }
221+
inline bool all() const { return cast(DType::Bool).sum(std::nullopt).item<int32_t>() == int32_t(size()); }
222+
207223
Tensor operator- () const {return negate();}
208224
Tensor operator+ () const {return *this;}
209225
Tensor operator! () const {return logical_not();}
@@ -228,6 +244,7 @@ struct ETALER_EXPORT Tensor
228244
Tensor operator [] (svector<Range> r) { return view(r); }
229245

230246
Tensor sum(std::optional<intmax_t> dim=std::nullopt, DType dtype=DType::Unknown) const;
247+
Tensor abs() const { return backend()->abs(pimpl()); }
231248
bool isSame (const Tensor& other) const;
232249

233250
//Utils
@@ -292,7 +309,7 @@ inline Tensor realize(const Tensor& t)
292309

293310
inline Tensor ravel(const Tensor& t)
294311
{
295-
if(t.iscontiguous() == true)
312+
if(t.isplain() == true)
296313
return t;
297314
return t.realize();
298315
}
@@ -372,11 +389,13 @@ inline Tensor concat(const svector<Tensor>& tensors, intmax_t dim=0) { return ca
372389
inline Tensor concatenate(const svector<Tensor>& tensors, intmax_t dim=0) { return cat(tensors, dim); }
373390
std::pair<Tensor, Tensor> brodcast_tensors(const Tensor& a, const Tensor& b);
374391

392+
inline Tensor abs(const Tensor& x) { return x.abs(); }
375393
inline Tensor exp(const Tensor& x) { return x.exp(); }
376394
inline Tensor negate(const Tensor& x) { return x.negate(); }
377395
inline Tensor inverse(const Tensor& x) { return x.inverse(); }
378396
inline Tensor log(const Tensor& x) { return x.log(); }
379397
inline Tensor logical_not(const Tensor& x) { return x.logical_not(); }
398+
inline Tensor isclose(const Tensor& x, const Tensor& y, float rtol=1e-5f, float atol=1e-8f) { return abs(x-y) <= (atol + rtol * abs(y)); }
380399

381400
inline Tensor add(const Tensor& x1, const Tensor& x2) { return x1.add(x2); }
382401
inline Tensor subtract(const Tensor& x1, const Tensor& x2) { return x1.subtract(x2); }
@@ -388,6 +407,9 @@ inline Tensor lesser(const Tensor& x1, const Tensor& x2) { return x1.lesser(x2);
388407
inline Tensor logical_and(const Tensor& x1, const Tensor& x2) { return x1.logical_and(x2); }
389408
inline Tensor logical_or(const Tensor& x1, const Tensor& x2) { return x1.logical_or(x2); }
390409

410+
inline bool all(const Tensor& t) { return t.all(); }
411+
inline bool any(const Tensor& t) { return t.any(); }
412+
391413
inline Tensor zeros_like(const Tensor& x) { return zeros(x.shape(), x.dtype(), x.backend()); }
392414
inline Tensor ones_like(const Tensor& x) { return ones(x.shape(), x.dtype(), x.backend()); }
393415
}

Etaler/Core/TypeHelpers.hpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,4 +31,12 @@ struct is_specialization<Ref<Args...>, Ref>: std::true_type {};
3131
template<template<typename...> class Ref, typename... Args>
3232
const bool is_specialization_v = is_specialization<Ref<Args...>, Ref>::value;
3333

34+
template <typename T, typename = void>
35+
struct is_container : std::false_type {};
36+
37+
template <typename T>
38+
struct is_container<T
39+
, std::void_t<decltype(std::declval<T>().data())
40+
, decltype(std::declval<T>().size())>> : std::true_type {};
41+
3442
}

tests/common_tests.cpp

Lines changed: 104 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,13 @@ TEST_CASE("Testing Tensor", "[Tensor]")
103103
CHECK(r.item<uint8_t>() == true);
104104
}
105105

106+
SECTION("Create Tensor from vector") {
107+
std::vector<int> v = {1, 2, 3, 4};
108+
Tensor t = Tensor(v);
109+
CHECK(t.size() == intmax_t(v.size()));
110+
CHECK(t.dtype() == DType::Int);
111+
}
112+
106113
SECTION("tensor like") {
107114
Tensor t = ones({4,4});
108115
Tensor q = ones_like(t);
@@ -293,6 +300,17 @@ TEST_CASE("Testing Tensor", "[Tensor]")
293300
//Check a subset to see whether the result is correct
294301
CHECK(t[{2, 2}].item<int>() == 11);
295302
}
303+
304+
SECTION("all/any test") {
305+
CHECK(ones({3}).any() == true);
306+
CHECK(zeros({3}).any() == false);
307+
CHECK(ones({3}).all() == true);
308+
CHECK(zeros({3}).all() == false);
309+
CHECK((ones({7}) == zeros({7})).all() == false);
310+
CHECK((ones({7}) == zeros({7})).any() == false);
311+
CHECK((ones({4,4}) == t).any() == true);
312+
CHECK((ones({4,4}) == t).all() == false);
313+
}
296314
}
297315

298316
SECTION("item") {
@@ -311,8 +329,8 @@ TEST_CASE("Testing Tensor", "[Tensor]")
311329
Tensor q = zeros({3, 4});
312330
STATIC_REQUIRE(std::is_same_v<Tensor::iterator::value_type, Tensor>);
313331

314-
// Tensor::iterator should be bidirectional
315-
// Reference: http://www.cplusplus.com/reference/iterator/BidirectionalIterator/
332+
// Tensor::iterator should be random access
333+
// Reference: http://www.cplusplus.com/reference/iterator/RandomAccessIterator/
316334
STATIC_REQUIRE(std::is_default_constructible_v<Tensor::iterator>);
317335
STATIC_REQUIRE(std::is_copy_constructible_v<Tensor::iterator>);
318336
STATIC_REQUIRE(std::is_copy_assignable_v<Tensor::iterator>);
@@ -321,6 +339,8 @@ TEST_CASE("Testing Tensor", "[Tensor]")
321339
CHECK(t.begin() == t.begin());
322340
CHECK((*t.begin()).shape() == Shape{4});
323341
CHECK(t.begin()->shape() == Shape{4});
342+
CHECK(t.end() - t.begin() == t.shape()[0]);
343+
CHECK(t.begin()[2].isSame(*t.back()) == true);
324344
auto it1 = t.begin(), it2 = t.begin();
325345
it1++;
326346
++it2;
@@ -341,6 +361,36 @@ TEST_CASE("Testing Tensor", "[Tensor]")
341361
CHECK(num_iteration == t.shape()[0]);
342362
CHECK(t.sum().item<int>() == 42*t.size());
343363
}
364+
365+
SECTION("swapping Tensor") {
366+
std::vector<int> v1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10 , 11, 12};
367+
std::vector<int> v2 = {11, 12, 13, 14, 15, 16, 17, 18, 19, 20 , 21, 22};
368+
Tensor t = Tensor(v1).reshape({3, 4});
369+
Tensor q = Tensor(v2).reshape({3, 4});
370+
Tensor t_old = t.copy();
371+
Tensor q_old = q.copy();
372+
373+
SECTION("swap") {
374+
swap(t, q);
375+
CHECK(t.isSame(q_old));
376+
CHECK(q.isSame(t_old));
377+
}
378+
379+
SECTION("swapping views") {
380+
swap(q[{1}], t[{2}]);
381+
CHECK(t[{2}].isSame(q_old[{1}]));
382+
CHECK(q[{1}].isSame(t_old[{2}]));
383+
}
384+
385+
SECTION("swaping itself") {
386+
swap(t[{0}], t[{0}]);
387+
REQUIRE(t[{0}].isSame(t_old[{0}]));
388+
389+
swap(t[{0}], t[{1}]);
390+
REQUIRE(t[{0}].isSame(t_old[{1}]));
391+
REQUIRE(t[{1}].isSame(t_old[{0}]));
392+
}
393+
}
344394
}
345395

346396
TEST_CASE("Testing Encoders", "[Encoder]")
@@ -768,6 +818,14 @@ TEST_CASE("Type system")
768818
return true;
769819
}();
770820

821+
SECTION("abs") {
822+
CHECK(abs(ones({1}, DType::Bool)).dtype() == DType::Int32);
823+
CHECK(abs(ones({1}, DType::Int32)).dtype() == DType::Int32);
824+
CHECK(abs(ones({1}, DType::Float)).dtype() == DType::Float);
825+
if(support_fp16)
826+
CHECK(abs(ones({1}, DType::Half)).dtype() == DType::Half);
827+
}
828+
771829
SECTION("exp") {
772830
CHECK(exp(ones({1}, DType::Bool)).dtype() == DType::Float);
773831
CHECK(exp(ones({1}, DType::Int32)).dtype() == DType::Float);
@@ -876,6 +934,50 @@ TEST_CASE("Type system")
876934
}
877935
}
878936

937+
// TODO: Should I count this as an integration test?
938+
// This test checks all components of Tensor works together properly
939+
TEST_CASE("Complex Tensor operations")
940+
{
941+
SECTION("Vector inner product") {
942+
std::vector<int> v1 = {1, 6, 7, 9, 15, 6};
943+
std::vector<int> v2 = {3, 7, 8, -1, 6, 15};
944+
REQUIRE(v1.size() == v2.size());
945+
Tensor a = Tensor(v1);
946+
Tensor b = Tensor(v2);
947+
948+
CHECK((a*b).sum().item<int>() == std::inner_product(v1.begin(), v1.end(), v2.begin(), 0));
949+
}
950+
951+
SECTION("shuffle") {
952+
std::mt19937 rng;
953+
std::vector<int> v1 = {1, 8, 6, 7
954+
, 3, 2, 5, 6
955+
, 4, 3, 2, 7
956+
, 9, 0 ,1, 1};
957+
Tensor a = Tensor(v1).reshape({4,4});
958+
std::shuffle(a.begin(), a.end(), rng);
959+
CHECK(std::accumulate(v1.begin(), v1.end(), 0) == a.sum().item<int>());
960+
}
961+
962+
SECTION("find_if") {
963+
std::vector<int> v1 = {1, 8, 6, 7
964+
, 3, 2, 5, 6
965+
, 4, 3, 2, 7
966+
, 9, 0 ,1, 1};
967+
Tensor a = Tensor(v1).reshape({4,4});
968+
Tensor b = a[{0}];
969+
970+
CHECK(std::find_if(a.begin(), a.end(), [&b](auto t){ return t.isSame(b); }) != a.end());
971+
}
972+
973+
SECTION("transform") {
974+
Tensor a = ones({12, 6});
975+
Tensor b = ones({12, 6});
976+
std::transform(a.begin(), a.end(), b.begin(), [](const auto& t){return zeros_like(t);});
977+
CHECK(b.isSame(zeros_like(a)));
978+
}
979+
}
980+
879981
// TEST_CASE("Serealize")
880982
// {
881983
// using namespace et;

0 commit comments

Comments
 (0)