-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathindex.h
More file actions
153 lines (138 loc) · 5.43 KB
/
index.h
File metadata and controls
153 lines (138 loc) · 5.43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
/*
@copyright Russell Standish 2020
@author Russell Standish
This file is part of Civita.
Civita is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Civita is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Civita. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef CIVITA_INDEX_H
#define CIVITA_INDEX_H
// C++ standard library
#include <cstddef>   // std::ptrdiff_t, std::size_t
#include <cstdint>   // SIZE_MAX
#include <cstdlib>   // std::malloc, std::free
#include <map>
#include <memory>    // std::allocator (default for CIVITA_ALLOCATOR)
#include <mutex>
#include <new>       // std::bad_alloc
#include <set>
#include <vector>
#if defined(__cplusplus) && __cplusplus >= 202002L && !defined(__APPLE__)
#include <compare>   // std::strong_ordering
#endif
// C compatibility
#include <assert.h>
#ifndef CLASSDESC_ACCESS
#define CLASSDESC_ACCESS(x)
#endif
// This macro allows the allocator to be swapped for a custom
// allocator, such as needed when running this under an electron
// project. See https://sourceforge.net/p/minsky/ravel/564/
#ifndef CIVITA_ALLOCATOR
#define CIVITA_ALLOCATOR std::allocator
#endif
namespace civita
{
/// accounting hook invoked by LibCAllocator with the signed number of
/// bytes just allocated (positive) or freed (negative); defined in a
/// separate translation unit. NOTE(review): presumably maintains a
/// running total for memory-use reporting — confirm against the
/// implementation.
void trackAllocation(std::ptrdiff_t n);
/// Allocator routing storage through std::malloc/std::free, reporting
/// the signed byte delta of every allocation/deallocation to
/// trackAllocation(). Allows the allocator to be swapped when operator
/// new must be bypassed, such as when running under an electron
/// project. See https://sourceforge.net/p/minsky/ravel/564/
template <class T>
struct LibCAllocator
{
  using value_type=T;
  /// allocate uninitialised storage for \a n objects of type T
  /// @throw std::bad_alloc if the byte count overflows or malloc fails
  T* allocate(std::size_t n) {
    // guard against std::size_t overflow in sizeof(T)*n
    if (n>SIZE_MAX/sizeof(T)) throw std::bad_alloc();
    auto bytes=sizeof(T)*n;
    trackAllocation(static_cast<std::ptrdiff_t>(bytes));
    // static_cast is the correct named cast for void* -> T*
    if (auto r=static_cast<T*>(std::malloc(bytes))) return r;
    // roll back the accounting for the failed allocation before throwing
    trackAllocation(-static_cast<std::ptrdiff_t>(bytes));
    throw std::bad_alloc();
  }
  /// release storage previously obtained from allocate()
  void deallocate(T* p, std::size_t n) {
    // negate via a signed cast: unary minus on the unsigned sizeof(T)*n
    // would wrap to a huge positive value before conversion to ptrdiff_t
    trackAllocation(-static_cast<std::ptrdiff_t>(sizeof(T)*n));
    std::free(p);
  }
  bool operator==(const LibCAllocator&) const {return true;} // no state!
  bool operator!=(const LibCAllocator& x) const {return !operator==(x);}
};
/// Mixin providing the mutex that guards lazy population of the
/// lineal-offset lookup cache. A std::mutex is neither copyable nor
/// movable, so the copy operations are defined as deliberate no-ops:
/// every object keeps its own independent mutex, and copies never
/// touch either side's lock.
struct LinealOffsetMutex
{
  mutable std::mutex linealOffsetMutex;
  LinealOffsetMutex()=default;
  // copying intentionally transfers nothing — see class comment
  LinealOffsetMutex(const LinealOffsetMutex&) {}
  LinealOffsetMutex& operator=(const LinealOffsetMutex&) {return *this;}
#if defined(__cplusplus) && __cplusplus >= 202002L && !defined(__APPLE__)
  // all instances are interchangeable: the mutex carries no salient state
  std::strong_ordering operator<=>(const LinealOffsetMutex&) const
  {return std::strong_ordering::equal;}
#endif
  bool operator==(const LinealOffsetMutex&) const {return true;}
};
/// represents index concept for sparse tensors: the sorted list of
/// hypercube (dense) offsets at which data is actually present.
/// An empty Index denotes a dense tensor, where lineal and hypercube
/// indices coincide.
class Index: private LinealOffsetMutex
{
public:
  CLASSDESC_ACCESS(Index);
  using Impl=std::vector<std::size_t,CIVITA_ALLOCATOR<std::size_t>>;
  Index() {}
  /// construct from an ordered associative container (set, or map
  /// keyed by hypercube index) — see the assignment operators below
  template <class T> explicit
  Index(const T& indices) {*this=indices;}
  Index(const Index&)=default;
  Index(Index&&)=default;
  Index& operator=(const Index&)=default;
  Index& operator=(Index&&)=default;
  // can only assign ordered containers, which guarantee the stored
  // index vector comes out sorted and duplicate-free
  template <class T, class C, class A>
  Index& operator=(const std::set<T,C,A>& indices) {
    index.clear(); index.reserve(indices.size());
    linealOffsetLookup.clear(); // invalidate cache: contents change
    for (auto& i: indices) index.push_back(i);
    return *this;
  }
  template <class K, class V, class C, class A>
  Index& operator=(const std::map<K,V,C,A>& indices) {
    index.clear(); index.reserve(indices.size());
    linealOffsetLookup.clear(); // invalidate cache: contents change
    for (auto& i: indices) index.push_back(i.first);
    return *this;
  }
#if defined(__cplusplus) && __cplusplus >= 202002L && !defined(__APPLE__)
  /// compare on the index data only. linealOffsetLookup is a lazily
  /// populated cache, so two logically equal Index objects may hold
  /// different cache contents; the previously defaulted <=> compared
  /// the cache too and could wrongly distinguish equal objects.
  std::strong_ordering operator<=>(const Index& x) const {return index<=>x.index;}
  /// explicit, because a non-defaulted <=> no longer implies ==
  bool operator==(const Index& x) const {return index==x.index;}
#endif
  /// return hypercube index corresponding to lineal index i
  /// (identity mapping when this Index is empty, ie the tensor is dense)
  std::size_t operator[](std::size_t i) const {return index.empty()? i: index[i];}
  // invariant, should always be true
  bool noDuplicates() const {
    std::set<std::size_t,std::less<std::size_t>,CIVITA_ALLOCATOR<std::size_t>>
    tmp(index.begin(), index.end());
    return tmp.size()==index.size();
  }
  bool empty() const {return index.empty();}
  std::size_t size() const {return index.size();}
  void clear() {index.clear();linealOffsetLookup.clear();}
  /// return the lineal index of hypercube index h, or size() if not present
  std::size_t linealOffset(std::size_t h) const;
  Index::Impl::const_iterator begin() const {return index.begin();}
  Index::Impl::const_iterator end() const {return index.end();}
protected:
  Impl index; // sorted index vector
  // cached map of index to linealOffset value — mutable, filled on
  // demand; presumably guarded by the inherited linealOffsetMutex
  // (confirm in the out-of-line linealOffset() definition)
  mutable std::map<size_t,size_t> linealOffsetLookup;
  // For optimisation to avoid map<=>vector transformation
  friend class PermuteAxis;
  friend class Pivot;
  friend class ReductionOp;
  friend class Slice;
  // optimised transfer routines - private because can't guarantee index uniqueness
  void assignVector(Impl&& indices) {
    index=std::move(indices);
    linealOffsetLookup.clear();
    assert(noDuplicates());
  }
  template <class T, class A>
  void assignVector(const std::vector<T,A>& indices) {
    index.clear(); index.reserve(indices.size());
    linealOffsetLookup.clear();
    for (auto& i: indices) index.push_back(i);
    assert(noDuplicates());
  }
  template <class F, class S, class A>
  void assignVector(const std::vector<std::pair<F,S>,A>& indices) {
    index.clear(); index.reserve(indices.size());
    linealOffsetLookup.clear();
    for (auto& i: indices) index.push_back(i.first);
    assert(noDuplicates());
  }
};
}
#endif