Skip to content

Commit 913a2f9

Browse files
Added an address allocator with Iterators for ECS-like things
1 parent 5a8fd45 commit 913a2f9

File tree

4 files changed

+268
-47
lines changed

4 files changed

+268
-47
lines changed

examples_tests/10.AllocatorTest/main.cpp

Lines changed: 42 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -106,9 +106,8 @@ class AllocatorHandler
106106
alctr = AlctrType(reservedSpace, randAllocParams.offset, randAllocParams.alignOffset, randAllocParams.maxAlign, randAllocParams.addressSpaceSize, randAllocParams.blockSz);
107107
}
108108

109-
// variable shadowing and other problems @Przemog
110-
testsCnt = rng.getRndAllocCnt();
111-
for (size_t i = 0; i < testsCnt; i++)
109+
uint32_t subTestsCnt = rng.getRndAllocCnt();
110+
for (size_t i = 0; i < subTestsCnt; i++)
112111
executeForFrame(alctr, randAllocParams);
113112

114113
if constexpr (!std::is_same<AlctrType, core::LinearAddressAllocator<uint32_t>>::value)
@@ -122,6 +121,19 @@ class AllocatorHandler
122121
uint32_t outAddr = AlctrType::invalid_address;
123122
uint32_t size = 0u;
124123
uint32_t align = 0u;
124+
125+
inline bool operator==(const AllocationData& other) const
126+
{
127+
return outAddr==other.outAddr;
128+
}
129+
130+
struct Hash
131+
{
132+
inline size_t operator()(const AllocationData& _this) const
133+
{
134+
return std::hash<uint32_t>()(_this.outAddr);
135+
}
136+
};
125137
};
126138

127139
struct RandParams
@@ -146,11 +158,13 @@ class AllocatorHandler
146158
Traits::multi_alloc_addr(alctr, addressesToAllcate, allocDataSoA.outAddresses.data(), allocDataSoA.sizes.data(), allocDataSoA.alignments.data());
147159

148160
// record all successful alloc addresses to the `core::vector`
161+
if constexpr (!std::is_same<AlctrType, core::LinearAddressAllocator<uint32_t>>::value)
149162
for (uint32_t j = 0u; j < allocDataSoA.size; j++)
150163
{
151164
if (allocDataSoA.outAddresses[j] != AlctrType::invalid_address)
152165
results.push_back({ allocDataSoA.outAddresses[j], allocDataSoA.sizes[j], allocDataSoA.alignments[j] });
153166
}
167+
checkStillIteratable(alctr);
154168

155169
// run random dealloc function
156170
randFreeAllocatedAddresses(alctr);
@@ -173,10 +187,7 @@ class AllocatorHandler
173187
}
174188
}
175189
else
176-
{
177190
alctr.reset();
178-
results.clear();
179-
}
180191
}
181192

182193
// random dealloc function
@@ -188,10 +199,8 @@ class AllocatorHandler
188199
// randomly decide how many calls to `multi_free`
189200
const uint32_t multiFreeCnt = rng.getRandomNumber(1u, results.size());
190201

191-
if (std::is_same<AlctrType, core::GeneralpurposeAddressAllocator<uint32_t>>::value)
192-
{
202+
if constexpr (Traits::supportsArbitraryOrderFrees)
193203
std::shuffle(results.begin(), results.end(), rng.getMt());
194-
}
195204

196205
for (uint32_t i = 0u; (i < multiFreeCnt) && results.size(); i++)
197206
{
@@ -210,6 +219,7 @@ class AllocatorHandler
210219

211220
Traits::multi_free_addr(alctr, addressesToFreeCnt, allocDataSoA.outAddresses.data(), allocDataSoA.sizes.data());
212221
results.erase(results.end() - addressesToFreeCnt, results.end());
222+
checkStillIteratable(alctr);
213223
}
214224
}
215225

@@ -231,6 +241,19 @@ class AllocatorHandler
231241

232242
private:
233243
core::vector<AllocationData> results;
244+
inline void checkStillIteratable(const AlctrType& alctr)
245+
{
246+
if constexpr (std::is_same<AlctrType, core::IteratablePoolAddressAllocator<uint32_t>>::value)
247+
{
248+
core::unordered_set<AllocationData,AllocationData::Hash> allocationSet(results.begin(),results.end());
249+
for (auto addr : alctr)
250+
{
251+
AllocationData dummy; dummy.outAddr = addr;
252+
if (allocationSet.find(dummy)==allocationSet.end())
253+
exit(34);
254+
}
255+
}
256+
}
234257

235258
//these hold inputs for `multi_alloc_addr` and `multi_free_addr`
236259

@@ -247,7 +270,7 @@ class AllocatorHandler
247270
{
248271
// randomly decide sizes (but always less than `address_allocator_traits::max_size`)
249272

250-
if constexpr (std::is_same<AlctrType, core::PoolAddressAllocator<uint32_t>>::value)
273+
if constexpr (std::is_same_v<AlctrType,core::PoolAddressAllocator<uint32_t>>||std::is_same_v<AlctrType,core::IteratablePoolAddressAllocator<uint32_t>>)
251274
{
252275
sizes[j] = randAllocParams.blockSz;
253276
alignments[j] = randAllocParams.blockSz;
@@ -310,6 +333,11 @@ int main()
310333
poolAlctrHandler.executeAllocatorTest();
311334
}
312335

336+
{
337+
AllocatorHandler<core::IteratablePoolAddressAllocator<uint32_t>> iterPoolAlctrHandler;
338+
iterPoolAlctrHandler.executeAllocatorTest();
339+
}
340+
313341
{
314342
AllocatorHandler<core::LinearAddressAllocator<uint32_t>> linearAlctrHandler;
315343
linearAlctrHandler.executeAllocatorTest();
@@ -336,6 +364,8 @@ int main()
336364
nbl::core::address_allocator_traits<core::StackAddressAllocatorST<uint32_t> >::printDebugInfo();
337365
printf("Pool \n");
338366
nbl::core::address_allocator_traits<core::PoolAddressAllocatorST<uint32_t> >::printDebugInfo();
367+
printf("IteratablePool \n");
368+
nbl::core::address_allocator_traits<core::IteratablePoolAddressAllocatorST<uint32_t> >::printDebugInfo();
339369
printf("General \n");
340370
nbl::core::address_allocator_traits<core::GeneralpurposeAddressAllocatorST<uint32_t> >::printDebugInfo();
341371

@@ -344,6 +374,8 @@ int main()
344374
nbl::core::address_allocator_traits<core::LinearAddressAllocatorMT<uint32_t, std::recursive_mutex> >::printDebugInfo();
345375
printf("Pool \n");
346376
nbl::core::address_allocator_traits<core::PoolAddressAllocatorMT<uint32_t, std::recursive_mutex> >::printDebugInfo();
377+
printf("Iteratable Pool \n");
378+
nbl::core::address_allocator_traits<core::IteratablePoolAddressAllocatorMT<uint32_t, std::recursive_mutex> >::printDebugInfo();
347379
printf("General \n");
348380
nbl::core::address_allocator_traits<core::GeneralpurposeAddressAllocatorMT<uint32_t, std::recursive_mutex> >::printDebugInfo();
349381
}
Lines changed: 177 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,177 @@
1+
// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O.
2+
// This file is part of the "Nabla Engine".
3+
// For conditions of distribution and use, see copyright notice in nabla.h
4+
5+
#ifndef __NBL_CORE_ITERATABLE_POOL_ADDRESS_ALLOCATOR_H_INCLUDED__
6+
#define __NBL_CORE_ITERATABLE_POOL_ADDRESS_ALLOCATOR_H_INCLUDED__
7+
8+
9+
#include<algorithm>
10+
11+
#include "nbl/core/alloc/PoolAddressAllocator.h"
12+
13+
14+
namespace nbl
15+
{
16+
namespace core
17+
{
18+
19+
20+
//! Can only allocate up to a size of a single block, no support for allocations larger than blocksize
21+
template<typename _size_type>
22+
class IteratablePoolAddressAllocator : protected PoolAddressAllocator<_size_type>
23+
{
24+
protected:
25+
inline size_type* begin() { return &getFreeStack(Base::freeStackCtr); }
26+
inline size_type& getIteratorOffset(size_type i) {return reinterpret_cast<size_type*>(Base::reservedSpace)[Base::blockCount+i];}
27+
inline const size_type& getIteratorOffset(size_type i) const {return reinterpret_cast<const size_type*>(Base::reservedSpace)[Base::blockCount+i];}
28+
29+
private:
30+
using Base = PoolAddressAllocator<_size_type>;
31+
32+
void copySupplementaryState(const IteratablePoolAddressAllocator& other, _size_type newBuffSz)
33+
{
34+
std::copy(other.begin(),other.end(),begin());
35+
for (auto i=0u; i<std::min(blockCount,other.blockCount); i++)
36+
getIteratorOffset(i) = other.getIteratorOffset(i);
37+
}
38+
// use [freeStackCtr,blockCount) as the iteratable range
39+
// use [blockCount,blockCount*2u) to store backreferences to iterators
40+
public:
41+
_NBL_DECLARE_ADDRESS_ALLOCATOR_TYPEDEFS(_size_type);
42+
43+
IteratablePoolAddressAllocator() : Base() {}
44+
virtual ~IteratablePoolAddressAllocator() {}
45+
46+
IteratablePoolAddressAllocator(void* reservedSpc, _size_type addressOffsetToApply, _size_type alignOffsetNeeded, _size_type maxAllocatableAlignment, size_type bufSz, size_type blockSz) noexcept :
47+
Base(reservedSpc,addressOffsetToApply,alignOffsetNeeded,maxAllocatableAlignment,bufSz,blockSz) {}
48+
49+
//! When resizing we require that the copying of data buffer has already been handled by the user of the address allocator
50+
template<typename... Args>
51+
IteratablePoolAddressAllocator(_size_type newBuffSz, IteratablePoolAddressAllocator&& other, Args&&... args) noexcept :
52+
Base(newBuffSz,std::move(other),std::forward<Args>(args)...)
53+
{
54+
copyState
55+
}
56+
57+
template<typename... Args>
58+
IteratablePoolAddressAllocator(_size_type newBuffSz, const IteratablePoolAddressAllocator& other, Args&&... args) noexcept :
59+
Base(newBuffSz,other,std::forward<Args>(args)...)
60+
{
61+
copyState
62+
}
63+
64+
IteratablePoolAddressAllocator& operator=(IteratablePoolAddressAllocator&& other)
65+
{
66+
Base::operator=(std::move(other));
67+
return *this;
68+
}
69+
70+
71+
//! Functions that actually differ
72+
inline size_type alloc_addr(size_type bytes, size_type alignment, size_type hint=0ull) noexcept
73+
{
74+
const size_type allocatedAddress = Base::alloc_addr(bytes,alignment,hint);
75+
if (allocatedAddress!=invalid_address)
76+
{
77+
*begin() = allocatedAddress;
78+
getIteratorOffset(addressToBlockID(allocatedAddress)) = freeStackCtr;
79+
}
80+
return allocatedAddress;
81+
}
82+
83+
inline void free_addr(size_type addr, size_type bytes) noexcept
84+
{
85+
const size_type iteratorOffset = getIteratorOffset(addressToBlockID(addr));
86+
#ifdef _NBL_DEBUG
87+
assert(iteratorOffset>=freeStackCtr);
88+
#endif
89+
// swap the erased element with either end of the array in the contiguous array
90+
// not using a swap cause it doesn't matter where the erased element points
91+
const size_type otherNodeOffset = *begin();
92+
reinterpret_cast<size_type*>(Base::reservedSpace)[iteratorOffset] = otherNodeOffset;
93+
// but I need to patch up the back-link of the moved element
94+
getIteratorOffset(addressToBlockID(otherNodeOffset)) = iteratorOffset;
95+
96+
Base::free_addr(addr,bytes);
97+
}
98+
99+
// gets a range of all the allocated addresses
100+
inline const size_type* begin() const {return &getFreeStack(Base::freeStackCtr);}
101+
inline const size_type* end() const {return &getFreeStack(Base::blockCount);}
102+
103+
104+
inline size_type safe_shrink_size(size_type sizeBound, size_type newBuffAlignmentWeCanGuarantee=1u) noexcept
105+
{
106+
if (safe_shrink_size_common(sizeBound,newBuffAlignmentWeCanGuarantee))
107+
{
108+
assert(begin()!=end()); // we already checked that freeStackCtr>0
109+
sizeBound = *std::max_element(begin(),end());
110+
}
111+
return AddressAllocatorBase<PoolAddressAllocator<_size_type>,_size_type>::safe_shrink_size(sizeBound,newBuffAlignmentWeCanGuarantee);
112+
}
113+
114+
115+
static inline size_type reserved_size(size_type maxAlignment, size_type bufSz, size_type blockSz) noexcept
116+
{
117+
size_type maxBlockCount = bufSz/blockSz;
118+
return maxBlockCount*sizeof(size_type)*size_type(2u);
119+
}
120+
static inline size_type reserved_size(const IteratablePoolAddressAllocator<_size_type>& other, size_type bufSz) noexcept
121+
{
122+
return reserved_size(other.maxRequestableAlignment,bufSz,other.blockSize);
123+
}
124+
125+
inline void reset()
126+
{
127+
Base::reset();
128+
}
129+
inline size_type max_size() const noexcept
130+
{
131+
return Base::max_size();
132+
}
133+
inline size_type min_size() const noexcept
134+
{
135+
return Base::min_size();
136+
}
137+
inline size_type get_free_size() const noexcept
138+
{
139+
return Base::get_free_size();
140+
}
141+
inline size_type get_allocated_size() const noexcept
142+
{
143+
return Base::get_allocated_size();
144+
}
145+
inline size_type get_total_size() const noexcept
146+
{
147+
return Base::get_total_size();
148+
}
149+
inline size_type addressToBlockID(size_type addr) const noexcept
150+
{
151+
return Base::addressToBlockID(addr);
152+
}
153+
};
154+
155+
156+
}
157+
}
158+
159+
#include "nbl/core/alloc/AddressAllocatorConcurrencyAdaptors.h"
160+
161+
namespace nbl
{
namespace core
{

// aliases
// Single-threaded flavour: the allocator itself, with no synchronization wrapper.
template<typename size_type>
using IteratablePoolAddressAllocatorST = IteratablePoolAddressAllocator<size_type>;

// Multi-threaded flavour: AddressAllocatorBasicConcurrencyAdaptor presumably guards
// each call with the supplied RecursiveLockable — confirm in
// AddressAllocatorConcurrencyAdaptors.h.
template<typename size_type, class RecursiveLockable>
using IteratablePoolAddressAllocatorMT = AddressAllocatorBasicConcurrencyAdaptor<IteratablePoolAddressAllocator<size_type>,RecursiveLockable>;

}
}
175+
176+
#endif
177+

0 commit comments

Comments
 (0)