 #include "utils_math.h"
 #include "utils_sanitizers.h"

-#include "pool_disjoint_temp.h"
+typedef struct bucket_t bucket_t;
+typedef struct slab_t slab_t;
+typedef struct slab_list_item_t slab_list_item_t;
+typedef struct AllocImpl AllocImpl;

-// TODO remove
-#ifdef __cplusplus
-extern "C" {
-#endif
+
+slab_t *create_slab(bucket_t *bucket);
+void destroy_slab(slab_t *slab);
+
+void *slab_get(const slab_t *slab);
+void *slab_get_end(const slab_t *slab);
+bucket_t *slab_get_bucket(slab_t *slab);
+void *slab_get_chunk(slab_t *slab);
+size_t slab_get_num_chunks(const slab_t *slab);
+size_t slab_get_chunk_size(const slab_t *slab);
+size_t slab_get_num_allocated(const slab_t *slab);
+
+bool slab_has_avail(const slab_t *slab);
+void slab_free_chunk(slab_t *slab, void *ptr);
+
+void slab_reg(slab_t *slab);
+void slab_reg_by_addr(void *addr, slab_t *slab);
+void slab_unreg(slab_t *slab);
+void slab_unreg_by_addr(void *addr, slab_t *slab);
+
+bucket_t *create_bucket(size_t sz, void *alloc_ctx,
+                        umf_disjoint_pool_shared_limits_t *shared_limits);
+void destroy_bucket(bucket_t *bucket);
+
+void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
+bool bucket_can_pool(bucket_t *bucket, bool *to_pool);
+void bucket_on_free_chunk(bucket_t *bucket, slab_t *slab, bool *to_pool);
+void bucket_decrement_pool(bucket_t *bucket, bool *from_pool);
+void *bucket_get_chunk(bucket_t *bucket, bool *from_pool);
+size_t bucket_get_size(bucket_t *bucket);
+size_t bucket_chunk_cut_off(bucket_t *bucket);
+size_t bucket_capacity(bucket_t *bucket);
+void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
+                       bool *to_pool);
+void bucket_count_alloc(bucket_t *bucket, bool from_pool);
+void bucket_count_free(bucket_t *bucket);
+
+void *bucket_get_slab(bucket_t *bucket, bool *from_pool);
+size_t bucket_slab_alloc_size(bucket_t *bucket);
+size_t bucket_slab_min_size(bucket_t *bucket);
+slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket, bool *from_pool);
+slab_list_item_t *bucket_get_avail_full_slab(bucket_t *bucket, bool *from_pool);
+void bucket_free_slab(bucket_t *bucket, slab_t *slab, bool *to_pool);
+
+umf_disjoint_pool_shared_limits_t *bucket_get_limits(bucket_t *bucket);
+umf_disjoint_pool_params_t *bucket_get_params(bucket_t *bucket);
+umf_memory_provider_handle_t bucket_get_mem_handle(bucket_t *bucket);
+utils_mutex_t *bucket_get_known_slabs_map_lock(bucket_t *bucket);
+critnib *bucket_get_known_slabs(bucket_t *bucket);
+
+AllocImpl *create_AllocImpl(umf_memory_provider_handle_t hProvider,
+                            umf_disjoint_pool_params_t *params);
+void destroy_AllocImpl(AllocImpl *ai);
+
+bucket_t *AllocImpl_findBucket(AllocImpl *ai, size_t Size);
+umf_result_t AllocImpl_deallocate(AllocImpl *ai, void *Ptr, bool *ToPool);
+umf_disjoint_pool_shared_limits_t *AllocImpl_getLimits(AllocImpl *ai);
+void *AllocImpl_allocate(AllocImpl *ai, size_t Size, bool *FromPool);
+void *AllocImpl_allocate_align(AllocImpl *ai, size_t Size, size_t Alignment,
+                               bool *FromPool);
+
+umf_memory_provider_handle_t AllocImpl_getMemHandle(AllocImpl *ai);
+utils_mutex_t *AllocImpl_getKnownSlabsMapLock(AllocImpl *ai);
+critnib *AllocImpl_getKnownSlabs(AllocImpl *ai);
+size_t AllocImpl_SlabMinSize(AllocImpl *ai);
+umf_disjoint_pool_params_t *AllocImpl_getParams(AllocImpl *ai);

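Taken together, these declarations describe the allocation fast path: a bucket hands out chunks from its slabs and reports, via the bool out-parameters, whether the memory came from the pool. A minimal sketch of how a caller might drive the bucket API, assuming the semantics implied by the names and out-parameters (the helper itself is illustrative, not part of this patch):

/* Illustrative sketch only; assumes bucket_get_chunk() picks an available
 * slab (possibly a pooled one, reported via *from_pool) and marks one
 * chunk as allocated. */
static void *sketch_alloc_from_bucket(bucket_t *bucket, bool *from_pool) {
    void *ptr = bucket_get_chunk(bucket, from_pool);
    if (ptr == NULL) {
        return NULL; /* the provider could not supply a new slab */
    }
    bucket_count_alloc(bucket, *from_pool); /* update bucket statistics */
    return ptr;
}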
 //static <- make static rename to TLS_last_allocation_error
 __TLS umf_result_t TLS_last_allocation_error_dp;
@@ -79,6 +144,97 @@ static size_t CutOff = (size_t)1 << 31; // 2GB
 #endif
 }

+void annotate_memory_inaccessible(void *ptr, size_t size);
+void annotate_memory_undefined(void *ptr, size_t size);
+
+
+typedef struct slab_list_item_t slab_list_item_t;
+
+typedef struct bucket_t {
+    size_t Size;
+
+    // List of slabs which have at least 1 available chunk.
+    slab_list_item_t *AvailableSlabs;
+
+    // List of slabs with 0 available chunks.
+    slab_list_item_t *UnavailableSlabs;
+
+    // Protects the bucket and all the corresponding slabs
+    utils_mutex_t bucket_lock;
+
+    // Reference to the allocator context, used to access memory allocation
+    // routines, the slab map, etc.
+    void *OwnAllocCtx;
+
+    umf_disjoint_pool_shared_limits_t *shared_limits;
+
+    // For buckets used in chunked mode, a counter of slabs in the pool.
+    // For allocations that use an entire slab each, the entries in the
+    // Available list are entries in the pool. Each such slab is available
+    // for a new allocation, so the size of the Available list is the size
+    // of the pool.
+    // For allocations that use slabs in chunked mode, slabs will be in the
+    // Available list if one or more of their chunks are free. The entire
+    // slab is not necessarily free, just some chunks in it are. To
+    // implement pooling, we allow one slab in the Available list to be
+    // entirely empty. Normally such a slab would have been freed, but now
+    // we keep it and treat it as "in the pool".
+    // When a slab becomes entirely free, we have to decide whether to
+    // return it to the provider or keep it allocated. A simple check of
+    // the size of the Available list is not sufficient to tell whether any
+    // slab has been pooled yet; we would have to traverse the entire
+    // Available list and check if any of them is entirely free. Instead,
+    // we keep a counter of entirely empty slabs within the Available list
+    // to speed up the check of whether a slab in this bucket is already
+    // pooled.
+    size_t chunkedSlabsInPool;
+
+    // Statistics
+    size_t allocPoolCount;
+    size_t freeCount;
+    size_t currSlabsInUse;
+    size_t currSlabsInPool;
+    size_t maxSlabsInPool;
+
+    // Statistics
+    size_t allocCount;
+    size_t maxSlabsInUse;
+
+} bucket_t;
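The chunkedSlabsInPool counter is what makes the pooling decision O(1): instead of walking the Available list, the bucket compares the counter against its capacity. A minimal sketch of that check, assuming the caller holds bucket_lock (the function name and the max_pooled_slabs parameter are illustrative, not this patch's bucket_can_pool()):

/* Illustrative sketch only. Decide whether a newly emptied slab should
 * stay "in the pool" or be returned to the provider. */
static bool sketch_bucket_can_pool(bucket_t *bucket, size_t max_pooled_slabs) {
    /* chunkedSlabsInPool already counts entirely empty slabs kept in the
     * Available list, so no list traversal is needed. */
    if (bucket->chunkedSlabsInPool < max_pooled_slabs) {
        bucket->chunkedSlabsInPool++; /* keep the slab; it is now pooled */
        return true;
    }
    return false; /* pool is full; return the slab to the provider */
}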
+
+// Represents an allocated memory block of size 'slab_min_size'.
+// Internally, it splits the memory block into chunks. The number of
+// chunks depends on the size of the Bucket which created the Slab.
+// Note: the Bucket's methods are responsible for thread safety of Slab
+// access, so no locking happens here.
+typedef struct slab_t {
+    // Pointer to the allocated memory of slab_min_size bytes
+    void *mem_ptr;
+    size_t slab_size;
+
+    // Represents the current state of each chunk: true if the chunk is
+    // allocated, false if it is free for allocation
+    bool *chunks;
+    size_t num_chunks;
+
+    // Total number of allocated chunks at the moment.
+    size_t num_allocated;
+
+    // The bucket which the slab belongs to
+    bucket_t *bucket;
+
+    // Hint for where to start the search for a free chunk in the slab
+    size_t first_free_chunk_idx;
+
+    // Stores an iterator to the corresponding node in the avail/unavail
+    // list, to achieve O(1) removal
+    slab_list_item_t *iter;
+} slab_t;
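first_free_chunk_idx turns chunk lookup into a short first-fit scan that resumes where the last allocation left off. A minimal sketch, assuming the owning bucket serializes access (the function name is illustrative, not this patch's actual lookup):

/* Illustrative sketch only; returns num_chunks when the slab is full. */
static size_t sketch_slab_find_free_chunk(slab_t *slab) {
    for (size_t i = slab->first_free_chunk_idx; i < slab->num_chunks; i++) {
        if (!slab->chunks[i]) {     /* false means the chunk is free */
            slab->chunks[i] = true; /* mark it allocated */
            slab->num_allocated++;
            slab->first_free_chunk_idx = i + 1; /* hint for the next search */
            return i;
        }
    }
    return slab->num_chunks; /* no free chunk in this slab */
}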
+
+typedef struct slab_list_item_t {
+    slab_t *val;
+    struct slab_list_item_t *prev, *next;
+} slab_list_item_t;
+
+
 typedef struct umf_disjoint_pool_shared_limits_t {
     size_t max_size;
     size_t total_size; // requires atomic access
@@ -98,6 +254,34 @@ void umfDisjointPoolSharedLimitsDestroy(
     umf_ba_global_free(limits);
 }

+typedef struct AllocImpl {
+    // It's important for the map to be destroyed last, after the buckets
+    // and their slabs, because the slab's destructor removes the object
+    // from the map.
+    critnib *known_slabs; // (void *, slab_t *)
+
+    // prev std::shared_timed_mutex - ok?
+    utils_mutex_t known_slabs_map_lock;
+
+    // Handle to the memory provider
+    umf_memory_provider_handle_t MemHandle;
+
+    // Stored as unique_ptrs in the C++ version, since Bucket is not
+    // movable (because of std::mutex)
+    bucket_t **buckets;
+    size_t buckets_num;
+
+    // Configuration for this instance
+    umf_disjoint_pool_params_t params;
+
+    umf_disjoint_pool_shared_limits_t *DefaultSharedLimits;
+
+    // Used in the algorithm for finding buckets
+    size_t MinBucketSizeExp;
+
+    // Coarse-grain allocation min alignment
+    size_t ProviderMinPageSize;
+
+} AllocImpl;
+
 typedef struct disjoint_pool_t {
     AllocImpl *impl;
 } disjoint_pool_t;
@@ -1185,8 +1369,3 @@ static umf_memory_pool_ops_t UMF_DISJOINT_POOL_OPS = {
 umf_memory_pool_ops_t *umfDisjointPoolOps(void) {
     return &UMF_DISJOINT_POOL_OPS;
 }
-
-// TODO remove
-#ifdef __cplusplus
-}
-#endif