 #include <umf/pools/pool_disjoint.h>

 #include "base_alloc_global.h"
+#include "uthash/utlist.h"
 #include "utils_common.h"
 #include "utils_concurrency.h"
 #include "utils_log.h"
@@ -50,15 +51,12 @@ extern "C" {
 #endif
 }

-size_t bucket_get_slab_min_size(const bucket_t bucket);
-size_t bucket_get_slab_alloc_size(const bucket_t bucket);
-size_t bucket_get_size(const bucket_t bucket);
-umf_memory_provider_handle_t bucket_get_provider(const bucket_t bucket);
+size_t bucket_get_size(bucket_t *bucket);

 void slab_reg(slab_t *slab);
 void slab_unreg(slab_t *slab);

-slab_t *create_slab(bucket_t bucket) {
+slab_t *create_slab(bucket_t *bucket) {
     // In case bucket size is not a multiple of SlabMinSize, we would have
     // some padding at the end of the slab.
     slab_t *slab = umf_ba_global_alloc(sizeof(slab_t));
@@ -73,33 +71,32 @@ slab_t *create_slab(bucket_t bucket) {
     slab->iter->val = slab;
     slab->iter->prev = slab->iter->next = NULL;

-    slab->num_chunks =
-        bucket_get_slab_min_size(bucket) / bucket_get_size(bucket);
+    slab->num_chunks = bucket_slab_min_size(bucket) / bucket_get_size(bucket);
     slab->chunks = umf_ba_global_alloc(sizeof(bool) * slab->num_chunks);
     memset(slab->chunks, 0, sizeof(bool) * slab->num_chunks);

-    slab->slab_size = bucket_get_slab_alloc_size(bucket);
+    slab->slab_size = bucket_slab_alloc_size(bucket);
     umf_result_t res = umfMemoryProviderAlloc(
-        bucket_get_provider(bucket), slab->slab_size, 0, &slab->mem_ptr);
+        bucket_get_mem_handle(bucket), slab->slab_size, 0, &slab->mem_ptr);

     if (res == UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY) {
         destroy_slab(slab);
         return NULL;
     }

     annotate_memory_inaccessible(slab->mem_ptr, slab->slab_size);
-    fprintf(stderr, "[DP create_slab] bucket: %p, slab_size: %zu\n", bucket,
-            slab->slab_size);
+    fprintf(stderr, "[DP create_slab] bucket: %p, slab_size: %zu\n",
+            (void *)bucket, slab->slab_size);

     return slab;
 }

 void destroy_slab(slab_t *slab) {
     fprintf(stderr, "[DP destroy_slab] bucket: %p, slab_size: %zu\n",
-            slab->bucket, slab->slab_size);
+            (void *)slab->bucket, slab->slab_size);

-    umf_result_t res = umfMemoryProviderFree(bucket_get_provider(slab->bucket),
-                                             slab->mem_ptr, slab->slab_size);
+    umf_result_t res = umfMemoryProviderFree(
+        bucket_get_mem_handle(slab->bucket), slab->mem_ptr, slab->slab_size);
     assert(res == UMF_RESULT_SUCCESS);
     umf_ba_global_free(slab->chunks);
     umf_ba_global_free(slab->iter);
@@ -154,12 +151,12 @@ void *slab_get_chunk(slab_t *slab) {
 }

 void *slab_get_end(const slab_t *slab) {
-    return (uint8_t *)slab->mem_ptr + bucket_get_slab_min_size(slab->bucket);
+    return (uint8_t *)slab->mem_ptr + bucket_slab_min_size(slab->bucket);
 }

 // TODO remove? why need getter/setter?
 void *slab_get(const slab_t *slab) { return slab->mem_ptr; }
-bucket_t slab_get_bucket(const slab_t *slab) { return slab->bucket; }
+bucket_t *slab_get_bucket(slab_t *slab) { return slab->bucket; }
 size_t slab_get_chunk_size(const slab_t *slab) {
     return bucket_get_size(slab->bucket);
 }
@@ -195,6 +192,112 @@ bool slab_has_avail(const slab_t *slab) {
     return slab->num_allocated != slab->num_chunks;
 }

+void slab_reg(slab_t *slab) {
+    bucket_t *bucket = slab_get_bucket(slab);
+    void *start_addr = (void *)ALIGN_DOWN((size_t)slab_get(slab),
+                                          bucket_slab_min_size(bucket));
+    void *end_addr = (uint8_t *)(start_addr) + bucket_slab_min_size(bucket);
+
+    slab_reg_by_addr(start_addr, slab);
+    slab_reg_by_addr(end_addr, slab);
+}
+
+void slab_unreg(slab_t *slab) {
+    bucket_t *bucket = slab_get_bucket(slab);
+    void *start_addr = (void *)ALIGN_DOWN((size_t)slab_get(slab),
+                                          bucket_slab_min_size(bucket));
+    void *end_addr = (uint8_t *)(start_addr) + bucket_slab_min_size(bucket);
+
+    slab_unreg_by_addr(start_addr, slab);
+    slab_unreg_by_addr(end_addr, slab);
+}
+
+// The lock must be acquired before calling this method
+void bucket_on_free_chunk(bucket_t *bucket, slab_t *slab, bool *ToPool) {
+    *ToPool = true;
+
+    // If the slab was previously full and now has exactly one available
+    // chunk, move it to the list of available slabs
+    if (slab_get_num_allocated(slab) == (slab_get_num_chunks(slab) - 1)) {
+        slab_list_item_t *slab_it = slab->iter;
+        assert(slab_it->val != NULL);
+        DL_DELETE(bucket->UnavailableSlabs, slab_it);
+        DL_PREPEND(bucket->AvailableSlabs, slab_it);
+    }
+
+    // Check if the slab is empty, and pool it if we can.
+    if (slab_get_num_allocated(slab) == 0) {
+        // The slab is now empty.
+        // If the pool has capacity, put the slab in the pool.
+        // The ToPool parameter indicates whether the slab will be put in the
+        // pool or freed.
+        if (!bucket_can_pool(bucket, ToPool)) {
+            // Unlike the C++ original, which destroyed the slab via its
+            // unique_ptr, here we unregister it, unlink it from the list,
+            // and destroy it explicitly.
+            slab_list_item_t *slab_it = slab->iter;
+            assert(slab_it->val != NULL);
+            slab_unreg(slab_it->val);
+            DL_DELETE(bucket->AvailableSlabs, slab_it);
+            destroy_slab(slab_it->val);
+        }
+    }
+}
+
+// Return the allocation size of this bucket.
+size_t bucket_get_size(bucket_t *bucket) { return bucket->Size; }
+
+void *bucket_get_alloc_ctx(bucket_t *bucket) { return bucket->OwnAllocCtx; }
+
+void bucket_count_free(bucket_t *bucket) { ++bucket->freeCount; }
+
+void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *Slab,
+                       bool *ToPool) {
+    utils_mutex_lock(&bucket->bucket_lock);
+
+    slab_free_chunk(Slab, ptr);
+    bucket_on_free_chunk(bucket, Slab, ToPool);
+
+    utils_mutex_unlock(&bucket->bucket_lock);
+}
+
+void bucket_count_alloc(bucket_t *bucket, bool FromPool) {
+    ++bucket->allocCount;
+    if (FromPool) {
+        ++bucket->allocPoolCount;
+    }
+}
+
+void *bucket_get_chunk(bucket_t *bucket, bool *FromPool) {
+    utils_mutex_lock(&bucket->bucket_lock);
+
+    slab_list_item_t *slab_it = bucket_get_avail_slab(bucket, FromPool);
+    void *free_chunk = slab_get_chunk(slab_it->val);
+
+    // If the slab is full, move it to unavailable slabs and update its iterator
+    if (!(slab_has_avail(slab_it->val))) {
+        DL_DELETE(bucket->AvailableSlabs, slab_it);
+        DL_PREPEND(bucket->UnavailableSlabs, slab_it);
+    }
+
+    utils_mutex_unlock(&bucket->bucket_lock);
+    return free_chunk;
+}
+
+size_t bucket_chunk_cut_off(bucket_t *bucket) {
+    return bucket_slab_min_size(bucket) / 2;
+}
+
+size_t bucket_slab_alloc_size(bucket_t *bucket) {
+    // Return the larger of the bucket size and the minimum slab size.
+    return (bucket_get_size(bucket) > bucket_slab_min_size(bucket))
+               ? bucket_get_size(bucket)
+               : bucket_slab_min_size(bucket);
+}
+
+size_t bucket_slab_min_size(bucket_t *bucket) {
+    return bucket_get_params(bucket)->SlabMinSize;
+}
+
 #ifdef __cplusplus
 }
 #endif
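
Note on the list handling above: this diff switches the slab lists over to the doubly-linked-list macros from uthash's utlist.h. Below is a minimal, self-contained sketch of that pattern with toy types (not the pool's real structs): each element embeds its own prev/next pointers, so moving a node between two lists is two O(1) macro calls with no allocation — the same thing bucket_get_chunk and bucket_on_free_chunk do with slab->iter.

#include <stdio.h>
#include "uthash/utlist.h"

typedef struct item {
    int val;
    struct item *prev, *next; /* fields required by the DL_* macros */
} item_t;

int main(void) {
    item_t *available = NULL, *unavailable = NULL;
    item_t a = {.val = 1}, b = {.val = 2};

    DL_PREPEND(available, &a);
    DL_PREPEND(available, &b);

    /* "The slab became full": move its node to the other list in O(1). */
    DL_DELETE(available, &a);
    DL_PREPEND(unavailable, &a);

    item_t *it;
    DL_FOREACH(available, it) { printf("available: %d\n", it->val); }
    DL_FOREACH(unavailable, it) { printf("unavailable: %d\n", it->val); }
    return 0;
}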
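
Likewise, slab_reg/slab_unreg register each slab under two keys: the slab base aligned down to the bucket's minimal slab size, and that value plus one slab-min-size. A toy illustration of the arithmetic follows; it assumes the usual power-of-two ALIGN_DOWN (the real macro lives in utils_common.h, and slab_reg_by_addr/slab_unreg_by_addr are defined elsewhere in this file), and the addresses are made up.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed semantics of ALIGN_DOWN for power-of-two alignments. */
#define ALIGN_DOWN(x, align) ((x) & ~((align) - 1))

int main(void) {
    size_t slab_min_size = 1 << 16;     /* e.g. a 64 KiB minimal slab size */
    uintptr_t mem_ptr = 0x7f0000012340; /* hypothetical slab base address */

    uintptr_t start = ALIGN_DOWN(mem_ptr, slab_min_size);
    uintptr_t end = start + slab_min_size;

    /* Registering the slab under both boundary keys lets a lookup that
     * aligns a user pointer down to slab_min_size find the owning slab,
     * even when the slab straddles an alignment boundary. */
    printf("start key: 0x%" PRIxPTR ", end key: 0x%" PRIxPTR "\n", start, end);
    return 0;
}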