@@ -212,6 +212,41 @@ void slab_unreg(slab_t *slab) {
     slab_unreg_by_addr(end_addr, slab);
 }

+bucket_t *create_bucket(size_t Sz, void *AllocCtx) {
+    bucket_t *bucket = (bucket_t *)umf_ba_global_alloc(sizeof(bucket_t));
+    if (bucket == NULL) {
+        // Propagate allocation failure to the caller.
+        return NULL;
+    }
+
+    bucket->Size = Sz;
+    bucket->OwnAllocCtx = AllocCtx;
+    bucket->AvailableSlabs = NULL;
+    bucket->UnavailableSlabs = NULL;
+    bucket->chunkedSlabsInPool = 0;
+    bucket->allocPoolCount = 0;
+    bucket->freeCount = 0;
+    bucket->currSlabsInUse = 0;
+    bucket->currSlabsInPool = 0;
+    bucket->maxSlabsInPool = 0;
+    bucket->allocCount = 0;
+    bucket->maxSlabsInUse = 0;
+
+    utils_mutex_init(&bucket->bucket_lock);
+
+    return bucket;
+}
+
+void destroy_bucket(bucket_t *bucket) {
+    slab_list_item_t *it = NULL, *tmp = NULL;
+    // Use a temporary to hold the next list item, because destroying the
+    // slab invalidates the current iterator.
+    LL_FOREACH_SAFE(bucket->AvailableSlabs, it, tmp) { destroy_slab(it->val); }
+    LL_FOREACH_SAFE(bucket->UnavailableSlabs, it, tmp) {
+        destroy_slab(it->val);
+    }
+
+    utils_mutex_destroy_not_free(&bucket->bucket_lock);
+
+    umf_ba_global_free(bucket);
+}
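+
+// A minimal lifecycle sketch (illustrative only; `pool_ctx` stands in for
+// whatever allocation context the owning pool passes to create_bucket):
+//
+//     bucket_t *b = create_bucket(64, pool_ctx);
+//     if (b != NULL) {
+//         bool from_pool = false;
+//         void *chunk = bucket_get_chunk(b, &from_pool);
+//         /* ... use chunk ... */
+//         destroy_bucket(b);
+//     }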
+
 // The lock must be acquired before calling this method
 void bucket_on_free_chunk(bucket_t *bucket, slab_t *slab, bool *ToPool) {
     *ToPool = true;
@@ -271,6 +306,11 @@ void *bucket_get_chunk(bucket_t *bucket, bool *FromPool) {
     utils_mutex_lock(&bucket->bucket_lock);

     slab_list_item_t *slab_it = bucket_get_avail_slab(bucket, FromPool);
+    if (slab_it == NULL) {
+        utils_mutex_unlock(&bucket->bucket_lock);
+        return NULL;
+    }
+
     void *free_chunk = slab_get_chunk(slab_it->val);

     // If the slab is full, move it to unavailable slabs and update its iterator
@@ -298,6 +338,121 @@ size_t bucket_slab_min_size(bucket_t *bucket) {
     return bucket_get_params(bucket)->SlabMinSize;
 }

+slab_list_item_t *bucket_get_avail_full_slab(bucket_t *bucket,
+                                             bool *from_pool) {
+    // Return a slab that will be used for a single allocation.
+    if (bucket->AvailableSlabs == NULL) {
+        slab_t *slab = create_slab(bucket);
+        if (slab == NULL) {
+            // Slab creation failed; report the failure to the caller.
+            return NULL;
+        }
+
+        slab_reg(slab);
+        DL_PREPEND(bucket->AvailableSlabs, slab->iter);
+        *from_pool = false;
+        bucket_update_stats(bucket, 1, 0);
+    } else {
+        bucket_decrement_pool(bucket, from_pool);
+    }
+
+    return bucket->AvailableSlabs;
+}
+
+void *bucket_get_slab(bucket_t *bucket, bool *from_pool) {
+    utils_mutex_lock(&bucket->bucket_lock);
+
+    slab_list_item_t *slab_it = bucket_get_avail_full_slab(bucket, from_pool);
+    if (slab_it == NULL) {
+        utils_mutex_unlock(&bucket->bucket_lock);
+        return NULL;
+    }
+    slab_t *slab = slab_it->val;
+    void *ptr = slab_get(slab);
+
+    DL_DELETE(bucket->AvailableSlabs, slab_it);
+    DL_PREPEND(bucket->UnavailableSlabs, slab_it);
+
+    utils_mutex_unlock(&bucket->bucket_lock);
+    return ptr;
+}
+
+void bucket_free_slab(bucket_t *bucket, slab_t *slab, bool *to_pool) {
+    utils_mutex_lock(&bucket->bucket_lock);
+
+    slab_list_item_t *slab_it = slab->iter;
+    assert(slab_it->val != NULL);
+    if (bucket_can_pool(bucket, to_pool)) {
+        DL_DELETE(bucket->UnavailableSlabs, slab_it);
+        DL_PREPEND(bucket->AvailableSlabs, slab_it);
+    } else {
+        slab_unreg(slab_it->val);
+        DL_DELETE(bucket->UnavailableSlabs, slab_it);
+        destroy_slab(slab_it->val);
+    }
+    utils_mutex_unlock(&bucket->bucket_lock);
+}
+
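+// A minimal sketch of the whole-slab path (illustrative; assumes the caller
+// tracks which slab_t backs the pointer returned by bucket_get_slab, e.g.
+// through the slab registration shown above):
+//
+//     bool from_pool = false, to_pool = false;
+//     void *mem = bucket_get_slab(bucket, &from_pool);
+//     if (mem != NULL) {
+//         /* ... the whole slab backs one large allocation ... */
+//         bucket_free_slab(bucket, owning_slab, &to_pool);
+//     }
+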
+slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket, bool *from_pool) {
+    if (bucket->AvailableSlabs == NULL) {
+        slab_t *slab = create_slab(bucket);
+        if (slab == NULL) {
+            // Slab creation failed; report the failure to the caller.
+            return NULL;
+        }
+
+        slab_reg(slab);
+        DL_PREPEND(bucket->AvailableSlabs, slab->iter);
+        bucket_update_stats(bucket, 1, 0);
+        *from_pool = false;
+    } else {
+        if (slab_get_num_allocated(bucket->AvailableSlabs->val) == 0) {
+            // If this was an empty slab, it was in the pool.
+            // Now it is no longer in the pool, so update the count.
+            --bucket->chunkedSlabsInPool;
+            bucket_decrement_pool(bucket, from_pool);
+        } else {
+            // Allocation from an existing slab is treated as from the pool
+            // for statistics purposes.
+            *from_pool = true;
+        }
+    }
+
+    return bucket->AvailableSlabs;
+}
+
+size_t bucket_capacity(bucket_t *bucket) {
+    // For buckets used in chunked mode, just one slab in the pool is
+    // sufficient. For larger buckets, the capacity could be more and is
+    // adjustable.
+    if (bucket_get_size(bucket) <= bucket_chunk_cut_off(bucket)) {
+        return 1;
+    } else {
+        return bucket_get_params(bucket)->Capacity;
+    }
+}
+
+size_t bucket_max_poolable_size(bucket_t *bucket) {
+    return bucket_get_params(bucket)->MaxPoolableSize;
+}
+
+void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
+    if (bucket_get_params(bucket)->PoolTrace == 0) {
+        return;
+    }
+
+    bucket->currSlabsInUse += in_use;
+    bucket->maxSlabsInUse =
+        utils_max(bucket->currSlabsInUse, bucket->maxSlabsInUse);
+    bucket->currSlabsInPool += in_pool;
+    bucket->maxSlabsInPool =
+        utils_max(bucket->currSlabsInPool, bucket->maxSlabsInPool);
+
+    // Increment or decrement the current pool size depending on whether
+    // a slab was added to or removed from the pool.
+    bucket_get_params(bucket)->CurPoolSize +=
+        in_pool * bucket_slab_alloc_size(bucket);
+}
+
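+// Note on the counters above: `in_use` and `in_pool` are signed deltas, so,
+// for example, bucket_update_stats(bucket, 1, -1) records one slab leaving
+// the pool and entering use, while (-1, 1) records the reverse; CurPoolSize
+// moves by the slab allocation size in the same direction as `in_pool`.
+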
 #ifdef __cplusplus
 }
 #endif