#endif

#define MIN_FRAG_SIZE ROUNDTO
- #define FRAG_NEXT(f) ((struct hp_frag *) \
- 		((char *)(f) + sizeof(struct hp_frag) + ((struct hp_frag *)(f))->size))

- #define FRAG_OVERHEAD (sizeof(struct hp_frag))
- #define frag_is_free(_f) ((_f)->prev)
+ /* only perform a split if the resulting free fragment is at least this size */
+ #define MIN_SHM_SPLIT_SIZE 4096
+ #define MIN_PKG_SPLIT_SIZE 256
+
+ #define FRAG_NEXT(f) ((struct hp_frag *)((char *)((f) + 1) + (f)->size))
+
+ #define FRAG_OVERHEAD HP_FRAG_OVERHEAD
+ #define frag_is_free(_f) ((_f)->prev)

/* used when detaching free fragments */
static unsigned int optimized_get_indexes[HP_HASH_SIZE];
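/*
 * Side note on the reworked FRAG_NEXT(): it assumes the payload starts right
 * after the struct hp_frag header, so "(f) + 1" skips the header and
 * "+ (f)->size" skips the payload. A minimal, self-contained sketch of the
 * same arithmetic, using a simplified fragment layout with illustrative names
 * (not the real structure):
 */
#include <stdio.h>
#include <stdlib.h>

struct frag {
    unsigned long size;   /* payload bytes following this header */
    struct frag **prev;   /* set only while the fragment sits in a free list */
};

#define NEXT_FRAG(f) ((struct frag *)((char *)((f) + 1) + (f)->size))

int main(void)
{
    struct frag *a = malloc(256);

    a->size = 64;
    /* the next header should sit exactly sizeof(struct frag) + 64 bytes away */
    printf("a=%p next=%p delta=%zu\n", (void *)a, (void *)NEXT_FRAG(a),
           (size_t)((char *)NEXT_FRAG(a) - (char *)a));
    free(a);
    return 0;
}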
@@ -56,7 +60,7 @@ static unsigned int optimized_put_indexes[HP_HASH_SIZE];
/* finds the hash value for s, s=ROUNDTO multiple */
#define GET_HASH(s) (((unsigned long)(s) <= HP_MALLOC_OPTIMIZE) ? \
            (unsigned long)(s) / ROUNDTO : \
-             HP_LINEAR_HASH_SIZE + big_hash_idx((s)) - HP_MALLOC_OPTIMIZE_FACTOR + 1)
+             HP_LINEAR_HASH_SIZE + big_hash_idx(s) - HP_MALLOC_OPTIMIZE_FACTOR + 1)

/*
 * - for heavily used sizes (which need some optimizing) it returns
@@ -96,10 +100,6 @@ static unsigned int optimized_put_indexes[HP_HASH_SIZE];
            }) : \
            HP_LINEAR_HASH_SIZE + big_hash_idx((s)) - HP_MALLOC_OPTIMIZE_FACTOR + 1)

-
-
-
-
extern unsigned long *shm_hash_usage;

/*
@@ -135,12 +135,14 @@ stat_var *shm_frags;

#define MEM_FRAG_AVOIDANCE

- #define HP_MALLOC_LARGE_LIMIT HP_MALLOC_OPTIMIZE
- #define HP_MALLOC_DEFRAG_LIMIT (HP_MALLOC_LARGE_LIMIT * 5)
- #define HP_MALLOC_DEFRAG_PERCENT 5
+ #define can_split_frag(frag, wanted_size, min_size) \
+     ((frag)->size - wanted_size >= min_size)

- #define can_split_frag(frag, wanted_size) \
-     ((frag)->size - wanted_size > (FRAG_OVERHEAD + MIN_FRAG_SIZE))
+ #define can_split_pkg_frag(frag, wanted_size) \
+     can_split_frag(frag, wanted_size, MIN_PKG_SPLIT_SIZE)
+ #define can_split_shm_frag(frag, wanted_size) \
+     can_split_frag(frag, wanted_size, MIN_SHM_SPLIT_SIZE)
+ #define can_split_rpm_frag can_split_shm_frag


/* computes hash number for big buckets */
inline static unsigned long big_hash_idx(unsigned long s)
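/*
 * Side note on the split predicates above: a fragment is only split when the
 * leftover free piece is itself big enough to be useful (256 bytes for pkg,
 * 4096 for shm/rpm), instead of the old FRAG_OVERHEAD + MIN_FRAG_SIZE bound.
 * A rough usage sketch with stand-in types and example sizes (not the real
 * structures):
 */
#include <stdio.h>

#define MIN_PKG_SPLIT_SIZE 256

#define can_split_frag(frag, wanted_size, min_size) \
    ((frag)->size - (wanted_size) >= (min_size))
#define can_split_pkg_frag(frag, wanted_size) \
    can_split_frag(frag, wanted_size, MIN_PKG_SPLIT_SIZE)

struct frag_stub { unsigned long size; };

int main(void)
{
    struct frag_stub f = { 1024 };

    /* 1024 - 512 = 512 >= 256, so splitting pays off; 1024 - 900 = 124 does not */
    printf("req 512: split=%d, req 900: split=%d\n",
           can_split_pkg_frag(&f, 512), can_split_pkg_frag(&f, 900));
    return 0;
}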
@@ -159,6 +161,36 @@ inline static unsigned long big_hash_idx(unsigned long s)
    return idx;
}

+ static inline void hp_lock(struct hp_block *hpb, unsigned int hash)
+ {
+     int i;
+
+     if (!hpb->free_hash[hash].is_optimized) {
+         SHM_LOCK(hash);
+         return;
+     }
+
+     /* for optimized buckets, we have to lock the entire array */
+     hash = HP_HASH_SIZE + hash * shm_secondary_hash_size;
+     for (i = 0; i < shm_secondary_hash_size; i++)
+         SHM_LOCK(hash + i);
+ }
+
+ static inline void hp_unlock(struct hp_block *hpb, unsigned int hash)
+ {
+     int i;
+
+     if (!hpb->free_hash[hash].is_optimized) {
+         SHM_UNLOCK(hash);
+         return;
+     }
+
+     /* for optimized buckets, we have to unlock the entire array */
+     hash = HP_HASH_SIZE + hash * shm_secondary_hash_size;
+     for (i = 0; i < shm_secondary_hash_size; i++)
+         SHM_UNLOCK(hash + i);
+ }
+
#ifdef SHM_EXTRA_STATS
#include "module_info.h"
unsigned long hp_stats_get_index(void *ptr)
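/*
 * Side note on hp_lock()/hp_unlock(): a non-optimized bucket is guarded by a
 * single lock at its own index, while an optimized bucket fans out into a run
 * of secondary-hash locks starting at HP_HASH_SIZE. A sketch of just the index
 * math, with made-up constants (the real values are runtime-configured):
 */
#include <stdio.h>

#define EX_HP_HASH_SIZE        587  /* illustrative only */
#define EX_SECONDARY_HASH_SIZE 8    /* illustrative only */

static void show_lock_range(unsigned int hash)
{
    unsigned int first = EX_HP_HASH_SIZE + hash * EX_SECONDARY_HASH_SIZE;

    /* an optimized bucket locks this whole contiguous range, in order */
    printf("bucket %u -> locks [%u .. %u]\n",
           hash, first, first + EX_SECONDARY_HASH_SIZE - 1);
}

int main(void)
{
    show_lock_range(0);
    show_lock_range(5);
    return 0;
}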
@@ -178,26 +210,43 @@ void hp_stats_set_index(void *ptr, unsigned long idx)
}
#endif

+ #if 0
+ /* walk through all fragments and write them to the log. Useful for dev */
+ static void hp_dump(struct hp_block *hpb)
+ {
+     struct hp_frag *f;
+
+     fprintf(stderr, "dumping all fragments...\n");
+
+     for (f = hpb->first_frag; f < hpb->last_frag; f = FRAG_NEXT(f)) {
+         fprintf(stderr, " | sz: %lu, prev: %p, next: %p |\n", f->size,
+                 f->prev, f->nxt_free);
+     }
+ }
+ #endif
+
static inline void hp_frag_attach(struct hp_block *hpb, struct hp_frag *frag)
{
    struct hp_frag **f;
    unsigned int hash;

+
    hash = GET_HASH_RR(hpb, frag->size);
+
    f = &(hpb->free_hash[hash].first);

    if (frag->size > HP_MALLOC_OPTIMIZE){ /* because of '<=' in GET_HASH,
                                             purpose --andrei ) */
-         for (; *f; f = &((*f)->u.nxt_free)){
+         for (; *f; f = &((*f)->nxt_free)){
            if (frag->size <= (*f)->size) break;
        }
    }

    /*insert it here*/
    frag->prev = f;
-     frag->u.nxt_free = *f;
+     frag->nxt_free = *f;
    if (*f)
-         (*f)->prev = &(frag->u.nxt_free);
+         (*f)->prev = &(frag->nxt_free);

    *f = frag;
@@ -213,10 +262,10 @@ static inline void hp_frag_detach(struct hp_block *hpb, struct hp_frag *frag)
    pf = frag->prev;

    /* detach */
-     *pf = frag->u.nxt_free;
+     *pf = frag->nxt_free;

-     if (frag->u.nxt_free)
-         frag->u.nxt_free->prev = pf;
+     if (frag->nxt_free)
+         frag->nxt_free->prev = pf;

    frag->prev = NULL;
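/*
 * Side note on the free-list change above: with the "u" union gone, a free
 * fragment links forward through "nxt_free", while "prev" stores the address
 * of whatever pointer currently references it (the bucket head or the previous
 * fragment's nxt_free). That is what lets hp_frag_detach() unlink in O(1)
 * without walking the bucket. A self-contained sketch of the pattern, using
 * simplified stand-in types:
 */
#include <stdio.h>

struct frag {
    unsigned long size;
    struct frag **prev;      /* points at the pointer that points at us */
    struct frag *nxt_free;
};

static void attach(struct frag **head, struct frag *f)
{
    f->prev = head;
    f->nxt_free = *head;
    if (*head)
        (*head)->prev = &f->nxt_free;
    *head = f;
}

static void detach(struct frag *f)
{
    *f->prev = f->nxt_free;
    if (f->nxt_free)
        f->nxt_free->prev = f->prev;
    f->prev = NULL;          /* a NULL prev now means "in use", cf. frag_is_free() */
}

int main(void)
{
    struct frag *head = NULL, a = { 32 }, b = { 64 };

    attach(&head, &a);
    attach(&head, &b);       /* list: head -> b -> a */
    detach(&a);              /* unlinked without traversing from head */
    printf("head: %s, head->nxt_free: %p\n",
           head == &b ? "b" : "?", (void *)head->nxt_free);
    return 0;
}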
@@ -411,7 +460,7 @@ int hp_mem_warming(struct hp_block *hpb)
        while (bucket_mem >= FRAG_OVERHEAD + current_frag_size) {
            hp_frag_detach(hpb, big_frag);
            if (stats_are_ready()) {
-                 update_stats_shm_frag_detach(big_frag);
+                 update_stats_shm_frag_detach(big_frag->size);
#if defined(DBG_MALLOC) || defined(STATISTICS)
                hpb->used += big_frag->size;
                hpb->real_used += big_frag->size + FRAG_OVERHEAD;
@@ -438,10 +487,6 @@ int hp_mem_warming(struct hp_block *hpb)
        hp_frag_attach(hpb, big_frag);
        if (stats_are_ready()) {
            update_stats_shm_frag_attach(big_frag);
- #if defined(DBG_MALLOC) || defined(STATISTICS)
-             hpb->used -= big_frag->size;
-             hpb->real_used -= big_frag->size + FRAG_OVERHEAD;
- #endif
        } else {
            hpb->used -= big_frag->size;
            hpb->real_used -= big_frag->size + FRAG_OVERHEAD;
@@ -497,8 +542,7 @@ static struct hp_block *hp_malloc_init(char *address, unsigned long size,

    size = ROUNDDOWN(size);

-     init_overhead = (ROUNDUP(sizeof(struct hp_block)) + 2 * FRAG_OVERHEAD);
-
+     init_overhead = ROUNDUP(sizeof(struct hp_block)) + 2 * FRAG_OVERHEAD;
    if (size < init_overhead)
    {
        LM_ERR("not enough memory for the basic structures! "
@@ -516,24 +560,19 @@ static struct hp_block *hp_malloc_init(char *address, unsigned long size,
    hpb->used = 0;
    hpb->real_used = init_overhead;
    hpb->max_real_used = init_overhead;
+     hpb->total_fragments = 2;
    gettimeofday(&hpb->last_updated, NULL);

    hpb->first_frag = (struct hp_frag *)(start + ROUNDUP(sizeof(struct hp_block)));
-     hpb->last_frag = (struct hp_frag *)(end - sizeof(struct hp_frag));
-     /* init initial fragment*/
-     hpb->first_frag->size = size - init_overhead;
+     hpb->last_frag = (struct hp_frag *)(end - sizeof *hpb->last_frag);
    hpb->last_frag->size = 0;

-     hpb->last_frag->prev = NULL;
+     /* init initial fragment */
+     hpb->first_frag->size = size - init_overhead;
    hpb->first_frag->prev = NULL;
+     hpb->last_frag->prev = NULL;

-     /* link initial fragment into the free list*/
-
-     hpb->large_space = 0;
-     hpb->large_limit = hpb->size / 100 * HP_MALLOC_DEFRAG_PERCENT;
-
-     if (hpb->large_limit < HP_MALLOC_DEFRAG_LIMIT)
-         hpb->large_limit = HP_MALLOC_DEFRAG_LIMIT;
+     hp_frag_attach(hpb, hpb->first_frag);

    return hpb;
}
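/*
 * Side note on the init changes above: the block now ends with a zero-sized
 * sentinel fragment ("last_frag"), the single initial free fragment is
 * attached here rather than in the pkg/shm wrappers, and total_fragments
 * starts at 2 (the big free fragment plus the sentinel). A rough layout
 * computation under those assumptions, with stand-in sizes only:
 */
#include <stdio.h>

int main(void)
{
    unsigned long size      = 1UL << 20; /* example pool size */
    unsigned long block_hdr = 1024;      /* stand-in for ROUNDUP(sizeof(struct hp_block)) */
    unsigned long frag_hdr  = 32;        /* stand-in for FRAG_OVERHEAD */
    unsigned long init_overhead = block_hdr + 2 * frag_hdr;

    /* first_frag's payload ends exactly where the sentinel last_frag begins */
    printf("first_frag at %lu, payload %lu, last_frag at %lu\n",
           block_hdr, size - init_overhead, size - frag_hdr);
    return 0;
}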
@@ -549,14 +588,6 @@ struct hp_block *hp_pkg_malloc_init(char *address, unsigned long size,
        return NULL;
    }

-     hp_frag_attach(hpb, hpb->first_frag);
-
-     /* first fragment attach is the equivalent of a split */
- #if defined(DBG_MALLOC) && !defined(STATISTICS)
-     hpb->real_used += FRAG_OVERHEAD;
-     hpb->total_fragments++;
- #endif
-
    return hpb;
}
@@ -575,23 +606,6 @@ struct hp_block *hp_shm_malloc_init(char *address, unsigned long size,
    hpb->free_hash[PEEK_HASH_RR(hpb, hpb->first_frag->size)].total_no++;
#endif

-     hp_frag_attach(hpb, hpb->first_frag);
-
-     /* first fragment attach is the equivalent of a split */
-     if (stats_are_ready()) {
- #if defined(STATISTICS) && !defined(HP_MALLOC_FAST_STATS)
-         update_stat(shm_rused, FRAG_OVERHEAD);
-         update_stat(shm_frags, 1);
- #endif
- #if defined(DBG_MALLOC) || defined(STATISTICS)
-         hpb->real_used += FRAG_OVERHEAD;
-         hpb->total_fragments++;
- #endif
-     } else {
-         hpb->real_used += FRAG_OVERHEAD;
-         hpb->total_fragments++;
-     }
-
#ifdef HP_MALLOC_FAST_STATS
#ifdef DBG_MALLOC
    hp_stats_lock = hp_shm_malloc_unsafe(hpb, sizeof *hp_stats_lock,
@@ -619,7 +633,7 @@ void hp_stats_core_init(struct hp_block *hp, int core_index)
{
    struct hp_frag *f;

-     for (f = hp->first_frag; (char *)f < (char *)hp->last_frag; f = FRAG_NEXT(f))
+     for (f = hp->first_frag; f < hp->last_frag; f = FRAG_NEXT(f))
        if (!frag_is_free(f))
            f->statistic_index = core_index;
}