@@ -324,14 +324,14 @@ void __rpm_frag_split(struct hp_block *hpb, struct hp_frag *frag,
 
 /* size should already be rounded-up */
 #if !defined INLINE_ALLOC && defined DBG_MALLOC
-void shm_frag_split_dbg(struct hp_block *hpb, struct hp_frag *frag,
+unsigned long shm_frag_split_dbg(struct hp_block *hpb, struct hp_frag *frag,
 			unsigned long size, unsigned int old_hash,
 			const char *file, const char *func, unsigned int line)
 #elif !defined HP_MALLOC_DYN && !defined DBG_MALLOC
-void shm_frag_split(struct hp_block *hpb, struct hp_frag *frag,
+unsigned long shm_frag_split(struct hp_block *hpb, struct hp_frag *frag,
 			unsigned long size, unsigned int old_hash)
 #else
-void shm_frag_split(struct hp_block *hpb, struct hp_frag *frag,
+unsigned long shm_frag_split(struct hp_block *hpb, struct hp_frag *frag,
 			unsigned long size, unsigned int old_hash,
 			const char *file, const char *func, unsigned int line)
 #endif
@@ -345,12 +345,12 @@ void shm_frag_split(struct hp_block *hpb, struct hp_frag *frag,
 	hpb->free_hash[PEEK_HASH_RR(hpb, size)].total_no++;
 #endif
 
-	rest = frag->size - size;
+	rest = frag->size - size - FRAG_OVERHEAD;
 	frag->size = size;
 
 	/* split the fragment */
 	n = FRAG_NEXT(frag);
-	n->size = rest - FRAG_OVERHEAD;
+	n->size = rest;
 
 #ifdef DBG_MALLOC
 	/* frag created by malloc, mark it*/
@@ -369,11 +369,11 @@ void shm_frag_split(struct hp_block *hpb, struct hp_frag *frag,
 	if (hash != old_hash)
 		SHM_UNLOCK(hash);
 
-	update_stats_shm_frag_attach(n);
-
 #ifdef HP_MALLOC_FAST_STATS
 	hpb->free_hash[PEEK_HASH_RR(hpb, n->size)].total_no++;
 #endif
+
+	return rest;
 }
 
 #if !defined INLINE_ALLOC && defined DBG_MALLOC
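Note on the hunks above: `shm_frag_split()` now reports the remainder instead of updating the statistics itself. Since a fresh header is carved out of the tail, the remainder's payload is `frag->size - size - FRAG_OVERHEAD`; the old code's `rest - FRAG_OVERHEAD` adjustment simply moves into the computation of `rest`. A minimal standalone sketch of the conserved-footprint invariant (`OVERHEAD` is a hypothetical stand-in for `FRAG_OVERHEAD`):

	/* sketch only -- OVERHEAD stands in for FRAG_OVERHEAD */
	#include <assert.h>

	#define OVERHEAD 32UL

	/* payload left for the remainder fragment after carving `want`
	 * bytes plus a new header out of `payload` usable bytes */
	static unsigned long split_rest(unsigned long payload, unsigned long want)
	{
		return payload - want - OVERHEAD;
	}

	int main(void)
	{
		unsigned long rest = split_rest(1024, 256);

		/* footprint is conserved: want + new header + rest == payload */
		assert(256 + OVERHEAD + rest == 1024);
		return 0;
	}

Returning `rest` is what lets the callers below collapse their statistics adjustments into two `update_stat()` calls each.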
@@ -505,10 +505,6 @@ void *hp_shm_malloc_unsafe(struct hp_block *hpb, unsigned long size,
 
 	if (stats_are_ready()) {
 		update_stats_shm_frag_detach(frag->size);
-#if defined(DBG_MALLOC) || defined(STATISTICS)
-		hpb->used += frag->size;
-		hpb->real_used += frag->size + FRAG_OVERHEAD;
-#endif
 	} else {
 		hpb->used += frag->size;
 		hpb->real_used += frag->size + FRAG_OVERHEAD;
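With the `#if` block gone, the block-local `used`/`real_used` counters appear to be maintained only on the early-startup path; once `stats_are_ready()`, the `update_stats_*` helpers are the sole accounting. A standalone model of that pattern (all names here are hypothetical stand-ins, not the hp_malloc API):

	/* standalone model of the accounting split -- names hypothetical */
	#include <stdio.h>

	static int stats_ready;       /* stand-in for stats_are_ready() */
	static long shm_used_stat;    /* stand-in for the shm_used stat */
	static long block_used;       /* stand-in for hpb->used         */

	static void account_alloc(long delta)
	{
		if (stats_ready)
			shm_used_stat += delta;  /* stats are the only counter   */
		else
			block_used += delta;     /* early startup: local only    */
	}

	int main(void)
	{
		account_alloc(512);          /* before stats init: block-local */
		stats_ready = 1;
		account_alloc(256);          /* after: stats only, never both  */
		printf("block=%ld stat=%ld\n", block_used, shm_used_stat);
		return 0;
	}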
@@ -632,7 +628,8 @@ void *hp_shm_malloc(struct hp_block *hpb, unsigned long size,
 {
 	struct hp_frag *frag;
 	unsigned int init_hash, hash, sec_hash;
-	unsigned long old_size;
+	unsigned long old_size, split_size;
+	long extra_used;
 	int i = 0;
 
 	/* size must be a multiple of ROUNDTO */
@@ -704,24 +701,28 @@ void *hp_shm_malloc(struct hp_block *hpb, unsigned long size,
 	frag->line = line;
 #endif
 
-	update_stats_shm_frag_detach(old_size);
-
 	if (can_split_shm_frag(frag, size)) {
 #if !defined INLINE_ALLOC && defined DBG_MALLOC
 		/* split the fragment if possible */
-		shm_frag_split_dbg(hpb, frag, size, hash, file, "hp_malloc frag", line);
+		split_size = shm_frag_split_dbg(hpb, frag, size, hash, file,
+		                                "hp_malloc frag", line);
 #elif !defined HP_MALLOC_DYN && !defined DBG_MALLOC
-		shm_frag_split(hpb, frag, size, hash);
+		split_size = shm_frag_split(hpb, frag, size, hash);
 #else
-		shm_frag_split(hpb, frag, size, hash, file, "hp_malloc frag", line);
+		split_size = shm_frag_split(hpb, frag, size, hash, file, "hp_malloc frag", line);
 #endif
 		SHM_UNLOCK(hash);
 
-		update_stats_shm_frag_split();
+		extra_used = split_size + FRAG_OVERHEAD;
+		update_stat(shm_frags, +1);
 	} else {
 		SHM_UNLOCK(hash);
+		extra_used = 0;
 	}
 
+	update_stat(shm_used, old_size - extra_used);
+	update_stat(shm_rused, old_size + FRAG_OVERHEAD - extra_used);
+
 #ifndef HP_MALLOC_FAST_STATS
 	unsigned long real_used;
 
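The two `update_stat()` calls replace the old `update_stats_shm_frag_detach()` / `update_stats_shm_frag_split()` pair: detaching the fragment charges `old_size`, and a successful split immediately refunds the remainder plus its new header (`extra_used`), so only the net change is published, one atomic update per stat. A worked example with hypothetical sizes (`OVERHEAD` standing in for `FRAG_OVERHEAD`):

	/* worked example of the net accounting -- numbers hypothetical */
	#include <assert.h>

	#define OVERHEAD 32L

	int main(void)
	{
		long old_size = 1024;                        /* payload of the chosen frag */
		long split_size = old_size - 256 - OVERHEAD; /* remainder after a 256B req */
		long extra_used = split_size + OVERHEAD;     /* refunded to the free pool  */

		/* shm_used grows by exactly the bytes the caller got ... */
		assert(old_size - extra_used == 256);
		/* ... and shm_rused additionally charges the caller's header */
		assert(old_size + OVERHEAD - extra_used == 256 + OVERHEAD);
		return 0;
	}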
@@ -945,7 +946,8 @@ void hp_shm_free(struct hp_block *hpb, void *p,
 {
 	struct hp_frag *f, *neigh;
 	unsigned int hash;
-	unsigned long neigh_size;
+	unsigned long neigh_size, f_size;
+	long extra_used;
 
 	if (!p) {
 		LM_GEN1(memlog, "free(NULL) called\n");
@@ -964,18 +966,22 @@ void hp_shm_free(struct hp_block *hpb, void *p,
 		if (!frag_is_free(neigh) || neigh->size != neigh_size) {
 			/* the fragment is volatile, abort mission */
 			hp_unlock(hpb, hash);
+			extra_used = 0;
 		} else {
 			hp_frag_detach(hpb, neigh);
 			hp_unlock(hpb, hash);
 
-			update_stats_shm_frag_detach(neigh_size);
-
 			f->size += neigh_size + FRAG_OVERHEAD;
-			update_stats_shm_frag_merge();
+
+			extra_used = neigh_size + FRAG_OVERHEAD;
+			update_stat(shm_frags, -1);
 		}
+	} else {
+		extra_used = 0;
 	}
 
 	hash = PEEK_HASH_RR(hpb, f->size);
+	f_size = f->size;
 
 	SHM_LOCK(hash);
 	hp_frag_attach(hpb, f);
@@ -986,12 +992,8 @@ void hp_shm_free(struct hp_block *hpb, void *p,
 #endif
 	SHM_UNLOCK(hash);
 
-	update_stats_shm_frag_attach(f);
-
-#if defined(DBG_MALLOC) || defined(STATISTICS)
-	hpb->used -= f->size;
-	hpb->real_used -= f->size + FRAG_OVERHEAD;
-#endif
+	update_stat(shm_used, -(long)f_size + extra_used);
+	update_stat(shm_rused, -(long)(f_size + FRAG_OVERHEAD) + extra_used);
 }
 
 #if !defined INLINE_ALLOC && defined DBG_MALLOC
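Two details in the free path are worth spelling out. First, `f_size` snapshots `f->size` before `hp_frag_attach()` re-publishes the fragment; the old code read `f->size` after `SHM_UNLOCK()`, when another process may already have grabbed and resized it. Second, when the right-hand neighbour was coalesced, `extra_used` cancels its contribution so the counters drop by exactly the block being freed. A worked example with hypothetical sizes (`OVERHEAD` standing in for `FRAG_OVERHEAD`):

	/* worked example of the free-path accounting -- numbers hypothetical */
	#include <assert.h>

	#define OVERHEAD 32L

	int main(void)
	{
		long freed = 256;    /* payload of the block being freed        */
		long neigh = 512;    /* payload of the free right-hand neighbour */

		long f_size = freed + neigh + OVERHEAD;  /* after coalescing    */
		long extra_used = neigh + OVERHEAD;      /* neighbour's share   */

		/* shm_used drops by exactly the freed payload ... */
		assert(-f_size + extra_used == -freed);
		/* ... and shm_rused also releases the freed block's header */
		assert(-(f_size + OVERHEAD) + extra_used == -(freed + OVERHEAD));
		return 0;
	}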