 #include "cache.h"
 #include "mpool.h"
-
-#define GOLDEN_RATIO_32 0x61C88647
-#define HASH(val) \
-    (((val) * (GOLDEN_RATIO_32)) >> (32 - (cache_size_bits))) & (cache_size - 1)
+#include "utils.h"

 /* THRESHOLD is set to identify hot spots. Once the frequency of use for a block
  * exceeds the THRESHOLD, the JIT compiler flow is triggered.
  */

 static uint32_t cache_size, cache_size_bits;
 static struct mpool *cache_mp;

+/* hash function for the cache */
+HASH_FUNC_IMPL(cache_hash, cache_size_bits, cache_size);
+
 #if RV32_HAS(ARC)
 /* The Adaptive Replacement Cache (ARC) improves the traditional LRU strategy
  * by dividing the cache into two lists: T1 and T2. T1 follows the LRU
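The hunk above replaces the open-coded golden-ratio HASH macro with a
cache_hash() function generated by HASH_FUNC_IMPL from "utils.h". The macro's
expansion is not shown in this diff; the following is only a sketch of what it
plausibly generates, assuming it mirrors the removed macro:

/* Hedged sketch: HASH_FUNC_IMPL's real definition lives in "utils.h" and is
 * not part of this diff. It is assumed here to emit a static inline function
 * equivalent to the removed HASH macro: a Fibonacci (golden-ratio)
 * multiplicative hash that keeps the top size_bits bits of the product and
 * masks the result into the [0, size) bucket range. */
#include <stdint.h>

#define GOLDEN_RATIO_32 0x61C88647 /* ~2^32 / golden ratio, scrambles keys */
#define HASH_FUNC_IMPL(name, size_bits, size)                       \
    static inline uint32_t name(uint32_t val)                       \
    {                                                               \
        return (((val) * GOLDEN_RATIO_32) >> (32 - (size_bits))) & \
               ((size) - 1);                                        \
    }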
@@ -337,16 +337,18 @@ static inline void move_to_mru(cache_t *cache,
 void *cache_get(cache_t *cache, uint32_t key)
 {
-    if (!cache->capacity || hlist_empty(&cache->map->ht_list_head[HASH(key)]))
+    if (!cache->capacity ||
+        hlist_empty(&cache->map->ht_list_head[cache_hash(key)]))
         return NULL;

 #if RV32_HAS(ARC)
     arc_entry_t *entry = NULL;
 #ifdef __HAVE_TYPEOF
-    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list)
+    hlist_for_each_entry (entry, &cache->map->ht_list_head[cache_hash(key)],
+                          ht_list)
 #else
-    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list,
-                          arc_entry_t)
+    hlist_for_each_entry (entry, &cache->map->ht_list_head[cache_hash(key)],
+                          ht_list, arc_entry_t)
 #endif
     {
         if (entry->key == key)
@@ -388,10 +390,11 @@ void *cache_get(cache_t *cache, uint32_t key)
 #else /* !RV32_HAS(ARC) */
     lfu_entry_t *entry = NULL;
 #ifdef __HAVE_TYPEOF
-    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list)
+    hlist_for_each_entry (entry, &cache->map->ht_list_head[cache_hash(key)],
+                          ht_list)
 #else
-    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list,
-                          lfu_entry_t)
+    hlist_for_each_entry (entry, &cache->map->ht_list_head[cache_hash(key)],
+                          ht_list, lfu_entry_t)
 #endif
     {
         if (entry->key == key)
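Both the ARC and LFU lookup paths follow the same pattern: cache_hash(key)
selects a bucket, and the hlist_for_each_entry walk compares full keys because
distinct keys can collide in one bucket. The #ifdef __HAVE_TYPEOF split merely
chooses between the three-argument macro form (entry type inferred via
__typeof__) and the four-argument form that names the type explicitly. A
simplified, hypothetical model of this separate-chaining lookup, without the
intrusive-list machinery (node_t and chain_lookup are illustrative names, not
the project's API; cache_hash is the function introduced by this change):

#include <stdint.h>
#include <stddef.h>

typedef struct node {
    uint32_t key;
    void *value;
    struct node *next; /* collision chain within a single bucket */
} node_t;

static void *chain_lookup(node_t **buckets, uint32_t key)
{
    /* the hash picks the bucket; the chain walk resolves collisions */
    for (node_t *n = buckets[cache_hash(key)]; n; n = n->next) {
        if (n->key == key)
            return n->value;
    }
    return NULL; /* miss, mirroring cache_get()'s early return */
}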
@@ -478,7 +481,8 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
         list_add(&new_entry->list, cache->lists[LRU_ghost_list]);
         cache->list_size[LRU_ghost_list]++;
     }
-    hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
+    hlist_add_head(&new_entry->ht_list,
+                   &cache->map->ht_list_head[cache_hash(key)]);

     CACHE_ASSERT(cache);
 #else /* !RV32_HAS(ARC) */
@@ -504,7 +508,8 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
     new_entry->frequency = 0;
     list_add(&new_entry->list, cache->lists[new_entry->frequency++]);
     cache->list_size++;
-    hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
+    hlist_add_head(&new_entry->ht_list,
+                   &cache->map->ht_list_head[cache_hash(key)]);
     assert(cache->list_size <= cache->capacity);
 #endif
     return delete_value;
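In both cache_put() paths the new entry is pushed onto the head of its bucket
with hlist_add_head(), an O(1) insertion that also places fresh blocks first
in the lookup chain. A hypothetical sketch of the same pattern, reusing the
illustrative node_t model from the lookup sketch above:

/* Hypothetical head-insertion matching the hlist_add_head() calls above. */
static void chain_insert(node_t **buckets, node_t *n)
{
    uint32_t idx = cache_hash(n->key);
    n->next = buckets[idx]; /* old head becomes the new node's successor */
    buckets[idx] = n;       /* new entry is now first in its chain */
}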