 #ifndef BITCOIN_CUCKOOCACHE_H
 #define BITCOIN_CUCKOOCACHE_H

+#include <util/fastrange.h>
+
 #include <algorithm> // std::find
 #include <array>
 #include <atomic>
@@ -219,13 +221,8 @@ class cache
      * One option would be to implement the same trick the compiler uses and compute the
      * constants for exact division based on the size, as described in "{N}-bit Unsigned
      * Division via {N}-bit Multiply-Add" by Arch D. Robison in 2005. But that code is
-     * somewhat complicated and the result is still slower than other options:
-     *
-     * Instead we treat the 32-bit random number as a Q32 fixed-point number in the range
-     * [0, 1) and simply multiply it by the size. Then we just shift the result down by
-     * 32-bits to get our bucket number. The result has non-uniformity the same as a
-     * mod, but it is much faster to compute. More about this technique can be found at
-     * https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ .
+     * somewhat complicated and the result is still slower than an even simpler option:
+     * see the FastRange32 function in util/fastrange.h.
      *
      * The resulting non-uniformity is also more equally distributed which would be
      * advantageous for something like linear probing, though it shouldn't matter
@@ -241,14 +238,14 @@ class cache
      */
     inline std::array<uint32_t, 8> compute_hashes(const Element& e) const
     {
-        return {{(uint32_t)(((uint64_t)hash_function.template operator()<0>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<1>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<2>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<3>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<4>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<5>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<6>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<7>(e) * (uint64_t)size) >> 32)}};
+        return {{FastRange32(hash_function.template operator()<0>(e), size),
+                 FastRange32(hash_function.template operator()<1>(e), size),
+                 FastRange32(hash_function.template operator()<2>(e), size),
+                 FastRange32(hash_function.template operator()<3>(e), size),
+                 FastRange32(hash_function.template operator()<4>(e), size),
+                 FastRange32(hash_function.template operator()<5>(e), size),
+                 FastRange32(hash_function.template operator()<6>(e), size),
+                 FastRange32(hash_function.template operator()<7>(e), size)}};
     }

     /** invalid returns a special index that can never be inserted to
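
For reference, a minimal sketch of the reduction that FastRange32 performs, matching the behavior of the removed inline expression; the authoritative implementation lives in util/fastrange.h, so this standalone version is illustrative only:

#include <cstdint>

// Illustrative sketch (assumption: mirrors the FastRange32 helper in
// util/fastrange.h). Maps a uniformly distributed 32-bit hash x onto a
// bucket index in [0, n) without a division or modulo: take the high
// 32 bits of the 64-bit product x * n, i.e. treat x as a Q32 fixed-point
// value in [0, 1) and scale it by n.
static inline uint32_t FastRange32(uint32_t x, uint32_t n)
{
    return static_cast<uint32_t>((uint64_t{x} * n) >> 32);
}

With such a helper, each entry of compute_hashes above becomes FastRange32(hash_function.template operator()<I>(e), size), which computes the same value as the old ((uint64_t)hash * (uint64_t)size) >> 32 expression but as a single named operation.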