
Commit efab28b

Add FastRange32 function and use it throughout the codebase

1 parent 96ecd6f

3 files changed (+30 −30 lines)

src/common/bloom.cpp

Lines changed: 3 additions & 10 deletions
```diff
@@ -11,6 +11,7 @@
 #include <script/standard.h>
 #include <span.h>
 #include <streams.h>
+#include <util/fastrange.h>
 
 #include <algorithm>
 #include <cmath>
@@ -191,14 +192,6 @@ static inline uint32_t RollingBloomHash(unsigned int nHashNum, uint32_t nTweak,
     return MurmurHash3(nHashNum * 0xFBA4C795 + nTweak, vDataToHash);
 }
 
-
-// A replacement for x % n. This assumes that x and n are 32bit integers, and x is a uniformly random distributed 32bit value
-// which should be the case for a good hash.
-// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
-static inline uint32_t FastMod(uint32_t x, size_t n) {
-    return ((uint64_t)x * (uint64_t)n) >> 32;
-}
-
 void CRollingBloomFilter::insert(Span<const unsigned char> vKey)
 {
     if (nEntriesThisGeneration == nEntriesPerGeneration) {
@@ -223,7 +216,7 @@ void CRollingBloomFilter::insert(Span<const unsigned char> vKey)
         uint32_t h = RollingBloomHash(n, nTweak, vKey);
         int bit = h & 0x3F;
         /* FastMod works with the upper bits of h, so it is safe to ignore that the lower bits of h are already used for bit. */
-        uint32_t pos = FastMod(h, data.size());
+        uint32_t pos = FastRange32(h, data.size());
         /* The lowest bit of pos is ignored, and set to zero for the first bit, and to one for the second. */
         data[pos & ~1] = (data[pos & ~1] & ~(((uint64_t)1) << bit)) | ((uint64_t)(nGeneration & 1)) << bit;
         data[pos | 1] = (data[pos | 1] & ~(((uint64_t)1) << bit)) | ((uint64_t)(nGeneration >> 1)) << bit;
@@ -235,7 +228,7 @@ bool CRollingBloomFilter::contains(Span<const unsigned char> vKey) const
     for (int n = 0; n < nHashFuncs; n++) {
         uint32_t h = RollingBloomHash(n, nTweak, vKey);
         int bit = h & 0x3F;
-        uint32_t pos = FastMod(h, data.size());
+        uint32_t pos = FastRange32(h, data.size());
         /* If the relevant bit is not set in either data[pos & ~1] or data[pos | 1], the filter does not contain vKey */
         if (!(((data[pos & ~1] | data[pos | 1]) >> bit) & 1)) {
             return false;
```
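The new function is a drop-in for the deleted FastMod: the same multiply-and-shift, with the only signature difference being that `n` is now a `uint32_t` rather than a `size_t`. That narrowing is harmless here, since the rolling filter's `data.size()` is far below 2^32. A minimal standalone sketch (not part of the commit; the sample sizes and hashes are made up) checking that old and new agree:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>

// The function added by this commit (see src/util/fastrange.h).
static inline uint32_t FastRange32(uint32_t x, uint32_t n) { return (uint64_t{x} * n) >> 32; }

// The helper deleted from bloom.cpp in this commit.
static inline uint32_t FastMod(uint32_t x, size_t n) { return ((uint64_t)x * (uint64_t)n) >> 32; }

int main()
{
    for (size_t n : {1u, 2u, 640u, 1000000u}) {              // hypothetical data.size() values
        for (uint32_t h : {0u, 1u, 0x80000000u, 0xFFFFFFFFu}) {
            // Identical results as long as n fits in 32 bits.
            assert(FastRange32(h, (uint32_t)n) == FastMod(h, n));
        }
    }
    return EXIT_SUCCESS;
}
```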

src/cuckoocache.h

Lines changed: 12 additions & 15 deletions
```diff
@@ -5,6 +5,8 @@
 #ifndef BITCOIN_CUCKOOCACHE_H
 #define BITCOIN_CUCKOOCACHE_H
 
+#include <util/fastrange.h>
+
 #include <algorithm> // std::find
 #include <array>
 #include <atomic>
@@ -219,13 +221,8 @@ class cache
      * One option would be to implement the same trick the compiler uses and compute the
      * constants for exact division based on the size, as described in "{N}-bit Unsigned
      * Division via {N}-bit Multiply-Add" by Arch D. Robison in 2005. But that code is
-     * somewhat complicated and the result is still slower than other options:
-     *
-     * Instead we treat the 32-bit random number as a Q32 fixed-point number in the range
-     * [0, 1) and simply multiply it by the size. Then we just shift the result down by
-     * 32-bits to get our bucket number. The result has non-uniformity the same as a
-     * mod, but it is much faster to compute. More about this technique can be found at
-     * https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ .
+     * somewhat complicated and the result is still slower than an even simpler option:
+     * see the FastRange32 function in util/fastrange.h.
      *
      * The resulting non-uniformity is also more equally distributed which would be
      * advantageous for something like linear probing, though it shouldn't matter
@@ -241,14 +238,14 @@ class cache
      */
     inline std::array<uint32_t, 8> compute_hashes(const Element& e) const
     {
-        return {{(uint32_t)(((uint64_t)hash_function.template operator()<0>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<1>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<2>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<3>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<4>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<5>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<6>(e) * (uint64_t)size) >> 32),
-                 (uint32_t)(((uint64_t)hash_function.template operator()<7>(e) * (uint64_t)size) >> 32)}};
+        return {{FastRange32(hash_function.template operator()<0>(e), size),
+                 FastRange32(hash_function.template operator()<1>(e), size),
+                 FastRange32(hash_function.template operator()<2>(e), size),
+                 FastRange32(hash_function.template operator()<3>(e), size),
+                 FastRange32(hash_function.template operator()<4>(e), size),
+                 FastRange32(hash_function.template operator()<5>(e), size),
+                 FastRange32(hash_function.template operator()<6>(e), size),
+                 FastRange32(hash_function.template operator()<7>(e), size)}};
     }
 
     /** invalid returns a special index that can never be inserted to
```
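The comment deleted here explained the intuition behind the multiply-and-shift: read the 32-bit hash as a Q32 fixed-point fraction in [0, 1) and scale it by the table size; the upper 32 bits of the product are the bucket index. A hand-checkable standalone sketch of that reading (the bucket counts are arbitrary illustration values, not from the commit):

```cpp
#include <cassert>
#include <cstdint>

static inline uint32_t FastRange32(uint32_t x, uint32_t n) { return (uint64_t{x} * n) >> 32; }

int main()
{
    // 0x80000000 reads as 0.5 in Q32; 0.5 * 8 buckets -> bucket 4.
    assert(FastRange32(0x80000000u, 8) == 4);
    // 0xC0000000 reads as 0.75; 0.75 * 8 -> bucket 6.
    assert(FastRange32(0xC0000000u, 8) == 6);
    // The largest hash reads as just under 1.0, so it maps to the last bucket.
    assert(FastRange32(0xFFFFFFFFu, 8) == 7);
    // Unlike h % 8, which keys off the low bits, this uses the high bits of h.
}
```

Since the refactor in compute_hashes replaces `(uint32_t)(((uint64_t)h * (uint64_t)size) >> 32)` with `FastRange32(h, size)`, which expands to the same expression, the change is behavior-preserving.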

src/util/fastrange.h

Lines changed: 15 additions & 5 deletions
```diff
@@ -7,11 +7,21 @@
 
 #include <cstdint>
 
-// Map a value x that is uniformly distributed in the range [0, 2^64) to a
-// value uniformly distributed in [0, n) by returning the upper 64 bits of
-// x * n.
-//
-// See: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+/* This file offers implementations of the fast range reduction technique described
+ * in https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+ *
+ * In short, they take an integer x and a range n, and return the upper bits of
+ * (x * n). If x is uniformly distributed over its domain, the result is as close to
+ * uniformly distributed over [0, n) as (x mod n) would be, but significantly faster.
+ */
+
+/** Fast range reduction with 32-bit input and 32-bit range. */
+static inline uint32_t FastRange32(uint32_t x, uint32_t n)
+{
+    return (uint64_t{x} * n) >> 32;
+}
+
+/** Fast range reduction with 64-bit input and 64-bit range. */
 static inline uint64_t FastRange64(uint64_t x, uint64_t n)
 {
 #ifdef __SIZEOF_INT128__
```
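The new file comment claims the result is as close to uniform over [0, n) as x mod n. Concretely, over the whole 32-bit domain each of the n outputs occurs either floor(2^32/n) or ceil(2^32/n) times, the same bucket-size multiset that x % n produces. A standalone sketch illustrating this (the range n = 6 and the exhaustive loop are arbitrary choices; with -O2 the walk takes a few seconds):

```cpp
#include <cstdint>
#include <cstdio>

static inline uint32_t FastRange32(uint32_t x, uint32_t n) { return (uint64_t{x} * n) >> 32; }

int main()
{
    const uint32_t n = 6;      // a range that does not divide 2^32 evenly
    uint64_t counts[6] = {0};
    // Histogram every 32-bit input; a strided sample would also show the trend.
    for (uint64_t x = 0; x <= 0xFFFFFFFFull; ++x) {
        counts[FastRange32((uint32_t)x, n)]++;
    }
    for (uint32_t i = 0; i < n; ++i) {
        std::printf("bucket %u: %llu\n", i, (unsigned long long)counts[i]);
    }
    // Expected: every bucket holds 715827882 or 715827883 entries
    // (2^32 / 6 ~= 715827882.67), i.e. at most one element of non-uniformity.
}
```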