 
 #include "murmurhash3.h"
 
+#include <asm/unaligned.h>
+
 static inline u64 rotl64(u64 x, s8 r)
 {
 	return (x << r) | (x >> (64 - r));
 }
 
 #define ROTL64(x, y) rotl64(x, y)
-static __always_inline u64 getblock64(const u64 *p, int i)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	return p[i];
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-	return __builtin_bswap64(p[i]);
-#else
-#error "can't figure out byte order"
-#endif
-}
-
-static __always_inline void putblock64(u64 *p, int i, u64 value)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	p[i] = value;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-	p[i] = __builtin_bswap64(value);
-#else
-#error "can't figure out byte order"
-#endif
-}
 
 /* Finalization mix - force all bits of a hash block to avalanche */
 
@@ -60,15 +41,17 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
 	const u64 c1 = 0x87c37b91114253d5LLU;
 	const u64 c2 = 0x4cf5ad432745937fLLU;
 
+	u64 *hash_out = out;
+
 	/* body */
 
 	const u64 *blocks = (const u64 *)(data);
 
 	int i;
 
 	for (i = 0; i < nblocks; i++) {
-		u64 k1 = getblock64(blocks, i * 2 + 0);
-		u64 k2 = getblock64(blocks, i * 2 + 1);
+		u64 k1 = get_unaligned_le64(&blocks[i * 2]);
+		u64 k2 = get_unaligned_le64(&blocks[i * 2 + 1]);
 
 		k1 *= c1;
 		k1 = ROTL64(k1, 31);
@@ -170,6 +153,6 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
 	h1 += h2;
 	h2 += h1;
 
-	putblock64((u64 *)out, 0, h1);
-	putblock64((u64 *)out, 1, h2);
+	put_unaligned_le64(h1, &hash_out[0]);
+	put_unaligned_le64(h2, &hash_out[1]);
 }
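
For reference, the accessors from <asm/unaligned.h> used above read and write 64-bit little-endian values at any address, which is what lets the byte-order-conditional getblock64()/putblock64() helpers be dropped. A minimal sketch of the equivalent behaviour follows; the example_* names are hypothetical stand-ins for illustration only, not the kernel API, and the real get_unaligned_le64()/put_unaligned_le64() are typically implemented more efficiently per architecture.

#include <linux/types.h>	/* u8, u64 */

/* Hypothetical illustration of get_unaligned_le64(): assemble a 64-bit
 * little-endian value one byte at a time, so neither the pointer's
 * alignment nor the host byte order matters. */
static inline u64 example_get_unaligned_le64(const void *p)
{
	const u8 *b = p;
	u64 v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | b[i];
	return v;
}

/* Hypothetical illustration of put_unaligned_le64(): store a 64-bit value
 * as little-endian bytes at any alignment. */
static inline void example_put_unaligned_le64(u64 v, void *p)
{
	u8 *b = p;
	int i;

	for (i = 0; i < 8; i++) {
		b[i] = v & 0xff;
		v >>= 8;
	}
}

Either way, the digest bytes written through hash_out are laid out little-endian on both little- and big-endian hosts, and the out buffer passed to murmurhash3_128() no longer has to be 8-byte aligned.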