Make MM3 hash match C*'s hash for all values

Fixes #147
Fixes PYTHON-89
Tyler Hobbs
2014-07-09 14:55:14 -05:00
parent 02e92d6f34
commit d8e802d697
3 changed files with 51 additions and 40 deletions

View File

@@ -43,6 +43,8 @@ Bug Fixes
 * Make sure cluster.metadata.partitioner and cluster.metadata.token_map
   are populated when all nodes in the cluster are included in the
   contact points (PYTHON-90)
+* Make Murmur3 hash match Cassandra's hash for all values (PYTHON-89,
+  github #147)

 2.0.2
 =====
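
Background on the fix: Cassandra's Java Murmur3 implementation does all of its arithmetic on signed 64-bit longs, using >>> for logical right shifts and sign-extending bytes as they are widened, while this C extension previously used unsigned types throughout. The two conventions agree until a sign bit gets set. A minimal illustration of the divergence in plain Python (helper names here are illustrative, not driver API):

MASK64 = 0xFFFFFFFFFFFFFFFF

def to_signed64(n):
    # Reinterpret the low 64 bits of n as a two's-complement long,
    # the way Java (and therefore Cassandra's partitioner) sees them.
    n &= MASK64
    return n - (1 << 64) if n & (1 << 63) else n

k = to_signed64(0xF123456789ABCDEF)   # sign bit set -> negative in Java

logical = (k & MASK64) >> 33          # Java's 'k >>> 33' / unsigned C '>>'
arithmetic = to_signed64(k >> 33)     # Java's 'k >> 33': sign-extending

print(logical == arithmetic)          # False: the top 33 bits differ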

View File

@@ -31,6 +31,10 @@ typedef unsigned char uint8_t;
 typedef unsigned long uint32_t;
 typedef unsigned __int64 uint64_t;

+typedef char int8_t;
+typedef long int32_t;
+typedef __int64 int64_t;
+
 #define FORCE_INLINE __forceinline

 #include <stdlib.h>
@@ -48,20 +52,22 @@ typedef unsigned __int64 uint64_t;
 #define FORCE_INLINE inline __attribute__((always_inline))

-inline uint32_t rotl32 ( uint32_t x, int8_t r )
+inline uint32_t rotl32 ( int32_t x, int8_t r )
 {
-  return (x << r) | (x >> (32 - r));
+  // cast to unsigned for logical right bitshift (to match C* MM3 implementation)
+  return (x << r) | ((int32_t) (((uint32_t) x) >> (32 - r)));
 }

-inline uint64_t rotl64 ( uint64_t x, int8_t r )
+inline int64_t rotl64 ( int64_t x, int8_t r )
 {
-  return (x << r) | (x >> (64 - r));
+  // cast to unsigned for logical right bitshift (to match C* MM3 implementation)
+  return (x << r) | ((int64_t) (((uint64_t) x) >> (64 - r)));
 }

-#define ROTL32(x,y) rotl32(x,y)
+#define ROTL32(x,y) rotl32(x,y)
 #define ROTL64(x,y) rotl64(x,y)

-#define BIG_CONSTANT(x) (x##LLU)
+#define BIG_CONSTANT(x) (x##LL)

 #endif // !defined(_MSC_VER)
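
For reference, the patched rotl64 now behaves like Java's Long.rotateLeft on a signed long: the left shift wraps modulo 2**64 and the right shift must be logical, which is what the cast through uint64_t provides. A rough Python equivalent (a sketch, not the driver's code):

MASK64 = 0xFFFFFFFFFFFFFFFF

def to_signed64(n):
    n &= MASK64
    return n - (1 << 64) if n & (1 << 63) else n

def rotl64(x, r):
    # Work on the unsigned 64-bit image of x (the (uint64_t) cast in C)
    # so the right shift is logical, then reinterpret as signed.
    x &= MASK64
    return to_signed64((x << r) | (x >> (64 - r)))

assert rotl64(1, 63) == -(1 << 63)    # the bit rotates into the sign position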
@@ -71,7 +77,7 @@ inline uint64_t rotl64 ( uint64_t x, int8_t r )
 // TODO 32bit?

-FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
+FORCE_INLINE int64_t getblock ( const int64_t * p, int i )
 {
   return p[i];
 }
@@ -79,33 +85,34 @@ FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
 //-----------------------------------------------------------------------------
 // Finalization mix - force all bits of a hash block to avalanche

-FORCE_INLINE uint64_t fmix ( uint64_t k )
+FORCE_INLINE int64_t fmix ( int64_t k )
 {
-  k ^= k >> 33;
+  // cast to unsigned for logical right bitshift (to match C* MM3 implementation)
+  k ^= ((uint64_t) k) >> 33;
   k *= BIG_CONSTANT(0xff51afd7ed558ccd);
-  k ^= k >> 33;
+  k ^= ((uint64_t) k) >> 33;
   k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
-  k ^= k >> 33;
+  k ^= ((uint64_t) k) >> 33;

   return k;
 }

-uint64_t MurmurHash3_x64_128 (const void * key, const int len,
+int64_t MurmurHash3_x64_128 (const void * key, const int len,
                               const uint32_t seed)
 {
-  const uint8_t * data = (const uint8_t*)key;
+  const int8_t * data = (const int8_t*)key;
   const int nblocks = len / 16;

-  uint64_t h1 = seed;
-  uint64_t h2 = seed;
+  int64_t h1 = seed;
+  int64_t h2 = seed;

-  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
-  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
-  uint64_t k1 = 0;
-  uint64_t k2 = 0;
+  int64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
+  int64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
+  int64_t k1 = 0;
+  int64_t k2 = 0;

-  const uint64_t * blocks = (const uint64_t *)(data);
-  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
+  const int64_t * blocks = (const int64_t *)(data);
+  const int8_t * tail = (const int8_t*)(data + nblocks*16);

   //----------
   // body
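
The fmix change follows the same pattern: the multiplies wrap modulo 2**64 (identical bit patterns whether the operands are viewed as signed or unsigned), so only the xor-shifts need the logical-shift cast. Sketched in Python with Java long semantics (illustrative only):

MASK64 = 0xFFFFFFFFFFFFFFFF

def to_signed64(n):
    n &= MASK64
    return n - (1 << 64) if n & (1 << 63) else n

def fmix64(k):
    # On the unsigned 64-bit image, '>>' is a logical shift (Java's '>>>')
    # and '& MASK64' models C's wrap-around multiplication.
    k &= MASK64
    k ^= k >> 33
    k = (k * 0xff51afd7ed558ccd) & MASK64
    k ^= k >> 33
    k = (k * 0xc4ceb9fe1a85ec53) & MASK64
    k ^= k >> 33
    return to_signed64(k)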
@@ -113,8 +120,8 @@ uint64_t MurmurHash3_x64_128 (const void * key, const int len,
   int i;
   for(i = 0; i < nblocks; i++)
   {
-    uint64_t k1 = getblock(blocks,i*2+0);
-    uint64_t k2 = getblock(blocks,i*2+1);
+    int64_t k1 = getblock(blocks,i*2+0);
+    int64_t k2 = getblock(blocks,i*2+1);

     k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
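
Each 16-byte block is consumed as two little-endian 64-bit words, and with getblock returning int64_t they are read as signed values. In Python terms that is struct's signed 'q' format (the helper below is illustrative):

import struct

def blocks64(data):
    # Read each 16-byte block as two signed little-endian 64-bit words,
    # mirroring the int64_t getblock() reads above.
    for i in range(len(data) // 16):
        yield struct.unpack_from('<qq', data, i * 16)

list(blocks64(b'\xfe' * 16))   # [(-72340172838076674, -72340172838076674)]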
@@ -128,26 +135,25 @@ uint64_t MurmurHash3_x64_128 (const void * key, const int len,
   //----------
   // tail

   switch(len & 15)
   {
-  case 15: k2 ^= (uint64_t)(tail[14]) << 48;
-  case 14: k2 ^= (uint64_t)(tail[13]) << 40;
-  case 13: k2 ^= (uint64_t)(tail[12]) << 32;
-  case 12: k2 ^= (uint64_t)(tail[11]) << 24;
-  case 11: k2 ^= (uint64_t)(tail[10]) << 16;
-  case 10: k2 ^= (uint64_t)(tail[ 9]) << 8;
-  case 9: k2 ^= (uint64_t)(tail[ 8]) << 0;
+  case 15: k2 ^= ((int64_t) (tail[14])) << 48;
+  case 14: k2 ^= ((int64_t) (tail[13])) << 40;
+  case 13: k2 ^= ((int64_t) (tail[12])) << 32;
+  case 12: k2 ^= ((int64_t) (tail[11])) << 24;
+  case 11: k2 ^= ((int64_t) (tail[10])) << 16;
+  case 10: k2 ^= ((int64_t) (tail[ 9])) << 8;
+  case 9: k2 ^= ((int64_t) (tail[ 8])) << 0;
           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

-  case 8: k1 ^= (uint64_t)(tail[ 7]) << 56;
-  case 7: k1 ^= (uint64_t)(tail[ 6]) << 48;
-  case 6: k1 ^= (uint64_t)(tail[ 5]) << 40;
-  case 5: k1 ^= (uint64_t)(tail[ 4]) << 32;
-  case 4: k1 ^= (uint64_t)(tail[ 3]) << 24;
-  case 3: k1 ^= (uint64_t)(tail[ 2]) << 16;
-  case 2: k1 ^= (uint64_t)(tail[ 1]) << 8;
-  case 1: k1 ^= (uint64_t)(tail[ 0]) << 0;
+  case 8: k1 ^= ((int64_t) (tail[ 7])) << 56;
+  case 7: k1 ^= ((int64_t) (tail[ 6])) << 48;
+  case 6: k1 ^= ((int64_t) (tail[ 5])) << 40;
+  case 5: k1 ^= ((int64_t) (tail[ 4])) << 32;
+  case 4: k1 ^= ((int64_t) (tail[ 3])) << 24;
+  case 3: k1 ^= ((int64_t) (tail[ 2])) << 16;
+  case 2: k1 ^= ((int64_t) (tail[ 1])) << 8;
+  case 1: k1 ^= ((int64_t) (tail[ 0])) << 0;
           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
 };
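
The tail handling is where the uint8_t-to-int8_t change is most visible: a high-bit byte such as 0xfe is now sign-extended to 64 bits before being shifted into k1/k2, matching Java's widening of byte to long; this is exactly what the new '\xfe' test vector below exercises. A small demonstration:

import struct

MASK64 = 0xFFFFFFFFFFFFFFFF

unsigned_byte = 0xfe                          # old (uint8_t) view
signed_byte, = struct.unpack('b', b'\xfe')    # new (int8_t) view: -2

print(hex(unsigned_byte << 8))                # 0xfe00
print(hex((signed_byte << 8) & MASK64))       # 0xfffffffffffffe00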
@@ -186,7 +192,7 @@ murmur3(PyObject *self, PyObject *args)
   const char *key;
   Py_ssize_t len;
   uint32_t seed = 0;
-  uint64_t result = 0;
+  int64_t result = 0;

   if (!PyArg_ParseTuple(args, "s#|I", &key, &len, &seed)) {
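
With result declared as int64_t, the value handed back to Python is already a signed long in Cassandra's token range. Given only an unsigned 64-bit hash, the same reinterpretation can be done with struct (the input value here is made up):

import struct

unsigned_hash = 0x841fcdfa6c510c71    # hypothetical unsigned MM3 output
token, = struct.unpack('<q', struct.pack('<Q', unsigned_hash))
print(token)                          # negative: within Cassandra's signed token range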

View File

@@ -222,6 +222,9 @@ class TestTokens(unittest.TestCase):
         try:
             murmur3_token = Murmur3Token(cassandra.metadata.MIN_LONG - 1)
             self.assertEqual(murmur3_token.hash_fn('123'), -7468325962851647638)
+            self.assertEqual(murmur3_token.hash_fn('\x00\xff\x10\xfa\x99' * 10), 5837342703291459765)
+            self.assertEqual(murmur3_token.hash_fn('\xfe' * 8), -8927430733708461935)
+            self.assertEqual(murmur3_token.hash_fn('\x10' * 8), 1446172840243228796)
             self.assertEqual(murmur3_token.hash_fn(str(cassandra.metadata.MAX_LONG)), 7162290910810015547)
             self.assertEqual(str(murmur3_token), '<Murmur3Token: -9223372036854775809>')
         except NoMurmur3: