From ab191a5b07b5c6043754f6a56cfc5782101d967d Mon Sep 17 00:00:00 2001
From: Eric NOULARD
Date: Tue, 24 Feb 2015 16:30:40 +0100
Subject: [PATCH] Implement Handle hash method using MurmurHash2
---
libHLA/CMakeLists.txt | 6 +-
libHLA/COPYING.MurmurHash | 6 +
libHLA/COPYING.MurmurHash2A | 4 -
libHLA/MurmurHash2.cpp | 527 ++++++++++++++++++++++++++
libHLA/MurmurHash2.h | 30 ++
libHLA/MurmurHash2A.cc | 149 --------
libHLA/MurmurHash3.cpp | 339 +++++++++++++++++
libHLA/MurmurHash3.h | 25 ++
libHLA/PMurHash.c | 317 ++++++++++++++++
libHLA/PMurHash.h | 91 +++++
libRTI/ieee1516-2010/CMakeLists.txt | 4 +-
libRTI/ieee1516-2010/HandleImplementation.cpp | 28 ++
libRTI/ieee1516-2010/HandleImplementation.h | 11 +-
13 files changed, 1379 insertions(+), 158 deletions(-)
create mode 100644 libHLA/COPYING.MurmurHash
delete mode 100644 libHLA/COPYING.MurmurHash2A
create mode 100644 libHLA/MurmurHash2.cpp
create mode 100644 libHLA/MurmurHash2.h
delete mode 100644 libHLA/MurmurHash2A.cc
create mode 100644 libHLA/MurmurHash3.cpp
create mode 100644 libHLA/MurmurHash3.h
create mode 100644 libHLA/PMurHash.c
create mode 100644 libHLA/PMurHash.h
diff --git a/libHLA/CMakeLists.txt b/libHLA/CMakeLists.txt
index 69593a4..ea3b7c7 100644
--- a/libHLA/CMakeLists.txt
+++ b/libHLA/CMakeLists.txt
@@ -15,9 +15,9 @@ LIST(APPEND LIBHLA_EXPORTED_INCLUDES
SOURCE_GROUP("Source Files\\Types1516" FILES ${LIBHLA_TYPES1516_SRCS})
enable_language(C)
-SET(LIBHLA_HASH_SRCS sha1.c MurmurHash2A.cc)
-LIST(APPEND LIBHLA_EXPORTED_INCLUDES sha1.h)
-set_source_files_properties(sha1.c PROPERTIES LANGUAGE "C")
+SET(LIBHLA_HASH_SRCS sha1.c MurmurHash2.cpp MurmurHash3.cpp PMurHash.c)
+LIST(APPEND LIBHLA_EXPORTED_INCLUDES sha1.h MurmurHash2.h MurmurHash3.h PMurHash.h)
+set_source_files_properties(sha1.c PMurHash.c PROPERTIES LANGUAGE "C")
SOURCE_GROUP("Source Files\\Hash" FILES ${LIBHLA_HASH_SRCS})
# Currently TLSF does not compile as-is on WIN32
diff --git a/libHLA/COPYING.MurmurHash b/libHLA/COPYING.MurmurHash
new file mode 100644
index 0000000..e751f79
--- /dev/null
+++ b/libHLA/COPYING.MurmurHash
@@ -0,0 +1,6 @@
+Taken from: http://code.google.com/p/smhasher
+On February, 24th 2015
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
\ No newline at end of file
diff --git a/libHLA/COPYING.MurmurHash2A b/libHLA/COPYING.MurmurHash2A
deleted file mode 100644
index 52947ed..0000000
--- a/libHLA/COPYING.MurmurHash2A
+++ /dev/null
@@ -1,4 +0,0 @@
-Taken from: http://sites.google.com/site/murmurhash/
-On March, 28th 2010
-All code is released to the public domain. For business purposes, Murmurhash is
-under the MIT license.
diff --git a/libHLA/MurmurHash2.cpp b/libHLA/MurmurHash2.cpp
new file mode 100644
index 0000000..b365c23
--- /dev/null
+++ b/libHLA/MurmurHash2.cpp
@@ -0,0 +1,527 @@
+//-----------------------------------------------------------------------------
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - This code makes a few assumptions about how your machine behaves -
+
+// 1. We can read a 4-byte value from any address without crashing
+// 2. sizeof(int) == 4
+
+// And it has a few limitations -
+
+// 1. It will not work incrementally.
+// 2. It will not produce the same results on little-endian and big-endian
+// machines.
+
+#include "MurmurHash2.h"
+
+
+namespace libhla {
+namespace hash {
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER)
+
+#define BIG_CONSTANT(x) (x)
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#define BIG_CONSTANT(x) (x##LLU)
+
+#endif // !defined(_MSC_VER)
+
+//-----------------------------------------------------------------------------
+
+uint32_t MurmurHash2 ( const void * key, int len, uint32_t seed )
+{
+ // 'm' and 'r' are mixing constants generated offline.
+ // They're not really 'magic', they just happen to work well.
+
+ const uint32_t m = 0x5bd1e995;
+ const int r = 24;
+
+ // Initialize the hash to a 'random' value
+
+ uint32_t h = seed ^ len;
+
+ // Mix 4 bytes at a time into the hash
+
+ const unsigned char * data = (const unsigned char *)key;
+
+ while(len >= 4)
+ {
+ uint32_t k = *(uint32_t*)data;
+
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+
+ h *= m;
+ h ^= k;
+
+ data += 4;
+ len -= 4;
+ }
+
+ // Handle the last few bytes of the input array
+
+ switch(len)
+ {
+ case 3: h ^= data[2] << 16;
+ case 2: h ^= data[1] << 8;
+ case 1: h ^= data[0];
+ h *= m;
+ };
+
+ // Do a few final mixes of the hash to ensure the last few
+ // bytes are well-incorporated.
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+
+ return h;
+}
+
+//-----------------------------------------------------------------------------
+// MurmurHash2, 64-bit versions, by Austin Appleby
+
+// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment
+// and endian-ness issues if used across multiple platforms.
+
+// 64-bit hash for 64-bit platforms
+
+uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed )
+{
+ const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
+ const int r = 47;
+
+ uint64_t h = seed ^ (len * m);
+
+ const uint64_t * data = (const uint64_t *)key;
+ const uint64_t * end = data + (len/8);
+
+ while(data != end)
+ {
+ uint64_t k = *data++;
+
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+
+ h ^= k;
+ h *= m;
+ }
+
+ const unsigned char * data2 = (const unsigned char*)data;
+
+ switch(len & 7)
+ {
+ case 7: h ^= uint64_t(data2[6]) << 48;
+ case 6: h ^= uint64_t(data2[5]) << 40;
+ case 5: h ^= uint64_t(data2[4]) << 32;
+ case 4: h ^= uint64_t(data2[3]) << 24;
+ case 3: h ^= uint64_t(data2[2]) << 16;
+ case 2: h ^= uint64_t(data2[1]) << 8;
+ case 1: h ^= uint64_t(data2[0]);
+ h *= m;
+ };
+
+ h ^= h >> r;
+ h *= m;
+ h ^= h >> r;
+
+ return h;
+}
+
+
+// 64-bit hash for 32-bit platforms
+
+uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed )
+{
+ const uint32_t m = 0x5bd1e995;
+ const int r = 24;
+
+ uint32_t h1 = uint32_t(seed) ^ len;
+ uint32_t h2 = uint32_t(seed >> 32);
+
+ const uint32_t * data = (const uint32_t *)key;
+
+ while(len >= 8)
+ {
+ uint32_t k1 = *data++;
+ k1 *= m; k1 ^= k1 >> r; k1 *= m;
+ h1 *= m; h1 ^= k1;
+ len -= 4;
+
+ uint32_t k2 = *data++;
+ k2 *= m; k2 ^= k2 >> r; k2 *= m;
+ h2 *= m; h2 ^= k2;
+ len -= 4;
+ }
+
+ if(len >= 4)
+ {
+ uint32_t k1 = *data++;
+ k1 *= m; k1 ^= k1 >> r; k1 *= m;
+ h1 *= m; h1 ^= k1;
+ len -= 4;
+ }
+
+ switch(len)
+ {
+ case 3: h2 ^= ((unsigned char*)data)[2] << 16;
+ case 2: h2 ^= ((unsigned char*)data)[1] << 8;
+ case 1: h2 ^= ((unsigned char*)data)[0];
+ h2 *= m;
+ };
+
+ h1 ^= h2 >> 18; h1 *= m;
+ h2 ^= h1 >> 22; h2 *= m;
+ h1 ^= h2 >> 17; h1 *= m;
+ h2 ^= h1 >> 19; h2 *= m;
+
+ uint64_t h = h1;
+
+ h = (h << 32) | h2;
+
+ return h;
+}
+
+//-----------------------------------------------------------------------------
+// MurmurHash2A, by Austin Appleby
+
+// This is a variant of MurmurHash2 modified to use the Merkle-Damgard
+// construction. Bulk speed should be identical to Murmur2, small-key speed
+// will be 10%-20% slower due to the added overhead at the end of the hash.
+
+// This variant fixes a minor issue where null keys were more likely to
+// collide with each other than expected, and also makes the function
+// more amenable to incremental implementations.
+
+#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
+
+uint32_t MurmurHash2A ( const void * key, int len, uint32_t seed )
+{
+ const uint32_t m = 0x5bd1e995;
+ const int r = 24;
+ uint32_t l = len;
+
+ const unsigned char * data = (const unsigned char *)key;
+
+ uint32_t h = seed;
+
+ while(len >= 4)
+ {
+ uint32_t k = *(uint32_t*)data;
+
+ mmix(h,k);
+
+ data += 4;
+ len -= 4;
+ }
+
+ uint32_t t = 0;
+
+ switch(len)
+ {
+ case 3: t ^= data[2] << 16;
+ case 2: t ^= data[1] << 8;
+ case 1: t ^= data[0];
+ };
+
+ mmix(h,t);
+ mmix(h,l);
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+
+ return h;
+}
+
+//-----------------------------------------------------------------------------
+// CMurmurHash2A, by Austin Appleby
+
+// This is a sample implementation of MurmurHash2A designed to work
+// incrementally.
+
+// Usage -
+
+// CMurmurHash2A hasher
+// hasher.Begin(seed);
+// hasher.Add(data1,size1);
+// hasher.Add(data2,size2);
+// ...
+// hasher.Add(dataN,sizeN);
+// uint32_t hash = hasher.End()
+
+class CMurmurHash2A
+{
+public:
+
+ void Begin ( uint32_t seed = 0 )
+ {
+ m_hash = seed;
+ m_tail = 0;
+ m_count = 0;
+ m_size = 0;
+ }
+
+ void Add ( const unsigned char * data, int len )
+ {
+ m_size += len;
+
+ MixTail(data,len);
+
+ while(len >= 4)
+ {
+ uint32_t k = *(uint32_t*)data;
+
+ mmix(m_hash,k);
+
+ data += 4;
+ len -= 4;
+ }
+
+ MixTail(data,len);
+ }
+
+ uint32_t End ( void )
+ {
+ mmix(m_hash,m_tail);
+ mmix(m_hash,m_size);
+
+ m_hash ^= m_hash >> 13;
+ m_hash *= m;
+ m_hash ^= m_hash >> 15;
+
+ return m_hash;
+ }
+
+private:
+
+ static const uint32_t m = 0x5bd1e995;
+ static const int r = 24;
+
+ void MixTail ( const unsigned char * & data, int & len )
+ {
+ while( len && ((len<4) || m_count) )
+ {
+ m_tail |= (*data++) << (m_count * 8);
+
+ m_count++;
+ len--;
+
+ if(m_count == 4)
+ {
+ mmix(m_hash,m_tail);
+ m_tail = 0;
+ m_count = 0;
+ }
+ }
+ }
+
+ uint32_t m_hash;
+ uint32_t m_tail;
+ uint32_t m_count;
+ uint32_t m_size;
+};
+
+//-----------------------------------------------------------------------------
+// MurmurHashNeutral2, by Austin Appleby
+
+// Same as MurmurHash2, but endian- and alignment-neutral.
+// Half the speed though, alas.
+
+uint32_t MurmurHashNeutral2 ( const void * key, int len, uint32_t seed )
+{
+ const uint32_t m = 0x5bd1e995;
+ const int r = 24;
+
+ uint32_t h = seed ^ len;
+
+ const unsigned char * data = (const unsigned char *)key;
+
+ while(len >= 4)
+ {
+ uint32_t k;
+
+ k = data[0];
+ k |= data[1] << 8;
+ k |= data[2] << 16;
+ k |= data[3] << 24;
+
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+
+ h *= m;
+ h ^= k;
+
+ data += 4;
+ len -= 4;
+ }
+
+ switch(len)
+ {
+ case 3: h ^= data[2] << 16;
+ case 2: h ^= data[1] << 8;
+ case 1: h ^= data[0];
+ h *= m;
+ };
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+
+ return h;
+}
+
+//-----------------------------------------------------------------------------
+// MurmurHashAligned2, by Austin Appleby
+
+// Same algorithm as MurmurHash2, but only does aligned reads - should be safer
+// on certain platforms.
+
+// Performance will be lower than MurmurHash2
+
+#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
+
+
+uint32_t MurmurHashAligned2 ( const void * key, int len, uint32_t seed )
+{
+ const uint32_t m = 0x5bd1e995;
+ const int r = 24;
+
+ const unsigned char * data = (const unsigned char *)key;
+
+ uint32_t h = seed ^ len;
+
+ int align = (uint64_t)data & 3;
+
+ if(align && (len >= 4))
+ {
+ // Pre-load the temp registers
+
+ uint32_t t = 0, d = 0;
+
+ switch(align)
+ {
+ case 1: t |= data[2] << 16;
+ case 2: t |= data[1] << 8;
+ case 3: t |= data[0];
+ }
+
+ t <<= (8 * align);
+
+ data += 4-align;
+ len -= 4-align;
+
+ int sl = 8 * (4-align);
+ int sr = 8 * align;
+
+ // Mix
+
+ while(len >= 4)
+ {
+ d = *(uint32_t *)data;
+ t = (t >> sr) | (d << sl);
+
+ uint32_t k = t;
+
+ MIX(h,k,m);
+
+ t = d;
+
+ data += 4;
+ len -= 4;
+ }
+
+ // Handle leftover data in temp registers
+
+ d = 0;
+
+ if(len >= align)
+ {
+ switch(align)
+ {
+ case 3: d |= data[2] << 16;
+ case 2: d |= data[1] << 8;
+ case 1: d |= data[0];
+ }
+
+ uint32_t k = (t >> sr) | (d << sl);
+ MIX(h,k,m);
+
+ data += align;
+ len -= align;
+
+ //----------
+ // Handle tail bytes
+
+ switch(len)
+ {
+ case 3: h ^= data[2] << 16;
+ case 2: h ^= data[1] << 8;
+ case 1: h ^= data[0];
+ h *= m;
+ };
+ }
+ else
+ {
+ switch(len)
+ {
+ case 3: d |= data[2] << 16;
+ case 2: d |= data[1] << 8;
+ case 1: d |= data[0];
+ case 0: h ^= (t >> sr) | (d << sl);
+ h *= m;
+ }
+ }
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+
+ return h;
+ }
+ else
+ {
+ while(len >= 4)
+ {
+ uint32_t k = *(uint32_t *)data;
+
+ MIX(h,k,m);
+
+ data += 4;
+ len -= 4;
+ }
+
+ //----------
+ // Handle tail bytes
+
+ switch(len)
+ {
+ case 3: h ^= data[2] << 16;
+ case 2: h ^= data[1] << 8;
+ case 1: h ^= data[0];
+ h *= m;
+ };
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+
+ return h;
+ }
+}
+
+//-----------------------------------------------------------------------------
+} /* end of namespace hash */
+} /* end of namespace libhla */
diff --git a/libHLA/MurmurHash2.h b/libHLA/MurmurHash2.h
new file mode 100644
index 0000000..d4eef2a
--- /dev/null
+++ b/libHLA/MurmurHash2.h
@@ -0,0 +1,30 @@
+//-----------------------------------------------------------------------------
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+#ifndef _MURMURHASH2_H_
+#define _MURMURHASH2_H_
+
+//-----------------------------------------------------------------------------
+
+
+#include "libhla.hh"
+
+namespace libhla {
+namespace hash {
+//-----------------------------------------------------------------------------
+
+HLA_EXPORT uint32_t MurmurHash2 ( const void * key, int len, uint32_t seed );
+HLA_EXPORT uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed );
+HLA_EXPORT uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed );
+HLA_EXPORT uint32_t MurmurHash2A ( const void * key, int len, uint32_t seed );
+HLA_EXPORT uint32_t MurmurHashNeutral2 ( const void * key, int len, uint32_t seed );
+HLA_EXPORT uint32_t MurmurHashAligned2 ( const void * key, int len, uint32_t seed );
+
+//-----------------------------------------------------------------------------
+
+} /* end of namespace hash */
+} /* end of namespace libhla */
+
+#endif // _MURMURHASH2_H_
+
diff --git a/libHLA/MurmurHash2A.cc b/libHLA/MurmurHash2A.cc
deleted file mode 100644
index 4429b2f..0000000
--- a/libHLA/MurmurHash2A.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-//-----------------------------------------------------------------------------
-// MurmurHash2A, by Austin Appleby
-// taken from: http://sites.google.com/site/murmurhash/
-// This is a variant of MurmurHash2 modified to use the Merkle-Damgard
-// construction. Bulk speed should be identical to Murmur2, small-key speed
-// will be 10%-20% slower due to the added overhead at the end of the hash.
-
-// This variant fixes a minor issue where null keys were more likely to
-// collide with each other than expected, and also makes the algorithm
-// more amenable to incremental implementations. All other caveats from
-// MurmurHash2 still apply.
-
-#include "libhla.hh"
-
-namespace libhla {
-namespace hash {
-
-#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
-
-unsigned int MurmurHash2A ( const void * key, int len, unsigned int seed )
-{
- const unsigned int m = 0x5bd1e995;
- const int r = 24;
- unsigned int l = len;
-
- const unsigned char * data = (const unsigned char *)key;
-
- unsigned int h = seed;
-
- while(len >= 4)
- {
- unsigned int k = *(unsigned int*)data;
-
- mmix(h,k);
-
- data += 4;
- len -= 4;
- }
-
- unsigned int t = 0;
-
- switch(len)
- {
- case 3: t ^= data[2] << 16;
- case 2: t ^= data[1] << 8;
- case 1: t ^= data[0];
- };
-
- mmix(h,t);
- mmix(h,l);
-
- h ^= h >> 13;
- h *= m;
- h ^= h >> 15;
-
- return h;
-}
-
-//-----------------------------------------------------------------------------
-// CMurmurHash2A, by Austin Appleby
-
-// This is a sample implementation of MurmurHash2A designed to work
-// incrementally.
-
-// Usage -
-
-// CMurmurHash2A hasher
-// hasher.Begin(seed);
-// hasher.Add(data1,size1);
-// hasher.Add(data2,size2);
-// ...
-// hasher.Add(dataN,sizeN);
-// unsigned int hash = hasher.End()
-
-class HLA_EXPORT CMurmurHash2A
-{
-public:
-
- void Begin ( unsigned int seed = 0 )
- {
- m_hash = seed;
- m_tail = 0;
- m_count = 0;
- m_size = 0;
- }
-
- void Add ( const unsigned char * data, int len )
- {
- m_size += len;
-
- MixTail(data,len);
-
- while(len >= 4)
- {
- unsigned int k = *(unsigned int*)data;
-
- mmix(m_hash,k);
-
- data += 4;
- len -= 4;
- }
-
- MixTail(data,len);
- }
-
- unsigned int End ( void )
- {
- mmix(m_hash,m_tail);
- mmix(m_hash,m_size);
-
- m_hash ^= m_hash >> 13;
- m_hash *= m;
- m_hash ^= m_hash >> 15;
-
- return m_hash;
- }
-
-private:
-
- static const unsigned int m = 0x5bd1e995;
- static const int r = 24;
-
- void MixTail ( const unsigned char * & data, int & len )
- {
- while( len && ((len<4) || m_count) )
- {
- m_tail |= (*data++) << (m_count * 8);
-
- m_count++;
- len--;
-
- if(m_count == 4)
- {
- mmix(m_hash,m_tail);
- m_tail = 0;
- m_count = 0;
- }
- }
- }
-
- unsigned int m_hash;
- unsigned int m_tail;
- unsigned int m_count;
- unsigned int m_size;
-};
-
-} /* end of namespace hash */
-} /* end of namespace libhla */
-
diff --git a/libHLA/MurmurHash3.cpp b/libHLA/MurmurHash3.cpp
new file mode 100644
index 0000000..1ae59b0
--- /dev/null
+++ b/libHLA/MurmurHash3.cpp
@@ -0,0 +1,339 @@
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - The x86 and x64 versions do _not_ produce the same results, as the
+// algorithms are optimized for their respective platforms. You can still
+// compile and run any of them on any platform, but your performance with the
+// non-native version will be less than optimal.
+
+#include "MurmurHash3.h"
+
+namespace libhla {
+namespace hash {
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER)
+
+#define FORCE_INLINE __forceinline
+
+#include <stdlib.h>
+
+#define ROTL32(x,y) _rotl(x,y)
+#define ROTL64(x,y) _rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x)
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#define FORCE_INLINE inline __attribute__((always_inline))
+
+inline uint32_t rotl32 ( uint32_t x, int8_t r )
+{
+ return (x << r) | (x >> (32 - r));
+}
+
+inline uint64_t rotl64 ( uint64_t x, int8_t r )
+{
+ return (x << r) | (x >> (64 - r));
+}
+
+#define ROTL32(x,y) rotl32(x,y)
+#define ROTL64(x,y) rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x##LLU)
+
+#endif // !defined(_MSC_VER)
+
+//-----------------------------------------------------------------------------
+// Block read - if your platform needs to do endian-swapping or can only
+// handle aligned reads, do the conversion here
+
+FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
+{
+ return p[i];
+}
+
+FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
+{
+ return p[i];
+}
+
+//-----------------------------------------------------------------------------
+// Finalization mix - force all bits of a hash block to avalanche
+
+FORCE_INLINE uint32_t fmix32 ( uint32_t h )
+{
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+
+ return h;
+}
+
+//----------
+
+FORCE_INLINE uint64_t fmix64 ( uint64_t k )
+{
+ k ^= k >> 33;
+ k *= BIG_CONSTANT(0xff51afd7ed558ccd);
+ k ^= k >> 33;
+ k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
+ k ^= k >> 33;
+
+ return k;
+}
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x86_32 ( const void * key, int len,
+ uint32_t seed, void * out )
+{
+ const uint8_t * data = (const uint8_t*)key;
+ const int nblocks = len / 4;
+
+ uint32_t h1 = seed;
+
+ const uint32_t c1 = 0xcc9e2d51;
+ const uint32_t c2 = 0x1b873593;
+
+ //----------
+ // body
+
+ const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);
+
+ for(int i = -nblocks; i; i++)
+ {
+ uint32_t k1 = getblock32(blocks,i);
+
+ k1 *= c1;
+ k1 = ROTL32(k1,15);
+ k1 *= c2;
+
+ h1 ^= k1;
+ h1 = ROTL32(h1,13);
+ h1 = h1*5+0xe6546b64;
+ }
+
+ //----------
+ // tail
+
+ const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
+
+ uint32_t k1 = 0;
+
+ switch(len & 3)
+ {
+ case 3: k1 ^= tail[2] << 16;
+ case 2: k1 ^= tail[1] << 8;
+ case 1: k1 ^= tail[0];
+ k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+ };
+
+ //----------
+ // finalization
+
+ h1 ^= len;
+
+ h1 = fmix32(h1);
+
+ *(uint32_t*)out = h1;
+}
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x86_128 ( const void * key, const int len,
+ uint32_t seed, void * out )
+{
+ const uint8_t * data = (const uint8_t*)key;
+ const int nblocks = len / 16;
+
+ uint32_t h1 = seed;
+ uint32_t h2 = seed;
+ uint32_t h3 = seed;
+ uint32_t h4 = seed;
+
+ const uint32_t c1 = 0x239b961b;
+ const uint32_t c2 = 0xab0e9789;
+ const uint32_t c3 = 0x38b34ae5;
+ const uint32_t c4 = 0xa1e38b93;
+
+ //----------
+ // body
+
+ const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);
+
+ for(int i = -nblocks; i; i++)
+ {
+ uint32_t k1 = getblock32(blocks,i*4+0);
+ uint32_t k2 = getblock32(blocks,i*4+1);
+ uint32_t k3 = getblock32(blocks,i*4+2);
+ uint32_t k4 = getblock32(blocks,i*4+3);
+
+ k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+
+ h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;
+
+ k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
+
+ h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;
+
+ k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
+
+ h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;
+
+ k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
+
+ h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
+ }
+
+ //----------
+ // tail
+
+ const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
+
+ uint32_t k1 = 0;
+ uint32_t k2 = 0;
+ uint32_t k3 = 0;
+ uint32_t k4 = 0;
+
+ switch(len & 15)
+ {
+ case 15: k4 ^= tail[14] << 16;
+ case 14: k4 ^= tail[13] << 8;
+ case 13: k4 ^= tail[12] << 0;
+ k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
+
+ case 12: k3 ^= tail[11] << 24;
+ case 11: k3 ^= tail[10] << 16;
+ case 10: k3 ^= tail[ 9] << 8;
+ case 9: k3 ^= tail[ 8] << 0;
+ k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
+
+ case 8: k2 ^= tail[ 7] << 24;
+ case 7: k2 ^= tail[ 6] << 16;
+ case 6: k2 ^= tail[ 5] << 8;
+ case 5: k2 ^= tail[ 4] << 0;
+ k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
+
+ case 4: k1 ^= tail[ 3] << 24;
+ case 3: k1 ^= tail[ 2] << 16;
+ case 2: k1 ^= tail[ 1] << 8;
+ case 1: k1 ^= tail[ 0] << 0;
+ k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+ };
+
+ //----------
+ // finalization
+
+ h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
+
+ h1 += h2; h1 += h3; h1 += h4;
+ h2 += h1; h3 += h1; h4 += h1;
+
+ h1 = fmix32(h1);
+ h2 = fmix32(h2);
+ h3 = fmix32(h3);
+ h4 = fmix32(h4);
+
+ h1 += h2; h1 += h3; h1 += h4;
+ h2 += h1; h3 += h1; h4 += h1;
+
+ ((uint32_t*)out)[0] = h1;
+ ((uint32_t*)out)[1] = h2;
+ ((uint32_t*)out)[2] = h3;
+ ((uint32_t*)out)[3] = h4;
+}
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x64_128 ( const void * key, const int len,
+ const uint32_t seed, void * out )
+{
+ const uint8_t * data = (const uint8_t*)key;
+ const int nblocks = len / 16;
+
+ uint64_t h1 = seed;
+ uint64_t h2 = seed;
+
+ const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
+ const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
+
+ //----------
+ // body
+
+ const uint64_t * blocks = (const uint64_t *)(data);
+
+ for(int i = 0; i < nblocks; i++)
+ {
+ uint64_t k1 = getblock64(blocks,i*2+0);
+ uint64_t k2 = getblock64(blocks,i*2+1);
+
+ k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
+
+ h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;
+
+ k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
+
+ h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
+ }
+
+ //----------
+ // tail
+
+ const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
+
+ uint64_t k1 = 0;
+ uint64_t k2 = 0;
+
+ switch(len & 15)
+ {
+ case 15: k2 ^= ((uint64_t)tail[14]) << 48;
+ case 14: k2 ^= ((uint64_t)tail[13]) << 40;
+ case 13: k2 ^= ((uint64_t)tail[12]) << 32;
+ case 12: k2 ^= ((uint64_t)tail[11]) << 24;
+ case 11: k2 ^= ((uint64_t)tail[10]) << 16;
+ case 10: k2 ^= ((uint64_t)tail[ 9]) << 8;
+ case 9: k2 ^= ((uint64_t)tail[ 8]) << 0;
+ k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
+
+ case 8: k1 ^= ((uint64_t)tail[ 7]) << 56;
+ case 7: k1 ^= ((uint64_t)tail[ 6]) << 48;
+ case 6: k1 ^= ((uint64_t)tail[ 5]) << 40;
+ case 5: k1 ^= ((uint64_t)tail[ 4]) << 32;
+ case 4: k1 ^= ((uint64_t)tail[ 3]) << 24;
+ case 3: k1 ^= ((uint64_t)tail[ 2]) << 16;
+ case 2: k1 ^= ((uint64_t)tail[ 1]) << 8;
+ case 1: k1 ^= ((uint64_t)tail[ 0]) << 0;
+ k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
+ };
+
+ //----------
+ // finalization
+
+ h1 ^= len; h2 ^= len;
+
+ h1 += h2;
+ h2 += h1;
+
+ h1 = fmix64(h1);
+ h2 = fmix64(h2);
+
+ h1 += h2;
+ h2 += h1;
+
+ ((uint64_t*)out)[0] = h1;
+ ((uint64_t*)out)[1] = h2;
+}
+
+//-----------------------------------------------------------------------------
+
+} /* end of namespace hash */
+} /* end of namespace libhla */
diff --git a/libHLA/MurmurHash3.h b/libHLA/MurmurHash3.h
new file mode 100644
index 0000000..0181dda
--- /dev/null
+++ b/libHLA/MurmurHash3.h
@@ -0,0 +1,25 @@
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+#ifndef _MURMURHASH3_H_
+#define _MURMURHASH3_H_
+
+//-----------------------------------------------------------------------------
+
+#include "libhla.hh"
+
+namespace libhla {
+namespace hash {
+
+HLA_EXPORT void MurmurHash3_x86_32 ( const void * key, int len, uint32_t seed, void * out );
+
+HLA_EXPORT void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out );
+
+HLA_EXPORT void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out );
+
+} /* end of namespace hash */
+} /* end of namespace libhla */
+//-----------------------------------------------------------------------------
+
+#endif // _MURMURHASH3_H_
diff --git a/libHLA/PMurHash.c b/libHLA/PMurHash.c
new file mode 100644
index 0000000..0175012
--- /dev/null
+++ b/libHLA/PMurHash.c
@@ -0,0 +1,317 @@
+/*-----------------------------------------------------------------------------
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain.
+ *
+ * This implementation was written by Shane Day, and is also public domain.
+ *
+ * This is a portable ANSI C implementation of MurmurHash3_x86_32 (Murmur3A)
+ * with support for progressive processing.
+ */
+
+/*-----------------------------------------------------------------------------
+
+If you want to understand the MurmurHash algorithm you would be much better
+off reading the original source. Just point your browser at:
+http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
+
+
+What this version provides?
+
+1. Progressive data feeding. Useful when the entire payload to be hashed
+does not fit in memory or when the data is streamed through the application.
+Also useful when hashing a number of strings with a common prefix. A partial
+hash of a prefix string can be generated and reused for each suffix string.
+
+2. Portability. Plain old C so that it should compile on any old compiler.
+Both CPU endian and access-alignment neutral, but avoiding inefficient code
+when possible depending on CPU capabilities.
+
+3. Drop in. I personally like nice self contained public domain code, making it
+easy to pilfer without loads of refactoring to work properly in the existing
+application code & makefile structure and mucking around with licence files.
+Just copy PMurHash.h and PMurHash.c and you're ready to go.
+
+
+How does it work?
+
+We can only process entire 32 bit chunks of input, except for the very end
+that may be shorter. So along with the partial hash we need to give back to
+the caller a carry containing up to 3 bytes that we were unable to process.
+This carry also needs to record the number of bytes the carry holds. I use
+the low 2 bits as a count (0..3) and the carry bytes are shifted into the
+high byte in stream order.
+
+To handle endianess I simply use a macro that reads a uint32_t and define
+that macro to be a direct read on little endian machines, a read and swap
+on big endian machines, or a byte-by-byte read if the endianess is unknown.
+
+-----------------------------------------------------------------------------*/
+
+
+#include "PMurHash.h"
+
+/* I used ugly type names in the header to avoid potential conflicts with
+ * application or system typedefs & defines. Since I'm not including any more
+ * headers below here I can rename these so that the code reads like C99 */
+#undef uint32_t
+#define uint32_t MH_UINT32
+#undef uint8_t
+#define uint8_t MH_UINT8
+
+/* MSVC warnings we choose to ignore */
+#if defined(_MSC_VER)
+ #pragma warning(disable: 4127) /* conditional expression is constant */
+#endif
+
+/*-----------------------------------------------------------------------------
+ * Endianess, misalignment capabilities and util macros
+ *
+ * The following 3 macros are defined in this section. The other macros defined
+ * are only needed to help derive these 3.
+ *
+ * READ_UINT32(x) Read a little endian unsigned 32-bit int
+ * UNALIGNED_SAFE Defined if READ_UINT32 works on non-word boundaries
+ * ROTL32(x,r) Rotate x left by r bits
+ */
+
+/* Convention is to define __BYTE_ORDER == to one of these values */
+#if !defined(__BIG_ENDIAN)
+ #define __BIG_ENDIAN 4321
+#endif
+#if !defined(__LITTLE_ENDIAN)
+ #define __LITTLE_ENDIAN 1234
+#endif
+
+/* I386 */
+#if defined(_M_IX86) || defined(__i386__) || defined(__i386) || defined(i386)
+ #define __BYTE_ORDER __LITTLE_ENDIAN
+ #define UNALIGNED_SAFE
+#endif
+
+/* gcc 'may' define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ to 1 (Note the trailing __),
+ * or even _LITTLE_ENDIAN or _BIG_ENDIAN (Note the single _ prefix) */
+#if !defined(__BYTE_ORDER)
+ #if defined(__LITTLE_ENDIAN__) && __LITTLE_ENDIAN__==1 || defined(_LITTLE_ENDIAN) && _LITTLE_ENDIAN==1
+ #define __BYTE_ORDER __LITTLE_ENDIAN
+ #elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__==1 || defined(_BIG_ENDIAN) && _BIG_ENDIAN==1
+ #define __BYTE_ORDER __BIG_ENDIAN
+ #endif
+#endif
+
+/* gcc (usually) defines xEL/EB macros for ARM and MIPS endianess */
+#if !defined(__BYTE_ORDER)
+ #if defined(__ARMEL__) || defined(__MIPSEL__)
+ #define __BYTE_ORDER __LITTLE_ENDIAN
+ #endif
+ #if defined(__ARMEB__) || defined(__MIPSEB__)
+ #define __BYTE_ORDER __BIG_ENDIAN
+ #endif
+#endif
+
+/* Now find best way we can to READ_UINT32 */
+#if __BYTE_ORDER==__LITTLE_ENDIAN
+ /* CPU endian matches murmurhash algorithm, so read 32-bit word directly */
+ #define READ_UINT32(ptr) (*((uint32_t*)(ptr)))
+#elif __BYTE_ORDER==__BIG_ENDIAN
+ /* TODO: Add additional cases below where a compiler provided bswap32 is available */
+ #if defined(__GNUC__) && (__GNUC__>4 || (__GNUC__==4 && __GNUC_MINOR__>=3))
+ #define READ_UINT32(ptr) (__builtin_bswap32(*((uint32_t*)(ptr))))
+ #else
+ /* Without a known fast bswap32 we're just as well off doing this */
+ #define READ_UINT32(ptr) (ptr[0]|ptr[1]<<8|ptr[2]<<16|ptr[3]<<24)
+ #define UNALIGNED_SAFE
+ #endif
+#else
+ /* Unknown endianess so last resort is to read individual bytes */
+ #define READ_UINT32(ptr) (ptr[0]|ptr[1]<<8|ptr[2]<<16|ptr[3]<<24)
+
+ /* Since we're not doing word-reads we can skip the messing about with realignment */
+ #define UNALIGNED_SAFE
+#endif
+
+/* Find best way to ROTL32 */
+#if defined(_MSC_VER)
  #include <stdlib.h> /* Microsoft put _rotl declaration in here */
+ #define ROTL32(x,r) _rotl(x,r)
+#else
+ /* gcc recognises this code and generates a rotate instruction for CPUs with one */
+ #define ROTL32(x,r) (((uint32_t)x << r) | ((uint32_t)x >> (32 - r)))
+#endif
+
+
+/*-----------------------------------------------------------------------------
+ * Core murmurhash algorithm macros */
+
+#define C1 (0xcc9e2d51)
+#define C2 (0x1b873593)
+
+/* This is the main processing body of the algorithm. It operates
+ * on each full 32-bits of input. */
+#define DOBLOCK(h1, k1) do{ \
+ k1 *= C1; \
+ k1 = ROTL32(k1,15); \
+ k1 *= C2; \
+ \
+ h1 ^= k1; \
+ h1 = ROTL32(h1,13); \
+ h1 = h1*5+0xe6546b64; \
+ }while(0)
+
+
+/* Append unaligned bytes to carry, forcing hash churn if we have 4 bytes */
+/* cnt=bytes to process, h1=name of h1 var, c=carry, n=bytes in c, ptr/len=payload */
+#define DOBYTES(cnt, h1, c, n, ptr, len) do{ \
+ int _i = cnt; \
+ while(_i--) { \
+ c = c>>8 | *ptr++<<24; \
+ n++; len--; \
+ if(n==4) { \
+ DOBLOCK(h1, c); \
+ n = 0; \
+ } \
+ } }while(0)
+
+/*---------------------------------------------------------------------------*/
+
+/* Main hashing function. Initialise carry to 0 and h1 to 0 or an initial seed
+ * if wanted. Both ph1 and pcarry are required arguments. */
+void PMurHash32_Process(uint32_t *ph1, uint32_t *pcarry, const void *key, int len)
+{
+ uint32_t h1 = *ph1;
+ uint32_t c = *pcarry;
+
+ const uint8_t *ptr = (uint8_t*)key;
+ const uint8_t *end;
+
+ /* Extract carry count from low 2 bits of c value */
+ int n = c & 3;
+
+#if defined(UNALIGNED_SAFE)
+ /* This CPU handles unaligned word access */
+
+ /* Consume any carry bytes */
+ int i = (4-n) & 3;
+ if(i && i <= len) {
+ DOBYTES(i, h1, c, n, ptr, len);
+ }
+
+ /* Process 32-bit chunks */
+ end = ptr + len/4*4;
+ for( ; ptr < end ; ptr+=4) {
+ uint32_t k1 = READ_UINT32(ptr);
+ DOBLOCK(h1, k1);
+ }
+
+#else /*UNALIGNED_SAFE*/
+ /* This CPU does not handle unaligned word access */
+
+ /* Consume enough so that the next data byte is word aligned */
+ int i = -(long)ptr & 3;
+ if(i && i <= len) {
+ DOBYTES(i, h1, c, n, ptr, len);
+ }
+
+ /* We're now aligned. Process in aligned blocks. Specialise for each possible carry count */
+ end = ptr + len/4*4;
+ switch(n) { /* how many bytes in c */
+ case 0: /* c=[----] w=[3210] b=[3210]=w c'=[----] */
+ for( ; ptr < end ; ptr+=4) {
+ uint32_t k1 = READ_UINT32(ptr);
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 1: /* c=[0---] w=[4321] b=[3210]=c>>24|w<<8 c'=[4---] */
+ for( ; ptr < end ; ptr+=4) {
+ uint32_t k1 = c>>24;
+ c = READ_UINT32(ptr);
+ k1 |= c<<8;
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 2: /* c=[10--] w=[5432] b=[3210]=c>>16|w<<16 c'=[54--] */
+ for( ; ptr < end ; ptr+=4) {
+ uint32_t k1 = c>>16;
+ c = READ_UINT32(ptr);
+ k1 |= c<<16;
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 3: /* c=[210-] w=[6543] b=[3210]=c>>8|w<<24 c'=[654-] */
+ for( ; ptr < end ; ptr+=4) {
+ uint32_t k1 = c>>8;
+ c = READ_UINT32(ptr);
+ k1 |= c<<24;
+ DOBLOCK(h1, k1);
+ }
+ }
+#endif /*UNALIGNED_SAFE*/
+
+ /* Advance over whole 32-bit chunks, possibly leaving 1..3 bytes */
+ len -= len/4*4;
+
+ /* Append any remaining bytes into carry */
+ DOBYTES(len, h1, c, n, ptr, len);
+
+ /* Copy out new running hash and carry */
+ *ph1 = h1;
+ *pcarry = (c & ~0xff) | n;
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Finalize a hash. To match the original Murmur3A the total_length must be provided */
+uint32_t PMurHash32_Result(uint32_t h, uint32_t carry, uint32_t total_length)
+{
+ uint32_t k1;
+ int n = carry & 3;
+ if(n) {
+ k1 = carry >> (4-n)*8;
+ k1 *= C1; k1 = ROTL32(k1,15); k1 *= C2; h ^= k1;
+ }
+ h ^= total_length;
+
+ /* fmix */
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+
+ return h;
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Murmur3A compatable all-at-once */
+uint32_t PMurHash32(uint32_t seed, const void *key, int len)
+{
+ uint32_t h1=seed, carry=0;
+ PMurHash32_Process(&h1, &carry, key, len);
+ return PMurHash32_Result(h1, carry, len);
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Provide an API suitable for smhasher */
+void PMurHash32_test(const void *key, int len, uint32_t seed, void *out)
+{
+ uint32_t h1=seed, carry=0;
+ const uint8_t *ptr = (uint8_t*)key;
+ const uint8_t *end = ptr + len;
+
+#if 0 /* Exercise the progressive processing */
+ while(ptr < end) {
+ //const uint8_t *mid = ptr + rand()%(end-ptr)+1;
+ const uint8_t *mid = ptr + (rand()&0xF);
+    mid = mid<end ? mid : end;
+    PMurHash32_Process(&h1, &carry, ptr, mid-ptr);
+    ptr = mid;
+  }
+#else
+  PMurHash32_Process(&h1, &carry, ptr, (int)(end-ptr));
+#endif
+  h1 = PMurHash32_Result(h1, carry, len);
+  *(uint32_t*)out = h1;
+}
diff --git a/libHLA/PMurHash.h b/libHLA/PMurHash.h
new file mode 100644
--- /dev/null
+++ b/libHLA/PMurHash.h
@@ -0,0 +1,91 @@
+/*-----------------------------------------------------------------------------
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain.
+ *
+ * This implementation was written by Shane Day, and is also public domain.
+ *
+ * This is a portable ANSI C implementation of MurmurHash3_x86_32 (Murmur3A)
+ * with support for progressive processing.
+ */
+
+#ifndef _PMURHASH_H_
+#define _PMURHASH_H_
+
+/* ------------------------------------------------------------------------- */
+/* Determine what native type to use for uint32_t */
+
+/* We can't use the name 'uint32_t' here because it will conflict with
+ * any version provided by the system headers or application. */
+
+/* First look for special cases */
+#if defined(_MSC_VER)
+  #define MH_UINT32 unsigned long
+#endif
+
+/* If the compiler says it's C99 then take its word for it */
+#if !defined(MH_UINT32) && ( \
+     defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L )
+  #include <stdint.h>
+ #define MH_UINT32 uint32_t
+#endif
+
+/* Otherwise try testing against max value macros from limit.h */
+#if !defined(MH_UINT32)
  #include <limits.h>
+ #if (USHRT_MAX == 0xffffffffUL)
+ #define MH_UINT32 unsigned short
+ #elif (UINT_MAX == 0xffffffffUL)
+ #define MH_UINT32 unsigned int
+ #elif (ULONG_MAX == 0xffffffffUL)
+ #define MH_UINT32 unsigned long
+ #endif
+#endif
+
+#if !defined(MH_UINT32)
+ #error Unable to determine type name for unsigned 32-bit int
+#endif
+
+/* I'm yet to work on a platform where 'unsigned char' is not 8 bits */
+#define MH_UINT8 unsigned char
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+ #pragma warning(disable: 4251)
+ #define ANY_DLL_EXPORT __declspec(dllexport)
+ #define ANY_DLL_IMPORT __declspec(dllimport)
+ #define ANY_DLL_LOCAL
+#else
+ #if (__GNUC__ >= 4)
+ #define ANY_DLL_EXPORT __attribute__ ((visibility("default")))
+ #define ANY_DLL_IMPORT __attribute__ ((visibility("default")))
+ #define ANY_DLL_LOCAL __attribute__ ((visibility("hidden")))
+ #else
+ #define ANY_DLL_EXPORT
+ #define ANY_DLL_IMPORT
+ #define ANY_DLL_LOCAL
+ #endif
+#endif
+
+#if defined(HLA_EXPORTS)
+ #define HLA_EXPORT ANY_DLL_EXPORT
+#else
+ #define HLA_EXPORT ANY_DLL_IMPORT
+#endif
+
+
+/* ------------------------------------------------------------------------- */
+/* Prototypes */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+HLA_EXPORT void PMurHash32_Process(MH_UINT32 *ph1, MH_UINT32 *pcarry, const void *key, int len);
+HLA_EXPORT MH_UINT32 PMurHash32_Result(MH_UINT32 h1, MH_UINT32 carry, MH_UINT32 total_length);
+HLA_EXPORT MH_UINT32 PMurHash32(MH_UINT32 seed, const void *key, int len);
+
+void PMurHash32_test(const void *key, int len, MH_UINT32 seed, void *out);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libRTI/ieee1516-2010/CMakeLists.txt b/libRTI/ieee1516-2010/CMakeLists.txt
index f2c7379..74a60f2 100644
--- a/libRTI/ieee1516-2010/CMakeLists.txt
+++ b/libRTI/ieee1516-2010/CMakeLists.txt
@@ -1,6 +1,8 @@
# Add standard specific include directory
include_directories(${CMAKE_SOURCE_DIR}/include/ieee1516-2010)
include_directories(${CMAKE_BINARY_DIR}/include/ieee1516-2010)
+# Add libhla for hash function
+include_directories(${CMAKE_SOURCE_DIR}/libhla)
# TO BE CONTINUED
##########################################################
@@ -82,7 +84,7 @@ add_library(RTI1516e ${RTI1516e_LIB_SRCS} ${RTI1516e_LIB_INCLUDE})
# Incorrect line
#target_link_libraries(RTI1516 CERTI)
# Correct line
-target_link_libraries(RTI1516e CERTI FedTime1516e)
+target_link_libraries(RTI1516e CERTI FedTime1516e HLA)
install(FILES RTI1516fedTime.h DESTINATION include/ieee1516-2010/RTI)
message(STATUS "libRTI variant: HLA 1516e")
set_target_properties(RTI1516e PROPERTIES OUTPUT_NAME "RTI1516e")
diff --git a/libRTI/ieee1516-2010/HandleImplementation.cpp b/libRTI/ieee1516-2010/HandleImplementation.cpp
index e5c80a3..131c00c 100644
--- a/libRTI/ieee1516-2010/HandleImplementation.cpp
+++ b/libRTI/ieee1516-2010/HandleImplementation.cpp
@@ -26,9 +26,12 @@
#include
#include
#include
+#include <cstdint>
#include "HandleImplementation.h"
#include "RTIvariableLengthDataImplementation.h"
+#include "PMurHash.h"
+
namespace rti1516e
{
@@ -82,6 +85,31 @@ bool HandleImplementation::isValid() const
return true;
}
+
+long HandleImplementation::hash () const
+{
+    long retval;
+    // A C++11 alternative would be std::hash from <functional>:
+    // more details at http://www.cplusplus.com/reference/functional/hash/
+    //  std::hash<std::wstring> localhash;
+    //  return localhash(toString());
+
+    // Use Portable MurmurHash implementation from http://code.google.com/p/smhasher/
+    // NB: PMurHash32 takes its length argument in BYTES, so account for sizeof(wchar_t).
+    std::wstring strval = toString();
+    const char* bytes = reinterpret_cast<const char*>(strval.c_str());
+    int l = (int)(strval.length()*sizeof(std::wstring::value_type));
+    // if long is a 32 bits integer then hash is computed in one piece
+    if (sizeof(long)==sizeof(uint32_t)) {
+        retval = PMurHash32(0,bytes,l);
+    }
+    // otherwise long is 64 bits: fill high and low words with two 32 bits hashes
+    else {
+        retval = (long)PMurHash32(0,bytes,l/2) << 32;
+        retval |= (long)PMurHash32(0,bytes+l/2,l-l/2);
+    }
+    return retval;
+}
+
/* Generate an encoded value that can be used to send */
/* handles to other federates in updates or interactions. */
VariableLengthData HandleImplementation::encode() const
diff --git a/libRTI/ieee1516-2010/HandleImplementation.h b/libRTI/ieee1516-2010/HandleImplementation.h
index e10376c..f9f3d4f 100644
--- a/libRTI/ieee1516-2010/HandleImplementation.h
+++ b/libRTI/ieee1516-2010/HandleImplementation.h
@@ -65,7 +65,16 @@ namespace rti1516e
/* Generate an encoded value that can be used to send */
/* handles to other federates in updates or interactions. */
virtual VariableLengthData encode() const;
-
+
+ /* Generate a hash value for use in storing handles in a */
+ /* in a hash table. */
+ /* Note: The hash value may not be unique across two */
+ /* separate handle values but it will be equal given two */
+ /* separate instances of the same handle value. */
+ /* H1 == H2 implies H1.hash() == H2.hash() */
+ /* H1 != H2 does not imply H1.hash() != H2.hash() */
+ long hash () const;
+
/* Alternate encode for directly filling a buffer */
virtual unsigned long encodedLength() const;
virtual unsigned long encode(
--
2.1.4