Diffstat (limited to 'dviware/dvisvgm/libs/xxHash/xxhash.c')
-rw-r--r--  dviware/dvisvgm/libs/xxHash/xxhash.c  688
1 file changed, 394 insertions(+), 294 deletions(-)
diff --git a/dviware/dvisvgm/libs/xxHash/xxhash.c b/dviware/dvisvgm/libs/xxHash/xxhash.c
index eb881ff3b1..3f49b7d1d8 100644
--- a/dviware/dvisvgm/libs/xxHash/xxhash.c
+++ b/dviware/dvisvgm/libs/xxHash/xxhash.c
@@ -33,6 +33,12 @@
*/
+/* since xxhash.c can be included (via XXH_INLINE_ALL),
+ * it's good practice to protect it with a guard
+ * in case of multiple inclusions */
+#ifndef XXHASH_C_01393879
+#define XXHASH_C_01393879
+
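For context, a minimal sketch of the double-inclusion scenario this guard defends against — a hypothetical unity build where two concatenated sources each pull in the implementation directly:

    /* hypothetical unity build: one translation unit, two inclusions */
    #include "xxhash.c"   /* first inclusion: full implementation */
    #include "xxhash.c"   /* second inclusion: XXHASH_C_01393879 is already
                           * defined, so the whole file is skipped */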
/* *************************************
* Tuning parameters
***************************************/
@@ -50,14 +56,10 @@
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
-# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
- || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
- || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# if !defined(__clang__) && defined(__GNUC__) && defined(__ARM_FEATURE_UNALIGNED) && defined(__ARM_ARCH) && (__ARM_ARCH == 6)
# define XXH_FORCE_MEMORY_ACCESS 2
-# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
- (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
- || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
- || defined(__ARM_ARCH_7S__) ))
+# elif !defined(__clang__) && ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
+ (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7)))
# define XXH_FORCE_MEMORY_ACCESS 1
# endif
#endif
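As the comment notes, this macro can also be pinned externally; a hedged sketch of doing so from source rather than the command line:

    /* Hypothetical override before including the implementation, for a
     * target known to handle unaligned loads in hardware; equivalent to
     * compiling with -DXXH_FORCE_MEMORY_ACCESS=2. */
    #define XXH_FORCE_MEMORY_ACCESS 2
    #include "xxhash.c"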
@@ -86,6 +88,18 @@
# endif
#endif
+/*!XXH_REROLL:
+ * Whether to reroll XXH32_finalize and XXH64_finalize,
+ * instead of using an unrolled jump table/if statement loop.
+ *
+ * This is automatically defined with -Os/-Oz on GCC and Clang. */
+#ifndef XXH_REROLL
+# if defined(__OPTIMIZE_SIZE__)
+# define XXH_REROLL 1
+# else
+# define XXH_REROLL 0
+# endif
+#endif
/* *************************************
* Includes & Memory related functions
@@ -99,7 +113,7 @@ static void XXH_free (void* p) { free(p); }
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
-#include <assert.h> /* assert */
+#include <limits.h> /* ULLONG_MAX */
#define XXH_STATIC_LINKING_ONLY
#include "xxhash.h"
@@ -111,36 +125,57 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
#ifdef _MSC_VER /* Visual Studio */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# define XXH_FORCE_INLINE static __forceinline
+# define XXH_NO_INLINE static __declspec(noinline)
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define XXH_FORCE_INLINE static inline __attribute__((always_inline))
+# define XXH_NO_INLINE static __attribute__((noinline))
# else
# define XXH_FORCE_INLINE static inline
+# define XXH_NO_INLINE static
# endif
# else
# define XXH_FORCE_INLINE static
+# define XXH_NO_INLINE static
# endif /* __STDC_VERSION__ */
#endif
+
+/* *************************************
+* Debug
+***************************************/
+/* DEBUGLEVEL is expected to be defined externally,
+ * typically through the compiler command line.
+ * Its value must be a number. */
+#ifndef DEBUGLEVEL
+# define DEBUGLEVEL 0
+#endif
+
+#if (DEBUGLEVEL>=1)
+# include <assert.h> /* note : can still be disabled with NDEBUG */
+# define XXH_ASSERT(c) assert(c)
+#else
+# define XXH_ASSERT(c) ((void)0)
+#endif
+
+/* note : use after variable declarations */
+#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; }
+
+
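A hypothetical illustration of the division-by-zero trick above: a true condition yields an enum initializer of 1/1 and compiles away, while a false one yields 1/0 and fails the build. Because the macro expands to a block, it must sit where a statement is legal — hence the "use after variable declarations" note for C89.

    static void xxh_static_assert_demo(void)
    {
        XXH_STATIC_ASSERT(sizeof(char) == 1);       /* OK: initializer is 1/1 */
        /* XXH_STATIC_ASSERT(sizeof(char) == 2); */ /* error: division by zero */
    }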
/* *************************************
* Basic Types
***************************************/
-#ifndef MEM_MODULE
-# if !defined (__VMS) \
- && (defined (__cplusplus) \
- || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
- typedef uint8_t BYTE;
- typedef uint16_t U16;
- typedef uint32_t U32;
-# else
- typedef unsigned char BYTE;
- typedef unsigned short U16;
- typedef unsigned int U32;
-# endif
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t xxh_u8;
+#else
+ typedef unsigned char xxh_u8;
#endif
+typedef XXH32_hash_t xxh_u32;
/* === Memory access === */
@@ -148,23 +183,23 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
-static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
-typedef union { U32 u32; } __attribute__((packed)) unalign;
-static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
+static xxh_u32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
#else
/* portable and safe solution. Generally efficient.
* see : http://stackoverflow.com/a/32095106/646947
*/
-static U32 XXH_read32(const void* memPtr)
+static xxh_u32 XXH_read32(const void* memPtr)
{
- U32 val;
+ xxh_u32 val;
memcpy(&val, memPtr, sizeof(val));
return val;
}
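A small usage sketch of this portable path (read_u32_at is hypothetical): the memcpy form is defined behavior at any alignment, and optimizing compilers typically lower it to a single load.

    static xxh_u32 read_u32_at(const xxh_u8* buf, size_t off)
    {
        return XXH_read32(buf + off);  /* well-defined even if (buf+off) % 4 != 0 */
    }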
@@ -177,12 +212,21 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
+# if defined(_WIN32) /* Windows is always little endian */ \
+ || defined(__LITTLE_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 1
+# elif defined(__BIG_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 0
+# else
static int XXH_isLittleEndian(void)
{
- const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
+# endif
#endif
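The union trick can be exercised stand-alone; a minimal sketch (hypothetical demo program):

    #include <stdio.h>
    int main(void)
    {
        /* on a little-endian CPU the low byte of 1 is stored first */
        const union { unsigned u; unsigned char c[4]; } one = { 1 };
        printf("little endian: %u\n", (unsigned)one.c[0]);  /* 1 if LE, 0 if BE */
        return 0;
    }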
@@ -193,8 +237,15 @@ static int XXH_isLittleEndian(void)
******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+#if !defined(NO_CLANG_BUILTIN) && __has_builtin(__builtin_rotateleft32) && __has_builtin(__builtin_rotateleft64)
+# define XXH_rotl32 __builtin_rotateleft32
+# define XXH_rotl64 __builtin_rotateleft64
/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
-#if defined(_MSC_VER)
+#elif defined(_MSC_VER)
# define XXH_rotl32(x,r) _rotl(x,r)
# define XXH_rotl64(x,r) _rotl64(x,r)
#else
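The #else branch falls outside this hunk; the portable fallback is, roughly, the usual shift/or rotate idiom (a sketch, not shown in the diff):

    /* assumed portable fallback, per the usual idiom: */
    #define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
    #define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))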
@@ -207,7 +258,7 @@ static int XXH_isLittleEndian(void)
#elif XXH_GCC_VERSION >= 403
# define XXH_swap32 __builtin_bswap32
#else
-static U32 XXH_swap32 (U32 x)
+static xxh_u32 XXH_swap32 (xxh_u32 x)
{
return ((x << 24) & 0xff000000 ) |
((x << 8) & 0x00ff0000 ) |
@@ -222,44 +273,43 @@ static U32 XXH_swap32 (U32 x)
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
-XXH_FORCE_INLINE U32 XXH_readLE32(const void* ptr)
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}
-static U32 XXH_readBE32(const void* ptr)
+static xxh_u32 XXH_readBE32(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
-XXH_FORCE_INLINE U32
+XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
if (align==XXH_unaligned) {
return XXH_readLE32(ptr);
} else {
- return XXH_CPU_LITTLE_ENDIAN ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
}
}
/* *************************************
-* Macros
+* Misc
***************************************/
-#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
/* *******************************************************************
* 32-bit hash functions
*********************************************************************/
-static const U32 PRIME32_1 = 2654435761U; /* 0b10011110001101110111100110110001 */
-static const U32 PRIME32_2 = 2246822519U; /* 0b10000101111010111100101001110111 */
-static const U32 PRIME32_3 = 3266489917U; /* 0b11000010101100101010111000111101 */
-static const U32 PRIME32_4 = 668265263U; /* 0b00100111110101001110101100101111 */
-static const U32 PRIME32_5 = 374761393U; /* 0b00010110010101100110011110110001 */
+static const xxh_u32 PRIME32_1 = 0x9E3779B1U; /* 0b10011110001101110111100110110001 */
+static const xxh_u32 PRIME32_2 = 0x85EBCA77U; /* 0b10000101111010111100101001110111 */
+static const xxh_u32 PRIME32_3 = 0xC2B2AE3DU; /* 0b11000010101100101010111000111101 */
+static const xxh_u32 PRIME32_4 = 0x27D4EB2FU; /* 0b00100111110101001110101100101111 */
+static const xxh_u32 PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011110110001 */
-static U32 XXH32_round(U32 acc, U32 input)
+static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
{
acc += input * PRIME32_2;
acc = XXH_rotl32(acc, 13);
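The prime constants only changed spelling, from decimal to hexadecimal. A hypothetical self-check of the equivalence, written with literals since the static const variables are not integer constant expressions in C:

    static void check_prime_spelling(void)
    {
        XXH_STATIC_ASSERT(0x9E3779B1U == 2654435761U);  /* PRIME32_1 */
        XXH_STATIC_ASSERT(0x85EBCA77U == 2246822519U);  /* PRIME32_2 */
        XXH_STATIC_ASSERT(0x165667B1U ==  374761393U);  /* PRIME32_5 */
    }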
@@ -312,7 +362,7 @@ static U32 XXH32_round(U32 acc, U32 input)
}
/* mix all bits */
-static U32 XXH32_avalanche(U32 h32)
+static xxh_u32 XXH32_avalanche(xxh_u32 h32)
{
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
@@ -324,92 +374,101 @@ static U32 XXH32_avalanche(U32 h32)
#define XXH_get32bits(p) XXH_readLE32_align(p, align)
-static U32
-XXH32_finalize(U32 h32, const void* ptr, size_t len, XXH_alignment align)
-
+static xxh_u32
+XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
- const BYTE* p = (const BYTE*)ptr;
-
#define PROCESS1 \
- h32 += (*p++) * PRIME32_5; \
+ h32 += (*ptr++) * PRIME32_5; \
h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
#define PROCESS4 \
- h32 += XXH_get32bits(p) * PRIME32_3; \
- p+=4; \
+ h32 += XXH_get32bits(ptr) * PRIME32_3; \
+ ptr+=4; \
h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
- switch(len&15) /* or switch(bEnd - p) */
- {
- case 12: PROCESS4;
- /* fallthrough */
- case 8: PROCESS4;
- /* fallthrough */
- case 4: PROCESS4;
- return XXH32_avalanche(h32);
-
- case 13: PROCESS4;
- /* fallthrough */
- case 9: PROCESS4;
- /* fallthrough */
- case 5: PROCESS4;
- PROCESS1;
- return XXH32_avalanche(h32);
-
- case 14: PROCESS4;
- /* fallthrough */
- case 10: PROCESS4;
- /* fallthrough */
- case 6: PROCESS4;
- PROCESS1;
- PROCESS1;
- return XXH32_avalanche(h32);
-
- case 15: PROCESS4;
- /* fallthrough */
- case 11: PROCESS4;
- /* fallthrough */
- case 7: PROCESS4;
- /* fallthrough */
- case 3: PROCESS1;
- /* fallthrough */
- case 2: PROCESS1;
- /* fallthrough */
- case 1: PROCESS1;
- /* fallthrough */
- case 0: return XXH32_avalanche(h32);
+ /* Compact rerolled version */
+ if (XXH_REROLL) {
+ len &= 15;
+ while (len >= 4) {
+ PROCESS4;
+ len -= 4;
+ }
+ while (len > 0) {
+ PROCESS1;
+ --len;
+ }
+ return XXH32_avalanche(h32);
+ } else {
+ switch(len&15) /* or switch(bEnd - p) */ {
+ case 12: PROCESS4;
+ /* fallthrough */
+ case 8: PROCESS4;
+ /* fallthrough */
+ case 4: PROCESS4;
+ return XXH32_avalanche(h32);
+
+ case 13: PROCESS4;
+ /* fallthrough */
+ case 9: PROCESS4;
+ /* fallthrough */
+ case 5: PROCESS4;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 14: PROCESS4;
+ /* fallthrough */
+ case 10: PROCESS4;
+ /* fallthrough */
+ case 6: PROCESS4;
+ PROCESS1;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 15: PROCESS4;
+ /* fallthrough */
+ case 11: PROCESS4;
+ /* fallthrough */
+ case 7: PROCESS4;
+ /* fallthrough */
+ case 3: PROCESS1;
+ /* fallthrough */
+ case 2: PROCESS1;
+ /* fallthrough */
+ case 1: PROCESS1;
+ /* fallthrough */
+ case 0: return XXH32_avalanche(h32);
+ }
+ XXH_ASSERT(0);
+ return h32; /* reaching this point is deemed impossible */
}
- assert(0);
- return h32; /* reaching this point is deemed impossible */
}
-XXH_FORCE_INLINE U32
-XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_alignment align)
+XXH_FORCE_INLINE xxh_u32
+XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
{
- const BYTE* p = (const BYTE*)input;
- const BYTE* bEnd = p + len;
- U32 h32;
+ const xxh_u8* bEnd = input + len;
+ xxh_u32 h32;
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
- if (p==NULL) {
+ if (input==NULL) {
len=0;
- bEnd=p=(const BYTE*)(size_t)16;
+ bEnd=input=(const xxh_u8*)(size_t)16;
}
#endif
if (len>=16) {
- const BYTE* const limit = bEnd - 15;
- U32 v1 = seed + PRIME32_1 + PRIME32_2;
- U32 v2 = seed + PRIME32_2;
- U32 v3 = seed + 0;
- U32 v4 = seed - PRIME32_1;
+ const xxh_u8* const limit = bEnd - 15;
+ xxh_u32 v1 = seed + PRIME32_1 + PRIME32_2;
+ xxh_u32 v2 = seed + PRIME32_2;
+ xxh_u32 v3 = seed + 0;
+ xxh_u32 v4 = seed - PRIME32_1;
do {
- v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
- v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
- v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
- v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
- } while (p < limit);
+ v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
+ v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
+ v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
+ v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
+ } while (input < limit);
h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
+ XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
@@ -417,29 +476,29 @@ XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_alignment align)
h32 = seed + PRIME32_5;
}
- h32 += (U32)len;
+ h32 += (xxh_u32)len;
- return XXH32_finalize(h32, p, len&15, align);
+ return XXH32_finalize(h32, input, len&15, align);
}
-XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH32_state_t state;
XXH32_reset(&state, seed);
- XXH32_update(&state, input, len);
+ XXH32_update(&state, (const xxh_u8*)input, len);
return XXH32_digest(&state);
#else
if (XXH_FORCE_ALIGN_CHECK) {
if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
- return XXH32_endian_align(input, len, seed, XXH_aligned);
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
} }
- return XXH32_endian_align(input, len, seed, XXH_unaligned);
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
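A minimal one-shot usage sketch of the retyped public entry point (hypothetical program; the printed value is simply whatever XXH32 yields for this input):

    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        const char msg[] = "xxhash sample input";
        XXH32_hash_t const h = XXH32(msg, sizeof(msg) - 1, 0 /* seed */);
        printf("XXH32 = 0x%08X\n", (unsigned)h);
        return 0;
    }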
@@ -462,7 +521,7 @@ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t
memcpy(dstState, srcState, sizeof(*dstState));
}
-XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
{
XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
memset(&state, 0, sizeof(state));
@@ -486,21 +545,21 @@ XXH32_update(XXH32_state_t* state, const void* input, size_t len)
return XXH_ERROR;
#endif
- { const BYTE* p = (const BYTE*)input;
- const BYTE* const bEnd = p + len;
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
state->total_len_32 += (XXH32_hash_t)len;
state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
if (state->memsize + len < 16) { /* fill in tmp buffer */
- XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
state->memsize += (XXH32_hash_t)len;
return XXH_OK;
}
if (state->memsize) { /* some data left from previous update */
- XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
- { const U32* p32 = state->mem32;
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
+ { const xxh_u32* p32 = state->mem32;
state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++;
state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++;
state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++;
@@ -511,11 +570,11 @@ XXH32_update(XXH32_state_t* state, const void* input, size_t len)
}
if (p <= bEnd-16) {
- const BYTE* const limit = bEnd - 16;
- U32 v1 = state->v1;
- U32 v2 = state->v2;
- U32 v3 = state->v3;
- U32 v4 = state->v4;
+ const xxh_u8* const limit = bEnd - 16;
+ xxh_u32 v1 = state->v1;
+ xxh_u32 v2 = state->v2;
+ xxh_u32 v3 = state->v3;
+ xxh_u32 v4 = state->v4;
do {
v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4;
@@ -540,9 +599,9 @@ XXH32_update(XXH32_state_t* state, const void* input, size_t len)
}
-XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state)
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state)
{
- U32 h32;
+ xxh_u32 h32;
if (state->large_len) {
h32 = XXH_rotl32(state->v1, 1)
@@ -555,7 +614,7 @@ XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state)
h32 += state->total_len_32;
- return XXH32_finalize(h32, state->mem32, state->memsize, XXH_aligned);
+ return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
}
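The streaming counterpart, sketched under the assumption that XXH_STATIC_LINKING_ONLY is defined so XXH32_state_t can live on the stack; feeding the same bytes in chunks matches the one-shot result.

    #define XXH_STATIC_LINKING_ONLY   /* expose the state definition */
    #include "xxhash.h"

    XXH32_hash_t hash_two_chunks(const void* a, size_t la,
                                 const void* b, size_t lb)
    {
        XXH32_state_t state;
        XXH32_reset(&state, 0);        /* same seed as the one-shot call */
        XXH32_update(&state, a, la);
        XXH32_update(&state, b, lb);
        return XXH32_digest(&state);
    }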
@@ -588,31 +647,46 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src
/*====== Memory access ======*/
-#ifndef MEM_MODULE
-# define MEM_MODULE
-# if !defined (__VMS) \
- && (defined (__cplusplus) \
- || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
- typedef uint64_t U64;
-# else
- /* if compiler doesn't support unsigned long long, replace by another 64-bit type */
- typedef unsigned long long U64;
-# endif
-#endif
-
+typedef XXH64_hash_t xxh_u64;
+
+
+/*! XXH_REROLL_XXH64:
+ * Whether to reroll the XXH64_finalize() loop.
+ *
+ * Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a performance gain
+ * on 64-bit hosts, as only one jump is required.
+ *
+ * However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit registers,
+ * and 64-bit arithmetic needs to be simulated, it isn't beneficial to unroll. The code becomes
+ * ridiculously large (the largest function in the binary on i386!), and rerolling it saves
+ * anywhere from 3kB to 20kB. It is also slightly faster because it fits into cache better
+ * and is more likely to be inlined by the compiler.
+ *
+ * If XXH_REROLL is defined, this is ignored and the loop is always rerolled. */
+#ifndef XXH_REROLL_XXH64
+# if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \
+ || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \
+ || defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \
+ || defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \
+ || defined(__mips64__) || defined(__mips64)) /* mips64 */ \
+ || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */
+# define XXH_REROLL_XXH64 1
+# else
+# define XXH_REROLL_XXH64 0
+# endif
+#endif /* !defined(XXH_REROLL_XXH64) */
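Since the heuristic can be overridden externally, a hypothetical compile line forcing the unrolled path even where rerolling would be chosen, e.g. to benchmark the code-size/speed trade-off:

    /*   cc -DXXH_REROLL_XXH64=0 -O2 -c xxhash.c   */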
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
-static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
+static xxh_u64 XXH_read64(const void* memPtr) { return *(const xxh_u64*) memPtr; }
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
-typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
-static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
+typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
+static xxh_u64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
#else
@@ -620,9 +694,9 @@ static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
* see : http://stackoverflow.com/a/32095106/646947
*/
-static U64 XXH_read64(const void* memPtr)
+static xxh_u64 XXH_read64(const void* memPtr)
{
- U64 val;
+ xxh_u64 val;
memcpy(&val, memPtr, sizeof(val));
return val;
}
@@ -634,7 +708,7 @@ static U64 XXH_read64(const void* memPtr)
#elif XXH_GCC_VERSION >= 403
# define XXH_swap64 __builtin_bswap64
#else
-static U64 XXH_swap64 (U64 x)
+static xxh_u64 XXH_swap64 (xxh_u64 x)
{
return ((x << 56) & 0xff00000000000000ULL) |
((x << 40) & 0x00ff000000000000ULL) |
@@ -647,35 +721,35 @@ static U64 XXH_swap64 (U64 x)
}
#endif
-XXH_FORCE_INLINE U64 XXH_readLE64(const void* ptr)
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}
-static U64 XXH_readBE64(const void* ptr)
+static xxh_u64 XXH_readBE64(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
-XXH_FORCE_INLINE U64
+XXH_FORCE_INLINE xxh_u64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
if (align==XXH_unaligned)
return XXH_readLE64(ptr);
else
- return XXH_CPU_LITTLE_ENDIAN ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
}
/*====== xxh64 ======*/
-static const U64 PRIME64_1 = 11400714785074694791ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
-static const U64 PRIME64_2 = 14029467366897019727ULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
-static const U64 PRIME64_3 = 1609587929392839161ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
-static const U64 PRIME64_4 = 9650029242287828579ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
-static const U64 PRIME64_5 = 2870177450012600261ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
+static const xxh_u64 PRIME64_1 = 0x9E3779B185EBCA87ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
+static const xxh_u64 PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
+static const xxh_u64 PRIME64_3 = 0x165667B19E3779F9ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
+static const xxh_u64 PRIME64_4 = 0x85EBCA77C2B2AE63ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
+static const xxh_u64 PRIME64_5 = 0x27D4EB2F165667C5ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
-static U64 XXH64_round(U64 acc, U64 input)
+static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
{
acc += input * PRIME64_2;
acc = XXH_rotl64(acc, 31);
@@ -683,7 +757,7 @@ static U64 XXH64_round(U64 acc, U64 input)
return acc;
}
-static U64 XXH64_mergeRound(U64 acc, U64 val)
+static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
{
val = XXH64_round(0, val);
acc ^= val;
@@ -691,7 +765,7 @@ static U64 XXH64_mergeRound(U64 acc, U64 val)
return acc;
}
-static U64 XXH64_avalanche(U64 h64)
+static xxh_u64 XXH64_avalanche(xxh_u64 h64)
{
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
@@ -704,141 +778,155 @@ static U64 XXH64_avalanche(U64 h64)
#define XXH_get64bits(p) XXH_readLE64_align(p, align)
-static U64
-XXH64_finalize(U64 h64, const void* ptr, size_t len, XXH_alignment align)
+static xxh_u64
+XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
- const BYTE* p = (const BYTE*)ptr;
-
#define PROCESS1_64 \
- h64 ^= (*p++) * PRIME64_5; \
+ h64 ^= (*ptr++) * PRIME64_5; \
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
#define PROCESS4_64 \
- h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \
- p+=4; \
+ h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * PRIME64_1; \
+ ptr+=4; \
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
#define PROCESS8_64 { \
- U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \
- p+=8; \
+ xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \
+ ptr+=8; \
h64 ^= k1; \
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
}
- switch(len&31) {
- case 24: PROCESS8_64;
- /* fallthrough */
- case 16: PROCESS8_64;
- /* fallthrough */
- case 8: PROCESS8_64;
- return XXH64_avalanche(h64);
-
- case 28: PROCESS8_64;
- /* fallthrough */
- case 20: PROCESS8_64;
- /* fallthrough */
- case 12: PROCESS8_64;
- /* fallthrough */
- case 4: PROCESS4_64;
- return XXH64_avalanche(h64);
-
- case 25: PROCESS8_64;
- /* fallthrough */
- case 17: PROCESS8_64;
- /* fallthrough */
- case 9: PROCESS8_64;
- PROCESS1_64;
- return XXH64_avalanche(h64);
-
- case 29: PROCESS8_64;
- /* fallthrough */
- case 21: PROCESS8_64;
- /* fallthrough */
- case 13: PROCESS8_64;
- /* fallthrough */
- case 5: PROCESS4_64;
- PROCESS1_64;
- return XXH64_avalanche(h64);
-
- case 26: PROCESS8_64;
- /* fallthrough */
- case 18: PROCESS8_64;
- /* fallthrough */
- case 10: PROCESS8_64;
- PROCESS1_64;
- PROCESS1_64;
- return XXH64_avalanche(h64);
-
- case 30: PROCESS8_64;
- /* fallthrough */
- case 22: PROCESS8_64;
- /* fallthrough */
- case 14: PROCESS8_64;
- /* fallthrough */
- case 6: PROCESS4_64;
- PROCESS1_64;
- PROCESS1_64;
- return XXH64_avalanche(h64);
-
- case 27: PROCESS8_64;
- /* fallthrough */
- case 19: PROCESS8_64;
- /* fallthrough */
- case 11: PROCESS8_64;
- PROCESS1_64;
- PROCESS1_64;
- PROCESS1_64;
- return XXH64_avalanche(h64);
-
- case 31: PROCESS8_64;
- /* fallthrough */
- case 23: PROCESS8_64;
- /* fallthrough */
- case 15: PROCESS8_64;
- /* fallthrough */
- case 7: PROCESS4_64;
- /* fallthrough */
- case 3: PROCESS1_64;
- /* fallthrough */
- case 2: PROCESS1_64;
- /* fallthrough */
- case 1: PROCESS1_64;
- /* fallthrough */
- case 0: return XXH64_avalanche(h64);
+ /* Rerolled version for 32-bit targets is faster and much smaller. */
+ if (XXH_REROLL || XXH_REROLL_XXH64) {
+ len &= 31;
+ while (len >= 8) {
+ PROCESS8_64;
+ len -= 8;
+ }
+ if (len >= 4) {
+ PROCESS4_64;
+ len -= 4;
+ }
+ while (len > 0) {
+ PROCESS1_64;
+ --len;
+ }
+ return XXH64_avalanche(h64);
+ } else {
+ switch(len & 31) {
+ case 24: PROCESS8_64;
+ /* fallthrough */
+ case 16: PROCESS8_64;
+ /* fallthrough */
+ case 8: PROCESS8_64;
+ return XXH64_avalanche(h64);
+
+ case 28: PROCESS8_64;
+ /* fallthrough */
+ case 20: PROCESS8_64;
+ /* fallthrough */
+ case 12: PROCESS8_64;
+ /* fallthrough */
+ case 4: PROCESS4_64;
+ return XXH64_avalanche(h64);
+
+ case 25: PROCESS8_64;
+ /* fallthrough */
+ case 17: PROCESS8_64;
+ /* fallthrough */
+ case 9: PROCESS8_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 29: PROCESS8_64;
+ /* fallthrough */
+ case 21: PROCESS8_64;
+ /* fallthrough */
+ case 13: PROCESS8_64;
+ /* fallthrough */
+ case 5: PROCESS4_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 26: PROCESS8_64;
+ /* fallthrough */
+ case 18: PROCESS8_64;
+ /* fallthrough */
+ case 10: PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 30: PROCESS8_64;
+ /* fallthrough */
+ case 22: PROCESS8_64;
+ /* fallthrough */
+ case 14: PROCESS8_64;
+ /* fallthrough */
+ case 6: PROCESS4_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 27: PROCESS8_64;
+ /* fallthrough */
+ case 19: PROCESS8_64;
+ /* fallthrough */
+ case 11: PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 31: PROCESS8_64;
+ /* fallthrough */
+ case 23: PROCESS8_64;
+ /* fallthrough */
+ case 15: PROCESS8_64;
+ /* fallthrough */
+ case 7: PROCESS4_64;
+ /* fallthrough */
+ case 3: PROCESS1_64;
+ /* fallthrough */
+ case 2: PROCESS1_64;
+ /* fallthrough */
+ case 1: PROCESS1_64;
+ /* fallthrough */
+ case 0: return XXH64_avalanche(h64);
+ }
}
-
/* impossible to reach */
- assert(0);
+ XXH_ASSERT(0);
return 0; /* unreachable, but some compilers complain without it */
}
-XXH_FORCE_INLINE U64
-XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_alignment align)
+XXH_FORCE_INLINE xxh_u64
+XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
{
- const BYTE* p = (const BYTE*)input;
- const BYTE* bEnd = p + len;
- U64 h64;
+ const xxh_u8* bEnd = input + len;
+ xxh_u64 h64;
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
- if (p==NULL) {
+ if (input==NULL) {
len=0;
- bEnd=p=(const BYTE*)(size_t)32;
+ bEnd=input=(const xxh_u8*)(size_t)32;
}
#endif
if (len>=32) {
- const BYTE* const limit = bEnd - 32;
- U64 v1 = seed + PRIME64_1 + PRIME64_2;
- U64 v2 = seed + PRIME64_2;
- U64 v3 = seed + 0;
- U64 v4 = seed - PRIME64_1;
+ const xxh_u8* const limit = bEnd - 32;
+ xxh_u64 v1 = seed + PRIME64_1 + PRIME64_2;
+ xxh_u64 v2 = seed + PRIME64_2;
+ xxh_u64 v3 = seed + 0;
+ xxh_u64 v4 = seed - PRIME64_1;
do {
- v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
- v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
- v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
- v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
- } while (p<=limit);
+ v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
+ v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
+ v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
+ v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
+ } while (input<=limit);
h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
h64 = XXH64_mergeRound(h64, v1);
@@ -850,29 +938,29 @@ XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_alignment align)
h64 = seed + PRIME64_5;
}
- h64 += (U64) len;
+ h64 += (xxh_u64) len;
- return XXH64_finalize(h64, p, len, align);
+ return XXH64_finalize(h64, input, len, align);
}
-XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
+XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH64_state_t state;
XXH64_reset(&state, seed);
- XXH64_update(&state, input, len);
+ XXH64_update(&state, (const xxh_u8*)input, len);
return XXH64_digest(&state);
#else
if (XXH_FORCE_ALIGN_CHECK) {
if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
- return XXH64_endian_align(input, len, seed, XXH_aligned);
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
} }
- return XXH64_endian_align(input, len, seed, XXH_unaligned);
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
@@ -894,7 +982,7 @@ XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t
memcpy(dstState, srcState, sizeof(*dstState));
}
-XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
{
XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
memset(&state, 0, sizeof(state));
@@ -902,8 +990,8 @@ XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long
state.v2 = seed + PRIME64_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME64_1;
- /* do not write into reserved, planned to be removed in a future version */
- memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
+ /* do not write into reserved64, might be removed in a future version */
+ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
return XXH_OK;
}
@@ -917,19 +1005,19 @@ XXH64_update (XXH64_state_t* state, const void* input, size_t len)
return XXH_ERROR;
#endif
- { const BYTE* p = (const BYTE*)input;
- const BYTE* const bEnd = p + len;
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
state->total_len += len;
if (state->memsize + len < 32) { /* fill in tmp buffer */
- XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
- state->memsize += (U32)len;
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
+ state->memsize += (xxh_u32)len;
return XXH_OK;
}
if (state->memsize) { /* tmp buffer is full */
- XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
@@ -939,11 +1027,11 @@ XXH64_update (XXH64_state_t* state, const void* input, size_t len)
}
if (p+32 <= bEnd) {
- const BYTE* const limit = bEnd - 32;
- U64 v1 = state->v1;
- U64 v2 = state->v2;
- U64 v3 = state->v3;
- U64 v4 = state->v4;
+ const xxh_u8* const limit = bEnd - 32;
+ xxh_u64 v1 = state->v1;
+ xxh_u64 v2 = state->v2;
+ xxh_u64 v3 = state->v3;
+ xxh_u64 v4 = state->v4;
do {
v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
@@ -968,15 +1056,15 @@ XXH64_update (XXH64_state_t* state, const void* input, size_t len)
}
-XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state)
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state)
{
- U64 h64;
+ xxh_u64 h64;
if (state->total_len >= 32) {
- U64 const v1 = state->v1;
- U64 const v2 = state->v2;
- U64 const v3 = state->v3;
- U64 const v4 = state->v4;
+ xxh_u64 const v1 = state->v1;
+ xxh_u64 const v2 = state->v2;
+ xxh_u64 const v3 = state->v3;
+ xxh_u64 const v4 = state->v4;
h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
h64 = XXH64_mergeRound(h64, v1);
@@ -987,9 +1075,9 @@ XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state)
h64 = state->v3 /*seed*/ + PRIME64_5;
}
- h64 += (U64) state->total_len;
+ h64 += (xxh_u64) state->total_len;
- return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, XXH_aligned);
+ return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
}
@@ -1007,4 +1095,16 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src
return XXH_readBE64(src);
}
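A round-trip sketch using the XXH64_canonicalFromHash() counterpart declared in xxhash.h: the canonical form is big-endian, so XXH_readBE64() restores the original value on any host.

    #include "xxhash.h"

    XXH64_hash_t roundtrip(XXH64_hash_t h)
    {
        XXH64_canonical_t c;
        XXH64_canonicalFromHash(&c, h);      /* serialize, big-endian */
        return XXH64_hashFromCanonical(&c);  /* equals h on any platform */
    }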
+
+
+/* *********************************************************************
+* XXH3
+* New generation hash designed for speed on small keys and vectorization
+************************************************************************ */
+
+#include "xxh3.h"
+
+
#endif /* XXH_NO_LONG_LONG */
+
+#endif /* XXHASH_C_01393879 */