File Coverage

ext/xxHash/xxhash.c
Criterion    Covered   Total      %
statement         59     198   29.8
branch            18      52   34.6
condition        n/a
subroutine       n/a
pod              n/a
total             77     250   30.8


line stmt bran cond sub pod time code
1             /*
2             * xxHash - Fast Hash algorithm
3             * Copyright (C) 2012-2016, Yann Collet
4             *
5             * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6             *
7             * Redistribution and use in source and binary forms, with or without
8             * modification, are permitted provided that the following conditions are
9             * met:
10             *
11             * * Redistributions of source code must retain the above copyright
12             * notice, this list of conditions and the following disclaimer.
13             * * Redistributions in binary form must reproduce the above
14             * copyright notice, this list of conditions and the following disclaimer
15             * in the documentation and/or other materials provided with the
16             * distribution.
17             *
18             * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19             * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20             * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21             * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22             * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23             * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24             * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25             * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26             * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27             * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28             * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29             *
30             * You can contact the author at :
31             * - xxHash homepage: http://www.xxhash.com
32             * - xxHash source repository : https://github.com/Cyan4973/xxHash
33             */
34              
35              
36             /* *************************************
37             * Tuning parameters
38             ***************************************/
39             /*!XXH_FORCE_MEMORY_ACCESS :
40             * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
41             * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
42             * The switch below allows selecting a different access method for improved performance.
43             * Method 0 (default) : use `memcpy()`. Safe and portable.
44             * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
45             * This method is safe if your compiler supports it, and is *generally* as fast as or faster than `memcpy`.
46             * Method 2 : direct access. This method doesn't depend on a compiler extension, but violates the C standard.
47             * It can generate buggy code on targets which do not support unaligned memory accesses.
48             * But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
49             * See http://stackoverflow.com/a/32095106/646947 for details.
50             * Prefer these methods in priority order (0 > 1 > 2)
51             */
52             #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
53             # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
54             # define XXH_FORCE_MEMORY_ACCESS 2
55             # elif defined(__INTEL_COMPILER) || \
56             (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
57             # define XXH_FORCE_MEMORY_ACCESS 1
58             # endif
59             #endif
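As the guard above notes, this macro is meant to be set externally. A hedged example (generic Unix compiler assumed; adapt the invocation to your toolchain):

    cc -O2 -DXXH_FORCE_MEMORY_ACCESS=1 -c ext/xxHash/xxhash.c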
60              
61             /*!XXH_ACCEPT_NULL_INPUT_POINTER :
62             * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
63             * When this option is enabled, xxHash output for a null input pointer will be the same as for a zero-length input.
64             * By default, this option is disabled. To enable it, uncomment the define below :
65             */
66             /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
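A hedged sketch of the behavior this option enables (seed and length are illustrative):

    /* With XXH_ACCEPT_NULL_INPUT_POINTER defined, a null input is treated
       as a zero-length input instead of faulting: */
    unsigned h_null  = XXH32(NULL, 42, 0);   /* len forced to 0 internally */
    unsigned h_empty = XXH32("",    0, 0);   /* same result as h_null */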
67              
68             /*!XXH_FORCE_NATIVE_FORMAT :
69             * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
70             * Results are therefore identical on little-endian and big-endian CPUs.
71             * This comes at a performance cost for big-endian CPUs, since some byte-swapping is required to emulate the little-endian format.
72             * Should endian-independence be of no importance to your application, you may set the #define below to 1,
73             * to improve speed on big-endian CPUs.
74             * This option has no impact on little-endian CPUs.
75             */
76             #ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
77             # define XXH_FORCE_NATIVE_FORMAT 0
78             #endif
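A hedged illustration of the trade-off (input and seed are arbitrary):

    /* With the default (XXH_FORCE_NATIVE_FORMAT 0), the call below yields
       the same value on any CPU. Built with the macro set to 1, a
       big-endian CPU skips the byte-swaps and produces a different,
       platform-specific value. */
    unsigned h = XXH32("sample", 6, 0);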
79              
80             /*!XXH_FORCE_ALIGN_CHECK :
81             * This is a minor performance trick, only useful with lots of very small keys.
82             * It means : check for aligned/unaligned input.
83             * The check costs one initial branch per hash; set to 0 when the input data
84             * is guaranteed to be aligned.
85             */
86             #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
87             # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
88             # define XXH_FORCE_ALIGN_CHECK 0
89             # else
90             # define XXH_FORCE_ALIGN_CHECK 1
91             # endif
92             #endif
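A hedged sketch of the branch this option controls ('buf' is hypothetical; the test mirrors the one inside XXH32 further below):

    int takes_fast_path = (((size_t)buf) & 3) == 0;   /* 4-byte aligned? */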
93              
94              
95             /* *************************************
96             * Includes & Memory related functions
97             ***************************************/
98             /* Modify the local functions below should you wish to use some other memory routines */
99             /* for malloc(), free() */
100             #include <stdlib.h>
101 0           static void* XXH_malloc(size_t s) { return malloc(s); }
102 0           static void XXH_free (void* p) { free(p); }
103             /* for memcpy() */
104             #include <string.h>
105             static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
106              
107             #define XXH_STATIC_LINKING_ONLY
108             #include "xxhash.h"
109              
110              
111             /* *************************************
112             * Compiler Specific Options
113             ***************************************/
114             #ifdef _MSC_VER /* Visual Studio */
115             # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
116             # define FORCE_INLINE static __forceinline
117             #else
118             # if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
119             # ifdef __GNUC__
120             # define FORCE_INLINE static inline __attribute__((always_inline))
121             # else
122             # define FORCE_INLINE static inline
123             # endif
124             # else
125             # define FORCE_INLINE static
126             # endif /* __STDC_VERSION__ */
127             #endif
128              
129              
130             /* *************************************
131             * Basic Types
132             ***************************************/
133             #ifndef MEM_MODULE
134             # define MEM_MODULE
135             # if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
136             # include <stdint.h>
137             typedef uint8_t BYTE;
138             typedef uint16_t U16;
139             typedef uint32_t U32;
140             typedef int32_t S32;
141             typedef uint64_t U64;
142             # else
143             typedef unsigned char BYTE;
144             typedef unsigned short U16;
145             typedef unsigned int U32;
146             typedef signed int S32;
147             typedef unsigned long long U64;
148             # endif
149             #endif
150              
151              
152             #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
153              
154             /* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
155             static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
156             static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
157              
158             #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
159              
160             /* The __packed attribute is safer, but compiler-specific, hence potentially problematic for some compilers */
161             /* currently only defined for gcc and icc */
162             typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;
163              
164             static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
165             static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
166              
167             #else
168              
169             /* portable and safe solution. Generally efficient.
170             * see : http://stackoverflow.com/a/32095106/646947
171             */
172              
173             static U32 XXH_read32(const void* memPtr)
174             {
175             U32 val;
176             memcpy(&val, memPtr, sizeof(val));
177             return val;
178             }
179              
180             static U64 XXH_read64(const void* memPtr)
181             {
182             U64 val;
183             memcpy(&val, memPtr, sizeof(val));
184             return val;
185             }
186              
187             #endif /* XXH_FORCE_MEMORY_ACCESS */
188              
189              
190             /* ****************************************
191             * Compiler-specific Functions and Macros
192             ******************************************/
193             #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
194              
195             /* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
196             #if defined(_MSC_VER)
197             # define XXH_rotl32(x,r) _rotl(x,r)
198             # define XXH_rotl64(x,r) _rotl64(x,r)
199             #else
200             # define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
201             # define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
202             #endif
203              
204             #if defined(_MSC_VER) /* Visual Studio */
205             # define XXH_swap32 _byteswap_ulong
206             # define XXH_swap64 _byteswap_uint64
207             #elif GCC_VERSION >= 403
208             # define XXH_swap32 __builtin_bswap32
209             # define XXH_swap64 __builtin_bswap64
210             #else
211             static U32 XXH_swap32 (U32 x)
212             {
213             return ((x << 24) & 0xff000000 ) |
214             ((x << 8) & 0x00ff0000 ) |
215             ((x >> 8) & 0x0000ff00 ) |
216             ((x >> 24) & 0x000000ff );
217             }
218             static U64 XXH_swap64 (U64 x)
219             {
220             return ((x << 56) & 0xff00000000000000ULL) |
221             ((x << 40) & 0x00ff000000000000ULL) |
222             ((x << 24) & 0x0000ff0000000000ULL) |
223             ((x << 8) & 0x000000ff00000000ULL) |
224             ((x >> 8) & 0x00000000ff000000ULL) |
225             ((x >> 24) & 0x0000000000ff0000ULL) |
226             ((x >> 40) & 0x000000000000ff00ULL) |
227             ((x >> 56) & 0x00000000000000ffULL);
228             }
229             #endif
230              
231              
232             /* *************************************
233             * Architecture Macros
234             ***************************************/
235             typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
236              
237             /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
238             #ifndef XXH_CPU_LITTLE_ENDIAN
239             static const int g_one = 1;
240             # define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one))
241             #endif
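A hedged standalone sketch of the detection idea used above (not part of xxHash): the first byte of the integer 1 is 1 on a little-endian machine and 0 on a big-endian one.

    #include <stdio.h>

    int main(void)
    {
        static const int one = 1;
        /* prints 1 on little-endian CPUs, 0 on big-endian CPUs */
        printf("XXH_CPU_LITTLE_ENDIAN would be %d\n", (int)*(const char*)&one);
        return 0;
    }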
242              
243              
244             /* ***************************
245             * Memory reads
246             *****************************/
247             typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
248              
249             FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
250             {
251             if (align==XXH_unaligned)
252             return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
253             else
254             return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
255             }
256              
257             FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
258             {
259             return XXH_readLE32_align(ptr, endian, XXH_unaligned);
260             }
261              
262             static U32 XXH_readBE32(const void* ptr)
263             {
264 0           return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
265             }
266              
267             FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
268             {
269             if (align==XXH_unaligned)
270             return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
271             else
272             return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
273             }
274              
275             FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
276             {
277             return XXH_readLE64_align(ptr, endian, XXH_unaligned);
278             }
279              
280             static U64 XXH_readBE64(const void* ptr)
281             {
282 0           return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
283             }
284              
285              
286             /* *************************************
287             * Macros
288             ***************************************/
289             #define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
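A hedged usage sketch (hypothetical function): when the asserted condition is false, the divisor becomes zero and the enum fails to compile.

    static void xxh_check_types(void)
    {
        int declared_first = 0;                 /* declarations come first */
        XXH_STATIC_ASSERT(sizeof(U32) == 4);    /* compiles */
        /* XXH_STATIC_ASSERT(sizeof(U32) == 8);    would break the build */
        (void)declared_first;
    }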
290              
291              
292             /* *************************************
293             * Constants
294             ***************************************/
295             static const U32 PRIME32_1 = 2654435761U;
296             static const U32 PRIME32_2 = 2246822519U;
297             static const U32 PRIME32_3 = 3266489917U;
298             static const U32 PRIME32_4 = 668265263U;
299             static const U32 PRIME32_5 = 374761393U;
300              
301             static const U64 PRIME64_1 = 11400714785074694791ULL;
302             static const U64 PRIME64_2 = 14029467366897019727ULL;
303             static const U64 PRIME64_3 = 1609587929392839161ULL;
304             static const U64 PRIME64_4 = 9650029242287828579ULL;
305             static const U64 PRIME64_5 = 2870177450012600261ULL;
306              
307 0           XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
308              
309              
310             /* ***************************
311             * Simple Hash Functions
312             *****************************/
313              
314             static U32 XXH32_round(U32 seed, U32 input)
315             {
316 80           seed += input * PRIME32_2;
317 80           seed = XXH_rotl32(seed, 13);
318 80           seed *= PRIME32_1;
319             return seed;
320             }
321              
322             FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
323             {
324             const BYTE* p = (const BYTE*)input;
325 14           const BYTE* bEnd = p + len;
326             U32 h32;
327             #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
328              
329             #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
330             if (p==NULL) {
331             len=0;
332             bEnd=p=(const BYTE*)(size_t)16;
333             }
334             #endif
335              
336 14 100         if (len>=16) {
337 4           const BYTE* const limit = bEnd - 16;
338 4           U32 v1 = seed + PRIME32_1 + PRIME32_2;
339 4           U32 v2 = seed + PRIME32_2;
340             U32 v3 = seed + 0;
341 4           U32 v4 = seed - PRIME32_1;
342              
343             do {
344             v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
345             v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
346             v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
347 20           v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
348 20 100         } while (p<=limit);
349              
350 4           h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
351             } else {
352 10           h32 = seed + PRIME32_5;
353             }
354              
355 14           h32 += (U32) len;
356              
357 28 100         while (p+4<=bEnd) {
358 14           h32 += XXH_get32bits(p) * PRIME32_3;
359 14           h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
360             p+=4;
361             }
362              
363 24 100         while (p<bEnd) {
364 10           h32 += (*p) * PRIME32_5;
365 10           h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
366 10           p++;
367             }
368              
369 14           h32 ^= h32 >> 15;
370 14           h32 *= PRIME32_2;
371 14           h32 ^= h32 >> 13;
372 14           h32 *= PRIME32_3;
373 14           h32 ^= h32 >> 16;
374              
375             return h32;
376             }
377              
378              
379 14           XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
380             {
381             #if 0
382             /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
383             XXH32_CREATESTATE_STATIC(state);
384             XXH32_reset(state, seed);
385             XXH32_update(state, input, len);
386             return XXH32_digest(state);
387             #else
388             XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
389              
390             if (XXH_FORCE_ALIGN_CHECK) {
391             if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
392             if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
393             return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
394             else
395             return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
396             } }
397              
398             if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
399             return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
400             else
401             return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
402             #endif
403             }
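A hedged one-shot usage sketch (buffer and seed are arbitrary):

    const char msg[] = "hello";
    unsigned h32 = XXH32(msg, sizeof(msg) - 1, 0 /* seed */);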
404              
405              
406             static U64 XXH64_round(U64 acc, U64 input)
407             {
408 25042           acc += input * PRIME64_2;
409 25042           acc = XXH_rotl64(acc, 31);
410 25042           acc *= PRIME64_1;
411             return acc;
412             }
413              
414             static U64 XXH64_mergeRound(U64 acc, U64 val)
415             {
416             val = XXH64_round(0, val);
417 16           acc ^= val;
418 4           acc = acc * PRIME64_1 + PRIME64_4;
419             return acc;
420             }
421              
422             FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
423             {
424             const BYTE* p = (const BYTE*)input;
425 13           const BYTE* const bEnd = p + len;
426             U64 h64;
427             #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
428              
429             #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
430             if (p==NULL) {
431             len=0;
432             bEnd=p=(const BYTE*)(size_t)32;
433             }
434             #endif
435              
436 13 100         if (len>=32) {
437 4           const BYTE* const limit = bEnd - 32;
438 4           U64 v1 = seed + PRIME64_1 + PRIME64_2;
439 4           U64 v2 = seed + PRIME64_2;
440             U64 v3 = seed + 0;
441 4           U64 v4 = seed - PRIME64_1;
442              
443             do {
444             v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
445             v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
446             v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
447 6256           v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
448 6256 100         } while (p<=limit);
449              
450 4           h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
451             h64 = XXH64_mergeRound(h64, v1);
452             h64 = XXH64_mergeRound(h64, v2);
453             h64 = XXH64_mergeRound(h64, v3);
454             h64 = XXH64_mergeRound(h64, v4);
455              
456             } else {
457 9           h64 = seed + PRIME64_5;
458             }
459              
460 13           h64 += (U64) len;
461              
462 15 100         while (p+8<=bEnd) {
463             U64 const k1 = XXH64_round(0, XXH_get64bits(p));
464 2           h64 ^= k1;
465 2           h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
466             p+=8;
467             }
468              
469 13 100         if (p+4<=bEnd) {
470 7           h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
471 13           h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
472             p+=4;
473             }
474              
475 27 100         while (p<bEnd) {
476 14           h64 ^= (*p) * PRIME64_5;
477 14           h64 = XXH_rotl64(h64, 11) * PRIME64_1;
478 14           p++;
479             }
480              
481 13           h64 ^= h64 >> 33;
482 13           h64 *= PRIME64_2;
483 13           h64 ^= h64 >> 29;
484 13           h64 *= PRIME64_3;
485 13           h64 ^= h64 >> 32;
486              
487             return h64;
488             }
489              
490              
491 13           XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
492             {
493             #if 0
494             /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
495             XXH64_CREATESTATE_STATIC(state);
496             XXH64_reset(state, seed);
497             XXH64_update(state, input, len);
498             return XXH64_digest(state);
499             #else
500             XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
501              
502             if (XXH_FORCE_ALIGN_CHECK) {
503             if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
504             if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
505             return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
506             else
507             return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
508             } }
509              
510             if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
511             return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
512             else
513             return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
514             #endif
515             }
516              
517              
518             /* **************************************************
519             * Advanced Hash Functions
520             ****************************************************/
521              
522 0           XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
523             {
524 0           return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
525             }
526 0           XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
527             {
528             XXH_free(statePtr);
529 0           return XXH_OK;
530             }
531              
532 0           XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
533             {
534 0           return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
535             }
536 0           XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
537             {
538             XXH_free(statePtr);
539 0           return XXH_OK;
540             }
541              
542              
543             /*** Hash feed ***/
544              
545 0           XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
546             {
547             XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
548             memset(&state, 0, sizeof(state));
549 0           state.seed = seed;
550 0           state.v1 = seed + PRIME32_1 + PRIME32_2;
551 0           state.v2 = seed + PRIME32_2;
552 0           state.v3 = seed + 0;
553 0           state.v4 = seed - PRIME32_1;
554             memcpy(statePtr, &state, sizeof(state));
555 0           return XXH_OK;
556             }
557              
558              
559 0           XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
560             {
561             XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
562             memset(&state, 0, sizeof(state));
563 0           state.seed = seed;
564 0           state.v1 = seed + PRIME64_1 + PRIME64_2;
565 0           state.v2 = seed + PRIME64_2;
566 0           state.v3 = seed + 0;
567 0           state.v4 = seed - PRIME64_1;
568             memcpy(statePtr, &state, sizeof(state));
569 0           return XXH_OK;
570             }
571              
572              
573             FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
574             {
575             const BYTE* p = (const BYTE*)input;
576 0           const BYTE* const bEnd = p + len;
577              
578             #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
579             if (input==NULL) return XXH_ERROR;
580             #endif
581              
582 0           state->total_len += len;
583              
584 0 0         if (state->memsize + len < 16) { /* fill in tmp buffer */
585 0           XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
586 0           state->memsize += (U32)len;
587             return XXH_OK;
588             }
589              
590 0 0         if (state->memsize) { /* some data left from previous update */
591 0           XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
592             { const U32* p32 = state->mem32;
593 0           state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
594 0           state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
595 0           state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
596 0           state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
597             }
598 0           p += 16-state->memsize;
599 0           state->memsize = 0;
600             }
601              
602 0 0         if (p <= bEnd-16) {
603             const BYTE* const limit = bEnd - 16;
604 0           U32 v1 = state->v1;
605 0           U32 v2 = state->v2;
606 0           U32 v3 = state->v3;
607 0           U32 v4 = state->v4;
608              
609             do {
610             v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
611             v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
612             v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
613 0           v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
614 0 0         } while (p<=limit);
615              
616 0           state->v1 = v1;
617 0           state->v2 = v2;
618 0           state->v3 = v3;
619 0           state->v4 = v4;
620             }
621              
622 0 0         if (p < bEnd) {
623 0           XXH_memcpy(state->mem32, p, bEnd-p);
624 0           state->memsize = (int)(bEnd-p);
625             }
626              
627             return XXH_OK;
628             }
629              
630 0           XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
631             {
632             XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
633              
634             if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
635             return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
636             else
637             return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
638             }
639              
640              
641              
642             FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
643             {
644 0           const BYTE * p = (const BYTE*)state->mem32;
645 0           const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
646             U32 h32;
647              
648 0 0         if (state->total_len >= 16) {
649 0           h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
650             } else {
651 0           h32 = state->seed + PRIME32_5;
652             }
653              
654 0           h32 += (U32) state->total_len;
655              
656 0 0         while (p+4<=bEnd) {
657 0           h32 += XXH_readLE32(p, endian) * PRIME32_3;
658 0           h32 = XXH_rotl32(h32, 17) * PRIME32_4;
659             p+=4;
660             }
661              
662 0 0         while (p<bEnd) {
663 0           h32 += (*p) * PRIME32_5;
664 0           h32 = XXH_rotl32(h32, 11) * PRIME32_1;
665 0           p++;
666             }
667              
668 0           h32 ^= h32 >> 15;
669 0           h32 *= PRIME32_2;
670 0           h32 ^= h32 >> 13;
671 0           h32 *= PRIME32_3;
672 0           h32 ^= h32 >> 16;
673              
674             return h32;
675             }
676              
677              
678 0           XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
679             {
680             XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
681              
682             if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
683             return XXH32_digest_endian(state_in, XXH_littleEndian);
684             else
685             return XXH32_digest_endian(state_in, XXH_bigEndian);
686             }
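A hedged sketch tying the streaming API together; per the "simple version" comment inside XXH32 earlier, this is equivalent to one-shot hashing of the concatenated input (the two-part split is hypothetical):

    static unsigned hash_two_parts(const void* a, size_t la,
                                   const void* b, size_t lb, unsigned seed)
    {
        XXH32_state_t* const st = XXH32_createState();
        unsigned h;
        XXH32_reset(st, seed);
        XXH32_update(st, a, la);
        XXH32_update(st, b, lb);
        h = XXH32_digest(st);
        XXH32_freeState(st);
        return h;
    }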
687              
688              
689              
690             /* **** XXH64 **** */
691              
692             FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
693             {
694             const BYTE* p = (const BYTE*)input;
695 0           const BYTE* const bEnd = p + len;
696              
697             #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
698             if (input==NULL) return XXH_ERROR;
699             #endif
700              
701 0           state->total_len += len;
702              
703 0 0         if (state->memsize + len < 32) { /* fill in tmp buffer */
704 0           XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
705 0           state->memsize += (U32)len;
706             return XXH_OK;
707             }
708              
709 0 0         if (state->memsize) { /* tmp buffer is full */
710 0           XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
711 0           state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
712 0           state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
713 0           state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
714 0           state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
715 0           p += 32-state->memsize;
716 0           state->memsize = 0;
717             }
718              
719 0 0         if (p+32 <= bEnd) {
720 0           const BYTE* const limit = bEnd - 32;
721 0           U64 v1 = state->v1;
722 0           U64 v2 = state->v2;
723 0           U64 v3 = state->v3;
724 0           U64 v4 = state->v4;
725              
726             do {
727             v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
728             v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
729             v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
730 0           v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
731 0 0         } while (p<=limit);
732              
733 0           state->v1 = v1;
734 0           state->v2 = v2;
735 0           state->v3 = v3;
736 0           state->v4 = v4;
737             }
738              
739 0 0         if (p < bEnd) {
740 0           XXH_memcpy(state->mem64, p, bEnd-p);
741 0           state->memsize = (int)(bEnd-p);
742             }
743              
744             return XXH_OK;
745             }
746              
747 0           XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
748             {
749             XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
750              
751             if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
752             return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
753             else
754             return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
755             }
756              
757              
758              
759             FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
760             {
761 0           const BYTE * p = (const BYTE*)state->mem64;
762 0           const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
763             U64 h64;
764              
765 0 0         if (state->total_len >= 32) {
766 0           U64 const v1 = state->v1;
767 0           U64 const v2 = state->v2;
768 0           U64 const v3 = state->v3;
769 0           U64 const v4 = state->v4;
770              
771 0           h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
772             h64 = XXH64_mergeRound(h64, v1);
773             h64 = XXH64_mergeRound(h64, v2);
774             h64 = XXH64_mergeRound(h64, v3);
775             h64 = XXH64_mergeRound(h64, v4);
776             } else {
777 0           h64 = state->seed + PRIME64_5;
778             }
779              
780 0           h64 += (U64) state->total_len;
781              
782 0 0         while (p+8<=bEnd) {
783             U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
784 0           h64 ^= k1;
785 0           h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
786             p+=8;
787             }
788              
789 0 0         if (p+4<=bEnd) {
790 0           h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
791 0           h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
792             p+=4;
793             }
794              
795 0 0         while (p<bEnd) {
796 0           h64 ^= (*p) * PRIME64_5;
797 0           h64 = XXH_rotl64(h64, 11) * PRIME64_1;
798 0           p++;
799             }
800              
801 0           h64 ^= h64 >> 33;
802 0           h64 *= PRIME64_2;
803 0           h64 ^= h64 >> 29;
804 0           h64 *= PRIME64_3;
805 0           h64 ^= h64 >> 32;
806              
807             return h64;
808             }
809              
810              
811 0           XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
812             {
813             XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
814              
815             if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
816             return XXH64_digest_endian(state_in, XXH_littleEndian);
817             else
818             return XXH64_digest_endian(state_in, XXH_bigEndian);
819             }
820              
821              
822             /* **************************
823             * Canonical representation
824             ****************************/
825              
826             /*! Default XXH result types are basic unsigned 32- and 64-bit integers.
827             * The canonical representation follows the human-readable write convention, i.e. big-endian (most significant digits first).
828             * These functions allow transformation of a hash result into and from its canonical format.
829             * This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
830             */
831              
832 0           XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
833             {
834             XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
835 0           if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
836             memcpy(dst, &hash, sizeof(*dst));
837 0           }
838              
839 0           XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
840             {
841             XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
842 0           if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
843             memcpy(dst, &hash, sizeof(*dst));
844 0           }
845              
846 0           XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
847             {
848 0           return XXH_readBE32(src);
849             }
850              
851 0           XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
852             {
853 0           return XXH_readBE64(src);
854             }
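A hedged round-trip sketch for the canonical form (input and seed are arbitrary):

    XXH32_canonical_t c;
    XXH32_canonicalFromHash(&c, XXH32("data", 4, 0));
    /* the 4 bytes of 'c' are big-endian and can be written to a file
       as-is; reading them back on any machine recovers the value: */
    XXH32_hash_t h = XXH32_hashFromCanonical(&c);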