LCOV - code coverage report
Current view: top level - pression/compressor/zstd/lib/common - xxhash.c (source / functions) Hit Total Coverage
Test: Pression Lines: 8 303 2.6 %
Date: 2016-12-06 05:44:58 Functions: 1 29 3.4 %

          Line data    Source code
       1             : /*
       2             : *  xxHash - Fast Hash algorithm
       3             : *  Copyright (C) 2012-2016, Yann Collet
       4             : *
       5             : *  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
       6             : *
       7             : *  Redistribution and use in source and binary forms, with or without
       8             : *  modification, are permitted provided that the following conditions are
       9             : *  met:
      10             : *
      11             : *  * Redistributions of source code must retain the above copyright
      12             : *  notice, this list of conditions and the following disclaimer.
      13             : *  * Redistributions in binary form must reproduce the above
      14             : *  copyright notice, this list of conditions and the following disclaimer
      15             : *  in the documentation and/or other materials provided with the
      16             : *  distribution.
      17             : *
      18             : *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      19             : *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      20             : *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      21             : *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      22             : *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      23             : *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      24             : *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      25             : *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      26             : *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      27             : *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      28             : *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      29             : *
      30             : *  You can contact the author at :
      31             : *  - xxHash homepage: http://www.xxhash.com
      32             : *  - xxHash source repository : https://github.com/Cyan4973/xxHash
      33             : */
      34             : 
      35             : 
      36             : /* *************************************
      37             : *  Tuning parameters
      38             : ***************************************/
      39             : /*!XXH_FORCE_MEMORY_ACCESS :
      40             :  * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
      41             :  * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
      43             :  * Method 0 (default) : use `memcpy()`. Safe and portable.
      44             :  * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
      45             :  *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
      46             :  * Method 2 : direct access. This method doesn't depend on compiler but violate C standard.
      47             :  *            It can generate buggy code on targets which do not support unaligned memory accesses.
      48             :  *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
      49             :  * See http://stackoverflow.com/a/32095106/646947 for details.
      50             :  * Prefer these methods in priority order (0 > 1 > 2)
      51             :  */
      52             : #ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
      53             : #  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
      54             : #    define XXH_FORCE_MEMORY_ACCESS 2
      55             : #  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
      56             :   (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
      57             : #    define XXH_FORCE_MEMORY_ACCESS 1
      58             : #  endif
      59             : #endif
      60             : 
      61             : /*!XXH_ACCEPT_NULL_INPUT_POINTER :
      62             :  * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
      63             :  * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
      64             :  * By default, this option is disabled. To enable it, uncomment below define :
      65             :  */
      66             : /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
      67             : 
      68             : /*!XXH_FORCE_NATIVE_FORMAT :
 * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
      70             :  * Results are therefore identical for little-endian and big-endian CPU.
      71             :  * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
 * Should endian-independence be of no importance for your application, you may set the #define below to 1,
      73             :  * to improve speed for Big-endian CPU.
      74             :  * This option has no impact on Little_Endian CPU.
      75             :  */
      76             : #ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
      77             : #  define XXH_FORCE_NATIVE_FORMAT 0
      78             : #endif
      79             : 
      80             : /*!XXH_FORCE_ALIGN_CHECK :
      81             :  * This is a minor performance trick, only useful with lots of very small keys.
      82             :  * It means : check for aligned/unaligned input.
      83             :  * The check costs one initial branch per hash; set to 0 when the input data
      84             :  * is guaranteed to be aligned.
      85             :  */
      86             : #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
      87             : #  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
      88             : #    define XXH_FORCE_ALIGN_CHECK 0
      89             : #  else
      90             : #    define XXH_FORCE_ALIGN_CHECK 1
      91             : #  endif
      92             : #endif
      93             : 
      94             : 
      95             : /* *************************************
      96             : *  Includes & Memory related functions
      97             : ***************************************/
      98             : /* Modify the local functions below should you wish to use some other memory routines */
      99             : /* for malloc(), free() */
     100             : #include <stdlib.h>
/* Thin wrappers around the standard allocator so all memory routines can be
 * swapped out in one place (see "Memory related functions" note above). */
static void* XXH_malloc(size_t s) { return malloc(s); }
static void  XXH_free  (void* p)  { free(p); }
/* for memcpy() */
#include <string.h>
/* Wrapper kept for the same reason as XXH_malloc/XXH_free: single override point. */
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
     106             : 
     107             : #define XXH_STATIC_LINKING_ONLY
     108             : #include "xxhash.h"
     109             : 
     110             : 
     111             : /* *************************************
     112             : *  Compiler Specific Options
     113             : ***************************************/
     114             : #ifdef _MSC_VER    /* Visual Studio */
     115             : #  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
     116             : #  define FORCE_INLINE static __forceinline
     117             : #else
     118             : #  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
     119             : #    ifdef __GNUC__
     120             : #      define FORCE_INLINE static inline __attribute__((always_inline))
     121             : #    else
     122             : #      define FORCE_INLINE static inline
     123             : #    endif
     124             : #  else
     125             : #    define FORCE_INLINE static
     126             : #  endif /* __STDC_VERSION__ */
     127             : #endif
     128             : 
     129             : 
     130             : /* *************************************
     131             : *  Basic Types
     132             : ***************************************/
     133             : #ifndef MEM_MODULE
     134             : # define MEM_MODULE
     135             : # if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
     136             : #   include <stdint.h>
     137             :     typedef uint8_t  BYTE;
     138             :     typedef uint16_t U16;
     139             :     typedef uint32_t U32;
     140             :     typedef  int32_t S32;
     141             :     typedef uint64_t U64;
     142             : #  else
     143             :     typedef unsigned char      BYTE;
     144             :     typedef unsigned short     U16;
     145             :     typedef unsigned int       U32;
     146             :     typedef   signed int       S32;
     147             :     typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
     148             : #  endif
     149             : #endif
     150             : 
     151             : 
     152             : #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
     153             : 
     154             : /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
     155             : static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
     156             : static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
     157             : 
     158             : #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
     159             : 
     160             : /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
     161             : /* currently only defined for gcc and icc */
     162             : typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;
     163             : 
     164             : static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
     165             : static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
     166             : 
     167             : #else
     168             : 
     169             : /* portable and safe solution. Generally efficient.
     170             :  * see : http://stackoverflow.com/a/32095106/646947
     171             :  */
     172             : 
     173           0 : static U32 XXH_read32(const void* memPtr)
     174             : {
     175             :     U32 val;
     176           0 :     memcpy(&val, memPtr, sizeof(val));
     177           0 :     return val;
     178             : }
     179             : 
     180           0 : static U64 XXH_read64(const void* memPtr)
     181             : {
     182             :     U64 val;
     183           0 :     memcpy(&val, memPtr, sizeof(val));
     184           0 :     return val;
     185             : }
     186             : 
#endif   /* XXH_FORCE_MEMORY_ACCESS */
     188             : 
     189             : 
     190             : /* ****************************************
     191             : *  Compiler-specific Functions and Macros
     192             : ******************************************/
     193             : #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
     194             : 
     195             : /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
     196             : #if defined(_MSC_VER)
     197             : #  define XXH_rotl32(x,r) _rotl(x,r)
     198             : #  define XXH_rotl64(x,r) _rotl64(x,r)
     199             : #else
     200             : #  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
     201             : #  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
     202             : #endif
     203             : 
     204             : #if defined(_MSC_VER)     /* Visual Studio */
     205             : #  define XXH_swap32 _byteswap_ulong
     206             : #  define XXH_swap64 _byteswap_uint64
     207             : #elif GCC_VERSION >= 403
     208             : #  define XXH_swap32 __builtin_bswap32
     209             : #  define XXH_swap64 __builtin_bswap64
     210             : #else
     211             : static U32 XXH_swap32 (U32 x)
     212             : {
     213             :     return  ((x << 24) & 0xff000000 ) |
     214             :             ((x <<  8) & 0x00ff0000 ) |
     215             :             ((x >>  8) & 0x0000ff00 ) |
     216             :             ((x >> 24) & 0x000000ff );
     217             : }
     218             : static U64 XXH_swap64 (U64 x)
     219             : {
     220             :     return  ((x << 56) & 0xff00000000000000ULL) |
     221             :             ((x << 40) & 0x00ff000000000000ULL) |
     222             :             ((x << 24) & 0x0000ff0000000000ULL) |
     223             :             ((x << 8)  & 0x000000ff00000000ULL) |
     224             :             ((x >> 8)  & 0x00000000ff000000ULL) |
     225             :             ((x >> 24) & 0x0000000000ff0000ULL) |
     226             :             ((x >> 40) & 0x000000000000ff00ULL) |
     227             :             ((x >> 56) & 0x00000000000000ffULL);
     228             : }
     229             : #endif
     230             : 
     231             : 
     232             : /* *************************************
     233             : *  Architecture Macros
     234             : ***************************************/
     235             : typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
     236             : 
     237             : /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
     238             : #ifndef XXH_CPU_LITTLE_ENDIAN
     239             :     static const int g_one = 1;
     240             : #   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
     241             : #endif
     242             : 
     243             : 
     244             : /* ***************************
     245             : *  Memory reads
     246             : *****************************/
     247             : typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
     248             : 
/* Read a 32-bit value as little-endian.
 * When align==XXH_aligned the caller guarantees 4-byte alignment and a direct
 * dereference is used; otherwise XXH_read32() performs a safe unaligned read.
 * On big-endian hosts the value is byte-swapped so results stay identical
 * across endiannesses. */
FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

/* Convenience wrapper: unaligned little-endian 32-bit read. */
FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

/* Unaligned big-endian 32-bit read (used for the canonical representation). */
static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
     266             : 
/* 64-bit analogue of XXH_readLE32_align(): little-endian read, with a direct
 * dereference on the aligned path and a byte-swap on big-endian hosts. */
FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

/* Convenience wrapper: unaligned little-endian 64-bit read. */
FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}

/* Unaligned big-endian 64-bit read (used for the canonical representation). */
static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
     284             : 
     285             : 
     286             : /* *************************************
     287             : *  Macros
     288             : ***************************************/
     289             : #define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */
     290             : 
     291             : 
     292             : /* *************************************
     293             : *  Constants
     294             : ***************************************/
     295             : static const U32 PRIME32_1 = 2654435761U;
     296             : static const U32 PRIME32_2 = 2246822519U;
     297             : static const U32 PRIME32_3 = 3266489917U;
     298             : static const U32 PRIME32_4 =  668265263U;
     299             : static const U32 PRIME32_5 =  374761393U;
     300             : 
     301             : static const U64 PRIME64_1 = 11400714785074694791ULL;
     302             : static const U64 PRIME64_2 = 14029467366897019727ULL;
     303             : static const U64 PRIME64_3 =  1609587929392839161ULL;
     304             : static const U64 PRIME64_4 =  9650029242287828579ULL;
     305             : static const U64 PRIME64_5 =  2870177450012600261ULL;
     306             : 
/* Returns the library version as an integer (XXH_VERSION_NUMBER from xxhash.h). */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
     308             : 
     309             : 
     310             : /* **************************
     311             : *  Utils
     312             : ****************************/
     313           0 : XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
     314             : {
     315           0 :     memcpy(dstState, srcState, sizeof(*dstState));
     316           0 : }
     317             : 
     318           0 : XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
     319             : {
     320           0 :     memcpy(dstState, srcState, sizeof(*dstState));
     321           0 : }
     322             : 
     323             : 
     324             : /* ***************************
     325             : *  Simple Hash Functions
     326             : *****************************/
     327             : 
     328           0 : static U32 XXH32_round(U32 seed, U32 input)
     329             : {
     330           0 :     seed += input * PRIME32_2;
     331           0 :     seed  = XXH_rotl32(seed, 13);
     332           0 :     seed *= PRIME32_1;
     333           0 :     return seed;
     334             : }
     335             : 
/* Single-shot XXH32 core. Processes `len` bytes starting at `input` with the
 * given `seed`; `endian`/`align` are compile-time-specialized by the callers
 * so each combination gets its own optimized instantiation. */
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL as an empty input instead of dereferencing it. */
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16) {
        /* Main loop: four independent accumulators, 16 bytes per iteration. */
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p<=limit);

        /* Converge the four lanes into a single 32-bit value. */
        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        /* Inputs shorter than 16 bytes skip the lane accumulators entirely. */
        h32  = seed + PRIME32_5;
    }

    h32 += (U32) len;

    /* Consume remaining 4-byte words. */
    while (p+4<=bEnd) {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
        p+=4;
    }

    /* Consume remaining tail bytes one at a time. */
    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
        p++;
    }

    /* Final avalanche: spread the influence of every input bit. */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
     391             : 
     392             : 
/* Public single-shot 32-bit hash.
 * Dispatches on runtime endianness and (when XXH_FORCE_ALIGN_CHECK is set)
 * on input alignment, so the FORCE_INLINE core is specialized per case. */
XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_CREATESTATE_STATIC(state);
    XXH32_reset(state, seed);
    XXH32_update(state, input, len);
    return XXH32_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    /* Generic (unaligned-safe) path. */
    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
     418             : 
     419             : 
     420           0 : static U64 XXH64_round(U64 acc, U64 input)
     421             : {
     422           0 :     acc += input * PRIME64_2;
     423           0 :     acc  = XXH_rotl64(acc, 31);
     424           0 :     acc *= PRIME64_1;
     425           0 :     return acc;
     426             : }
     427             : 
     428           0 : static U64 XXH64_mergeRound(U64 acc, U64 val)
     429             : {
     430           0 :     val  = XXH64_round(0, val);
     431           0 :     acc ^= val;
     432           0 :     acc  = acc * PRIME64_1 + PRIME64_4;
     433           0 :     return acc;
     434             : }
     435             : 
/* Single-shot XXH64 core. Same structure as the 32-bit version, with 32-byte
 * stripes and an extra merge phase for the four lane accumulators. */
FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL as an empty input instead of dereferencing it. */
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32) {
        /* Main loop: four independent accumulators, 32 bytes per iteration. */
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
        } while (p<=limit);

        /* Converge the four lanes, then mix each lane back in. */
        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        /* Inputs shorter than 32 bytes skip the lane accumulators entirely. */
        h64  = seed + PRIME64_5;
    }

    h64 += (U64) len;

    /* Consume remaining 8-byte words. */
    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    /* At most one 4-byte word can remain. */
    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    /* Consume remaining tail bytes one at a time. */
    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* Final avalanche: spread the influence of every input bit. */
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
     503             : 
     504             : 
/* Public single-shot 64-bit hash.
 * Dispatches on runtime endianness and (when XXH_FORCE_ALIGN_CHECK is set)
 * on input alignment, so the FORCE_INLINE core is specialized per case. */
XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_CREATESTATE_STATIC(state);
    XXH64_reset(state, seed);
    XXH64_update(state, input, len);
    return XXH64_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    /* Generic (unaligned-safe) path. */
    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
     530             : 
     531             : 
     532             : /* **************************************************
     533             : *  Advanced Hash Functions
     534             : ****************************************************/
     535             : 
     536           0 : XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
     537             : {
     538           0 :     return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
     539             : }
     540           0 : XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
     541             : {
     542           0 :     XXH_free(statePtr);
     543           0 :     return XXH_OK;
     544             : }
     545             : 
     546           0 : XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
     547             : {
     548           0 :     return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
     549             : }
     550           0 : XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
     551             : {
     552           0 :     XXH_free(statePtr);
     553           0 :     return XXH_OK;
     554             : }
     555             : 
     556             : 
     557             : /*** Hash feed ***/
     558             : 
     559           0 : XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
     560             : {
     561             :     XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
     562           0 :     memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
     563           0 :     state.v1 = seed + PRIME32_1 + PRIME32_2;
     564           0 :     state.v2 = seed + PRIME32_2;
     565           0 :     state.v3 = seed + 0;
     566           0 :     state.v4 = seed - PRIME32_1;
     567           0 :     memcpy(statePtr, &state, sizeof(state));
     568           0 :     return XXH_OK;
     569             : }
     570             : 
     571             : 
     572          74 : XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
     573             : {
     574             :     XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
     575          74 :     memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
     576          74 :     state.v1 = seed + PRIME64_1 + PRIME64_2;
     577          74 :     state.v2 = seed + PRIME64_2;
     578          74 :     state.v3 = seed + 0;
     579          74 :     state.v4 = seed - PRIME64_1;
     580          74 :     memcpy(statePtr, &state, sizeof(state));
     581          74 :     return XXH_OK;
     582             : }
     583             : 
     584             : 
     585             : FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
     586             : {
     587           0 :     const BYTE* p = (const BYTE*)input;
     588           0 :     const BYTE* const bEnd = p + len;
     589             : 
     590             : #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
     591             :     if (input==NULL) return XXH_ERROR;
     592             : #endif
     593             : 
     594           0 :     state->total_len_32 += (unsigned)len;
     595           0 :     state->large_len |= (len>=16) | (state->total_len_32>=16);
     596             : 
     597           0 :     if (state->memsize + len < 16)  {   /* fill in tmp buffer */
     598           0 :         XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
     599           0 :         state->memsize += (unsigned)len;
     600           0 :         return XXH_OK;
     601             :     }
     602             : 
     603           0 :     if (state->memsize) {   /* some data left from previous update */
     604           0 :         XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
     605           0 :         {   const U32* p32 = state->mem32;
     606           0 :             state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
     607           0 :             state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
     608           0 :             state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
     609           0 :             state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
     610             :         }
     611           0 :         p += 16-state->memsize;
     612           0 :         state->memsize = 0;
     613             :     }
     614             : 
     615           0 :     if (p <= bEnd-16) {
     616           0 :         const BYTE* const limit = bEnd - 16;
     617           0 :         U32 v1 = state->v1;
     618           0 :         U32 v2 = state->v2;
     619           0 :         U32 v3 = state->v3;
     620           0 :         U32 v4 = state->v4;
     621             : 
     622             :         do {
     623           0 :             v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
     624           0 :             v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
     625           0 :             v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
     626           0 :             v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
     627           0 :         } while (p<=limit);
     628             : 
     629           0 :         state->v1 = v1;
     630           0 :         state->v2 = v2;
     631           0 :         state->v3 = v3;
     632           0 :         state->v4 = v4;
     633             :     }
     634             : 
     635           0 :     if (p < bEnd) {
     636           0 :         XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
     637           0 :         state->memsize = (unsigned)(bEnd-p);
     638             :     }
     639             : 
     640           0 :     return XXH_OK;
     641             : }
     642             : 
     643           0 : XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
     644             : {
     645           0 :     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
     646             : 
     647           0 :     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
     648           0 :         return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
     649             :     else
     650           0 :         return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
     651             : }
     652             : 
     653             : 
     654             : 
/* Finalization: folds the running accumulators plus any bytes still buffered
 * in state->mem32 into the final 32-bit hash.  Read-only on 'state', so a
 * digest can be taken at any point and streaming may continue afterwards. */
FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem32;   /* leftover bytes (< 16) not yet consumed by a full stripe */
    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->large_len) {
        /* at least one full stripe was processed: merge the four accumulators */
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    } else {
        /* short input: accumulators never ran; v3 still holds the seed (set in XXH32_reset) */
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    /* consume the buffered tail, 4 bytes at a time... */
    while (p+4<=bEnd) {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    /* ...then byte by byte */
    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    /* final avalanche: spread the influence of every input bit */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
     689             : 
     690             : 
     691           0 : XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
     692             : {
     693           0 :     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
     694             : 
     695           0 :     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
     696           0 :         return XXH32_digest_endian(state_in, XXH_littleEndian);
     697             :     else
     698           0 :         return XXH32_digest_endian(state_in, XXH_bigEndian);
     699             : }
     700             : 
     701             : 
     702             : 
     703             : /* **** XXH64 **** */
     704             : 
/* Streaming core for XXH64: folds 'len' bytes of 'input' into 'state'.
 * Bytes that do not complete a 32-byte stripe are buffered in state->mem64
 * and consumed by a later update or by the digest.
 * Returns XXH_OK (XXH_ERROR only for NULL input when the
 * XXH_ACCEPT_NULL_INPUT_POINTER build option is enabled). */
FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) {  /* not enough for a stripe: just fill the tmp buffer */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* tmp buffer holds a partial stripe: complete and consume it */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
        p += 32-state->memsize;
        state->memsize = 0;
    }

    if (p+32 <= bEnd) {
        /* main loop: 32-byte stripes, 8 bytes per accumulator; locals so the
         * accumulators can live in registers */
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do {
            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {   /* stash the tail (< 32 bytes) for the next call */
        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}
     759             : 
     760           0 : XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
     761             : {
     762           0 :     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
     763             : 
     764           0 :     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
     765           0 :         return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
     766             :     else
     767           0 :         return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
     768             : }
     769             : 
     770             : 
     771             : 
/* Finalization: folds the running accumulators plus any bytes still buffered
 * in state->mem64 into the final 64-bit hash.  Read-only on 'state', so a
 * digest can be taken at any point and streaming may continue afterwards. */
FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem64;   /* leftover bytes (< 32) not yet consumed by a full stripe */
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32) {
        /* at least one full stripe was processed: merge the four accumulators */
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        /* short input: accumulators never ran; v3 still holds the seed (set in XXH64_reset) */
        h64  = state->v3 + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    /* consume the buffered tail: 8 bytes at a time... */
    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    /* ...then one optional 4-byte lane... */
    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    /* ...then byte by byte */
    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* final avalanche: spread the influence of every input bit */
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
     822             : 
     823             : 
     824           0 : XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
     825             : {
     826           0 :     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
     827             : 
     828           0 :     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
     829           0 :         return XXH64_digest_endian(state_in, XXH_littleEndian);
     830             :     else
     831           0 :         return XXH64_digest_endian(state_in, XXH_bigEndian);
     832             : }
     833             : 
     834             : 
     835             : /* **************************
     836             : *  Canonical representation
     837             : ****************************/
     838             : 
     839             : /*! Default XXH result types are basic unsigned 32 and 64 bits.
     840             : *   The canonical representation follows human-readable write convention, aka big-endian (large digits first).
     841             : *   These functions allow transformation of hash result into and from its canonical format.
     842             : *   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
     843             : */
     844             : 
     845           0 : XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
     846             : {
     847             :     XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
     848           0 :     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
     849           0 :     memcpy(dst, &hash, sizeof(*dst));
     850           0 : }
     851             : 
     852           0 : XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
     853             : {
     854             :     XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
     855           0 :     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
     856           0 :     memcpy(dst, &hash, sizeof(*dst));
     857           0 : }
     858             : 
/* Reads a canonical (big-endian) representation back into a native XXH32_hash_t. */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}
     863             : 
/* Reads a canonical (big-endian) representation back into a native XXH64_hash_t. */
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}

Generated by: LCOV version 1.11