LCOV - code coverage report
Current view: top level - lib - murmur3.c (source / functions) Hit Total Coverage
Test: coverage.info.cleaned Lines: 88 92 95.7 %
Date: 2023-04-18 16:19:03 Functions: 2 3 66.7 %

          Line data    Source code
       1             : //-----------------------------------------------------------------------------
       2             : // MurmurHash3 was written by Austin Appleby, and is placed in the public
       3             : // domain. The author hereby disclaims copyright to this source code.
       4             : 
       5             : // Note - The x86 and x64 versions do _not_ produce the same results, as the
       6             : // algorithms are optimized for their respective platforms. You can still
       7             : // compile and run any of them on any platform, but your performance with the
       8             : // non-native version will be less than optimal.
       9             : 
      10             : #include <stdint.h>
      11             : #include <string.h>
      12             : 
      13             : #include "murmur3.h"
      14             : 
      15             : //-----------------------------------------------------------------------------
      16             : // Platform-specific functions and macros
      17             : 
      18             : #ifdef __GNUC__
      19             : #define FORCE_INLINE __attribute__((always_inline)) inline
      20             : #else                /*  */
      21             : #define FORCE_INLINE
      22             : #endif                /*  */
      23             : 
      24             : #ifndef __x86_64__
      25             : static FORCE_INLINE uint32_t rotl32(uint32_t x, int8_t r)
      26             : {
      27             :     return (x << r) | (x >> (32 - r));
      28             : }
      29             : #endif
      30             : 
      31             : static FORCE_INLINE uint64_t rotl64(uint64_t x, int8_t r)
      32             : {
      33       17025 :     return (x << r) | (x >> (64 - r));
      34             : }
      35             : 
      36             : #define BIG_CONSTANT(x) (x##LLU)
      37             : 
      38             : static uint32_t seed_value = 0xdeadbeef;
      39             : 
      40             : //-----------------------------------------------------------------------------
      41             : // Finalization mix - force all bits of a hash block to avalanche
      42             : #ifndef __x86_64__
      43             : static FORCE_INLINE uint32_t fmix32(uint32_t h)
      44             : {
      45             :     h ^= h >> 16;
      46             :     h *= 0x85ebca6b;
      47             :     h ^= h >> 13;
      48             :     h *= 0xc2b2ae35;
      49             :     h ^= h >> 16;
      50             :     return h;
      51             : }
      52             : #endif
      53             : 
      54             : //----------
      55           0 : FORCE_INLINE uint64_t murmur3_fmix64(uint64_t k)
      56             : {
      57       16252 :     k ^= k >> 33;
      58       16252 :     k *= BIG_CONSTANT(0xff51afd7ed558ccd);
      59       16252 :     k ^= k >> 33;
      60       16252 :     k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
      61       16252 :     k ^= k >> 33;
      62           0 :     return k;
      63             : }
      64             : 
      65             : 
      66             : //-----------------------------------------------------------------------------
      67             : #ifndef __x86_64__
      68             : FORCE_INLINE static void
      69             : MurmurHash3_x86_32(const void *key, size_t len, uint32_t seed, void *out)
      70             : {
      71             :     const uint8_t *data = (const uint8_t *)key;
      72             :     const size_t nblocks = len / 4;
      73             :     size_t i;
      74             :     uint32_t h1 = seed;
      75             :     uint32_t c1 = 0xcc9e2d51;
      76             :     uint32_t c2 = 0x1b873593;
      77             : 
      78             :     //----------
      79             :     // body
      80             :     const uint32_t *blocks = (const uint32_t *)(data + nblocks * 4);
      81             :     for (i = -nblocks; i; i++) {
      82             :         uint32_t k1;
      83             : 
      84             :         memcpy(&k1, blocks + i, sizeof(k1));
      85             : 
      86             :         k1 *= c1;
      87             :         k1 = rotl32(k1, 15);
      88             :         k1 *= c2;
      89             :         h1 ^= k1;
      90             :         h1 = rotl32(h1, 13);
      91             :         h1 = h1 * 5 + 0xe6546b64;
      92             :     }
      93             : 
      94             :     //----------
      95             :     // tail
      96             :     const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
      97             :     uint32_t k1 = 0;
      98             :     switch (len & 3) {
      99             :     case 3:
     100             :         k1 ^= (uint32_t)tail[2] << 16;
     101             :         /* fallthrough */
     102             :     case 2:
     103             :         k1 ^= (uint32_t)tail[1] << 8;
     104             :         /* fallthrough */
     105             :     case 1:
     106             :         k1 ^= (uint32_t)tail[0];
     107             :         k1 *= c1;
     108             :         k1 = rotl32(k1, 15);
     109             :         k1 *= c2;
     110             :         h1 ^= k1;
     111             :     };
     112             : 
     113             :     //----------
     114             :     // finalization
     115             :     h1 ^= (uint32_t)len;
     116             :     h1 = fmix32(h1);
     117             :     *(uint32_t *) out = h1;
     118             : }
     119             : 
     120             : 
     121             : //-----------------------------------------------------------------------------
     122             : FORCE_INLINE static void MurmurHash3_x86_128 (const void *key, const size_t len,
     123             :                                               uint32_t seed, void *out)
     124             : {
     125             :     size_t i;
     126             :     const uint8_t * data = (const uint8_t*)key;
     127             :     const size_t nblocks = len / 16;
     128             : 
     129             :     uint32_t h1 = seed;
     130             :     uint32_t h2 = seed;
     131             :     uint32_t h3 = seed;
     132             :     uint32_t h4 = seed;
     133             : 
     134             :     const uint32_t c1 = 0x239b961b;
     135             :     const uint32_t c2 = 0xab0e9789;
     136             :     const uint32_t c3 = 0x38b34ae5;
     137             :     const uint32_t c4 = 0xa1e38b93;
     138             : 
     139             :     //----------
     140             :     // body
     141             : 
     142             :     const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);
     143             : 
     144             :     for(i = -nblocks; i; i++) {
     145             :         uint32_t k1, k2, k3, k4;
     146             : 
     147             :         memcpy(&k1, blocks + i * 4 + 0, sizeof(k1));
     148             :         memcpy(&k2, blocks + i * 4 + 1, sizeof(k2));
     149             :         memcpy(&k3, blocks + i * 4 + 2, sizeof(k3));
     150             :         memcpy(&k4, blocks + i * 4 + 3, sizeof(k4));
     151             : 
     152             :         k1 *= c1; k1 = rotl32(k1,15); k1 *= c2; h1 ^= k1;
     153             :         h1 = rotl32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;
     154             :         k2 *= c2; k2 = rotl32(k2,16); k2 *= c3; h2 ^= k2;
     155             :         h2 = rotl32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;
     156             :         k3 *= c3; k3 = rotl32(k3,17); k3 *= c4; h3 ^= k3;
     157             :         h3 = rotl32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;
     158             :         k4 *= c4; k4 = rotl32(k4,18); k4 *= c1; h4 ^= k4;
     159             :         h4 = rotl32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
     160             :     }
     161             : 
     162             :     //----------
     163             :     // tail
     164             : 
     165             :     const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
     166             : 
     167             :     uint32_t k1 = 0;
     168             :     uint32_t k2 = 0;
     169             :     uint32_t k3 = 0;
     170             :     uint32_t k4 = 0;
     171             : 
     172             :     switch(len & 15) {
     173             :     case 15: k4 ^= (uint32_t)tail[14] << 16; /* fallthrough */
     174             :     case 14: k4 ^= (uint32_t)tail[13] << 8;  /* fallthrough */
     175             :     case 13: k4 ^= (uint32_t)tail[12] << 0;
     176             :              k4 *= c4; k4 = rotl32(k4,18); k4 *= c1; h4 ^= k4;
     177             :              /* fallthrough */
     178             :     case 12: k3 ^= (uint32_t)tail[11] << 24; /* fallthrough */
     179             :     case 11: k3 ^= (uint32_t)tail[10] << 16; /* fallthrough */
     180             :     case 10: k3 ^= (uint32_t)tail[ 9] << 8;  /* fallthrough */
     181             :     case  9: k3 ^= (uint32_t)tail[ 8] << 0;
     182             :              k3 *= c3; k3 = rotl32(k3,17); k3 *= c4; h3 ^= k3;
     183             :              /* fallthrough */
     184             :     case  8: k2 ^= (uint32_t)tail[ 7] << 24; /* fallthrough */
     185             :     case  7: k2 ^= (uint32_t)tail[ 6] << 16; /* fallthrough */
     186             :     case  6: k2 ^= (uint32_t)tail[ 5] << 8;  /* fallthrough */
     187             :     case  5: k2 ^= (uint32_t)tail[ 4] << 0;
     188             :              k2 *= c2; k2 = rotl32(k2,16); k2 *= c3; h2 ^= k2;
     189             :              /* fallthrough */
     190             :     case  4: k1 ^= (uint32_t)tail[ 3] << 24; /* fallthrough */
     191             :     case  3: k1 ^= (uint32_t)tail[ 2] << 16; /* fallthrough */
     192             :     case  2: k1 ^= (uint32_t)tail[ 1] << 8;  /* fallthrough */
     193             :     case  1: k1 ^= (uint32_t)tail[ 0] << 0;
     194             :              k1 *= c1; k1 = rotl32(k1,15); k1 *= c2; h1 ^= k1;
     195             :     }
     196             : 
     197             :     //----------
     198             :     // finalization
     199             : 
     200             :     h1 ^= (uint32_t)len; h2 ^= (uint32_t)len;
     201             :     h3 ^= (uint32_t)len; h4 ^= (uint32_t)len;
     202             : 
     203             :     h1 += h2; h1 += h3; h1 += h4;
     204             :     h2 += h1; h3 += h1; h4 += h1;
     205             : 
     206             :     h1 = fmix32(h1);
     207             :     h2 = fmix32(h2);
     208             :     h3 = fmix32(h3);
     209             :     h4 = fmix32(h4);
     210             : 
     211             :     h1 += h2; h1 += h3; h1 += h4;
     212             :     h2 += h1; h3 += h1; h4 += h1;
     213             : 
     214             :     ((uint32_t*)out)[0] = h1;
     215             :     ((uint32_t*)out)[1] = h2;
     216             :     ((uint32_t*)out)[2] = h3;
     217             :     ((uint32_t*)out)[3] = h4;
     218             : }
     219             : #endif
     220             : 
     221             : //-----------------------------------------------------------------------------
     222             : FORCE_INLINE static void
     223             : MurmurHash3_x64_128(const void *key, const size_t len, const uint32_t seed,
     224             :             void *out)
     225             : {
     226        8126 :     const uint8_t *data = (const uint8_t *)key;
     227        8126 :     const size_t nblocks = len / 16;
     228             :     size_t i;
     229        8126 :     uint64_t h1 = seed;
     230        8126 :     uint64_t h2 = seed;
     231        8126 :     uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
     232        8126 :     uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
     233             : 
     234             :     //----------
     235             :     // body
     236        8126 :     const uint64_t *blocks = (const uint64_t *)(data);
     237        9276 :     for (i = 0; i < nblocks; i++) {
     238             :         uint64_t k1, k2;
     239             : 
     240        1150 :         memcpy(&k1, blocks + i * 2 + 0, sizeof(k1));
     241        1150 :         memcpy(&k2, blocks + i * 2 + 1, sizeof(k2));
     242             : 
     243        1150 :         k1 *= c1;
     244        1150 :         k1 = rotl64(k1, 31);
     245        1150 :         k1 *= c2;
     246        1150 :         h1 ^= k1;
     247        1150 :         h1 = rotl64(h1, 27);
     248        1150 :         h1 += h2;
     249        1150 :         h1 = h1 * 5 + 0x52dce729;
     250        1150 :         k2 *= c2;
     251        1150 :         k2 = rotl64(k2, 33);
     252        1150 :         k2 *= c1;
     253        1150 :         h2 ^= k2;
     254        1150 :         h2 = rotl64(h2, 31);
     255        1150 :         h2 += h1;
     256        1150 :         h2 = h2 * 5 + 0x38495ab5;
     257             :     }
     258             : 
     259             :         //----------
     260             :         // tail
     261        8126 :     const uint8_t *tail = (const uint8_t *)(data + nblocks * 16);
     262        8126 :     uint64_t k1 = 0;
     263        8126 :     uint64_t k2 = 0;
     264        8126 :     switch (len & 15) {
     265           0 :     case 15:
     266           0 :         k2 ^= (uint64_t) (tail[14]) << 48; /* fallthrough */
     267        1394 :     case 14:
     268        1394 :         k2 ^= (uint64_t) (tail[13]) << 40; /* fallthrough */
     269        1578 :     case 13:
     270        1578 :         k2 ^= (uint64_t) (tail[12]) << 32; /* fallthrough */
     271        2662 :     case 12:
     272        2662 :         k2 ^= (uint64_t) (tail[11]) << 24; /* fallthrough */
     273        3016 :     case 11:
     274        3016 :         k2 ^= (uint64_t) (tail[10]) << 16; /* fallthrough */
     275        3279 :     case 10:
     276        3279 :         k2 ^= (uint64_t) (tail[9]) << 8; /* fallthrough */
     277        4323 :     case 9:
     278        4323 :         k2 ^= (uint64_t) (tail[8]) << 0;
     279        4323 :         k2 *= c2;
     280        4323 :         k2 = rotl64(k2, 33);
     281        4323 :         k2 *= c1;
     282        4323 :         h2 ^= k2;
     283             :         /* fallthrough */
     284        5319 :     case 8:
     285        5319 :         k1 ^= (uint64_t) (tail[7]) << 56;
     286             :         /* fallthrough */
     287        5416 :     case 7:
     288        5416 :         k1 ^= (uint64_t) (tail[6]) << 48;
     289             :         /* fallthrough */
     290        6479 :     case 6:
     291        6479 :         k1 ^= (uint64_t) (tail[5]) << 40;
     292             :         /* fallthrough */
     293        6746 :     case 5:
     294        6746 :         k1 ^= (uint64_t) (tail[4]) << 32;
     295             :         /* fallthrough */
     296        7402 :     case 4:
     297        7402 :         k1 ^= (uint64_t) (tail[3]) << 24;
     298             :         /* fallthrough */
     299        7406 :     case 3:
     300        7406 :         k1 ^= (uint64_t) (tail[2]) << 16;
     301             :         /* fallthrough */
     302        8015 :     case 2:
     303        8015 :         k1 ^= (uint64_t) (tail[1]) << 8;
     304             :         /* fallthrough */
     305        8102 :     case 1:
     306        8102 :         k1 ^= (uint64_t) (tail[0]) << 0;
     307        8102 :         k1 *= c1;
     308        8102 :         k1 = rotl64(k1, 31);
     309        8102 :         k1 *= c2;
     310        8102 :         h1 ^= k1;
     311             :     };
     312             : 
     313             :     //----------
     314             :     // finalization
     315        8126 :     h1 ^= (uint64_t)len;
     316        8126 :     h2 ^= (uint64_t)len;
     317        8126 :     h1 += h2;
     318        8126 :     h2 += h1;
     319        8126 :     h1 = murmur3_fmix64(h1);
     320        8126 :     h2 = murmur3_fmix64(h2);
     321        8126 :     h1 += h2;
     322        8126 :     h2 += h1;
     323        8126 :     ((uint64_t *) out)[0] = h1;
     324        8126 :     ((uint64_t *) out)[1] = h2;
     325        8126 : }
     326             : 
     327             : 
     328             : //-----------------------------------------------------------------------------
     329             : unsigned int
     330        8126 : murmur3_simple(const void *keyptr)
     331             : {
     332        8126 :     size_t len = strlen((char *)keyptr);
     333             : #ifdef __x86_64__
     334             :     uint64_t hash[2];
     335        8126 :     MurmurHash3_x64_128(keyptr, len, seed_value, hash);
     336        8126 :     return (unsigned int)hash[1];
     337             : #else
     338             :     if (len <= 16) {
     339             :         unsigned int hash;
     340             :         MurmurHash3_x86_32(keyptr, len, seed_value, &hash);
     341             :         return hash;
     342             :     }
     343             : 
     344             :     unsigned int hash[4];
     345             :     MurmurHash3_x86_128(keyptr, len, seed_value, hash);
     346             :     return hash[3];
     347             : #endif
     348             : }
     349             : 
     350             : void
     351          92 : murmur3_set_seed(const uint32_t seed)
     352             : {
     353          92 :     seed_value = seed;
     354          92 : }

Generated by: LCOV version 1.15-2-gb9d6727