| line |
stmt |
bran |
cond |
sub |
pod |
time |
code |
|
1
|
|
|
|
|
|
|
/* |
|
2
|
|
|
|
|
|
|
Copyright (c) 2013 Marek Majkowski |
|
3
|
|
|
|
|
|
|
|
|
4
|
|
|
|
|
|
|
Permission is hereby granted, free of charge, to any person obtaining a copy |
|
5
|
|
|
|
|
|
|
of this software and associated documentation files (the "Software"), to deal |
|
6
|
|
|
|
|
|
|
in the Software without restriction, including without limitation the rights |
|
7
|
|
|
|
|
|
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
|
8
|
|
|
|
|
|
|
copies of the Software, and to permit persons to whom the Software is |
|
9
|
|
|
|
|
|
|
furnished to do so, subject to the following conditions: |
|
10
|
|
|
|
|
|
|
|
|
11
|
|
|
|
|
|
|
The above copyright notice and this permission notice shall be included in |
|
12
|
|
|
|
|
|
|
all copies or substantial portions of the Software. |
|
13
|
|
|
|
|
|
|
|
|
14
|
|
|
|
|
|
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
|
15
|
|
|
|
|
|
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
|
16
|
|
|
|
|
|
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
|
17
|
|
|
|
|
|
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
|
18
|
|
|
|
|
|
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
|
19
|
|
|
|
|
|
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
|
20
|
|
|
|
|
|
|
THE SOFTWARE. |
|
21
|
|
|
|
|
|
|
|
|
22
|
|
|
|
|
|
|
|
|
23
|
|
|
|
|
|
|
Original location: |
|
24
|
|
|
|
|
|
|
https://github.com/majek/csiphash/ |
|
25
|
|
|
|
|
|
|
|
|
26
|
|
|
|
|
|
|
Solution inspired by code from: |
|
27
|
|
|
|
|
|
|
Samuel Neves (supercop/crypto_auth/siphash24/little) |
|
28
|
|
|
|
|
|
|
djb (supercop/crypto_auth/siphash24/little2) |
|
29
|
|
|
|
|
|
|
Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c) |
|
30
|
|
|
|
|
|
|
*/ |
|
31
|
|
|
|
|
|
|
|
|
32
|
|
|
|
|
|
|
#include <stdint.h>
|
33
|
|
|
|
|
|
|
|
|
34
|
|
|
|
|
|
|
/* NO-OP for little-endian platforms */
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__)
#  if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#    define _le64toh(x) ((uint64_t)(x))
#  endif
/* if __BYTE_ORDER__ is not predefined (like FreeBSD), use arch;
   _WIN32 covers MSVC, whose supported targets are all little-endian */
#elif defined(__i386) || defined(__x86_64) \
   || defined(__alpha) || defined(__vax) || defined(_WIN32)
#  define _le64toh(x) ((uint64_t)(x))
/* use __builtin_bswap64 if available */
#elif defined(__GNUC__) || defined(__clang__)
/* #ifdef takes a single identifier, so __has_builtin must be probed
   with defined() first and only invoked inside a nested #if */
#  if defined(__has_builtin)
#    if __has_builtin(__builtin_bswap64)
#      define _le64toh(x) __builtin_bswap64(x)
#    endif
#  endif
#endif

/* last resort (big-endian w/o __builtin_bswap64): portable byte swap */
#ifndef _le64toh
#  define _le64toh(x)                                      \
	(((uint64_t)(x) << 56) |                           \
	 (((uint64_t)(x) << 40) & 0x00ff000000000000ULL) | \
	 (((uint64_t)(x) << 24) & 0x0000ff0000000000ULL) | \
	 (((uint64_t)(x) <<  8) & 0x000000ff00000000ULL) | \
	 (((uint64_t)(x) >>  8) & 0x00000000ff000000ULL) | \
	 (((uint64_t)(x) >> 24) & 0x0000000000ff0000ULL) | \
	 (((uint64_t)(x) >> 40) & 0x000000000000ff00ULL) | \
	 ((uint64_t)(x) >> 56))
#endif
|
61
|
|
|
|
|
|
|
|
|
62
|
|
|
|
|
|
|
/* Rotate the 64-bit value x left by b bits; requires 0 < b < 64. */
#define ROTATE(x, b) (uint64_t)( ((x) << (b)) | ( (x) >> (64 - (b))) )

/*
 * One SipHash half-round over state words a,b,c,d with rotation
 * constants s and t.  Wrapped in do { } while (0) so the multi-statement
 * expansion acts as a single statement (safe in an unbraced if/else).
 */
#define HALF_ROUND(a,b,c,d,s,t)       \
	do {                          \
		a += b; c += d;       \
		b = ROTATE(b, s) ^ a; \
		d = ROTATE(d, t) ^ c; \
		a = ROTATE(a, 32);    \
	} while (0)

/* Two full SipHash rounds (the per-word "2" of SipHash-2-4). */
#define DOUBLE_ROUND(v0,v1,v2,v3)              \
	do {                                   \
		HALF_ROUND(v0,v1,v2,v3,13,16); \
		HALF_ROUND(v2,v1,v0,v3,17,21); \
		HALF_ROUND(v0,v1,v2,v3,13,16); \
		HALF_ROUND(v2,v1,v0,v3,17,21); \
	} while (0)
|
75
|
|
|
|
|
|
|
|
|
76
|
|
|
|
|
|
|
|
|
77
|
11
|
|
|
|
|
|
uint64_t siphash24(const void *src, unsigned long src_sz, const char key[16]) { |
|
78
|
11
|
|
|
|
|
|
const uint64_t *_key = (uint64_t *)key; |
|
79
|
11
|
|
|
|
|
|
uint64_t k0 = _le64toh(_key[0]); |
|
80
|
11
|
|
|
|
|
|
uint64_t k1 = _le64toh(_key[1]); |
|
81
|
11
|
|
|
|
|
|
uint64_t b = (uint64_t)src_sz << 56; |
|
82
|
11
|
|
|
|
|
|
const uint64_t *in = (uint64_t*)src; |
|
83
|
|
|
|
|
|
|
|
|
84
|
11
|
|
|
|
|
|
uint64_t v0 = k0 ^ 0x736f6d6570736575ULL; |
|
85
|
11
|
|
|
|
|
|
uint64_t v1 = k1 ^ 0x646f72616e646f6dULL; |
|
86
|
11
|
|
|
|
|
|
uint64_t v2 = k0 ^ 0x6c7967656e657261ULL; |
|
87
|
11
|
|
|
|
|
|
uint64_t v3 = k1 ^ 0x7465646279746573ULL; |
|
88
|
|
|
|
|
|
|
|
|
89
|
15
|
100
|
|
|
|
|
while (src_sz >= 8) { |
|
90
|
4
|
|
|
|
|
|
uint64_t mi = _le64toh(*in); |
|
91
|
4
|
|
|
|
|
|
in += 1; src_sz -= 8; |
|
92
|
4
|
|
|
|
|
|
v3 ^= mi; |
|
93
|
4
|
|
|
|
|
|
DOUBLE_ROUND(v0,v1,v2,v3); |
|
94
|
4
|
|
|
|
|
|
v0 ^= mi; |
|
95
|
|
|
|
|
|
|
} |
|
96
|
|
|
|
|
|
|
|
|
97
|
11
|
|
|
|
|
|
uint64_t t = 0; uint8_t *pt = (uint8_t *)&t; uint8_t *m = (uint8_t *)in; |
|
98
|
11
|
|
|
|
|
|
switch (src_sz) { |
|
99
|
0
|
|
|
|
|
|
case 7: pt[6] = m[6]; |
|
100
|
0
|
|
|
|
|
|
case 6: pt[5] = m[5]; |
|
101
|
4
|
|
|
|
|
|
case 5: pt[4] = m[4]; |
|
102
|
4
|
|
|
|
|
|
case 4: *((uint32_t*)&pt[0]) = *((uint32_t*)&m[0]); break; |
|
103
|
0
|
|
|
|
|
|
case 3: pt[2] = m[2]; |
|
104
|
1
|
|
|
|
|
|
case 2: pt[1] = m[1]; |
|
105
|
7
|
|
|
|
|
|
case 1: pt[0] = m[0]; |
|
106
|
|
|
|
|
|
|
} |
|
107
|
11
|
|
|
|
|
|
b |= _le64toh(t); |
|
108
|
|
|
|
|
|
|
|
|
109
|
11
|
|
|
|
|
|
v3 ^= b; |
|
110
|
11
|
|
|
|
|
|
DOUBLE_ROUND(v0,v1,v2,v3); |
|
111
|
11
|
|
|
|
|
|
v0 ^= b; v2 ^= 0xff; |
|
112
|
11
|
|
|
|
|
|
DOUBLE_ROUND(v0,v1,v2,v3); |
|
113
|
11
|
|
|
|
|
|
DOUBLE_ROUND(v0,v1,v2,v3); |
|
114
|
11
|
|
|
|
|
|
return (v0 ^ v1) ^ (v2 ^ v3); |
|
115
|
|
|
|
|
|
|
} |