/*
Copyright 2011 Google Inc. All Rights Reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Various stubs for the open-source version of Snappy.

File modified for the Linux Kernel by
Zeev Tarantov

File modified for Sereal by
Steffen Mueller
*/

#ifndef CSNAPPY_INTERNAL_H_
#define CSNAPPY_INTERNAL_H_

#include "csnappy_compat.h"

#ifndef __KERNEL__
#include "csnappy_internal_userspace.h"
#include <string.h>
#else

#include <linux/types.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

#if (defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN)) || \
    (!defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN))
#error either __LITTLE_ENDIAN or __BIG_ENDIAN must be defined
#endif
#if defined(__LITTLE_ENDIAN)
#define __BYTE_ORDER __LITTLE_ENDIAN
#else
#define __BYTE_ORDER __BIG_ENDIAN
#endif
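
/*
 * Note (added): unlike userspace <endian.h>, the kernel's <asm/byteorder.h>
 * defines exactly one of __LITTLE_ENDIAN / __BIG_ENDIAN and no __BYTE_ORDER,
 * so a usable __BYTE_ORDER is synthesized above for the checks further down.
 */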

#ifdef DEBUG
#define DCHECK(cond)	if (!(cond)) \
				printk(KERN_DEBUG "assert failed @ %s:%i\n", \
					__FILE__, __LINE__)
#else
#define DCHECK(cond)
#endif
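
/*
 * DCHECK only logs in DEBUG builds and expands to nothing otherwise, so its
 * argument must be free of side effects. Illustrative use (hypothetical call
 * site, not from this header):
 *
 *     DCHECK_LE(literal_len, (uint32_t)(ip_limit - ip));
 */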

#define UNALIGNED_LOAD16(_p)		get_unaligned((const uint16_t *)(_p))
#define UNALIGNED_LOAD32(_p)		get_unaligned((const uint32_t *)(_p))
#define UNALIGNED_LOAD64(_p)		get_unaligned((const uint64_t *)(_p))
#define UNALIGNED_STORE16(_p, _val)	put_unaligned((_val), (uint16_t *)(_p))
#define UNALIGNED_STORE32(_p, _val)	put_unaligned((_val), (uint32_t *)(_p))
#define UNALIGNED_STORE64(_p, _val)	put_unaligned((_val), (uint64_t *)(_p))

#define FindLSBSetNonZero(n)	__builtin_ctz(n)
#define FindLSBSetNonZero64(n)	__builtin_ctzll(n)
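
/*
 * Worked example: FindLSBSetNonZero(12) == 2, since 12 == 0b1100 and
 * __builtin_ctz counts trailing zero bits. The argument must be non-zero;
 * __builtin_ctz(0) is undefined, hence the "NonZero" in the name.
 */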

#endif /* __KERNEL__ */

#if (!defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)) || !defined(__BYTE_ORDER)
# error either __LITTLE_ENDIAN or __BIG_ENDIAN, plus __BYTE_ORDER must be defined
#endif

#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) || \
    defined(__ARMV6__) || \
    defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || \
    defined(__ARM_ARCH_7M__)
# define ARCH_ARM_HAVE_UNALIGNED
#endif

static INLINE void UnalignedCopy64(const void *src, void *dst) {
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(ARCH_ARM_HAVE_UNALIGNED) || defined(__aarch64__)
	if ((sizeof(void *) == 8) || (sizeof(long) == 8)) {
		UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
	} else {
		/* This can be more efficient than UNALIGNED_LOAD64 +
		   UNALIGNED_STORE64 on some platforms, in particular ARM. */
		const uint8_t *src_bytep = (const uint8_t *)src;
		uint8_t *dst_bytep = (uint8_t *)dst;

		UNALIGNED_STORE32(dst_bytep, UNALIGNED_LOAD32(src_bytep));
		UNALIGNED_STORE32(dst_bytep + 4, UNALIGNED_LOAD32(src_bytep + 4));
	}
#else
	/* Byte-by-byte fallback for targets without safe unaligned access. */
	const uint8_t *src_bytep = (const uint8_t *)src;
	uint8_t *dst_bytep = (uint8_t *)dst;
	dst_bytep[0] = src_bytep[0];
	dst_bytep[1] = src_bytep[1];
	dst_bytep[2] = src_bytep[2];
	dst_bytep[3] = src_bytep[3];
	dst_bytep[4] = src_bytep[4];
	dst_bytep[5] = src_bytep[5];
	dst_bytep[6] = src_bytep[6];
	dst_bytep[7] = src_bytep[7];
#endif
}
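
/*
 * UnalignedCopy64 copies exactly 8 bytes between possibly unaligned pointers.
 * A decompressor typically calls it in 8-byte steps, e.g. (hypothetical
 * sketch, not the loop in this codebase):
 *
 *     while (len > 0) {
 *         UnalignedCopy64(op - offset, op);
 *         op += 8;
 *         len -= 8;
 *     }
 *
 * which assumes the output buffer has slop past the end and that
 * offset >= 8, so successive 8-byte chunks do not overlap.
 */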

#if defined(__arm__)
#if defined(ARCH_ARM_HAVE_UNALIGNED)
static INLINE uint32_t get_unaligned_le(const void *p, uint32_t n)
{
	/* n is in [1,4]; for n == 4 the shift amount is 32, which is undefined
	   in portable C but yields 0 with ARM's shift semantics, so the mask
	   comes out as ~0u as intended. */
	uint32_t wordmask = (1U << (8 * n)) - 1;
	return get_unaligned_le32(p) & wordmask;
}
#else
extern uint32_t get_unaligned_le_armv5(const void *p, uint32_t n);
#define get_unaligned_le get_unaligned_le_armv5
#endif
#else
static INLINE uint32_t get_unaligned_le(const void *p, uint32_t n)
{
	/* Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits */
	static const uint32_t wordmask[] = {
		0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
	};
	return get_unaligned_le32(p) & wordmask[n];
}
#endif
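
/*
 * Worked example: with bytes { 0x34, 0x12, 0xAA, 0xBB } at p,
 * get_unaligned_le32(p) reads 0xBBAA1234, so get_unaligned_le(p, 2) masks
 * with 0xffff and returns 0x1234 -- the first n bytes interpreted as a
 * little-endian integer.
 */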

#define DCHECK_EQ(a, b)	DCHECK(((a) == (b)))
#define DCHECK_NE(a, b)	DCHECK(((a) != (b)))
#define DCHECK_GT(a, b)	DCHECK(((a) > (b)))
#define DCHECK_GE(a, b)	DCHECK(((a) >= (b)))
#define DCHECK_LT(a, b)	DCHECK(((a) < (b)))
#define DCHECK_LE(a, b)	DCHECK(((a) <= (b)))

enum {
	LITERAL = 0,
	COPY_1_BYTE_OFFSET = 1,	/* 3 bit length + 3 bits of offset in opcode */
	COPY_2_BYTE_OFFSET = 2,
	COPY_4_BYTE_OFFSET = 3
};
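
/*
 * These values are the two low bits of each element's tag byte in the Snappy
 * wire format. Illustrative decode for COPY_1_BYTE_OFFSET: tag 0x25 ==
 * 0b00100101 gives type 01, length (0b001 + 4) == 5, and offset high bits
 * 0b001; with a following offset byte of 0x10, the copy offset is
 * (1 << 8) | 0x10 == 272.
 */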

#endif /* CSNAPPY_INTERNAL_H_ */