// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the open-source version of Snappy.

#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <string>

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#include "snappy-stubs-public.h"

#if defined(__x86_64__)

// Enable 64-bit optimized versions of some routines.
#define ARCH_K8 1

#endif

// Needed by OS X, among others.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

// Pull in std::min, std::ostream, and the like. This is safe because this
// header file is never used from any public header files.
#pragma clang diagnostic ignored "-Wheader-hygiene"
using namespace std;

// The size of an array, if known at compile-time.
// Will give unexpected results if used on a pointer.
// We undefine it first, since some compilers already have a definition.
#ifdef ARRAYSIZE
#undef ARRAYSIZE
#endif
#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
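// Illustrative use (added note, not part of the original header): ARRAYSIZE
// yields the element count only when applied to an actual array, e.g.
//
//   static const int kSmallTable[] = {1, 2, 4, 8};
//   int n = ARRAYSIZE(kSmallTable);   // n == 4
//
// Applied to a pointer it merely divides the pointer size by the element
// size, which is rarely what was intended.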

// Static prediction hints.
#ifdef HAVE_BUILTIN_EXPECT
#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#else
#define PREDICT_FALSE(x) x
#define PREDICT_TRUE(x) x
#endif
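// Illustrative use (added note, not part of the original header): the hints
// typically mark rare conditions on hot paths so the compiler lays out the
// common case as the fall-through branch, e.g.
//
//   if (PREDICT_FALSE(bytes_left < needed)) return false;  // rare slow path
//   // ... common fast path continues here ...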

// This is only used for recomputing the tag byte table used during
// decompression; for simplicity we just remove it from the open-source
// version (anyone who wants to regenerate it can just do the call
// themselves within main()).
#define DEFINE_bool(flag_name, default_value, description) \
  bool FLAGS_ ## flag_name = default_value
#define DECLARE_bool(flag_name) \
  extern bool FLAGS_ ## flag_name
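// Illustrative use (added note; the flag name below is hypothetical, not part
// of Snappy): each stub expands to a plain global bool, mimicking gflags.
//
//   DEFINE_bool(dump_tag_table, false, "Recompute and print the tag table.");
//   ...
//   if (FLAGS_dump_tag_table) { /* regenerate the table inside main() */ }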

namespace snappy {

static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);

// Potentially unaligned loads and stores.

// x86 and PowerPC can simply do these loads and stores natively.

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)

#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))

#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
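// Illustrative effect (added note, not part of the original header): on these
// platforms a multi-byte value can be read from any byte offset in a single
// native access, e.g.
//
//   const char* p = ...;                 // not necessarily 4-byte aligned
//   uint32 v = UNALIGNED_LOAD32(p + 3);  // reads bytes p[3]..p[6]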

// ARMv7 and newer support native unaligned accesses, but only of 16-bit
// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
// do an unaligned read and rotate the words around a bit, or do the reads very
// slowly (trip through kernel mode). There's no simple #define that says just
// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
// sub-architectures.
//
// This is a mess, but there's not much we can do about it.

#elif defined(__arm__) && \
      !defined(__ARM_ARCH_4__) && \
      !defined(__ARM_ARCH_4T__) && \
      !defined(__ARM_ARCH_5__) && \
      !defined(__ARM_ARCH_5T__) && \
      !defined(__ARM_ARCH_5TE__) && \
      !defined(__ARM_ARCH_5TEJ__) && \
      !defined(__ARM_ARCH_6__) && \
      !defined(__ARM_ARCH_6J__) && \
      !defined(__ARM_ARCH_6K__) && \
      !defined(__ARM_ARCH_6Z__) && \
      !defined(__ARM_ARCH_6ZK__) && \
      !defined(__ARM_ARCH_6T2__)

#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))

#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))

// TODO(user): NEON supports unaligned 64-bit loads and stores.
// See if that would be more efficient on platforms supporting it,
// at least for copies.

inline uint64 UNALIGNED_LOAD64(const void *p) {
  uint64 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void UNALIGNED_STORE64(void *p, uint64 v) {
  memcpy(p, &v, sizeof v);
}

#else

// These functions are provided for architectures that don't support
// unaligned loads and stores.

inline uint16 UNALIGNED_LOAD16(const void *p) {
  uint16 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint32 UNALIGNED_LOAD32(const void *p) {
  uint32 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint64 UNALIGNED_LOAD64(const void *p) {
  uint64 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void UNALIGNED_STORE16(void *p, uint16 v) {
  memcpy(p, &v, sizeof v);
}

inline void UNALIGNED_STORE32(void *p, uint32 v) {
  memcpy(p, &v, sizeof v);
}

inline void UNALIGNED_STORE64(void *p, uint64 v) {
  memcpy(p, &v, sizeof v);
}

#endif

// This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
// on some platforms, in particular ARM.
inline void UnalignedCopy64(const void *src, void *dst) {
  if (sizeof(void *) == 8) {
    UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
  } else {
    const char *src_char = reinterpret_cast<const char *>(src);
    char *dst_char = reinterpret_cast<char *>(dst);

    UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
    UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
  }
}
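// Illustrative use (added note, not part of the original header; src, dst and
// len are hypothetical): copy loops can move data in 8-byte blocks even when
// fewer bytes are strictly needed, provided the destination has enough slack
// to absorb the overshoot, e.g.
//
//   for (int i = 0; i < len; i += 8) UnalignedCopy64(src + i, dst + i);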

// The following guarantees declaration of the byte swap functions.
#ifdef COMMON_BIGENDIAN

#ifdef HAVE_SYS_BYTEORDER_H
#include <sys/byteorder.h>
#endif

#ifdef HAVE_SYS_ENDIAN_H
#include <sys/endian.h>
#endif

#ifdef _MSC_VER
#include <stdlib.h>
#define bswap_16(x) _byteswap_ushort(x)
#define bswap_32(x) _byteswap_ulong(x)
#define bswap_64(x) _byteswap_uint64(x)

#elif defined(__APPLE__)
// Mac OS X / Darwin features
#include <libkern/OSByteOrder.h>
#define bswap_16(x) OSSwapInt16(x)
#define bswap_32(x) OSSwapInt32(x)
#define bswap_64(x) OSSwapInt64(x)

#elif defined(HAVE_BYTESWAP_H)
#include <byteswap.h>

#elif defined(bswap32)
// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
#define bswap_16(x) bswap16(x)
#define bswap_32(x) bswap32(x)
#define bswap_64(x) bswap64(x)

#elif defined(BSWAP_64)
// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
#define bswap_16(x) BSWAP_16(x)
#define bswap_32(x) BSWAP_32(x)
#define bswap_64(x) BSWAP_64(x)

#else

inline uint16 bswap_16(uint16 x) {
  return (x << 8) | (x >> 8);
}

inline uint32 bswap_32(uint32 x) {
  x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
  return (x >> 16) | (x << 16);
}

inline uint64 bswap_64(uint64 x) {
  x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
  x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
  return (x >> 32) | (x << 32);
}
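// Worked example (added note, not part of the original header):
// bswap_32(0x12345678) reorders the four bytes to 0x78563412, and
// bswap_16(0x1234) yields 0x3412.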

#endif

#endif // COMMON_BIGENDIAN

// Convert to little-endian storage, opposite of network format.
// Convert x from host to little endian: x = LittleEndian.FromHost(x);
// convert x from little endian to host: x = LittleEndian.ToHost(x);
//
// Store values into unaligned memory converting to little endian order:
//   LittleEndian.Store16(p, x);
//
// Load unaligned values stored in little endian converting to host order:
//   x = LittleEndian.Load16(p);
class LittleEndian {
 public:
  // Conversion functions.
#ifdef COMMON_BIGENDIAN

  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
  static uint16 ToHost16(uint16 x) { return bswap_16(x); }

  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
  static uint32 ToHost32(uint32 x) { return bswap_32(x); }

  static bool IsLittleEndian() { return false; }

#else // !defined(COMMON_BIGENDIAN)

  static uint16 FromHost16(uint16 x) { return x; }
  static uint16 ToHost16(uint16 x) { return x; }

  static uint32 FromHost32(uint32 x) { return x; }
  static uint32 ToHost32(uint32 x) { return x; }

  static bool IsLittleEndian() { return true; }

#endif // !defined(COMMON_BIGENDIAN)

  // Functions to do unaligned loads and stores in little-endian order.
  static uint16 Load16(const void *p) {
    return ToHost16(UNALIGNED_LOAD16(p));
  }

  static void Store16(void *p, uint16 v) {
    UNALIGNED_STORE16(p, FromHost16(v));
  }

  static uint32 Load32(const void *p) {
    return ToHost32(UNALIGNED_LOAD32(p));
  }

  static void Store32(void *p, uint32 v) {
    UNALIGNED_STORE32(p, FromHost32(v));
  }
};
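// Illustrative round trip (added example, not part of the original header):
// store a value in little-endian byte order at an unaligned address and read
// it back; the byte layout is the same regardless of the host's endianness.
inline bool LittleEndianExampleRoundTrip() {
  char buf[5];
  LittleEndian::Store32(buf + 1, 0x12345678u);  // unaligned destination
  return LittleEndian::Load32(buf + 1) == 0x12345678u &&
         static_cast<unsigned char>(buf[1]) == 0x78;  // LSB is stored first
}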

// Some bit-manipulation functions.
class Bits {
 public:
  // Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
  static int Log2Floor(uint32 n);

  // Return the first set least / most significant bit, 0-indexed. Returns an
  // undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
  // that it's 0-indexed.
  static int FindLSBSetNonZero(uint32 n);
  static int FindLSBSetNonZero64(uint64 n);

 private:
  DISALLOW_COPY_AND_ASSIGN(Bits);
};

#ifdef HAVE_BUILTIN_CTZ

inline int Bits::Log2Floor(uint32 n) {
  return n == 0 ? -1 : 31 ^ __builtin_clz(n);
}

inline int Bits::FindLSBSetNonZero(uint32 n) {
  return __builtin_ctz(n);
}

inline int Bits::FindLSBSetNonZero64(uint64 n) {
  return __builtin_ctzll(n);
}

#else // Portable versions.

inline int Bits::Log2Floor(uint32 n) {
  if (n == 0)
    return -1;
  int log = 0;
  uint32 value = n;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    uint32 x = value >> shift;
    if (x != 0) {
      value = x;
      log += shift;
    }
  }
  assert(value == 1);
  return log;
}

inline int Bits::FindLSBSetNonZero(uint32 n) {
  int rc = 31;
  for (int i = 4, shift = 1 << 4; i >= 0; --i) {
    const uint32 x = n << shift;
    if (x != 0) {
      n = x;
      rc -= shift;
    }
    shift >>= 1;
  }
  return rc;
}

// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
inline int Bits::FindLSBSetNonZero64(uint64 n) {
  const uint32 bottombits = static_cast<uint32>(n);
  if (bottombits == 0) {
    // Bottom bits are zero, so scan in top bits
    return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
  } else {
    return FindLSBSetNonZero(bottombits);
  }
}

#endif // End portable versions.
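// Illustrative fixed points (added example, not part of the original header):
// a few values on which both the __builtin_* and the portable implementations
// above agree, usable as a quick sanity check.
inline bool BitsExampleSanityCheck() {
  return Bits::Log2Floor(1) == 0 &&
         Bits::Log2Floor(16) == 4 &&
         Bits::FindLSBSetNonZero(0x80) == 7 &&
         Bits::FindLSBSetNonZero64(static_cast<uint64>(1) << 40) == 40;
}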

// Variable-length integer encoding.
class Varint {
 public:
  // Maximum length of the varint encoding of a uint32.
  static const int kMax32 = 5;

  // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
  // Never reads a character at or beyond limit. If a valid/terminated varint32
  // was found in the range, stores it in *OUTPUT and returns a pointer just
  // past the last byte of the varint32. Else returns NULL. On success,
  // "result <= limit".
  static const char* Parse32WithLimit(const char* ptr, const char* limit,
                                      uint32* OUTPUT);

  // REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
  // EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
  //            byte just past the last encoded byte.
  static char* Encode32(char* ptr, uint32 v);

  // EFFECTS    Appends the varint representation of "value" to "*s".
  static void Append32(string* s, uint32 value);
};

inline const char* Varint::Parse32WithLimit(const char* p,
                                            const char* l,
                                            uint32* OUTPUT) {
  const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
  const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
  uint32 b, result;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result = b & 127; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 7; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
  return NULL;  // Value is too long to be a varint32
 done:
  *OUTPUT = result;
  return reinterpret_cast<const char*>(ptr);
}

inline char* Varint::Encode32(char* sptr, uint32 v) {
  // Operate on characters as unsigneds
  unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
  static const int B = 128;
  if (v < (1<<7)) {
    *(ptr++) = v;
  } else if (v < (1<<14)) {
    *(ptr++) = v | B;
    *(ptr++) = v>>7;
  } else if (v < (1<<21)) {
    *(ptr++) = v | B;
    *(ptr++) = (v>>7) | B;
    *(ptr++) = v>>14;
  } else if (v < (1<<28)) {
    *(ptr++) = v | B;
    *(ptr++) = (v>>7) | B;
    *(ptr++) = (v>>14) | B;
    *(ptr++) = v>>21;
  } else {
    *(ptr++) = v | B;
    *(ptr++) = (v>>7) | B;
    *(ptr++) = (v>>14) | B;
    *(ptr++) = (v>>21) | B;
    *(ptr++) = v>>28;
  }
  return reinterpret_cast<char*>(ptr);
}
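// Illustrative round trip (added example, not part of the original header):
// encode a uint32 with Encode32 and parse it back with Parse32WithLimit.
inline bool VarintExampleRoundTrip(uint32 value) {
  char buf[Varint::kMax32];
  char* end = Varint::Encode32(buf, value);
  uint32 decoded = 0;
  const char* parsed = Varint::Parse32WithLimit(buf, end, &decoded);
  return parsed == end && decoded == value;
}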

// If you know the internal layout of the std::string in use, you can
// replace this function with one that resizes the string without
// filling the new space with zeros (if applicable) --
// it will be non-portable but faster.
inline void STLStringResizeUninitialized(string* s, size_t new_size) {
  s->resize(new_size);
}

// Return a mutable char* pointing to a string's internal buffer,
// which may not be null-terminated. Writing through this pointer will
// modify the string.
//
// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
// next call to a string method that invalidates iterators.
//
// As of 2006-04, there is no standard-blessed way of getting a
// mutable reference to a string's internal buffer. However, issue 530
// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
// proposes this as the method. It will officially be part of the standard
// for C++0x. This should already work on all current implementations.
inline char* string_as_array(string* str) {
  return str->empty() ? NULL : &*str->begin();
}
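// Illustrative use (added example, not part of the original header): grow a
// string and write raw bytes directly into its buffer, the typical pattern
// these two helpers are meant to support.
inline void ExampleAppendRaw(string* s, const char* data, size_t n) {
  if (n == 0) return;
  const size_t old_size = s->size();
  STLStringResizeUninitialized(s, old_size + n);
  memcpy(string_as_array(s) + old_size, data, n);
}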

} // namespace snappy

#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_