// Copyright 2008 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Internals shared between the Snappy implementation and its unittest.

#ifndef UTIL_SNAPPY_SNAPPY_INTERNAL_H_
#define UTIL_SNAPPY_SNAPPY_INTERNAL_H_

#include "snappy-stubs-internal.h"

namespace snappy {
namespace internal {

class WorkingMemory {
 public:
  // cppcheck-suppress uninitMemberVar
  WorkingMemory() : large_table_(NULL) { }
  ~WorkingMemory() { delete[] large_table_; }

  // Allocates and clears a hash table using memory in "*this",
  // stores the number of buckets in "*table_size" and returns a pointer to
  // the base of the hash table.
  uint16* GetHashTable(size_t input_size, int* table_size);

 private:
  uint16 small_table_[1<<10];    // 2KB
  uint16* large_table_;          // Allocated only when needed

  DISALLOW_COPY_AND_ASSIGN(WorkingMemory);
};
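
// Illustrative usage sketch (not part of the library; "input_size" is a
// hypothetical caller-supplied value):
//
//   snappy::internal::WorkingMemory wmem;
//   int table_size;
//   uint16* table = wmem.GetHashTable(input_size, &table_size);
//   // "table" now points at "table_size" zeroed buckets, suitable for
//   // passing to CompressFragment() below, which requires the table to be
//   // zero-initialized and its size to be a power of two.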

// Flat array compression that does not emit the "uncompressed length"
// prefix. Compresses "input" string to the "*op" buffer.
//
// REQUIRES: "input_length <= kBlockSize"
// REQUIRES: "op" points to an array of memory that is at least
// "MaxCompressedLength(input_length)" in size.
// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
// REQUIRES: "table_size" is a power of two
//
// Returns an "end" pointer into "op" buffer.
// "end - op" is the compressed size of "input".
char* CompressFragment(const char* input,
                       size_t input_length,
                       char* op,
                       uint16* table,
                       const int table_size);
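
// Illustrative call sketch (hypothetical caller; "table" and "table_size"
// come from GetHashTable() above, and MaxCompressedLength() is the public
// Snappy API function used to size the output buffer):
//
//   char* scratch = new char[MaxCompressedLength(input_length)];
//   char* end = CompressFragment(input, input_length, scratch,
//                                table, table_size);
//   size_t compressed_size = end - scratch;  // compressed bytes written
//   delete[] scratch;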

// Return the largest n such that
//
//   s1[0,n-1] == s2[0,n-1]
//   and n <= (s2_limit - s2).
//
// Does not read *s2_limit or beyond.
// Does not read *(s1 + (s2_limit - s2)) or beyond.
// Requires that s2_limit >= s2.
//
// Separate implementation for x86_64, for speed.  Uses the fact that
// x86_64 is little endian.
#if defined(ARCH_K8)
static inline int FindMatchLength(const char* s1,
                                  const char* s2,
                                  const char* s2_limit) {
  assert(s2_limit >= s2);
  int matched = 0;

  // Find out how long the match is. We loop over the data 64 bits at a
  // time until we find a 64-bit block that doesn't match; then we find
  // the first non-matching bit and use that to calculate the total
  // length of the match.
  while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
    if (PREDICT_FALSE(UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
      s2 += 8;
      matched += 8;
    } else {
      // On current (mid-2008) Opteron models there is a 3% more
      // efficient code sequence to find the first non-matching byte.
      // However, what follows is ~10% better on Intel Core 2 and newer,
      // and we expect AMD's bsf instruction to improve.
      uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
      int matching_bits = Bits::FindLSBSetNonZero64(x);
      matched += matching_bits >> 3;
      return matched;
    }
  }
  while (PREDICT_TRUE(s2 < s2_limit)) {
    if (PREDICT_TRUE(s1[matched] == *s2)) {
      ++s2;
      ++matched;
    } else {
      return matched;
    }
  }
  return matched;
}
#else
static inline int FindMatchLength(const char* s1,
                                  const char* s2,
                                  const char* s2_limit) {
  // Implementation based on the x86-64 version, above.
  assert(s2_limit >= s2);
  int matched = 0;

  while (s2 <= s2_limit - 4 &&
         UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
    s2 += 4;
    matched += 4;
  }
  if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
    uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
    int matching_bits = Bits::FindLSBSetNonZero(x);
    matched += matching_bits >> 3;
  } else {
    while ((s2 < s2_limit) && (s1[matched] == *s2)) {
      ++s2;
      ++matched;
    }
  }
  return matched;
}
#endif
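
// Worked example (illustrative, not part of the library):
//
//   const char s1[] = "abcdefgh";
//   const char s2[] = "abcdXfgh";
//   int n = FindMatchLength(s1, s2, s2 + 8);
//   // n == 4: the strings agree on bytes 0..3 and differ at index 4,
//   // which the 64-bit path finds by XOR-ing the two loads and locating
//   // the lowest set bit (bit 32..39 here, so matching_bits >> 3 == 4).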

}  // end namespace internal
}  // end namespace snappy

#endif  // UTIL_SNAPPY_SNAPPY_INTERNAL_H_