// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

// This encoding algorithm, which prioritizes speed over output size, is
// based on Snappy's LZ77-style encoder: github.com/golang/snappy
const (
	tableBits  = 14             // Bits used in the table.
	tableSize  = 1 << tableBits // Size of the table.
	tableMask  = tableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
	tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
)
func load32(b []byte, i int32) uint32 {
	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load64(b []byte, i int32) uint64 {
	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
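// Editorial note: the b[i : i+4 : len(b)] and b[i : i+8 : len(b)] expressions
// above are three-index slice expressions. After them, len(b) is exactly 4 or
// 8, so the compiler can prove every constant index in the return statement is
// in bounds and skip the bounds checks. A rough sketch of an equivalent
// little-endian load without the hint (using the standard encoding/binary
// package) would be:
//
//	v := binary.LittleEndian.Uint32(b[i:]) // same result, relies on its own bounds-check elimination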
func hash(u uint32) uint32 {
	return (u * 0x1e35a7bd) >> tableShift
}
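// Editorial note: this is multiplicative hashing. The multiply (which wraps
// mod 2^32) mixes the low input bits into the high output bits, and the shift
// by tableShift (32 - 14 = 18) keeps only the top tableBits bits, so the
// result is always a valid table index:
//
//	h := hash(u) // 0 <= h < tableSize == 1<<14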
// These constants are defined by the Snappy implementation so that its
// assembly implementation can fast-path some 16-bytes-at-a-time copies. They
// aren't necessary in the pure Go implementation, as we don't use those same
// optimizations, but using the same thresholds doesn't really hurt.
const (
	inputMargin            = 16 - 1
	minNonLiteralBlockSize = 1 + 1 + inputMargin
)
type tableEntry struct {
	val    uint32 // Value at destination
	offset int32
}
// deflateFast maintains the table for matches,
// and the previous byte block for cross block matching.
type deflateFast struct {
	table [tableSize]tableEntry
	prev  []byte // Previous block, zero length if unknown.
	cur   int32  // Current match offset.
}
func newDeflateFast() *deflateFast {
	return &deflateFast{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}
}
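// Usage sketch (editorial; firstBlock and secondBlock are placeholder byte
// slices, not names from this file): the deflate compressor feeds consecutive
// blocks of at most maxStoreBlockSize bytes to one encoder, so matches can
// reach back into the previous block:
//
//	e := newDeflateFast()
//	tokens := e.encode(nil, firstBlock)
//	tokens = e.encode(tokens[:0], secondBlock) // may match into firstBlock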
// encode encodes a block given in src and appends tokens
// to dst and returns the result.
func (e *deflateFast) encode(dst []token, src []byte) []token {
	// Ensure that e.cur doesn't wrap.
	if e.cur > 1<<30 {
		e.resetAll()
	}

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		e.cur += maxStoreBlockSize
		e.prev = e.prev[:0]
		return emitLiteral(dst, src)
	}
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)
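	// Editorial note: with inputMargin == 15, the main loop only works at
	// positions below sLimit, so speculative reads such as load32(src, nextS)
	// and load64(src, s-1) always stay within src without per-read length
	// checks.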
	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := int32(0)
	s := int32(0)
	cv := load32(src, s)
	nextHash := hash(cv)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
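		// Editorial worked example: skip starts at 32, so skip>>5 == 1 and
		// the scan advances one byte per lookup. After 32 fruitless lookups
		// skip reaches 64 and the step becomes 2; 16 more lookups push it to
		// 96 and the step becomes 3, and so on, so the scan accelerates on
		// data that produces no matches.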
		skip := int32(32)

		nextS := s
		var candidate tableEntry
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = e.table[nextHash&tableMask]
			now := load32(src, nextS)
			e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
			nextHash = hash(now)

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || cv != candidate.val {
				// Out of range or not matched.
				cv = now
				continue
			}
			break
		}
		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		dst = emitLiteral(dst, src[nextEmit:s])
		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			s += 4
			t := candidate.offset - e.cur + 4
			l := e.matchLen(s, t, src)

			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
			dst = append(dst, matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)))
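			// Editorial note: the initial 4 matched bytes are not counted in
			// l, so the true match length is l+4. With flate's
			// baseMatchLength == 3 and baseMatchOffset == 1, a 10-byte match
			// at distance 100 is encoded as matchToken(10-3, 100-1).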
			s += l
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load64(src, s-1)
			prevHash := hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
			x >>= 8
			currHash := hash(uint32(x))
			candidate = e.table[currHash&tableMask]
			e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}
			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x) != candidate.val {
				cv = uint32(x >> 8)
				nextHash = hash(cv)
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		dst = emitLiteral(dst, src[nextEmit:])
	}
	e.cur += int32(len(src))
	e.prev = e.prev[:len(src)]
	copy(e.prev, src)
	return dst
}
func emitLiteral(dst []token, lit []byte) []token {
	for _, v := range lit {
		dst = append(dst, literalToken(uint32(v)))
	}
	return dst
}
// matchLen returns the match length between src[s:] and src[t:].
// t can be negative to indicate the match is starting in e.prev.
// We assume that src[s-4:s] and src[t-4:t] already match.
func (e *deflateFast) matchLen(s, t int32, src []byte) int32 {
	s1 := int(s) + maxMatchLength - 4
	if s1 > len(src) {
		s1 = len(src)
	}
	// If we are inside the current block
	if t >= 0 {
		b := src[t:]
		a := src[s:s1]
		b = b[:len(a)]
		// Extend the match to be as long as possible.
		for i := range a {
			if a[i] != b[i] {
				return int32(i)
			}
		}
		return int32(len(a))
	}

	// We found a match in the previous block.
	tp := int32(len(e.prev)) + t
	if tp < 0 {
		return 0
	}

	// Extend the match to be as long as possible.
	a := src[s:s1]
	b := e.prev[tp:]
	if len(b) > len(a) {
		b = b[:len(a)]
	}
	a = a[:len(b)]
	for i := range b {
		if a[i] != b[i] {
			return int32(i)
		}
	}

	// If we reached our limit, we matched everything we are
	// allowed to in the previous block and we return.
	n := int32(len(b))
	if int(s+n) == s1 {
		return n
	}

	// Continue looking for more matches in the current block.
	a = src[s+n : s1]
	b = src[:len(a)]
	for i := range a {
		if a[i] != b[i] {
			return int32(i) + n
		}
	}
	return int32(len(a)) + n
}
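// Editorial example: if len(e.prev) == 65536 and t == -10, then tp == 65526
// and the match starts in the last 10 bytes of the previous block; once those
// are exhausted, matching continues from src[0:] in the current block, since
// the current block logically follows e.prev.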
// reset resets the encoding history.
// This ensures that no matches are made to the previous block.
func (e *deflateFast) reset() {
	e.prev = e.prev[:0]
	// Bump the offset, so all matches will fail distance check.
	e.cur += maxMatchOffset

	// Protect against e.cur wraparound.
	if e.cur > 1<<30 {
		e.resetAll()
	}
}
// resetAll resets the deflateFast struct and is only called in rare
// situations to prevent integer overflow. It manually resets each field
// to avoid causing large stack growth.
//
// See https://golang.org/issue/18636.
func (e *deflateFast) resetAll() {
	// This is equivalent to:
	//	*e = deflateFast{cur: maxStoreBlockSize, prev: e.prev[:0]}
	e.cur = maxStoreBlockSize
	e.prev = e.prev[:0]
	for i := range e.table {
		e.table[i] = tableEntry{}
	}
}
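// Editorial note: the table holds tableSize (1<<14) entries of 8 bytes each,
// so the commented-out struct assignment above would build a roughly 128 KiB
// temporary on the stack before copying it; clearing field by field avoids
// that stack growth (see the issue linked above).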