/*
   LZ4 - Fast LZ compression algorithm
   Copyright (C) 2011-2013, Yann Collet.
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
   - LZ4 source repository : http://code.google.com/p/lz4/
*/

/*
Note : this source file requires "lz4_encoder.h"
*/

//**************************************
// Tuning parameters
//**************************************
// MEMORY_USAGE :
// Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
// Increasing memory usage improves compression ratio
// Reduced memory usage can improve speed, due to cache effect
// Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
#define MEMORY_USAGE 14
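// (With the default value of 14, HASHTABLESIZE defined further down is
//  1 << 14 = 16384 bytes : the 16KB table that LZ4_create() allocates.)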

// HEAPMODE :
// Select how the default compression function will allocate memory for its hash table :
// on the stack (0: default, fastest) or on the heap (1: requires memory allocation via malloc).
// Default allocation strategy is to use the stack (HEAPMODE 0)
// Note : explicit functions *_stack* and *_heap* are unaffected by this setting
#define HEAPMODE 0

// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
// This will provide a small boost to performance for big-endian CPUs, but the resulting compressed stream will be incompatible with little-endian CPUs.
// You can set this option to 1 in situations where data will remain within a closed environment
// This option is useless on little-endian CPUs (such as x86)
//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1


//**************************************
// CPU Feature Detection
//**************************************
// 32 or 64 bits ?
#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
  || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \
  || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \
  || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) )   // Detects 64-bit mode
#  define LZ4_ARCH64 1
#else
#  define LZ4_ARCH64 0
#endif

// Little Endian or Big Endian ?
// Overwrite the #define below if you know your architecture endianness
#if defined (__GLIBC__)
#  include <endian.h>
#  if (__BYTE_ORDER == __BIG_ENDIAN)
#    define LZ4_BIG_ENDIAN 1
#  endif
#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
#  define LZ4_BIG_ENDIAN 1
#elif defined(__sparc) || defined(__sparc__) \
   || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
   || defined(__hpux)  || defined(__hppa) \
   || defined(_MIPSEB) || defined(__s390__)
#  define LZ4_BIG_ENDIAN 1
#else
// Little Endian assumed. PDP Endian and other very rare endian formats are unsupported.
#endif

// Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
// For other CPUs, the compiler will be more cautious and insert extra code to ensure aligned access is respected.
// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance
#if defined(__ARM_FEATURE_UNALIGNED)
#  define LZ4_FORCE_UNALIGNED_ACCESS 1
#endif

// Define this parameter if your target system or compiler does not support hardware bit count
#if defined(_MSC_VER) && defined(_WIN32_WCE)   // Visual Studio for Windows CE does not support Hardware bit count
#  define LZ4_FORCE_SW_BITCOUNT
#endif


//**************************************
// Compiler Options
//**************************************
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   // C99
/* "restrict" is a known keyword */
#else
#  define restrict  // Disable restrict
#endif

#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef _MSC_VER    // Visual Studio
#  include <intrin.h>   // For Visual 2005
#  if LZ4_ARCH64        // 64-bit
#    pragma intrinsic(_BitScanForward64)   // For Visual 2005
#    pragma intrinsic(_BitScanReverse64)   // For Visual 2005
#  else
#    pragma intrinsic(_BitScanForward)     // For Visual 2005
#    pragma intrinsic(_BitScanReverse)     // For Visual 2005
#  endif
#  pragma warning(disable : 4127)          // disable: C4127: conditional expression is constant
#endif

#ifdef _MSC_VER
#  define lz4_bswap16(x) _byteswap_ushort(x)
#else
#  define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
#endif

#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
#else
#  define expect(expr,value)    (expr)
#endif

#define likely(expr)     expect((expr) != 0, 1)
#define unlikely(expr)   expect((expr) != 0, 0)


//**************************************
// Includes
//**************************************
#include <stdlib.h>   // for malloc
#include <string.h>   // for memset
#include "lz4.h"


//**************************************
// Basic Types
//**************************************
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   // C99
# include <stdint.h>
  typedef uint8_t  BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
#else
  typedef unsigned char       BYTE;
  typedef unsigned short      U16;
  typedef unsigned int        U32;
  typedef   signed int        S32;
  typedef unsigned long long  U64;
#endif

#if defined(__GNUC__)  && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
#  define _PACKED __attribute__ ((packed))
#else
#  define _PACKED
#endif

#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
#  pragma pack(push, 1)
#endif

typedef struct _U16_S { U16 v; } _PACKED U16_S;
typedef struct _U32_S { U32 v; } _PACKED U32_S;
typedef struct _U64_S { U64 v; } _PACKED U64_S;

#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
#  pragma pack(pop)
#endif

#define A64(x) (((U64_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
#define A16(x) (((U16_S *)(x))->v)


//**************************************
// Constants
//**************************************
#define HASHTABLESIZE (1 << MEMORY_USAGE)

#define MINMATCH 4

#define COPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (COPYLENGTH+MINMATCH)
#define MINLENGTH (MFLIMIT+1)

#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
#define SKIPSTRENGTH 6   // Increasing this value will make the compression run slower on incompressible data

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_BITS  4
#define ML_MASK  ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)


//**************************************
// Architecture-specific macros
//**************************************
#if LZ4_ARCH64   // 64-bit
#  define STEPSIZE 8
#  define UARCH U64
#  define AARCH A64
#  define LZ4_COPYSTEP(s,d)       A64(d) = A64(s); d+=8; s+=8;
#  define LZ4_COPYPACKET(s,d)     LZ4_COPYSTEP(s,d)
#  define LZ4_SECURECOPY(s,d,e)   if (d<e) LZ4_WILDCOPY(s,d,e)
#  define HTYPE                   U32
#  define INITBASE(base)          const BYTE* const base = ip
#else            // 32-bit
#  define STEPSIZE 4
#  define UARCH U32
#  define AARCH A32
#  define LZ4_COPYSTEP(s,d)       A32(d) = A32(s); d+=4; s+=4;
#  define LZ4_COPYPACKET(s,d)     LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
#  define LZ4_SECURECOPY          LZ4_WILDCOPY
#  define HTYPE                   const BYTE*
#  define INITBASE(base)          const int base = 0
#endif

#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
#  define LZ4_WRITE_LITTLEENDIAN_16(p,i)  { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
#else            // Little Endian
#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
#  define LZ4_WRITE_LITTLEENDIAN_16(p,v)  { A16(p) = v; p+=2; }
#endif


//**************************************
// Macros
//**************************************
#define LZ4_WILDCOPY(s,d,e)     do { LZ4_COPYPACKET(s,d) } while (d<e);
#define LZ4_BLINDCOPY(s,d,l)    { BYTE* e=(d)+(l); LZ4_WILDCOPY(s,d,e); d=e; }
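// Note : LZ4_WILDCOPY copies whole STEPSIZE-wide packets and may therefore write
// a few bytes past 'e'. Callers must guarantee enough slack at the end of the
// output buffer; this is why the format reserves MFLIMIT/LASTLITERALS bytes and
// why the last bytes of a block are copied with memcpy / byte loops instead.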


//****************************
// Private functions
//****************************
#if LZ4_ARCH64

static inline int LZ4_NbCommonBytes (register U64 val)
{
#if defined(LZ4_BIG_ENDIAN)
    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanReverse64( &r, val );
    return (int)(r>>3);
    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_clzll(val) >> 3);
    #else
    int r;
    if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
    if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
    r += (!val);
    return r;
    #endif
#else
    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanForward64( &r, val );
    return (int)(r>>3);
    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_ctzll(val) >> 3);
    #else
    static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
    return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];
    #endif
#endif
}

#else

static inline int LZ4_NbCommonBytes (register U32 val)
{
#if defined(LZ4_BIG_ENDIAN)
#  if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanReverse( &r, val );
    return (int)(r>>3);
#  elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_clz(val) >> 3);
#  else
    int r;
    if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
    r += (!val);
    return r;
#  endif
#else
#  if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r;
    _BitScanForward( &r, val );
    return (int)(r>>3);
#  elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_ctz(val) >> 3);
#  else
    static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
    return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#  endif
#endif
}

#endif


//******************************
// Compression functions
//******************************

/*
int LZ4_compress_stack(
                 const char* source,
                 char* dest,
                 int inputSize)

Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
return : the number of bytes written in buffer 'dest'
*/
#define FUNCTION_NAME LZ4_compress_stack
#include "lz4_encoder.h"


/*
int LZ4_compress_stack_limitedOutput(
                 const char* source,
                 char* dest,
                 int inputSize,
                 int maxOutputSize)

Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
If it cannot achieve it, compression will stop, and the result of the function will be zero.
return : the number of bytes written in buffer 'dest', or 0 if the compression fails
*/
#define FUNCTION_NAME LZ4_compress_stack_limitedOutput
#define LIMITED_OUTPUT
#include "lz4_encoder.h"


/*
int LZ4_compress64k_stack(
                 const char* source,
                 char* dest,
                 int inputSize)

Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
This function compresses better than LZ4_compress_stack(), on the condition that
'inputSize' is < LZ4_64KLIMIT; otherwise the function will fail.
Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
return : the number of bytes written in buffer 'dest', or 0 if compression fails
*/
#define FUNCTION_NAME LZ4_compress64k_stack
#define COMPRESS_64K
#include "lz4_encoder.h"


/*
int LZ4_compress64k_stack_limitedOutput(
                 const char* source,
                 char* dest,
                 int inputSize,
                 int maxOutputSize)

Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
This function compresses better than LZ4_compress_stack_limitedOutput(), on the condition that
'inputSize' is < LZ4_64KLIMIT; otherwise the function will fail.
If it cannot achieve it, compression will stop, and the result of the function will be zero.
return : the number of bytes written in buffer 'dest', or 0 if the compression fails
*/
#define FUNCTION_NAME LZ4_compress64k_stack_limitedOutput
#define COMPRESS_64K
#define LIMITED_OUTPUT
#include "lz4_encoder.h"


/*
void* LZ4_create();
int LZ4_free(void* ctx);

Used to allocate and free hashTable memory
to be used by the LZ4_compress_heap* family of functions.
LZ4_create() returns NULL if memory allocation fails.
*/
void* LZ4_create() { return malloc(HASHTABLESIZE); }
int LZ4_free(void* ctx) { free(ctx); return 0; }
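
/*
Illustrative usage sketch for the heap entry points above (not part of this
file's build; 'src', 'dst' and 'srcSize' are caller-provided, and 'dst' must be
sized at a minimum of LZ4_compressBound(srcSize)) :

    void* ctx = LZ4_create();                                   // allocate the hash table once
    if (ctx != NULL)
    {
        int csize = LZ4_compress_heap(ctx, src, dst, srcSize);  // generated below via lz4_encoder.h
        LZ4_free(ctx);                                          // release the hash table
    }
*/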


/*
int LZ4_compress_heap(
                 void* ctx,
                 const char* source,
                 char* dest,
                 int inputSize)

Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
return : the number of bytes written in buffer 'dest'
*/
#define FUNCTION_NAME LZ4_compress_heap
#define USE_HEAPMEMORY
#include "lz4_encoder.h"


/*
int LZ4_compress_heap_limitedOutput(
                 void* ctx,
                 const char* source,
                 char* dest,
                 int inputSize,
                 int maxOutputSize)

Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
If it cannot achieve it, compression will stop, and the result of the function will be zero.
The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
return : the number of bytes written in buffer 'dest', or 0 if the compression fails
*/
#define FUNCTION_NAME LZ4_compress_heap_limitedOutput
#define LIMITED_OUTPUT
#define USE_HEAPMEMORY
#include "lz4_encoder.h"


/*
int LZ4_compress64k_heap(
                 void* ctx,
                 const char* source,
                 char* dest,
                 int inputSize)

Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
'inputSize' must be < LZ4_64KLIMIT, or the function will fail.
Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
return : the number of bytes written in buffer 'dest'
*/
#define FUNCTION_NAME LZ4_compress64k_heap
#define COMPRESS_64K
#define USE_HEAPMEMORY
#include "lz4_encoder.h"


/*
int LZ4_compress64k_heap_limitedOutput(
                 void* ctx,
                 const char* source,
                 char* dest,
                 int inputSize,
                 int maxOutputSize)

Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
If it cannot achieve it, compression will stop, and the result of the function will be zero.
The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
'inputSize' must be < LZ4_64KLIMIT, or the function will fail.
return : the number of bytes written in buffer 'dest', or 0 if the compression fails
*/
#define FUNCTION_NAME LZ4_compress64k_heap_limitedOutput
#define COMPRESS_64K
#define LIMITED_OUTPUT
#define USE_HEAPMEMORY
#include "lz4_encoder.h"


int LZ4_compress(const char* source, char* dest, int inputSize)
{
#if HEAPMODE
    void* ctx = LZ4_create();
    int result;
    if (ctx == NULL) return 0;    // Failed allocation => compression not done
    if (inputSize < LZ4_64KLIMIT)
        result = LZ4_compress64k_heap(ctx, source, dest, inputSize);
    else result = LZ4_compress_heap(ctx, source, dest, inputSize);
    LZ4_free(ctx);
    return result;
#else
    if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack(source, dest, inputSize);
    return LZ4_compress_stack(source, dest, inputSize);
#endif
}
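
/*
A minimal usage sketch for LZ4_compress() (illustrative only; the helper name
'exampleCompress' and its buffers are not part of this file) :
*/
#if 0
static int exampleCompress(const char* src, int srcSize, char** compressedOut)
{
    int maxSize = LZ4_compressBound(srcSize);   // worst-case compressed size (declared in lz4.h)
    char* dst = (char*) malloc(maxSize);
    int csize;
    if (dst == NULL) return 0;
    csize = LZ4_compress(src, dst, srcSize);    // number of bytes actually written into dst
    *compressedOut = dst;
    return csize;
}
#endif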


int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
#if HEAPMODE
    void* ctx = LZ4_create();
    int result;
    if (ctx == NULL) return 0;    // Failed allocation => compression not done
    if (inputSize < LZ4_64KLIMIT)
        result = LZ4_compress64k_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
    else result = LZ4_compress_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
    LZ4_free(ctx);
    return result;
#else
    if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
    return LZ4_compress_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
#endif
}
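
/*
Sketch for the size-capped variant above (illustrative names; not part of this
file's build). A return value of 0 means 'dest' was too small and its content
must not be used :

    int csize = LZ4_compress_limitedOutput(src, dst, srcSize, dstCapacity);
    if (csize == 0)
        storeUncompressed(src, srcSize);   // hypothetical fallback when the data does not fit
*/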


//****************************
// Decompression functions
//****************************

typedef enum { noPrefix = 0, withPrefix = 1 } prefix64k_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } end_directive;
typedef enum { full = 0, partial = 1 } exit_directive;


// This generic decompression function covers all use cases.
// It shall be instantiated several times, using different sets of directives.
// Note that it is essential this generic function is really inlined,
// in order to remove useless branches during compilation optimisation.
static inline int LZ4_decompress_generic(
                 const char* source,
                 char* dest,
                 int inputSize,
                 int outputSize,         // OutputSize must be != 0; if endOnInput==endOnInputSize, this value is the max size of Output Buffer.

                 int endOnInput,         // endOnOutputSize, endOnInputSize
                 int prefix64k,          // noPrefix, withPrefix
                 int partialDecoding,    // full, partial
                 int targetOutputSize    // only used if partialDecoding==partial
                 )
{
    // Local Variables
    const BYTE* restrict ip = (const BYTE*) source;
    const BYTE* ref;
    const BYTE* const iend = ip + inputSize;

    BYTE* op = (BYTE*) dest;
    BYTE* const oend = op + outputSize;
    BYTE* cpy;
    BYTE* oexit = op + targetOutputSize;

    size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
    size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
#endif


    // Special case
    if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;   // targetOutputSize too large, better decode everything
    if unlikely(outputSize==0) goto _output_error;                           // Empty output buffer


    // Main Loop
    while (1)
    {
        unsigned token;
        size_t length;

        // get runlength
        token = *ip++;
        if ((length=(token>>ML_BITS)) == RUN_MASK)
        {
            unsigned s=255;
            while (((endOnInput)?ip<iend:1) && (s==255))
            {
                s = *ip++;
                length += s;
            }
        }

        // copy literals
        cpy = op+length;
        if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
            || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
        {
            if (partialDecoding)
            {
                if (cpy > oend) goto _output_error;                           // Error : write attempt beyond end of output buffer
                if ((endOnInput) && (ip+length > iend)) goto _output_error;   // Error : read attempt beyond end of input buffer
            }
            else
            {
                if ((!endOnInput) && (cpy != oend)) goto _output_error;       // Error : block decoding must stop exactly there, due to parsing restrictions
                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   // Error : not enough place for another match (min 4) + 5 literals
            }
            memcpy(op, ip, length);
            ip += length;
            op += length;
            break;   // Necessarily EOF, due to parsing restrictions
        }
        LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;

        // get offset
        LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
        if ((prefix64k==noPrefix) && unlikely(ref < (BYTE* const)dest)) goto _output_error;   // Error : offset outside destination buffer

        // get matchlength
        if ((length=(token&ML_MASK)) == ML_MASK)
        {
            while (endOnInput ? ip<iend-(LASTLITERALS+1) : 1)   // A minimum nb of input bytes must remain for LASTLITERALS + token
            {
                unsigned s = *ip++;
                length += s;
                if (s==255) continue;
                break;
            }
        }

        // copy repeated sequence
        if unlikely((op-ref)<STEPSIZE)
        {
#if LZ4_ARCH64
            size_t dec64 = dec64table[op-ref];
#else
            const size_t dec64 = 0;
#endif
            op[0] = ref[0];
            op[1] = ref[1];
            op[2] = ref[2];
            op[3] = ref[3];
            op += 4, ref += 4; ref -= dec32table[op-ref];
            A32(op) = A32(ref);
            op += STEPSIZE-4; ref -= dec64;
        } else { LZ4_COPYSTEP(ref,op); }
        cpy = op + length - (STEPSIZE-4);

        if unlikely(cpy>oend-(COPYLENGTH)-(STEPSIZE-4))
        {
            if (cpy > oend-LASTLITERALS) goto _output_error;   // Error : last 5 bytes must be literals
            LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
            while(op<cpy) *op++=*ref++;
            op=cpy;
            continue;
        }
        LZ4_WILDCOPY(ref, op, cpy);
        op=cpy;   // correction
    }

    // end of decoding
    if (endOnInput)
        return (int) (((char*)op)-dest);     // Nb of output bytes decoded
    else
        return (int) (((char*)ip)-source);   // Nb of input bytes read

    // Overflow error detected
_output_error:
    return (int) (-(((char*)ip)-source))-1;
}


int LZ4_decompress_safe(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, full, 0);
}

int LZ4_decompress_fast(const char* source, char* dest, int outputSize)
{
    return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, noPrefix, full, 0);
}

int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, withPrefix, full, 0);
}

int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int outputSize)
{
    return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
}

int LZ4_decompress_safe_partial(const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize)
{
    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, partial, targetOutputSize);
}
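
/*
A minimal decompression sketch using the "safe" variant above (illustrative
only; 'exampleDecompress' is not part of this file). The LZ4 block format does
not store the original size, so 'dstCapacity' must be conveyed separately :
*/
#if 0
static int exampleDecompress(const char* compressed, int csize, char* dst, int dstCapacity)
{
    // Returns the number of bytes written into dst, or a negative value on
    // malformed input or insufficient dstCapacity.
    return LZ4_decompress_safe(compressed, dst, csize, dstCapacity);
}
#endif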