2 LZ4 - Fast LZ compression algorithm
3 Copyright (C) 2011-2013, Yann Collet.
4 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are
10 * Redistributions of source code must retain the above copyright
11 notice, this list of conditions and the following disclaimer.
12 * Redistributions in binary form must reproduce the above
13 copyright notice, this list of conditions and the following disclaimer
14 in the documentation and/or other materials provided with the
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 You can contact the author at :
30 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
31 - LZ4 source repository : http://code.google.com/p/lz4/
35 Note : this source file requires "lz4_encoder.h"
38 //**************************************
40 //**************************************
42 // Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
43 // Increasing memory usage improves compression ratio
44 // Reduced memory usage can improve speed, due to cache effect
45 // Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
46 #define MEMORY_USAGE 14
49 // Select how default compression function will allocate memory for its hash table,
50 // in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)).
51 // Default allocation strategy is to use stack (HEAPMODE 0)
52 // Note : explicit functions *_stack* and *_heap* are unaffected by this setting
55 // BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
56 // This will provide a small boost to performance for big endian cpu, but the resulting compressed stream will be incompatible with little-endian CPU.
57 // You can set this option to 1 in situations where data will remain within closed environment
58 // This option is useless on Little_Endian CPU (such as x86)
59 //#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
63 //**************************************
64 // CPU Feature Detection
65 //**************************************
67 #if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
68 || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \
69 || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \
70 || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) // Detects 64 bits mode
76 // Little Endian or Big Endian ?
77 // Overwrite the #define below if you know your architecture endianness
78 #if defined (__GLIBC__)
80 # if (__BYTE_ORDER == __BIG_ENDIAN)
81 # define LZ4_BIG_ENDIAN 1
83 #elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
84 # define LZ4_BIG_ENDIAN 1
85 #elif defined(__sparc) || defined(__sparc__) \
86 || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
87 || defined(__hpux) || defined(__hppa) \
88 || defined(_MIPSEB) || defined(__s390__)
89 # define LZ4_BIG_ENDIAN 1
91 // Little Endian assumed. PDP Endian and other very rare endian format are unsupported.
94 // Unaligned memory access is automatically enabled for "common" CPU, such as x86.
95 // For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected
96 // If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance
97 #if defined(__ARM_FEATURE_UNALIGNED)
98 # define LZ4_FORCE_UNALIGNED_ACCESS 1
101 // Define this parameter if your target system or compiler does not support hardware bit count
102 #if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count
103 # define LZ4_FORCE_SW_BITCOUNT
107 //**************************************
109 //**************************************
110 #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
111 /* "restrict" is a known keyword */
113 # define restrict // Disable restrict
116 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
118 #ifdef _MSC_VER // Visual Studio
119 # include <intrin.h> // For Visual 2005
120 # if LZ4_ARCH64 // 64-bit
121 # pragma intrinsic(_BitScanForward64) // For Visual 2005
122 # pragma intrinsic(_BitScanReverse64) // For Visual 2005
124 # pragma intrinsic(_BitScanForward) // For Visual 2005
125 # pragma intrinsic(_BitScanReverse) // For Visual 2005
127 # pragma warning(disable : 4127) // disable: C4127: conditional expression is constant
131 # define lz4_bswap16(x) _byteswap_ushort(x)
133 # define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
136 #if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
137 # define expect(expr,value) (__builtin_expect ((expr),(value)) )
139 # define expect(expr,value) (expr)
142 #define likely(expr) expect((expr) != 0, 1)
143 #define unlikely(expr) expect((expr) != 0, 0)
146 //**************************************
148 //**************************************
149 #include <stdlib.h> // for malloc
150 #include <string.h> // for memset
154 //**************************************
156 //**************************************
157 #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
159 typedef uint8_t BYTE
;
160 typedef uint16_t U16
;
161 typedef uint32_t U32
;
163 typedef uint64_t U64
;
165 typedef unsigned char BYTE
;
166 typedef unsigned short U16
;
167 typedef unsigned int U32
;
168 typedef signed int S32
;
169 typedef unsigned long long U64
;
172 #if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
173 # define _PACKED __attribute__ ((packed))
178 #if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
179 # pragma pack(push, 1)
182 typedef struct _U16_S
{ U16 v
; } _PACKED U16_S
;
183 typedef struct _U32_S
{ U32 v
; } _PACKED U32_S
;
184 typedef struct _U64_S
{ U64 v
; } _PACKED U64_S
;
186 #if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
190 #define A64(x) (((U64_S *)(x))->v)
191 #define A32(x) (((U32_S *)(x))->v)
192 #define A16(x) (((U16_S *)(x))->v)
195 //**************************************
197 //**************************************
198 #define HASHTABLESIZE (1 << MEMORY_USAGE)
203 #define LASTLITERALS 5
204 #define MFLIMIT (COPYLENGTH+MINMATCH)
205 #define MINLENGTH (MFLIMIT+1)
207 #define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
208 #define SKIPSTRENGTH 6 // Increasing this value will make the compression run slower on incompressible data
211 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
214 #define ML_MASK ((1U<<ML_BITS)-1)
215 #define RUN_BITS (8-ML_BITS)
216 #define RUN_MASK ((1U<<RUN_BITS)-1)
219 //**************************************
220 // Architecture-specific macros
221 //**************************************
222 #if LZ4_ARCH64 // 64-bit
226 # define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;
227 # define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
228 # define LZ4_SECURECOPY(s,d,e) if (d<e) LZ4_WILDCOPY(s,d,e)
230 # define INITBASE(base) const BYTE* const base = ip
235 # define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;
236 # define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
237 # define LZ4_SECURECOPY LZ4_WILDCOPY
238 # define HTYPE const BYTE*
239 # define INITBASE(base) const int base = 0
242 #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
243 # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
244 # define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
245 #else // Little Endian
246 # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
247 # define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
251 //**************************************
253 //**************************************
254 #define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d<e);
255 #define LZ4_BLINDCOPY(s,d,l) { BYTE* e=(d)+(l); LZ4_WILDCOPY(s,d,e); d=e; }
258 //****************************
260 //****************************
// Returns the byte position (0..7) of the first non-zero byte of 'val':
// leading side on big-endian targets (clz), trailing side on little-endian (ctz).
// Used to count how many bytes two sequences have in common after a word XOR.
// NOTE(review): this extract is missing several original lines (opening brace,
// the 'r' declaration, #else/#endif pairs and the final returns) - annotate only.
263 static inline int LZ4_NbCommonBytes (register U64 val
)
265 #if defined(LZ4_BIG_ENDIAN)
266 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
// MSVC: hardware scan of the most significant set bit.
268 _BitScanReverse64( &r
, val
);
270 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
// GCC >= 3.04: count leading zero bits; >>3 converts bits to whole bytes.
271 return (__builtin_clzll(val
) >> 3);
// Software fallback: narrow down the highest non-zero byte half-by-half.
274 if (!(val
>>32)) { r
=4; } else { r
=0; val
>>=32; }
275 if (!(val
>>16)) { r
+=2; val
>>=8; } else { val
>>=24; }
280 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
// MSVC: hardware scan of the least significant set bit.
282 _BitScanForward64( &r
, val
);
284 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
285 return (__builtin_ctzll(val
) >> 3);
// Software fallback: isolate the lowest set bit (val & -val), then map it to
// a byte index with a De Bruijn multiplicative lookup.
287 static const int DeBruijnBytePos
[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
288 return DeBruijnBytePos
[((U64
)((val
& -val
) * 0x0218A392CDABBD3F)) >> 58];
// 32-bit counterpart of LZ4_NbCommonBytes: byte position (0..3) of the first
// non-zero byte of 'val', selected by endianness and hardware support.
// NOTE(review): this extract is missing several original lines (opening brace,
// the 'r' declaration, #else/#endif pairs and the final returns) - annotate only.
295 static inline int LZ4_NbCommonBytes (register U32 val
)
297 #if defined(LZ4_BIG_ENDIAN)
298 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
// MSVC: hardware scan of the most significant set bit.
300 _BitScanReverse( &r
, val
);
302 # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
// GCC >= 3.04: count leading zero bits; >>3 converts bits to whole bytes.
303 return (__builtin_clz(val
) >> 3);
// Software fallback: narrow down the highest non-zero byte.
306 if (!(val
>>16)) { r
=2; val
>>=8; } else { r
=0; val
>>=24; }
311 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
// MSVC: hardware scan of the least significant set bit.
313 _BitScanForward( &r
, val
);
315 # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
316 return (__builtin_ctz(val
) >> 3);
// Software fallback: isolate the lowest set bit, De Bruijn lookup maps it
// to a byte index (note the signed cast to negate safely).
318 static const int DeBruijnBytePos
[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
319 return DeBruijnBytePos
[((U32
)((val
& -(S32
)val
) * 0x077CB531U
)) >> 27];
328 //******************************
329 // Compression functions
330 //******************************
333 int LZ4_compress_stack(
338 Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
339 Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
340 return : the number of bytes written in buffer 'dest'
342 #define FUNCTION_NAME LZ4_compress_stack
343 #include "lz4_encoder.h"
347 int LZ4_compress_stack_limitedOutput(
353 Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
354 If it cannot achieve it, compression will stop, and result of the function will be zero.
355 return : the number of bytes written in buffer 'dest', or 0 if the compression fails
357 #define FUNCTION_NAME LZ4_compress_stack_limitedOutput
358 #define LIMITED_OUTPUT
359 #include "lz4_encoder.h"
363 int LZ4_compress64k_stack(
368 Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
369 This function compresses better than LZ4_compress_stack(), on the condition that
370 'inputSize' must be < to LZ4_64KLIMIT, or the function will fail.
371 Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
372 return : the number of bytes written in buffer 'dest', or 0 if compression fails
374 #define FUNCTION_NAME LZ4_compress64k_stack
376 #include "lz4_encoder.h"
380 int LZ4_compress64k_stack_limitedOutput(
386 Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
387 This function compresses better than LZ4_compress_stack_limitedOutput(), on the condition that
388 'inputSize' must be < to LZ4_64KLIMIT, or the function will fail.
389 If it cannot achieve it, compression will stop, and result of the function will be zero.
390 return : the number of bytes written in buffer 'dest', or 0 if the compression fails
392 #define FUNCTION_NAME LZ4_compress64k_stack_limitedOutput
394 #define LIMITED_OUTPUT
395 #include "lz4_encoder.h"
399 void* LZ4_createHeapMemory();
400 int LZ4_freeHeapMemory(void* ctx);
402 Used to allocate and free hashTable memory
403 to be used by the LZ4_compress_heap* family of functions.
404 LZ4_createHeapMemory() returns NULL is memory allocation fails.
406 void* LZ4_create() { return malloc(HASHTABLESIZE
); }
// Releases memory previously obtained from LZ4_create().
// Always succeeds and returns 0 (free(NULL) is a no-op).
int LZ4_free(void* ctx)
{
    free(ctx);
    return 0;
}
411 int LZ4_compress_heap(
417 Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
418 The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'.
419 Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
420 return : the number of bytes written in buffer 'dest'
422 #define FUNCTION_NAME LZ4_compress_heap
423 #define USE_HEAPMEMORY
424 #include "lz4_encoder.h"
428 int LZ4_compress_heap_limitedOutput(
435 Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
436 If it cannot achieve it, compression will stop, and result of the function will be zero.
437 The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'.
438 return : the number of bytes written in buffer 'dest', or 0 if the compression fails
440 #define FUNCTION_NAME LZ4_compress_heap_limitedOutput
441 #define LIMITED_OUTPUT
442 #define USE_HEAPMEMORY
443 #include "lz4_encoder.h"
447 int LZ4_compress64k_heap(
453 Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
454 The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'.
455 'inputSize' must be < to LZ4_64KLIMIT, or the function will fail.
456 Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
457 return : the number of bytes written in buffer 'dest'
459 #define FUNCTION_NAME LZ4_compress64k_heap
461 #define USE_HEAPMEMORY
462 #include "lz4_encoder.h"
466 int LZ4_compress64k_heap_limitedOutput(
473 Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
474 If it cannot achieve it, compression will stop, and result of the function will be zero.
475 The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'.
476 'inputSize' must be < to LZ4_64KLIMIT, or the function will fail.
477 return : the number of bytes written in buffer 'dest', or 0 if the compression fails
479 #define FUNCTION_NAME LZ4_compress64k_heap_limitedOutput
481 #define LIMITED_OUTPUT
482 #define USE_HEAPMEMORY
483 #include "lz4_encoder.h"
// Compresses 'inputSize' bytes from 'source' into 'dest' (which must be sized
// at least LZ4_compressBound(inputSize)), dispatching to the 64KB-optimized
// variant for small inputs. Returns the compressed size, or 0 on failure.
// NOTE(review): this extract is missing the '#if HEAPMODE' / '#else' / '#endif'
// lines, the 'int result;' declaration, the LZ4_free(ctx) call and braces -
// the heap path and the stack path appear concatenated below.
486 int LZ4_compress(const char* source
, char* dest
, int inputSize
)
// Heap path: allocate the hash table, pick the variant by input size.
489 void* ctx
= LZ4_create();
491 if (ctx
== NULL
) return 0; // Failed allocation => compression not done
492 if (inputSize
< LZ4_64KLIMIT
)
493 result
= LZ4_compress64k_heap(ctx
, source
, dest
, inputSize
);
494 else result
= LZ4_compress_heap(ctx
, source
, dest
, inputSize
);
// Stack path: hash table lives on the stack, no allocation needed.
498 if (inputSize
< (int)LZ4_64KLIMIT
) return LZ4_compress64k_stack(source
, dest
, inputSize
);
499 return LZ4_compress_stack(source
, dest
, inputSize
);
// Same dispatch as LZ4_compress(), but compression stops (returning 0) if the
// output would exceed 'maxOutputSize' bytes.
// NOTE(review): this extract is missing the '#if HEAPMODE' / '#else' / '#endif'
// lines, the 'int result;' declaration, the LZ4_free(ctx) call and braces -
// the heap path and the stack path appear concatenated below.
504 int LZ4_compress_limitedOutput(const char* source
, char* dest
, int inputSize
, int maxOutputSize
)
// Heap path: allocate the hash table, pick the variant by input size.
507 void* ctx
= LZ4_create();
509 if (ctx
== NULL
) return 0; // Failed allocation => compression not done
510 if (inputSize
< LZ4_64KLIMIT
)
511 result
= LZ4_compress64k_heap_limitedOutput(ctx
, source
, dest
, inputSize
, maxOutputSize
);
512 else result
= LZ4_compress_heap_limitedOutput(ctx
, source
, dest
, inputSize
, maxOutputSize
);
// Stack path: hash table lives on the stack, no allocation needed.
516 if (inputSize
< (int)LZ4_64KLIMIT
) return LZ4_compress64k_stack_limitedOutput(source
, dest
, inputSize
, maxOutputSize
);
517 return LZ4_compress_stack_limitedOutput(source
, dest
, inputSize
, maxOutputSize
);
522 //****************************
523 // Decompression functions
524 //****************************
// Decoder directives: these enums select the behaviour of each instantiation
// of LZ4_decompress_generic().
typedef enum { noPrefix = 0, withPrefix = 1 } prefix64k_directive;      // is a 64KB dictionary present before 'dest' ?
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } end_directive; // which buffer size terminates the decode loop
typedef enum { full = 0, partial = 1 } exit_directive;                  // decode everything, or stop at targetOutputSize
531 // This generic decompression function covers all use cases.
532 // It shall be instantiated several times, using different sets of directives
533 // Note that it is essential this generic function is really inlined,
534 // in order to remove useless branches during compilation optimisation.
// NOTE(review): this extract is missing many original lines (the source/dest/
// inputSize parameters, braces, #if/#else/#endif pairs and several statements);
// it cannot compile as shown. Comments below annotate the visible logic only.
535 static inline int LZ4_decompress_generic(
539 int outputSize
, // OutputSize must be != 0; if endOnInput==endOnInputSize, this value is the max size of Output Buffer.
541 int endOnInput
, // endOnOutputSize, endOnInputSize
542 int prefix64k
, // noPrefix, withPrefix
543 int partialDecoding
, // full, partial
544 int targetOutputSize
// only used if partialDecoding==partial
// Local cursors: ip walks the compressed input, op walks the decoded output.
548 const BYTE
* restrict ip
= (const BYTE
*) source
;
550 const BYTE
* const iend
= ip
+ inputSize
;
552 BYTE
* op
= (BYTE
*) dest
;
553 BYTE
* const oend
= op
+ outputSize
;
// oexit marks where a partial decode is allowed to stop.
555 BYTE
* oexit
= op
+ targetOutputSize
;
// Correction tables used to copy overlapping matches (offset < STEPSIZE).
557 size_t dec32table
[] = {0, 3, 2, 3, 0, 0, 0, 0};
559 size_t dec64table
[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
// Up-front special cases.
564 if ((partialDecoding
) && (oexit
> oend
-MFLIMIT
)) oexit
= oend
-MFLIMIT
; // targetOutputSize too large, better decode everything
565 if unlikely(outputSize
==0) goto _output_error
; // Empty output buffer
// Literal run length: token's high nibble, extended byte-by-byte while 255.
576 if ((length
=(token
>>ML_BITS
)) == RUN_MASK
)
579 while (((endOnInput
)?ip
<iend
:1) && (s
==255))
// Bounds check before copying literals: detects the last sequence of a block.
588 if (((endOnInput
) && ((cpy
>(partialDecoding
?oexit
:oend
-MFLIMIT
)) || (ip
+length
>iend
-(2+1+LASTLITERALS
))) )
589 || ((!endOnInput
) && (cpy
>oend
-COPYLENGTH
)))
593 if (cpy
> oend
) goto _output_error
; // Error : write attempt beyond end of output buffer
594 if ((endOnInput
) && (ip
+length
> iend
)) goto _output_error
; // Error : read attempt beyond end of input buffer
598 if ((!endOnInput
) && (cpy
!= oend
)) goto _output_error
; // Error : block decoding must stop exactly there, due to parsing restrictions
599 if ((endOnInput
) && ((ip
+length
!= iend
) || (cpy
> oend
))) goto _output_error
; // Error : not enough place for another match (min 4) + 5 literals
// Final (exact) literal copy, then leave the decode loop.
601 memcpy(op
, ip
, length
);
604 break; // Necessarily EOF, due to parsing restrictions
// Fast path: wildcopy the literals (may over-write a few bytes past 'cpy').
606 LZ4_WILDCOPY(ip
, op
, cpy
); ip
-= (op
-cpy
); op
= cpy
;
// Match offset: 2 bytes, little-endian; ref points back into decoded output.
609 LZ4_READ_LITTLEENDIAN_16(ref
,cpy
,ip
); ip
+=2;
610 if ((prefix64k
==noPrefix
) && unlikely(ref
< (BYTE
* const)dest
)) goto _output_error
; // Error : offset outside destination buffer
// Match length: token's low nibble, extended byte-by-byte while 255.
613 if ((length
=(token
&ML_MASK
)) == ML_MASK
)
615 while (endOnInput
? ip
<iend
-(LASTLITERALS
+1) : 1) // A minimum nb of input bytes must remain for LASTLITERALS + token
619 if (s
==255) continue;
624 // copy repeated sequence
// Overlapping match (offset < STEPSIZE): start byte-wise, then apply the
// table-driven corrections so the word-sized copy below stays valid.
625 if unlikely((op
-ref
)<STEPSIZE
)
628 size_t dec64
= dec64table
[op
-ref
];
630 const size_t dec64
= 0;
636 op
+= 4, ref
+= 4; ref
-= dec32table
[op
-ref
];
638 op
+= STEPSIZE
-4; ref
-= dec64
;
639 } else { LZ4_COPYSTEP(ref
,op
); }
640 cpy
= op
+ length
- (STEPSIZE
-4);
// Careful copy when the match ends near the end of the output buffer.
642 if unlikely(cpy
>oend
-(COPYLENGTH
)-(STEPSIZE
-4))
644 if (cpy
> oend
-LASTLITERALS
) goto _output_error
; // Error : last 5 bytes must be literals
645 LZ4_SECURECOPY(ref
, op
, (oend
-COPYLENGTH
));
646 while(op
<cpy
) *op
++=*ref
++;
// Fast path: wildcopy the match.
650 LZ4_WILDCOPY(ref
, op
, cpy
);
651 op
=cpy
; // correction
// Exit paths - which one applies depends on #if lines not visible here.
656 return (int) (((char*)op
)-dest
); // Nb of output bytes decoded
658 return (int) (((char*)ip
)-source
); // Nb of input bytes read
660 // Overflow error detected
662 return (int) (-(((char*)ip
)-source
))-1;
666 int LZ4_decompress_safe(const char* source
, char* dest
, int inputSize
, int maxOutputSize
)
668 return LZ4_decompress_generic(source
, dest
, inputSize
, maxOutputSize
, endOnInputSize
, noPrefix
, full
, 0);
671 int LZ4_decompress_fast(const char* source
, char* dest
, int outputSize
)
673 return LZ4_decompress_generic(source
, dest
, 0, outputSize
, endOnOutputSize
, noPrefix
, full
, 0);
676 int LZ4_decompress_safe_withPrefix64k(const char* source
, char* dest
, int inputSize
, int maxOutputSize
)
678 return LZ4_decompress_generic(source
, dest
, inputSize
, maxOutputSize
, endOnInputSize
, withPrefix
, full
, 0);
681 int LZ4_decompress_fast_withPrefix64k(const char* source
, char* dest
, int outputSize
)
683 return LZ4_decompress_generic(source
, dest
, 0, outputSize
, endOnOutputSize
, withPrefix
, full
, 0);
686 int LZ4_decompress_safe_partial(const char* source
, char* dest
, int inputSize
, int targetOutputSize
, int maxOutputSize
)
688 return LZ4_decompress_generic(source
, dest
, inputSize
, maxOutputSize
, endOnInputSize
, noPrefix
, partial
, targetOutputSize
);