2 * LZ4 auto-framing library
3 * Copyright (C) 2011-2016, Yann Collet.
5 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You can contact the author at :
31 * - LZ4 homepage : http://www.lz4.org
32 * - LZ4 source repository : https://github.com/lz4/lz4
35 /* LZ4F is a stand-alone API to create LZ4-compressed Frames
36 * in full conformance with specification v1.6.1 .
37 * This library rely upon memory management capabilities (malloc, free)
38 * provided either by <stdlib.h>,
39 * or redirected towards another library of user's choice
40 * (see Memory Routines below).
44 /*-************************************
46 **************************************/
47 #ifdef _MSC_VER /* Visual Studio */
48 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
52 /*-************************************
54 **************************************/
57 * Select how default compression functions will allocate memory for their hash table,
58 * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
61 # define LZ4F_HEAPMODE 0
65 /*-************************************
66 * Library declarations
67 **************************************/
68 #define LZ4F_STATIC_LINKING_ONLY
70 #define LZ4_STATIC_LINKING_ONLY
72 #define LZ4_HC_STATIC_LINKING_ONLY
74 #define XXH_STATIC_LINKING_ONLY
78 /*-************************************
80 **************************************/
82 * User may redirect invocations of
83 * malloc(), calloc() and free()
84 * towards another library or solution of their choice
85 * by modifying below section.
88 #include <string.h> /* memset, memcpy, memmove */
89 #ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
90 # define MEM_INIT(p,v,s) memset((p),(v),(s))
93 #ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
94 # include <stdlib.h> /* malloc, calloc, free */
95 # define ALLOC(s) malloc(s)
96 # define ALLOC_AND_ZERO(s) calloc(1,(s))
97 # define FREEMEM(p) free(p)
100 static void* LZ4F_calloc(size_t s
, LZ4F_CustomMem cmem
)
102 /* custom calloc defined : use it */
103 if (cmem
.customCalloc
!= NULL
) {
104 return cmem
.customCalloc(cmem
.opaqueState
, s
);
106 /* nothing defined : use default <stdlib.h>'s calloc() */
107 if (cmem
.customAlloc
== NULL
) {
108 return ALLOC_AND_ZERO(s
);
110 /* only custom alloc defined : use it, and combine it with memset() */
111 { void* const p
= cmem
.customAlloc(cmem
.opaqueState
, s
);
112 if (p
!= NULL
) MEM_INIT(p
, 0, s
);
116 static void* LZ4F_malloc(size_t s
, LZ4F_CustomMem cmem
)
118 /* custom malloc defined : use it */
119 if (cmem
.customAlloc
!= NULL
) {
120 return cmem
.customAlloc(cmem
.opaqueState
, s
);
122 /* nothing defined : use default <stdlib.h>'s malloc() */
126 static void LZ4F_free(void* p
, LZ4F_CustomMem cmem
)
128 /* custom malloc defined : use it */
129 if (cmem
.customFree
!= NULL
) {
130 cmem
.customFree(cmem
.opaqueState
, p
);
133 /* nothing defined : use default <stdlib.h>'s free() */
138 /*-************************************
140 **************************************/
141 #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
145 # define assert(condition) ((void)0)
149 #define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
151 #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
153 static int g_debuglog_enable
= 1;
154 # define DEBUGLOG(l, ...) { \
155 if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
156 fprintf(stderr, __FILE__ ": "); \
157 fprintf(stderr, __VA_ARGS__); \
158 fprintf(stderr, " \n"); \
161 # define DEBUGLOG(l, ...) {} /* disabled */
165 /*-************************************
167 **************************************/
168 #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
170 typedef uint8_t BYTE
;
171 typedef uint16_t U16
;
172 typedef uint32_t U32
;
174 typedef uint64_t U64
;
176 typedef unsigned char BYTE
;
177 typedef unsigned short U16
;
178 typedef unsigned int U32
;
179 typedef signed int S32
;
180 typedef unsigned long long U64
;
184 /* unoptimized version; solves endianness & alignment issues */
185 static U32
LZ4F_readLE32 (const void* src
)
187 const BYTE
* const srcPtr
= (const BYTE
*)src
;
188 U32 value32
= srcPtr
[0];
189 value32
+= ((U32
)srcPtr
[1])<< 8;
190 value32
+= ((U32
)srcPtr
[2])<<16;
191 value32
+= ((U32
)srcPtr
[3])<<24;
195 static void LZ4F_writeLE32 (void* dst
, U32 value32
)
197 BYTE
* const dstPtr
= (BYTE
*)dst
;
198 dstPtr
[0] = (BYTE
)value32
;
199 dstPtr
[1] = (BYTE
)(value32
>> 8);
200 dstPtr
[2] = (BYTE
)(value32
>> 16);
201 dstPtr
[3] = (BYTE
)(value32
>> 24);
204 static U64
LZ4F_readLE64 (const void* src
)
206 const BYTE
* const srcPtr
= (const BYTE
*)src
;
207 U64 value64
= srcPtr
[0];
208 value64
+= ((U64
)srcPtr
[1]<<8);
209 value64
+= ((U64
)srcPtr
[2]<<16);
210 value64
+= ((U64
)srcPtr
[3]<<24);
211 value64
+= ((U64
)srcPtr
[4]<<32);
212 value64
+= ((U64
)srcPtr
[5]<<40);
213 value64
+= ((U64
)srcPtr
[6]<<48);
214 value64
+= ((U64
)srcPtr
[7]<<56);
218 static void LZ4F_writeLE64 (void* dst
, U64 value64
)
220 BYTE
* const dstPtr
= (BYTE
*)dst
;
221 dstPtr
[0] = (BYTE
)value64
;
222 dstPtr
[1] = (BYTE
)(value64
>> 8);
223 dstPtr
[2] = (BYTE
)(value64
>> 16);
224 dstPtr
[3] = (BYTE
)(value64
>> 24);
225 dstPtr
[4] = (BYTE
)(value64
>> 32);
226 dstPtr
[5] = (BYTE
)(value64
>> 40);
227 dstPtr
[6] = (BYTE
)(value64
>> 48);
228 dstPtr
[7] = (BYTE
)(value64
>> 56);
232 /*-************************************
234 **************************************/
235 #ifndef LZ4_SRC_INCLUDED /* avoid double definition */
247 #define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
248 #define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB
/* Frame layout sizes (see LZ4 Frame Format specification). */
static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN;        /* 7 : minimal frame header size */
static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX;        /* 19 : header with all optional fields present */
static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE;         /* block header : size, and compress flag */
static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE;       /* block footer : checksum (optional) */
256 /*-************************************
257 * Structures and local types
258 **************************************/
260 typedef enum { LZ4B_COMPRESSED
, LZ4B_UNCOMPRESSED
} LZ4F_blockCompression_t
;
262 typedef struct LZ4F_cctx_s
265 LZ4F_preferences_t prefs
;
268 const LZ4F_CDict
* cdict
;
270 size_t maxBufferSize
;
271 BYTE
* tmpBuff
; /* internal buffer, for streaming */
272 BYTE
* tmpIn
; /* starting position of data compress within internal buffer (>= tmpBuff) */
273 size_t tmpInSize
; /* amount of data to compress after tmpIn */
277 U16 lz4CtxAlloc
; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
278 U16 lz4CtxState
; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
279 LZ4F_blockCompression_t blockCompression
;
283 /*-************************************
285 **************************************/
286 #define LZ4F_GENERATE_STRING(STRING) #STRING,
287 static const char* LZ4F_errorStrings
[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING
) };
290 unsigned LZ4F_isError(LZ4F_errorCode_t code
)
292 return (code
> (LZ4F_errorCode_t
)(-LZ4F_ERROR_maxCode
));
295 const char* LZ4F_getErrorName(LZ4F_errorCode_t code
)
297 static const char* codeError
= "Unspecified error code";
298 if (LZ4F_isError(code
)) return LZ4F_errorStrings
[-(int)(code
)];
302 LZ4F_errorCodes
LZ4F_getErrorCode(size_t functionResult
)
304 if (!LZ4F_isError(functionResult
)) return LZ4F_OK_NoError
;
305 return (LZ4F_errorCodes
)(-(ptrdiff_t)functionResult
);
308 static LZ4F_errorCode_t
LZ4F_returnErrorCode(LZ4F_errorCodes code
)
310 /* A compilation error here means sizeof(ptrdiff_t) is not large enough */
311 LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));
312 return (LZ4F_errorCode_t
)-(ptrdiff_t)code
;
315 #define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e)
317 #define RETURN_ERROR_IF(c,e) if (c) RETURN_ERROR(e)
319 #define FORWARD_IF_ERROR(r) if (LZ4F_isError(r)) return (r)
/* LZ4F_getVersion() : returns the library's version identifier. */
unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }
/* LZ4F_compressionLevel_max() : highest compression level supported (HC). */
int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; }
325 size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID
)
327 static const size_t blockSizes
[4] = { 64 KB
, 256 KB
, 1 MB
, 4 MB
};
329 if (blockSizeID
== 0) blockSizeID
= LZ4F_BLOCKSIZEID_DEFAULT
;
330 if (blockSizeID
< LZ4F_max64KB
|| blockSizeID
> LZ4F_max4MB
)
331 RETURN_ERROR(maxBlockSize_invalid
);
332 { int const blockSizeIdx
= (int)blockSizeID
- (int)LZ4F_max64KB
;
333 return blockSizes
[blockSizeIdx
];
336 /*-************************************
338 **************************************/
339 #define MIN(a,b) ( (a) < (b) ? (a) : (b) )
341 static BYTE
LZ4F_headerChecksum (const void* header
, size_t length
)
343 U32
const xxh
= XXH32(header
, length
, 0);
344 return (BYTE
)(xxh
>> 8);
348 /*-************************************
349 * Simple-pass compression functions
350 **************************************/
351 static LZ4F_blockSizeID_t
LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID
,
352 const size_t srcSize
)
354 LZ4F_blockSizeID_t proposedBSID
= LZ4F_max64KB
;
355 size_t maxBlockSize
= 64 KB
;
356 while (requestedBSID
> proposedBSID
) {
357 if (srcSize
<= maxBlockSize
)
359 proposedBSID
= (LZ4F_blockSizeID_t
)((int)proposedBSID
+ 1);
362 return requestedBSID
;
365 /*! LZ4F_compressBound_internal() :
366 * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations.
367 * prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario.
368 * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers.
369 * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations.
371 static size_t LZ4F_compressBound_internal(size_t srcSize
,
372 const LZ4F_preferences_t
* preferencesPtr
,
373 size_t alreadyBuffered
)
375 LZ4F_preferences_t prefsNull
= LZ4F_INIT_PREFERENCES
;
376 prefsNull
.frameInfo
.contentChecksumFlag
= LZ4F_contentChecksumEnabled
; /* worst case */
377 prefsNull
.frameInfo
.blockChecksumFlag
= LZ4F_blockChecksumEnabled
; /* worst case */
378 { const LZ4F_preferences_t
* const prefsPtr
= (preferencesPtr
==NULL
) ? &prefsNull
: preferencesPtr
;
379 U32
const flush
= prefsPtr
->autoFlush
| (srcSize
==0);
380 LZ4F_blockSizeID_t
const blockID
= prefsPtr
->frameInfo
.blockSizeID
;
381 size_t const blockSize
= LZ4F_getBlockSize(blockID
);
382 size_t const maxBuffered
= blockSize
- 1;
383 size_t const bufferedSize
= MIN(alreadyBuffered
, maxBuffered
);
384 size_t const maxSrcSize
= srcSize
+ bufferedSize
;
385 unsigned const nbFullBlocks
= (unsigned)(maxSrcSize
/ blockSize
);
386 size_t const partialBlockSize
= maxSrcSize
& (blockSize
-1);
387 size_t const lastBlockSize
= flush
? partialBlockSize
: 0;
388 unsigned const nbBlocks
= nbFullBlocks
+ (lastBlockSize
>0);
390 size_t const blockCRCSize
= BFSize
* prefsPtr
->frameInfo
.blockChecksumFlag
;
391 size_t const frameEnd
= BHSize
+ (prefsPtr
->frameInfo
.contentChecksumFlag
*BFSize
);
393 return ((BHSize
+ blockCRCSize
) * nbBlocks
) +
394 (blockSize
* nbFullBlocks
) + lastBlockSize
+ frameEnd
;
398 size_t LZ4F_compressFrameBound(size_t srcSize
, const LZ4F_preferences_t
* preferencesPtr
)
400 LZ4F_preferences_t prefs
;
401 size_t const headerSize
= maxFHSize
; /* max header size, including optional fields */
403 if (preferencesPtr
!=NULL
) prefs
= *preferencesPtr
;
404 else MEM_INIT(&prefs
, 0, sizeof(prefs
));
407 return headerSize
+ LZ4F_compressBound_internal(srcSize
, &prefs
, 0);;
411 /*! LZ4F_compressFrame_usingCDict() :
412 * Compress srcBuffer using a dictionary, in a single step.
413 * cdict can be NULL, in which case, no dictionary is used.
414 * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
415 * The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
416 * however, it's the only way to provide a dictID, so it's not recommended.
417 * @return : number of bytes written into dstBuffer,
418 * or an error code if it fails (can be tested using LZ4F_isError())
420 size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx
* cctx
,
421 void* dstBuffer
, size_t dstCapacity
,
422 const void* srcBuffer
, size_t srcSize
,
423 const LZ4F_CDict
* cdict
,
424 const LZ4F_preferences_t
* preferencesPtr
)
426 LZ4F_preferences_t prefs
;
427 LZ4F_compressOptions_t options
;
428 BYTE
* const dstStart
= (BYTE
*) dstBuffer
;
429 BYTE
* dstPtr
= dstStart
;
430 BYTE
* const dstEnd
= dstStart
+ dstCapacity
;
432 if (preferencesPtr
!=NULL
)
433 prefs
= *preferencesPtr
;
435 MEM_INIT(&prefs
, 0, sizeof(prefs
));
436 if (prefs
.frameInfo
.contentSize
!= 0)
437 prefs
.frameInfo
.contentSize
= (U64
)srcSize
; /* auto-correct content size if selected (!=0) */
439 prefs
.frameInfo
.blockSizeID
= LZ4F_optimalBSID(prefs
.frameInfo
.blockSizeID
, srcSize
);
441 if (srcSize
<= LZ4F_getBlockSize(prefs
.frameInfo
.blockSizeID
))
442 prefs
.frameInfo
.blockMode
= LZ4F_blockIndependent
; /* only one block => no need for inter-block link */
444 MEM_INIT(&options
, 0, sizeof(options
));
445 options
.stableSrc
= 1;
447 RETURN_ERROR_IF(dstCapacity
< LZ4F_compressFrameBound(srcSize
, &prefs
), dstMaxSize_tooSmall
);
449 { size_t const headerSize
= LZ4F_compressBegin_usingCDict(cctx
, dstBuffer
, dstCapacity
, cdict
, &prefs
); /* write header */
450 FORWARD_IF_ERROR(headerSize
);
451 dstPtr
+= headerSize
; /* header size */ }
453 assert(dstEnd
>= dstPtr
);
454 { size_t const cSize
= LZ4F_compressUpdate(cctx
, dstPtr
, (size_t)(dstEnd
-dstPtr
), srcBuffer
, srcSize
, &options
);
455 FORWARD_IF_ERROR(cSize
);
458 assert(dstEnd
>= dstPtr
);
459 { size_t const tailSize
= LZ4F_compressEnd(cctx
, dstPtr
, (size_t)(dstEnd
-dstPtr
), &options
); /* flush last block, and generate suffix */
460 FORWARD_IF_ERROR(tailSize
);
461 dstPtr
+= tailSize
; }
463 assert(dstEnd
>= dstStart
);
464 return (size_t)(dstPtr
- dstStart
);
468 /*! LZ4F_compressFrame() :
469 * Compress an entire srcBuffer into a valid LZ4 frame, in a single step.
470 * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
471 * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
472 * @return : number of bytes written into dstBuffer.
473 * or an error code if it fails (can be tested using LZ4F_isError())
475 size_t LZ4F_compressFrame(void* dstBuffer
, size_t dstCapacity
,
476 const void* srcBuffer
, size_t srcSize
,
477 const LZ4F_preferences_t
* preferencesPtr
)
481 LZ4F_cctx_t
* cctxPtr
;
482 result
= LZ4F_createCompressionContext(&cctxPtr
, LZ4F_VERSION
);
483 FORWARD_IF_ERROR(result
);
487 LZ4F_cctx_t
* const cctxPtr
= &cctx
;
489 MEM_INIT(&cctx
, 0, sizeof(cctx
));
490 cctx
.version
= LZ4F_VERSION
;
491 cctx
.maxBufferSize
= 5 MB
; /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */
492 if ( preferencesPtr
== NULL
493 || preferencesPtr
->compressionLevel
< LZ4HC_CLEVEL_MIN
) {
494 LZ4_initStream(&lz4ctx
, sizeof(lz4ctx
));
495 cctxPtr
->lz4CtxPtr
= &lz4ctx
;
496 cctxPtr
->lz4CtxAlloc
= 1;
497 cctxPtr
->lz4CtxState
= 1;
500 DEBUGLOG(4, "LZ4F_compressFrame");
502 result
= LZ4F_compressFrame_usingCDict(cctxPtr
, dstBuffer
, dstCapacity
,
504 NULL
, preferencesPtr
);
507 LZ4F_freeCompressionContext(cctxPtr
);
509 if ( preferencesPtr
!= NULL
510 && preferencesPtr
->compressionLevel
>= LZ4HC_CLEVEL_MIN
) {
511 LZ4F_free(cctxPtr
->lz4CtxPtr
, cctxPtr
->cmem
);
518 /*-***************************************************
519 * Dictionary compression
520 *****************************************************/
522 struct LZ4F_CDict_s
{
525 LZ4_stream_t
* fastCtx
;
526 LZ4_streamHC_t
* HCCtx
;
527 }; /* typedef'd to LZ4F_CDict within lz4frame_static.h */
530 LZ4F_createCDict_advanced(LZ4F_CustomMem cmem
, const void* dictBuffer
, size_t dictSize
)
532 const char* dictStart
= (const char*)dictBuffer
;
533 LZ4F_CDict
* const cdict
= (LZ4F_CDict
*)LZ4F_malloc(sizeof(*cdict
), cmem
);
534 DEBUGLOG(4, "LZ4F_createCDict_advanced");
535 if (!cdict
) return NULL
;
537 if (dictSize
> 64 KB
) {
538 dictStart
+= dictSize
- 64 KB
;
541 cdict
->dictContent
= LZ4F_malloc(dictSize
, cmem
);
542 cdict
->fastCtx
= (LZ4_stream_t
*)LZ4F_malloc(sizeof(LZ4_stream_t
), cmem
);
544 LZ4_initStream(cdict
->fastCtx
, sizeof(LZ4_stream_t
));
545 cdict
->HCCtx
= (LZ4_streamHC_t
*)LZ4F_malloc(sizeof(LZ4_streamHC_t
), cmem
);
547 LZ4_initStream(cdict
->HCCtx
, sizeof(LZ4_streamHC_t
));
548 if (!cdict
->dictContent
|| !cdict
->fastCtx
|| !cdict
->HCCtx
) {
549 LZ4F_freeCDict(cdict
);
552 memcpy(cdict
->dictContent
, dictStart
, dictSize
);
553 LZ4_loadDict (cdict
->fastCtx
, (const char*)cdict
->dictContent
, (int)dictSize
);
554 LZ4_setCompressionLevel(cdict
->HCCtx
, LZ4HC_CLEVEL_DEFAULT
);
555 LZ4_loadDictHC(cdict
->HCCtx
, (const char*)cdict
->dictContent
, (int)dictSize
);
559 /*! LZ4F_createCDict() :
560 * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
561 * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
562 * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
563 * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict
564 * @return : digested dictionary for compression, or NULL if failed */
565 LZ4F_CDict
* LZ4F_createCDict(const void* dictBuffer
, size_t dictSize
)
567 DEBUGLOG(4, "LZ4F_createCDict");
568 return LZ4F_createCDict_advanced(LZ4F_defaultCMem
, dictBuffer
, dictSize
);
571 void LZ4F_freeCDict(LZ4F_CDict
* cdict
)
573 if (cdict
==NULL
) return; /* support free on NULL */
574 LZ4F_free(cdict
->dictContent
, cdict
->cmem
);
575 LZ4F_free(cdict
->fastCtx
, cdict
->cmem
);
576 LZ4F_free(cdict
->HCCtx
, cdict
->cmem
);
577 LZ4F_free(cdict
, cdict
->cmem
);
581 /*-*********************************
582 * Advanced compression functions
583 ***********************************/
586 LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem
, unsigned version
)
588 LZ4F_cctx
* const cctxPtr
=
589 (LZ4F_cctx
*)LZ4F_calloc(sizeof(LZ4F_cctx
), customMem
);
590 if (cctxPtr
==NULL
) return NULL
;
592 cctxPtr
->cmem
= customMem
;
593 cctxPtr
->version
= version
;
594 cctxPtr
->cStage
= 0; /* Uninitialized. Next stage : init cctx */
599 /*! LZ4F_createCompressionContext() :
600 * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
601 * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure.
602 * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries.
603 * The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
604 * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
605 * Object can release its memory using LZ4F_freeCompressionContext();
608 LZ4F_createCompressionContext(LZ4F_cctx
** LZ4F_compressionContextPtr
, unsigned version
)
610 assert(LZ4F_compressionContextPtr
!= NULL
); /* considered a violation of narrow contract */
611 /* in case it nonetheless happen in production */
612 RETURN_ERROR_IF(LZ4F_compressionContextPtr
== NULL
, parameter_null
);
614 *LZ4F_compressionContextPtr
= LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem
, version
);
615 RETURN_ERROR_IF(*LZ4F_compressionContextPtr
==NULL
, allocation_failed
);
616 return LZ4F_OK_NoError
;
620 LZ4F_errorCode_t
LZ4F_freeCompressionContext(LZ4F_cctx
* cctxPtr
)
622 if (cctxPtr
!= NULL
) { /* support free on NULL */
623 LZ4F_free(cctxPtr
->lz4CtxPtr
, cctxPtr
->cmem
); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
624 LZ4F_free(cctxPtr
->tmpBuff
, cctxPtr
->cmem
);
625 LZ4F_free(cctxPtr
, cctxPtr
->cmem
);
627 return LZ4F_OK_NoError
;
632 * This function prepares the internal LZ4(HC) stream for a new compression,
633 * resetting the context and attaching the dictionary, if there is one.
635 * It needs to be called at the beginning of each independent compression
636 * stream (i.e., at the beginning of a frame in blockLinked mode, or at the
637 * beginning of each block in blockIndependent mode).
639 static void LZ4F_initStream(void* ctx
,
640 const LZ4F_CDict
* cdict
,
642 LZ4F_blockMode_t blockMode
) {
643 if (level
< LZ4HC_CLEVEL_MIN
) {
644 if (cdict
!= NULL
|| blockMode
== LZ4F_blockLinked
) {
645 /* In these cases, we will call LZ4_compress_fast_continue(),
646 * which needs an already reset context. Otherwise, we'll call a
647 * one-shot API. The non-continued APIs internally perform their own
648 * resets at the beginning of their calls, where they know what
649 * tableType they need the context to be in. So in that case this
650 * would be misguided / wasted work. */
651 LZ4_resetStream_fast((LZ4_stream_t
*)ctx
);
653 LZ4_attach_dictionary((LZ4_stream_t
*)ctx
, cdict
? cdict
->fastCtx
: NULL
);
655 LZ4_resetStreamHC_fast((LZ4_streamHC_t
*)ctx
, level
);
656 LZ4_attach_HC_dictionary((LZ4_streamHC_t
*)ctx
, cdict
? cdict
->HCCtx
: NULL
);
/* ctxTypeID_to_size() :
 * maps a context type ID (1 = lz4 fast, 2 = lz4hc) to its state size in bytes;
 * unknown IDs (including 0 = none) map to 0. */
static int ctxTypeID_to_size(int ctxTypeID) {
    switch(ctxTypeID) {
    case 1:
        return LZ4_sizeofState();
    case 2:
        return LZ4_sizeofStateHC();
    default:
        return 0;
    }
}
671 /*! LZ4F_compressBegin_usingCDict() :
672 * init streaming compression AND writes frame header into @dstBuffer.
673 * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
674 * @return : number of bytes written into @dstBuffer for the header
675 * or an error code (can be tested using LZ4F_isError())
677 size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx
* cctxPtr
,
678 void* dstBuffer
, size_t dstCapacity
,
679 const LZ4F_CDict
* cdict
,
680 const LZ4F_preferences_t
* preferencesPtr
)
682 LZ4F_preferences_t
const prefNull
= LZ4F_INIT_PREFERENCES
;
683 BYTE
* const dstStart
= (BYTE
*)dstBuffer
;
684 BYTE
* dstPtr
= dstStart
;
686 RETURN_ERROR_IF(dstCapacity
< maxFHSize
, dstMaxSize_tooSmall
);
687 if (preferencesPtr
== NULL
) preferencesPtr
= &prefNull
;
688 cctxPtr
->prefs
= *preferencesPtr
;
690 /* cctx Management */
691 { U16
const ctxTypeID
= (cctxPtr
->prefs
.compressionLevel
< LZ4HC_CLEVEL_MIN
) ? 1 : 2;
692 int requiredSize
= ctxTypeID_to_size(ctxTypeID
);
693 int allocatedSize
= ctxTypeID_to_size(cctxPtr
->lz4CtxAlloc
);
694 if (allocatedSize
< requiredSize
) {
695 /* not enough space allocated */
696 LZ4F_free(cctxPtr
->lz4CtxPtr
, cctxPtr
->cmem
);
697 if (cctxPtr
->prefs
.compressionLevel
< LZ4HC_CLEVEL_MIN
) {
698 /* must take ownership of memory allocation,
699 * in order to respect custom allocator contract */
700 cctxPtr
->lz4CtxPtr
= LZ4F_malloc(sizeof(LZ4_stream_t
), cctxPtr
->cmem
);
701 if (cctxPtr
->lz4CtxPtr
)
702 LZ4_initStream(cctxPtr
->lz4CtxPtr
, sizeof(LZ4_stream_t
));
704 cctxPtr
->lz4CtxPtr
= LZ4F_malloc(sizeof(LZ4_streamHC_t
), cctxPtr
->cmem
);
705 if (cctxPtr
->lz4CtxPtr
)
706 LZ4_initStreamHC(cctxPtr
->lz4CtxPtr
, sizeof(LZ4_streamHC_t
));
708 RETURN_ERROR_IF(cctxPtr
->lz4CtxPtr
== NULL
, allocation_failed
);
709 cctxPtr
->lz4CtxAlloc
= ctxTypeID
;
710 cctxPtr
->lz4CtxState
= ctxTypeID
;
711 } else if (cctxPtr
->lz4CtxState
!= ctxTypeID
) {
712 /* otherwise, a sufficient buffer is already allocated,
713 * but we need to reset it to the correct context type */
714 if (cctxPtr
->prefs
.compressionLevel
< LZ4HC_CLEVEL_MIN
) {
715 LZ4_initStream((LZ4_stream_t
*)cctxPtr
->lz4CtxPtr
, sizeof(LZ4_stream_t
));
717 LZ4_initStreamHC((LZ4_streamHC_t
*)cctxPtr
->lz4CtxPtr
, sizeof(LZ4_streamHC_t
));
718 LZ4_setCompressionLevel((LZ4_streamHC_t
*)cctxPtr
->lz4CtxPtr
, cctxPtr
->prefs
.compressionLevel
);
720 cctxPtr
->lz4CtxState
= ctxTypeID
;
723 /* Buffer Management */
724 if (cctxPtr
->prefs
.frameInfo
.blockSizeID
== 0)
725 cctxPtr
->prefs
.frameInfo
.blockSizeID
= LZ4F_BLOCKSIZEID_DEFAULT
;
726 cctxPtr
->maxBlockSize
= LZ4F_getBlockSize(cctxPtr
->prefs
.frameInfo
.blockSizeID
);
728 { size_t const requiredBuffSize
= preferencesPtr
->autoFlush
?
729 ((cctxPtr
->prefs
.frameInfo
.blockMode
== LZ4F_blockLinked
) ? 64 KB
: 0) : /* only needs past data up to window size */
730 cctxPtr
->maxBlockSize
+ ((cctxPtr
->prefs
.frameInfo
.blockMode
== LZ4F_blockLinked
) ? 128 KB
: 0);
732 if (cctxPtr
->maxBufferSize
< requiredBuffSize
) {
733 cctxPtr
->maxBufferSize
= 0;
734 LZ4F_free(cctxPtr
->tmpBuff
, cctxPtr
->cmem
);
735 cctxPtr
->tmpBuff
= (BYTE
*)LZ4F_calloc(requiredBuffSize
, cctxPtr
->cmem
);
736 RETURN_ERROR_IF(cctxPtr
->tmpBuff
== NULL
, allocation_failed
);
737 cctxPtr
->maxBufferSize
= requiredBuffSize
;
739 cctxPtr
->tmpIn
= cctxPtr
->tmpBuff
;
740 cctxPtr
->tmpInSize
= 0;
741 (void)XXH32_reset(&(cctxPtr
->xxh
), 0);
744 cctxPtr
->cdict
= cdict
;
745 if (cctxPtr
->prefs
.frameInfo
.blockMode
== LZ4F_blockLinked
) {
746 /* frame init only for blockLinked : blockIndependent will be init at each block */
747 LZ4F_initStream(cctxPtr
->lz4CtxPtr
, cdict
, cctxPtr
->prefs
.compressionLevel
, LZ4F_blockLinked
);
749 if (preferencesPtr
->compressionLevel
>= LZ4HC_CLEVEL_MIN
) {
750 LZ4_favorDecompressionSpeed((LZ4_streamHC_t
*)cctxPtr
->lz4CtxPtr
, (int)preferencesPtr
->favorDecSpeed
);
754 LZ4F_writeLE32(dstPtr
, LZ4F_MAGICNUMBER
);
756 { BYTE
* const headerStart
= dstPtr
;
759 *dstPtr
++ = (BYTE
)(((1 & _2BITS
) << 6) /* Version('01') */
760 + ((cctxPtr
->prefs
.frameInfo
.blockMode
& _1BIT
) << 5)
761 + ((cctxPtr
->prefs
.frameInfo
.blockChecksumFlag
& _1BIT
) << 4)
762 + ((unsigned)(cctxPtr
->prefs
.frameInfo
.contentSize
> 0) << 3)
763 + ((cctxPtr
->prefs
.frameInfo
.contentChecksumFlag
& _1BIT
) << 2)
764 + (cctxPtr
->prefs
.frameInfo
.dictID
> 0) );
766 *dstPtr
++ = (BYTE
)((cctxPtr
->prefs
.frameInfo
.blockSizeID
& _3BITS
) << 4);
767 /* Optional Frame content size field */
768 if (cctxPtr
->prefs
.frameInfo
.contentSize
) {
769 LZ4F_writeLE64(dstPtr
, cctxPtr
->prefs
.frameInfo
.contentSize
);
771 cctxPtr
->totalInSize
= 0;
773 /* Optional dictionary ID field */
774 if (cctxPtr
->prefs
.frameInfo
.dictID
) {
775 LZ4F_writeLE32(dstPtr
, cctxPtr
->prefs
.frameInfo
.dictID
);
778 /* Header CRC Byte */
779 *dstPtr
= LZ4F_headerChecksum(headerStart
, (size_t)(dstPtr
- headerStart
));
783 cctxPtr
->cStage
= 1; /* header written, now request input data block */
784 return (size_t)(dstPtr
- dstStart
);
788 /*! LZ4F_compressBegin() :
789 * init streaming compression AND writes frame header into @dstBuffer.
790 * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
791 * @preferencesPtr can be NULL, in which case default parameters are selected.
792 * @return : number of bytes written into dstBuffer for the header
793 * or an error code (can be tested using LZ4F_isError())
795 size_t LZ4F_compressBegin(LZ4F_cctx
* cctxPtr
,
796 void* dstBuffer
, size_t dstCapacity
,
797 const LZ4F_preferences_t
* preferencesPtr
)
799 return LZ4F_compressBegin_usingCDict(cctxPtr
, dstBuffer
, dstCapacity
,
800 NULL
, preferencesPtr
);
804 /* LZ4F_compressBound() :
805 * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario.
806 * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario.
807 * This function cannot fail.
809 size_t LZ4F_compressBound(size_t srcSize
, const LZ4F_preferences_t
* preferencesPtr
)
811 if (preferencesPtr
&& preferencesPtr
->autoFlush
) {
812 return LZ4F_compressBound_internal(srcSize
, preferencesPtr
, 0);
814 return LZ4F_compressBound_internal(srcSize
, preferencesPtr
, (size_t)-1);
818 typedef int (*compressFunc_t
)(void* ctx
, const char* src
, char* dst
, int srcSize
, int dstSize
, int level
, const LZ4F_CDict
* cdict
);
821 /*! LZ4F_makeBlock():
822 * compress a single block, add header and optional checksum.
823 * assumption : dst buffer capacity is >= BHSize + srcSize + crcSize
825 static size_t LZ4F_makeBlock(void* dst
,
826 const void* src
, size_t srcSize
,
827 compressFunc_t compress
, void* lz4ctx
, int level
,
828 const LZ4F_CDict
* cdict
,
829 LZ4F_blockChecksum_t crcFlag
)
831 BYTE
* const cSizePtr
= (BYTE
*)dst
;
833 assert(compress
!= NULL
);
834 cSize
= (U32
)compress(lz4ctx
, (const char*)src
, (char*)(cSizePtr
+BHSize
),
835 (int)(srcSize
), (int)(srcSize
-1),
838 if (cSize
== 0 || cSize
>= srcSize
) {
839 cSize
= (U32
)srcSize
;
840 LZ4F_writeLE32(cSizePtr
, cSize
| LZ4F_BLOCKUNCOMPRESSED_FLAG
);
841 memcpy(cSizePtr
+BHSize
, src
, srcSize
);
843 LZ4F_writeLE32(cSizePtr
, cSize
);
846 U32
const crc32
= XXH32(cSizePtr
+BHSize
, cSize
, 0); /* checksum of compressed data */
847 LZ4F_writeLE32(cSizePtr
+BHSize
+cSize
, crc32
);
849 return BHSize
+ cSize
+ ((U32
)crcFlag
)*BFSize
;
853 static int LZ4F_compressBlock(void* ctx
, const char* src
, char* dst
, int srcSize
, int dstCapacity
, int level
, const LZ4F_CDict
* cdict
)
855 int const acceleration
= (level
< 0) ? -level
+ 1 : 1;
856 DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize
);
857 LZ4F_initStream(ctx
, cdict
, level
, LZ4F_blockIndependent
);
859 return LZ4_compress_fast_continue((LZ4_stream_t
*)ctx
, src
, dst
, srcSize
, dstCapacity
, acceleration
);
861 return LZ4_compress_fast_extState_fastReset(ctx
, src
, dst
, srcSize
, dstCapacity
, acceleration
);
865 static int LZ4F_compressBlock_continue(void* ctx
, const char* src
, char* dst
, int srcSize
, int dstCapacity
, int level
, const LZ4F_CDict
* cdict
)
867 int const acceleration
= (level
< 0) ? -level
+ 1 : 1;
868 (void)cdict
; /* init once at beginning of frame */
869 DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize
);
870 return LZ4_compress_fast_continue((LZ4_stream_t
*)ctx
, src
, dst
, srcSize
, dstCapacity
, acceleration
);
873 static int LZ4F_compressBlockHC(void* ctx
, const char* src
, char* dst
, int srcSize
, int dstCapacity
, int level
, const LZ4F_CDict
* cdict
)
875 LZ4F_initStream(ctx
, cdict
, level
, LZ4F_blockIndependent
);
877 return LZ4_compress_HC_continue((LZ4_streamHC_t
*)ctx
, src
, dst
, srcSize
, dstCapacity
);
879 return LZ4_compress_HC_extStateHC_fastReset(ctx
, src
, dst
, srcSize
, dstCapacity
, level
);
882 static int LZ4F_compressBlockHC_continue(void* ctx
, const char* src
, char* dst
, int srcSize
, int dstCapacity
, int level
, const LZ4F_CDict
* cdict
)
884 (void)level
; (void)cdict
; /* init once at beginning of frame */
885 return LZ4_compress_HC_continue((LZ4_streamHC_t
*)ctx
, src
, dst
, srcSize
, dstCapacity
);
888 static int LZ4F_doNotCompressBlock(void* ctx
, const char* src
, char* dst
, int srcSize
, int dstCapacity
, int level
, const LZ4F_CDict
* cdict
)
890 (void)ctx
; (void)src
; (void)dst
; (void)srcSize
; (void)dstCapacity
; (void)level
; (void)cdict
;
894 static compressFunc_t
LZ4F_selectCompression(LZ4F_blockMode_t blockMode
, int level
, LZ4F_blockCompression_t compressMode
)
896 if (compressMode
== LZ4B_UNCOMPRESSED
) return LZ4F_doNotCompressBlock
;
897 if (level
< LZ4HC_CLEVEL_MIN
) {
898 if (blockMode
== LZ4F_blockIndependent
) return LZ4F_compressBlock
;
899 return LZ4F_compressBlock_continue
;
901 if (blockMode
== LZ4F_blockIndependent
) return LZ4F_compressBlockHC
;
902 return LZ4F_compressBlockHC_continue
;
905 /* Save history (up to 64KB) into @tmpBuff */
906 static int LZ4F_localSaveDict(LZ4F_cctx_t
* cctxPtr
)
908 if (cctxPtr
->prefs
.compressionLevel
< LZ4HC_CLEVEL_MIN
)
909 return LZ4_saveDict ((LZ4_stream_t
*)(cctxPtr
->lz4CtxPtr
), (char*)(cctxPtr
->tmpBuff
), 64 KB
);
910 return LZ4_saveDictHC ((LZ4_streamHC_t
*)(cctxPtr
->lz4CtxPtr
), (char*)(cctxPtr
->tmpBuff
), 64 KB
);
913 typedef enum { notDone
, fromTmpBuffer
, fromSrcBuffer
} LZ4F_lastBlockStatus
;
915 static const LZ4F_compressOptions_t k_cOptionsNull
= { 0, { 0, 0, 0 } };
/*! LZ4F_compressUpdateImpl() :
 *  LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
 *  When successful, the function always entirely consumes @srcBuffer.
 *  src data is either buffered or compressed into @dstBuffer.
 *  If the block compression does not match the compression of the previous block, the old data is flushed
 *  and operations continue with the new compression mode.
 * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on.
 * @compressOptionsPtr is optional : provide NULL to mean "default".
 * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
 *           or an error code if it fails (which can be tested using LZ4F_isError())
 *  After an error, the state is left in a UB state, and must be re-initialized.
 */
static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
                                      void* dstBuffer, size_t dstCapacity,
                                      const void* srcBuffer, size_t srcSize,
                                      const LZ4F_compressOptions_t* compressOptionsPtr,
                                      LZ4F_blockCompression_t blockCompression)
{
    size_t const blockSize = cctxPtr->maxBlockSize;
    const BYTE* srcPtr = (const BYTE*)srcBuffer;
    const BYTE* const srcEnd = srcPtr + srcSize;
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;
    LZ4F_lastBlockStatus lastBlockCompressed = notDone;
    /* select once: the same block-compression function is used for every block of this call */
    compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode,
                                                           cctxPtr->prefs.compressionLevel,
                                                           blockCompression);
    size_t bytesWritten;
    DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);

    RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);   /* state must be initialized and waiting for next block */
    if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
        RETURN_ERROR(dstMaxSize_tooSmall);

    /* uncompressed blocks are stored verbatim, so dst must hold at least srcSize */
    if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize)
        RETURN_ERROR(dstMaxSize_tooSmall);

    /* flush currently written block, to continue with new block compression */
    if (cctxPtr->blockCompression != blockCompression) {
        bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
        dstPtr += bytesWritten;
        cctxPtr->blockCompression = blockCompression;
    }

    if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull;

    /* complete tmp buffer */
    if (cctxPtr->tmpInSize > 0) {   /* some data already within tmp buffer */
        size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
        assert(blockSize > cctxPtr->tmpInSize);
        if (sizeToCopy > srcSize) {
            /* add src to tmpIn buffer */
            memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
            srcPtr = srcEnd;
            cctxPtr->tmpInSize += srcSize;
            /* still needs some CRC */
        } else {
            /* complete tmpIn block and then compress it */
            lastBlockCompressed = fromTmpBuffer;
            memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
            srcPtr += sizeToCopy;

            dstPtr += LZ4F_makeBlock(dstPtr,
                                     cctxPtr->tmpIn, blockSize,
                                     compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                                     cctxPtr->cdict,
                                     cctxPtr->prefs.frameInfo.blockChecksumFlag);
            /* linked mode : keep the just-compressed block as history for the next one */
            if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
            cctxPtr->tmpInSize = 0;
        }
    }

    while ((size_t)(srcEnd - srcPtr) >= blockSize) {
        /* compress full blocks */
        lastBlockCompressed = fromSrcBuffer;
        dstPtr += LZ4F_makeBlock(dstPtr,
                                 srcPtr, blockSize,
                                 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                                 cctxPtr->cdict,
                                 cctxPtr->prefs.frameInfo.blockChecksumFlag);
        srcPtr += blockSize;
    }

    if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
        /* autoFlush : remaining input (< blockSize) is compressed */
        lastBlockCompressed = fromSrcBuffer;
        dstPtr += LZ4F_makeBlock(dstPtr,
                                 srcPtr, (size_t)(srcEnd - srcPtr),
                                 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                                 cctxPtr->cdict,
                                 cctxPtr->prefs.frameInfo.blockChecksumFlag);
        srcPtr = srcEnd;
    }

    /* preserve dictionary within @tmpBuff whenever necessary */
    if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
        /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */
        assert(blockCompression == LZ4B_COMPRESSED);
        if (compressOptionsPtr->stableSrc) {
            cctxPtr->tmpIn = cctxPtr->tmpBuff;  /* src is stable : dictionary remains in src across invocations */
        } else {
            int const realDictSize = LZ4F_localSaveDict(cctxPtr);
            assert(0 <= realDictSize && realDictSize <= 64 KB);
            cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
        }
    }

    /* keep tmpIn within limits */
    if (!(cctxPtr->prefs.autoFlush)   /* no autoflush : there may be some data left within internal buffer */
      && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) )   /* not enough room to store next block */
    {
        /* only preserve 64KB within internal buffer. Ensures there is enough room for next block.
         * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */
        int const realDictSize = LZ4F_localSaveDict(cctxPtr);
        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
        assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize));
    }

    /* some input data left, necessarily < blockSize */
    if (srcPtr < srcEnd) {
        /* fill tmp buffer */
        size_t const sizeToCopy = (size_t)(srcEnd - srcPtr);
        memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
        cctxPtr->tmpInSize = sizeToCopy;
    }

    /* content checksum covers the raw (uncompressed) input, whether buffered or compressed */
    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
        (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);

    cctxPtr->totalInSize += srcSize;
    return (size_t)(dstPtr - dstStart);
}
/*! LZ4F_compressUpdate() :
 *  LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
 *  When successful, the function always entirely consumes @srcBuffer.
 *  src data is either buffered or compressed into @dstBuffer.
 *  If previously an uncompressed block was written, buffered data is flushed
 *  before appending compressed data is continued.
 * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
 * @compressOptionsPtr is optional : provide NULL to mean "default".
 * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
 *           or an error code if it fails (which can be tested using LZ4F_isError())
 *  After an error, the state is left in a UB state, and must be re-initialized.
 */
size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
                           void* dstBuffer, size_t dstCapacity,
                           const void* srcBuffer, size_t srcSize,
                           const LZ4F_compressOptions_t* compressOptionsPtr)
{
    /* thin wrapper : same as the Impl, in compressed-block mode */
    return LZ4F_compressUpdateImpl(cctxPtr,
                                   dstBuffer, dstCapacity,
                                   srcBuffer, srcSize,
                                   compressOptionsPtr, LZ4B_COMPRESSED);
}
/*! LZ4F_uncompressedUpdate() :
 *  (note: the original comment title said "LZ4F_compressUpdate()" — copy-paste slip, fixed)
 *  LZ4F_uncompressedUpdate() can be called repetitively to add as much data as necessary.
 *  When successful, the function always entirely consumes @srcBuffer.
 *  src data is either buffered or stored uncompressed into @dstBuffer.
 *  If previously a compressed block was written, buffered data is flushed
 *  before appending uncompressed data is continued.
 *  This is only supported when LZ4F_blockIndependent is used.
 * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
 * @compressOptionsPtr is optional : provide NULL to mean "default".
 * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
 *           or an error code if it fails (which can be tested using LZ4F_isError())
 *  After an error, the state is left in a UB state, and must be re-initialized.
 */
size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
                               void* dstBuffer, size_t dstCapacity,
                               const void* srcBuffer, size_t srcSize,
                               const LZ4F_compressOptions_t* compressOptionsPtr)
{
    /* uncompressed (stored) blocks cannot carry linked-block history */
    RETURN_ERROR_IF(cctxPtr->prefs.frameInfo.blockMode != LZ4F_blockIndependent, blockMode_invalid);
    return LZ4F_compressUpdateImpl(cctxPtr,
                                   dstBuffer, dstCapacity,
                                   srcBuffer, srcSize,
                                   compressOptionsPtr, LZ4B_UNCOMPRESSED);
}
/*! LZ4F_flush() :
 *  When compressed data must be sent immediately, without waiting for a block to be filled,
 *  invoke LZ4_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
 *  The result of the function is the number of bytes written into dstBuffer.
 *  It can be zero, this means there was no data left within LZ4F_cctx.
 *  The function outputs an error code if it fails (can be tested using LZ4F_isError())
 *  LZ4F_compressOptions_t* is optional. NULL is a valid argument.
 */
size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
                  void* dstBuffer, size_t dstCapacity,
                  const LZ4F_compressOptions_t* compressOptionsPtr)
{
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;
    compressFunc_t compress;

    if (cctxPtr->tmpInSize == 0) return 0;   /* nothing to flush */
    RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);
    /* worst case for this partial block : stored block + block header + block checksum */
    RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall);
    (void)compressOptionsPtr;   /* not useful (yet) */

    /* select compression function */
    compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode,
                                      cctxPtr->prefs.compressionLevel,
                                      cctxPtr->blockCompression);

    /* compress tmp buffer */
    dstPtr += LZ4F_makeBlock(dstPtr,
                             cctxPtr->tmpIn, cctxPtr->tmpInSize,
                             compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                             cctxPtr->cdict,
                             cctxPtr->prefs.frameInfo.blockChecksumFlag);
    assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity));

    /* linked mode : flushed data becomes history for the next block */
    if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked)
        cctxPtr->tmpIn += cctxPtr->tmpInSize;
    cctxPtr->tmpInSize = 0;

    /* keep tmpIn within limits */
    if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) {   /* necessarily LZ4F_blockLinked */
        int const realDictSize = LZ4F_localSaveDict(cctxPtr);
        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
    }

    return (size_t)(dstPtr - dstStart);
}
/*! LZ4F_compressEnd() :
 *  When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
 *  It will flush whatever data remained within compressionContext (like LZ4_flush())
 *  but also properly finalize the frame, with an endMark and an (optional) checksum.
 *  LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
 * @return : the number of bytes written into dstBuffer (necessarily >= 4 (endMark size))
 *           or an error code if it fails (can be tested using LZ4F_isError())
 *  The context can then be used again to compress a new frame, starting with LZ4F_compressBegin().
 */
size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
                        void* dstBuffer, size_t dstCapacity,
                        const LZ4F_compressOptions_t* compressOptionsPtr)
{
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;

    size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
    DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
    FORWARD_IF_ERROR(flushSize);
    dstPtr += flushSize;

    assert(flushSize <= dstCapacity);
    dstCapacity -= flushSize;

    /* endMark : a 4-byte zero block-size field terminates the frame */
    RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall);
    LZ4F_writeLE32(dstPtr, 0);
    dstPtr += 4;   /* endMark */

    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
        U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
        RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);
        DEBUGLOG(5,"Writing 32-bit content checksum");
        LZ4F_writeLE32(dstPtr, xxh);
        dstPtr+=4;   /* content Checksum */
    }

    cctxPtr->cStage = 0;   /* state is now re-usable (with identical preferences) */
    cctxPtr->maxBufferSize = 0;   /* reuse HC context */

    /* if the frame header announced a content size, verify it was honored */
    if (cctxPtr->prefs.frameInfo.contentSize) {
        if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
            RETURN_ERROR(frameSize_wrong);
    }

    return (size_t)(dstPtr - dstStart);
}
/*-***************************************************
*   Frame Decompression
*****************************************************/

/* Decoder state machine stages.
 * NOTE(review): several enumerators and struct fields below were dropped by the
 * extraction of this chunk; they are reconstructed from the upstream lz4 source
 * that the visible members unambiguously match — confirm against the repository. */
typedef enum {
    dstage_getFrameHeader=0, dstage_storeFrameHeader,
    dstage_init,
    dstage_getBlockHeader, dstage_storeBlockHeader,
    dstage_copyDirect, dstage_getBlockChecksum,
    dstage_getCBlock, dstage_storeCBlock,
    dstage_flushOut,
    dstage_getSuffix, dstage_storeSuffix,
    dstage_getSFrameSize, dstage_storeSFrameSize,
    dstage_skipSkippable
} dStage_t;

struct LZ4F_dctx_s {
    LZ4F_CustomMem cmem;            /* allocator used for all internal buffers */
    LZ4F_frameInfo_t frameInfo;     /* decoded frame parameters */
    U32    version;                 /* API version the context was created with */
    dStage_t dStage;                /* current state-machine stage */
    U64    frameRemainingSize;      /* bytes of content still expected (when contentSize known) */
    size_t maxBlockSize;            /* derived from frame header's blockSizeID */
    size_t maxBufferSize;           /* current allocated size of tmpOutBuffer */
    BYTE*  tmpIn;                   /* staging buffer for partial block headers / blocks */
    size_t tmpInSize;               /* nb of bytes currently stored in tmpIn */
    size_t tmpInTarget;             /* nb of bytes tmpIn must reach before processing */
    BYTE*  tmpOutBuffer;            /* output staging + 64KB dictionary history */
    const BYTE* dict;               /* current dictionary (history) pointer */
    size_t dictSize;                /* current dictionary size */
    BYTE*  tmpOut;                  /* write position within tmpOutBuffer */
    size_t tmpOutSize;              /* nb of decoded bytes waiting in tmpOut */
    size_t tmpOutStart;             /* nb of tmpOut bytes already flushed to user */
    XXH32_state_t xxh;              /* running content checksum */
    XXH32_state_t blockChecksum;    /* running per-block checksum (uncompressed blocks) */
    int    skipChecksum;            /* when set, checksum verification is disabled */
    BYTE   header[LZ4F_HEADER_SIZE_MAX];  /* staging area for partial frame headers */
};  /* typedef'd to LZ4F_dctx in lz4frame.h */
/* Allocate and zero-initialize a decompression context using @customMem.
 * @return : the new context, or NULL on allocation failure.
 * Ownership: caller must release it with LZ4F_freeDecompressionContext(). */
LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
{
    /* calloc leaves dStage == dstage_getFrameHeader (0) : ready to decode a frame */
    LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem);
    if (dctx == NULL) return NULL;

    dctx->cmem = customMem;
    dctx->version = version;
    return dctx;
}
/*! LZ4F_createDecompressionContext() :
 *  Create a decompressionContext object, which will track all decompression operations.
 *  Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
 *  Object can later be released using LZ4F_freeDecompressionContext().
 * @return : if != 0, there was an error during context creation.
 */
LZ4F_errorCode_t
LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
{
    assert(LZ4F_decompressionContextPtr != NULL);  /* violation of narrow contract */
    RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null);  /* in case it nonetheless happen in production */

    *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber);
    if (*LZ4F_decompressionContextPtr == NULL) {  /* failed allocation */
        RETURN_ERROR(allocation_failed);
    }
    return LZ4F_OK_NoError;
}
/* Release a decompression context and its internal buffers.
 * Accepts NULL, like free().
 * @return : the dStage at release time, cast to an error code — non-zero means
 *           the context was freed mid-frame (decoding was not complete). */
LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
{
    LZ4F_errorCode_t result = LZ4F_OK_NoError;
    if (dctx != NULL) {   /* can accept NULL input, like free() */
        result = (LZ4F_errorCode_t)dctx->dStage;
        LZ4F_free(dctx->tmpIn, dctx->cmem);
        LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
        LZ4F_free(dctx, dctx->cmem);
    }
    return result;
}
/*==---   Streaming Decompression operations   ---==*/

/* Return the context to its initial state, ready to decode a new frame.
 * Internal buffers are kept allocated for reuse. */
void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
{
    dctx->dStage = dstage_getFrameHeader;
    /* NOTE(review): the dictionary-reset lines were dropped by extraction and are
     * reconstructed from the upstream source — confirm against the repository. */
    dctx->dict = NULL;
    dctx->dictSize = 0;
    dctx->skipChecksum = 0;
}
/*! LZ4F_decodeHeader() :
 *  input   : `src` points at the **beginning of the frame**
 *  output  : set internal values of dctx, such as
 *            dctx->frameInfo and dctx->dStage.
 *            Also allocates internal buffers.
 * @return : nb Bytes read from src (necessarily <= srcSize)
 *           or an error code (testable with LZ4F_isError())
 */
static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize)
{
    unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID;
    size_t frameHeaderSize;
    const BYTE* srcPtr = (const BYTE*)src;

    DEBUGLOG(5, "LZ4F_decodeHeader");
    /* need to decode header to get frameInfo */
    RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete);   /* minimal frame header size */
    MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));

    /* special case : skippable frames */
    if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
        dctx->frameInfo.frameType = LZ4F_skippableFrame;
        if (src == (void*)(dctx->header)) {
            /* header bytes were accumulated internally : keep them and wait for size field */
            dctx->tmpInSize = srcSize;
            dctx->tmpInTarget = 8;
            dctx->dStage = dstage_storeSFrameSize;
            return srcSize;
        } else {
            dctx->dStage = dstage_getSFrameSize;
            return 4;   /* magic number consumed */
        }
    }

    /* control magic number */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
        DEBUGLOG(4, "frame header error : unknown magic number");
        RETURN_ERROR(frameType_unknown);
    }
#endif
    dctx->frameInfo.frameType = LZ4F_frame;

    /* Flags byte : decode and validate each field */
    {   U32 const FLG = srcPtr[4];
        U32 const version = (FLG>>6) & _2BITS;
        blockChecksumFlag = (FLG>>4) & _1BIT;
        blockMode = (FLG>>5) & _1BIT;
        contentSizeFlag = (FLG>>3) & _1BIT;
        contentChecksumFlag = (FLG>>2) & _1BIT;
        dictIDFlag = FLG & _1BIT;
        /* validate */
        if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */
        if (version != 1) RETURN_ERROR(headerVersion_wrong);      /* Version Number, only supported value */
    }

    /* Frame Header Size */
    frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);

    if (srcSize < frameHeaderSize) {
        /* not enough input to fully decode frame header */
        if (srcPtr != dctx->header)
            memcpy(dctx->header, srcPtr, srcSize);
        dctx->tmpInSize = srcSize;
        dctx->tmpInTarget = frameHeaderSize;
        dctx->dStage = dstage_storeFrameHeader;
        return srcSize;
    }

    /* Block Descriptor byte */
    {   U32 const BD = srcPtr[5];
        blockSizeID = (BD>>4) & _3BITS;
        /* validate */
        if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set);   /* Reserved bit */
        if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid);    /* 4-7 only supported values for the time being */
        if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set);  /* Reserved bits */
    }

    /* check header checksum (covers bytes after the magic number, before HC byte) */
    assert(frameHeaderSize > 5);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    {   BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
        RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid);
    }
#endif

    /* save decoded parameters into dctx */
    dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
    dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
    dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
    dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
    dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
    if (contentSizeFlag)
        dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
    if (dictIDFlag)
        dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);

    dctx->dStage = dstage_init;

    return frameHeaderSize;
}
/*! LZ4F_headerSize() :
 * @return : size of frame header
 *           or an error code, which can be tested using LZ4F_isError()
 */
size_t LZ4F_headerSize(const void* src, size_t srcSize)
{
    RETURN_ERROR_IF(src == NULL, srcPtr_wrong);

    /* minimal srcSize to determine header size */
    if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH)
        RETURN_ERROR(frameHeader_incomplete);

    /* special case : skippable frames */
    if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START)
        return 8;   /* magic number (4) + frame size field (4) */

    /* control magic number */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
        RETURN_ERROR(frameType_unknown);
#endif

    /* Frame Header Size : varies with optional contentSize and dictID fields */
    {   BYTE const FLG = ((const BYTE*)src)[4];
        U32 const contentSizeFlag = (FLG>>3) & _1BIT;
        U32 const dictIDFlag = FLG & _1BIT;
        return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
    }
}
/*! LZ4F_getFrameInfo() :
 *  This function extracts frame parameters (max blockSize, frame checksum, etc.).
 *  Usage is optional. Objective is to provide relevant information for allocation purposes.
 *  This function works in 2 situations :
 *   - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process.
 *     Amount of input data provided must be large enough to successfully decode the frame header.
 *     A header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum.
 *   - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx.
 *  The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
 *  Decompression must resume from (srcBuffer + *srcSizePtr).
 * @return : an hint about how many srcSize bytes LZ4F_decompress() expects for next call,
 *           or an error code which can be tested using LZ4F_isError()
 *  note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
 *  note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
 */
LZ4F_errorCode_t
LZ4F_getFrameInfo(LZ4F_dctx* dctx,
                  LZ4F_frameInfo_t* frameInfoPtr,
                  const void* srcBuffer, size_t* srcSizePtr)
{
    LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
    if (dctx->dStage > dstage_storeFrameHeader) {
        /* frameInfo already decoded : consume nothing, just copy it out */
        size_t o=0, i=0;
        *srcSizePtr = 0;
        *frameInfoPtr = dctx->frameInfo;
        /* returns : recommended nb of bytes for LZ4F_decompress() */
        return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL);
    } else {
        if (dctx->dStage == dstage_storeFrameHeader) {
            /* frame decoding already started, in the middle of header => automatic fail */
            *srcSizePtr = 0;
            RETURN_ERROR(frameDecoding_alreadyStarted);
        } else {
            size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
            if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
            if (*srcSizePtr < hSize) {
                *srcSizePtr=0;
                RETURN_ERROR(frameHeader_incomplete);
            }

            {   size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
                if (LZ4F_isError(decodeResult)) {
                    *srcSizePtr = 0;   /* error : nothing consumed */
                } else {
                    *srcSizePtr = decodeResult;
                    decodeResult = BHSize;   /* block header size : hint for next call */
                }
                *frameInfoPtr = dctx->frameInfo;
                return decodeResult;
    }   }   }
}
/* LZ4F_updateDict() :
 * only used for LZ4F_blockLinked mode.
 * Maintains the 64 KB history window (dictionary) that linked blocks reference,
 * either directly within dstBuffer (prefix mode) or saved into tmpOutBuffer.
 * Condition : @dstPtr != NULL
 * @withinTmp : non-zero when [dstPtr,dstSize] was decoded into tmpOut (flushed from tmpOutBuffer).
 */
static void LZ4F_updateDict(LZ4F_dctx* dctx,
                      const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
                      unsigned withinTmp)
{
    assert(dstPtr != NULL);
    if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr;   /* will lead to prefix mode */
    assert(dctx->dict != NULL);

    if (dctx->dict + dctx->dictSize == dstPtr) {   /* prefix mode, everything within dstBuffer */
        dctx->dictSize += dstSize;
        return;
    }

    assert(dstPtr >= dstBufferStart);
    if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) {   /* history in dstBuffer becomes large enough to become dictionary */
        dctx->dict = (const BYTE*)dstBufferStart;
        dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
        return;
    }

    assert(dstSize < 64 KB);   /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */

    /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
    assert(dctx->tmpOutBuffer != NULL);

    if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) {   /* continue history within tmpOutBuffer */
        /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
        assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
        dctx->dictSize += dstSize;
        return;
    }

    if (withinTmp) {   /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
        size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
        size_t copySize = 64 KB - dctx->tmpOutSize;
        const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
        /* tmpOut already holds >= 64 KB of fresh history : old dict is irrelevant */
        if (dctx->tmpOutSize > 64 KB) copySize = 0;
        if (copySize > preserveSize) copySize = preserveSize;

        memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);

        dctx->dict = dctx->tmpOutBuffer;
        dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize;
        return;
    }

    if (dctx->dict == dctx->tmpOutBuffer) {   /* copy dst into tmp to complete dict */
        if (dctx->dictSize + dstSize > dctx->maxBufferSize) {   /* tmp buffer not large enough */
            /* keep only the most recent (64 KB - dstSize) of the old dictionary */
            size_t const preserveSize = 64 KB - dstSize;
            memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
            dctx->dictSize = preserveSize;
        }
        memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize);
        dctx->dictSize += dstSize;
        return;
    }

    /* join dict & dest into tmp */
    {   size_t preserveSize = 64 KB - dstSize;
        if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
        memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
        memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
        dctx->dict = dctx->tmpOutBuffer;
        dctx->dictSize = preserveSize + dstSize;
    }
}
1538 /*! LZ4F_decompress() :
1539 * Call this function repetitively to regenerate compressed data in srcBuffer.
1540 * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
1541 * into dstBuffer of capacity *dstSizePtr.
1543 * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
1545 * The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
1546 * If number of bytes read is < number of bytes provided, then decompression operation is not complete.
1547 * Remaining data will have to be presented again in a subsequent invocation.
1549 * The function result is an hint of the better srcSize to use for next call to LZ4F_decompress.
1550 * Schematically, it's the size of the current (or remaining) compressed block + header of next block.
1551 * Respecting the hint provides a small boost to performance, since it allows less buffer shuffling.
1552 * Note that this is just a hint, and it's always possible to any srcSize value.
1553 * When a frame is fully decoded, @return will be 0.
1554 * If decompression failed, @return is an error code which can be tested using LZ4F_isError().
1556 size_t LZ4F_decompress(LZ4F_dctx
* dctx
,
1557 void* dstBuffer
, size_t* dstSizePtr
,
1558 const void* srcBuffer
, size_t* srcSizePtr
,
1559 const LZ4F_decompressOptions_t
* decompressOptionsPtr
)
1561 LZ4F_decompressOptions_t optionsNull
;
1562 const BYTE
* const srcStart
= (const BYTE
*)srcBuffer
;
1563 const BYTE
* const srcEnd
= srcStart
+ *srcSizePtr
;
1564 const BYTE
* srcPtr
= srcStart
;
1565 BYTE
* const dstStart
= (BYTE
*)dstBuffer
;
1566 BYTE
* const dstEnd
= dstStart
? dstStart
+ *dstSizePtr
: NULL
;
1567 BYTE
* dstPtr
= dstStart
;
1568 const BYTE
* selectedIn
= NULL
;
1569 unsigned doAnotherStage
= 1;
1570 size_t nextSrcSizeHint
= 1;
1573 DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
1574 srcBuffer
, (unsigned)*srcSizePtr
, dstBuffer
, (unsigned)*dstSizePtr
);
1575 if (dstBuffer
== NULL
) assert(*dstSizePtr
== 0);
1576 MEM_INIT(&optionsNull
, 0, sizeof(optionsNull
));
1577 if (decompressOptionsPtr
==NULL
) decompressOptionsPtr
= &optionsNull
;
1580 assert(dctx
!= NULL
);
1581 dctx
->skipChecksum
|= (decompressOptionsPtr
->skipChecksums
!= 0); /* once set, disable for the remainder of the frame */
1583 /* behaves as a state machine */
1585 while (doAnotherStage
) {
1587 switch(dctx
->dStage
)
1590 case dstage_getFrameHeader
:
1591 DEBUGLOG(6, "dstage_getFrameHeader");
1592 if ((size_t)(srcEnd
-srcPtr
) >= maxFHSize
) { /* enough to decode - shortcut */
1593 size_t const hSize
= LZ4F_decodeHeader(dctx
, srcPtr
, (size_t)(srcEnd
-srcPtr
)); /* will update dStage appropriately */
1594 FORWARD_IF_ERROR(hSize
);
1598 dctx
->tmpInSize
= 0;
1599 if (srcEnd
-srcPtr
== 0) return minFHSize
; /* 0-size input */
1600 dctx
->tmpInTarget
= minFHSize
; /* minimum size to decode header */
1601 dctx
->dStage
= dstage_storeFrameHeader
;
1604 case dstage_storeFrameHeader
:
1605 DEBUGLOG(6, "dstage_storeFrameHeader");
1606 { size_t const sizeToCopy
= MIN(dctx
->tmpInTarget
- dctx
->tmpInSize
, (size_t)(srcEnd
- srcPtr
));
1607 memcpy(dctx
->header
+ dctx
->tmpInSize
, srcPtr
, sizeToCopy
);
1608 dctx
->tmpInSize
+= sizeToCopy
;
1609 srcPtr
+= sizeToCopy
;
1611 if (dctx
->tmpInSize
< dctx
->tmpInTarget
) {
1612 nextSrcSizeHint
= (dctx
->tmpInTarget
- dctx
->tmpInSize
) + BHSize
; /* rest of header + nextBlockHeader */
1613 doAnotherStage
= 0; /* not enough src data, ask for some more */
1616 FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx
, dctx
->header
, dctx
->tmpInTarget
) ); /* will update dStage appropriately */
1620 DEBUGLOG(6, "dstage_init");
1621 if (dctx
->frameInfo
.contentChecksumFlag
) (void)XXH32_reset(&(dctx
->xxh
), 0);
1622 /* internal buffers allocation */
1623 { size_t const bufferNeeded
= dctx
->maxBlockSize
1624 + ((dctx
->frameInfo
.blockMode
==LZ4F_blockLinked
) ? 128 KB
: 0);
1625 if (bufferNeeded
> dctx
->maxBufferSize
) { /* tmp buffers too small */
1626 dctx
->maxBufferSize
= 0; /* ensure allocation will be re-attempted on next entry*/
1627 LZ4F_free(dctx
->tmpIn
, dctx
->cmem
);
1628 dctx
->tmpIn
= (BYTE
*)LZ4F_malloc(dctx
->maxBlockSize
+ BFSize
/* block checksum */, dctx
->cmem
);
1629 RETURN_ERROR_IF(dctx
->tmpIn
== NULL
, allocation_failed
);
1630 LZ4F_free(dctx
->tmpOutBuffer
, dctx
->cmem
);
1631 dctx
->tmpOutBuffer
= (BYTE
*)LZ4F_malloc(bufferNeeded
, dctx
->cmem
);
1632 RETURN_ERROR_IF(dctx
->tmpOutBuffer
== NULL
, allocation_failed
);
1633 dctx
->maxBufferSize
= bufferNeeded
;
1635 dctx
->tmpInSize
= 0;
1636 dctx
->tmpInTarget
= 0;
1637 dctx
->tmpOut
= dctx
->tmpOutBuffer
;
1638 dctx
->tmpOutStart
= 0;
1639 dctx
->tmpOutSize
= 0;
1641 dctx
->dStage
= dstage_getBlockHeader
;
1644 case dstage_getBlockHeader
:
1645 if ((size_t)(srcEnd
- srcPtr
) >= BHSize
) {
1646 selectedIn
= srcPtr
;
1649 /* not enough input to read cBlockSize field */
1650 dctx
->tmpInSize
= 0;
1651 dctx
->dStage
= dstage_storeBlockHeader
;
1654 if (dctx
->dStage
== dstage_storeBlockHeader
) /* can be skipped */
1655 case dstage_storeBlockHeader
:
1656 { size_t const remainingInput
= (size_t)(srcEnd
- srcPtr
);
1657 size_t const wantedData
= BHSize
- dctx
->tmpInSize
;
1658 size_t const sizeToCopy
= MIN(wantedData
, remainingInput
);
1659 memcpy(dctx
->tmpIn
+ dctx
->tmpInSize
, srcPtr
, sizeToCopy
);
1660 srcPtr
+= sizeToCopy
;
1661 dctx
->tmpInSize
+= sizeToCopy
;
1663 if (dctx
->tmpInSize
< BHSize
) { /* not enough input for cBlockSize */
1664 nextSrcSizeHint
= BHSize
- dctx
->tmpInSize
;
1668 selectedIn
= dctx
->tmpIn
;
1669 } /* if (dctx->dStage == dstage_storeBlockHeader) */
1671 /* decode block header */
1672 { U32
const blockHeader
= LZ4F_readLE32(selectedIn
);
1673 size_t const nextCBlockSize
= blockHeader
& 0x7FFFFFFFU
;
1674 size_t const crcSize
= dctx
->frameInfo
.blockChecksumFlag
* BFSize
;
1675 if (blockHeader
==0) { /* frameEnd signal, no more block */
1676 DEBUGLOG(5, "end of frame");
1677 dctx
->dStage
= dstage_getSuffix
;
1680 if (nextCBlockSize
> dctx
->maxBlockSize
) {
1681 RETURN_ERROR(maxBlockSize_invalid
);
1683 if (blockHeader
& LZ4F_BLOCKUNCOMPRESSED_FLAG
) {
1684 /* next block is uncompressed */
1685 dctx
->tmpInTarget
= nextCBlockSize
;
1686 DEBUGLOG(5, "next block is uncompressed (size %u)", (U32
)nextCBlockSize
);
1687 if (dctx
->frameInfo
.blockChecksumFlag
) {
1688 (void)XXH32_reset(&dctx
->blockChecksum
, 0);
1690 dctx
->dStage
= dstage_copyDirect
;
1693 /* next block is a compressed block */
1694 dctx
->tmpInTarget
= nextCBlockSize
+ crcSize
;
1695 dctx
->dStage
= dstage_getCBlock
;
1696 if (dstPtr
==dstEnd
|| srcPtr
==srcEnd
) {
1697 nextSrcSizeHint
= BHSize
+ nextCBlockSize
+ crcSize
;
1703 case dstage_copyDirect
: /* uncompressed block */
1704 DEBUGLOG(6, "dstage_copyDirect");
1705 { size_t sizeToCopy
;
1706 if (dstPtr
== NULL
) {
1709 size_t const minBuffSize
= MIN((size_t)(srcEnd
-srcPtr
), (size_t)(dstEnd
-dstPtr
));
1710 sizeToCopy
= MIN(dctx
->tmpInTarget
, minBuffSize
);
1711 memcpy(dstPtr
, srcPtr
, sizeToCopy
);
1712 if (!dctx
->skipChecksum
) {
1713 if (dctx
->frameInfo
.blockChecksumFlag
) {
1714 (void)XXH32_update(&dctx
->blockChecksum
, srcPtr
, sizeToCopy
);
1716 if (dctx
->frameInfo
.contentChecksumFlag
)
1717 (void)XXH32_update(&dctx
->xxh
, srcPtr
, sizeToCopy
);
1719 if (dctx
->frameInfo
.contentSize
)
1720 dctx
->frameRemainingSize
-= sizeToCopy
;
1722 /* history management (linked blocks only)*/
1723 if (dctx
->frameInfo
.blockMode
== LZ4F_blockLinked
) {
1724 LZ4F_updateDict(dctx
, dstPtr
, sizeToCopy
, dstStart
, 0);
1727 srcPtr
+= sizeToCopy
;
1728 dstPtr
+= sizeToCopy
;
1729 if (sizeToCopy
== dctx
->tmpInTarget
) { /* all done */
1730 if (dctx
->frameInfo
.blockChecksumFlag
) {
1731 dctx
->tmpInSize
= 0;
1732 dctx
->dStage
= dstage_getBlockChecksum
;
1734 dctx
->dStage
= dstage_getBlockHeader
; /* new block */
1737 dctx
->tmpInTarget
-= sizeToCopy
; /* need to copy more */
1739 nextSrcSizeHint
= dctx
->tmpInTarget
+
1740 +(dctx
->frameInfo
.blockChecksumFlag
? BFSize
: 0)
1741 + BHSize
/* next header size */;
1745 /* check block checksum for recently transferred uncompressed block */
1746 case dstage_getBlockChecksum
:
1747 DEBUGLOG(6, "dstage_getBlockChecksum");
1748 { const void* crcSrc
;
1749 if ((srcEnd
-srcPtr
>= 4) && (dctx
->tmpInSize
==0)) {
1753 size_t const stillToCopy
= 4 - dctx
->tmpInSize
;
1754 size_t const sizeToCopy
= MIN(stillToCopy
, (size_t)(srcEnd
-srcPtr
));
1755 memcpy(dctx
->header
+ dctx
->tmpInSize
, srcPtr
, sizeToCopy
);
1756 dctx
->tmpInSize
+= sizeToCopy
;
1757 srcPtr
+= sizeToCopy
;
1758 if (dctx
->tmpInSize
< 4) { /* all input consumed */
1762 crcSrc
= dctx
->header
;
1764 if (!dctx
->skipChecksum
) {
1765 U32
const readCRC
= LZ4F_readLE32(crcSrc
);
1766 U32
const calcCRC
= XXH32_digest(&dctx
->blockChecksum
);
1767 #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
1768 DEBUGLOG(6, "compare block checksum");
1769 if (readCRC
!= calcCRC
) {
1770 DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
1772 RETURN_ERROR(blockChecksum_invalid
);
1779 dctx
->dStage
= dstage_getBlockHeader
; /* new block */
1782 case dstage_getCBlock
:
1783 DEBUGLOG(6, "dstage_getCBlock");
1784 if ((size_t)(srcEnd
-srcPtr
) < dctx
->tmpInTarget
) {
1785 dctx
->tmpInSize
= 0;
1786 dctx
->dStage
= dstage_storeCBlock
;
1789 /* input large enough to read full block directly */
1790 selectedIn
= srcPtr
;
1791 srcPtr
+= dctx
->tmpInTarget
;
1793 if (0) /* always jump over next block */
1794 case dstage_storeCBlock
:
1795 { size_t const wantedData
= dctx
->tmpInTarget
- dctx
->tmpInSize
;
1796 size_t const inputLeft
= (size_t)(srcEnd
-srcPtr
);
1797 size_t const sizeToCopy
= MIN(wantedData
, inputLeft
);
1798 memcpy(dctx
->tmpIn
+ dctx
->tmpInSize
, srcPtr
, sizeToCopy
);
1799 dctx
->tmpInSize
+= sizeToCopy
;
1800 srcPtr
+= sizeToCopy
;
1801 if (dctx
->tmpInSize
< dctx
->tmpInTarget
) { /* need more input */
1802 nextSrcSizeHint
= (dctx
->tmpInTarget
- dctx
->tmpInSize
)
1803 + (dctx
->frameInfo
.blockChecksumFlag
? BFSize
: 0)
1804 + BHSize
/* next header size */;
1808 selectedIn
= dctx
->tmpIn
;
1811 /* At this stage, input is large enough to decode a block */
1813 /* First, decode and control block checksum if it exists */
1814 if (dctx
->frameInfo
.blockChecksumFlag
) {
1815 assert(dctx
->tmpInTarget
>= 4);
1816 dctx
->tmpInTarget
-= 4;
1817 assert(selectedIn
!= NULL
); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
1818 { U32
const readBlockCrc
= LZ4F_readLE32(selectedIn
+ dctx
->tmpInTarget
);
1819 U32
const calcBlockCrc
= XXH32(selectedIn
, dctx
->tmpInTarget
, 0);
1820 #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
1821 RETURN_ERROR_IF(readBlockCrc
!= calcBlockCrc
, blockChecksum_invalid
);
1828 /* decode directly into destination buffer if there is enough room */
1829 if ( ((size_t)(dstEnd
-dstPtr
) >= dctx
->maxBlockSize
)
1830 /* unless the dictionary is stored in tmpOut:
1831 * in which case it's faster to decode within tmpOut
1832 * to benefit from prefix speedup */
1833 && !(dctx
->dict
!= NULL
&& (const BYTE
*)dctx
->dict
+ dctx
->dictSize
== dctx
->tmpOut
) )
1835 const char* dict
= (const char*)dctx
->dict
;
1836 size_t dictSize
= dctx
->dictSize
;
1838 assert(dstPtr
!= NULL
);
1839 if (dict
&& dictSize
> 1 GB
) {
1840 /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */
1841 dict
+= dictSize
- 64 KB
;
1844 decodedSize
= LZ4_decompress_safe_usingDict(
1845 (const char*)selectedIn
, (char*)dstPtr
,
1846 (int)dctx
->tmpInTarget
, (int)dctx
->maxBlockSize
,
1847 dict
, (int)dictSize
);
1848 RETURN_ERROR_IF(decodedSize
< 0, decompressionFailed
);
1849 if ((dctx
->frameInfo
.contentChecksumFlag
) && (!dctx
->skipChecksum
))
1850 XXH32_update(&(dctx
->xxh
), dstPtr
, (size_t)decodedSize
);
1851 if (dctx
->frameInfo
.contentSize
)
1852 dctx
->frameRemainingSize
-= (size_t)decodedSize
;
1854 /* dictionary management */
1855 if (dctx
->frameInfo
.blockMode
==LZ4F_blockLinked
) {
1856 LZ4F_updateDict(dctx
, dstPtr
, (size_t)decodedSize
, dstStart
, 0);
1859 dstPtr
+= decodedSize
;
1860 dctx
->dStage
= dstage_getBlockHeader
; /* end of block, let's get another one */
1864 /* not enough place into dst : decode into tmpOut */
1866 /* manage dictionary */
1867 if (dctx
->frameInfo
.blockMode
== LZ4F_blockLinked
) {
1868 if (dctx
->dict
== dctx
->tmpOutBuffer
) {
1869 /* truncate dictionary to 64 KB if too big */
1870 if (dctx
->dictSize
> 128 KB
) {
1871 memcpy(dctx
->tmpOutBuffer
, dctx
->dict
+ dctx
->dictSize
- 64 KB
, 64 KB
);
1872 dctx
->dictSize
= 64 KB
;
1874 dctx
->tmpOut
= dctx
->tmpOutBuffer
+ dctx
->dictSize
;
1875 } else { /* dict not within tmpOut */
1876 size_t const reservedDictSpace
= MIN(dctx
->dictSize
, 64 KB
);
1877 dctx
->tmpOut
= dctx
->tmpOutBuffer
+ reservedDictSpace
;
1880 /* Decode block into tmpOut */
1881 { const char* dict
= (const char*)dctx
->dict
;
1882 size_t dictSize
= dctx
->dictSize
;
1884 if (dict
&& dictSize
> 1 GB
) {
1885 /* the dictSize param is an int, avoid truncation / sign issues */
1886 dict
+= dictSize
- 64 KB
;
1889 decodedSize
= LZ4_decompress_safe_usingDict(
1890 (const char*)selectedIn
, (char*)dctx
->tmpOut
,
1891 (int)dctx
->tmpInTarget
, (int)dctx
->maxBlockSize
,
1892 dict
, (int)dictSize
);
1893 RETURN_ERROR_IF(decodedSize
< 0, decompressionFailed
);
1894 if (dctx
->frameInfo
.contentChecksumFlag
&& !dctx
->skipChecksum
)
1895 XXH32_update(&(dctx
->xxh
), dctx
->tmpOut
, (size_t)decodedSize
);
1896 if (dctx
->frameInfo
.contentSize
)
1897 dctx
->frameRemainingSize
-= (size_t)decodedSize
;
1898 dctx
->tmpOutSize
= (size_t)decodedSize
;
1899 dctx
->tmpOutStart
= 0;
1900 dctx
->dStage
= dstage_flushOut
;
1904 case dstage_flushOut
: /* flush decoded data from tmpOut to dstBuffer */
1905 DEBUGLOG(6, "dstage_flushOut");
1906 if (dstPtr
!= NULL
) {
1907 size_t const sizeToCopy
= MIN(dctx
->tmpOutSize
- dctx
->tmpOutStart
, (size_t)(dstEnd
-dstPtr
));
1908 memcpy(dstPtr
, dctx
->tmpOut
+ dctx
->tmpOutStart
, sizeToCopy
);
1910 /* dictionary management */
1911 if (dctx
->frameInfo
.blockMode
== LZ4F_blockLinked
)
1912 LZ4F_updateDict(dctx
, dstPtr
, sizeToCopy
, dstStart
, 1 /*withinTmp*/);
1914 dctx
->tmpOutStart
+= sizeToCopy
;
1915 dstPtr
+= sizeToCopy
;
1917 if (dctx
->tmpOutStart
== dctx
->tmpOutSize
) { /* all flushed */
1918 dctx
->dStage
= dstage_getBlockHeader
; /* get next block */
1921 /* could not flush everything : stop there, just request a block header */
1923 nextSrcSizeHint
= BHSize
;
1926 case dstage_getSuffix
:
1927 RETURN_ERROR_IF(dctx
->frameRemainingSize
, frameSize_wrong
); /* incorrect frame size decoded */
1928 if (!dctx
->frameInfo
.contentChecksumFlag
) { /* no checksum, frame is completed */
1929 nextSrcSizeHint
= 0;
1930 LZ4F_resetDecompressionContext(dctx
);
1934 if ((srcEnd
- srcPtr
) < 4) { /* not enough size for entire CRC */
1935 dctx
->tmpInSize
= 0;
1936 dctx
->dStage
= dstage_storeSuffix
;
1938 selectedIn
= srcPtr
;
1942 if (dctx
->dStage
== dstage_storeSuffix
) /* can be skipped */
1943 case dstage_storeSuffix
:
1944 { size_t const remainingInput
= (size_t)(srcEnd
- srcPtr
);
1945 size_t const wantedData
= 4 - dctx
->tmpInSize
;
1946 size_t const sizeToCopy
= MIN(wantedData
, remainingInput
);
1947 memcpy(dctx
->tmpIn
+ dctx
->tmpInSize
, srcPtr
, sizeToCopy
);
1948 srcPtr
+= sizeToCopy
;
1949 dctx
->tmpInSize
+= sizeToCopy
;
1950 if (dctx
->tmpInSize
< 4) { /* not enough input to read complete suffix */
1951 nextSrcSizeHint
= 4 - dctx
->tmpInSize
;
1955 selectedIn
= dctx
->tmpIn
;
1956 } /* if (dctx->dStage == dstage_storeSuffix) */
1958 /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */
1959 if (!dctx
->skipChecksum
) {
1960 U32
const readCRC
= LZ4F_readLE32(selectedIn
);
1961 U32
const resultCRC
= XXH32_digest(&(dctx
->xxh
));
1962 #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
1963 RETURN_ERROR_IF(readCRC
!= resultCRC
, contentChecksum_invalid
);
1969 nextSrcSizeHint
= 0;
1970 LZ4F_resetDecompressionContext(dctx
);
1974 case dstage_getSFrameSize
:
1975 if ((srcEnd
- srcPtr
) >= 4) {
1976 selectedIn
= srcPtr
;
1979 /* not enough input to read cBlockSize field */
1980 dctx
->tmpInSize
= 4;
1981 dctx
->tmpInTarget
= 8;
1982 dctx
->dStage
= dstage_storeSFrameSize
;
1985 if (dctx
->dStage
== dstage_storeSFrameSize
)
1986 case dstage_storeSFrameSize
:
1987 { size_t const sizeToCopy
= MIN(dctx
->tmpInTarget
- dctx
->tmpInSize
,
1988 (size_t)(srcEnd
- srcPtr
) );
1989 memcpy(dctx
->header
+ dctx
->tmpInSize
, srcPtr
, sizeToCopy
);
1990 srcPtr
+= sizeToCopy
;
1991 dctx
->tmpInSize
+= sizeToCopy
;
1992 if (dctx
->tmpInSize
< dctx
->tmpInTarget
) {
1993 /* not enough input to get full sBlockSize; wait for more */
1994 nextSrcSizeHint
= dctx
->tmpInTarget
- dctx
->tmpInSize
;
1998 selectedIn
= dctx
->header
+ 4;
1999 } /* if (dctx->dStage == dstage_storeSFrameSize) */
2001 /* case dstage_decodeSFrameSize: */ /* no direct entry */
2002 { size_t const SFrameSize
= LZ4F_readLE32(selectedIn
);
2003 dctx
->frameInfo
.contentSize
= SFrameSize
;
2004 dctx
->tmpInTarget
= SFrameSize
;
2005 dctx
->dStage
= dstage_skipSkippable
;
2009 case dstage_skipSkippable
:
2010 { size_t const skipSize
= MIN(dctx
->tmpInTarget
, (size_t)(srcEnd
-srcPtr
));
2012 dctx
->tmpInTarget
-= skipSize
;
2014 nextSrcSizeHint
= dctx
->tmpInTarget
;
2015 if (nextSrcSizeHint
) break; /* still more to skip */
2016 /* frame fully skipped : prepare context for a new frame */
2017 LZ4F_resetDecompressionContext(dctx
);
2020 } /* switch (dctx->dStage) */
2021 } /* while (doAnotherStage) */
2023 /* preserve history within tmpOut whenever necessary */
2024 LZ4F_STATIC_ASSERT((unsigned)dstage_init
== 2);
2025 if ( (dctx
->frameInfo
.blockMode
==LZ4F_blockLinked
) /* next block will use up to 64KB from previous ones */
2026 && (dctx
->dict
!= dctx
->tmpOutBuffer
) /* dictionary is not already within tmp */
2027 && (dctx
->dict
!= NULL
) /* dictionary exists */
2028 && (!decompressOptionsPtr
->stableDst
) /* cannot rely on dst data to remain there for next call */
2029 && ((unsigned)(dctx
->dStage
)-2 < (unsigned)(dstage_getSuffix
)-2) ) /* valid stages : [init ... getSuffix[ */
2031 if (dctx
->dStage
== dstage_flushOut
) {
2032 size_t const preserveSize
= (size_t)(dctx
->tmpOut
- dctx
->tmpOutBuffer
);
2033 size_t copySize
= 64 KB
- dctx
->tmpOutSize
;
2034 const BYTE
* oldDictEnd
= dctx
->dict
+ dctx
->dictSize
- dctx
->tmpOutStart
;
2035 if (dctx
->tmpOutSize
> 64 KB
) copySize
= 0;
2036 if (copySize
> preserveSize
) copySize
= preserveSize
;
2037 assert(dctx
->tmpOutBuffer
!= NULL
);
2039 memcpy(dctx
->tmpOutBuffer
+ preserveSize
- copySize
, oldDictEnd
- copySize
, copySize
);
2041 dctx
->dict
= dctx
->tmpOutBuffer
;
2042 dctx
->dictSize
= preserveSize
+ dctx
->tmpOutStart
;
2044 const BYTE
* const oldDictEnd
= dctx
->dict
+ dctx
->dictSize
;
2045 size_t const newDictSize
= MIN(dctx
->dictSize
, 64 KB
);
2047 memcpy(dctx
->tmpOutBuffer
, oldDictEnd
- newDictSize
, newDictSize
);
2049 dctx
->dict
= dctx
->tmpOutBuffer
;
2050 dctx
->dictSize
= newDictSize
;
2051 dctx
->tmpOut
= dctx
->tmpOutBuffer
+ newDictSize
;
2055 *srcSizePtr
= (size_t)(srcPtr
- srcStart
);
2056 *dstSizePtr
= (size_t)(dstPtr
- dstStart
);
2057 return nextSrcSizeHint
;
2060 /*! LZ4F_decompress_usingDict() :
2061 * Same as LZ4F_decompress(), using a predefined dictionary.
2062 * Dictionary is used "in place", without any preprocessing.
2063 * It must remain accessible throughout the entire frame decoding.
2065 size_t LZ4F_decompress_usingDict(LZ4F_dctx
* dctx
,
2066 void* dstBuffer
, size_t* dstSizePtr
,
2067 const void* srcBuffer
, size_t* srcSizePtr
,
2068 const void* dict
, size_t dictSize
,
2069 const LZ4F_decompressOptions_t
* decompressOptionsPtr
)
2071 if (dctx
->dStage
<= dstage_init
) {
2072 dctx
->dict
= (const BYTE
*)dict
;
2073 dctx
->dictSize
= dictSize
;
2075 return LZ4F_decompress(dctx
, dstBuffer
, dstSizePtr
,
2076 srcBuffer
, srcSizePtr
,
2077 decompressOptionsPtr
);