1 /* deflate.c -- compress data using the deflation algorithm
2 * Copyright (C) 1995-2023 Jean-loup Gailly and Mark Adler
3 * For conditions of distribution and use, see copyright notice in zlib.h
4 */
6 /*
7 * ALGORITHM
9 * The "deflation" process depends on being able to identify portions
10 * of the input text which are identical to earlier input (within a
11 * sliding window trailing behind the input currently being processed).
13 * The most straightforward technique turns out to be the fastest for
14 * most input files: try all possible matches and select the longest.
15 * The key feature of this algorithm is that insertions into the string
16 * dictionary are very simple and thus fast, and deletions are avoided
17 * completely. Insertions are performed at each input character, whereas
18 * string matches are performed only when the previous match ends. So it
19 * is preferable to spend more time in matches to allow very fast string
20 * insertions and avoid deletions. The matching algorithm for small
21 * strings is inspired by that of Rabin & Karp. A brute force approach
22 * is used to find longer strings when a small match has been found.
23 * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
24 * (by Leonid Broukhis).
25 * A previous version of this file used a more sophisticated algorithm
26 * (by Fiala and Greene) which is guaranteed to run in linear amortized
27 * time, but has a larger average cost, uses more memory and is patented.
28 * However the F&G algorithm may be faster for some highly redundant
29 * files if the parameter max_chain_length (described below) is too large.
31 * ACKNOWLEDGEMENTS
33 * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
34 * I found it in 'freeze' written by Leonid Broukhis.
35 * Thanks to many people for bug reports and testing.
37 * REFERENCES
39 * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
40 * Available at http://tools.ietf.org/html/rfc1951
42 * A description of the Rabin and Karp algorithm is given in the book
43 * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
45 * Fiala,E.R., and Greene,D.H.
46 * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
47 *
48 */
50 /* @(#) $Id$ */
52 #include "deflate.h"
54 const char deflate_copyright[] =
55 " deflate 1.3 Copyright 1995-2023 Jean-loup Gailly and Mark Adler ";
56 /*
57 If you use the zlib library in a product, an acknowledgment is welcome
58 in the documentation of your product. If for some reason you cannot
59 include such an acknowledgment, I would appreciate that you keep this
60 copyright string in the executable of your product.
61 */
63 typedef enum {
64 need_more, /* block not completed, need more input or more output */
65 block_done, /* block flush performed */
66 finish_started, /* finish started, need only more output at next deflate */
67 finish_done /* finish done, accept no more input or output */
68 } block_state;
70 typedef block_state (*compress_func)(deflate_state *s, int flush);
71 /* Compression function. Returns the block state after the call. */
73 local block_state deflate_stored(deflate_state *s, int flush);
74 local block_state deflate_fast(deflate_state *s, int flush);
75 #ifndef FASTEST
76 local block_state deflate_slow(deflate_state *s, int flush);
77 #endif
78 local block_state deflate_rle(deflate_state *s, int flush);
79 local block_state deflate_huff(deflate_state *s, int flush);
81 /* ===========================================================================
82 * Local data
85 #define NIL 0
86 /* Tail of hash chains */
88 #ifndef TOO_FAR
89 # define TOO_FAR 4096
90 #endif
91 /* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
93 /* Values for max_lazy_match, good_match and max_chain_length, depending on
94 * the desired pack level (0..9). The values given below have been tuned to
95 * exclude worst case performance for pathological files. Better values may be
96 * found for specific files.
98 typedef struct config_s {
99 ush good_length; /* reduce lazy search above this match length */
100 ush max_lazy; /* do not perform lazy search above this match length */
101 ush nice_length; /* quit search above this match length */
102 ush max_chain;
103 compress_func func;
104 } config;
106 #ifdef FASTEST
107 local const config configuration_table[2] = {
108 /* good lazy nice chain */
109 /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
110 /* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */
111 #else
112 local const config configuration_table[10] = {
113 /* good lazy nice chain */
114 /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
115 /* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */
116 /* 2 */ {4, 5, 16, 8, deflate_fast},
117 /* 3 */ {4, 6, 32, 32, deflate_fast},
119 /* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
120 /* 5 */ {8, 16, 32, 32, deflate_slow},
121 /* 6 */ {8, 16, 128, 128, deflate_slow},
122 /* 7 */ {8, 32, 128, 256, deflate_slow},
123 /* 8 */ {32, 128, 258, 1024, deflate_slow},
124 /* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */
125 #endif
127 /* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
128 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
129 * meaning.
132 /* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */
133 #define RANK(f) (((f) * 2) - ((f) > 4 ? 9 : 0))
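/* For example, with the flush values from zlib.h (Z_NO_FLUSH == 0,
 * Z_PARTIAL_FLUSH == 1, Z_SYNC_FLUSH == 2, Z_FULL_FLUSH == 3, Z_FINISH == 4,
 * Z_BLOCK == 5), RANK() yields 0, 2, 4, 6, 8 and 1 respectively, so the
 * duplicate-flush check in deflate() below orders them as
 * Z_NO_FLUSH < Z_BLOCK < Z_PARTIAL_FLUSH < Z_SYNC_FLUSH < Z_FULL_FLUSH
 * < Z_FINISH.
 */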
135 /* ===========================================================================
136 * Update a hash value with the given input byte
137 * IN assertion: all calls to UPDATE_HASH are made with consecutive input
138 * characters, so that a running hash key can be computed from the previous
139 * key instead of complete recalculation each time.
141 #define UPDATE_HASH(s,h,c) (h = (((h) << s->hash_shift) ^ (c)) & s->hash_mask)
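/* Because hash_shift is chosen so that MIN_MATCH * hash_shift >= hash_bits
 * (see deflateInit2_() below), a byte stops influencing the hash after
 * MIN_MATCH further updates: by then it has been shifted left by at least
 * hash_bits and is removed by hash_mask. The running key therefore depends
 * only on the last MIN_MATCH input bytes.
 */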
144 /* ===========================================================================
145 * Insert string str in the dictionary and set match_head to the previous head
146 * of the hash chain (the most recent string with same hash key). Return
147 * the previous length of the hash chain.
148 * If this file is compiled with -DFASTEST, the compression level is forced
149 * to 1, and no hash chains are maintained.
150 * IN assertion: all calls to INSERT_STRING are made with consecutive input
151 * characters and the first MIN_MATCH bytes of str are valid (except for
152 * the last MIN_MATCH-1 bytes of the input file).
154 #ifdef FASTEST
155 #define INSERT_STRING(s, str, match_head) \
156 (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
157 match_head = s->head[s->ins_h], \
158 s->head[s->ins_h] = (Pos)(str))
159 #else
160 #define INSERT_STRING(s, str, match_head) \
161 (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
162 match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
163 s->head[s->ins_h] = (Pos)(str))
164 #endif
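/* In the non-FASTEST build the resulting chains are walked newest-first:
 * head[h] holds the window position of the most recent string with hash h,
 * and prev[pos & w_mask] holds the next older position with the same hash.
 * A match search is, roughly (ignoring the max_chain_length cap):
 *
 *     IPos cur = s->head[s->ins_h];
 *     while (cur > limit)
 *         cur = s->prev[cur & s->w_mask];
 *
 * which is the loop structure of longest_match() below.
 */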
166 /* ===========================================================================
167 * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
168 * prev[] will be initialized on the fly.
170 #define CLEAR_HASH(s) \
171 do { \
172 s->head[s->hash_size - 1] = NIL; \
173 zmemzero((Bytef *)s->head, \
174 (unsigned)(s->hash_size - 1)*sizeof(*s->head)); \
175 } while (0)
177 /* ===========================================================================
178 * Slide the hash table when sliding the window down (could be avoided with 32
179 * bit values at the expense of memory usage). We slide even when level == 0 to
180 * keep the hash table consistent if we switch back to level > 0 later.
182 #if defined(__has_feature)
183 # if __has_feature(memory_sanitizer)
184 __attribute__((no_sanitize("memory")))
185 # endif
186 #endif
187 local void slide_hash(deflate_state *s) {
188 unsigned n, m;
189 Posf *p;
190 uInt wsize = s->w_size;
192 n = s->hash_size;
193 p = &s->head[n];
194 do {
195 m = *--p;
196 *p = (Pos)(m >= wsize ? m - wsize : NIL);
197 } while (--n);
198 n = wsize;
199 #ifndef FASTEST
200 p = &s->prev[n];
201 do {
202 m = *--p;
203 *p = (Pos)(m >= wsize ? m - wsize : NIL);
204 /* If n is not on any hash chain, prev[n] is garbage but
205 * its value will never be used.
207 } while (--n);
208 #endif
211 /* ===========================================================================
212 * Read a new buffer from the current input stream, update the adler32
213 * and total number of bytes read. All deflate() input goes through
214 * this function so some applications may wish to modify it to avoid
215 * allocating a large strm->next_in buffer and copying from it.
216 * (See also flush_pending()).
218 local unsigned read_buf(z_streamp strm, Bytef *buf, unsigned size) {
219 unsigned len = strm->avail_in;
221 if (len > size) len = size;
222 if (len == 0) return 0;
224 strm->avail_in -= len;
226 zmemcpy(buf, strm->next_in, len);
227 if (strm->state->wrap == 1) {
228 strm->adler = adler32(strm->adler, buf, len);
230 #ifdef GZIP
231 else if (strm->state->wrap == 2) {
232 strm->adler = crc32(strm->adler, buf, len);
234 #endif
235 strm->next_in += len;
236 strm->total_in += len;
238 return len;
241 /* ===========================================================================
242 * Fill the window when the lookahead becomes insufficient.
243 * Updates strstart and lookahead.
245 * IN assertion: lookahead < MIN_LOOKAHEAD
246 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
247 * At least one byte has been read, or avail_in == 0; reads are
248 * performed for at least two bytes (required for the zip translate_eol
249 * option -- not supported here).
251 local void fill_window(deflate_state *s) {
252 unsigned n;
253 unsigned more; /* Amount of free space at the end of the window. */
254 uInt wsize = s->w_size;
256 Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
258 do {
259 more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
261 /* Deal with !@#$% 64K limit: */
262 if (sizeof(int) <= 2) {
263 if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
264 more = wsize;
266 } else if (more == (unsigned)(-1)) {
267 /* Very unlikely, but possible on 16 bit machine if
268 * strstart == 0 && lookahead == 1 (input done a byte at time)
270 more--;
274 /* If the window is almost full and there is insufficient lookahead,
275 * move the upper half to the lower one to make room in the upper half.
277 if (s->strstart >= wsize + MAX_DIST(s)) {
279 zmemcpy(s->window, s->window + wsize, (unsigned)wsize - more);
280 s->match_start -= wsize;
281 s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
282 s->block_start -= (long) wsize;
283 if (s->insert > s->strstart)
284 s->insert = s->strstart;
285 slide_hash(s);
286 more += wsize;
288 if (s->strm->avail_in == 0) break;
290 /* If there was no sliding:
291 * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
292 * more == window_size - lookahead - strstart
293 * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
294 * => more >= window_size - 2*WSIZE + 2
295 * In the BIG_MEM or MMAP case (not yet supported),
296 * window_size == input_size + MIN_LOOKAHEAD &&
297 * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
298 * Otherwise, window_size == 2*WSIZE so more >= 2.
299 * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
301 Assert(more >= 2, "more < 2");
303 n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
304 s->lookahead += n;
306 /* Initialize the hash value now that we have some input: */
307 if (s->lookahead + s->insert >= MIN_MATCH) {
308 uInt str = s->strstart - s->insert;
309 s->ins_h = s->window[str];
310 UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
311 #if MIN_MATCH != 3
312 Call UPDATE_HASH() MIN_MATCH-3 more times
313 #endif
314 while (s->insert) {
315 UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
316 #ifndef FASTEST
317 s->prev[str & s->w_mask] = s->head[s->ins_h];
318 #endif
319 s->head[s->ins_h] = (Pos)str;
320 str++;
321 s->insert--;
322 if (s->lookahead + s->insert < MIN_MATCH)
323 break;
326 /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
327 * but this is not important since only literal bytes will be emitted.
330 } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
332 /* If the WIN_INIT bytes after the end of the current data have never been
333 * written, then zero those bytes in order to avoid memory check reports of
334 * the use of uninitialized (or uninitialised as Julian writes) bytes by
335 * the longest match routines. Update the high water mark for the next
336 * time through here. WIN_INIT is set to MAX_MATCH since the longest match
337 * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
339 if (s->high_water < s->window_size) {
340 ulg curr = s->strstart + (ulg)(s->lookahead);
341 ulg init;
343 if (s->high_water < curr) {
344 /* Previous high water mark below current data -- zero WIN_INIT
345 * bytes or up to end of window, whichever is less.
347 init = s->window_size - curr;
348 if (init > WIN_INIT)
349 init = WIN_INIT;
350 zmemzero(s->window + curr, (unsigned)init);
351 s->high_water = curr + init;
353 else if (s->high_water < (ulg)curr + WIN_INIT) {
354 /* High water mark at or above current data, but below current data
355 * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
356 * to end of window, whichever is less.
358 init = (ulg)curr + WIN_INIT - s->high_water;
359 if (init > s->window_size - s->high_water)
360 init = s->window_size - s->high_water;
361 zmemzero(s->window + s->high_water, (unsigned)init);
362 s->high_water += init;
366 Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
367 "not enough room for search");
370 /* ========================================================================= */
371 int ZEXPORT deflateInit_(z_streamp strm, int level, const char *version,
372 int stream_size) {
373 return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
374 Z_DEFAULT_STRATEGY, version, stream_size);
375 /* To do: ignore strm->next_in if we use it as window */
378 /* ========================================================================= */
379 int ZEXPORT deflateInit2_(z_streamp strm, int level, int method,
380 int windowBits, int memLevel, int strategy,
381 const char *version, int stream_size) {
382 deflate_state *s;
383 int wrap = 1;
384 static const char my_version[] = ZLIB_VERSION;
386 if (version == Z_NULL || version[0] != my_version[0] ||
387 stream_size != sizeof(z_stream)) {
388 return Z_VERSION_ERROR;
390 if (strm == Z_NULL) return Z_STREAM_ERROR;
392 strm->msg = Z_NULL;
393 if (strm->zalloc == (alloc_func)0) {
394 #ifdef Z_SOLO
395 return Z_STREAM_ERROR;
396 #else
397 strm->zalloc = zcalloc;
398 strm->opaque = (voidpf)0;
399 #endif
401 if (strm->zfree == (free_func)0)
402 #ifdef Z_SOLO
403 return Z_STREAM_ERROR;
404 #else
405 strm->zfree = zcfree;
406 #endif
408 #ifdef FASTEST
409 if (level != 0) level = 1;
410 #else
411 if (level == Z_DEFAULT_COMPRESSION) level = 6;
412 #endif
414 if (windowBits < 0) { /* suppress zlib wrapper */
415 wrap = 0;
416 if (windowBits < -15)
417 return Z_STREAM_ERROR;
418 windowBits = -windowBits;
420 #ifdef GZIP
421 else if (windowBits > 15) {
422 wrap = 2; /* write gzip wrapper instead */
423 windowBits -= 16;
425 #endif
426 if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
427 windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
428 strategy < 0 || strategy > Z_FIXED || (windowBits == 8 && wrap != 1)) {
429 return Z_STREAM_ERROR;
431 if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */
432 s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
433 if (s == Z_NULL) return Z_MEM_ERROR;
434 strm->state = (struct internal_state FAR *)s;
435 s->strm = strm;
436 s->status = INIT_STATE; /* to pass state test in deflateReset() */
438 s->wrap = wrap;
439 s->gzhead = Z_NULL;
440 s->w_bits = (uInt)windowBits;
441 s->w_size = 1 << s->w_bits;
442 s->w_mask = s->w_size - 1;
444 s->hash_bits = (uInt)memLevel + 7;
445 s->hash_size = 1 << s->hash_bits;
446 s->hash_mask = s->hash_size - 1;
447 s->hash_shift = ((s->hash_bits + MIN_MATCH-1) / MIN_MATCH);
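/* With the default memLevel of 8 this gives hash_bits == 15,
 * hash_size == 32768 and hash_shift == 5.
 */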
449 s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
450 s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
451 s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
453 s->high_water = 0; /* nothing written to s->window yet */
455 s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
457 /* We overlay pending_buf and sym_buf. This works since the average size
458 * for length/distance pairs over any compressed block is assured to be 31
459 * bits or less.
461 * Analysis: The longest fixed codes are a length code of 8 bits plus 5
462 * extra bits, for lengths 131 to 257. The longest fixed distance codes are
463 * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest
464 * possible fixed-codes length/distance pair is then 31 bits total.
466 * sym_buf starts one-fourth of the way into pending_buf. So there are
467 * three bytes in sym_buf for every four bytes in pending_buf. Each symbol
468 * in sym_buf is three bytes -- two for the distance and one for the
469 * literal/length. As each symbol is consumed, the pointer to the next
470 * sym_buf value to read moves forward three bytes. From that symbol, up to
471 * 31 bits are written to pending_buf. The closest the written pending_buf
472 * bits gets to the next sym_buf symbol to read is just before the last
473 * code is written. At that time, 31*(n - 2) bits have been written, just
474 * after 24*(n - 2) bits have been consumed from sym_buf. sym_buf starts at
475 * 8*n bits into pending_buf. (Note that the symbol buffer fills when n - 1
476 * symbols are written.) The closest the writing gets to what is unread is
477 * then n + 14 bits. Here n is lit_bufsize, which is 16384 by default, and
478 * can range from 128 to 32768.
480 * Therefore, at a minimum, there are 142 bits of space between what is
481 * written and what is read in the overlain buffers, so the symbols cannot
482 * be overwritten by the compressed data. That space is actually 139 bits,
483 * due to the three-bit fixed-code block header.
485 * That covers the case where either Z_FIXED is specified, forcing fixed
486 * codes, or when the use of fixed codes is chosen, because that choice
487 * results in a smaller compressed block than dynamic codes. That latter
488 * condition then assures that the above analysis also covers all dynamic
489 * blocks. A dynamic-code block will only be chosen to be emitted if it has
490 * fewer bits than a fixed-code block would for the same set of symbols.
491 * Therefore its average symbol length is assured to be less than 31. So
492 * the compressed data for a dynamic block also cannot overwrite the
493 * symbols from which it is being constructed.
496 s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4);
497 s->pending_buf_size = (ulg)s->lit_bufsize * 4;
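/* Restating the worst case above: when the last code of a block is written,
 * at most 31*(n - 2) bits have been produced while the read pointer is at
 * 8*n + 24*(n - 2) bits into pending_buf, so the gap is
 * (8*n + 24*(n - 2)) - 31*(n - 2) = n + 14 bits. With the minimum lit_bufsize
 * of 128 that is 142 bits, or 139 after the three-bit block header, matching
 * the figures quoted above.
 */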
499 if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
500 s->pending_buf == Z_NULL) {
501 s->status = FINISH_STATE;
502 strm->msg = ERR_MSG(Z_MEM_ERROR);
503 deflateEnd (strm);
504 return Z_MEM_ERROR;
506 s->sym_buf = s->pending_buf + s->lit_bufsize;
507 s->sym_end = (s->lit_bufsize - 1) * 3;
508 /* We avoid equality with lit_bufsize*3 because of wraparound at 64K
509 * on 16 bit machines and because stored blocks are restricted to
510 * 64K-1 bytes.
513 s->level = level;
514 s->strategy = strategy;
515 s->method = (Byte)method;
517 return deflateReset(strm);
520 /* =========================================================================
521 * Check for a valid deflate stream state. Return 0 if ok, 1 if not.
523 local int deflateStateCheck(z_streamp strm) {
524 deflate_state *s;
525 if (strm == Z_NULL ||
526 strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0)
527 return 1;
528 s = strm->state;
529 if (s == Z_NULL || s->strm != strm || (s->status != INIT_STATE &&
530 #ifdef GZIP
531 s->status != GZIP_STATE &&
532 #endif
533 s->status != EXTRA_STATE &&
534 s->status != NAME_STATE &&
535 s->status != COMMENT_STATE &&
536 s->status != HCRC_STATE &&
537 s->status != BUSY_STATE &&
538 s->status != FINISH_STATE))
539 return 1;
540 return 0;
543 /* ========================================================================= */
544 int ZEXPORT deflateSetDictionary(z_streamp strm, const Bytef *dictionary,
545 uInt dictLength) {
546 deflate_state *s;
547 uInt str, n;
548 int wrap;
549 unsigned avail;
550 z_const unsigned char *next;
552 if (deflateStateCheck(strm) || dictionary == Z_NULL)
553 return Z_STREAM_ERROR;
554 s = strm->state;
555 wrap = s->wrap;
556 if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead)
557 return Z_STREAM_ERROR;
559 /* when using zlib wrappers, compute Adler-32 for provided dictionary */
560 if (wrap == 1)
561 strm->adler = adler32(strm->adler, dictionary, dictLength);
562 s->wrap = 0; /* avoid computing Adler-32 in read_buf */
564 /* if dictionary would fill window, just replace the history */
565 if (dictLength >= s->w_size) {
566 if (wrap == 0) { /* already empty otherwise */
567 CLEAR_HASH(s);
568 s->strstart = 0;
569 s->block_start = 0L;
570 s->insert = 0;
572 dictionary += dictLength - s->w_size; /* use the tail */
573 dictLength = s->w_size;
576 /* insert dictionary into window and hash */
577 avail = strm->avail_in;
578 next = strm->next_in;
579 strm->avail_in = dictLength;
580 strm->next_in = (z_const Bytef *)dictionary;
581 fill_window(s);
582 while (s->lookahead >= MIN_MATCH) {
583 str = s->strstart;
584 n = s->lookahead - (MIN_MATCH-1);
585 do {
586 UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
587 #ifndef FASTEST
588 s->prev[str & s->w_mask] = s->head[s->ins_h];
589 #endif
590 s->head[s->ins_h] = (Pos)str;
591 str++;
592 } while (--n);
593 s->strstart = str;
594 s->lookahead = MIN_MATCH-1;
595 fill_window(s);
597 s->strstart += s->lookahead;
598 s->block_start = (long)s->strstart;
599 s->insert = s->lookahead;
600 s->lookahead = 0;
601 s->match_length = s->prev_length = MIN_MATCH-1;
602 s->match_available = 0;
603 strm->next_in = next;
604 strm->avail_in = avail;
605 s->wrap = wrap;
606 return Z_OK;
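/* Illustrative call sequence (a sketch, not part of the library; the buffer
 * names are placeholders). With the zlib wrapper, a preset dictionary must be
 * installed right after deflateInit()/deflateReset() and before the first
 * deflate() call, which is what the wrap/status check above enforces:
 *
 *     deflateInit(&strm, Z_BEST_COMPRESSION);
 *     deflateSetDictionary(&strm, dict, dictLen);
 *     ... run deflate() as usual ...
 *
 * The decompressor must then supply the same dictionary through
 * inflateSetDictionary() when inflate() returns Z_NEED_DICT.
 */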
609 /* ========================================================================= */
610 int ZEXPORT deflateGetDictionary(z_streamp strm, Bytef *dictionary,
611 uInt *dictLength) {
612 deflate_state *s;
613 uInt len;
615 if (deflateStateCheck(strm))
616 return Z_STREAM_ERROR;
617 s = strm->state;
618 len = s->strstart + s->lookahead;
619 if (len > s->w_size)
620 len = s->w_size;
621 if (dictionary != Z_NULL && len)
622 zmemcpy(dictionary, s->window + s->strstart + s->lookahead - len, len);
623 if (dictLength != Z_NULL)
624 *dictLength = len;
625 return Z_OK;
628 /* ========================================================================= */
629 int ZEXPORT deflateResetKeep(z_streamp strm) {
630 deflate_state *s;
632 if (deflateStateCheck(strm)) {
633 return Z_STREAM_ERROR;
636 strm->total_in = strm->total_out = 0;
637 strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
638 strm->data_type = Z_UNKNOWN;
640 s = (deflate_state *)strm->state;
641 s->pending = 0;
642 s->pending_out = s->pending_buf;
644 if (s->wrap < 0) {
645 s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
647 s->status =
648 #ifdef GZIP
649 s->wrap == 2 ? GZIP_STATE :
650 #endif
651 INIT_STATE;
652 strm->adler =
653 #ifdef GZIP
654 s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
655 #endif
656 adler32(0L, Z_NULL, 0);
657 s->last_flush = -2;
659 _tr_init(s);
661 return Z_OK;
664 /* ===========================================================================
665 * Initialize the "longest match" routines for a new zlib stream
667 local void lm_init(deflate_state *s) {
668 s->window_size = (ulg)2L*s->w_size;
670 CLEAR_HASH(s);
672 /* Set the default configuration parameters:
674 s->max_lazy_match = configuration_table[s->level].max_lazy;
675 s->good_match = configuration_table[s->level].good_length;
676 s->nice_match = configuration_table[s->level].nice_length;
677 s->max_chain_length = configuration_table[s->level].max_chain;
679 s->strstart = 0;
680 s->block_start = 0L;
681 s->lookahead = 0;
682 s->insert = 0;
683 s->match_length = s->prev_length = MIN_MATCH-1;
684 s->match_available = 0;
685 s->ins_h = 0;
688 /* ========================================================================= */
689 int ZEXPORT deflateReset(z_streamp strm) {
690 int ret;
692 ret = deflateResetKeep(strm);
693 if (ret == Z_OK)
694 lm_init(strm->state);
695 return ret;
698 /* ========================================================================= */
699 int ZEXPORT deflateSetHeader(z_streamp strm, gz_headerp head) {
700 if (deflateStateCheck(strm) || strm->state->wrap != 2)
701 return Z_STREAM_ERROR;
702 strm->state->gzhead = head;
703 return Z_OK;
706 /* ========================================================================= */
707 int ZEXPORT deflatePending(z_streamp strm, unsigned *pending, int *bits) {
708 if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
709 if (pending != Z_NULL)
710 *pending = strm->state->pending;
711 if (bits != Z_NULL)
712 *bits = strm->state->bi_valid;
713 return Z_OK;
716 /* ========================================================================= */
717 int ZEXPORT deflatePrime(z_streamp strm, int bits, int value) {
718 deflate_state *s;
719 int put;
721 if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
722 s = strm->state;
723 if (bits < 0 || bits > 16 ||
724 s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
725 return Z_BUF_ERROR;
726 do {
727 put = Buf_size - s->bi_valid;
728 if (put > bits)
729 put = bits;
730 s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid);
731 s->bi_valid += put;
732 _tr_flush_bits(s);
733 value >>= put;
734 bits -= put;
735 } while (bits);
736 return Z_OK;
739 /* ========================================================================= */
740 int ZEXPORT deflateParams(z_streamp strm, int level, int strategy) {
741 deflate_state *s;
742 compress_func func;
744 if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
745 s = strm->state;
747 #ifdef FASTEST
748 if (level != 0) level = 1;
749 #else
750 if (level == Z_DEFAULT_COMPRESSION) level = 6;
751 #endif
752 if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) {
753 return Z_STREAM_ERROR;
755 func = configuration_table[s->level].func;
757 if ((strategy != s->strategy || func != configuration_table[level].func) &&
758 s->last_flush != -2) {
759 /* Flush the last buffer: */
760 int err = deflate(strm, Z_BLOCK);
761 if (err == Z_STREAM_ERROR)
762 return err;
763 if (strm->avail_in || (s->strstart - s->block_start) + s->lookahead)
764 return Z_BUF_ERROR;
766 if (s->level != level) {
767 if (s->level == 0 && s->matches != 0) {
768 if (s->matches == 1)
769 slide_hash(s);
770 else
771 CLEAR_HASH(s);
772 s->matches = 0;
774 s->level = level;
775 s->max_lazy_match = configuration_table[level].max_lazy;
776 s->good_match = configuration_table[level].good_length;
777 s->nice_match = configuration_table[level].nice_length;
778 s->max_chain_length = configuration_table[level].max_chain;
780 s->strategy = strategy;
781 return Z_OK;
784 /* ========================================================================= */
785 int ZEXPORT deflateTune(z_streamp strm, int good_length, int max_lazy,
786 int nice_length, int max_chain) {
787 deflate_state *s;
789 if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
790 s = strm->state;
791 s->good_match = (uInt)good_length;
792 s->max_lazy_match = (uInt)max_lazy;
793 s->nice_match = nice_length;
794 s->max_chain_length = (uInt)max_chain;
795 return Z_OK;
798 /* =========================================================================
799 * For the default windowBits of 15 and memLevel of 8, this function returns a
800 * close to exact, as well as small, upper bound on the compressed size. This
801 * is an expansion of ~0.03%, plus a small constant.
803 * For any setting other than those defaults for windowBits and memLevel, one
804 * of two worst case bounds is returned. This is at most an expansion of ~4% or
805 * ~13%, plus a small constant.
807 * Both the 0.03% and 4% derive from the overhead of stored blocks. The first
808 * one is for stored blocks of 16383 bytes (memLevel == 8), whereas the second
809 * is for stored blocks of 127 bytes (the worst case memLevel == 1). The
810 * expansion results from five bytes of header for each stored block.
812 * The larger expansion of 13% results from a window size less than or equal to
813 * the symbols buffer size (windowBits <= memLevel + 7). In that case some of
814 * the data being compressed may have slid out of the sliding window, impeding
815 * a stored block from being emitted. Then the only choice is a fixed or
816 * dynamic block, where a fixed block limits the maximum expansion to 9 bits
817 * per 8-bit byte, plus 10 bits for every block. The smallest block size for
818 * which this can occur is 255 (memLevel == 2).
820 * Shifts are used to approximate divisions, for speed.
822 uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen) {
823 deflate_state *s;
824 uLong fixedlen, storelen, wraplen;
826 /* upper bound for fixed blocks with 9-bit literals and length 255
827 (memLevel == 2, which is the lowest that may not use stored blocks) --
828 ~13% overhead plus a small constant */
829 fixedlen = sourceLen + (sourceLen >> 3) + (sourceLen >> 8) +
830 (sourceLen >> 9) + 4;
832 /* upper bound for stored blocks with length 127 (memLevel == 1) --
833 ~4% overhead plus a small constant */
834 storelen = sourceLen + (sourceLen >> 5) + (sourceLen >> 7) +
835 (sourceLen >> 11) + 7;
837 /* if can't get parameters, return larger bound plus a zlib wrapper */
838 if (deflateStateCheck(strm))
839 return (fixedlen > storelen ? fixedlen : storelen) + 6;
841 /* compute wrapper length */
842 s = strm->state;
843 switch (s->wrap) {
844 case 0: /* raw deflate */
845 wraplen = 0;
846 break;
847 case 1: /* zlib wrapper */
848 wraplen = 6 + (s->strstart ? 4 : 0);
849 break;
850 #ifdef GZIP
851 case 2: /* gzip wrapper */
852 wraplen = 18;
853 if (s->gzhead != Z_NULL) { /* user-supplied gzip header */
854 Bytef *str;
855 if (s->gzhead->extra != Z_NULL)
856 wraplen += 2 + s->gzhead->extra_len;
857 str = s->gzhead->name;
858 if (str != Z_NULL)
859 do {
860 wraplen++;
861 } while (*str++);
862 str = s->gzhead->comment;
863 if (str != Z_NULL)
864 do {
865 wraplen++;
866 } while (*str++);
867 if (s->gzhead->hcrc)
868 wraplen += 2;
870 break;
871 #endif
872 default: /* for compiler happiness */
873 wraplen = 6;
876 /* if not default parameters, return one of the conservative bounds */
877 if (s->w_bits != 15 || s->hash_bits != 8 + 7)
878 return (s->w_bits <= s->hash_bits && s->level ? fixedlen : storelen) +
879 wraplen;
881 /* default settings: return tight bound for that case -- ~0.03% overhead
882 plus a small constant */
883 return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
884 (sourceLen >> 25) + 13 - 6 + wraplen;
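/* For example, with the default windowBits of 15 and memLevel of 8 and a
 * zlib wrapper (wraplen == 6), sourceLen == 1000000 gives about
 * 1000000 + 244 + 61 + 0 + 13 - 6 + 6 == 1000318 bytes.
 */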
887 /* =========================================================================
888 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
889 * IN assertion: the stream state is correct and there is enough room in
890 * pending_buf.
892 local void putShortMSB(deflate_state *s, uInt b) {
893 put_byte(s, (Byte)(b >> 8));
894 put_byte(s, (Byte)(b & 0xff));
897 /* =========================================================================
898 * Flush as much pending output as possible. All deflate() output, except for
899 * some deflate_stored() output, goes through this function so some
900 * applications may wish to modify it to avoid allocating a large
901 * strm->next_out buffer and copying into it. (See also read_buf()).
903 local void flush_pending(z_streamp strm) {
904 unsigned len;
905 deflate_state *s = strm->state;
907 _tr_flush_bits(s);
908 len = s->pending;
909 if (len > strm->avail_out) len = strm->avail_out;
910 if (len == 0) return;
912 zmemcpy(strm->next_out, s->pending_out, len);
913 strm->next_out += len;
914 s->pending_out += len;
915 strm->total_out += len;
916 strm->avail_out -= len;
917 s->pending -= len;
918 if (s->pending == 0) {
919 s->pending_out = s->pending_buf;
923 /* ===========================================================================
924 * Update the header CRC with the bytes s->pending_buf[beg..s->pending - 1].
926 #define HCRC_UPDATE(beg) \
927 do { \
928 if (s->gzhead->hcrc && s->pending > (beg)) \
929 strm->adler = crc32(strm->adler, s->pending_buf + (beg), \
930 s->pending - (beg)); \
931 } while (0)
933 /* ========================================================================= */
934 int ZEXPORT deflate(z_streamp strm, int flush) {
935 int old_flush; /* value of flush param for previous deflate call */
936 deflate_state *s;
938 if (deflateStateCheck(strm) || flush > Z_BLOCK || flush < 0) {
939 return Z_STREAM_ERROR;
941 s = strm->state;
943 if (strm->next_out == Z_NULL ||
944 (strm->avail_in != 0 && strm->next_in == Z_NULL) ||
945 (s->status == FINISH_STATE && flush != Z_FINISH)) {
946 ERR_RETURN(strm, Z_STREAM_ERROR);
948 if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
950 old_flush = s->last_flush;
951 s->last_flush = flush;
953 /* Flush as much pending output as possible */
954 if (s->pending != 0) {
955 flush_pending(strm);
956 if (strm->avail_out == 0) {
957 /* Since avail_out is 0, deflate will be called again with
958 * more output space, but possibly with both pending and
959 * avail_in equal to zero. There won't be anything to do,
960 * but this is not an error situation so make sure we
961 * return OK instead of BUF_ERROR at next call of deflate:
963 s->last_flush = -1;
964 return Z_OK;
967 /* Make sure there is something to do and avoid duplicate consecutive
968 * flushes. For repeated and useless calls with Z_FINISH, we keep
969 * returning Z_STREAM_END instead of Z_BUF_ERROR.
971 } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) &&
972 flush != Z_FINISH) {
973 ERR_RETURN(strm, Z_BUF_ERROR);
976 /* User must not provide more input after the first FINISH: */
977 if (s->status == FINISH_STATE && strm->avail_in != 0) {
978 ERR_RETURN(strm, Z_BUF_ERROR);
981 /* Write the header */
982 if (s->status == INIT_STATE && s->wrap == 0)
983 s->status = BUSY_STATE;
984 if (s->status == INIT_STATE) {
985 /* zlib header */
986 uInt header = (Z_DEFLATED + ((s->w_bits - 8) << 4)) << 8;
987 uInt level_flags;
989 if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
990 level_flags = 0;
991 else if (s->level < 6)
992 level_flags = 1;
993 else if (s->level == 6)
994 level_flags = 2;
995 else
996 level_flags = 3;
997 header |= (level_flags << 6);
998 if (s->strstart != 0) header |= PRESET_DICT;
999 header += 31 - (header % 31);
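/* For example, the familiar default header bytes 0x78 0x9c come from level 6,
 * windowBits 15 and no preset dictionary: CMF == 0x78 (Z_DEFLATED + (7 << 4)),
 * level_flags == 2, so header == 0x7880 before the check bits; since
 * 0x7880 % 31 == 3, adding 28 yields 0x789c.
 */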
1001 putShortMSB(s, header);
1003 /* Save the adler32 of the preset dictionary: */
1004 if (s->strstart != 0) {
1005 putShortMSB(s, (uInt)(strm->adler >> 16));
1006 putShortMSB(s, (uInt)(strm->adler & 0xffff));
1008 strm->adler = adler32(0L, Z_NULL, 0);
1009 s->status = BUSY_STATE;
1011 /* Compression must start with an empty pending buffer */
1012 flush_pending(strm);
1013 if (s->pending != 0) {
1014 s->last_flush = -1;
1015 return Z_OK;
1018 #ifdef GZIP
1019 if (s->status == GZIP_STATE) {
1020 /* gzip header */
1021 strm->adler = crc32(0L, Z_NULL, 0);
1022 put_byte(s, 31);
1023 put_byte(s, 139);
1024 put_byte(s, 8);
1025 if (s->gzhead == Z_NULL) {
1026 put_byte(s, 0);
1027 put_byte(s, 0);
1028 put_byte(s, 0);
1029 put_byte(s, 0);
1030 put_byte(s, 0);
1031 put_byte(s, s->level == 9 ? 2 :
1032 (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
1033 4 : 0));
1034 put_byte(s, OS_CODE);
1035 s->status = BUSY_STATE;
1037 /* Compression must start with an empty pending buffer */
1038 flush_pending(strm);
1039 if (s->pending != 0) {
1040 s->last_flush = -1;
1041 return Z_OK;
1044 else {
1045 put_byte(s, (s->gzhead->text ? 1 : 0) +
1046 (s->gzhead->hcrc ? 2 : 0) +
1047 (s->gzhead->extra == Z_NULL ? 0 : 4) +
1048 (s->gzhead->name == Z_NULL ? 0 : 8) +
1049 (s->gzhead->comment == Z_NULL ? 0 : 16)
1050 );
1051 put_byte(s, (Byte)(s->gzhead->time & 0xff));
1052 put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff));
1053 put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff));
1054 put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff));
1055 put_byte(s, s->level == 9 ? 2 :
1056 (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
1057 4 : 0));
1058 put_byte(s, s->gzhead->os & 0xff);
1059 if (s->gzhead->extra != Z_NULL) {
1060 put_byte(s, s->gzhead->extra_len & 0xff);
1061 put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
1063 if (s->gzhead->hcrc)
1064 strm->adler = crc32(strm->adler, s->pending_buf,
1065 s->pending);
1066 s->gzindex = 0;
1067 s->status = EXTRA_STATE;
1070 if (s->status == EXTRA_STATE) {
1071 if (s->gzhead->extra != Z_NULL) {
1072 ulg beg = s->pending; /* start of bytes to update crc */
1073 uInt left = (s->gzhead->extra_len & 0xffff) - s->gzindex;
1074 while (s->pending + left > s->pending_buf_size) {
1075 uInt copy = s->pending_buf_size - s->pending;
1076 zmemcpy(s->pending_buf + s->pending,
1077 s->gzhead->extra + s->gzindex, copy);
1078 s->pending = s->pending_buf_size;
1079 HCRC_UPDATE(beg);
1080 s->gzindex += copy;
1081 flush_pending(strm);
1082 if (s->pending != 0) {
1083 s->last_flush = -1;
1084 return Z_OK;
1086 beg = 0;
1087 left -= copy;
1089 zmemcpy(s->pending_buf + s->pending,
1090 s->gzhead->extra + s->gzindex, left);
1091 s->pending += left;
1092 HCRC_UPDATE(beg);
1093 s->gzindex = 0;
1095 s->status = NAME_STATE;
1097 if (s->status == NAME_STATE) {
1098 if (s->gzhead->name != Z_NULL) {
1099 ulg beg = s->pending; /* start of bytes to update crc */
1100 int val;
1101 do {
1102 if (s->pending == s->pending_buf_size) {
1103 HCRC_UPDATE(beg);
1104 flush_pending(strm);
1105 if (s->pending != 0) {
1106 s->last_flush = -1;
1107 return Z_OK;
1109 beg = 0;
1111 val = s->gzhead->name[s->gzindex++];
1112 put_byte(s, val);
1113 } while (val != 0);
1114 HCRC_UPDATE(beg);
1115 s->gzindex = 0;
1117 s->status = COMMENT_STATE;
1119 if (s->status == COMMENT_STATE) {
1120 if (s->gzhead->comment != Z_NULL) {
1121 ulg beg = s->pending; /* start of bytes to update crc */
1122 int val;
1123 do {
1124 if (s->pending == s->pending_buf_size) {
1125 HCRC_UPDATE(beg);
1126 flush_pending(strm);
1127 if (s->pending != 0) {
1128 s->last_flush = -1;
1129 return Z_OK;
1131 beg = 0;
1133 val = s->gzhead->comment[s->gzindex++];
1134 put_byte(s, val);
1135 } while (val != 0);
1136 HCRC_UPDATE(beg);
1138 s->status = HCRC_STATE;
1140 if (s->status == HCRC_STATE) {
1141 if (s->gzhead->hcrc) {
1142 if (s->pending + 2 > s->pending_buf_size) {
1143 flush_pending(strm);
1144 if (s->pending != 0) {
1145 s->last_flush = -1;
1146 return Z_OK;
1149 put_byte(s, (Byte)(strm->adler & 0xff));
1150 put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
1151 strm->adler = crc32(0L, Z_NULL, 0);
1153 s->status = BUSY_STATE;
1155 /* Compression must start with an empty pending buffer */
1156 flush_pending(strm);
1157 if (s->pending != 0) {
1158 s->last_flush = -1;
1159 return Z_OK;
1162 #endif
1164 /* Start a new block or continue the current one.
1166 if (strm->avail_in != 0 || s->lookahead != 0 ||
1167 (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
1168 block_state bstate;
1170 bstate = s->level == 0 ? deflate_stored(s, flush) :
1171 s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) :
1172 s->strategy == Z_RLE ? deflate_rle(s, flush) :
1173 (*(configuration_table[s->level].func))(s, flush);
1175 if (bstate == finish_started || bstate == finish_done) {
1176 s->status = FINISH_STATE;
1178 if (bstate == need_more || bstate == finish_started) {
1179 if (strm->avail_out == 0) {
1180 s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
1182 return Z_OK;
1183 /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
1184 * of deflate should use the same flush parameter to make sure
1185 * that the flush is complete. So we don't have to output an
1186 * empty block here, this will be done at next call. This also
1187 * ensures that for a very small output buffer, we emit at most
1188 * one empty block.
1191 if (bstate == block_done) {
1192 if (flush == Z_PARTIAL_FLUSH) {
1193 _tr_align(s);
1194 } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */
1195 _tr_stored_block(s, (char*)0, 0L, 0);
1196 /* For a full flush, this empty block will be recognized
1197 * as a special marker by inflate_sync().
1199 if (flush == Z_FULL_FLUSH) {
1200 CLEAR_HASH(s); /* forget history */
1201 if (s->lookahead == 0) {
1202 s->strstart = 0;
1203 s->block_start = 0L;
1204 s->insert = 0;
1208 flush_pending(strm);
1209 if (strm->avail_out == 0) {
1210 s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
1211 return Z_OK;
1216 if (flush != Z_FINISH) return Z_OK;
1217 if (s->wrap <= 0) return Z_STREAM_END;
1219 /* Write the trailer */
1220 #ifdef GZIP
1221 if (s->wrap == 2) {
1222 put_byte(s, (Byte)(strm->adler & 0xff));
1223 put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
1224 put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
1225 put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
1226 put_byte(s, (Byte)(strm->total_in & 0xff));
1227 put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
1228 put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
1229 put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
1231 else
1232 #endif
1233 {
1234 putShortMSB(s, (uInt)(strm->adler >> 16));
1235 putShortMSB(s, (uInt)(strm->adler & 0xffff));
1236 }
1237 flush_pending(strm);
1238 /* If avail_out is zero, the application will call deflate again
1239 * to flush the rest.
1241 if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
1242 return s->pending != 0 ? Z_OK : Z_STREAM_END;
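#if 0
/* Illustrative driver (a sketch, not built as part of zlib; the buffer size,
 * stream names and the stdio calls are placeholders). It shows the
 * avail_in/avail_out protocol that the state machine above implements:
 * feed input with Z_NO_FLUSH, then finish with Z_FINISH until Z_STREAM_END.
 */
static int compress_file(FILE *source, FILE *dest, int level) {
    z_stream strm;
    unsigned char in[16384], out[16384];
    int flush, ret;

    strm.zalloc = Z_NULL;
    strm.zfree = Z_NULL;
    strm.opaque = Z_NULL;
    if (deflateInit(&strm, level) != Z_OK)
        return Z_ERRNO;
    do {
        strm.avail_in = (uInt)fread(in, 1, sizeof(in), source);
        if (ferror(source)) {
            (void)deflateEnd(&strm);
            return Z_ERRNO;
        }
        flush = feof(source) ? Z_FINISH : Z_NO_FLUSH;
        strm.next_in = in;
        do {                              /* run deflate() until output drains */
            strm.avail_out = sizeof(out);
            strm.next_out = out;
            ret = deflate(&strm, flush);  /* cannot return Z_STREAM_ERROR here */
            fwrite(out, 1, sizeof(out) - strm.avail_out, dest);
        } while (strm.avail_out == 0);
    } while (flush != Z_FINISH);          /* ret is Z_STREAM_END on success */
    (void)deflateEnd(&strm);
    return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR;
}
#endif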
1245 /* ========================================================================= */
1246 int ZEXPORT deflateEnd(z_streamp strm) {
1247 int status;
1249 if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
1251 status = strm->state->status;
1253 /* Deallocate in reverse order of allocations: */
1254 TRY_FREE(strm, strm->state->pending_buf);
1255 TRY_FREE(strm, strm->state->head);
1256 TRY_FREE(strm, strm->state->prev);
1257 TRY_FREE(strm, strm->state->window);
1259 ZFREE(strm, strm->state);
1260 strm->state = Z_NULL;
1262 return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
1265 /* =========================================================================
1266 * Copy the source state to the destination state.
1267 * To simplify the source, this is not supported for 16-bit MSDOS (which
1268 * doesn't have enough memory anyway to duplicate compression states).
1270 int ZEXPORT deflateCopy(z_streamp dest, z_streamp source) {
1271 #ifdef MAXSEG_64K
1272 (void)dest;
1273 (void)source;
1274 return Z_STREAM_ERROR;
1275 #else
1276 deflate_state *ds;
1277 deflate_state *ss;
1280 if (deflateStateCheck(source) || dest == Z_NULL) {
1281 return Z_STREAM_ERROR;
1284 ss = source->state;
1286 zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));
1288 ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
1289 if (ds == Z_NULL) return Z_MEM_ERROR;
1290 dest->state = (struct internal_state FAR *) ds;
1291 zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state));
1292 ds->strm = dest;
1294 ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
1295 ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
1296 ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
1297 ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4);
1299 if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
1300 ds->pending_buf == Z_NULL) {
1301 deflateEnd (dest);
1302 return Z_MEM_ERROR;
1304 /* The following zmemcpy calls do not work for 16-bit MSDOS */
1305 zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
1306 zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
1307 zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
1308 zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
1310 ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
1311 ds->sym_buf = ds->pending_buf + ds->lit_bufsize;
1313 ds->l_desc.dyn_tree = ds->dyn_ltree;
1314 ds->d_desc.dyn_tree = ds->dyn_dtree;
1315 ds->bl_desc.dyn_tree = ds->bl_tree;
1317 return Z_OK;
1318 #endif /* MAXSEG_64K */
1321 #ifndef FASTEST
1322 /* ===========================================================================
1323 * Set match_start to the longest match starting at the given string and
1324 * return its length. Matches shorter or equal to prev_length are discarded,
1325 * in which case the result is equal to prev_length and match_start is
1326 * garbage.
1327 * IN assertions: cur_match is the head of the hash chain for the current
1328 * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
1329 * OUT assertion: the match length is not greater than s->lookahead.
1331 local uInt longest_match(deflate_state *s, IPos cur_match) {
1332 unsigned chain_length = s->max_chain_length;/* max hash chain length */
1333 register Bytef *scan = s->window + s->strstart; /* current string */
1334 register Bytef *match; /* matched string */
1335 register int len; /* length of current match */
1336 int best_len = (int)s->prev_length; /* best match length so far */
1337 int nice_match = s->nice_match; /* stop if match long enough */
1338 IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
1339 s->strstart - (IPos)MAX_DIST(s) : NIL;
1340 /* Stop when cur_match becomes <= limit. To simplify the code,
1341 * we prevent matches with the string of window index 0.
1343 Posf *prev = s->prev;
1344 uInt wmask = s->w_mask;
1346 #ifdef UNALIGNED_OK
1347 /* Compare two bytes at a time. Note: this is not always beneficial.
1348 * Try with and without -DUNALIGNED_OK to check.
1350 register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
1351 register ush scan_start = *(ushf*)scan;
1352 register ush scan_end = *(ushf*)(scan + best_len - 1);
1353 #else
1354 register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1355 register Byte scan_end1 = scan[best_len - 1];
1356 register Byte scan_end = scan[best_len];
1357 #endif
1359 /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1360 * It is easy to get rid of this optimization if necessary.
1362 Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1364 /* Do not waste too much time if we already have a good match: */
1365 if (s->prev_length >= s->good_match) {
1366 chain_length >>= 2;
1368 /* Do not look for matches beyond the end of the input. This is necessary
1369 * to make deflate deterministic.
1371 if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead;
1373 Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
1374 "need lookahead");
1376 do {
1377 Assert(cur_match < s->strstart, "no future");
1378 match = s->window + cur_match;
1380 /* Skip to next match if the match length cannot increase
1381 * or if the match length is less than 2. Note that the checks below
1382 * for insufficient lookahead only occur occasionally for performance
1383 * reasons. Therefore uninitialized memory will be accessed, and
1384 * conditional jumps will be made that depend on those values.
1385 * However the length of the match is limited to the lookahead, so
1386 * the output of deflate is not affected by the uninitialized values.
1388 #if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
1389 /* This code assumes sizeof(unsigned short) == 2. Do not use
1390 * UNALIGNED_OK if your compiler uses a different size.
1392 if (*(ushf*)(match + best_len - 1) != scan_end ||
1393 *(ushf*)match != scan_start) continue;
1395 /* It is not necessary to compare scan[2] and match[2] since they are
1396 * always equal when the other bytes match, given that the hash keys
1397 * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
1398 * strstart + 3, + 5, up to strstart + 257. We check for insufficient
1399 * lookahead only every 4th comparison; the 128th check will be made
1400 * at strstart + 257. If MAX_MATCH-2 is not a multiple of 8, it is
1401 * necessary to put more guard bytes at the end of the window, or
1402 * to check more often for insufficient lookahead.
1404 Assert(scan[2] == match[2], "scan[2]?");
1405 scan++, match++;
1406 do {
1407 } while (*(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1408 *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1409 *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1410 *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1411 scan < strend);
1412 /* The funny "do {}" generates better code on most compilers */
1414 /* Here, scan <= window + strstart + 257 */
1415 Assert(scan <= s->window + (unsigned)(s->window_size - 1),
1416 "wild scan");
1417 if (*scan == *match) scan++;
1419 len = (MAX_MATCH - 1) - (int)(strend - scan);
1420 scan = strend - (MAX_MATCH-1);
1422 #else /* UNALIGNED_OK */
1424 if (match[best_len] != scan_end ||
1425 match[best_len - 1] != scan_end1 ||
1426 *match != *scan ||
1427 *++match != scan[1]) continue;
1429 /* The check at best_len - 1 can be removed because it will be made
1430 * again later. (This heuristic is not always a win.)
1431 * It is not necessary to compare scan[2] and match[2] since they
1432 * are always equal when the other bytes match, given that
1433 * the hash keys are equal and that HASH_BITS >= 8.
1435 scan += 2, match++;
1436 Assert(*scan == *match, "match[2]?");
1438 /* We check for insufficient lookahead only every 8th comparison;
1439 * the 256th check will be made at strstart + 258.
1441 do {
1442 } while (*++scan == *++match && *++scan == *++match &&
1443 *++scan == *++match && *++scan == *++match &&
1444 *++scan == *++match && *++scan == *++match &&
1445 *++scan == *++match && *++scan == *++match &&
1446 scan < strend);
1448 Assert(scan <= s->window + (unsigned)(s->window_size - 1),
1449 "wild scan");
1451 len = MAX_MATCH - (int)(strend - scan);
1452 scan = strend - MAX_MATCH;
1454 #endif /* UNALIGNED_OK */
1456 if (len > best_len) {
1457 s->match_start = cur_match;
1458 best_len = len;
1459 if (len >= nice_match) break;
1460 #ifdef UNALIGNED_OK
1461 scan_end = *(ushf*)(scan + best_len - 1);
1462 #else
1463 scan_end1 = scan[best_len - 1];
1464 scan_end = scan[best_len];
1465 #endif
1467 } while ((cur_match = prev[cur_match & wmask]) > limit
1468 && --chain_length != 0);
1470 if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
1471 return s->lookahead;
1474 #else /* FASTEST */
1476 /* ---------------------------------------------------------------------------
1477 * Optimized version for FASTEST only
1479 local uInt longest_match(deflate_state *s, IPos cur_match) {
1480 register Bytef *scan = s->window + s->strstart; /* current string */
1481 register Bytef *match; /* matched string */
1482 register int len; /* length of current match */
1483 register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1485 /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1486 * It is easy to get rid of this optimization if necessary.
1488 Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1490 Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
1491 "need lookahead");
1493 Assert(cur_match < s->strstart, "no future");
1495 match = s->window + cur_match;
1497 /* Return failure if the match length is less than 2:
1499 if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
1501 /* The check at best_len - 1 can be removed because it will be made
1502 * again later. (This heuristic is not always a win.)
1503 * It is not necessary to compare scan[2] and match[2] since they
1504 * are always equal when the other bytes match, given that
1505 * the hash keys are equal and that HASH_BITS >= 8.
1507 scan += 2, match += 2;
1508 Assert(*scan == *match, "match[2]?");
1510 /* We check for insufficient lookahead only every 8th comparison;
1511 * the 256th check will be made at strstart + 258.
1513 do {
1514 } while (*++scan == *++match && *++scan == *++match &&
1515 *++scan == *++match && *++scan == *++match &&
1516 *++scan == *++match && *++scan == *++match &&
1517 *++scan == *++match && *++scan == *++match &&
1518 scan < strend);
1520 Assert(scan <= s->window + (unsigned)(s->window_size - 1), "wild scan");
1522 len = MAX_MATCH - (int)(strend - scan);
1524 if (len < MIN_MATCH) return MIN_MATCH - 1;
1526 s->match_start = cur_match;
1527 return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
1530 #endif /* FASTEST */
1532 #ifdef ZLIB_DEBUG
1534 #define EQUAL 0
1535 /* result of memcmp for equal strings */
1537 /* ===========================================================================
1538 * Check that the match at match_start is indeed a match.
1540 local void check_match(deflate_state *s, IPos start, IPos match, int length) {
1541 /* check that the match is indeed a match */
1542 if (zmemcmp(s->window + match,
1543 s->window + start, length) != EQUAL) {
1544 fprintf(stderr, " start %u, match %u, length %d\n",
1545 start, match, length);
1546 do {
1547 fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
1548 } while (--length != 0);
1549 z_error("invalid match");
1551 if (z_verbose > 1) {
1552 fprintf(stderr,"\\[%d,%d]", start - match, length);
1553 do { putc(s->window[start++], stderr); } while (--length != 0);
1556 #else
1557 # define check_match(s, start, match, length)
1558 #endif /* ZLIB_DEBUG */
1560 /* ===========================================================================
1561 * Flush the current block, with given end-of-file flag.
1562 * IN assertion: strstart is set to the end of the current match.
1564 #define FLUSH_BLOCK_ONLY(s, last) { \
1565 _tr_flush_block(s, (s->block_start >= 0L ? \
1566 (charf *)&s->window[(unsigned)s->block_start] : \
1567 (charf *)Z_NULL), \
1568 (ulg)((long)s->strstart - s->block_start), \
1569 (last)); \
1570 s->block_start = s->strstart; \
1571 flush_pending(s->strm); \
1572 Tracev((stderr,"[FLUSH]")); \
1575 /* Same but force premature exit if necessary. */
1576 #define FLUSH_BLOCK(s, last) { \
1577 FLUSH_BLOCK_ONLY(s, last); \
1578 if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \
1581 /* Maximum stored block length in deflate format (not including header). */
1582 #define MAX_STORED 65535
1584 /* Minimum of a and b. */
1585 #define MIN(a, b) ((a) > (b) ? (b) : (a))
1587 /* ===========================================================================
1588 * Copy without compression as much as possible from the input stream, return
1589 * the current block state.
1591 * In case deflateParams() is used to later switch to a non-zero compression
1592 * level, s->matches (otherwise unused when storing) keeps track of the number
1593 * of hash table slides to perform. If s->matches is 1, then one hash table
1594 * slide will be done when switching. If s->matches is 2, the maximum value
1595 * allowed here, then the hash table will be cleared, since two or more slides
1596 * is the same as a clear.
1598 * deflate_stored() is written to minimize the number of times an input byte is
1599 * copied. It is most efficient with large input and output buffers, which
1600 * maximizes the opportunities to have a single copy from next_in to next_out.
1602 local block_state deflate_stored(deflate_state *s, int flush) {
1603 /* Smallest worthy block size when not flushing or finishing. By default
1604 * this is 32K. This can be as small as 507 bytes for memLevel == 1. For
1605 * large input and output buffers, the stored block size will be larger.
1607 unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size);
1609 /* Copy as many min_block or larger stored blocks directly to next_out as
1610 * possible. If flushing, copy the remaining available input to next_out as
1611 * stored blocks, if there is enough space.
1613 unsigned len, left, have, last = 0;
1614 unsigned used = s->strm->avail_in;
1615 do {
1616 /* Set len to the maximum size block that we can copy directly with the
1617 * available input data and output space. Set left to how much of that
1618 * would be copied from what's left in the window.
1620 len = MAX_STORED; /* maximum deflate stored block length */
1621 have = (s->bi_valid + 42) >> 3; /* number of header bytes */
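/* The 42 folds the worst case into one shift: the 3-bit stored-block header
 * appended to the bi_valid pending bits, rounded up to a whole byte (+7, >>3),
 * plus 32 bits of LEN/NLEN, i.e. (bi_valid + 3 + 7 + 32) >> 3.
 */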
        if (s->strm->avail_out < have)          /* need room for header */
            break;
            /* maximum stored block length that will fit in avail_out: */
        have = s->strm->avail_out - have;
        left = s->strstart - s->block_start;    /* bytes left in window */
        if (len > (ulg)left + s->strm->avail_in)
            len = left + s->strm->avail_in;     /* limit len to the input */
        if (len > have)
            len = have;                         /* limit len to the output */

        /* If the stored block would be less than min_block in length, or if
         * unable to copy all of the available input when flushing, then try
         * copying to the window and the pending buffer instead. Also don't
         * write an empty block when flushing -- deflate() does that.
         */
        if (len < min_block && ((len == 0 && flush != Z_FINISH) ||
                                flush == Z_NO_FLUSH ||
                                len != left + s->strm->avail_in))
            break;

        /* Make a dummy stored block in pending to get the header bytes,
         * including any pending bits. This also updates the debugging counts.
         */
        last = flush == Z_FINISH && len == left + s->strm->avail_in ? 1 : 0;
        _tr_stored_block(s, (char *)0, 0L, last);

        /* Replace the lengths in the dummy stored block with len. */
        s->pending_buf[s->pending - 4] = len;
        s->pending_buf[s->pending - 3] = len >> 8;
        s->pending_buf[s->pending - 2] = ~len;
        s->pending_buf[s->pending - 1] = ~len >> 8;
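        /* For example, len == 1000 (0x03e8) stores the bytes e8 03 17 fc:
         * LEN least significant byte first, then NLEN == ~LEN likewise.
         */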

        /* Write the stored block header bytes. */
        flush_pending(s->strm);

#ifdef ZLIB_DEBUG
        /* Update debugging counts for the data about to be copied. */
        s->compressed_len += len << 3;
        s->bits_sent += len << 3;
#endif

        /* Copy uncompressed bytes from the window to next_out. */
        if (left) {
            if (left > len)
                left = len;
            zmemcpy(s->strm->next_out, s->window + s->block_start, left);
            s->strm->next_out += left;
            s->strm->avail_out -= left;
            s->strm->total_out += left;
            s->block_start += left;
            len -= left;
        }

        /* Copy uncompressed bytes directly from next_in to next_out, updating
         * the check value.
         */
        if (len) {
            read_buf(s->strm, s->strm->next_out, len);
            s->strm->next_out += len;
            s->strm->avail_out -= len;
            s->strm->total_out += len;
        }
    } while (last == 0);

    /* Update the sliding window with the last s->w_size bytes of the copied
     * data, or append all of the copied data to the existing window if less
     * than s->w_size bytes were copied. Also update the number of bytes to
     * insert in the hash tables, in the event that deflateParams() switches to
     * a non-zero compression level.
     */
    used -= s->strm->avail_in;      /* number of input bytes directly copied */
    if (used) {
        /* If any input was used, then no unused input remains in the window,
         * therefore s->block_start == s->strstart.
         */
        if (used >= s->w_size) {    /* supplant the previous history */
            s->matches = 2;         /* clear hash */
            zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
            s->strstart = s->w_size;
            s->insert = s->strstart;
        }
        else {
            if (s->window_size - s->strstart <= used) {
                /* Slide the window down. */
                s->strstart -= s->w_size;
                zmemcpy(s->window, s->window + s->w_size, s->strstart);
                if (s->matches < 2)
                    s->matches++;   /* add a pending slide_hash() */
                if (s->insert > s->strstart)
                    s->insert = s->strstart;
            }
            zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
            s->strstart += used;
            s->insert += MIN(used, s->w_size - s->insert);
        }
        s->block_start = s->strstart;
    }
    if (s->high_water < s->strstart)
        s->high_water = s->strstart;

    /* If the last block was written to next_out, then done. */
    if (last)
        return finish_done;

    /* If flushing and all input has been consumed, then done. */
    if (flush != Z_NO_FLUSH && flush != Z_FINISH &&
        s->strm->avail_in == 0 && (long)s->strstart == s->block_start)
        return block_done;

    /* Fill the window with any remaining input. */
    have = s->window_size - s->strstart;
    if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) {
        /* Slide the window down. */
        s->block_start -= s->w_size;
        s->strstart -= s->w_size;
        zmemcpy(s->window, s->window + s->w_size, s->strstart);
        if (s->matches < 2)
            s->matches++;           /* add a pending slide_hash() */
        have += s->w_size;          /* more space now */
        if (s->insert > s->strstart)
            s->insert = s->strstart;
    }
    if (have > s->strm->avail_in)
        have = s->strm->avail_in;
    if (have) {
        read_buf(s->strm, s->window + s->strstart, have);
        s->strstart += have;
        s->insert += MIN(have, s->w_size - s->insert);
    }
    if (s->high_water < s->strstart)
        s->high_water = s->strstart;

    /* There was not enough avail_out to write a complete worthy or flushed
     * stored block to next_out. Write a stored block to pending instead, if we
     * have enough input for a worthy block, or if flushing and there is enough
     * room for the remaining input as a stored block in the pending buffer.
     */
    have = (s->bi_valid + 42) >> 3;         /* number of header bytes */
        /* maximum stored block length that will fit in pending: */
    have = MIN(s->pending_buf_size - have, MAX_STORED);
    min_block = MIN(have, s->w_size);
    left = s->strstart - s->block_start;
    if (left >= min_block ||
        ((left || flush == Z_FINISH) && flush != Z_NO_FLUSH &&
         s->strm->avail_in == 0 && left <= have)) {
        len = MIN(left, have);
        last = flush == Z_FINISH && s->strm->avail_in == 0 &&
               len == left ? 1 : 0;
        _tr_stored_block(s, (charf *)s->window + s->block_start, len, last);
        s->block_start += len;
        flush_pending(s->strm);
    }

    /* We've done all we can with the available input and output. */
    return last ? finish_started : need_more;
}
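
/* deflate_stored() is the compress_func used for compression level 0 (see the
 * configuration_table earlier in this file). A minimal, illustrative sketch of
 * driving this path through the public API, error handling omitted:
 *
 *     z_stream strm;
 *     unsigned char out[1024];
 *     memset(&strm, 0, sizeof(strm));
 *     deflateInit(&strm, Z_NO_COMPRESSION);   level 0 selects deflate_stored
 *     strm.next_in = (Bytef *)"hello";
 *     strm.avail_in = 5;
 *     strm.next_out = out;
 *     strm.avail_out = sizeof(out);
 *     deflate(&strm, Z_FINISH);               emits one or more stored blocks
 *     deflateEnd(&strm);
 */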

/* ===========================================================================
 * Compress as much as possible from the input stream, return the current
 * block state.
 * This function does not perform lazy evaluation of matches and inserts
 * new strings in the dictionary only for unmatched strings or for short
 * matches. It is used only for the fast compression options.
 */
local block_state deflate_fast(deflate_state *s, int flush) {
    IPos hash_head;       /* head of the hash chain */
    int bflush;           /* set if current block must be flushed */

    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart + 2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        hash_head = NIL;
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         * At this point we have always match_length < MIN_MATCH
         */
        if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            s->match_length = longest_match (s, hash_head);
            /* longest_match() sets match_start */
        }
        if (s->match_length >= MIN_MATCH) {
            check_match(s, s->strstart, s->match_start, s->match_length);

            _tr_tally_dist(s, s->strstart - s->match_start,
                           s->match_length - MIN_MATCH, bflush);

            s->lookahead -= s->match_length;

            /* Insert new strings in the hash table only if the match length
             * is not too large. This saves time but degrades compression.
             */
#ifndef FASTEST
            if (s->match_length <= s->max_insert_length &&
                s->lookahead >= MIN_MATCH) {
                s->match_length--; /* string at strstart already in table */
                do {
                    s->strstart++;
                    INSERT_STRING(s, s->strstart, hash_head);
                    /* strstart never exceeds WSIZE-MAX_MATCH, so there are
                     * always MIN_MATCH bytes ahead.
                     */
                } while (--s->match_length != 0);
                s->strstart++;
            } else
#endif
            {
                s->strstart += s->match_length;
                s->match_length = 0;
                s->ins_h = s->window[s->strstart];
                UPDATE_HASH(s, s->ins_h, s->window[s->strstart + 1]);
#if MIN_MATCH != 3
                Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
                /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
                 * matter since it will be recomputed at next deflate call.
                 */
            }
        } else {
            /* No match, output a literal byte */
            Tracevv((stderr,"%c", s->window[s->strstart]));
            _tr_tally_lit(s, s->window[s->strstart], bflush);
            s->lookahead--;
            s->strstart++;
        }
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (s->sym_next)
        FLUSH_BLOCK(s, 0);
    return block_done;
}
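
/* In the default configuration_table earlier in this file, deflate_fast() is
 * the compress_func for levels 1 through 3, where insertion and matching are
 * kept cheap at the cost of some compression ratio.
 */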

#ifndef FASTEST
/* ===========================================================================
 * Same as above, but achieves better compression. We use a lazy
 * evaluation for matches: a match is finally adopted only if there is
 * no better match at the next window position.
 */
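/* Worked example of the lazy heuristic: suppose a 3-byte match is found at
 * strstart. Before emitting it, deflate_slow() advances one byte and searches
 * again; if a longer match starts at strstart + 1, the earlier match is
 * truncated to a single literal and the longer match is taken instead,
 * otherwise the earlier match is emitted as-is. In the default
 * configuration_table, levels 4 through 9 use this function.
 */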
local block_state deflate_slow(deflate_state *s, int flush) {
    IPos hash_head;          /* head of hash chain */
    int bflush;              /* set if current block must be flushed */

    /* Process the input block. */
    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart + 2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        hash_head = NIL;
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         */
        s->prev_length = s->match_length, s->prev_match = s->match_start;
        s->match_length = MIN_MATCH-1;

        if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
            s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            s->match_length = longest_match (s, hash_head);
            /* longest_match() sets match_start */

            if (s->match_length <= 5 && (s->strategy == Z_FILTERED
#if TOO_FAR <= 32767
                || (s->match_length == MIN_MATCH &&
                    s->strstart - s->match_start > TOO_FAR)
#endif
                )) {

                /* If prev_match is also MIN_MATCH, match_start is garbage
                 * but we will ignore the current match anyway.
                 */
                s->match_length = MIN_MATCH-1;
            }
        }
        /* If there was a match at the previous step and the current
         * match is not better, output the previous match:
         */
        if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
            uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
            /* Do not insert strings in hash table beyond this. */

            check_match(s, s->strstart - 1, s->prev_match, s->prev_length);

            _tr_tally_dist(s, s->strstart - 1 - s->prev_match,
                           s->prev_length - MIN_MATCH, bflush);

            /* Insert in hash table all strings up to the end of the match.
             * strstart - 1 and strstart are already inserted. If there is not
             * enough lookahead, the last two strings are not inserted in
             * the hash table.
             */
            s->lookahead -= s->prev_length - 1;
            s->prev_length -= 2;
            do {
                if (++s->strstart <= max_insert) {
                    INSERT_STRING(s, s->strstart, hash_head);
                }
            } while (--s->prev_length != 0);
            s->match_available = 0;
            s->match_length = MIN_MATCH-1;
            s->strstart++;

            if (bflush) FLUSH_BLOCK(s, 0);

        } else if (s->match_available) {
            /* If there was no match at the previous position, output a
             * single literal. If there was a match but the current match
             * is longer, truncate the previous match to a single literal.
             */
            Tracevv((stderr,"%c", s->window[s->strstart - 1]));
            _tr_tally_lit(s, s->window[s->strstart - 1], bflush);
            if (bflush) {
                FLUSH_BLOCK_ONLY(s, 0);
            }
            s->strstart++;
            s->lookahead--;
            if (s->strm->avail_out == 0) return need_more;
        } else {
            /* There is no previous match to compare with, wait for
             * the next step to decide.
             */
            s->match_available = 1;
            s->strstart++;
            s->lookahead--;
        }
    }
    Assert (flush != Z_NO_FLUSH, "no flush?");
    if (s->match_available) {
        Tracevv((stderr,"%c", s->window[s->strstart - 1]));
        _tr_tally_lit(s, s->window[s->strstart - 1], bflush);
        s->match_available = 0;
    }
    s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (s->sym_next)
        FLUSH_BLOCK(s, 0);
    return block_done;
}
#endif /* FASTEST */

/* ===========================================================================
 * For Z_RLE, simply look for runs of bytes, generate matches only of distance
 * one. Do not maintain a hash table. (It will be regenerated if this run of
 * deflate switches away from Z_RLE.)
 */
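/* deflate_rle() is selected when deflate() is given the Z_RLE strategy. For
 * example, a run of eleven 'a' bytes is coded as the literal 'a' followed by
 * a match of length 10 at distance 1; inflate reproduces the run by copying
 * the byte it has just written ten more times.
 */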
local block_state deflate_rle(deflate_state *s, int flush) {
    int bflush;             /* set if current block must be flushed */
    uInt prev;              /* byte at distance one to match */
    Bytef *scan, *strend;   /* scan goes up to strend for length of run */

    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the longest run, plus one for the unrolled loop.
         */
        if (s->lookahead <= MAX_MATCH) {
            fill_window(s);
            if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* See how many times the previous byte repeats */
        s->match_length = 0;
        if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
            scan = s->window + s->strstart - 1;
            prev = *scan;
            if (prev == *++scan && prev == *++scan && prev == *++scan) {
                strend = s->window + s->strstart + MAX_MATCH;
                do {
                } while (prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         scan < strend);
                s->match_length = MAX_MATCH - (uInt)(strend - scan);
                if (s->match_length > s->lookahead)
                    s->match_length = s->lookahead;
            }
            Assert(scan <= s->window + (uInt)(s->window_size - 1),
                   "wild scan");
        }

        /* Emit match if have run of MIN_MATCH or longer, else emit literal */
        if (s->match_length >= MIN_MATCH) {
            check_match(s, s->strstart, s->strstart - 1, s->match_length);

            _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush);

            s->lookahead -= s->match_length;
            s->strstart += s->match_length;
            s->match_length = 0;
        } else {
            /* No match, output a literal byte */
            Tracevv((stderr,"%c", s->window[s->strstart]));
            _tr_tally_lit(s, s->window[s->strstart], bflush);
            s->lookahead--;
            s->strstart++;
        }
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    s->insert = 0;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (s->sym_next)
        FLUSH_BLOCK(s, 0);
    return block_done;
}

/* ===========================================================================
 * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
 * (It will be regenerated if this run of deflate switches away from Huffman.)
 */
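/* deflate_huff() is selected when the strategy is Z_HUFFMAN_ONLY, e.g. via
 * deflateInit2(&strm, level, Z_DEFLATED, windowBits, memLevel,
 *              Z_HUFFMAN_ONLY). Every input byte is emitted as a literal, so
 * only the Huffman coding of the literal/length tree provides compression.
 */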
local block_state deflate_huff(deflate_state *s, int flush) {
    int bflush;             /* set if current block must be flushed */

    for (;;) {
        /* Make sure that we have a literal to write. */
        if (s->lookahead == 0) {
            fill_window(s);
            if (s->lookahead == 0) {
                if (flush == Z_NO_FLUSH)
                    return need_more;
                break;      /* flush the current block */
            }
        }

        /* Output a literal byte */
        s->match_length = 0;
        Tracevv((stderr,"%c", s->window[s->strstart]));
        _tr_tally_lit(s, s->window[s->strstart], bflush);
        s->lookahead--;
        s->strstart++;
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    s->insert = 0;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (s->sym_next)
        FLUSH_BLOCK(s, 0);
    return block_done;