busybox: update to 1.23.2
release/src/router/busybox/archival/libarchive/bz/compress.c
blob 23de9d3f58cc6819ae4fbb19c79cead882f86662
/*
 * bzip2 is written by Julian Seward <jseward@bzip.org>.
 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
 * See README and LICENSE files in this directory for more information.
 */

/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*---                                            compress.c ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
This file is part of bzip2/libbzip2, a program and library for
lossless, block-sorting data compression.

bzip2/libbzip2 version 1.0.4 of 20 December 2006
Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>

Please read the WARNING, DISCLAIMER and PATENTS sections in the
README file.

This program is released under the terms of the license contained
in the file LICENSE.
------------------------------------------------------------------ */

/* CHANGES
 * 0.9.0    -- original version.
 * 0.9.0a/b -- no changes in this file.
 * 0.9.0c   -- changed setting of nGroups in sendMTFValues()
 *             so as to do a bit better on small files
 */

/* #include "bzlib_private.h" */

/*---------------------------------------------------*/
/*--- Bit stream I/O                              ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void BZ2_bsInitWrite(EState* s)
{
	s->bsLive = 0;
	s->bsBuff = 0;
}

/*---------------------------------------------------*/
static NOINLINE
void bsFinishWrite(EState* s)
{
	while (s->bsLive > 0) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
}

/*---------------------------------------------------*/
static
/* Helps only on level 5, on other levels hurts. ? */
#if CONFIG_BZIP2_FAST >= 5
ALWAYS_INLINE
#endif
void bsW(EState* s, int32_t n, uint32_t v)
{
	while (s->bsLive >= 8) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
	s->bsBuff |= (v << (32 - s->bsLive - n));
	s->bsLive += n;
}
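/*
 * Illustrative example (not in the original source): bsBuff keeps
 * bsLive bits left-justified in its top bits.  Starting from an empty
 * buffer, bsW(s, 3, 5) ORs binary 101 into bits 31..29 (bsLive = 3);
 * a following bsW(s, 2, 1) appends 01 just below it, making the top
 * five bits 10101 (bsLive = 5).  The flush loop leaves at most 7 bits
 * pending, so the largest write used here (24 bits) always fits.
 */
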
/*---------------------------------------------------*/
static
void bsPutU32(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 24) & 0xff);
	bsW(s, 8, (u >> 16) & 0xff);
	bsW(s, 8, (u >> 8) & 0xff);
	bsW(s, 8, u & 0xff);
}

/*---------------------------------------------------*/
static
void bsPutU16(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 8) & 0xff);
	bsW(s, 8, u & 0xff);
}

/*---------------------------------------------------*/
/*--- The back end proper                          ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void makeMaps_e(EState* s)
{
	int i;
	s->nInUse = 0;
	for (i = 0; i < 256; i++) {
		if (s->inUse[i]) {
			s->unseqToSeq[i] = s->nInUse;
			s->nInUse++;
		}
	}
}
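/*
 * Illustrative example (not in the original source): if the block
 * uses only the bytes 'a' (97) and 'b' (98), then nInUse becomes 2,
 * unseqToSeq[97] = 0 and unseqToSeq[98] = 1, i.e. the sparse byte
 * alphabet is renumbered into a dense one before MTF coding.
 */
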
/*---------------------------------------------------*/
static NOINLINE
void generateMTFValues(EState* s)
{
	uint8_t yy[256];
	int32_t i, j;
	int32_t zPend;
	int32_t wr;
	int32_t EOB;

	/*
	 * After sorting (eg, here),
	 * s->arr1[0 .. s->nblock-1] holds sorted order,
	 * and
	 * ((uint8_t*)s->arr2)[0 .. s->nblock-1]
	 * holds the original block data.
	 *
	 * The first thing to do is generate the MTF values,
	 * and put them in ((uint16_t*)s->arr1)[0 .. s->nblock-1].
	 *
	 * Because there are strictly fewer or equal MTF values
	 * than block values, ptr values in this area are overwritten
	 * with MTF values only when they are no longer needed.
	 *
	 * The final compressed bitstream is generated into the
	 * area starting at &((uint8_t*)s->arr2)[s->nblock]
	 *
	 * These storage aliases are set up in bzCompressInit(),
	 * except for the last one, which is arranged in
	 * compressBlock().
	 */
	uint32_t* ptr = s->ptr;
	uint8_t* block = s->block;
	uint16_t* mtfv = s->mtfv;
	makeMaps_e(s);
	EOB = s->nInUse+1;

	for (i = 0; i <= EOB; i++)
		s->mtfFreq[i] = 0;

	wr = 0;
	zPend = 0;
	for (i = 0; i < s->nInUse; i++)
		yy[i] = (uint8_t) i;

	for (i = 0; i < s->nblock; i++) {
		uint8_t ll_i;
		AssertD(wr <= i, "generateMTFValues(1)");
		j = ptr[i] - 1;
		if (j < 0)
			j += s->nblock;
		ll_i = s->unseqToSeq[block[j]];
		AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");
		if (yy[0] == ll_i) {
			zPend++;
		} else {
			if (zPend > 0) {
				zPend--;
				while (1) {
					if (zPend & 1) {
						mtfv[wr] = BZ_RUNB; wr++;
						s->mtfFreq[BZ_RUNB]++;
					} else {
						mtfv[wr] = BZ_RUNA; wr++;
						s->mtfFreq[BZ_RUNA]++;
					}
					if (zPend < 2) break;
					zPend = (uint32_t)(zPend - 2) / 2;
					/* bbox: unsigned div is easier */
				}
				zPend = 0;
			}
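			/*
			 * Illustrative note (not in the original source): the loop
			 * above writes the run length in "bijective base 2", least
			 * significant digit first, with digit values RUNA = 1 and
			 * RUNB = 2.  E.g. a run of 5 zeroes emits RUNA RUNB
			 * (1*1 + 2*2 = 5); a run of 4 emits RUNB RUNA (2*1 + 1*2 = 4).
			 */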
			{
				register uint8_t rtmp;
				register uint8_t* ryy_j;
				register uint8_t rll_i;
				rtmp = yy[1];
				yy[1] = yy[0];
				ryy_j = &(yy[1]);
				rll_i = ll_i;
				while (rll_i != rtmp) {
					register uint8_t rtmp2;
					ryy_j++;
					rtmp2 = rtmp;
					rtmp = *ryy_j;
					*ryy_j = rtmp2;
				}
				yy[0] = rtmp;
				j = ryy_j - &(yy[0]);
				mtfv[wr] = j+1;
				wr++;
				s->mtfFreq[j+1]++;
			}
		}
	}

	if (zPend > 0) {
		zPend--;
		while (1) {
			if (zPend & 1) {
				mtfv[wr] = BZ_RUNB;
				wr++;
				s->mtfFreq[BZ_RUNB]++;
			} else {
				mtfv[wr] = BZ_RUNA;
				wr++;
				s->mtfFreq[BZ_RUNA]++;
			}
			if (zPend < 2)
				break;
			zPend = (uint32_t)(zPend - 2) / 2;
			/* bbox: unsigned div is easier */
		}
		zPend = 0;
	}

	mtfv[wr] = EOB;
	wr++;
	s->mtfFreq[EOB]++;

	s->nMTF = wr;
}

/*---------------------------------------------------*/
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15

static NOINLINE
void sendMTFValues(EState* s)
{
	int32_t v, t, i, j, gs, ge, bt, bc, iter;
	int32_t nSelectors, alphaSize, minLen, maxLen, selCtr;
	int32_t nGroups;

	/*
	 * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * is a global since the decoder also needs it.
	 *
	 * int32_t code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * int32_t rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * are also globals only used in this proc.
	 * Made global to keep stack frame size small.
	 */
#define code     sendMTFValues__code
#define rfreq    sendMTFValues__rfreq
#define len_pack sendMTFValues__len_pack

	uint16_t cost[BZ_N_GROUPS];
	int32_t  fave[BZ_N_GROUPS];

	uint16_t* mtfv = s->mtfv;
	alphaSize = s->nInUse + 2;
	for (t = 0; t < BZ_N_GROUPS; t++)
		for (v = 0; v < alphaSize; v++)
			s->len[t][v] = BZ_GREATER_ICOST;

	/*--- Decide how many coding tables to use ---*/
	AssertH(s->nMTF > 0, 3001);
	if (s->nMTF < 200)  nGroups = 2; else
	if (s->nMTF < 600)  nGroups = 3; else
	if (s->nMTF < 1200) nGroups = 4; else
	if (s->nMTF < 2400) nGroups = 5; else
	nGroups = 6;
	/*--- Generate an initial set of coding tables ---*/
	{
		int32_t nPart, remF, tFreq, aFreq;

		nPart = nGroups;
		remF = s->nMTF;
		gs = 0;
		while (nPart > 0) {
			tFreq = remF / nPart;
			ge = gs - 1;
			aFreq = 0;
			while (aFreq < tFreq && ge < alphaSize-1) {
				ge++;
				aFreq += s->mtfFreq[ge];
			}
			if (ge > gs
			 && nPart != nGroups && nPart != 1
			 && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */
			) {
				aFreq -= s->mtfFreq[ge];
				ge--;
			}
			for (v = 0; v < alphaSize; v++)
				if (v >= gs && v <= ge)
					s->len[nPart-1][v] = BZ_LESSER_ICOST;
				else
					s->len[nPart-1][v] = BZ_GREATER_ICOST;
			nPart--;
			gs = ge + 1;
			remF -= aFreq;
		}
	}
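	/*
	 * Illustrative note (not in the original source): each pass carves
	 * off a contiguous symbol range holding roughly remF/nPart of the
	 * remaining MTF symbol occurrences, so every one of the nGroups
	 * initial tables favours (BZ_LESSER_ICOST) an alphabet slice of
	 * approximately equal total frequency.
	 */
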
	/*
	 * Iterate up to BZ_N_ITERS times to improve the tables.
	 */
	for (iter = 0; iter < BZ_N_ITERS; iter++) {
		for (t = 0; t < nGroups; t++)
			fave[t] = 0;

		for (t = 0; t < nGroups; t++)
			for (v = 0; v < alphaSize; v++)
				s->rfreq[t][v] = 0;

#if CONFIG_BZIP2_FAST >= 5
		/*
		 * Set up an auxiliary length table which is used to fast-track
		 * the common case (nGroups == 6).
		 */
		if (nGroups == 6) {
			for (v = 0; v < alphaSize; v++) {
				s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
				s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
				s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
			}
		}
#endif
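		/*
		 * Illustrative note (not in the original source): each len_pack
		 * word holds two 16-bit code lengths, so one 32-bit add in the
		 * BZ_ITER loop below accumulates the cost for two tables at
		 * once.  The halves cannot carry into each other: a group is
		 * at most 50 symbols of length <= 17, so each 16-bit subtotal
		 * is at most 850, far below 65536.
		 */
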
		nSelectors = 0;
		gs = 0;
		while (1) {
			/*--- Set group start & end marks. --*/
			if (gs >= s->nMTF)
				break;
			ge = gs + BZ_G_SIZE - 1;
			if (ge >= s->nMTF)
				ge = s->nMTF-1;

			/*
			 * Calculate the cost of this group as coded
			 * by each of the coding tables.
			 */
			for (t = 0; t < nGroups; t++)
				cost[t] = 0;
#if CONFIG_BZIP2_FAST >= 5
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
				register uint32_t cost01, cost23, cost45;
				register uint16_t icv;
				cost01 = cost23 = cost45 = 0;
#define BZ_ITER(nn) \
	icv = mtfv[gs+(nn)]; \
	cost01 += s->len_pack[icv][0]; \
	cost23 += s->len_pack[icv][1]; \
	cost45 += s->len_pack[icv][2];
				BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
				BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
				BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
				BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
				BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
				BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
				BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
				BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
				BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
				BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
#undef BZ_ITER
				cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
				cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
				cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				for (i = gs; i <= ge; i++) {
					uint16_t icv = mtfv[i];
					for (t = 0; t < nGroups; t++)
						cost[t] += s->len[t][icv];
				}
			}
			/*
			 * Find the coding table which is best for this group,
			 * and record its identity in the selector table.
			 */
			/*bc = 999999999;*/
			/*bt = -1;*/
			bc = cost[0];
			bt = 0;
			for (t = 1 /*0*/; t < nGroups; t++) {
				if (cost[t] < bc) {
					bc = cost[t];
					bt = t;
				}
			}
			fave[bt]++;
			s->selector[nSelectors] = bt;
			nSelectors++;
			/*
			 * Increment the symbol frequencies for the selected table.
			 */
			/* 1% faster compress. +800 bytes */
#if CONFIG_BZIP2_FAST >= 4
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
				BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
				BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
				BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
				BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
				BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
				BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
				BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
				BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
				BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
				BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
#undef BZ_ITUR
				gs = ge + 1;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				while (gs <= ge) {
					s->rfreq[bt][mtfv[gs]]++;
					gs++;
				}
				/* already is: gs = ge + 1; */
			}
		}

		/*
		 * Recompute the tables based on the accumulated frequencies.
		 */
		/* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
		 * comment in huffman.c for details. */
		for (t = 0; t < nGroups; t++)
			BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
	}

	AssertH(nGroups < 8, 3002);
	AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);

	/*--- Compute MTF values for the selectors. ---*/
	{
		uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp;

		for (i = 0; i < nGroups; i++)
			pos[i] = i;
		for (i = 0; i < nSelectors; i++) {
			ll_i = s->selector[i];
			j = 0;
			tmp = pos[j];
			while (ll_i != tmp) {
				j++;
				tmp2 = tmp;
				tmp = pos[j];
				pos[j] = tmp2;
			}
			pos[0] = tmp;
			s->selectorMtf[i] = j;
		}
	}

	/*--- Assign actual codes for the tables. --*/
	for (t = 0; t < nGroups; t++) {
		minLen = 32;
		maxLen = 0;
		for (i = 0; i < alphaSize; i++) {
			if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
			if (s->len[t][i] < minLen) minLen = s->len[t][i];
		}
		AssertH(!(maxLen > 17 /*20*/), 3004);
		AssertH(!(minLen < 1), 3005);
		BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
	}

	/*--- Transmit the mapping table. ---*/
	{
		/* bbox: optimized a bit more than in bzip2 */
		int inUse16 = 0;
		for (i = 0; i < 16; i++) {
			if (sizeof(long) <= 4) {
				inUse16 = inUse16*2 +
					((*(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 0])
					| *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 4])
					| *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 8])
					| *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 12])) != 0);
			} else { /* Our CPU can do better */
				inUse16 = inUse16*2 +
					((*(bb__aliased_uint64_t*)&(s->inUse[i * 16 + 0])
					| *(bb__aliased_uint64_t*)&(s->inUse[i * 16 + 8])) != 0);
			}
		}

		bsW(s, 16, inUse16);

		inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */
		for (i = 0; i < 16; i++) {
			if (inUse16 < 0) {
				unsigned v16 = 0;
				for (j = 0; j < 16; j++)
					v16 = v16*2 + s->inUse[i * 16 + j];
				bsW(s, 16, v16);
			}
			inUse16 <<= 1;
		}
	}
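	/*
	 * Illustrative example (not in the original source): the mapping
	 * table is a two-level bitmap.  If only 'a' (97) and 'b' (98) are
	 * in use, both fall into 16-byte range 6 (bytes 96..111), so the
	 * first word is 0x0200 (bit for range 6, counted from the MSB),
	 * followed by one word per used range, here 0x6000 (offsets 1 and
	 * 2 set): 32 bits instead of a full 256-bit map.
	 */
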
	/*--- Now the selectors. ---*/
	bsW(s, 3, nGroups);
	bsW(s, 15, nSelectors);
	for (i = 0; i < nSelectors; i++) {
		for (j = 0; j < s->selectorMtf[i]; j++)
			bsW(s, 1, 1);
		bsW(s, 1, 0);
	}
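	/*
	 * Illustrative note (not in the original source): each selector's
	 * MTF value j is written in unary as j 1-bits followed by a 0-bit,
	 * e.g. j = 0 emits "0" and j = 2 emits "110".
	 */
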
	/*--- Now the coding tables. ---*/
	for (t = 0; t < nGroups; t++) {
		int32_t curr = s->len[t][0];
		bsW(s, 5, curr);
		for (i = 0; i < alphaSize; i++) {
			while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ };
			while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ };
			bsW(s, 1, 0);
		}
	}
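	/*
	 * Illustrative example (not in the original source): code lengths
	 * are delta-coded.  With a starting length of 3 and symbol lengths
	 * 3, 4, 2, the emitted bits are "0" (no change), "10 0" (one
	 * increment), and "11 11 0" (two decrements).
	 */
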
	/*--- And finally, the block data proper ---*/
	selCtr = 0;
	gs = 0;
	while (1) {
		if (gs >= s->nMTF)
			break;
		ge = gs + BZ_G_SIZE - 1;
		if (ge >= s->nMTF)
			ge = s->nMTF-1;
		AssertH(s->selector[selCtr] < nGroups, 3006);

		/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
#if 0
		if (nGroups == 6 && 50 == ge-gs+1) {
			/*--- fast track the common case ---*/
			uint16_t mtfv_i;
			uint8_t* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
#define BZ_ITAH(nn) \
	mtfv_i = mtfv[gs+(nn)]; \
	bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
			BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
			BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
			BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
			BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
			BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
			BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
			BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
			BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
			BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
			BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
#undef BZ_ITAH
			gs = ge+1;
		} else
#endif
		{
			/*--- slow version which correctly handles all situations ---*/
			/* code is a bit bigger, but moves multiply out of the loop */
			uint8_t* s_len_sel_selCtr = &(s->len [s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
			while (gs <= ge) {
				bsW(s,
					s_len_sel_selCtr[mtfv[gs]],
					s_code_sel_selCtr[mtfv[gs]]
				);
				gs++;
			}
			/* already is: gs = ge+1; */
		}
		selCtr++;
	}
	AssertH(selCtr == nSelectors, 3007);
#undef code
#undef rfreq
#undef len_pack
}

/*---------------------------------------------------*/
static
void BZ2_compressBlock(EState* s, int is_last_block)
{
	if (s->nblock > 0) {
		BZ_FINALISE_CRC(s->blockCRC);
		s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
		s->combinedCRC ^= s->blockCRC;
		if (s->blockNo > 1)
			s->numZ = 0;

		BZ2_blockSort(s);
	}

	s->zbits = &((uint8_t*)s->arr2)[s->nblock];

	/*-- If this is the first block, create the stream header. --*/
	if (s->blockNo == 1) {
		BZ2_bsInitWrite(s);
		/*bsPutU8(s, BZ_HDR_B);*/
		/*bsPutU8(s, BZ_HDR_Z);*/
		/*bsPutU8(s, BZ_HDR_h);*/
		/*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/
		bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k);
	}
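	/*
	 * Illustrative note (not in the original source): at the default
	 * block size of 900k (blockSize100k = 9) this writes the familiar
	 * "BZh9" magic, i.e. 0x425A6839.
	 */
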
	if (s->nblock > 0) {
		/*bsPutU8(s, 0x31);*/
		/*bsPutU8(s, 0x41);*/
		/*bsPutU8(s, 0x59);*/
		/*bsPutU8(s, 0x26);*/
		bsPutU32(s, 0x31415926);
		/*bsPutU8(s, 0x53);*/
		/*bsPutU8(s, 0x59);*/
		bsPutU16(s, 0x5359);
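		/*
		 * Illustrative note (not in the original source): the 48-bit
		 * block magic 0x314159265359 spells out the decimal digits
		 * of pi.
		 */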

		/*-- Now the block's CRC, so it is in a known place. --*/
		bsPutU32(s, s->blockCRC);

		/*
		 * Now a single bit indicating (non-)randomisation.
		 * As of version 0.9.5, we use a better sorting algorithm
		 * which makes randomisation unnecessary. So always set
		 * the randomised bit to 'no'. Of course, the decoder
		 * still needs to be able to handle randomised blocks
		 * so as to maintain backwards compatibility with
		 * older versions of bzip2.
		 */
		bsW(s, 1, 0);

		bsW(s, 24, s->origPtr);
		generateMTFValues(s);
		sendMTFValues(s);
	}

	/*-- If this is the last block, add the stream trailer. --*/
	if (is_last_block) {
		/*bsPutU8(s, 0x17);*/
		/*bsPutU8(s, 0x72);*/
		/*bsPutU8(s, 0x45);*/
		/*bsPutU8(s, 0x38);*/
		bsPutU32(s, 0x17724538);
		/*bsPutU8(s, 0x50);*/
		/*bsPutU8(s, 0x90);*/
		bsPutU16(s, 0x5090);
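		/*
		 * Illustrative note (not in the original source): the 48-bit
		 * end-of-stream magic 0x177245385090 spells out the decimal
		 * digits of sqrt(pi).
		 */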
		bsPutU32(s, s->combinedCRC);
		bsFinishWrite(s);
	}
}

/*-------------------------------------------------------------*/
/*--- end                                        compress.c ---*/
/*-------------------------------------------------------------*/